@mastra/core 0.24.0 → 1.0.0-beta.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (810)
  1. package/CHANGELOG.md +478 -141
  2. package/README.md +2 -3
  3. package/agent/message-list.d.ts +1 -0
  4. package/dist/action/index.d.ts +0 -2
  5. package/dist/action/index.d.ts.map +1 -1
  6. package/dist/agent/__tests__/mock-model.d.ts +3 -1
  7. package/dist/agent/__tests__/mock-model.d.ts.map +1 -1
  8. package/dist/agent/agent-legacy.d.ts +176 -0
  9. package/dist/agent/agent-legacy.d.ts.map +1 -0
  10. package/dist/agent/agent.d.ts +83 -229
  11. package/dist/agent/agent.d.ts.map +1 -1
  12. package/dist/agent/agent.types.d.ts +10 -25
  13. package/dist/agent/agent.types.d.ts.map +1 -1
  14. package/dist/agent/index.cjs +10 -26
  15. package/dist/agent/index.d.ts +1 -2
  16. package/dist/agent/index.d.ts.map +1 -1
  17. package/dist/agent/index.js +2 -2
  18. package/dist/agent/message-list/index.cjs +16 -0
  19. package/dist/agent/message-list/index.d.ts +77 -87
  20. package/dist/agent/message-list/index.d.ts.map +1 -1
  21. package/dist/agent/message-list/index.js +3 -0
  22. package/dist/agent/message-list/prompt/attachments-to-parts.d.ts +2 -2
  23. package/dist/agent/message-list/prompt/attachments-to-parts.d.ts.map +1 -1
  24. package/dist/agent/message-list/prompt/convert-to-mastra-v1.d.ts +2 -2
  25. package/dist/agent/message-list/prompt/download-assets.d.ts.map +1 -1
  26. package/dist/agent/message-list/prompt/invalid-content-error.d.ts +1 -1
  27. package/dist/agent/message-list/prompt/invalid-content-error.d.ts.map +1 -1
  28. package/dist/agent/message-list/types.d.ts +2 -1
  29. package/dist/agent/message-list/types.d.ts.map +1 -1
  30. package/dist/agent/message-list/utils/ai-v4-v5/core-model-message.d.ts +3 -2
  31. package/dist/agent/message-list/utils/ai-v4-v5/core-model-message.d.ts.map +1 -1
  32. package/dist/agent/message-list/utils/ai-v4-v5/ui-message.d.ts +3 -2
  33. package/dist/agent/message-list/utils/ai-v4-v5/ui-message.d.ts.map +1 -1
  34. package/dist/agent/message-list/utils/ai-v5/gemini-compatibility.d.ts +1 -1
  35. package/dist/agent/message-list/utils/ai-v5/gemini-compatibility.d.ts.map +1 -1
  36. package/dist/agent/message-list/utils/convert-messages.d.ts +4 -4
  37. package/dist/agent/message-list/utils/convert-messages.d.ts.map +1 -1
  38. package/dist/agent/test-utils.d.ts +2 -79
  39. package/dist/agent/test-utils.d.ts.map +1 -1
  40. package/dist/agent/trip-wire.d.ts +1 -1
  41. package/dist/agent/trip-wire.d.ts.map +1 -1
  42. package/dist/agent/types.d.ts +23 -37
  43. package/dist/agent/types.d.ts.map +1 -1
  44. package/dist/agent/utils.d.ts +15 -64
  45. package/dist/agent/utils.d.ts.map +1 -1
  46. package/dist/agent/workflows/prepare-stream/index.d.ts +16 -20
  47. package/dist/agent/workflows/prepare-stream/index.d.ts.map +1 -1
  48. package/dist/agent/workflows/prepare-stream/map-results-step.d.ts +5 -7
  49. package/dist/agent/workflows/prepare-stream/map-results-step.d.ts.map +1 -1
  50. package/dist/agent/workflows/prepare-stream/prepare-memory-step.d.ts +14 -18
  51. package/dist/agent/workflows/prepare-stream/prepare-memory-step.d.ts.map +1 -1
  52. package/dist/agent/workflows/prepare-stream/prepare-tools-step.d.ts +6 -6
  53. package/dist/agent/workflows/prepare-stream/prepare-tools-step.d.ts.map +1 -1
  54. package/dist/agent/workflows/prepare-stream/schema.d.ts +10 -12
  55. package/dist/agent/workflows/prepare-stream/schema.d.ts.map +1 -1
  56. package/dist/agent/workflows/prepare-stream/stream-step.d.ts +3 -7
  57. package/dist/agent/workflows/prepare-stream/stream-step.d.ts.map +1 -1
  58. package/dist/ai-sdk.types.d.ts +4705 -0
  59. package/dist/base.cjs +2 -2
  60. package/dist/base.d.ts +0 -16
  61. package/dist/base.d.ts.map +1 -1
  62. package/dist/base.js +1 -1
  63. package/dist/bundler/index.cjs +2 -2
  64. package/dist/bundler/index.d.ts +2 -1
  65. package/dist/bundler/index.d.ts.map +1 -1
  66. package/dist/bundler/index.js +1 -1
  67. package/dist/cache/index.cjs +3 -3
  68. package/dist/cache/index.js +1 -1
  69. package/dist/chunk-26SQQNMU.js +15754 -0
  70. package/dist/chunk-26SQQNMU.js.map +1 -0
  71. package/dist/{chunk-FHVFGVIO.js → chunk-32CTMD2C.js} +90 -32
  72. package/dist/chunk-32CTMD2C.js.map +1 -0
  73. package/dist/chunk-4CDL2QJT.js +649 -0
  74. package/dist/chunk-4CDL2QJT.js.map +1 -0
  75. package/dist/chunk-5WRI5ZAA.js +29 -0
  76. package/dist/{chunk-3HXBPDKN.js.map → chunk-5WRI5ZAA.js.map} +1 -1
  77. package/dist/{chunk-KAEQISOW.js → chunk-76K3IYWM.js} +5 -5
  78. package/dist/chunk-76K3IYWM.js.map +1 -0
  79. package/dist/chunk-7AHYOMHJ.js +149 -0
  80. package/dist/chunk-7AHYOMHJ.js.map +1 -0
  81. package/dist/{chunk-BWGXM3D4.js → chunk-7SKXKUYT.js} +712 -541
  82. package/dist/chunk-7SKXKUYT.js.map +1 -0
  83. package/dist/{chunk-UZKIGB7M.cjs → chunk-BMAFVZ2D.cjs} +5 -5
  84. package/dist/chunk-BMAFVZ2D.cjs.map +1 -0
  85. package/dist/chunk-BNBRQS7N.js +910 -0
  86. package/dist/chunk-BNBRQS7N.js.map +1 -0
  87. package/dist/{chunk-34ZCWSUA.js → chunk-BU4IAJWF.js} +6 -9
  88. package/dist/chunk-BU4IAJWF.js.map +1 -0
  89. package/dist/{chunk-ABZOBBLL.cjs → chunk-BXOL277H.cjs} +735 -562
  90. package/dist/chunk-BXOL277H.cjs.map +1 -0
  91. package/dist/{chunk-6TEQIYXV.cjs → chunk-CBAB7GOD.cjs} +15 -15
  92. package/dist/chunk-CBAB7GOD.cjs.map +1 -0
  93. package/dist/{chunk-A5KDVZDL.cjs → chunk-DSNPWVIG.cjs} +9 -9
  94. package/dist/chunk-DSNPWVIG.cjs.map +1 -0
  95. package/dist/chunk-DZUJEN5N.cjs +32 -0
  96. package/dist/{chunk-EBVYYC2Q.cjs.map → chunk-DZUJEN5N.cjs.map} +1 -1
  97. package/dist/chunk-E7K4FTLN.cjs +273 -0
  98. package/dist/chunk-E7K4FTLN.cjs.map +1 -0
  99. package/dist/{chunk-MJMID7LX.cjs → chunk-ECFXGXWO.cjs} +609 -644
  100. package/dist/chunk-ECFXGXWO.cjs.map +1 -0
  101. package/dist/chunk-ET6UOTTU.cjs +154 -0
  102. package/dist/chunk-ET6UOTTU.cjs.map +1 -0
  103. package/dist/chunk-FD734TPS.cjs +15818 -0
  104. package/dist/chunk-FD734TPS.cjs.map +1 -0
  105. package/dist/chunk-GGYKYORQ.cjs +2060 -0
  106. package/dist/chunk-GGYKYORQ.cjs.map +1 -0
  107. package/dist/{chunk-HLRWYUFN.js → chunk-GRBGQ2GE.js} +5 -5
  108. package/dist/{chunk-HLRWYUFN.js.map → chunk-GRBGQ2GE.js.map} +1 -1
  109. package/dist/chunk-IU2SZXJQ.cjs +913 -0
  110. package/dist/chunk-IU2SZXJQ.cjs.map +1 -0
  111. package/dist/{chunk-GPWMM745.cjs → chunk-J7O6WENZ.cjs} +5 -5
  112. package/dist/{chunk-GPWMM745.cjs.map → chunk-J7O6WENZ.cjs.map} +1 -1
  113. package/dist/{chunk-PZUZNPFM.js → chunk-JJ5O45LH.js} +3 -4
  114. package/dist/chunk-JJ5O45LH.js.map +1 -0
  115. package/dist/{chunk-E3PG7G6E.js → chunk-JV2KH24V.js} +599 -633
  116. package/dist/chunk-JV2KH24V.js.map +1 -0
  117. package/dist/chunk-KEXGB7FK.cjs +29 -0
  118. package/dist/chunk-KEXGB7FK.cjs.map +1 -0
  119. package/dist/chunk-KIZIOFZC.js +265 -0
  120. package/dist/chunk-KIZIOFZC.js.map +1 -0
  121. package/dist/{chunk-UXG7PYML.js → chunk-KJ2SW6VA.js} +9 -9
  122. package/dist/chunk-KJ2SW6VA.js.map +1 -0
  123. package/dist/{chunk-TTELJD4F.js → chunk-L54GIUCB.js} +2 -2
  124. package/dist/chunk-L54GIUCB.js.map +1 -0
  125. package/dist/chunk-L7XKOKOW.js +12385 -0
  126. package/dist/chunk-L7XKOKOW.js.map +1 -0
  127. package/dist/{chunk-TSNDVBUU.cjs → chunk-MR7ZWBL6.cjs} +2 -2
  128. package/dist/chunk-MR7ZWBL6.cjs.map +1 -0
  129. package/dist/chunk-MV7KHWUT.js +2031 -0
  130. package/dist/chunk-MV7KHWUT.js.map +1 -0
  131. package/dist/{chunk-3NTOFNIU.js → chunk-OJNJA5ZI.js} +3 -3
  132. package/dist/{chunk-3NTOFNIU.js.map → chunk-OJNJA5ZI.js.map} +1 -1
  133. package/dist/{chunk-3VXXCPKX.js → chunk-P6APHXPZ.js} +334 -410
  134. package/dist/chunk-P6APHXPZ.js.map +1 -0
  135. package/dist/{chunk-77JHIM4E.cjs → chunk-QCQLOMJM.cjs} +348 -424
  136. package/dist/chunk-QCQLOMJM.cjs.map +1 -0
  137. package/dist/chunk-QM5SRDJX.js +9022 -0
  138. package/dist/chunk-QM5SRDJX.js.map +1 -0
  139. package/dist/{chunk-WBAXXG34.cjs → chunk-QUKUN6NR.cjs} +32 -66
  140. package/dist/chunk-QUKUN6NR.cjs.map +1 -0
  141. package/dist/{chunk-ROS5CMJS.cjs → chunk-QUZGDSWE.cjs} +63 -50
  142. package/dist/chunk-QUZGDSWE.cjs.map +1 -0
  143. package/dist/chunk-S5MJLXMG.cjs +12424 -0
  144. package/dist/chunk-S5MJLXMG.cjs.map +1 -0
  145. package/dist/chunk-S6URFGCZ.js +27 -0
  146. package/dist/chunk-S6URFGCZ.js.map +1 -0
  147. package/dist/chunk-SVLMF4UZ.cjs +9033 -0
  148. package/dist/chunk-SVLMF4UZ.cjs.map +1 -0
  149. package/dist/{chunk-ZMELUU72.js → chunk-TQTAMPSC.js} +3 -3
  150. package/dist/chunk-TQTAMPSC.js.map +1 -0
  151. package/dist/{chunk-5NTO7S5I.cjs → chunk-TWH4PTDG.cjs} +2 -4
  152. package/dist/chunk-TWH4PTDG.cjs.map +1 -0
  153. package/dist/{chunk-WCHE6FJ7.js → chunk-UIZSWUKP.js} +15 -50
  154. package/dist/chunk-UIZSWUKP.js.map +1 -0
  155. package/dist/{chunk-Z4RIRDU3.js → chunk-VJUZZB2I.js} +45 -32
  156. package/dist/chunk-VJUZZB2I.js.map +1 -0
  157. package/dist/{chunk-MCASUJWY.cjs → chunk-VOY2RXOC.cjs} +36 -59
  158. package/dist/chunk-VOY2RXOC.cjs.map +1 -0
  159. package/dist/{chunk-3JX2Y3WH.cjs → chunk-VV753WCB.cjs} +18 -21
  160. package/dist/chunk-VV753WCB.cjs.map +1 -0
  161. package/dist/chunk-WM6CK2F3.cjs +674 -0
  162. package/dist/chunk-WM6CK2F3.cjs.map +1 -0
  163. package/dist/{chunk-PFXXH2RP.js → chunk-X7JMA3IY.js} +15 -15
  164. package/dist/chunk-X7JMA3IY.js.map +1 -0
  165. package/dist/{chunk-NR77P3TK.js → chunk-XEVG546F.js} +14 -37
  166. package/dist/chunk-XEVG546F.js.map +1 -0
  167. package/dist/{chunk-QFF5JUKT.cjs → chunk-Y63IFHEZ.cjs} +4 -4
  168. package/dist/chunk-Y63IFHEZ.cjs.map +1 -0
  169. package/dist/{chunk-B7V6NYWH.cjs → chunk-Y6ROD72V.cjs} +4 -4
  170. package/dist/{chunk-B7V6NYWH.cjs.map → chunk-Y6ROD72V.cjs.map} +1 -1
  171. package/dist/{chunk-7J3XX4AO.cjs → chunk-YIK3ASEG.cjs} +100 -42
  172. package/dist/chunk-YIK3ASEG.cjs.map +1 -0
  173. package/dist/deployer/index.cjs +8 -6
  174. package/dist/deployer/index.cjs.map +1 -1
  175. package/dist/deployer/index.js +10 -1
  176. package/dist/deployer/index.js.map +1 -1
  177. package/dist/di/index.cjs +3 -3
  178. package/dist/di/index.d.ts +1 -1
  179. package/dist/di/index.js +1 -1
  180. package/dist/error/index.cjs +6 -6
  181. package/dist/error/index.d.ts +0 -1
  182. package/dist/error/index.d.ts.map +1 -1
  183. package/dist/error/index.js +1 -1
  184. package/dist/{scores → evals}/base.d.ts +20 -18
  185. package/dist/evals/base.d.ts.map +1 -0
  186. package/dist/{scores → evals}/base.test-utils.d.ts +25 -25
  187. package/dist/evals/base.test-utils.d.ts.map +1 -0
  188. package/dist/{scores → evals}/hooks.d.ts +3 -3
  189. package/dist/evals/hooks.d.ts.map +1 -0
  190. package/dist/evals/index.cjs +37 -0
  191. package/dist/evals/index.d.ts +4 -0
  192. package/dist/evals/index.d.ts.map +1 -0
  193. package/dist/evals/index.js +4 -0
  194. package/dist/{scores/run-experiment → evals/run}/index.d.ts +18 -18
  195. package/dist/evals/run/index.d.ts.map +1 -0
  196. package/dist/evals/run/scorerAccumulator.d.ts.map +1 -0
  197. package/dist/{scores → evals}/scoreTraces/index.cjs +79 -99
  198. package/dist/evals/scoreTraces/index.cjs.map +1 -0
  199. package/dist/evals/scoreTraces/index.d.ts.map +1 -0
  200. package/dist/{scores → evals}/scoreTraces/index.js +72 -92
  201. package/dist/evals/scoreTraces/index.js.map +1 -0
  202. package/dist/{scores → evals}/scoreTraces/scoreTraces.d.ts +2 -2
  203. package/dist/evals/scoreTraces/scoreTraces.d.ts.map +1 -0
  204. package/dist/{scores → evals}/scoreTraces/scoreTracesWorkflow.d.ts +11 -11
  205. package/dist/evals/scoreTraces/scoreTracesWorkflow.d.ts.map +1 -0
  206. package/dist/{scores → evals}/scoreTraces/utils.d.ts +7 -7
  207. package/dist/evals/scoreTraces/utils.d.ts.map +1 -0
  208. package/dist/{scores → evals}/types.d.ts +22 -22
  209. package/dist/evals/types.d.ts.map +1 -0
  210. package/dist/features/index.cjs +8 -0
  211. package/dist/features/index.cjs.map +1 -0
  212. package/dist/features/index.d.ts +17 -0
  213. package/dist/features/index.d.ts.map +1 -0
  214. package/dist/features/index.js +6 -0
  215. package/dist/features/index.js.map +1 -0
  216. package/dist/hooks/index.cjs +4 -4
  217. package/dist/hooks/index.d.ts +1 -27
  218. package/dist/hooks/index.d.ts.map +1 -1
  219. package/dist/hooks/index.js +1 -1
  220. package/dist/index.cjs +2 -340
  221. package/dist/index.cjs.map +1 -1
  222. package/dist/index.d.ts +1 -18
  223. package/dist/index.d.ts.map +1 -1
  224. package/dist/index.js +1 -113
  225. package/dist/index.js.map +1 -1
  226. package/dist/integration/index.cjs +87 -10
  227. package/dist/integration/index.cjs.map +1 -1
  228. package/dist/integration/index.js +88 -1
  229. package/dist/integration/index.js.map +1 -1
  230. package/dist/integration/integration.d.ts +3 -3
  231. package/dist/integration/integration.d.ts.map +1 -1
  232. package/dist/llm/index.cjs +7 -7
  233. package/dist/llm/index.d.ts +5 -6
  234. package/dist/llm/index.d.ts.map +1 -1
  235. package/dist/llm/index.js +1 -1
  236. package/dist/llm/model/base.types.d.ts +6 -7
  237. package/dist/llm/model/base.types.d.ts.map +1 -1
  238. package/dist/llm/model/model.d.ts +7 -7
  239. package/dist/llm/model/model.d.ts.map +1 -1
  240. package/dist/llm/model/model.loop.d.ts +1 -1
  241. package/dist/llm/model/model.loop.d.ts.map +1 -1
  242. package/dist/llm/model/model.loop.types.d.ts +3 -3
  243. package/dist/llm/model/model.loop.types.d.ts.map +1 -1
  244. package/dist/llm/model/provider-types.generated.d.ts +32 -7
  245. package/dist/llm/model/resolve-model.d.ts +8 -8
  246. package/dist/llm/model/shared.types.d.ts +2 -2
  247. package/dist/llm/model/shared.types.d.ts.map +1 -1
  248. package/dist/logger/constants.d.ts +1 -1
  249. package/dist/logger/default-logger.d.ts +2 -2
  250. package/dist/logger/default-logger.d.ts.map +1 -1
  251. package/dist/logger/index.cjs +11 -11
  252. package/dist/logger/index.js +2 -2
  253. package/dist/logger/logger.d.ts +4 -4
  254. package/dist/logger/logger.d.ts.map +1 -1
  255. package/dist/logger/multi-logger.d.ts +2 -2
  256. package/dist/logger/multi-logger.d.ts.map +1 -1
  257. package/dist/logger/transport.d.ts +3 -3
  258. package/dist/logger/transport.d.ts.map +1 -1
  259. package/dist/loop/index.cjs +2 -2
  260. package/dist/loop/index.js +1 -1
  261. package/dist/loop/loop.d.ts +1 -1
  262. package/dist/loop/loop.d.ts.map +1 -1
  263. package/dist/loop/network/index.d.ts +15 -15
  264. package/dist/loop/network/index.d.ts.map +1 -1
  265. package/dist/loop/test-utils/generateText.d.ts.map +1 -1
  266. package/dist/loop/test-utils/options.d.ts.map +1 -1
  267. package/dist/loop/test-utils/resultObject.d.ts.map +1 -1
  268. package/dist/loop/test-utils/streamObject.d.ts.map +1 -1
  269. package/dist/loop/types.d.ts +4 -7
  270. package/dist/loop/types.d.ts.map +1 -1
  271. package/dist/loop/workflows/agentic-execution/index.d.ts +55 -55
  272. package/dist/loop/workflows/agentic-execution/index.d.ts.map +1 -1
  273. package/dist/loop/workflows/agentic-execution/llm-execution-step.d.ts +38 -38
  274. package/dist/loop/workflows/agentic-execution/llm-execution-step.d.ts.map +1 -1
  275. package/dist/loop/workflows/agentic-execution/llm-mapping-step.d.ts +20 -20
  276. package/dist/loop/workflows/agentic-execution/llm-mapping-step.d.ts.map +1 -1
  277. package/dist/loop/workflows/agentic-execution/tool-call-step.d.ts +2 -2
  278. package/dist/loop/workflows/agentic-execution/tool-call-step.d.ts.map +1 -1
  279. package/dist/loop/workflows/agentic-loop/index.d.ts +55 -55
  280. package/dist/loop/workflows/agentic-loop/index.d.ts.map +1 -1
  281. package/dist/loop/workflows/schema.d.ts +19 -20
  282. package/dist/loop/workflows/schema.d.ts.map +1 -1
  283. package/dist/loop/workflows/stream.d.ts +1 -1
  284. package/dist/loop/workflows/stream.d.ts.map +1 -1
  285. package/dist/mastra/hooks.d.ts +2 -2
  286. package/dist/mastra/hooks.d.ts.map +1 -1
  287. package/dist/mastra/index.cjs +2 -2
  288. package/dist/mastra/index.d.ts +389 -180
  289. package/dist/mastra/index.d.ts.map +1 -1
  290. package/dist/mastra/index.js +1 -1
  291. package/dist/mcp/index.cjs +41 -4
  292. package/dist/mcp/index.cjs.map +1 -1
  293. package/dist/mcp/index.d.ts +6 -4
  294. package/dist/mcp/index.d.ts.map +1 -1
  295. package/dist/mcp/index.js +39 -2
  296. package/dist/mcp/index.js.map +1 -1
  297. package/dist/mcp/types.d.ts +0 -15
  298. package/dist/mcp/types.d.ts.map +1 -1
  299. package/dist/memory/index.cjs +327 -13
  300. package/dist/memory/index.cjs.map +1 -1
  301. package/dist/memory/index.d.ts +1 -0
  302. package/dist/memory/index.d.ts.map +1 -1
  303. package/dist/memory/index.js +327 -1
  304. package/dist/memory/index.js.map +1 -1
  305. package/dist/memory/memory.d.ts +33 -57
  306. package/dist/memory/memory.d.ts.map +1 -1
  307. package/dist/memory/mock.d.ts +61 -0
  308. package/dist/memory/mock.d.ts.map +1 -0
  309. package/dist/memory/types.d.ts +48 -45
  310. package/dist/memory/types.d.ts.map +1 -1
  311. package/dist/models-dev-7U4NRMM3.js +3 -0
  312. package/dist/{models-dev-4VGIWYS3.js.map → models-dev-7U4NRMM3.js.map} +1 -1
  313. package/dist/models-dev-VKSAQPRK.cjs +12 -0
  314. package/dist/{models-dev-AXZASLL2.cjs.map → models-dev-VKSAQPRK.cjs.map} +1 -1
  315. package/dist/netlify-2IDXTNFW.cjs +12 -0
  316. package/dist/{netlify-TX6V7SJJ.cjs.map → netlify-2IDXTNFW.cjs.map} +1 -1
  317. package/dist/netlify-42ZNWIDQ.js +3 -0
  318. package/dist/{netlify-VJXBII33.js.map → netlify-42ZNWIDQ.js.map} +1 -1
  319. package/dist/{ai-tracing → observability}/context.d.ts +5 -5
  320. package/dist/observability/context.d.ts.map +1 -0
  321. package/dist/observability/index.cjs +36 -0
  322. package/dist/observability/index.d.ts +11 -0
  323. package/dist/observability/index.d.ts.map +1 -0
  324. package/dist/observability/index.js +3 -0
  325. package/dist/observability/no-op.d.ts +22 -0
  326. package/dist/observability/no-op.d.ts.map +1 -0
  327. package/dist/observability/types/index.d.ts +2 -0
  328. package/dist/observability/types/index.d.ts.map +1 -0
  329. package/dist/{ai-tracing/types.d.ts → observability/types/tracing.d.ts} +161 -107
  330. package/dist/observability/types/tracing.d.ts.map +1 -0
  331. package/dist/observability/utils.d.ts +12 -0
  332. package/dist/observability/utils.d.ts.map +1 -0
  333. package/dist/processors/index.cjs +11 -11
  334. package/dist/processors/index.d.ts +10 -9
  335. package/dist/processors/index.d.ts.map +1 -1
  336. package/dist/processors/index.js +1 -1
  337. package/dist/processors/processors/batch-parts.d.ts +2 -1
  338. package/dist/processors/processors/batch-parts.d.ts.map +1 -1
  339. package/dist/processors/processors/language-detector.d.ts +6 -5
  340. package/dist/processors/processors/language-detector.d.ts.map +1 -1
  341. package/dist/processors/processors/moderation.d.ts +8 -7
  342. package/dist/processors/processors/moderation.d.ts.map +1 -1
  343. package/dist/processors/processors/pii-detector.d.ts +8 -7
  344. package/dist/processors/processors/pii-detector.d.ts.map +1 -1
  345. package/dist/processors/processors/prompt-injection-detector.d.ts +6 -5
  346. package/dist/processors/processors/prompt-injection-detector.d.ts.map +1 -1
  347. package/dist/processors/processors/structured-output.d.ts +3 -2
  348. package/dist/processors/processors/structured-output.d.ts.map +1 -1
  349. package/dist/processors/processors/system-prompt-scrubber.d.ts +6 -5
  350. package/dist/processors/processors/system-prompt-scrubber.d.ts.map +1 -1
  351. package/dist/processors/processors/token-limiter.d.ts +5 -4
  352. package/dist/processors/processors/token-limiter.d.ts.map +1 -1
  353. package/dist/processors/processors/unicode-normalizer.d.ts +5 -4
  354. package/dist/processors/processors/unicode-normalizer.d.ts.map +1 -1
  355. package/dist/processors/runner.d.ts +5 -5
  356. package/dist/processors/runner.d.ts.map +1 -1
  357. package/dist/provider-registry.json +76 -18
  358. package/dist/{registry-generator-DXRSYYYT.js → registry-generator-H4YNODDH.js} +2 -2
  359. package/dist/{registry-generator-DXRSYYYT.js.map → registry-generator-H4YNODDH.js.map} +1 -1
  360. package/dist/{registry-generator-6WVOHM2L.cjs → registry-generator-MK63POJO.cjs} +2 -2
  361. package/dist/{registry-generator-6WVOHM2L.cjs.map → registry-generator-MK63POJO.cjs.map} +1 -1
  362. package/dist/relevance/index.cjs +47 -13
  363. package/dist/relevance/index.cjs.map +1 -1
  364. package/dist/relevance/index.d.ts +0 -1
  365. package/dist/relevance/index.d.ts.map +1 -1
  366. package/dist/relevance/index.js +49 -1
  367. package/dist/relevance/index.js.map +1 -1
  368. package/dist/relevance/mastra-agent/index.d.ts.map +1 -1
  369. package/dist/request-context/index.cjs +12 -0
  370. package/dist/{runtime-context → request-context}/index.d.ts +1 -1
  371. package/dist/{runtime-context → request-context}/index.d.ts.map +1 -1
  372. package/dist/request-context/index.js +3 -0
  373. package/dist/server/auth.d.ts.map +1 -1
  374. package/dist/server/index.cjs +9 -25
  375. package/dist/server/index.cjs.map +1 -1
  376. package/dist/server/index.js +4 -21
  377. package/dist/server/index.js.map +1 -1
  378. package/dist/server/types.d.ts +3 -3
  379. package/dist/server/types.d.ts.map +1 -1
  380. package/dist/storage/base.d.ts +75 -143
  381. package/dist/storage/base.d.ts.map +1 -1
  382. package/dist/storage/constants.d.ts +3 -4
  383. package/dist/storage/constants.d.ts.map +1 -1
  384. package/dist/storage/domains/index.d.ts +0 -2
  385. package/dist/storage/domains/index.d.ts.map +1 -1
  386. package/dist/storage/domains/memory/base.d.ts +19 -56
  387. package/dist/storage/domains/memory/base.d.ts.map +1 -1
  388. package/dist/storage/domains/memory/inmemory.d.ts +15 -35
  389. package/dist/storage/domains/memory/inmemory.d.ts.map +1 -1
  390. package/dist/storage/domains/observability/base.d.ts +24 -24
  391. package/dist/storage/domains/observability/base.d.ts.map +1 -1
  392. package/dist/storage/domains/observability/inmemory.d.ts +18 -18
  393. package/dist/storage/domains/observability/inmemory.d.ts.map +1 -1
  394. package/dist/storage/domains/operations/inmemory.d.ts.map +1 -1
  395. package/dist/storage/domains/scores/base.d.ts +5 -5
  396. package/dist/storage/domains/scores/base.d.ts.map +1 -1
  397. package/dist/storage/domains/scores/inmemory.d.ts +5 -5
  398. package/dist/storage/domains/scores/inmemory.d.ts.map +1 -1
  399. package/dist/storage/domains/workflows/base.d.ts +4 -11
  400. package/dist/storage/domains/workflows/base.d.ts.map +1 -1
  401. package/dist/storage/domains/workflows/inmemory.d.ts +4 -11
  402. package/dist/storage/domains/workflows/inmemory.d.ts.map +1 -1
  403. package/dist/storage/index.cjs +74 -1775
  404. package/dist/storage/index.cjs.map +1 -1
  405. package/dist/storage/index.js +1 -1744
  406. package/dist/storage/index.js.map +1 -1
  407. package/dist/storage/mock.d.ts +35 -92
  408. package/dist/storage/mock.d.ts.map +1 -1
  409. package/dist/storage/types.d.ts +77 -75
  410. package/dist/storage/types.d.ts.map +1 -1
  411. package/dist/stream/MastraAgentNetworkStream.d.ts +4 -2
  412. package/dist/stream/MastraAgentNetworkStream.d.ts.map +1 -1
  413. package/dist/stream/MastraWorkflowStream.d.ts +1 -1
  414. package/dist/stream/MastraWorkflowStream.d.ts.map +1 -1
  415. package/dist/stream/RunOutput.d.ts +1 -0
  416. package/dist/stream/RunOutput.d.ts.map +1 -1
  417. package/dist/stream/aisdk/v4/input.d.ts +1 -1
  418. package/dist/stream/aisdk/v4/input.d.ts.map +1 -1
  419. package/dist/stream/aisdk/v5/execute.d.ts +4 -11
  420. package/dist/stream/aisdk/v5/execute.d.ts.map +1 -1
  421. package/dist/stream/aisdk/v5/output.d.ts +1 -1
  422. package/dist/stream/aisdk/v5/output.d.ts.map +1 -1
  423. package/dist/stream/base/output.d.ts +1 -1
  424. package/dist/stream/base/output.d.ts.map +1 -1
  425. package/dist/stream/index.cjs +11 -11
  426. package/dist/stream/index.d.ts +1 -1
  427. package/dist/stream/index.d.ts.map +1 -1
  428. package/dist/stream/index.js +2 -2
  429. package/dist/stream/types.d.ts +2 -6
  430. package/dist/stream/types.d.ts.map +1 -1
  431. package/dist/test-utils/llm-mock.cjs +8 -8
  432. package/dist/test-utils/llm-mock.cjs.map +1 -1
  433. package/dist/test-utils/llm-mock.d.ts +1 -1
  434. package/dist/test-utils/llm-mock.d.ts.map +1 -1
  435. package/dist/test-utils/llm-mock.js +3 -3
  436. package/dist/test-utils/llm-mock.js.map +1 -1
  437. package/dist/tools/index.cjs +4 -4
  438. package/dist/tools/index.js +1 -1
  439. package/dist/tools/is-vercel-tool.cjs +2 -2
  440. package/dist/tools/is-vercel-tool.js +1 -1
  441. package/dist/tools/tool-builder/builder.d.ts +3 -71
  442. package/dist/tools/tool-builder/builder.d.ts.map +1 -1
  443. package/dist/tools/tool.d.ts +25 -25
  444. package/dist/tools/tool.d.ts.map +1 -1
  445. package/dist/tools/types.d.ts +102 -11
  446. package/dist/tools/types.d.ts.map +1 -1
  447. package/dist/tools/validation.d.ts +4 -3
  448. package/dist/tools/validation.d.ts.map +1 -1
  449. package/dist/tts/index.cjs +12 -6
  450. package/dist/tts/index.cjs.map +1 -1
  451. package/dist/tts/index.d.ts +0 -1
  452. package/dist/tts/index.d.ts.map +1 -1
  453. package/dist/tts/index.js +14 -1
  454. package/dist/tts/index.js.map +1 -1
  455. package/dist/types/dynamic-argument.d.ts +3 -3
  456. package/dist/utils/fetchWithRetry.d.ts +9 -0
  457. package/dist/utils/fetchWithRetry.d.ts.map +1 -0
  458. package/dist/utils.cjs +33 -17
  459. package/dist/utils.d.ts +39 -6
  460. package/dist/utils.d.ts.map +1 -1
  461. package/dist/utils.js +1 -1
  462. package/dist/vector/embed.d.ts +1 -1
  463. package/dist/vector/embed.d.ts.map +1 -1
  464. package/dist/vector/index.cjs +80 -8
  465. package/dist/vector/index.cjs.map +1 -1
  466. package/dist/vector/index.js +80 -1
  467. package/dist/vector/index.js.map +1 -1
  468. package/dist/vector/vector.d.ts +5 -2
  469. package/dist/vector/vector.d.ts.map +1 -1
  470. package/dist/voice/composite-voice.d.ts +1 -7
  471. package/dist/voice/composite-voice.d.ts.map +1 -1
  472. package/dist/voice/index.cjs +4 -4
  473. package/dist/voice/index.js +1 -1
  474. package/dist/voice/voice.d.ts +0 -1
  475. package/dist/voice/voice.d.ts.map +1 -1
  476. package/dist/workflows/default.d.ts +35 -43
  477. package/dist/workflows/default.d.ts.map +1 -1
  478. package/dist/workflows/evented/execution-engine.d.ts +5 -3
  479. package/dist/workflows/evented/execution-engine.d.ts.map +1 -1
  480. package/dist/workflows/evented/index.cjs +10 -10
  481. package/dist/workflows/evented/index.js +1 -1
  482. package/dist/workflows/evented/step-executor.d.ts +15 -13
  483. package/dist/workflows/evented/step-executor.d.ts.map +1 -1
  484. package/dist/workflows/evented/workflow-event-processor/index.d.ts +5 -5
  485. package/dist/workflows/evented/workflow-event-processor/index.d.ts.map +1 -1
  486. package/dist/workflows/evented/workflow-event-processor/loop.d.ts +3 -3
  487. package/dist/workflows/evented/workflow-event-processor/loop.d.ts.map +1 -1
  488. package/dist/workflows/evented/workflow-event-processor/parallel.d.ts +2 -2
  489. package/dist/workflows/evented/workflow-event-processor/sleep.d.ts +2 -2
  490. package/dist/workflows/evented/workflow-event-processor/utils.d.ts +2 -7
  491. package/dist/workflows/evented/workflow-event-processor/utils.d.ts.map +1 -1
  492. package/dist/workflows/evented/workflow.d.ts +11 -14
  493. package/dist/workflows/evented/workflow.d.ts.map +1 -1
  494. package/dist/workflows/execution-engine.d.ts +6 -6
  495. package/dist/workflows/execution-engine.d.ts.map +1 -1
  496. package/dist/workflows/index.cjs +22 -14
  497. package/dist/workflows/index.js +1 -1
  498. package/dist/workflows/step.d.ts +5 -5
  499. package/dist/workflows/step.d.ts.map +1 -1
  500. package/dist/workflows/types.d.ts +20 -44
  501. package/dist/workflows/types.d.ts.map +1 -1
  502. package/dist/workflows/utils.d.ts +14 -0
  503. package/dist/workflows/utils.d.ts.map +1 -1
  504. package/dist/workflows/workflow.d.ts +74 -64
  505. package/dist/workflows/workflow.d.ts.map +1 -1
  506. package/evals/scoreTraces.d.ts +1 -0
  507. package/evals.d.ts +1 -0
  508. package/features.d.ts +1 -0
  509. package/observability.d.ts +1 -0
  510. package/package.json +48 -64
  511. package/request-context.d.ts +1 -0
  512. package/src/_types/ai-sdk.types.d.ts +4705 -0
  513. package/src/llm/model/provider-types.generated.d.ts +32 -7
  514. package/agent/input-processor/processors.d.ts +0 -1
  515. package/ai-tracing.d.ts +0 -1
  516. package/dist/agent/index.warning.d.ts +0 -13
  517. package/dist/agent/index.warning.d.ts.map +0 -1
  518. package/dist/agent/input-processor/index.cjs +0 -28
  519. package/dist/agent/input-processor/index.d.ts +0 -10
  520. package/dist/agent/input-processor/index.d.ts.map +0 -1
  521. package/dist/agent/input-processor/index.js +0 -3
  522. package/dist/agent/input-processor/processors/index.d.ts +0 -6
  523. package/dist/agent/input-processor/processors/index.d.ts.map +0 -1
  524. package/dist/agent/input-processor/processors/language-detector.d.ts +0 -18
  525. package/dist/agent/input-processor/processors/language-detector.d.ts.map +0 -1
  526. package/dist/agent/input-processor/processors/moderation.d.ts +0 -18
  527. package/dist/agent/input-processor/processors/moderation.d.ts.map +0 -1
  528. package/dist/agent/input-processor/processors/pii-detector.d.ts +0 -18
  529. package/dist/agent/input-processor/processors/pii-detector.d.ts.map +0 -1
  530. package/dist/agent/input-processor/processors/prompt-injection-detector.d.ts +0 -18
  531. package/dist/agent/input-processor/processors/prompt-injection-detector.d.ts.map +0 -1
  532. package/dist/agent/input-processor/processors/unicode-normalizer.d.ts +0 -18
  533. package/dist/agent/input-processor/processors/unicode-normalizer.d.ts.map +0 -1
  534. package/dist/ai-tracing/context.d.ts.map +0 -1
  535. package/dist/ai-tracing/exporters/base.d.ts +0 -111
  536. package/dist/ai-tracing/exporters/base.d.ts.map +0 -1
  537. package/dist/ai-tracing/exporters/cloud.d.ts +0 -30
  538. package/dist/ai-tracing/exporters/cloud.d.ts.map +0 -1
  539. package/dist/ai-tracing/exporters/console.d.ts +0 -10
  540. package/dist/ai-tracing/exporters/console.d.ts.map +0 -1
  541. package/dist/ai-tracing/exporters/default.d.ts +0 -98
  542. package/dist/ai-tracing/exporters/default.d.ts.map +0 -1
  543. package/dist/ai-tracing/exporters/index.d.ts +0 -9
  544. package/dist/ai-tracing/exporters/index.d.ts.map +0 -1
  545. package/dist/ai-tracing/index.cjs +0 -148
  546. package/dist/ai-tracing/index.d.ts +0 -13
  547. package/dist/ai-tracing/index.d.ts.map +0 -1
  548. package/dist/ai-tracing/index.js +0 -3
  549. package/dist/ai-tracing/model-tracing.d.ts +0 -63
  550. package/dist/ai-tracing/model-tracing.d.ts.map +0 -1
  551. package/dist/ai-tracing/registry.d.ts +0 -51
  552. package/dist/ai-tracing/registry.d.ts.map +0 -1
  553. package/dist/ai-tracing/span_processors/index.d.ts +0 -5
  554. package/dist/ai-tracing/span_processors/index.d.ts.map +0 -1
  555. package/dist/ai-tracing/span_processors/sensitive-data-filter.d.ts +0 -85
  556. package/dist/ai-tracing/span_processors/sensitive-data-filter.d.ts.map +0 -1
  557. package/dist/ai-tracing/spans/base.d.ts +0 -65
  558. package/dist/ai-tracing/spans/base.d.ts.map +0 -1
  559. package/dist/ai-tracing/spans/default.d.ts +0 -13
  560. package/dist/ai-tracing/spans/default.d.ts.map +0 -1
  561. package/dist/ai-tracing/spans/index.d.ts +0 -7
  562. package/dist/ai-tracing/spans/index.d.ts.map +0 -1
  563. package/dist/ai-tracing/spans/no-op.d.ts +0 -15
  564. package/dist/ai-tracing/spans/no-op.d.ts.map +0 -1
  565. package/dist/ai-tracing/tracers/base.d.ts +0 -105
  566. package/dist/ai-tracing/tracers/base.d.ts.map +0 -1
  567. package/dist/ai-tracing/tracers/default.d.ts +0 -7
  568. package/dist/ai-tracing/tracers/default.d.ts.map +0 -1
  569. package/dist/ai-tracing/tracers/index.d.ts +0 -6
  570. package/dist/ai-tracing/tracers/index.d.ts.map +0 -1
  571. package/dist/ai-tracing/types.d.ts.map +0 -1
  572. package/dist/ai-tracing/utils.d.ts +0 -68
  573. package/dist/ai-tracing/utils.d.ts.map +0 -1
  574. package/dist/base.warning.d.ts +0 -9
  575. package/dist/base.warning.d.ts.map +0 -1
  576. package/dist/bundler/index.warning.d.ts +0 -9
  577. package/dist/bundler/index.warning.d.ts.map +0 -1
  578. package/dist/chunk-253FBVD4.cjs +0 -1303
  579. package/dist/chunk-253FBVD4.cjs.map +0 -1
  580. package/dist/chunk-2B3VLGTH.js +0 -87
  581. package/dist/chunk-2B3VLGTH.js.map +0 -1
  582. package/dist/chunk-34ZCWSUA.js.map +0 -1
  583. package/dist/chunk-3HXBPDKN.js +0 -105
  584. package/dist/chunk-3JX2Y3WH.cjs.map +0 -1
  585. package/dist/chunk-3VXXCPKX.js.map +0 -1
  586. package/dist/chunk-4R2TBRS7.cjs +0 -191
  587. package/dist/chunk-4R2TBRS7.cjs.map +0 -1
  588. package/dist/chunk-5NTO7S5I.cjs.map +0 -1
  589. package/dist/chunk-6C7VGVK4.js +0 -267
  590. package/dist/chunk-6C7VGVK4.js.map +0 -1
  591. package/dist/chunk-6KOL2B3A.cjs +0 -75
  592. package/dist/chunk-6KOL2B3A.cjs.map +0 -1
  593. package/dist/chunk-6TEQIYXV.cjs.map +0 -1
  594. package/dist/chunk-6VOPKVYH.cjs +0 -57
  595. package/dist/chunk-6VOPKVYH.cjs.map +0 -1
  596. package/dist/chunk-77JHIM4E.cjs.map +0 -1
  597. package/dist/chunk-7J3XX4AO.cjs.map +0 -1
  598. package/dist/chunk-A5KDVZDL.cjs.map +0 -1
  599. package/dist/chunk-ABZOBBLL.cjs.map +0 -1
  600. package/dist/chunk-AQGLVU53.cjs +0 -603
  601. package/dist/chunk-AQGLVU53.cjs.map +0 -1
  602. package/dist/chunk-BLUDYAPI.js +0 -596
  603. package/dist/chunk-BLUDYAPI.js.map +0 -1
  604. package/dist/chunk-BWGXM3D4.js.map +0 -1
  605. package/dist/chunk-C7OVZMPW.cjs +0 -91
  606. package/dist/chunk-C7OVZMPW.cjs.map +0 -1
  607. package/dist/chunk-DMBN72QF.cjs +0 -39
  608. package/dist/chunk-DMBN72QF.cjs.map +0 -1
  609. package/dist/chunk-DTTOVV72.js +0 -2880
  610. package/dist/chunk-DTTOVV72.js.map +0 -1
  611. package/dist/chunk-E3PG7G6E.js.map +0 -1
  612. package/dist/chunk-EBVYYC2Q.cjs +0 -111
  613. package/dist/chunk-FHVFGVIO.js.map +0 -1
  614. package/dist/chunk-GB5BIA4V.js +0 -167
  615. package/dist/chunk-GB5BIA4V.js.map +0 -1
  616. package/dist/chunk-HEQTUMUA.cjs +0 -271
  617. package/dist/chunk-HEQTUMUA.cjs.map +0 -1
  618. package/dist/chunk-HF3GZRFP.cjs +0 -9
  619. package/dist/chunk-HF3GZRFP.cjs.map +0 -1
  620. package/dist/chunk-JVV5LREI.js +0 -431
  621. package/dist/chunk-JVV5LREI.js.map +0 -1
  622. package/dist/chunk-KAEQISOW.js.map +0 -1
  623. package/dist/chunk-KDX3ZMQ2.js +0 -12
  624. package/dist/chunk-KDX3ZMQ2.js.map +0 -1
  625. package/dist/chunk-LG5B3KIW.js +0 -1215
  626. package/dist/chunk-LG5B3KIW.js.map +0 -1
  627. package/dist/chunk-LZUSZT7R.cjs +0 -170
  628. package/dist/chunk-LZUSZT7R.cjs.map +0 -1
  629. package/dist/chunk-MCASUJWY.cjs.map +0 -1
  630. package/dist/chunk-MJMID7LX.cjs.map +0 -1
  631. package/dist/chunk-MKCC5K77.js +0 -90
  632. package/dist/chunk-MKCC5K77.js.map +0 -1
  633. package/dist/chunk-NPNGPMT2.js +0 -39
  634. package/dist/chunk-NPNGPMT2.js.map +0 -1
  635. package/dist/chunk-NR77P3TK.js.map +0 -1
  636. package/dist/chunk-NUAURT4I.cjs +0 -82
  637. package/dist/chunk-NUAURT4I.cjs.map +0 -1
  638. package/dist/chunk-OIT3PCWA.js +0 -72
  639. package/dist/chunk-OIT3PCWA.js.map +0 -1
  640. package/dist/chunk-P35FNLTQ.cjs +0 -2936
  641. package/dist/chunk-P35FNLTQ.cjs.map +0 -1
  642. package/dist/chunk-PFXXH2RP.js.map +0 -1
  643. package/dist/chunk-PZUZNPFM.js.map +0 -1
  644. package/dist/chunk-QFF5JUKT.cjs.map +0 -1
  645. package/dist/chunk-RKXWLG33.js +0 -7
  646. package/dist/chunk-RKXWLG33.js.map +0 -1
  647. package/dist/chunk-ROS5CMJS.cjs.map +0 -1
  648. package/dist/chunk-SJMKDSRF.js +0 -179
  649. package/dist/chunk-SJMKDSRF.js.map +0 -1
  650. package/dist/chunk-SSULK22X.cjs +0 -14
  651. package/dist/chunk-SSULK22X.cjs.map +0 -1
  652. package/dist/chunk-TSNDVBUU.cjs.map +0 -1
  653. package/dist/chunk-TTELJD4F.js.map +0 -1
  654. package/dist/chunk-TX4TTPYJ.cjs +0 -436
  655. package/dist/chunk-TX4TTPYJ.cjs.map +0 -1
  656. package/dist/chunk-UXG7PYML.js.map +0 -1
  657. package/dist/chunk-UZKIGB7M.cjs.map +0 -1
  658. package/dist/chunk-VF676YCO.cjs +0 -150
  659. package/dist/chunk-VF676YCO.cjs.map +0 -1
  660. package/dist/chunk-VQASQG5D.js +0 -55
  661. package/dist/chunk-VQASQG5D.js.map +0 -1
  662. package/dist/chunk-WBAXXG34.cjs.map +0 -1
  663. package/dist/chunk-WCHE6FJ7.js.map +0 -1
  664. package/dist/chunk-WM4RO23J.js +0 -145
  665. package/dist/chunk-WM4RO23J.js.map +0 -1
  666. package/dist/chunk-Z4RIRDU3.js.map +0 -1
  667. package/dist/chunk-ZABG3SZ2.cjs +0 -93
  668. package/dist/chunk-ZABG3SZ2.cjs.map +0 -1
  669. package/dist/chunk-ZISECZZO.js +0 -72
  670. package/dist/chunk-ZISECZZO.js.map +0 -1
  671. package/dist/chunk-ZMELUU72.js.map +0 -1
  672. package/dist/deployer/index.warning.d.ts +0 -9
  673. package/dist/deployer/index.warning.d.ts.map +0 -1
  674. package/dist/eval/evaluation.d.ts +0 -14
  675. package/dist/eval/evaluation.d.ts.map +0 -1
  676. package/dist/eval/index.cjs +0 -16
  677. package/dist/eval/index.d.ts +0 -4
  678. package/dist/eval/index.d.ts.map +0 -1
  679. package/dist/eval/index.js +0 -3
  680. package/dist/eval/metric.d.ts +0 -8
  681. package/dist/eval/metric.d.ts.map +0 -1
  682. package/dist/eval/types.d.ts +0 -9
  683. package/dist/eval/types.d.ts.map +0 -1
  684. package/dist/integration/index.warning.d.ts +0 -3
  685. package/dist/integration/index.warning.d.ts.map +0 -1
  686. package/dist/integration/integration.warning.d.ts +0 -5
  687. package/dist/integration/integration.warning.d.ts.map +0 -1
  688. package/dist/integration/openapi-toolset.warning.d.ts +0 -5
  689. package/dist/integration/openapi-toolset.warning.d.ts.map +0 -1
  690. package/dist/loop/telemetry/index.d.ts +0 -36
  691. package/dist/loop/telemetry/index.d.ts.map +0 -1
  692. package/dist/loop/telemetry/noop.d.ts +0 -3
  693. package/dist/loop/telemetry/noop.d.ts.map +0 -1
  694. package/dist/loop/test-utils/mockTracer.d.ts +0 -47
  695. package/dist/loop/test-utils/mockTracer.d.ts.map +0 -1
  696. package/dist/loop/test-utils/telemetry.d.ts +0 -6
  697. package/dist/loop/test-utils/telemetry.d.ts.map +0 -1
  698. package/dist/memory/index.warning.d.ts +0 -6
  699. package/dist/memory/index.warning.d.ts.map +0 -1
  700. package/dist/models-dev-4VGIWYS3.js +0 -3
  701. package/dist/models-dev-AXZASLL2.cjs +0 -12
  702. package/dist/netlify-TX6V7SJJ.cjs +0 -12
  703. package/dist/netlify-VJXBII33.js +0 -3
  704. package/dist/relevance/cohere/index.d.ts +0 -9
  705. package/dist/relevance/cohere/index.d.ts.map +0 -1
  706. package/dist/runtime-context/index.cjs +0 -12
  707. package/dist/runtime-context/index.js +0 -3
  708. package/dist/scores/base.d.ts.map +0 -1
  709. package/dist/scores/base.test-utils.d.ts.map +0 -1
  710. package/dist/scores/hooks.d.ts.map +0 -1
  711. package/dist/scores/index.cjs +0 -37
  712. package/dist/scores/index.cjs.map +0 -1
  713. package/dist/scores/index.d.ts +0 -4
  714. package/dist/scores/index.d.ts.map +0 -1
  715. package/dist/scores/index.js +0 -4
  716. package/dist/scores/index.js.map +0 -1
  717. package/dist/scores/run-experiment/index.d.ts.map +0 -1
  718. package/dist/scores/run-experiment/scorerAccumulator.d.ts.map +0 -1
  719. package/dist/scores/scoreTraces/index.cjs.map +0 -1
  720. package/dist/scores/scoreTraces/index.d.ts.map +0 -1
  721. package/dist/scores/scoreTraces/index.js.map +0 -1
  722. package/dist/scores/scoreTraces/scoreTraces.d.ts.map +0 -1
  723. package/dist/scores/scoreTraces/scoreTracesWorkflow.d.ts.map +0 -1
  724. package/dist/scores/scoreTraces/utils.d.ts.map +0 -1
  725. package/dist/scores/types.d.ts.map +0 -1
  726. package/dist/storage/base.warning.d.ts +0 -8
  727. package/dist/storage/base.warning.d.ts.map +0 -1
  728. package/dist/storage/domains/legacy-evals/base.d.ts +0 -13
  729. package/dist/storage/domains/legacy-evals/base.d.ts.map +0 -1
  730. package/dist/storage/domains/legacy-evals/index.d.ts +0 -2
  731. package/dist/storage/domains/legacy-evals/index.d.ts.map +0 -1
  732. package/dist/storage/domains/legacy-evals/inmemory.d.ts +0 -17
  733. package/dist/storage/domains/legacy-evals/inmemory.d.ts.map +0 -1
  734. package/dist/storage/domains/traces/base.d.ts +0 -14
  735. package/dist/storage/domains/traces/base.d.ts.map +0 -1
  736. package/dist/storage/domains/traces/index.d.ts +0 -3
  737. package/dist/storage/domains/traces/index.d.ts.map +0 -1
  738. package/dist/storage/domains/traces/inmemory.d.ts +0 -22
  739. package/dist/storage/domains/traces/inmemory.d.ts.map +0 -1
  740. package/dist/storage/index.warning.d.ts +0 -3
  741. package/dist/storage/index.warning.d.ts.map +0 -1
  742. package/dist/telemetry/composite-exporter.d.ts +0 -10
  743. package/dist/telemetry/composite-exporter.d.ts.map +0 -1
  744. package/dist/telemetry/index.cjs +0 -32
  745. package/dist/telemetry/index.cjs.map +0 -1
  746. package/dist/telemetry/index.d.ts +0 -6
  747. package/dist/telemetry/index.d.ts.map +0 -1
  748. package/dist/telemetry/index.js +0 -3
  749. package/dist/telemetry/index.js.map +0 -1
  750. package/dist/telemetry/otel-vendor.cjs +0 -105
  751. package/dist/telemetry/otel-vendor.cjs.map +0 -1
  752. package/dist/telemetry/otel-vendor.d.ts +0 -11
  753. package/dist/telemetry/otel-vendor.d.ts.map +0 -1
  754. package/dist/telemetry/otel-vendor.js +0 -59
  755. package/dist/telemetry/otel-vendor.js.map +0 -1
  756. package/dist/telemetry/storage-exporter.d.ts +0 -21
  757. package/dist/telemetry/storage-exporter.d.ts.map +0 -1
  758. package/dist/telemetry/telemetry.d.ts +0 -59
  759. package/dist/telemetry/telemetry.d.ts.map +0 -1
  760. package/dist/telemetry/telemetry.decorators.d.ts +0 -15
  761. package/dist/telemetry/telemetry.decorators.d.ts.map +0 -1
  762. package/dist/telemetry/types.d.ts +0 -74
  763. package/dist/telemetry/types.d.ts.map +0 -1
  764. package/dist/telemetry/utility.d.ts +0 -15
  765. package/dist/telemetry/utility.d.ts.map +0 -1
  766. package/dist/tools/index.warning.d.ts +0 -8
  767. package/dist/tools/index.warning.d.ts.map +0 -1
  768. package/dist/tts/index.warning.d.ts +0 -7
  769. package/dist/tts/index.warning.d.ts.map +0 -1
  770. package/dist/vector/index.warning.d.ts +0 -6
  771. package/dist/vector/index.warning.d.ts.map +0 -1
  772. package/dist/workflows/index.warning.d.ts +0 -6
  773. package/dist/workflows/index.warning.d.ts.map +0 -1
  774. package/dist/workflows/legacy/index.cjs +0 -92
  775. package/dist/workflows/legacy/index.cjs.map +0 -1
  776. package/dist/workflows/legacy/index.d.ts +0 -5
  777. package/dist/workflows/legacy/index.d.ts.map +0 -1
  778. package/dist/workflows/legacy/index.js +0 -3
  779. package/dist/workflows/legacy/index.js.map +0 -1
  780. package/dist/workflows/legacy/machine.d.ts +0 -386
  781. package/dist/workflows/legacy/machine.d.ts.map +0 -1
  782. package/dist/workflows/legacy/step.d.ts +0 -15
  783. package/dist/workflows/legacy/step.d.ts.map +0 -1
  784. package/dist/workflows/legacy/types.d.ts +0 -397
  785. package/dist/workflows/legacy/types.d.ts.map +0 -1
  786. package/dist/workflows/legacy/utils.d.ts +0 -68
  787. package/dist/workflows/legacy/utils.d.ts.map +0 -1
  788. package/dist/workflows/legacy/workflow-instance.d.ts +0 -97
  789. package/dist/workflows/legacy/workflow-instance.d.ts.map +0 -1
  790. package/dist/workflows/legacy/workflow.d.ts +0 -109
  791. package/dist/workflows/legacy/workflow.d.ts.map +0 -1
  792. package/dist/workflows/workflow.warning.d.ts +0 -9
  793. package/dist/workflows/workflow.warning.d.ts.map +0 -1
  794. package/eval.d.ts +0 -1
  795. package/runtime-context.d.ts +0 -1
  796. package/scores/scoreTraces.d.ts +0 -1
  797. package/scores.d.ts +0 -1
  798. package/telemetry/otel-vendor.d.ts +0 -1
  799. package/telemetry.d.ts +0 -1
  800. package/workflows/legacy.d.ts +0 -1
  801. /package/dist/agent/{input-processor → message-list}/index.cjs.map +0 -0
  802. /package/dist/agent/{input-processor → message-list}/index.js.map +0 -0
  803. /package/dist/{ai-tracing → evals}/index.cjs.map +0 -0
  804. /package/dist/{ai-tracing → evals}/index.js.map +0 -0
  805. /package/dist/{scores/run-experiment → evals/run}/scorerAccumulator.d.ts +0 -0
  806. /package/dist/{scores → evals}/scoreTraces/index.d.ts +0 -0
  807. /package/dist/{eval → observability}/index.cjs.map +0 -0
  808. /package/dist/{eval → observability}/index.js.map +0 -0
  809. /package/dist/{runtime-context → request-context}/index.cjs.map +0 -0
  810. /package/dist/{runtime-context → request-context}/index.js.map +0 -0
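
Several entry points are renamed in this release (scores → evals, ai-tracing → observability, runtime-context → request-context) and the legacy eval, scores, telemetry, and workflows/legacy entry points are dropped. A minimal, hedged sketch of the import-path migration implied by the added and removed top-level *.d.ts stubs follows; the authoritative subpaths live in the reworked package.json "exports" map, which this listing does not expand, so the paths below are assumptions rather than confirmed API.

```ts
// Hypothetical import-path migration inferred from the renamed entry points above.
// Old and new imports would not resolve against the same installed version;
// they are shown side by side for comparison only.

// @mastra/core 0.24.0 (subpaths whose top-level .d.ts stubs are removed in this diff):
import type * as scoresOld from '@mastra/core/scores';
import type * as tracingOld from '@mastra/core/ai-tracing';
import type * as runtimeContextOld from '@mastra/core/runtime-context';

// @mastra/core 1.0.0-beta.0 (subpaths whose top-level .d.ts stubs are added in this diff):
import type * as evals from '@mastra/core/evals';
import type * as observability from '@mastra/core/observability';
import type * as requestContext from '@mastra/core/request-context';
```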
@@ -0,0 +1,4705 @@
1
+ /* eslint-disable */
2
+ import type { ServerResponse } from 'http';
3
+ import type { ServerResponse as ServerResponse_2 } from 'node:http';
4
+ import type { JSONSchema7 } from 'json-schema';
5
+ import type { z } from 'zod';
6
+
7
+ /**
8
+ * Custom error class for AI SDK related errors.
9
+ * @extends Error
10
+ */
11
+ export declare class AISDKError extends Error {
12
+ private readonly [symbol$d];
13
+ /**
14
+ * The underlying cause of the error, if any.
15
+ */
16
+ readonly cause?: unknown;
17
+ /**
18
+ * Creates an AI SDK Error.
19
+ *
20
+ * @param {Object} params - The parameters for creating the error.
21
+ * @param {string} params.name - The name of the error.
22
+ * @param {string} params.message - The error message.
23
+ * @param {unknown} [params.cause] - The underlying cause of the error.
24
+ */
25
+ constructor({ name, message, cause }: { name: string; message: string; cause?: unknown });
26
+ /**
27
+ * Checks if the given error is an AI SDK Error.
28
+ * @param {unknown} error - The error to check.
29
+ * @returns {boolean} True if the error is an AI SDK Error, false otherwise.
30
+ */
31
+ static isInstance(error: unknown): error is AISDKError;
32
+ protected static hasMarker(error: unknown, marker: string): boolean;
33
+ }
34
+
35
+ /**
36
+ * Appends a client message to the messages array.
37
+ * If the last message in the array has the same id as the new message, it will be replaced.
38
+ * Otherwise, the new message will be appended.
39
+ */
40
+ export declare function appendClientMessage({
41
+ messages,
42
+ message,
43
+ }: {
44
+ messages: Message[];
45
+ message: Message;
46
+ }): Message[];
47
+
48
+ /**
49
+ * Appends the ResponseMessage[] from the response to a Message[] (for useChat).
50
+ * The messages are converted to Messages before being appended.
51
+ * Timestamps are generated for the new messages.
52
+ *
53
+ * @returns A new Message[] with the response messages appended.
54
+ */
55
+ export declare function appendResponseMessages({
56
+ messages,
57
+ responseMessages,
58
+ _internal: { currentDate },
59
+ }: {
60
+ messages: Message[];
61
+ responseMessages: ResponseMessage[];
62
+ /**
63
+ Internal. For test use only. May change without notice.
64
+ */
65
+ _internal?: {
66
+ currentDate?: () => Date;
67
+ };
68
+ }): Message[];
69
+
70
+ /**
71
+ Content of an assistant message.
72
+ It can be a string or an array of text, image, reasoning, redacted reasoning, and tool call parts.
73
+ */
74
+ export declare type AssistantContent =
75
+ | string
76
+ | Array<TextPart | FilePart | ReasoningPart | RedactedReasoningPart | ToolCallPart>;
77
+
78
+ declare type AsyncIterableStream<T> = AsyncIterable<T> & ReadableStream<T>;
79
+
80
+ /**
81
+ * An attachment that can be sent along with a message.
82
+ */
83
+ declare interface Attachment {
84
+ /**
85
+ * The name of the attachment, usually the file name.
86
+ */
87
+ name?: string;
88
+ /**
89
+ * A string indicating the [media type](https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Content-Type).
90
+ * By default, it's extracted from the pathname's extension.
91
+ */
92
+ contentType?: string;
93
+ /**
94
+ * The URL of the attachment. It can either be a URL to a hosted file or a [Data URL](https://developer.mozilla.org/en-US/docs/Web/HTTP/Basics_of_HTTP/Data_URLs).
95
+ */
96
+ url: string;
97
+ }
98
+
99
+ /**
100
+ * Attributes is a map from string to attribute values.
101
+ *
102
+ * Note: only the own enumerable keys are counted as valid attribute keys.
103
+ */
104
+ declare interface Attributes {
105
+ [attributeKey: string]: AttributeValue | undefined;
106
+ }
107
+
108
+ /**
109
+ * Attribute values may be any non-nullish primitive value except an object.
110
+ *
111
+ * null or undefined attribute values are invalid and will result in undefined behavior.
112
+ */
113
+ declare type AttributeValue =
114
+ | string
115
+ | number
116
+ | boolean
117
+ | Array<null | undefined | string>
118
+ | Array<null | undefined | number>
119
+ | Array<null | undefined | boolean>;
120
+
121
+ declare type CallSettings = {
122
+ /**
123
+ Maximum number of tokens to generate.
124
+ */
125
+ maxTokens?: number;
126
+ /**
127
+ Temperature setting. This is a number between 0 (almost no randomness) and
128
+ 1 (very random).
129
+
130
+ It is recommended to set either `temperature` or `topP`, but not both.
131
+
132
+ @default 0
133
+ */
134
+ temperature?: number;
135
+ /**
136
+ Nucleus sampling. This is a number between 0 and 1.
137
+
138
+ E.g. 0.1 would mean that only tokens with the top 10% probability mass
139
+ are considered.
140
+
141
+ It is recommended to set either `temperature` or `topP`, but not both.
142
+ */
143
+ topP?: number;
144
+ /**
145
+ Only sample from the top K options for each subsequent token.
146
+
147
+ Used to remove "long tail" low probability responses.
148
+ Recommended for advanced use cases only. You usually only need to use temperature.
149
+ */
150
+ topK?: number;
151
+ /**
152
+ Presence penalty setting. It affects the likelihood of the model to
153
+ repeat information that is already in the prompt.
154
+
155
+ The presence penalty is a number between -1 (increase repetition)
156
+ and 1 (maximum penalty, decrease repetition). 0 means no penalty.
157
+ */
158
+ presencePenalty?: number;
159
+ /**
160
+ Frequency penalty setting. It affects the likelihood of the model
161
+ to repeatedly use the same words or phrases.
162
+
163
+ The frequency penalty is a number between -1 (increase repetition)
164
+ and 1 (maximum penalty, decrease repetition). 0 means no penalty.
165
+ */
166
+ frequencyPenalty?: number;
167
+ /**
168
+ Stop sequences.
169
+ If set, the model will stop generating text when one of the stop sequences is generated.
170
+ Providers may have limits on the number of stop sequences.
171
+ */
172
+ stopSequences?: string[];
173
+ /**
174
+ The seed (integer) to use for random sampling. If set and supported
175
+ by the model, calls will generate deterministic results.
176
+ */
177
+ seed?: number;
178
+ /**
179
+ Maximum number of retries. Set to 0 to disable retries.
180
+
181
+ @default 2
182
+ */
183
+ maxRetries?: number;
184
+ /**
185
+ Abort signal.
186
+ */
187
+ abortSignal?: AbortSignal;
188
+ /**
189
+ Additional HTTP headers to be sent with the request.
190
+ Only applicable for HTTP-based providers.
191
+ */
192
+ headers?: Record<string, string | undefined>;
193
+ };
194
+
195
+ /**
196
+ Warning from the model provider for this call. The call will proceed, but e.g.
197
+ some settings might not be supported, which can lead to suboptimal results.
198
+ */
199
+ declare type CallWarning = LanguageModelV1CallWarning;
200
+
201
+ declare type ConsumeStreamOptions = {
202
+ onError?: (error: unknown) => void;
203
+ };
204
+
205
+ declare interface Context {
206
+ /**
207
+ * Get a value from the context.
208
+ *
209
+ * @param key key which identifies a context value
210
+ */
211
+ getValue(key: symbol): unknown;
212
+ /**
213
+ * Create a new context which inherits from this context and has
214
+ * the given key set to the given value.
215
+ *
216
+ * @param key context key for which to set the value
217
+ * @param value value to set for the given key
218
+ */
219
+ setValue(key: symbol, value: unknown): Context;
220
+ /**
221
+ * Return a new context which inherits from this context but does
222
+ * not contain a value for the given key.
223
+ *
224
+ * @param key context key for which to clear a value
225
+ */
226
+ deleteValue(key: symbol): Context;
227
+ }
228
+
229
+ /**
230
+ Converts an array of messages from useChat into an array of CoreMessages that can be used
231
+ with the AI core functions (e.g. `streamText`).
232
+ */
233
+ export declare function convertToCoreMessages<TOOLS extends ToolSet = never>(
234
+ messages: Array<Omit<Message, 'id'>>,
235
+ options?: {
236
+ tools?: TOOLS;
237
+ },
238
+ ): CoreMessage[];
239
+
240
+ export declare function convertUint8ArrayToBase64(array: Uint8Array): string;
241
+
242
+ /**
243
+ An assistant message. It can contain text, tool calls, or a combination of text and tool calls.
244
+ */
245
+ export declare type CoreAssistantMessage = {
246
+ role: 'assistant';
247
+ content: AssistantContent;
248
+ /**
249
+ Additional provider-specific metadata. They are passed through
250
+ to the provider from the AI SDK and enable provider-specific
251
+ functionality that can be fully encapsulated in the provider.
252
+ */
253
+ providerOptions?: ProviderOptions;
254
+ /**
255
+ @deprecated Use `providerOptions` instead.
256
+ */
257
+ experimental_providerMetadata?: ProviderMetadata;
258
+ };
259
+
260
+ /**
261
+ A message that can be used in the `messages` field of a prompt.
262
+ It can be a user message, an assistant message, or a tool message.
263
+ */
264
+ export declare type CoreMessage = CoreSystemMessage | CoreUserMessage | CoreAssistantMessage | CoreToolMessage;
265
+
266
+ /**
267
+ A system message. It can contain system information.
268
+
269
+ Note: using the "system" part of the prompt is strongly preferred
270
+ to increase the resilience against prompt injection attacks,
271
+ and because not all providers support several system messages.
272
+ */
273
+ export declare type CoreSystemMessage = {
274
+ role: 'system';
275
+ content: string;
276
+ /**
277
+ Additional provider-specific metadata. They are passed through
278
+ to the provider from the AI SDK and enable provider-specific
279
+ functionality that can be fully encapsulated in the provider.
280
+ */
281
+ providerOptions?: ProviderOptions;
282
+ /**
283
+ @deprecated Use `providerOptions` instead.
284
+ */
285
+ experimental_providerMetadata?: ProviderMetadata;
286
+ };
287
+
288
+ /**
289
+ A tool message. It contains the result of one or more tool calls.
290
+ */
291
+ export declare type CoreToolMessage = {
292
+ role: 'tool';
293
+ content: ToolContent;
294
+ /**
295
+ Additional provider-specific metadata. They are passed through
296
+ to the provider from the AI SDK and enable provider-specific
297
+ functionality that can be fully encapsulated in the provider.
298
+ */
299
+ providerOptions?: ProviderOptions;
300
+ /**
301
+ @deprecated Use `providerOptions` instead.
302
+ */
303
+ experimental_providerMetadata?: ProviderMetadata;
304
+ };
305
+
306
+ /**
307
+ A user message. It can contain text or a combination of text and images.
308
+ */
309
+ export declare type CoreUserMessage = {
310
+ role: 'user';
311
+ content: UserContent;
312
+ /**
313
+ Additional provider-specific metadata. They are passed through
314
+ to the provider from the AI SDK and enable provider-specific
315
+ functionality that can be fully encapsulated in the provider.
316
+ */
317
+ providerOptions?: ProviderOptions;
318
+ /**
319
+ @deprecated Use `providerOptions` instead.
320
+ */
321
+ experimental_providerMetadata?: ProviderMetadata;
322
+ };
323
+
324
+ /**
325
+ Data content. Can either be a base64-encoded string, a Uint8Array, an ArrayBuffer, or a Buffer.
326
+ */
327
+ declare type DataContent = string | Uint8Array | ArrayBuffer | Buffer;
328
+
329
+ declare type DataStreamOptions = {
330
+ /**
331
+ * Send usage parts to the client.
332
+ * Default to true.
333
+ */
334
+ sendUsage?: boolean;
335
+ /**
336
+ * Send reasoning parts to the client.
337
+ * Default to false.
338
+ */
339
+ sendReasoning?: boolean;
340
+ /**
341
+ * Send source parts to the client.
342
+ * Default to false.
343
+ */
344
+ sendSources?: boolean;
345
+ /**
346
+ * Send the finish event to the client.
347
+ * Set to false if you are using additional streamText calls
348
+ * that send additional data.
349
+ * Default to true.
350
+ */
351
+ experimental_sendFinish?: boolean;
352
+ /**
353
+ * Send the message start event to the client.
354
+ * Set to false if you are using additional streamText calls
355
+ * and the message start event has already been sent.
356
+ * Default to true.
357
+ *
358
+ * Note: this setting is currently not used, but you should
359
+ * already set it to false if you are using additional
360
+ * streamText calls that send additional data to prevent
361
+ * the message start event from being sent multiple times.
362
+ */
363
+ experimental_sendStart?: boolean;
364
+ };
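// Illustrative sketch: a `DataStreamOptions` value. The assumption is that these options are
// passed wherever a data-stream response is produced (the consuming helper is not shown here).
const streamOptions: DataStreamOptions = {
  sendUsage: true,      // defaults to true
  sendReasoning: true,  // defaults to false
  sendSources: false,   // defaults to false
  experimental_sendFinish: true,
};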
365
+
366
+ declare interface DataStreamPart<CODE extends string, NAME extends string, TYPE> {
367
+ code: CODE;
368
+ name: NAME;
369
+ parse: (value: JSONValue_2) => {
370
+ type: NAME;
371
+ value: TYPE;
372
+ };
373
+ }
374
+
375
+ declare type DataStreamParts = (typeof dataStreamParts)[number];
376
+
377
+ declare const dataStreamParts: readonly [
378
+ DataStreamPart<'0', 'text', string>,
379
+ DataStreamPart<'2', 'data', JSONValue_2[]>,
380
+ DataStreamPart<'3', 'error', string>,
381
+ DataStreamPart<'8', 'message_annotations', JSONValue_2[]>,
382
+ DataStreamPart<'9', 'tool_call', ToolCall<string, any>>,
383
+ DataStreamPart<'a', 'tool_result', Omit<ToolResult<string, any, any>, 'args' | 'toolName'>>,
384
+ DataStreamPart<
385
+ 'b',
386
+ 'tool_call_streaming_start',
387
+ {
388
+ toolCallId: string;
389
+ toolName: string;
390
+ }
391
+ >,
392
+ DataStreamPart<
393
+ 'c',
394
+ 'tool_call_delta',
395
+ {
396
+ toolCallId: string;
397
+ argsTextDelta: string;
398
+ }
399
+ >,
400
+ DataStreamPart<
401
+ 'd',
402
+ 'finish_message',
403
+ {
404
+ finishReason: LanguageModelV1FinishReason;
405
+ usage?: {
406
+ promptTokens: number;
407
+ completionTokens: number;
408
+ };
409
+ }
410
+ >,
411
+ DataStreamPart<
412
+ 'e',
413
+ 'finish_step',
414
+ {
415
+ isContinued: boolean;
416
+ finishReason: LanguageModelV1FinishReason;
417
+ usage?: {
418
+ promptTokens: number;
419
+ completionTokens: number;
420
+ };
421
+ }
422
+ >,
423
+ DataStreamPart<
424
+ 'f',
425
+ 'start_step',
426
+ {
427
+ messageId: string;
428
+ }
429
+ >,
430
+ DataStreamPart<'g', 'reasoning', string>,
431
+ DataStreamPart<'h', 'source', LanguageModelV1Source>,
432
+ DataStreamPart<
433
+ 'i',
434
+ 'redacted_reasoning',
435
+ {
436
+ data: string;
437
+ }
438
+ >,
439
+ DataStreamPart<
440
+ 'j',
441
+ 'reasoning_signature',
442
+ {
443
+ signature: string;
444
+ }
445
+ >,
446
+ DataStreamPart<
447
+ 'k',
448
+ 'file',
449
+ {
450
+ data: string;
451
+ mimeType: string;
452
+ }
453
+ >,
454
+ ];
455
+
456
+ declare type DataStreamString =
457
+ `${(typeof DataStreamStringPrefixes)[keyof typeof DataStreamStringPrefixes]}:${string}\n`;
458
+
459
+ /**
460
+ * The map of prefixes for data in the stream
461
+ *
462
+ * - 0: Text from the LLM response
463
+ * - 1: (OpenAI) function_call responses
464
+ * - 2: custom JSON added by the user using `Data`
465
+ * - 6: (OpenAI) tool_call responses
466
+ *
467
+ * Example:
468
+ * ```
469
+ * 0:Vercel
470
+ * 0:'s
471
+ * 0: AI
472
+ * 0: AI
473
+ * 0: SDK
474
+ * 0: is great
475
+ * 0:!
476
+ * 2: { "someJson": "value" }
477
+ * 1: {"function_call": {"name": "get_current_weather", "arguments": "{\\n\\"location\\": \\"Charlottesville, Virginia\\",\\n\\"format\\": \\"celsius\\"\\n}"}}
478
+ * 6: {"tool_call": {"id": "tool_0", "type": "function", "function": {"name": "get_current_weather", "arguments": "{\\n\\"location\\": \\"Charlottesville, Virginia\\",\\n\\"format\\": \\"celsius\\"\\n}"}}}
479
+ * ```
480
+ */
481
+ declare const DataStreamStringPrefixes: { [K in DataStreamParts['name']]: (typeof dataStreamParts)[number]['code'] };
482
+
483
+ declare interface DataStreamWriter {
484
+ /**
485
+ * Appends a data part to the stream.
486
+ */
487
+ write(data: DataStreamString): void;
488
+ /**
489
+ * Appends a data part to the stream.
490
+ */
491
+ writeData(value: JSONValue): void;
492
+ /**
493
+ * Appends a message annotation to the stream.
494
+ */
495
+ writeMessageAnnotation(value: JSONValue): void;
496
+ /**
497
+ * Appends a source part to the stream.
498
+ */
499
+ writeSource(source: Source): void;
500
+ /**
501
+ * Merges the contents of another stream to this stream.
502
+ */
503
+ merge(stream: ReadableStream<DataStreamString>): void;
504
+ /**
505
+ * Error handler that is used by the data stream writer.
506
+ * This is intended for forwarding when merging streams
507
+ * to prevent duplicated error masking.
508
+ */
509
+ onError: ((error: unknown) => string) | undefined;
510
+ }
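// Minimal in-memory sketch of the `DataStreamWriter` interface above, e.g. for tests.
// The `CODE:json\n` encoding below is an assumption based on the DataStreamString shape and the
// dataStreamParts table; a real writer would reuse the SDK's own formatter.
class BufferDataStreamWriter implements DataStreamWriter {
  readonly chunks: DataStreamString[] = [];
  onError = (error: unknown) => String(error);

  write(data: DataStreamString): void {
    this.chunks.push(data);
  }
  writeData(value: JSONValue): void {
    this.write(`2:${JSON.stringify([value])}\n` as DataStreamString); // '2' = data part
  }
  writeMessageAnnotation(value: JSONValue): void {
    this.write(`8:${JSON.stringify([value])}\n` as DataStreamString); // '8' = message_annotations
  }
  writeSource(source: Source): void {
    this.write(`h:${JSON.stringify(source)}\n` as DataStreamString); // 'h' = source
  }
  merge(stream: ReadableStream<DataStreamString>): void {
    void stream.pipeTo(new WritableStream<DataStreamString>({ write: chunk => this.write(chunk) }));
  }
}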
511
+
512
+ /**
513
+ Create a type from an object with all keys and nested keys set to optional.
514
+ The helper supports normal objects and Zod schemas (which are resolved automatically).
515
+ It always recurses into arrays.
516
+
517
+ Adopted from [type-fest](https://github.com/sindresorhus/type-fest/tree/main) PartialDeep.
518
+ */
519
+ export declare type DeepPartial<T> = T extends z.ZodTypeAny ? DeepPartialInternal<z.infer<T>> : DeepPartialInternal<T>;
520
+
521
+ declare type DeepPartialInternal<T> = T extends
522
+ | null
523
+ | undefined
524
+ | string
525
+ | number
526
+ | boolean
527
+ | symbol
528
+ | bigint
529
+ | void
530
+ | Date
531
+ | RegExp
532
+ | ((...arguments_: any[]) => unknown)
533
+ | (new (...arguments_: any[]) => unknown)
534
+ ? T
535
+ : T extends Map<infer KeyType, infer ValueType>
536
+ ? PartialMap<KeyType, ValueType>
537
+ : T extends Set<infer ItemType>
538
+ ? PartialSet<ItemType>
539
+ : T extends ReadonlyMap<infer KeyType, infer ValueType>
540
+ ? PartialReadonlyMap<KeyType, ValueType>
541
+ : T extends ReadonlySet<infer ItemType>
542
+ ? PartialReadonlySet<ItemType>
543
+ : T extends object
544
+ ? T extends ReadonlyArray<infer ItemType>
545
+ ? ItemType[] extends T
546
+ ? readonly ItemType[] extends T
547
+ ? ReadonlyArray<DeepPartialInternal<ItemType | undefined>>
548
+ : Array<DeepPartialInternal<ItemType | undefined>>
549
+ : PartialObject<T>
550
+ : PartialObject<T>
551
+ : unknown;
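// Sketch of `DeepPartial` resolving a Zod schema, as described above (assumes `zod` is installed;
// the schema and values are invented for the example).
import { z } from 'zod';

const profileSchema = z.object({
  name: z.string(),
  address: z.object({ city: z.string(), zip: z.string() }),
  tags: z.array(z.string()),
});

// Every key and nested key becomes optional; arrays are recursed into.
type PartialProfile = DeepPartial<typeof profileSchema>;

const draft: PartialProfile = { address: { city: 'Berlin' } };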
552
+
553
+ /**
554
+ Embed a value using an embedding model. The type of the value is defined by the embedding model.
555
+
556
+ @param model - The embedding model to use.
557
+ @param value - The value that should be embedded.
558
+
559
+ @param maxRetries - Maximum number of retries. Set to 0 to disable retries. Default: 2.
560
+ @param abortSignal - An optional abort signal that can be used to cancel the call.
561
+ @param headers - Additional HTTP headers to be sent with the request. Only applicable for HTTP-based providers.
562
+
563
+ @returns A result object that contains the embedding, the value, and additional information.
564
+ */
565
+ export declare function embed<VALUE>({
566
+ model,
567
+ value,
568
+ maxRetries: maxRetriesArg,
569
+ abortSignal,
570
+ headers,
571
+ experimental_telemetry: telemetry,
572
+ }: {
573
+ /**
574
+ The embedding model to use.
575
+ */
576
+ model: EmbeddingModel<VALUE>;
577
+ /**
578
+ The value that should be embedded.
579
+ */
580
+ value: VALUE;
581
+ /**
582
+ Maximum number of retries per embedding model call. Set to 0 to disable retries.
583
+
584
+ @default 2
585
+ */
586
+ maxRetries?: number;
587
+ /**
588
+ Abort signal.
589
+ */
590
+ abortSignal?: AbortSignal;
591
+ /**
592
+ Additional headers to include in the request.
593
+ Only applicable for HTTP-based providers.
594
+ */
595
+ headers?: Record<string, string>;
596
+ /**
597
+ * Optional telemetry configuration (experimental).
598
+ */
599
+ experimental_telemetry?: TelemetrySettings;
600
+ }): Promise<EmbedResult<VALUE>>;
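// Usage sketch for the `embed` function declared above (import specifier omitted). The provider
// model `openai.embedding('text-embedding-3-small')` is an assumption; any EmbeddingModel<string> works.
import { openai } from '@ai-sdk/openai';

const { embedding, usage } = await embed({
  model: openai.embedding('text-embedding-3-small'),
  value: 'sunny day at the beach',
  maxRetries: 2, // the default
});
console.log(embedding.length, usage.tokens);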
601
+
602
+ /**
603
+ Embedding.
604
+ */
605
+ declare type Embedding = EmbeddingModelV1Embedding;
606
+
607
+ /**
608
+ Embedding model that is used by the AI SDK Core functions.
609
+ */
610
+ export declare type EmbeddingModel<VALUE> = EmbeddingModelV1<VALUE>;
611
+
612
+ /**
613
+ Represents the number of tokens used in an embedding.
614
+ */
615
+ declare type EmbeddingModelUsage = {
616
+ /**
617
+ The number of tokens used in the embedding.
618
+ */
619
+ tokens: number;
620
+ };
621
+
622
+ /**
623
+ Specification for an embedding model that implements the embedding model
624
+ interface version 1.
625
+
626
+ VALUE is the type of the values that the model can embed.
627
+ This will allow us to go beyond text embeddings in the future,
628
+ e.g. to support image embeddings.
629
+ */
630
+ declare type EmbeddingModelV1<VALUE> = {
631
+ /**
632
+ The embedding model must specify which embedding model interface
633
+ version it implements. This will allow us to evolve the embedding
634
+ model interface and retain backwards compatibility. The different
635
+ implementation versions can be handled as a discriminated union
636
+ on our side.
637
+ */
638
+ readonly specificationVersion: 'v1';
639
+ /**
640
+ Name of the provider for logging purposes.
641
+ */
642
+ readonly provider: string;
643
+ /**
644
+ Provider-specific model ID for logging purposes.
645
+ */
646
+ readonly modelId: string;
647
+ /**
648
+ Limit of how many embeddings can be generated in a single API call.
649
+ */
650
+ readonly maxEmbeddingsPerCall: number | undefined;
651
+ /**
652
+ True if the model can handle multiple embedding calls in parallel.
653
+ */
654
+ readonly supportsParallelCalls: boolean;
655
+ /**
656
+ Generates a list of embeddings for the given input text.
657
+
658
+ Naming: "do" prefix to prevent accidental direct usage of the method
659
+ by the user.
660
+ */
661
+ doEmbed(options: {
662
+ /**
663
+ List of values to embed.
664
+ */
665
+ values: Array<VALUE>;
666
+ /**
667
+ Abort signal for cancelling the operation.
668
+ */
669
+ abortSignal?: AbortSignal;
670
+ /**
671
+ Additional HTTP headers to be sent with the request.
672
+ Only applicable for HTTP-based providers.
673
+ */
674
+ headers?: Record<string, string | undefined>;
675
+ }): PromiseLike<{
676
+ /**
677
+ Generated embeddings. They are in the same order as the input values.
678
+ */
679
+ embeddings: Array<EmbeddingModelV1Embedding>;
680
+ /**
681
+ Token usage. We only have input tokens for embeddings.
682
+ */
683
+ usage?: {
684
+ tokens: number;
685
+ };
686
+ /**
687
+ Optional raw response information for debugging purposes.
688
+ */
689
+ rawResponse?: {
690
+ /**
691
+ Response headers.
692
+ */
693
+ headers?: Record<string, string>;
694
+ };
695
+ }>;
696
+ };
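// Hedged sketch: a toy EmbeddingModelV1<string> that hashes characters into a fixed-size vector.
// Purely illustrative; a real provider would call an embeddings API inside doEmbed.
const toyEmbeddingModel: EmbeddingModelV1<string> = {
  specificationVersion: 'v1',
  provider: 'toy',
  modelId: 'char-hash-8',
  maxEmbeddingsPerCall: 32,
  supportsParallelCalls: true,
  async doEmbed({ values }) {
    const embeddings = values.map(value => {
      const vector = new Array<number>(8).fill(0);
      for (let i = 0; i < value.length; i++) vector[i % 8] += value.charCodeAt(i) / 1000;
      return vector;
    });
    return { embeddings, usage: { tokens: values.join('').length } };
  },
};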
697
+
698
+ /**
699
+ An embedding is a vector, i.e. an array of numbers.
700
+ It is e.g. used to represent a text as a vector of word embeddings.
701
+ */
702
+ declare type EmbeddingModelV1Embedding = Array<number>;
703
+
704
+ /**
705
+ The result of an `embedMany` call.
706
+ It contains the embeddings, the values, and additional information.
707
+ */
708
+ export declare interface EmbedManyResult<VALUE> {
709
+ /**
710
+ The values that were embedded.
711
+ */
712
+ readonly values: Array<VALUE>;
713
+ /**
714
+ The embeddings. They are in the same order as the values.
715
+ */
716
+ readonly embeddings: Array<Embedding>;
717
+ /**
718
+ The embedding token usage.
719
+ */
720
+ readonly usage: EmbeddingModelUsage;
721
+ }
722
+
723
+ /**
724
+ The result of an `embed` call.
725
+ It contains the embedding, the value, and additional information.
726
+ */
727
+ export declare interface EmbedResult<VALUE> {
728
+ /**
729
+ The value that was embedded.
730
+ */
731
+ readonly value: VALUE;
732
+ /**
733
+ The embedding of the value.
734
+ */
735
+ readonly embedding: Embedding;
736
+ /**
737
+ The embedding token usage.
738
+ */
739
+ readonly usage: EmbeddingModelUsage;
740
+ /**
741
+ Optional raw response data.
742
+ */
743
+ readonly rawResponse?: {
744
+ /**
745
+ Response headers.
746
+ */
747
+ headers?: Record<string, string>;
748
+ };
749
+ }
750
+
751
+ /**
752
+ * Defines Exception.
753
+ *
754
+ * string or an object with one of (message or name or code) and optional stack
755
+ */
756
+ declare type Exception = ExceptionWithCode | ExceptionWithMessage | ExceptionWithName | string;
757
+
758
+ declare interface ExceptionWithCode {
759
+ code: string | number;
760
+ name?: string;
761
+ message?: string;
762
+ stack?: string;
763
+ }
764
+
765
+ declare interface ExceptionWithMessage {
766
+ code?: string | number;
767
+ message: string;
768
+ name?: string;
769
+ stack?: string;
770
+ }
771
+
772
+ declare interface ExceptionWithName {
773
+ code?: string | number;
774
+ message?: string;
775
+ name: string;
776
+ stack?: string;
777
+ }
778
+
779
+ /**
780
+ File content part of a prompt. It contains a file.
781
+ */
782
+ export declare interface FilePart {
783
+ type: 'file';
784
+ /**
785
+ File data. Can either be:
786
+
787
+ - data: a base64-encoded string, a Uint8Array, an ArrayBuffer, or a Buffer
788
+ - URL: a URL that points to the image
789
+ */
790
+ data: DataContent | URL;
791
+ /**
792
+ Optional filename of the file.
793
+ */
794
+ filename?: string;
795
+ /**
796
+ Mime type of the file.
797
+ */
798
+ mimeType: string;
799
+ /**
800
+ Additional provider-specific metadata. They are passed through
801
+ to the provider from the AI SDK and enable provider-specific
802
+ functionality that can be fully encapsulated in the provider.
803
+ */
804
+ providerOptions?: ProviderOptions;
805
+ /**
806
+ @deprecated Use `providerOptions` instead.
807
+ */
808
+ experimental_providerMetadata?: ProviderMetadata;
809
+ }
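// Sketch: attaching a PDF to a user message via a FilePart. The file path is a placeholder, and the
// text/file part array shape assumes the SDK's UserContent type (not shown in this excerpt).
import { readFileSync } from 'node:fs';

const reportMessage: CoreUserMessage = {
  role: 'user',
  content: [
    { type: 'text', text: 'Summarize this report.' },
    {
      type: 'file',
      data: readFileSync('./report.pdf'), // a Buffer is valid DataContent
      filename: 'report.pdf',
      mimeType: 'application/pdf',
    },
  ],
};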
810
+
811
+ /**
812
+ * A file part of a message.
813
+ */
814
+ declare type FileUIPart = {
815
+ type: 'file';
816
+ mimeType: string;
817
+ data: string;
818
+ };
819
+
820
+ /**
821
+ Reason why a language model finished generating a response.
822
+
823
+ Can be one of the following:
824
+ - `stop`: model generated stop sequence
825
+ - `length`: model generated maximum number of tokens
826
+ - `content-filter`: content filter violation stopped the model
827
+ - `tool-calls`: model triggered tool calls
828
+ - `error`: model stopped because of an error
829
+ - `other`: model stopped for other reasons
830
+ */
831
+ export declare type FinishReason = LanguageModelV1FinishReason;
832
+
833
+ /**
834
+ * A generated file.
835
+ */
836
+ declare interface GeneratedFile {
837
+ /**
838
+ File as a base64 encoded string.
839
+ */
840
+ readonly base64: string;
841
+ /**
842
+ File as a Uint8Array.
843
+ */
844
+ readonly uint8Array: Uint8Array;
845
+ /**
846
+ MIME type of the file
847
+ */
848
+ readonly mimeType: string;
849
+ }
850
+
851
+ /**
852
+ Generate a structured, typed object for a given prompt and schema using a language model.
853
+
854
+ This function does not stream the output. If you want to stream the output, use `streamObject` instead.
855
+
856
+ @returns
857
+ A result object that contains the generated object, the finish reason, the token usage, and additional information.
858
+ */
859
+ export declare function generateObject<OBJECT>(
860
+ options: Omit<CallSettings, 'stopSequences'> &
861
+ Prompt & {
862
+ output?: 'object' | undefined;
863
+ /**
864
+ The language model to use.
865
+ */
866
+ model: LanguageModel;
867
+ /**
868
+ The schema of the object that the model should generate.
869
+ */
870
+ schema: z.Schema<OBJECT, z.ZodTypeDef, any> | Schema<OBJECT>;
871
+ /**
872
+ Optional name of the output that should be generated.
873
+ Used by some providers for additional LLM guidance, e.g.
874
+ via tool or schema name.
875
+ */
876
+ schemaName?: string;
877
+ /**
878
+ Optional description of the output that should be generated.
879
+ Used by some providers for additional LLM guidance, e.g.
880
+ via tool or schema description.
881
+ */
882
+ schemaDescription?: string;
883
+ /**
884
+ The mode to use for object generation.
885
+
886
+ The schema is converted into a JSON schema and used in one of the following ways
887
+
888
+ - 'auto': The provider will choose the best mode for the model.
889
+ - 'tool': A tool with the JSON schema as parameters is provided and the provider is instructed to use it.
890
+ - 'json': The JSON schema and an instruction are injected into the prompt. If the provider supports JSON mode, it is enabled. If the provider supports JSON grammars, the grammar is used.
891
+
892
+ Please note that most providers do not support all modes.
893
+
894
+ Default and recommended: 'auto' (best mode for the model).
895
+ */
896
+ mode?: 'auto' | 'json' | 'tool';
897
+ /**
898
+ A function that attempts to repair the raw output of the mode
899
+ to enable JSON parsing.
900
+ */
901
+ experimental_repairText?: RepairTextFunction;
902
+ /**
903
+ Optional telemetry configuration (experimental).
904
+ */
905
+ experimental_telemetry?: TelemetrySettings;
906
+ /**
907
+ Additional provider-specific options. They are passed through
908
+ to the provider from the AI SDK and enable provider-specific
909
+ functionality that can be fully encapsulated in the provider.
910
+ */
911
+ providerOptions?: ProviderOptions;
912
+ /**
913
+ @deprecated Use `providerOptions` instead.
914
+ */
915
+ experimental_providerMetadata?: ProviderMetadata;
916
+ /**
917
+ * Internal. For test use only. May change without notice.
918
+ */
919
+ _internal?: {
920
+ generateId?: () => string;
921
+ currentDate?: () => Date;
922
+ };
923
+ },
924
+ ): Promise<GenerateObjectResult<OBJECT>>;
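// Usage sketch for the schema overload of `generateObject` declared above (import specifier
// omitted). The provider import and model id are assumptions; any LanguageModel works.
import { openai } from '@ai-sdk/openai';
import { z } from 'zod';

const { object, usage } = await generateObject({
  model: openai('gpt-4o-mini'),
  schema: z.object({
    title: z.string(),
    tags: z.array(z.string()),
  }),
  schemaName: 'ArticleMetadata',
  prompt: 'Extract metadata for an article about TypeScript 5.5.',
});
// `object` is typed as { title: string; tags: string[] }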
925
+
926
+ /**
927
+ Generate an array with structured, typed elements for a given prompt and element schema using a language model.
928
+
929
+ This function does not stream the output. If you want to stream the output, use `streamObject` instead.
930
+
931
+ @return
932
+ A result object that contains the generated object, the finish reason, the token usage, and additional information.
933
+ */
934
+ export declare function generateObject<ELEMENT>(
935
+ options: Omit<CallSettings, 'stopSequences'> &
936
+ Prompt & {
937
+ output: 'array';
938
+ /**
939
+ The language model to use.
940
+ */
941
+ model: LanguageModel;
942
+ /**
943
+ The element schema of the array that the model should generate.
944
+ */
945
+ schema: z.Schema<ELEMENT, z.ZodTypeDef, any> | Schema<ELEMENT>;
946
+ /**
947
+ Optional name of the array that should be generated.
948
+ Used by some providers for additional LLM guidance, e.g.
949
+ via tool or schema name.
950
+ */
951
+ schemaName?: string;
952
+ /**
953
+ Optional description of the array that should be generated.
954
+ Used by some providers for additional LLM guidance, e.g.
955
+ via tool or schema description.
956
+ */
957
+ schemaDescription?: string;
958
+ /**
959
+ The mode to use for object generation.
960
+
961
+ The schema is converted into a JSON schema and used in one of the following ways
962
+
963
+ - 'auto': The provider will choose the best mode for the model.
964
+ - 'tool': A tool with the JSON schema as parameters is provided and the provider is instructed to use it.
965
+ - 'json': The JSON schema and an instruction are injected into the prompt. If the provider supports JSON mode, it is enabled. If the provider supports JSON grammars, the grammar is used.
966
+
967
+ Please note that most providers do not support all modes.
968
+
969
+ Default and recommended: 'auto' (best mode for the model).
970
+ */
971
+ mode?: 'auto' | 'json' | 'tool';
972
+ /**
973
+ A function that attempts to repair the raw output of the mode
974
+ to enable JSON parsing.
975
+ */
976
+ experimental_repairText?: RepairTextFunction;
977
+ /**
978
+ Optional telemetry configuration (experimental).
979
+ */
980
+ experimental_telemetry?: TelemetrySettings;
981
+ /**
982
+ Additional provider-specific options. They are passed through
983
+ to the provider from the AI SDK and enable provider-specific
984
+ functionality that can be fully encapsulated in the provider.
985
+ */
986
+ providerOptions?: ProviderOptions;
987
+ /**
988
+ @deprecated Use `providerOptions` instead.
989
+ */
990
+ experimental_providerMetadata?: ProviderMetadata;
991
+ /**
992
+ * Internal. For test use only. May change without notice.
993
+ */
994
+ _internal?: {
995
+ generateId?: () => string;
996
+ currentDate?: () => Date;
997
+ };
998
+ },
999
+ ): Promise<GenerateObjectResult<Array<ELEMENT>>>;
1000
+
1001
+ /**
1002
+ Generate a value from an enum (limited list of string values) using a language model.
1003
+
1004
+ This function does not stream the output.
1005
+
1006
+ @return
1007
+ A result object that contains the generated value, the finish reason, the token usage, and additional information.
1008
+ */
1009
+ export declare function generateObject<ENUM extends string>(
1010
+ options: Omit<CallSettings, 'stopSequences'> &
1011
+ Prompt & {
1012
+ output: 'enum';
1013
+ /**
1014
+ The language model to use.
1015
+ */
1016
+ model: LanguageModel;
1017
+ /**
1018
+ The enum values that the model should use.
1019
+ */
1020
+ enum: Array<ENUM>;
1021
+ /**
1022
+ The mode to use for object generation.
1023
+
1024
+ The schema is converted into a JSON schema and used in one of the following ways
1025
+
1026
+ - 'auto': The provider will choose the best mode for the model.
1027
+ - 'tool': A tool with the JSON schema as parameters is provided and the provider is instructed to use it.
1028
+ - 'json': The JSON schema and an instruction are injected into the prompt. If the provider supports JSON mode, it is enabled. If the provider supports JSON grammars, the grammar is used.
1029
+
1030
+ Please note that most providers do not support all modes.
1031
+
1032
+ Default and recommended: 'auto' (best mode for the model).
1033
+ */
1034
+ mode?: 'auto' | 'json' | 'tool';
1035
+ /**
1036
+ A function that attempts to repair the raw output of the mode
1037
+ to enable JSON parsing.
1038
+ */
1039
+ experimental_repairText?: RepairTextFunction;
1040
+ /**
1041
+ Optional telemetry configuration (experimental).
1042
+ */
1043
+ experimental_telemetry?: TelemetrySettings;
1044
+ /**
1045
+ Additional provider-specific options. They are passed through
1046
+ to the provider from the AI SDK and enable provider-specific
1047
+ functionality that can be fully encapsulated in the provider.
1048
+ */
1049
+ providerOptions?: ProviderOptions;
1050
+ /**
1051
+ @deprecated Use `providerOptions` instead.
1052
+ */
1053
+ experimental_providerMetadata?: ProviderMetadata;
1054
+ /**
1055
+ * Internal. For test use only. May change without notice.
1056
+ */
1057
+ _internal?: {
1058
+ generateId?: () => string;
1059
+ currentDate?: () => Date;
1060
+ };
1061
+ },
1062
+ ): Promise<GenerateObjectResult<ENUM>>;
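// Sketch for the `output: 'enum'` overload: classify text into a closed set of labels.
// The provider import is the same assumption as in the earlier generateObject example.
import { openai } from '@ai-sdk/openai';

const { object: sentiment } = await generateObject({
  model: openai('gpt-4o-mini'),
  output: 'enum',
  enum: ['positive', 'neutral', 'negative'],
  prompt: 'Classify the sentiment: "The beta release fixed every issue I had."',
});
// `sentiment` is narrowed to 'positive' | 'neutral' | 'negative'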
1063
+
1064
+ /**
1065
+ Generate JSON with any schema for a given prompt using a language model.
1066
+
1067
+ This function does not stream the output. If you want to stream the output, use `streamObject` instead.
1068
+
1069
+ @returns
1070
+ A result object that contains the generated object, the finish reason, the token usage, and additional information.
1071
+ */
1072
+ export declare function generateObject(
1073
+ options: Omit<CallSettings, 'stopSequences'> &
1074
+ Prompt & {
1075
+ output: 'no-schema';
1076
+ /**
1077
+ The language model to use.
1078
+ */
1079
+ model: LanguageModel;
1080
+ /**
1081
+ The mode to use for object generation. Must be "json" for no-schema output.
1082
+ */
1083
+ mode?: 'json';
1084
+ /**
1085
+ A function that attempts to repair the raw output of the mode
1086
+ to enable JSON parsing.
1087
+ */
1088
+ experimental_repairText?: RepairTextFunction;
1089
+ /**
1090
+ Optional telemetry configuration (experimental).
1091
+ */
1092
+ experimental_telemetry?: TelemetrySettings;
1093
+ /**
1094
+ Additional provider-specific options. They are passed through
1095
+ to the provider from the AI SDK and enable provider-specific
1096
+ functionality that can be fully encapsulated in the provider.
1097
+ */
1098
+ providerOptions?: ProviderOptions;
1099
+ /**
1100
+ @deprecated Use `providerOptions` instead.
1101
+ */
1102
+ experimental_providerMetadata?: ProviderMetadata;
1103
+ /**
1104
+ * Internal. For test use only. May change without notice.
1105
+ */
1106
+ _internal?: {
1107
+ generateId?: () => string;
1108
+ currentDate?: () => Date;
1109
+ };
1110
+ },
1111
+ ): Promise<GenerateObjectResult<JSONValue>>;
1112
+
1113
+ /**
1114
+ The result of a `generateObject` call.
1115
+ */
1116
+ export declare interface GenerateObjectResult<OBJECT> {
1117
+ /**
1118
+ The generated object (typed according to the schema).
1119
+ */
1120
+ readonly object: OBJECT;
1121
+ /**
1122
+ The reason why the generation finished.
1123
+ */
1124
+ readonly finishReason: FinishReason;
1125
+ /**
1126
+ The token usage of the generated text.
1127
+ */
1128
+ readonly usage: LanguageModelUsage;
1129
+ /**
1130
+ Warnings from the model provider (e.g. unsupported settings).
1131
+ */
1132
+ readonly warnings: CallWarning[] | undefined;
1133
+ /**
1134
+ Additional request information.
1135
+ */
1136
+ readonly request: LanguageModelRequestMetadata;
1137
+ /**
1138
+ Additional response information.
1139
+ */
1140
+ readonly response: LanguageModelResponseMetadata & {
1141
+ /**
1142
+ Response body (available only for providers that use HTTP requests).
1143
+ */
1144
+ body?: unknown;
1145
+ };
1146
+ /**
1147
+ Logprobs for the completion.
1148
+ `undefined` if the mode does not support logprobs or if it was not enabled.
1149
+
1150
+ @deprecated Will become a provider extension in the future.
1151
+ */
1152
+ readonly logprobs: LogProbs | undefined;
1153
+ /**
1154
+ Additional provider-specific metadata. They are passed through
1155
+ from the provider to the AI SDK and enable provider-specific
1156
+ results that can be fully encapsulated in the provider.
1157
+ */
1158
+ readonly providerMetadata: ProviderMetadata | undefined;
1159
+ /**
1160
+ @deprecated Use `providerMetadata` instead.
1161
+ */
1162
+ readonly experimental_providerMetadata: ProviderMetadata | undefined;
1163
+ /**
1164
+ Converts the object to a JSON response.
1165
+ The response will have a status code of 200 and a content type of `application/json; charset=utf-8`.
1166
+ */
1167
+ toJsonResponse(init?: ResponseInit): Response;
1168
+ }
1169
+
1170
+ /**
1171
+ Generate a text and call tools for a given prompt using a language model.
1172
+
1173
+ This function does not stream the output. If you want to stream the output, use `streamText` instead.
1174
+
1175
+ @param model - The language model to use.
1176
+
1177
+ @param tools - Tools that are accessible to and can be called by the model. The model needs to support calling tools.
1178
+ @param toolChoice - The tool choice strategy. Default: 'auto'.
1179
+
1180
+ @param system - A system message that will be part of the prompt.
1181
+ @param prompt - A simple text prompt. You can either use `prompt` or `messages` but not both.
1182
+ @param messages - A list of messages. You can either use `prompt` or `messages` but not both.
1183
+
1184
+ @param maxTokens - Maximum number of tokens to generate.
1185
+ @param temperature - Temperature setting.
1186
+ The value is passed through to the provider. The range depends on the provider and model.
1187
+ It is recommended to set either `temperature` or `topP`, but not both.
1188
+ @param topP - Nucleus sampling.
1189
+ The value is passed through to the provider. The range depends on the provider and model.
1190
+ It is recommended to set either `temperature` or `topP`, but not both.
1191
+ @param topK - Only sample from the top K options for each subsequent token.
1192
+ Used to remove "long tail" low probability responses.
1193
+ Recommended for advanced use cases only. You usually only need to use temperature.
1194
+ @param presencePenalty - Presence penalty setting.
1195
+ It affects the likelihood of the model to repeat information that is already in the prompt.
1196
+ The value is passed through to the provider. The range depends on the provider and model.
1197
+ @param frequencyPenalty - Frequency penalty setting.
1198
+ It affects the likelihood of the model to repeatedly use the same words or phrases.
1199
+ The value is passed through to the provider. The range depends on the provider and model.
1200
+ @param stopSequences - Stop sequences.
1201
+ If set, the model will stop generating text when one of the stop sequences is generated.
1202
+ @param seed - The seed (integer) to use for random sampling.
1203
+ If set and supported by the model, calls will generate deterministic results.
1204
+
1205
+ @param maxRetries - Maximum number of retries. Set to 0 to disable retries. Default: 2.
1206
+ @param abortSignal - An optional abort signal that can be used to cancel the call.
1207
+ @param headers - Additional HTTP headers to be sent with the request. Only applicable for HTTP-based providers.
1208
+
1209
+ @param maxSteps - Maximum number of sequential LLM calls (steps), e.g. when you use tool calls.
1210
+ @param experimental_generateMessageId - Generate a unique ID for each message.
1211
+
1212
+ @param onStepFinish - Callback that is called when each step (LLM call) is finished, including intermediate steps.
1213
+
1214
+ @returns
1215
+ A result object that contains the generated text, the results of the tool calls, and additional information.
1216
+ */
1217
+ export declare function generateText<TOOLS extends ToolSet, OUTPUT = never, OUTPUT_PARTIAL = never>({
1218
+ model,
1219
+ tools,
1220
+ toolChoice,
1221
+ system,
1222
+ prompt,
1223
+ messages,
1224
+ maxRetries: maxRetriesArg,
1225
+ abortSignal,
1226
+ headers,
1227
+ maxSteps,
1228
+ experimental_generateMessageId: generateMessageId,
1229
+ experimental_output: output,
1230
+ experimental_continueSteps: continueSteps,
1231
+ experimental_telemetry: telemetry,
1232
+ experimental_providerMetadata,
1233
+ providerOptions,
1234
+ experimental_activeTools: activeTools,
1235
+ experimental_prepareStep: prepareStep,
1236
+ experimental_repairToolCall: repairToolCall,
1237
+ _internal: { generateId, currentDate },
1238
+ onStepFinish,
1239
+ ...settings
1240
+ }: CallSettings &
1241
+ Prompt & {
1242
+ /**
1243
+ The language model to use.
1244
+ */
1245
+ model: LanguageModel;
1246
+ /**
1247
+ The tools that the model can call. The model needs to support calling tools.
1248
+ */
1249
+ tools?: TOOLS;
1250
+ /**
1251
+ The tool choice strategy. Default: 'auto'.
1252
+ */
1253
+ toolChoice?: ToolChoice<TOOLS>;
1254
+ /**
1255
+ Maximum number of sequential LLM calls (steps), e.g. when you use tool calls. Must be at least 1.
1256
+
1257
+ A maximum number is required to prevent infinite loops in the case of misconfigured tools.
1258
+
1259
+ By default, it's set to 1, which means that only a single LLM call is made.
1260
+ */
1261
+ maxSteps?: number;
1262
+ /**
1263
+ Generate a unique ID for each message.
1264
+ */
1265
+ experimental_generateMessageId?: IDGenerator;
1266
+ /**
1267
+ When enabled, the model will perform additional steps if the finish reason is "length" (experimental).
1268
+
1269
+ By default, it's set to false.
1270
+ */
1271
+ experimental_continueSteps?: boolean;
1272
+ /**
1273
+ Optional telemetry configuration (experimental).
1274
+ */
1275
+ experimental_telemetry?: TelemetrySettings;
1276
+ /**
1277
+ Additional provider-specific options. They are passed through
1278
+ to the provider from the AI SDK and enable provider-specific
1279
+ functionality that can be fully encapsulated in the provider.
1280
+ */
1281
+ providerOptions?: ProviderOptions;
1282
+ /**
1283
+ @deprecated Use `providerOptions` instead.
1284
+ */
1285
+ experimental_providerMetadata?: ProviderMetadata;
1286
+ /**
1287
+ Limits the tools that are available for the model to call without
1288
+ changing the tool call and result types in the result.
1289
+ */
1290
+ experimental_activeTools?: Array<keyof TOOLS>;
1291
+ /**
1292
+ Optional specification for parsing structured outputs from the LLM response.
1293
+ */
1294
+ experimental_output?: Output_2<OUTPUT, OUTPUT_PARTIAL>;
1295
+ /**
1296
+ Optional function that you can use to provide different settings for a step.
1297
+
1298
+ @param options - The options for the step.
1299
+ @param options.steps - The steps that have been executed so far.
1300
+ @param options.stepNumber - The number of the step that is being executed.
1301
+ @param options.maxSteps - The maximum number of steps.
1302
+ @param options.model - The model that is being used.
1303
+
1304
+ @returns An object that contains the settings for the step.
1305
+ If you return undefined (or for undefined settings), the settings from the outer level will be used.
1306
+ */
1307
+ experimental_prepareStep?: (options: {
1308
+ steps: Array<StepResult<TOOLS>>;
1309
+ stepNumber: number;
1310
+ maxSteps: number;
1311
+ model: LanguageModel;
1312
+ }) => PromiseLike<
1313
+ | {
1314
+ model?: LanguageModel;
1315
+ toolChoice?: ToolChoice<TOOLS>;
1316
+ experimental_activeTools?: Array<keyof TOOLS>;
1317
+ }
1318
+ | undefined
1319
+ >;
1320
+ /**
1321
+ A function that attempts to repair a tool call that failed to parse.
1322
+ */
1323
+ experimental_repairToolCall?: ToolCallRepairFunction<TOOLS>;
1324
+ /**
1325
+ Callback that is called when each step (LLM call) is finished, including intermediate steps.
1326
+ */
1327
+ onStepFinish?: GenerateTextOnStepFinishCallback<TOOLS>;
1328
+ /**
1329
+ * Internal. For test use only. May change without notice.
1330
+ */
1331
+ _internal?: {
1332
+ generateId?: IDGenerator;
1333
+ currentDate?: () => Date;
1334
+ };
1335
+ }): Promise<GenerateTextResult<TOOLS, OUTPUT>>;
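// Usage sketch for `generateText` as declared above (import specifier omitted), with a single
// step (maxSteps defaults to 1) and a step callback. The provider import is an assumption.
import { openai } from '@ai-sdk/openai';

const result = await generateText({
  model: openai('gpt-4o-mini'),
  system: 'You answer in one sentence.',
  prompt: 'Why set either temperature or topP, but not both?',
  temperature: 0.3,
  maxSteps: 1,
  onStepFinish: step => {
    console.log(step.finishReason, step.usage.totalTokens);
  },
});
console.log(result.text);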
1336
+
1337
+ /**
1338
+ Callback that is set using the `onStepFinish` option.
1339
+
1340
+ @param stepResult - The result of the step.
1341
+ */
1342
+ export declare type GenerateTextOnStepFinishCallback<TOOLS extends ToolSet> = (
1343
+ stepResult: StepResult<TOOLS>,
1344
+ ) => Promise<void> | void;
1345
+
1346
+ /**
1347
+ The result of a `generateText` call.
1348
+ It contains the generated text, the tool calls that were made during the generation, and the results of the tool calls.
1349
+ */
1350
+ export declare interface GenerateTextResult<TOOLS extends ToolSet, OUTPUT> {
1351
+ /**
1352
+ The generated text.
1353
+ */
1354
+ readonly text: string;
1355
+ /**
1356
+ The reasoning text that the model has generated. Can be undefined if the model
1357
+ has only generated text.
1358
+ */
1359
+ readonly reasoning: string | undefined;
1360
+ /**
1361
+ The files that were generated. Empty array if no files were generated.
1362
+ */
1363
+ readonly files: Array<GeneratedFile>;
1364
+ /**
1365
+ The full reasoning that the model has generated.
1366
+ */
1367
+ readonly reasoningDetails: Array<ReasoningDetail>;
1368
+ /**
1369
+ Sources that have been used as input to generate the response.
1370
+ For multi-step generation, the sources are accumulated from all steps.
1371
+ */
1372
+ readonly sources: Source[];
1373
+ /**
1374
+ The generated structured output. It uses the `experimental_output` specification.
1375
+ */
1376
+ readonly experimental_output: OUTPUT;
1377
+ /**
1378
+ The tool calls that were made during the generation.
1379
+ */
1380
+ readonly toolCalls: ToolCallArray<TOOLS>;
1381
+ /**
1382
+ The results of the tool calls.
1383
+ */
1384
+ readonly toolResults: ToolResultArray<TOOLS>;
1385
+ /**
1386
+ The reason why the generation finished.
1387
+ */
1388
+ readonly finishReason: FinishReason;
1389
+ /**
1390
+ The token usage of the generated text.
1391
+ */
1392
+ readonly usage: LanguageModelUsage;
1393
+ /**
1394
+ Warnings from the model provider (e.g. unsupported settings)
1395
+ */
1396
+ readonly warnings: CallWarning[] | undefined;
1397
+ /**
1398
+ Details for all steps.
1399
+ You can use this to get information about intermediate steps,
1400
+ such as the tool calls or the response headers.
1401
+ */
1402
+ readonly steps: Array<StepResult<TOOLS>>;
1403
+ /**
1404
+ Additional request information.
1405
+ */
1406
+ readonly request: LanguageModelRequestMetadata;
1407
+ /**
1408
+ Additional response information.
1409
+ */
1410
+ readonly response: LanguageModelResponseMetadata & {
1411
+ /**
1412
+ The response messages that were generated during the call. It consists of an assistant message,
1413
+ potentially containing tool calls.
1414
+
1415
+ When there are tool results, there is an additional tool message with the tool results that are available.
1416
+ If there are tools that do not have execute functions, they are not included in the tool results and
1417
+ need to be added separately.
1418
+ */
1419
+ messages: Array<ResponseMessage>;
1420
+ /**
1421
+ Response body (available only for providers that use HTTP requests).
1422
+ */
1423
+ body?: unknown;
1424
+ };
1425
+ /**
1426
+ Logprobs for the completion.
1427
+ `undefined` if the mode does not support logprobs or if it was not enabled.
1428
+
1429
+ @deprecated Will become a provider extension in the future.
1430
+ */
1431
+ readonly logprobs: LogProbs | undefined;
1432
+ /**
1433
+ Additional provider-specific metadata. They are passed through
1434
+ from the provider to the AI SDK and enable provider-specific
1435
+ results that can be fully encapsulated in the provider.
1436
+ */
1437
+ readonly providerMetadata: ProviderMetadata | undefined;
1438
+ /**
1439
+ @deprecated Use `providerMetadata` instead.
1440
+ */
1441
+ readonly experimental_providerMetadata: ProviderMetadata | undefined;
1442
+ }
1443
+
1444
+ /**
1445
+ * Defines High-Resolution Time.
1446
+ *
1447
+ * The first number, HrTime[0], is UNIX Epoch time in seconds since 00:00:00 UTC on 1 January 1970.
1448
+ * The second number, HrTime[1], represents the partial second elapsed since Unix Epoch time represented by first number in nanoseconds.
1449
+ * For example, 2021-01-01T12:30:10.150Z in UNIX Epoch time in milliseconds is represented as 1609504210150.
1450
+ * The first number is calculated by converting and truncating the Epoch time in milliseconds to seconds:
1451
+ * HrTime[0] = Math.trunc(1609504210150 / 1000) = 1609504210.
1452
+ * The second number is calculated by converting the digits after the decimal point of the subtraction, (1609504210150 / 1000) - HrTime[0], to nanoseconds:
1453
+ * HrTime[1] = Number((1609504210.150 - HrTime[0]).toFixed(9)) * 1e9 = 150000000.
1454
+ * This is represented in HrTime format as [1609504210, 150000000].
1455
+ */
1456
+ declare type HrTime = [number, number];
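// Sketch mirroring the arithmetic in the comment above: convert epoch milliseconds to HrTime
// using integer math on the millisecond remainder to avoid floating-point drift.
function epochMillisToHrTime(epochMillis: number): HrTime {
  const seconds = Math.trunc(epochMillis / 1000);
  const nanos = (epochMillis - seconds * 1000) * 1e6; // leftover milliseconds → nanoseconds
  return [seconds, nanos];
}
// epochMillisToHrTime(1609504210150) -> [1609504210, 150000000]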
1457
+
1458
+ /**
1459
+ A function that generates an ID.
1460
+ */
1461
+ declare type IDGenerator = () => string;
1462
+
1463
+ export declare type IdGenerator = () => string;
1464
+
1465
+ /**
1466
+ Image content part of a prompt. It contains an image.
1467
+ */
1468
+ export declare interface ImagePart {
1469
+ type: 'image';
1470
+ /**
1471
+ Image data. Can either be:
1472
+
1473
+ - data: a base64-encoded string, a Uint8Array, an ArrayBuffer, or a Buffer
1474
+ - URL: a URL that points to the image
1475
+ */
1476
+ image: DataContent | URL;
1477
+ /**
1478
+ Optional mime type of the image.
1479
+ */
1480
+ mimeType?: string;
1481
+ /**
1482
+ Additional provider-specific metadata. They are passed through
1483
+ to the provider from the AI SDK and enable provider-specific
1484
+ functionality that can be fully encapsulated in the provider.
1485
+ */
1486
+ providerOptions?: ProviderOptions;
1487
+ /**
1488
+ @deprecated Use `providerOptions` instead.
1489
+ */
1490
+ experimental_providerMetadata?: ProviderMetadata;
1491
+ }
1492
+
1493
+ declare type inferParameters<PARAMETERS extends ToolParameters> =
1494
+ PARAMETERS extends Schema<any> ? PARAMETERS['_type'] : PARAMETERS extends z.ZodTypeAny ? z.infer<PARAMETERS> : never;
1495
+
1496
+ declare class InvalidToolArgumentsError extends AISDKError {
1497
+ private readonly [symbol$f];
1498
+ readonly toolName: string;
1499
+ readonly toolArgs: string;
1500
+ constructor({
1501
+ toolArgs,
1502
+ toolName,
1503
+ cause,
1504
+ message,
1505
+ }: {
1506
+ message?: string;
1507
+ toolArgs: string;
1508
+ toolName: string;
1509
+ cause: unknown;
1510
+ });
1511
+ static isInstance(error: unknown): error is InvalidToolArgumentsError;
1512
+ }
1513
+
1514
+ declare type JSONArray = JSONValue[];
1515
+
1516
+ declare type JSONObject = {
1517
+ [key: string]: JSONValue;
1518
+ };
1519
+
1520
+ declare class JSONParseError extends AISDKError {
1521
+ private readonly [symbol$7];
1522
+ readonly text: string;
1523
+ constructor({ text, cause }: { text: string; cause: unknown });
1524
+ static isInstance(error: unknown): error is JSONParseError;
1525
+ }
1526
+
1527
+ /**
1528
+ * Create a schema using a JSON Schema.
1529
+ *
1530
+ * @param jsonSchema The JSON Schema for the schema.
1531
+ * @param options.validate Optional. A validation function for the schema.
1532
+ */
1533
+ export declare function jsonSchema<OBJECT = unknown>(
1534
+ jsonSchema: JSONSchema7,
1535
+ {
1536
+ validate,
1537
+ }?: {
1538
+ validate?: (value: unknown) =>
1539
+ | {
1540
+ success: true;
1541
+ value: OBJECT;
1542
+ }
1543
+ | {
1544
+ success: false;
1545
+ error: Error;
1546
+ };
1547
+ },
1548
+ ): Schema<OBJECT>;
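// Usage sketch for `jsonSchema`: wrap a hand-written JSON Schema (instead of Zod) so it can be
// passed wherever a Schema<T> is accepted, e.g. the `schema` option of generateObject.
type Recipe = { name: string; steps: string[] };

const recipeSchema = jsonSchema<Recipe>({
  type: 'object',
  properties: {
    name: { type: 'string' },
    steps: { type: 'array', items: { type: 'string' } },
  },
  required: ['name', 'steps'],
});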
1549
+
1550
+ declare type JSONValue = null | string | number | boolean | JSONObject | JSONArray;
1551
+
1552
+ /**
1553
+ A JSON value can be a string, number, boolean, object, array, or null.
1554
+ JSON values can be serialized and deserialized by the JSON.stringify and JSON.parse methods.
1555
+ */
1556
+ declare type JSONValue_2 =
1557
+ | null
1558
+ | string
1559
+ | number
1560
+ | boolean
1561
+ | {
1562
+ [value: string]: JSONValue_2;
1563
+ }
1564
+ | Array<JSONValue_2>;
1565
+
1566
+ /**
1567
+ Language model that is used by the AI SDK Core functions.
1568
+ */
1569
+ export declare type LanguageModel = LanguageModelV1;
1570
+
1571
+ export declare type LanguageModelRequestMetadata = {
1572
+ /**
1573
+ Raw request HTTP body that was sent to the provider API as a string (JSON should be stringified).
1574
+ */
1575
+ body?: string;
1576
+ };
1577
+
1578
+ declare type LanguageModelResponseMetadata = {
1579
+ /**
1580
+ ID for the generated response.
1581
+ */
1582
+ id: string;
1583
+ /**
1584
+ Timestamp for the start of the generated response.
1585
+ */
1586
+ timestamp: Date;
1587
+ /**
1588
+ The ID of the response model that was used to generate the response.
1589
+ */
1590
+ modelId: string;
1591
+ /**
1592
+ Response headers (available only for providers that use HTTP requests).
1593
+ */
1594
+ headers?: Record<string, string>;
1595
+ };
1596
+
1597
+ /**
1598
+ Represents the number of tokens used in a prompt and completion.
1599
+ */
1600
+ declare type LanguageModelUsage = {
1601
+ /**
1602
+ The number of tokens used in the prompt.
1603
+ */
1604
+ promptTokens: number;
1605
+ /**
1606
+ The number of tokens used in the completion.
1607
+ */
1608
+ completionTokens: number;
1609
+ /**
1610
+ The total number of tokens used (promptTokens + completionTokens).
1611
+ */
1612
+ totalTokens: number;
1613
+ };
1614
+
1615
+ /**
1616
+ Specification for a language model that implements the language model interface version 1.
1617
+ */
1618
+ export declare type LanguageModelV1 = {
1619
+ /**
1620
+ The language model must specify which language model interface
1621
+ version it implements. This will allow us to evolve the language
1622
+ model interface and retain backwards compatibility. The different
1623
+ implementation versions can be handled as a discriminated union
1624
+ on our side.
1625
+ */
1626
+ readonly specificationVersion: 'v1';
1627
+ /**
1628
+ Name of the provider for logging purposes.
1629
+ */
1630
+ readonly provider: string;
1631
+ /**
1632
+ Provider-specific model ID for logging purposes.
1633
+ */
1634
+ readonly modelId: string;
1635
+ /**
1636
+ Default object generation mode that should be used with this model when
1637
+ no mode is specified. Should be the mode with the best results for this
1638
+ model. `undefined` can be returned if object generation is not supported.
1639
+
1640
+ This is needed to generate the best objects possible without requiring the
1641
+ user to explicitly specify the object generation mode.
1642
+ */
1643
+ readonly defaultObjectGenerationMode: LanguageModelV1ObjectGenerationMode;
1644
+ /**
1645
+ Flag whether this model supports image URLs. Default is `true`.
1646
+
1647
+ When the flag is set to `false`, the AI SDK will download the image and
1648
+ pass the image data to the model.
1649
+ */
1650
+ readonly supportsImageUrls?: boolean;
1651
+ /**
1652
+ Flag whether this model supports grammar-guided generation,
1653
+ i.e. follows JSON schemas for object generation
1654
+ when the response format is set to 'json' or
1655
+ when the `object-json` mode is used.
1656
+
1657
+ This means that the model guarantees that the generated JSON
1658
+ will be a valid JSON object AND that the object will match the
1659
+ JSON schema.
1660
+
1661
+ Please note that `generateObject` and `streamObject` will work
1662
+ regardless of this flag, but might send different prompts and
1663
+ use further optimizations if this flag is set to `true`.
1664
+
1665
+ Defaults to `false`.
1666
+ */
1667
+ readonly supportsStructuredOutputs?: boolean;
1668
+ /**
1669
+ Checks if the model supports the given URL for file parts natively.
1670
+ If the model does not support the URL,
1671
+ the AI SDK will download the file and pass the file data to the model.
1672
+
1673
+ When undefined, the AI SDK will download the file.
1674
+ */
1675
+ supportsUrl?(url: URL): boolean;
1676
+ /**
1677
+ Generates a language model output (non-streaming).
1678
+
1679
+ Naming: "do" prefix to prevent accidental direct usage of the method
1680
+ by the user.
1681
+ */
1682
+ doGenerate(options: LanguageModelV1CallOptions): PromiseLike<{
1683
+ /**
1684
+ Text that the model has generated.
1685
+ Can be undefined if the model did not generate any text.
1686
+ */
1687
+ text?: string;
1688
+ /**
1689
+ Reasoning that the model has generated.
1690
+ Can be undefined if the model does not support reasoning.
1691
+ */
1692
+ reasoning?:
1693
+ | string
1694
+ | Array<
1695
+ | {
1696
+ type: 'text';
1697
+ text: string;
1698
+ /**
1699
+ An optional signature for verifying that the reasoning originated from the model.
1700
+ */
1701
+ signature?: string;
1702
+ }
1703
+ | {
1704
+ type: 'redacted';
1705
+ data: string;
1706
+ }
1707
+ >;
1708
+ /**
1709
+ Generated files as base64 encoded strings or binary data.
1710
+ The files should be returned without any unnecessary conversion.
1711
+ If the API returns base64 encoded strings, the files should be returned
1712
+ as base64 encoded strings. If the API returns binary data, the files should
1713
+ be returned as binary data.
1714
+ */
1715
+ files?: Array<{
1716
+ data: string | Uint8Array;
1717
+ mimeType: string;
1718
+ }>;
1719
+ /**
1720
+ Tool calls that the model has generated.
1721
+ Can be undefined if the model did not generate any tool calls.
1722
+ */
1723
+ toolCalls?: Array<LanguageModelV1FunctionToolCall>;
1724
+ /**
1725
+ Finish reason.
1726
+ */
1727
+ finishReason: LanguageModelV1FinishReason;
1728
+ /**
1729
+ Usage information.
1730
+ */
1731
+ usage: {
1732
+ promptTokens: number;
1733
+ completionTokens: number;
1734
+ };
1735
+ /**
1736
+ Raw prompt and setting information for observability provider integration.
1737
+ */
1738
+ rawCall: {
1739
+ /**
1740
+ Raw prompt after expansion and conversion to the format that the
1741
+ provider uses to send the information to their API.
1742
+ */
1743
+ rawPrompt: unknown;
1744
+ /**
1745
+ Raw settings that are used for the API call. Includes provider-specific
1746
+ settings.
1747
+ */
1748
+ rawSettings: Record<string, unknown>;
1749
+ };
1750
+ /**
1751
+ Optional response information for telemetry and debugging purposes.
1752
+ */
1753
+ rawResponse?: {
1754
+ /**
1755
+ Response headers.
1756
+ */
1757
+ headers?: Record<string, string>;
1758
+ /**
1759
+ Response body.
1760
+ */
1761
+ body?: unknown;
1762
+ };
1763
+ /**
1764
+ Optional request information for telemetry and debugging purposes.
1765
+ */
1766
+ request?: {
1767
+ /**
1768
+ Raw request HTTP body that was sent to the provider API as a string (JSON should be stringified).
1769
+ Non-HTTP(s) providers should not set this.
1770
+ */
1771
+ body?: string;
1772
+ };
1773
+ /**
1774
+ Optional response information for telemetry and debugging purposes.
1775
+ */
1776
+ response?: {
1777
+ /**
1778
+ ID for the generated response, if the provider sends one.
1779
+ */
1780
+ id?: string;
1781
+ /**
1782
+ Timestamp for the start of the generated response, if the provider sends one.
1783
+ */
1784
+ timestamp?: Date;
1785
+ /**
1786
+ The ID of the response model that was used to generate the response, if the provider sends one.
1787
+ */
1788
+ modelId?: string;
1789
+ };
1790
+ warnings?: LanguageModelV1CallWarning[];
1791
+ /**
1792
+ Additional provider-specific metadata. They are passed through
1793
+ from the provider to the AI SDK and enable provider-specific
1794
+ results that can be fully encapsulated in the provider.
1795
+ */
1796
+ providerMetadata?: LanguageModelV1ProviderMetadata;
1797
+ /**
1798
+ Sources that have been used as input to generate the response.
1799
+ */
1800
+ sources?: LanguageModelV1Source[];
1801
+ /**
1802
+ Logprobs for the completion.
1803
+ `undefined` if the mode does not support logprobs or if it was not enabled
1804
+
1805
+ @deprecated will be changed into a provider-specific extension in v2
1806
+ */
1807
+ logprobs?: LanguageModelV1LogProbs;
1808
+ }>;
1809
+ /**
1810
+ Generates a language model output (streaming).
1811
+
1812
+ Naming: "do" prefix to prevent accidental direct usage of the method
1813
+ by the user.
1814
+ *
1815
+ @return A stream of higher-level language model output parts.
1816
+ */
1817
+ doStream(options: LanguageModelV1CallOptions): PromiseLike<{
1818
+ stream: ReadableStream<LanguageModelV1StreamPart>;
1819
+ /**
1820
+ Raw prompt and setting information for observability provider integration.
1821
+ */
1822
+ rawCall: {
1823
+ /**
1824
+ Raw prompt after expansion and conversion to the format that the
1825
+ provider uses to send the information to their API.
1826
+ */
1827
+ rawPrompt: unknown;
1828
+ /**
1829
+ Raw settings that are used for the API call. Includes provider-specific
1830
+ settings.
1831
+ */
1832
+ rawSettings: Record<string, unknown>;
1833
+ };
1834
+ /**
1835
+ Optional raw response data.
1836
+ */
1837
+ rawResponse?: {
1838
+ /**
1839
+ Response headers.
1840
+ */
1841
+ headers?: Record<string, string>;
1842
+ };
1843
+ /**
1844
+ Optional request information for telemetry and debugging purposes.
1845
+ */
1846
+ request?: {
1847
+ /**
1848
+ Raw request HTTP body that was sent to the provider API as a string (JSON should be stringified).
1849
+ Non-HTTP(s) providers should not set this.
1850
+ */
1851
+ body?: string;
1852
+ };
1853
+ /**
1854
+ Warnings for the call, e.g. unsupported settings.
1855
+ */
1856
+ warnings?: Array<LanguageModelV1CallWarning>;
1857
+ }>;
1858
+ };
1859
+
1860
+ declare type LanguageModelV1CallOptions = LanguageModelV1CallSettings & {
1861
+ /**
1862
+ Whether the user provided the input as messages or as
1863
+ a prompt. This can help guide non-chat models in the
1864
+ expansion, because different expansions can be needed for
1865
+ chat/non-chat use cases.
1866
+ */
1867
+ inputFormat: 'messages' | 'prompt';
1868
+ /**
1869
+ The mode affects the behavior of the language model. It is required to
1870
+ support provider-independent streaming and generation of structured objects.
1871
+ The model can take this information and e.g. configure json mode, the correct
1872
+ low level grammar, etc. It can also be used to optimize the efficiency of the
1873
+ streaming, e.g. tool-delta stream parts are only needed in the
1874
+ object-tool mode.
1875
+
1876
+ @deprecated mode will be removed in v2.
1877
+ All necessary settings will be directly supported through the call settings,
1878
+ in particular responseFormat, toolChoice, and tools.
1879
+ */
1880
+ mode:
1881
+ | {
1882
+ type: 'regular';
1883
+ /**
1884
+ The tools that are available for the model.
1885
+ */
1886
+ tools?: Array<LanguageModelV1FunctionTool | LanguageModelV1ProviderDefinedTool>;
1887
+ /**
1888
+ Specifies how the tool should be selected. Defaults to 'auto'.
1889
+ */
1890
+ toolChoice?: LanguageModelV1ToolChoice;
1891
+ }
1892
+ | {
1893
+ type: 'object-json';
1894
+ /**
1895
+ * JSON schema that the generated output should conform to.
1896
+ */
1897
+ schema?: JSONSchema7;
1898
+ /**
1899
+ * Name of output that should be generated. Used by some providers for additional LLM guidance.
1900
+ */
1901
+ name?: string;
1902
+ /**
1903
+ * Description of the output that should be generated. Used by some providers for additional LLM guidance.
1904
+ */
1905
+ description?: string;
1906
+ }
1907
+ | {
1908
+ type: 'object-tool';
1909
+ tool: LanguageModelV1FunctionTool;
1910
+ };
1911
+ /**
1912
+ A language model prompt is a standardized prompt type.
1913
+
1914
+ Note: This is **not** the user-facing prompt. The AI SDK methods will map the
1915
+ user-facing prompt types such as chat or instruction prompts to this format.
1916
+ That approach allows us to evolve the user facing prompts without breaking
1917
+ the language model interface.
1918
+ */
1919
+ prompt: LanguageModelV1Prompt;
1920
+ /**
1921
+ Additional provider-specific metadata.
1922
+ The metadata is passed through to the provider from the AI SDK and enables
1923
+ provider-specific functionality that can be fully encapsulated in the provider.
1924
+ */
1925
+ providerMetadata?: LanguageModelV1ProviderMetadata;
1926
+ };
1927
+
1928
+ declare type LanguageModelV1CallSettings = {
1929
+ /**
1930
+ Maximum number of tokens to generate.
1931
+ */
1932
+ maxTokens?: number;
1933
+ /**
1934
+ Temperature setting.
1935
+
1936
+ It is recommended to set either `temperature` or `topP`, but not both.
1937
+ */
1938
+ temperature?: number;
1939
+ /**
1940
+ Stop sequences.
1941
+ If set, the model will stop generating text when one of the stop sequences is generated.
1942
+ Providers may have limits on the number of stop sequences.
1943
+ */
1944
+ stopSequences?: string[];
1945
+ /**
1946
+ Nucleus sampling.
1947
+
1948
+ It is recommended to set either `temperature` or `topP`, but not both.
1949
+ */
1950
+ topP?: number;
1951
+ /**
1952
+ Only sample from the top K options for each subsequent token.
1953
+
1954
+ Used to remove "long tail" low probability responses.
1955
+ Recommended for advanced use cases only. You usually only need to use temperature.
1956
+ */
1957
+ topK?: number;
1958
+ /**
1959
+ Presence penalty setting. It affects the likelihood of the model to
1960
+ repeat information that is already in the prompt.
1961
+ */
1962
+ presencePenalty?: number;
1963
+ /**
1964
+ Frequency penalty setting. It affects the likelihood of the model
1965
+ to repeatedly use the same words or phrases.
1966
+ */
1967
+ frequencyPenalty?: number;
1968
+ /**
1969
+ Response format. The output can either be text or JSON. Default is text.
1970
+
1971
+ If JSON is selected, a schema can optionally be provided to guide the LLM.
1972
+ */
1973
+ responseFormat?:
1974
+ | {
1975
+ type: 'text';
1976
+ }
1977
+ | {
1978
+ type: 'json';
1979
+ /**
1980
+ * JSON schema that the generated output should conform to.
1981
+ */
1982
+ schema?: JSONSchema7;
1983
+ /**
1984
+ * Name of output that should be generated. Used by some providers for additional LLM guidance.
1985
+ */
1986
+ name?: string;
1987
+ /**
1988
+ * Description of the output that should be generated. Used by some providers for additional LLM guidance.
1989
+ */
1990
+ description?: string;
1991
+ };
1992
+ /**
1993
+ The seed (integer) to use for random sampling. If set and supported
1994
+ by the model, calls will generate deterministic results.
1995
+ */
1996
+ seed?: number;
1997
+ /**
1998
+ Abort signal for cancelling the operation.
1999
+ */
2000
+ abortSignal?: AbortSignal;
2001
+ /**
2002
+ Additional HTTP headers to be sent with the request.
2003
+ Only applicable for HTTP-based providers.
2004
+ */
2005
+ headers?: Record<string, string | undefined>;
2006
+ };
2007
+
2008
+ /**
2009
+ Warning from the model provider for this call. The call will proceed, but e.g.
2010
+ some settings might not be supported, which can lead to suboptimal results.
2011
+ */
2012
+ declare type LanguageModelV1CallWarning =
2013
+ | {
2014
+ type: 'unsupported-setting';
2015
+ setting: keyof LanguageModelV1CallSettings;
2016
+ details?: string;
2017
+ }
2018
+ | {
2019
+ type: 'unsupported-tool';
2020
+ tool: LanguageModelV1FunctionTool | LanguageModelV1ProviderDefinedTool;
2021
+ details?: string;
2022
+ }
2023
+ | {
2024
+ type: 'other';
2025
+ message: string;
2026
+ };
2027
+
2028
+ /**
2029
+ File content part of a prompt. It contains a file.
2030
+ */
2031
+ declare interface LanguageModelV1FilePart {
2032
+ type: 'file';
2033
+ /**
2034
+ * Optional filename of the file.
2035
+ */
2036
+ filename?: string;
2037
+ /**
2038
+ File data as base64 encoded string or as a URL.
2039
+ */
2040
+ data: string | URL;
2041
+ /**
2042
+ Mime type of the file.
2043
+ */
2044
+ mimeType: string;
2045
+ /**
2046
+ * Additional provider-specific metadata. They are passed through
2047
+ * to the provider from the AI SDK and enable provider-specific
2048
+ * functionality that can be fully encapsulated in the provider.
2049
+ */
2050
+ providerMetadata?: LanguageModelV1ProviderMetadata;
2051
+ }
2052
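// A minimal illustrative sketch (not from the package): a LanguageModelV1FilePart
// that references a PDF by URL; `data` could equally be a base64-encoded string.
const pdfPart: LanguageModelV1FilePart = {
  type: 'file',
  filename: 'report.pdf',
  data: new URL('https://example.com/report.pdf'), // hypothetical URL
  mimeType: 'application/pdf',
};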
+
2053
+ /**
2054
+ Reason why a language model finished generating a response.
2055
+
2056
+ Can be one of the following:
2057
+ - `stop`: model generated stop sequence
2058
+ - `length`: model generated maximum number of tokens
2059
+ - `content-filter`: content filter violation stopped the model
2060
+ - `tool-calls`: model triggered tool calls
2061
+ - `error`: model stopped because of an error
2062
+ - `other`: model stopped for other reasons
2063
+ - `unknown`: the model has not transmitted a finish reason
2064
+ */
2065
+ declare type LanguageModelV1FinishReason =
2066
+ | 'stop'
2067
+ | 'length'
2068
+ | 'content-filter'
2069
+ | 'tool-calls'
2070
+ | 'error'
2071
+ | 'other'
2072
+ | 'unknown';
2073
+
2074
+ /**
2075
+ A tool has a name, a description, and a set of parameters.
2076
+
2077
+ Note: this is **not** the user-facing tool definition. The AI SDK methods will
2078
+ map the user-facing tool definitions to this format.
2079
+ */
2080
+ declare type LanguageModelV1FunctionTool = {
2081
+ /**
2082
+ The type of the tool (always 'function').
2083
+ */
2084
+ type: 'function';
2085
+ /**
2086
+ The name of the tool. Unique within this model call.
2087
+ */
2088
+ name: string;
2089
+ /**
2090
+ A description of the tool. The language model uses this to understand the
2091
+ tool's purpose and to provide better completion suggestions.
2092
+ */
2093
+ description?: string;
2094
+ /**
2095
+ The parameters that the tool expects. The language model uses this to
2096
+ understand the tool's input requirements and to provide matching suggestions.
2097
+ */
2098
+ parameters: JSONSchema7;
2099
+ };
2100
+
2101
+ declare type LanguageModelV1FunctionToolCall = {
2102
+ toolCallType: 'function';
2103
+ toolCallId: string;
2104
+ toolName: string;
2105
+ /**
2106
+ Stringified JSON object with the tool call arguments. Must match the
2107
+ parameters schema of the tool.
2108
+ */
2109
+ args: string;
2110
+ };
2111
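// A minimal illustrative sketch (not from the package): a function tool and a tool
// call that targets it. Tool name, schema, and arguments are made up for illustration.
const weatherTool: LanguageModelV1FunctionTool = {
  type: 'function',
  name: 'getWeather',
  description: 'Look up the current weather for a city.',
  parameters: {
    type: 'object',
    properties: { city: { type: 'string' } },
    required: ['city'],
  },
};

const weatherCall: LanguageModelV1FunctionToolCall = {
  toolCallType: 'function',
  toolCallId: 'call_001',
  toolName: 'getWeather',
  args: JSON.stringify({ city: 'Berlin' }), // stringified JSON matching `parameters`
};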
+
2112
+ /**
2113
+ Image content part of a prompt. It contains an image.
2114
+ */
2115
+ declare interface LanguageModelV1ImagePart {
2116
+ type: 'image';
2117
+ /**
2118
+ Image data as a Uint8Array (e.g. from a Blob or Buffer) or a URL.
2119
+ */
2120
+ image: Uint8Array | URL;
2121
+ /**
2122
+ Optional mime type of the image.
2123
+ */
2124
+ mimeType?: string;
2125
+ /**
2126
+ * Additional provider-specific metadata. They are passed through
2127
+ * to the provider from the AI SDK and enable provider-specific
2128
+ * functionality that can be fully encapsulated in the provider.
2129
+ */
2130
+ providerMetadata?: LanguageModelV1ProviderMetadata;
2131
+ }
2132
+
2133
+ /**
2134
+ Log probabilities for each token and its top log probabilities.
2135
+ */
2136
+ export declare type LanguageModelV1LogProbs = Array<{
2137
+ token: string;
2138
+ logprob: number;
2139
+ topLogprobs: Array<{
2140
+ token: string;
2141
+ logprob: number;
2142
+ }>;
2143
+ }>;
2144
+
2145
+ export declare type LanguageModelV1Message = (
2146
+ | {
2147
+ role: 'system';
2148
+ content: string;
2149
+ }
2150
+ | {
2151
+ role: 'user';
2152
+ content: Array<LanguageModelV1TextPart | LanguageModelV1ImagePart | LanguageModelV1FilePart>;
2153
+ }
2154
+ | {
2155
+ role: 'assistant';
2156
+ content: Array<
2157
+ | LanguageModelV1TextPart
2158
+ | LanguageModelV1FilePart
2159
+ | LanguageModelV1ReasoningPart
2160
+ | LanguageModelV1RedactedReasoningPart
2161
+ | LanguageModelV1ToolCallPart
2162
+ >;
2163
+ }
2164
+ | {
2165
+ role: 'tool';
2166
+ content: Array<LanguageModelV1ToolResultPart>;
2167
+ }
2168
+ ) & {
2169
+ /**
2170
+ * Additional provider-specific metadata. They are passed through
2171
+ * to the provider from the AI SDK and enable provider-specific
2172
+ * functionality that can be fully encapsulated in the provider.
2173
+ */
2174
+ providerMetadata?: LanguageModelV1ProviderMetadata;
2175
+ };
2176
+
2177
+ /**
2178
+ The object generation modes available for use with a model. `undefined`
2179
+ represents no support for object generation.
2180
+ */
2181
+ declare type LanguageModelV1ObjectGenerationMode = 'json' | 'tool' | undefined;
2182
+
2183
+ /**
2184
+ A prompt is a list of messages.
2185
+
2186
+ Note: Not all models and prompt formats support multi-modal inputs and
2187
+ tool calls. The validation happens at runtime.
2188
+
2189
+ Note: This is not a user-facing prompt. The AI SDK methods will map the
2190
+ user-facing prompt types such as chat or instruction prompts to this format.
2191
+ */
2192
+ export declare type LanguageModelV1Prompt = Array<LanguageModelV1Message>;
2193
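// A minimal illustrative sketch (not from the package): a LanguageModelV1Prompt with a
// system message, a multi-modal user message, and a completed tool-call round trip.
const examplePrompt: LanguageModelV1Prompt = [
  { role: 'system', content: 'You are a concise assistant.' },
  {
    role: 'user',
    content: [
      { type: 'text', text: 'What is in this image?' },
      { type: 'image', image: new URL('https://example.com/cat.png'), mimeType: 'image/png' },
    ],
  },
  {
    role: 'assistant',
    content: [
      { type: 'tool-call', toolCallId: 'call_001', toolName: 'describeImage', args: { url: 'https://example.com/cat.png' } },
    ],
  },
  {
    role: 'tool',
    content: [
      { type: 'tool-result', toolCallId: 'call_001', toolName: 'describeImage', result: { caption: 'A cat on a sofa' } },
    ],
  },
];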
+
2194
+ /**
2195
+ The configuration of a tool that is defined by the provider.
2196
+ */
2197
+ declare type LanguageModelV1ProviderDefinedTool = {
2198
+ /**
2199
+ The type of the tool (always 'provider-defined').
2200
+ */
2201
+ type: 'provider-defined';
2202
+ /**
2203
+ The ID of the tool. Should follow the format `<provider-name>.<tool-name>`.
2204
+ */
2205
+ id: `${string}.${string}`;
2206
+ /**
2207
+ The name of the tool. Unique within this model call.
2208
+ */
2209
+ name: string;
2210
+ /**
2211
+ The arguments for configuring the tool. Must match the expected arguments defined by the provider for this tool.
2212
+ */
2213
+ args: Record<string, unknown>;
2214
+ };
2215
+
2216
+ /**
2217
+ * Additional provider-specific metadata. They are passed through
2218
+ * to the provider from the AI SDK and enable provider-specific
2219
+ * functionality that can be fully encapsulated in the provider.
2220
+ *
2221
+ * This enables us to quickly ship provider-specific functionality
2222
+ * without affecting the core AI SDK.
2223
+ *
2224
+ * The outer record is keyed by the provider name, and the inner
2225
+ * record is keyed by the provider-specific metadata key.
2226
+ *
2227
+ * ```ts
2228
+ * {
2229
+ * "anthropic": {
2230
+ * "cacheControl": { "type": "ephemeral" }
2231
+ * }
2232
+ * }
2233
+ * ```
2234
+ */
2235
+ declare type LanguageModelV1ProviderMetadata = Record<string, Record<string, JSONValue>>;
2236
+
2237
+ /**
2238
+ Reasoning content part of a prompt. It contains a string of reasoning text.
2239
+ */
2240
+ declare interface LanguageModelV1ReasoningPart {
2241
+ type: 'reasoning';
2242
+ /**
2243
+ The reasoning text.
2244
+ */
2245
+ text: string;
2246
+ /**
2247
+ An optional signature for verifying that the reasoning originated from the model.
2248
+ */
2249
+ signature?: string;
2250
+ /**
2251
+ Additional provider-specific metadata. They are passed through
2252
+ to the provider from the AI SDK and enable provider-specific
2253
+ functionality that can be fully encapsulated in the provider.
2254
+ */
2255
+ providerMetadata?: LanguageModelV1ProviderMetadata;
2256
+ }
2257
+
2258
+ /**
2259
+ Redacted reasoning content part of a prompt.
2260
+ */
2261
+ declare interface LanguageModelV1RedactedReasoningPart {
2262
+ type: 'redacted-reasoning';
2263
+ /**
2264
+ Redacted reasoning data.
2265
+ */
2266
+ data: string;
2267
+ /**
2268
+ Additional provider-specific metadata. They are passed through
2269
+ to the provider from the AI SDK and enable provider-specific
2270
+ functionality that can be fully encapsulated in the provider.
2271
+ */
2272
+ providerMetadata?: LanguageModelV1ProviderMetadata;
2273
+ }
2274
+
2275
+ /**
2276
+ * A source that has been used as input to generate the response.
2277
+ */
2278
+ declare type LanguageModelV1Source = {
2279
+ /**
2280
+ * A URL source. This is returned by web search RAG models.
2281
+ */
2282
+ sourceType: 'url';
2283
+ /**
2284
+ * The ID of the source.
2285
+ */
2286
+ id: string;
2287
+ /**
2288
+ * The URL of the source.
2289
+ */
2290
+ url: string;
2291
+ /**
2292
+ * The title of the source.
2293
+ */
2294
+ title?: string;
2295
+ /**
2296
+ * Additional provider metadata for the source.
2297
+ */
2298
+ providerMetadata?: LanguageModelV1ProviderMetadata;
2299
+ };
2300
+
2301
+ export declare type LanguageModelV1StreamPart =
2302
+ | {
2303
+ type: 'text-delta';
2304
+ textDelta: string;
2305
+ }
2306
+ | {
2307
+ type: 'reasoning';
2308
+ textDelta: string;
2309
+ }
2310
+ | {
2311
+ type: 'reasoning-signature';
2312
+ signature: string;
2313
+ }
2314
+ | {
2315
+ type: 'redacted-reasoning';
2316
+ data: string;
2317
+ }
2318
+ | {
2319
+ type: 'source';
2320
+ source: LanguageModelV1Source;
2321
+ }
2322
+ | {
2323
+ type: 'file';
2324
+ mimeType: string;
2325
+ /**
2326
+ Generated file data as base64 encoded strings or binary data.
2327
+ The file data should be returned without any unnecessary conversion.
2328
+ If the API returns base64 encoded strings, the file data should be returned
2329
+ as base64 encoded strings. If the API returns binary data, the file data should
2330
+ be returned as binary data.
2331
+ */
2332
+ data: string | Uint8Array;
2333
+ }
2334
+ | ({
2335
+ type: 'tool-call';
2336
+ } & LanguageModelV1FunctionToolCall)
2337
+ | {
2338
+ type: 'tool-call-delta';
2339
+ toolCallType: 'function';
2340
+ toolCallId: string;
2341
+ toolName: string;
2342
+ argsTextDelta: string;
2343
+ }
2344
+ | {
2345
+ type: 'response-metadata';
2346
+ id?: string;
2347
+ timestamp?: Date;
2348
+ modelId?: string;
2349
+ }
2350
+ | {
2351
+ type: 'finish';
2352
+ finishReason: LanguageModelV1FinishReason;
2353
+ providerMetadata?: LanguageModelV1ProviderMetadata;
2354
+ usage: {
2355
+ promptTokens: number;
2356
+ completionTokens: number;
2357
+ };
2358
+ logprobs?: LanguageModelV1LogProbs;
2359
+ }
2360
+ | {
2361
+ type: 'error';
2362
+ error: unknown;
2363
+ };
2364
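// A minimal illustrative sketch (not from the package): draining a stream of
// LanguageModelV1StreamPart values (e.g. from a provider's doStream) and switching
// on the part type. Only a few of the declared part types are handled here.
async function collectText(stream: ReadableStream<LanguageModelV1StreamPart>): Promise<string> {
  let text = '';
  const reader = stream.getReader();
  while (true) {
    const { done, value } = await reader.read();
    if (done) break;
    switch (value.type) {
      case 'text-delta':
        text += value.textDelta;
        break;
      case 'tool-call':
        console.log('tool call:', value.toolName, value.args);
        break;
      case 'finish':
        console.log('finish:', value.finishReason, value.usage);
        break;
      case 'error':
        throw value.error;
    }
  }
  return text;
}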
+
2365
+ /**
2366
+ Text content part of a prompt. It contains a string of text.
2367
+ */
2368
+ declare interface LanguageModelV1TextPart {
2369
+ type: 'text';
2370
+ /**
2371
+ The text content.
2372
+ */
2373
+ text: string;
2374
+ /**
2375
+ * Additional provider-specific metadata. They are passed through
2376
+ * to the provider from the AI SDK and enable provider-specific
2377
+ * functionality that can be fully encapsulated in the provider.
2378
+ */
2379
+ providerMetadata?: LanguageModelV1ProviderMetadata;
2380
+ }
2381
+
2382
+ /**
2383
+ Tool call content part of a prompt. It contains a tool call (usually generated by the AI model).
2384
+ */
2385
+ declare interface LanguageModelV1ToolCallPart {
2386
+ type: 'tool-call';
2387
+ /**
2388
+ ID of the tool call. This ID is used to match the tool call with the tool result.
2389
+ */
2390
+ toolCallId: string;
2391
+ /**
2392
+ Name of the tool that is being called.
2393
+ */
2394
+ toolName: string;
2395
+ /**
2396
+ Arguments of the tool call. This is a JSON-serializable object that matches the tool's input schema.
2397
+ */
2398
+ args: unknown;
2399
+ /**
2400
+ * Additional provider-specific metadata. They are passed through
2401
+ * to the provider from the AI SDK and enable provider-specific
2402
+ * functionality that can be fully encapsulated in the provider.
2403
+ */
2404
+ providerMetadata?: LanguageModelV1ProviderMetadata;
2405
+ }
2406
+
2407
+ declare type LanguageModelV1ToolChoice =
2408
+ | {
2409
+ type: 'auto';
2410
+ }
2411
+ | {
2412
+ type: 'none';
2413
+ }
2414
+ | {
2415
+ type: 'required';
2416
+ }
2417
+ | {
2418
+ type: 'tool';
2419
+ toolName: string;
2420
+ };
2421
+
2422
+ /**
2423
+ Tool result content part of a prompt. It contains the result of the tool call with the matching ID.
2424
+ */
2425
+ declare interface LanguageModelV1ToolResultPart {
2426
+ type: 'tool-result';
2427
+ /**
2428
+ ID of the tool call that this result is associated with.
2429
+ */
2430
+ toolCallId: string;
2431
+ /**
2432
+ Name of the tool that generated this result.
2433
+ */
2434
+ toolName: string;
2435
+ /**
2436
+ Result of the tool call. This is a JSON-serializable object.
2437
+ */
2438
+ result: unknown;
2439
+ /**
2440
+ Optional flag if the result is an error or an error message.
2441
+ */
2442
+ isError?: boolean;
2443
+ /**
2444
+ Tool results as an array of parts. This enables advanced tool results including images.
2445
+ When this is used, the `result` field should be ignored (if the provider supports content).
2446
+ */
2447
+ content?: Array<
2448
+ | {
2449
+ type: 'text';
2450
+ /**
2451
+ Text content.
2452
+ */
2453
+ text: string;
2454
+ }
2455
+ | {
2456
+ type: 'image';
2457
+ /**
2458
+ Base64-encoded image data.
2459
+ */
2460
+ data: string;
2461
+ /**
2462
+ Mime type of the image.
2463
+ */
2464
+ mimeType?: string;
2465
+ }
2466
+ >;
2467
+ /**
2468
+ * Additional provider-specific metadata. They are passed through
2469
+ * to the provider from the AI SDK and enable provider-specific
2470
+ * functionality that can be fully encapsulated in the provider.
2471
+ */
2472
+ providerMetadata?: LanguageModelV1ProviderMetadata;
2473
+ }
2474
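// A minimal illustrative sketch (not from the package): a tool result that supplies
// both a plain `result` and a rich `content` array; the base64 image data is truncated.
const screenshotResult: LanguageModelV1ToolResultPart = {
  type: 'tool-result',
  toolCallId: 'call_002',
  toolName: 'takeScreenshot',
  result: { ok: true },
  content: [
    { type: 'text', text: 'Screenshot captured.' },
    { type: 'image', data: 'iVBORw0KGgoAAAANSUhEUg...', mimeType: 'image/png' },
  ],
};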
+
2475
+ /**
2476
+ * A pointer from the current {@link Span} to another span in the same trace or
2477
+ * in a different trace.
2478
+ * Few examples of Link usage.
2479
+ * 1. Batch Processing: A batch of elements may contain elements associated
2480
+ * with one or more traces/spans. Since there can only be one parent
2481
+ * SpanContext, Link is used to keep reference to SpanContext of all
2482
+ * elements in the batch.
2483
+ * 2. Public Endpoint: A SpanContext in an incoming client request on a public
2484
+ * endpoint is untrusted from the service provider's perspective. In such a case it
2485
+ * is advisable to start a new trace with an appropriate sampling decision.
2486
+ * However, it is desirable to associate the incoming SpanContext with the new trace
2487
+ * initiated on the service provider's side so the two traces (from the client and from
2488
+ * the service provider) can be correlated.
2489
+ */
2490
+ declare interface Link {
2491
+ /** The {@link SpanContext} of a linked span. */
2492
+ context: SpanContext;
2493
+ /** A set of {@link SpanAttributes} on the link. */
2494
+ attributes?: SpanAttributes;
2495
+ /** Count of attributes of the link that were dropped due to collection limits */
2496
+ droppedAttributesCount?: number;
2497
+ }
2498
+
2499
+ /**
2500
+ Log probabilities for each token and its top log probabilities.
2501
+
2502
+ @deprecated Will become a provider extension in the future.
2503
+ */
2504
+ declare type LogProbs = LanguageModelV1LogProbs;
2505
+
2506
+ /**
2507
+ * AI SDK UI Messages. They are used in the client and to communicate between the frontend and the API routes.
2508
+ */
2509
+ export declare interface Message {
2510
+ /**
2511
+ A unique identifier for the message.
2512
+ */
2513
+ id: string;
2514
+ /**
2515
+ The timestamp of the message.
2516
+ */
2517
+ createdAt?: Date;
2518
+ /**
2519
+ Text content of the message. Use parts when possible.
2520
+ */
2521
+ content: string;
2522
+ /**
2523
+ Reasoning for the message.
2524
+
2525
+ @deprecated Use `parts` instead.
2526
+ */
2527
+ reasoning?: string;
2528
+ /**
2529
+ * Additional attachments to be sent along with the message.
2530
+ */
2531
+ experimental_attachments?: Attachment[];
2532
+ /**
2533
+ The 'data' role is deprecated.
2534
+ */
2535
+ role: 'system' | 'user' | 'assistant' | 'data';
2536
+ /**
2537
+ For data messages.
2538
+
2539
+ @deprecated Data messages will be removed.
2540
+ */
2541
+ data?: JSONValue_2;
2542
+ /**
2543
+ * Additional message-specific information added on the server via StreamData
2544
+ */
2545
+ annotations?: JSONValue_2[] | undefined;
2546
+ /**
2547
+ Tool invocations (that can be tool calls or tool results, depending on whether or not the invocation has finished)
2548
+ that the assistant made as part of this message.
2549
+
2550
+ @deprecated Use `parts` instead.
2551
+ */
2552
+ toolInvocations?: Array<ToolInvocation>;
2553
+ /**
2554
+ * The parts of the message. Use this for rendering the message in the UI.
2555
+ *
2556
+ * Assistant messages can have text, reasoning and tool invocation parts.
2557
+ * User messages can have text parts.
2558
+ */
2559
+ parts?: Array<TextUIPart | ReasoningUIPart | ToolInvocationUIPart | SourceUIPart | FileUIPart | StepStartUIPart>;
2560
+ }
2561
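// A minimal illustrative sketch (not from the package): a UI Message using `parts`
// (recommended above over the deprecated top-level fields). Only part shapes that
// appear elsewhere in this file are used; ids and URLs are hypothetical.
const uiMessage: Message = {
  id: 'msg_01',
  createdAt: new Date('2024-01-01T00:00:00Z'),
  role: 'assistant',
  content: 'Paris is the capital of France.',
  parts: [
    { type: 'step-start' },
    {
      type: 'reasoning',
      reasoning: 'The user asked for a capital city.',
      details: [{ type: 'text', text: 'The user asked for a capital city.' }],
    },
    {
      type: 'source',
      source: { sourceType: 'url', id: 'src_1', url: 'https://en.wikipedia.org/wiki/Paris', title: 'Paris' },
    },
  ],
};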
+
2562
+ export declare class MockLanguageModelV1 implements LanguageModelV1 {
2563
+ readonly specificationVersion = 'v1';
2564
+ readonly provider: LanguageModelV1['provider'];
2565
+ readonly modelId: LanguageModelV1['modelId'];
2566
+ supportsUrl: LanguageModelV1['supportsUrl'];
2567
+ doGenerate: LanguageModelV1['doGenerate'];
2568
+ doStream: LanguageModelV1['doStream'];
2569
+ readonly defaultObjectGenerationMode: LanguageModelV1['defaultObjectGenerationMode'];
2570
+ readonly supportsStructuredOutputs: LanguageModelV1['supportsStructuredOutputs'];
2571
+ constructor({
2572
+ provider,
2573
+ modelId,
2574
+ supportsUrl,
2575
+ doGenerate,
2576
+ doStream,
2577
+ defaultObjectGenerationMode,
2578
+ supportsStructuredOutputs,
2579
+ }?: {
2580
+ provider?: LanguageModelV1['provider'];
2581
+ modelId?: LanguageModelV1['modelId'];
2582
+ supportsUrl?: LanguageModelV1['supportsUrl'];
2583
+ doGenerate?: LanguageModelV1['doGenerate'];
2584
+ doStream?: LanguageModelV1['doStream'];
2585
+ defaultObjectGenerationMode?: LanguageModelV1['defaultObjectGenerationMode'];
2586
+ supportsStructuredOutputs?: LanguageModelV1['supportsStructuredOutputs'];
2587
+ });
2588
+ }
2589
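// A minimal illustrative sketch (not from the package): a MockLanguageModelV1 with a
// canned doGenerate result. The return shape is assumed from the LanguageModelV1
// specification, which is not included in this excerpt.
const mockModel = new MockLanguageModelV1({
  modelId: 'mock-model',
  doGenerate: async () => ({
    rawCall: { rawPrompt: null, rawSettings: {} },
    finishReason: 'stop',
    usage: { promptTokens: 10, completionTokens: 20 },
    text: 'Hello, world!',
  }),
});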
+
2590
+ declare class NoSuchToolError extends AISDKError {
2591
+ private readonly [symbol$e];
2592
+ readonly toolName: string;
2593
+ readonly availableTools: string[] | undefined;
2594
+ constructor({
2595
+ toolName,
2596
+ availableTools,
2597
+ message,
2598
+ }: {
2599
+ toolName: string;
2600
+ availableTools?: string[] | undefined;
2601
+ message?: string;
2602
+ });
2603
+ static isInstance(error: unknown): error is NoSuchToolError;
2604
+ }
2605
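// A minimal illustrative sketch (not from the package): narrowing an unknown error
// with the static isInstance guard exposed by NoSuchToolError.
function describeToolError(error: unknown): string {
  if (NoSuchToolError.isInstance(error)) {
    return `Unknown tool "${error.toolName}"; available: ${error.availableTools?.join(', ') ?? 'none'}`;
  }
  return 'Unexpected error';
}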
+
2606
+ declare type ObjectStreamPart<PARTIAL> =
2607
+ | {
2608
+ type: 'object';
2609
+ object: PARTIAL;
2610
+ }
2611
+ | {
2612
+ type: 'text-delta';
2613
+ textDelta: string;
2614
+ }
2615
+ | {
2616
+ type: 'error';
2617
+ error: unknown;
2618
+ }
2619
+ | {
2620
+ type: 'finish';
2621
+ finishReason: FinishReason;
2622
+ logprobs?: LogProbs;
2623
+ usage: LanguageModelUsage;
2624
+ response: LanguageModelResponseMetadata;
2625
+ providerMetadata?: ProviderMetadata;
2626
+ };
2627
+
2628
+ export declare namespace Output {
2629
+ export { output_Output as Output, output_object as object, output_text as text };
2630
+ }
2631
+
2632
+ declare interface Output_2<OUTPUT, PARTIAL> {
2633
+ readonly type: 'object' | 'text';
2634
+ injectIntoSystemPrompt(options: { system: string | undefined; model: LanguageModel }): string | undefined;
2635
+ responseFormat: (options: { model: LanguageModel }) => LanguageModelV1CallOptions['responseFormat'];
2636
+ parsePartial(options: { text: string }):
2637
+ | {
2638
+ partial: PARTIAL;
2639
+ }
2640
+ | undefined;
2641
+ parseOutput(
2642
+ options: {
2643
+ text: string;
2644
+ },
2645
+ context: {
2646
+ response: LanguageModelResponseMetadata;
2647
+ usage: LanguageModelUsage;
2648
+ finishReason: FinishReason;
2649
+ },
2650
+ ): OUTPUT;
2651
+ }
2652
+
2653
+ declare type PartialMap<KeyType, ValueType> = {} & Map<DeepPartialInternal<KeyType>, DeepPartialInternal<ValueType>>;
2654
+
2655
+ declare type PartialObject<ObjectType extends object> = {
2656
+ [KeyType in keyof ObjectType]?: DeepPartialInternal<ObjectType[KeyType]>;
2657
+ };
2658
+
2659
+ declare type PartialReadonlyMap<KeyType, ValueType> = {} & ReadonlyMap<
2660
+ DeepPartialInternal<KeyType>,
2661
+ DeepPartialInternal<ValueType>
2662
+ >;
2663
+
2664
+ declare type PartialReadonlySet<T> = {} & ReadonlySet<DeepPartialInternal<T>>;
2665
+
2666
+ declare type PartialSet<T> = {} & Set<DeepPartialInternal<T>>;
2667
+
2668
+ /**
2669
+ Prompt part of the AI function options.
2670
+ It contains a system message, a simple text prompt, or a list of messages.
2671
+ */
2672
+ declare type Prompt = {
2673
+ /**
2674
+ System message to include in the prompt. Can be used with `prompt` or `messages`.
2675
+ */
2676
+ system?: string;
2677
+ /**
2678
+ A simple text prompt. You can either use `prompt` or `messages` but not both.
2679
+ */
2680
+ prompt?: string;
2681
+ /**
2682
+ A list of messages. You can either use `prompt` or `messages` but not both.
2683
+ */
2684
+ messages?: Array<CoreMessage> | Array<Omit<Message, 'id'>>;
2685
+ };
2686
+
2687
+ /**
2688
+ Additional provider-specific metadata that is returned from the provider.
2689
+
2690
+ This is needed to enable provider-specific functionality that can be
2691
+ fully encapsulated in the provider.
2692
+ */
2693
+ declare type ProviderMetadata = LanguageModelV1ProviderMetadata;
2694
+
2695
+ /**
2696
+ Additional provider-specific options.
2697
+
2698
+ They are passed through to the provider from the AI SDK and enable
2699
+ provider-specific functionality that can be fully encapsulated in the provider.
2700
+ */
2701
+ declare type ProviderOptions = LanguageModelV1ProviderMetadata;
2702
+
2703
+ declare type ReasoningDetail =
2704
+ | {
2705
+ type: 'text';
2706
+ text: string;
2707
+ signature?: string;
2708
+ }
2709
+ | {
2710
+ type: 'redacted';
2711
+ data: string;
2712
+ };
2713
+
2714
+ /**
2715
+ * Reasoning content part of a prompt. It contains the reasoning text.
2716
+ */
2717
+ declare interface ReasoningPart {
2718
+ type: 'reasoning';
2719
+ /**
2720
+ The reasoning text.
2721
+ */
2722
+ text: string;
2723
+ /**
2724
+ An optional signature for verifying that the reasoning originated from the model.
2725
+ */
2726
+ signature?: string;
2727
+ /**
2728
+ Additional provider-specific metadata. They are passed through
2729
+ to the provider from the AI SDK and enable provider-specific
2730
+ functionality that can be fully encapsulated in the provider.
2731
+ */
2732
+ providerOptions?: ProviderOptions;
2733
+ /**
2734
+ @deprecated Use `providerOptions` instead.
2735
+ */
2736
+ experimental_providerMetadata?: ProviderMetadata;
2737
+ }
2738
+
2739
+ /**
2740
+ * A reasoning part of a message.
2741
+ */
2742
+ declare type ReasoningUIPart = {
2743
+ type: 'reasoning';
2744
+ /**
2745
+ * The reasoning text.
2746
+ */
2747
+ reasoning: string;
2748
+ details: Array<
2749
+ | {
2750
+ type: 'text';
2751
+ text: string;
2752
+ signature?: string;
2753
+ }
2754
+ | {
2755
+ type: 'redacted';
2756
+ data: string;
2757
+ }
2758
+ >;
2759
+ };
2760
+
2761
+ /**
2762
+ Redacted reasoning content part of a prompt.
2763
+ */
2764
+ declare interface RedactedReasoningPart {
2765
+ type: 'redacted-reasoning';
2766
+ /**
2767
+ Redacted reasoning data.
2768
+ */
2769
+ data: string;
2770
+ /**
2771
+ Additional provider-specific metadata. They are passed through
2772
+ to the provider from the AI SDK and enable provider-specific
2773
+ functionality that can be fully encapsulated in the provider.
2774
+ */
2775
+ providerOptions?: ProviderOptions;
2776
+ /**
2777
+ @deprecated Use `providerOptions` instead.
2778
+ */
2779
+ experimental_providerMetadata?: ProviderMetadata;
2780
+ }
2781
+
2782
+ /**
2783
+ A function that attempts to repair the raw output of the model
2784
+ to enable JSON parsing.
2785
+
2786
+ Should return the repaired text or null if the text cannot be repaired.
2787
+ */
2788
+ declare type RepairTextFunction = (options: {
2789
+ text: string;
2790
+ error: JSONParseError | TypeValidationError;
2791
+ }) => Promise<string | null>;
2792
+
2793
+ /**
2794
+ A message that was generated during the generation process.
2795
+ It can be either an assistant message or a tool message.
2796
+ */
2797
+ declare type ResponseMessage = (CoreAssistantMessage | CoreToolMessage) & {
2798
+ /**
2799
+ Message ID generated by the AI SDK.
2800
+ */
2801
+ id: string;
2802
+ };
2803
+
2804
+ export declare type Schema<OBJECT = unknown> = Validator<OBJECT> & {
2805
+ /**
2806
+ * Used to mark schemas so we can support both Zod and custom schemas.
2807
+ */
2808
+ [schemaSymbol]: true;
2809
+ /**
2810
+ * Schema type for inference.
2811
+ */
2812
+ _type: OBJECT;
2813
+ /**
2814
+ * The JSON Schema for the schema. It is passed to the providers.
2815
+ */
2816
+ readonly jsonSchema: JSONSchema7;
2817
+ };
2818
+
2819
+ /**
2820
+ * Used to mark schemas so we can support both Zod and custom schemas.
2821
+ */
2822
+ declare const schemaSymbol: unique symbol;
2823
+
2824
+ /**
2825
+ * Creates a ReadableStream that emits the provided values with an optional delay between each value.
2826
+ *
2827
+ * @param options - The configuration options
2828
+ * @param options.chunks - Array of values to be emitted by the stream
2829
+ * @param options.initialDelayInMs - Optional initial delay in milliseconds before emitting the first value (default: 0). Can be set to `null` to skip the initial delay. The difference between `initialDelayInMs: null` and `initialDelayInMs: 0` is that `initialDelayInMs: null` will emit the values without any delay, while `initialDelayInMs: 0` will emit the values with a delay of 0 milliseconds.
2830
+ * @param options.chunkDelayInMs - Optional delay in milliseconds between emitting each value (default: 0). Can be set to `null` to skip the delay. The difference between `chunkDelayInMs: null` and `chunkDelayInMs: 0` is that `chunkDelayInMs: null` will emit the values without any delay, while `chunkDelayInMs: 0` will emit the values with a delay of 0 milliseconds.
2831
+ * @returns A ReadableStream that emits the provided values
2832
+ */
2833
+ export declare function simulateReadableStream<T>({
2834
+ chunks,
2835
+ initialDelayInMs,
2836
+ chunkDelayInMs,
2837
+ _internal,
2838
+ }: {
2839
+ chunks: T[];
2840
+ initialDelayInMs?: number | null;
2841
+ chunkDelayInMs?: number | null;
2842
+ _internal?: {
2843
+ delay?: (ms: number | null) => Promise<void>;
2844
+ };
2845
+ }): ReadableStream<T>;
2846
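// A minimal illustrative sketch (not from the package): simulateReadableStream feeding
// a mocked doStream with delayed text deltas. The doStream return shape (rawCall) is
// assumed from the LanguageModelV1 specification, which is not part of this excerpt.
const streamingMock = new MockLanguageModelV1({
  doStream: async () => ({
    stream: simulateReadableStream<LanguageModelV1StreamPart>({
      chunks: [
        { type: 'text-delta', textDelta: 'Hello, ' },
        { type: 'text-delta', textDelta: 'world!' },
        { type: 'finish', finishReason: 'stop', usage: { promptTokens: 3, completionTokens: 5 } },
      ],
      chunkDelayInMs: 50,
    }),
    rawCall: { rawPrompt: null, rawSettings: {} },
  }),
});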
+
2847
+ /**
2848
+ A source that has been used as input to generate the response.
2849
+ */
2850
+ declare type Source = LanguageModelV1Source;
2851
+
2852
+ /**
2853
+ * A source part of a message.
2854
+ */
2855
+ declare type SourceUIPart = {
2856
+ type: 'source';
2857
+ /**
2858
+ * The source.
2859
+ */
2860
+ source: LanguageModelV1Source;
2861
+ };
2862
+
2863
+ /**
2864
+ * An interface that represents a span. A span represents a single operation
2865
+ * within a trace. Examples of spans might include remote procedure calls or
2866
+ * in-process function calls to sub-components. A Trace has a single, top-level
2867
+ * "root" Span that in turn may have zero or more child Spans, which in turn
2868
+ * may have children.
2869
+ *
2870
+ * Spans are created by the {@link Tracer.startSpan} method.
2871
+ */
2872
+ declare interface Span {
2873
+ /**
2874
+ * Returns the {@link SpanContext} object associated with this Span.
2875
+ *
2876
+ * Get an immutable, serializable identifier for this span that can be used
2877
+ * to create new child spans. Returned SpanContext is usable even after the
2878
+ * span ends.
2879
+ *
2880
+ * @returns the SpanContext object associated with this Span.
2881
+ */
2882
+ spanContext(): SpanContext;
2883
+ /**
2884
+ * Sets an attribute to the span.
2885
+ *
2886
+ * Sets a single Attribute with the key and value passed as arguments.
2887
+ *
2888
+ * @param key the key for this attribute.
2889
+ * @param value the value for this attribute. Setting a value null or
2890
+ * undefined is invalid and will result in undefined behavior.
2891
+ */
2892
+ setAttribute(key: string, value: SpanAttributeValue): this;
2893
+ /**
2894
+ * Sets attributes to the span.
2895
+ *
2896
+ * @param attributes the attributes that will be added.
2897
+ * null or undefined attribute values
2898
+ * are invalid and will result in undefined behavior.
2899
+ */
2900
+ setAttributes(attributes: SpanAttributes): this;
2901
+ /**
2902
+ * Adds an event to the Span.
2903
+ *
2904
+ * @param name the name of the event.
2905
+ * @param [attributesOrStartTime] the attributes that will be added; these are
2906
+ * associated with this event. Can be also a start time
2907
+ * if type is {@type TimeInput} and 3rd param is undefined
2908
+ * @param [startTime] start time of the event.
2909
+ */
2910
+ addEvent(name: string, attributesOrStartTime?: SpanAttributes | TimeInput, startTime?: TimeInput): this;
2911
+ /**
2912
+ * Adds a single link to the span.
2913
+ *
2914
+ * Links added after the creation will not affect the sampling decision.
2915
+ * It is preferred that span links be added at span creation.
2916
+ *
2917
+ * @param link the link to add.
2918
+ */
2919
+ addLink(link: Link): this;
2920
+ /**
2921
+ * Adds multiple links to the span.
2922
+ *
2923
+ * Links added after the creation will not affect the sampling decision.
2924
+ * It is preferred that span links be added at span creation.
2925
+ *
2926
+ * @param links the links to add.
2927
+ */
2928
+ addLinks(links: Link[]): this;
2929
+ /**
2930
+ * Sets a status to the span. If used, this will override the default Span
2931
+ * status. Default is {@link SpanStatusCode.UNSET}. SetStatus overrides the value
2932
+ * of previous calls to SetStatus on the Span.
2933
+ *
2934
+ * @param status the SpanStatus to set.
2935
+ */
2936
+ setStatus(status: SpanStatus): this;
2937
+ /**
2938
+ * Updates the Span name.
2939
+ *
2940
+ * This will override the name provided via {@link Tracer.startSpan}.
2941
+ *
2942
+ * Upon this update, any sampling behavior based on Span name will depend on
2943
+ * the implementation.
2944
+ *
2945
+ * @param name the Span name.
2946
+ */
2947
+ updateName(name: string): this;
2948
+ /**
2949
+ * Marks the end of Span execution.
2950
+ *
2951
+ * Call to End of a Span MUST not have any effects on child spans. Those may
2952
+ * still be running and can be ended later.
2953
+ *
2954
+ * Do not return `this`. The Span generally should not be used after it
2955
+ * is ended so chaining is not desired in this context.
2956
+ *
2957
+ * @param [endTime] the time to set as Span's end time. If not provided,
2958
+ * use the current time as the span's end time.
2959
+ */
2960
+ end(endTime?: TimeInput): void;
2961
+ /**
2962
+ * Returns the flag whether this span will be recorded.
2963
+ *
2964
+ * @returns true if this Span is active and recording information like events
2965
+ * with the `AddEvent` operation and attributes using `setAttributes`.
2966
+ */
2967
+ isRecording(): boolean;
2968
+ /**
2969
+ * Sets exception as a span event
2970
+ * @param exception the exception; the only accepted values are string or Error
2971
+ * @param [time] the time to set as Span's event time. If not provided,
2972
+ * use the current time.
2973
+ */
2974
+ recordException(exception: Exception, time?: TimeInput): void;
2975
+ }
2976
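// A minimal illustrative sketch (not from the package): exercising the Span interface
// declared above. How the span is obtained (e.g. from an OpenTelemetry Tracer) is
// outside this excerpt; the attribute key is hypothetical.
function recordToolStep(span: Span, toolName: string, run: () => void): void {
  span.setAttribute('tool.name', toolName);
  span.addEvent('tool.start');
  try {
    run();
    span.setStatus({ code: SpanStatusCode.OK });
  } catch (err) {
    span.recordException(err as Error);
    span.setStatus({ code: SpanStatusCode.ERROR, message: 'tool execution failed' });
    throw err;
  } finally {
    span.end();
  }
}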
+
2977
+ /**
2978
+ * @deprecated please use {@link Attributes}
2979
+ */
2980
+ declare type SpanAttributes = Attributes;
2981
+
2982
+ /**
2983
+ * @deprecated please use {@link AttributeValue}
2984
+ */
2985
+ declare type SpanAttributeValue = AttributeValue;
2986
+
2987
+ /**
2988
+ * A SpanContext represents the portion of a {@link Span} which must be
2989
+ * serialized and propagated along side of a {@link Baggage}.
2990
+ */
2991
+ declare interface SpanContext {
2992
+ /**
2993
+ * The ID of the trace that this span belongs to. It is worldwide unique
2994
+ * with practically sufficient probability by being made as 16 randomly
2995
+ * generated bytes, encoded as 32 lowercase hex characters corresponding to
2996
+ * 128 bits.
2997
+ */
2998
+ traceId: string;
2999
+ /**
3000
+ * The ID of the Span. It is globally unique with practically sufficient
3001
+ * probability by being made as 8 randomly generated bytes, encoded as 16
3002
+ * lowercase hex characters corresponding to 64 bits.
3003
+ */
3004
+ spanId: string;
3005
+ /**
3006
+ * Only true if the SpanContext was propagated from a remote parent.
3007
+ */
3008
+ isRemote?: boolean;
3009
+ /**
3010
+ * Trace flags to propagate.
3011
+ *
3012
+ * It is represented as 1 byte (bitmap). Bit to represent whether trace is
3013
+ * sampled or not. When set, the least significant bit documents that the
3014
+ * caller may have recorded trace data. A caller who does not record trace
3015
+ * data out-of-band leaves this flag unset.
3016
+ *
3017
+ * see {@link TraceFlags} for valid flag values.
3018
+ */
3019
+ traceFlags: number;
3020
+ /**
3021
+ * Tracing-system-specific info to propagate.
3022
+ *
3023
+ * The tracestate field value is a `list` as defined below. The `list` is a
3024
+ * series of `list-members` separated by commas `,`, and a list-member is a
3025
+ * key/value pair separated by an equals sign `=`. Spaces and horizontal tabs
3026
+ * surrounding `list-members` are ignored. There can be a maximum of 32
3027
+ * `list-members` in a `list`.
3028
+ * More Info: https://www.w3.org/TR/trace-context/#tracestate-field
3029
+ *
3030
+ * Examples:
3031
+ * Single tracing system (generic format):
3032
+ * tracestate: rojo=00f067aa0ba902b7
3033
+ * Multiple tracing systems (with different formatting):
3034
+ * tracestate: rojo=00f067aa0ba902b7,congo=t61rcWkgMzE
3035
+ */
3036
+ traceState?: TraceState;
3037
+ }
3038
+
3039
+ declare enum SpanKind {
3040
+ /** Default value. Indicates that the span is used internally. */
3041
+ INTERNAL = 0,
3042
+ /**
3043
+ * Indicates that the span covers server-side handling of an RPC or other
3044
+ * remote request.
3045
+ */
3046
+ SERVER = 1,
3047
+ /**
3048
+ * Indicates that the span covers the client-side wrapper around an RPC or
3049
+ * other remote request.
3050
+ */
3051
+ CLIENT = 2,
3052
+ /**
3053
+ * Indicates that the span describes producer sending a message to a
3054
+ * broker. Unlike client and server, there is no direct critical path latency
3055
+ * relationship between producer and consumer spans.
3056
+ */
3057
+ PRODUCER = 3,
3058
+ /**
3059
+ * Indicates that the span describes a consumer receiving a message from a
3060
+ * broker. Unlike client and server, there is no direct critical path latency
3061
+ * relationship between producer and consumer spans.
3062
+ */
3063
+ CONSUMER = 4,
3064
+ }
3065
+
3066
+ /**
3067
+ * Options needed for span creation
3068
+ */
3069
+ declare interface SpanOptions {
3070
+ /**
3071
+ * The SpanKind of a span
3072
+ * @default {@link SpanKind.INTERNAL}
3073
+ */
3074
+ kind?: SpanKind;
3075
+ /** A span's attributes */
3076
+ attributes?: SpanAttributes;
3077
+ /** {@link Link}s span to other spans */
3078
+ links?: Link[];
3079
+ /** A manually specified start time for the created `Span` object. */
3080
+ startTime?: TimeInput;
3081
+ /** The new span should be a root span. (Ignore parent from context). */
3082
+ root?: boolean;
3083
+ }
3084
+
3085
+ declare interface SpanStatus {
3086
+ /** The status code of this message. */
3087
+ code: SpanStatusCode;
3088
+ /** A developer-facing error message. */
3089
+ message?: string;
3090
+ }
3091
+
3092
+ /**
3093
+ * An enumeration of status codes.
3094
+ */
3095
+ declare enum SpanStatusCode {
3096
+ /**
3097
+ * The default status.
3098
+ */
3099
+ UNSET = 0,
3100
+ /**
3101
+ * The operation has been validated by an Application developer or
3102
+ * Operator to have completed successfully.
3103
+ */
3104
+ OK = 1,
3105
+ /**
3106
+ * The operation contains an error.
3107
+ */
3108
+ ERROR = 2,
3109
+ }
3110
+
3111
+ /**
3112
+ * The result of a single step in the generation process.
3113
+ */
3114
+ declare type StepResult<TOOLS extends ToolSet> = {
3115
+ /**
3116
+ The generated text.
3117
+ */
3118
+ readonly text: string;
3119
+ /**
3120
+ The reasoning that was generated during the generation.
3121
+ */
3122
+ readonly reasoning: string | undefined;
3123
+ readonly reasoningDetails: Array<ReasoningDetail>;
3124
+ /**
3125
+ The files that were generated during the generation.
3126
+ */
3127
+ readonly files: GeneratedFile[];
3128
+ /**
3129
+ The sources that were used to generate the text.
3130
+ */
3131
+ readonly sources: Source[];
3132
+ /**
3133
+ The tool calls that were made during the generation.
3134
+ */
3135
+ readonly toolCalls: ToolCallArray<TOOLS>;
3136
+ /**
3137
+ The results of the tool calls.
3138
+ */
3139
+ readonly toolResults: ToolResultArray<TOOLS>;
3140
+ /**
3141
+ The reason why the generation finished.
3142
+ */
3143
+ readonly finishReason: FinishReason;
3144
+ /**
3145
+ The token usage of the generated text.
3146
+ */
3147
+ readonly usage: LanguageModelUsage;
3148
+ /**
3149
+ Warnings from the model provider (e.g. unsupported settings).
3150
+ */
3151
+ readonly warnings: CallWarning[] | undefined;
3152
+ /**
3153
+ Logprobs for the completion.
3154
+ `undefined` if the model does not support logprobs or if it was not enabled.
3155
+ */
3156
+ readonly logprobs: LogProbs | undefined;
3157
+ /**
3158
+ Additional request information.
3159
+ */
3160
+ readonly request: LanguageModelRequestMetadata;
3161
+ /**
3162
+ Additional response information.
3163
+ */
3164
+ readonly response: LanguageModelResponseMetadata & {
3165
+ /**
3166
+ The response messages that were generated during the call.
3167
+ Response messages can be either assistant messages or tool messages.
3168
+ They contain a generated id.
3169
+ */
3170
+ readonly messages: Array<ResponseMessage>;
3171
+ /**
3172
+ Response body (available only for providers that use HTTP requests).
3173
+ */
3174
+ body?: unknown;
3175
+ };
3176
+ /**
3177
+ Additional provider-specific metadata. They are passed through
3178
+ from the provider to the AI SDK and enable provider-specific
3179
+ results that can be fully encapsulated in the provider.
3180
+ */
3181
+ readonly providerMetadata: ProviderMetadata | undefined;
3182
+ /**
3183
+ @deprecated Use `providerMetadata` instead.
3184
+ */
3185
+ readonly experimental_providerMetadata: ProviderMetadata | undefined;
3186
+ /**
3187
+ The type of step that this result is for. The first step is always
3188
+ an "initial" step, and subsequent steps are either "continue" steps
3189
+ or "tool-result" steps.
3190
+ */
3191
+ readonly stepType: 'initial' | 'continue' | 'tool-result';
3192
+ /**
3193
+ True when there will be a continuation step with a continuation text.
3194
+ */
3195
+ readonly isContinued: boolean;
3196
+ };
3197
+
3198
+ /**
3199
+ * A step boundary part of a message.
3200
+ */
3201
+ declare type StepStartUIPart = {
3202
+ type: 'step-start';
3203
+ };
3204
+
3205
+ /**
3206
+ * A stream wrapper to send custom JSON-encoded data back to the client.
3207
+ *
3208
+ * @deprecated Please use `createDataStream`, `createDataStreamResponse`, and `pipeDataStreamToResponse` instead.
3209
+ */
3210
+ declare class StreamData {
3211
+ private encoder;
3212
+ private controller;
3213
+ stream: ReadableStream<Uint8Array>;
3214
+ private isClosed;
3215
+ private warningTimeout;
3216
+ constructor();
3217
+ close(): Promise<void>;
3218
+ append(value: JSONValue_2): void;
3219
+ appendMessageAnnotation(value: JSONValue_2): void;
3220
+ }
3221
+
3222
+ /**
3223
+ Generate a structured, typed object for a given prompt and schema using a language model.
3224
+
3225
+ This function streams the output. If you do not want to stream the output, use `generateObject` instead.
3226
+
3227
+ @return
3228
+ A result object for accessing the partial object stream and additional information.
3229
+ */
3230
+ export declare function streamObject<OBJECT>(
3231
+ options: Omit<CallSettings, 'stopSequences'> &
3232
+ Prompt & {
3233
+ output?: 'object' | undefined;
3234
+ /**
3235
+ The language model to use.
3236
+ */
3237
+ model: LanguageModel;
3238
+ /**
3239
+ The schema of the object that the model should generate.
3240
+ */
3241
+ schema: z.Schema<OBJECT, z.ZodTypeDef, any> | Schema<OBJECT>;
3242
+ /**
3243
+ Optional name of the output that should be generated.
3244
+ Used by some providers for additional LLM guidance, e.g.
3245
+ via tool or schema name.
3246
+ */
3247
+ schemaName?: string;
3248
+ /**
3249
+ Optional description of the output that should be generated.
3250
+ Used by some providers for additional LLM guidance, e.g.
3251
+ via tool or schema description.
3252
+ */
3253
+ schemaDescription?: string;
3254
+ /**
3255
+ The mode to use for object generation.
3256
+
3257
+ The schema is converted into a JSON schema and used in one of the following ways
3258
+
3259
+ - 'auto': The provider will choose the best mode for the model.
3260
+ - 'tool': A tool with the JSON schema as parameters is provided and the provider is instructed to use it.
3261
+ - 'json': The JSON schema and an instruction are injected into the prompt. If the provider supports JSON mode, it is enabled. If the provider supports JSON grammars, the grammar is used.
3262
+
3263
+ Please note that most providers do not support all modes.
3264
+
3265
+ Default and recommended: 'auto' (best mode for the model).
3266
+ */
3267
+ mode?: 'auto' | 'json' | 'tool';
3268
+ /**
3269
+ Optional telemetry configuration (experimental).
3270
+ */
3271
+ experimental_telemetry?: TelemetrySettings;
3272
+ /**
3273
+ Additional provider-specific options. They are passed through
3274
+ to the provider from the AI SDK and enable provider-specific
3275
+ functionality that can be fully encapsulated in the provider.
3276
+ */
3277
+ providerOptions?: ProviderOptions;
3278
+ /**
3279
+ @deprecated Use `providerOptions` instead.
3280
+ */
3281
+ experimental_providerMetadata?: ProviderMetadata;
3282
+ /**
3283
+ Callback that is invoked when an error occurs during streaming.
3284
+ You can use it to log errors.
3285
+ The stream processing will pause until the callback promise is resolved.
3286
+ */
3287
+ onError?: StreamObjectOnErrorCallback;
3288
+ /**
3289
+ Callback that is called when the LLM response and the final object validation are finished.
3290
+ */
3291
+ onFinish?: StreamObjectOnFinishCallback<OBJECT>;
3292
+ /**
3293
+ * Internal. For test use only. May change without notice.
3294
+ */
3295
+ _internal?: {
3296
+ generateId?: () => string;
3297
+ currentDate?: () => Date;
3298
+ now?: () => number;
3299
+ };
3300
+ },
3301
+ ): StreamObjectResult<DeepPartial<OBJECT>, OBJECT, never>;
3302
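// A minimal illustrative sketch (not from the package): streaming an object against a
// Zod schema and reading the (unvalidated) partial object stream. `z` (zod) and the
// `model` value are assumed to be in scope; neither is part of this excerpt.
const recipeResult = streamObject({
  model,
  schema: z.object({
    recipe: z.object({ name: z.string(), steps: z.array(z.string()) }),
  }),
  prompt: 'Generate a simple lasagna recipe.',
  onError: ({ error }) => console.error(error),
});

for await (const partial of recipeResult.partialObjectStream) {
  console.log(partial.recipe?.name); // partial objects are not validated against the schema
}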
+
3303
+ /**
3304
+ Generate an array with structured, typed elements for a given prompt and element schema using a language model.
3305
+
3306
+ This function streams the output. If you do not want to stream the output, use `generateObject` instead.
3307
+
3308
+ @return
3309
+ A result object for accessing the partial object stream and additional information.
3310
+ */
3311
+ export declare function streamObject<ELEMENT>(
3312
+ options: Omit<CallSettings, 'stopSequences'> &
3313
+ Prompt & {
3314
+ output: 'array';
3315
+ /**
3316
+ The language model to use.
3317
+ */
3318
+ model: LanguageModel;
3319
+ /**
3320
+ The element schema of the array that the model should generate.
3321
+ */
3322
+ schema: z.Schema<ELEMENT, z.ZodTypeDef, any> | Schema<ELEMENT>;
3323
+ /**
3324
+ Optional name of the array that should be generated.
3325
+ Used by some providers for additional LLM guidance, e.g.
3326
+ via tool or schema name.
3327
+ */
3328
+ schemaName?: string;
3329
+ /**
3330
+ Optional description of the array that should be generated.
3331
+ Used by some providers for additional LLM guidance, e.g.
3332
+ via tool or schema description.
3333
+ */
3334
+ schemaDescription?: string;
3335
+ /**
3336
+ The mode to use for object generation.
3337
+
3338
+ The schema is converted into a JSON schema and used in one of the following ways
3339
+
3340
+ - 'auto': The provider will choose the best mode for the model.
3341
+ - 'tool': A tool with the JSON schema as parameters is provided and the provider is instructed to use it.
3342
+ - 'json': The JSON schema and an instruction are injected into the prompt. If the provider supports JSON mode, it is enabled. If the provider supports JSON grammars, the grammar is used.
3343
+
3344
+ Please note that most providers do not support all modes.
3345
+
3346
+ Default and recommended: 'auto' (best mode for the model).
3347
+ */
3348
+ mode?: 'auto' | 'json' | 'tool';
3349
+ /**
3350
+ Optional telemetry configuration (experimental).
3351
+ */
3352
+ experimental_telemetry?: TelemetrySettings;
3353
+ /**
3354
+ Additional provider-specific options. They are passed through
3355
+ to the provider from the AI SDK and enable provider-specific
3356
+ functionality that can be fully encapsulated in the provider.
3357
+ */
3358
+ providerOptions?: ProviderOptions;
3359
+ /**
3360
+ @deprecated Use `providerOptions` instead.
3361
+ */
3362
+ experimental_providerMetadata?: ProviderMetadata;
3363
+ /**
3364
+ Callback that is invoked when an error occurs during streaming.
3365
+ You can use it to log errors.
3366
+ The stream processing will pause until the callback promise is resolved.
3367
+ */
3368
+ onError?: StreamObjectOnErrorCallback;
3369
+ /**
3370
+ Callback that is called when the LLM response and the final object validation are finished.
3371
+ */
3372
+ onFinish?: StreamObjectOnFinishCallback<Array<ELEMENT>>;
3373
+ /**
3374
+ * Internal. For test use only. May change without notice.
3375
+ */
3376
+ _internal?: {
3377
+ generateId?: () => string;
3378
+ currentDate?: () => Date;
3379
+ now?: () => number;
3380
+ };
3381
+ },
3382
+ ): StreamObjectResult<Array<ELEMENT>, Array<ELEMENT>, AsyncIterableStream<ELEMENT>>;
3383
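// A minimal illustrative sketch (not from the package): the 'array' output strategy
// streams complete elements via elementStream. `z` and `model` are assumed in scope.
const charactersResult = streamObject({
  model,
  output: 'array',
  schema: z.object({ name: z.string(), class: z.string() }),
  prompt: 'Generate three RPG characters.',
});

for await (const character of charactersResult.elementStream) {
  console.log(character.name, character.class);
}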
+
3384
+ /**
3385
+ Generate JSON with any schema for a given prompt using a language model.
3386
+
3387
+ This function streams the output. If you do not want to stream the output, use `generateObject` instead.
3388
+
3389
+ @return
3390
+ A result object for accessing the partial object stream and additional information.
3391
+ */
3392
+ export declare function streamObject(
3393
+ options: Omit<CallSettings, 'stopSequences'> &
3394
+ Prompt & {
3395
+ output: 'no-schema';
3396
+ /**
3397
+ The language model to use.
3398
+ */
3399
+ model: LanguageModel;
3400
+ /**
3401
+ The mode to use for object generation. Must be "json" for no-schema output.
3402
+ */
3403
+ mode?: 'json';
3404
+ /**
3405
+ Optional telemetry configuration (experimental).
3406
+ */
3407
+ experimental_telemetry?: TelemetrySettings;
3408
+ /**
3409
+ Additional provider-specific options. They are passed through
3410
+ to the provider from the AI SDK and enable provider-specific
3411
+ functionality that can be fully encapsulated in the provider.
3412
+ */
3413
+ providerOptions?: ProviderOptions;
3414
+ /**
3415
+ @deprecated Use `providerOptions` instead.
3416
+ */
3417
+ experimental_providerMetadata?: ProviderMetadata;
3418
+ /**
3419
+ Callback that is invoked when an error occurs during streaming.
3420
+ You can use it to log errors.
3421
+ The stream processing will pause until the callback promise is resolved.
3422
+ */
3423
+ onError?: StreamObjectOnErrorCallback;
3424
+ /**
3425
+ Callback that is called when the LLM response and the final object validation are finished.
3426
+ */
3427
+ onFinish?: StreamObjectOnFinishCallback<JSONValue>;
3428
+ /**
3429
+ * Internal. For test use only. May change without notice.
3430
+ */
3431
+ _internal?: {
3432
+ generateId?: () => string;
3433
+ currentDate?: () => Date;
3434
+ now?: () => number;
3435
+ };
3436
+ },
3437
+ ): StreamObjectResult<JSONValue, JSONValue, never>;
3438
+
3439
+ /**
3440
+ Callback that is set using the `onError` option.
3441
+
3442
+ @param event - The event that is passed to the callback.
3443
+ */
3444
+ declare type StreamObjectOnErrorCallback = (event: { error: unknown }) => Promise<void> | void;
3445
+
3446
+ /**
3447
+ Callback that is set using the `onFinish` option.
3448
+
3449
+ @param event - The event that is passed to the callback.
3450
+ */
3451
+ export declare type StreamObjectOnFinishCallback<RESULT> = (event: {
3452
+ /**
3453
+ The token usage of the generated response.
3454
+ */
3455
+ usage: LanguageModelUsage;
3456
+ /**
3457
+ The generated object. Can be undefined if the final object does not match the schema.
3458
+ */
3459
+ object: RESULT | undefined;
3460
+ /**
3461
+ Optional error object. This is e.g. a TypeValidationError when the final object does not match the schema.
3462
+ */
3463
+ error: unknown | undefined;
3464
+ /**
3465
+ Response metadata.
3466
+ */
3467
+ response: LanguageModelResponseMetadata;
3468
+ /**
3469
+ Warnings from the model provider (e.g. unsupported settings).
3470
+ */
3471
+ warnings?: CallWarning[];
3472
+ /**
3473
+ Additional provider-specific metadata. They are passed through
3474
+ to the provider from the AI SDK and enable provider-specific
3475
+ functionality that can be fully encapsulated in the provider.
3476
+ */
3477
+ providerMetadata: ProviderMetadata | undefined;
3478
+ /**
3479
+ @deprecated Use `providerMetadata` instead.
3480
+ */
3481
+ experimental_providerMetadata?: ProviderMetadata;
3482
+ }) => Promise<void> | void;
3483
+
3484
+ /**
3485
+ The result of a `streamObject` call that contains the partial object stream and additional information.
3486
+ */
3487
+ export declare interface StreamObjectResult<PARTIAL, RESULT, ELEMENT_STREAM> {
3488
+ /**
3489
+ Warnings from the model provider (e.g. unsupported settings)
3490
+ */
3491
+ readonly warnings: Promise<CallWarning[] | undefined>;
3492
+ /**
3493
+ The token usage of the generated response. Resolved when the response is finished.
3494
+ */
3495
+ readonly usage: Promise<LanguageModelUsage>;
3496
+ /**
3497
+ Additional provider-specific metadata. They are passed through
3498
+ from the provider to the AI SDK and enable provider-specific
3499
+ results that can be fully encapsulated in the provider.
3500
+ */
3501
+ readonly providerMetadata: Promise<ProviderMetadata | undefined>;
3502
+ /**
3503
+ @deprecated Use `providerMetadata` instead.
3504
+ */
3505
+ readonly experimental_providerMetadata: Promise<ProviderMetadata | undefined>;
3506
+ /**
3507
+ Additional request information from the last step.
3508
+ */
3509
+ readonly request: Promise<LanguageModelRequestMetadata>;
3510
+ /**
3511
+ Additional response information.
3512
+ */
3513
+ readonly response: Promise<LanguageModelResponseMetadata>;
3514
+ /**
3515
+ The generated object (typed according to the schema). Resolved when the response is finished.
3516
+ */
3517
+ readonly object: Promise<RESULT>;
3518
+ /**
3519
+ Stream of partial objects. It gets more complete as the stream progresses.
3520
+
3521
+ Note that the partial object is not validated.
3522
+ If you want to be certain that the actual content matches your schema, you need to implement your own validation for partial results.
3523
+ */
3524
+ readonly partialObjectStream: AsyncIterableStream<PARTIAL>;
3525
+ /**
3526
+ * Stream over complete array elements. Only available if the output strategy is set to `array`.
3527
+ */
3528
+ readonly elementStream: ELEMENT_STREAM;
3529
+ /**
3530
+ Text stream of the JSON representation of the generated object. It contains text chunks.
3531
+ When the stream is finished, the object is valid JSON that can be parsed.
3532
+ */
3533
+ readonly textStream: AsyncIterableStream<string>;
3534
+ /**
3535
+ Stream of different types of events, including partial objects, errors, and finish events.
3536
+ Only errors that stop the stream, such as network errors, are thrown.
3537
+ */
3538
+ readonly fullStream: AsyncIterableStream<ObjectStreamPart<PARTIAL>>;
3539
+ /**
3540
+ Writes text delta output to a Node.js response-like object.
3541
+ It sets a `Content-Type` header to `text/plain; charset=utf-8` and
3542
+ writes each text delta as a separate chunk.
3543
+
3544
+ @param response A Node.js response-like object (ServerResponse).
3545
+ @param init Optional headers, status code, and status text.
3546
+ */
3547
+ pipeTextStreamToResponse(response: ServerResponse, init?: ResponseInit): void;
3548
+ /**
3549
+ Creates a simple text stream response.
3550
+ The response has a `Content-Type` header set to `text/plain; charset=utf-8`.
3551
+ Each text delta is encoded as UTF-8 and sent as a separate chunk.
3552
+ Non-text-delta events are ignored.
3553
+
3554
+ @param init Optional headers, status code, and status text.
3555
+ */
3556
+ toTextStreamResponse(init?: ResponseInit): Response;
3557
+ }
3558
+
3559
+ /**
3560
+ Generate a text and call tools for a given prompt using a language model.
3561
+
3562
+ This function streams the output. If you do not want to stream the output, use `generateText` instead.
3563
+
3564
+ @param model - The language model to use.
3565
+ @param tools - Tools that are accessible to and can be called by the model. The model needs to support calling tools.
3566
+
3567
+ @param system - A system message that will be part of the prompt.
3568
+ @param prompt - A simple text prompt. You can either use `prompt` or `messages` but not both.
3569
+ @param messages - A list of messages. You can either use `prompt` or `messages` but not both.
3570
+
3571
+ @param maxTokens - Maximum number of tokens to generate.
3572
+ @param temperature - Temperature setting.
3573
+ The value is passed through to the provider. The range depends on the provider and model.
3574
+ It is recommended to set either `temperature` or `topP`, but not both.
3575
+ @param topP - Nucleus sampling.
3576
+ The value is passed through to the provider. The range depends on the provider and model.
3577
+ It is recommended to set either `temperature` or `topP`, but not both.
3578
+ @param topK - Only sample from the top K options for each subsequent token.
3579
+ Used to remove "long tail" low probability responses.
3580
+ Recommended for advanced use cases only. You usually only need to use temperature.
3581
+ @param presencePenalty - Presence penalty setting.
3582
+ It affects the likelihood of the model to repeat information that is already in the prompt.
3583
+ The value is passed through to the provider. The range depends on the provider and model.
3584
+ @param frequencyPenalty - Frequency penalty setting.
3585
+ It affects the likelihood of the model to repeatedly use the same words or phrases.
3586
+ The value is passed through to the provider. The range depends on the provider and model.
3587
+ @param stopSequences - Stop sequences.
3588
+ If set, the model will stop generating text when one of the stop sequences is generated.
3589
+ @param seed - The seed (integer) to use for random sampling.
3590
+ If set and supported by the model, calls will generate deterministic results.
3591
+
3592
+ @param maxRetries - Maximum number of retries. Set to 0 to disable retries. Default: 2.
3593
+ @param abortSignal - An optional abort signal that can be used to cancel the call.
3594
+ @param headers - Additional HTTP headers to be sent with the request. Only applicable for HTTP-based providers.
3595
+
3596
+ @param maxSteps - Maximum number of sequential LLM calls (steps), e.g. when you use tool calls.
3597
+ @param experimental_generateMessageId - Generate a unique ID for each message.
3598
+
3599
+ @param onChunk - Callback that is called for each chunk of the stream. The stream processing will pause until the callback promise is resolved.
3600
+ @param onError - Callback that is called when an error occurs during streaming. You can use it to log errors.
3601
+ @param onStepFinish - Callback that is called when each step (LLM call) is finished, including intermediate steps.
3602
+ @param onFinish - Callback that is called when the LLM response and all request tool executions
3603
+ (for tools that have an `execute` function) are finished.
3604
+
3605
+ @return
3606
+ A result object for accessing different stream types and additional information.
3607
+ */
3608
+ export declare function streamText<TOOLS extends ToolSet, OUTPUT = never, PARTIAL_OUTPUT = never>({
3609
+ model,
3610
+ tools,
3611
+ toolChoice,
3612
+ system,
3613
+ prompt,
3614
+ messages,
3615
+ maxRetries,
3616
+ abortSignal,
3617
+ headers,
3618
+ maxSteps,
3619
+ experimental_generateMessageId: generateMessageId,
3620
+ experimental_output: output,
3621
+ experimental_continueSteps: continueSteps,
3622
+ experimental_telemetry: telemetry,
3623
+ experimental_providerMetadata,
3624
+ providerOptions,
3625
+ experimental_toolCallStreaming,
3626
+ toolCallStreaming,
3627
+ experimental_activeTools: activeTools,
3628
+ experimental_repairToolCall: repairToolCall,
3629
+ experimental_transform: transform,
3630
+ onChunk,
3631
+ onError,
3632
+ onFinish,
3633
+ onStepFinish,
3634
+ _internal: { now, generateId, currentDate },
3635
+ ...settings
3636
+ }: CallSettings &
3637
+ Prompt & {
3638
+ /**
3639
+ The language model to use.
3640
+ */
3641
+ model: LanguageModel;
3642
+ /**
3643
+ The tools that the model can call. The model needs to support calling tools.
3644
+ */
3645
+ tools?: TOOLS;
3646
+ /**
3647
+ The tool choice strategy. Default: 'auto'.
3648
+ */
3649
+ toolChoice?: ToolChoice<TOOLS>;
3650
+ /**
3651
+ Maximum number of sequential LLM calls (steps), e.g. when you use tool calls. Must be at least 1.
3652
+
3653
+ A maximum number is required to prevent infinite loops in the case of misconfigured tools.
3654
+
3655
+ By default, it's set to 1, which means that only a single LLM call is made.
3656
+ */
3657
+ maxSteps?: number;
3658
+ /**
3659
+ Generate a unique ID for each message.
3660
+ */
3661
+ experimental_generateMessageId?: IDGenerator;
3662
+ /**
3663
+ When enabled, the model will perform additional steps if the finish reason is "length" (experimental).
3664
+
3665
+ By default, it's set to false.
3666
+ */
3667
+ experimental_continueSteps?: boolean;
3668
+ /**
3669
+ Optional telemetry configuration (experimental).
3670
+ */
3671
+ experimental_telemetry?: TelemetrySettings;
3672
+ /**
3673
+ Additional provider-specific options. They are passed through
3674
+ to the provider from the AI SDK and enable provider-specific
3675
+ functionality that can be fully encapsulated in the provider.
3676
+ */
3677
+ providerOptions?: ProviderOptions;
3678
+ /**
3679
+ @deprecated Use `providerOptions` instead.
3680
+ */
3681
+ experimental_providerMetadata?: ProviderMetadata;
3682
+ /**
3683
+ Limits the tools that are available for the model to call without
3684
+ changing the tool call and result types in the result.
3685
+ */
3686
+ experimental_activeTools?: Array<keyof TOOLS>;
3687
+ /**
3688
+ Optional specification for parsing structured outputs from the LLM response.
3689
+ */
3690
+ experimental_output?: Output_2<OUTPUT, PARTIAL_OUTPUT>;
3691
+ /**
3692
+ A function that attempts to repair a tool call that failed to parse.
3693
+ */
3694
+ experimental_repairToolCall?: ToolCallRepairFunction<TOOLS>;
3695
+ /**
3696
+ Enable streaming of tool call deltas as they are generated. Disabled by default.
3697
+ */
3698
+ toolCallStreaming?: boolean;
3699
+ /**
3700
+ @deprecated Use `toolCallStreaming` instead.
3701
+ */
3702
+ experimental_toolCallStreaming?: boolean;
3703
+ /**
3704
+ Optional stream transformations.
3705
+ They are applied in the order they are provided.
3706
+ The stream transformations must maintain the stream structure for streamText to work correctly.
3707
+ */
3708
+ experimental_transform?: StreamTextTransform<TOOLS> | Array<StreamTextTransform<TOOLS>>;
3709
+ /**
3710
+ Callback that is called for each chunk of the stream.
3711
+ The stream processing will pause until the callback promise is resolved.
3712
+ */
3713
+ onChunk?: StreamTextOnChunkCallback<TOOLS>;
3714
+ /**
3715
+ Callback that is invoked when an error occurs during streaming.
3716
+ You can use it to log errors.
3717
+ The stream processing will pause until the callback promise is resolved.
3718
+ */
3719
+ onError?: StreamTextOnErrorCallback;
3720
+ /**
3721
+ Callback that is called when the LLM response and all request tool executions
3722
+ (for tools that have an `execute` function) are finished.
3723
+
3724
+ The usage is the combined usage of all steps.
3725
+ */
3726
+ onFinish?: StreamTextOnFinishCallback<TOOLS>;
3727
+ /**
3728
+ Callback that is called when each step (LLM call) is finished, including intermediate steps.
3729
+ */
3730
+ onStepFinish?: StreamTextOnStepFinishCallback<TOOLS>;
3731
+ /**
3732
+ Internal. For test use only. May change without notice.
3733
+ */
3734
+ _internal?: {
3735
+ now?: () => number;
3736
+ generateId?: IDGenerator;
3737
+ currentDate?: () => Date;
3738
+ };
3739
+ }): StreamTextResult<TOOLS, PARTIAL_OUTPUT>;
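As a rough sketch of how these options compose in practice (assuming the function and the `tool` helper are consumed through the `ai` package's exports together with an OpenAI provider from `@ai-sdk/openai` and `zod` for the tool schema; the model id and the weather lookup are placeholders):

```ts
import { streamText, tool } from 'ai';
import { openai } from '@ai-sdk/openai';
import { z } from 'zod';

async function main() {
  const result = streamText({
    model: openai('gpt-4o-mini'),
    system: 'You are a concise assistant.',
    prompt: 'What is the weather in Berlin right now?',
    maxSteps: 3, // allow follow-up LLM calls after tool results come back
    tools: {
      weather: tool({
        description: 'Get the current weather for a city',
        parameters: z.object({ city: z.string() }),
        execute: async ({ city }) => ({ city, tempC: 18 }), // stubbed lookup
      }),
    },
    onStepFinish: step => console.log('\nstep finished:', step.finishReason),
    onFinish: event => console.log('\ntotal usage:', event.usage),
  });

  // textStream yields only the generated text deltas; tool events appear on fullStream.
  for await (const delta of result.textStream) {
    process.stdout.write(delta);
  }
}

main().catch(console.error);
```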
3740
+
3741
+ /**
3742
+ Callback that is set using the `onChunk` option.
3743
+
3744
+ @param event - The event that is passed to the callback.
3745
+ */
3746
+ declare type StreamTextOnChunkCallback<TOOLS extends ToolSet> = (event: {
3747
+ chunk: Extract<
3748
+ TextStreamPart<TOOLS>,
3749
+ {
3750
+ type:
3751
+ | 'text-delta'
3752
+ | 'reasoning'
3753
+ | 'source'
3754
+ | 'tool-call'
3755
+ | 'tool-call-streaming-start'
3756
+ | 'tool-call-delta'
3757
+ | 'tool-result';
3758
+ }
3759
+ >;
3760
+ }) => Promise<void> | void;
3761
+
3762
+ /**
3763
+ Callback that is set using the `onError` option.
3764
+
3765
+ @param event - The event that is passed to the callback.
3766
+ */
3767
+ declare type StreamTextOnErrorCallback = (event: { error: unknown }) => Promise<void> | void;
3768
+
3769
+ /**
3770
+ Callback that is set using the `onFinish` option.
3771
+
3772
+ @param event - The event that is passed to the callback.
3773
+ */
3774
+ export declare type StreamTextOnFinishCallback<TOOLS extends ToolSet> = (
3775
+ event: Omit<StepResult<TOOLS>, 'stepType' | 'isContinued'> & {
3776
+ /**
3777
+ Details for all steps.
3778
+ */
3779
+ readonly steps: StepResult<TOOLS>[];
3780
+ },
3781
+ ) => Promise<void> | void;
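A sketch of a typical `onFinish` handler under the same assumed imports; persisting the response messages is only hinted at via logging:

```ts
import { streamText } from 'ai';
import { openai } from '@ai-sdk/openai';

const result = streamText({
  model: openai('gpt-4o-mini'),
  prompt: 'Explain vector clocks in two sentences.',
  // Runs once the final step and all tool executions with an `execute` function are done.
  onFinish: ({ text, usage, steps, response }) => {
    console.log(`finished after ${steps.length} step(s), ${usage.totalTokens} tokens`);
    // response.messages holds the assistant/tool messages produced by this call,
    // which is what you would typically append to stored conversation history.
    console.log('messages to persist:', response.messages.length);
    console.log('final text:', text);
  },
});

void result.consumeStream(); // drain the stream so onFinish actually fires
```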
3782
+
3783
+ /**
3784
+ Callback that is set using the `onStepFinish` option.
3785
+
3786
+ @param stepResult - The result of the step.
3787
+ */
3788
+ export declare type StreamTextOnStepFinishCallback<TOOLS extends ToolSet> = (
3789
+ stepResult: StepResult<TOOLS>,
3790
+ ) => Promise<void> | void;
3791
+
3792
+ /**
3793
+ A result object for accessing different stream types and additional information.
3794
+ */
3795
+ export declare interface StreamTextResult<TOOLS extends ToolSet, PARTIAL_OUTPUT> {
3796
+ /**
3797
+ Warnings from the model provider (e.g. unsupported settings) for the first step.
3798
+ */
3799
+ readonly warnings: Promise<CallWarning[] | undefined>;
3800
+ /**
3801
+ The total token usage of the generated response.
3802
+ When there are multiple steps, the usage is the sum of all step usages.
3803
+
3804
+ Resolved when the response is finished.
3805
+ */
3806
+ readonly usage: Promise<LanguageModelUsage>;
3807
+ /**
3808
+ Sources that have been used as input to generate the response.
3809
+ For multi-step generation, the sources are accumulated from all steps.
3810
+
3811
+ Resolved when the response is finished.
3812
+ */
3813
+ readonly sources: Promise<Source[]>;
3814
+ /**
3815
+ Files that have been generated by the model in the last step.
3816
+
3817
+ Resolved when the response is finished.
3818
+ */
3819
+ readonly files: Promise<GeneratedFile[]>;
3820
+ /**
3821
+ The reason why the generation finished. Taken from the last step.
3822
+
3823
+ Resolved when the response is finished.
3824
+ */
3825
+ readonly finishReason: Promise<FinishReason>;
3826
+ /**
3827
+ Additional provider-specific metadata from the last step.
3828
+ Metadata is passed through from the provider to the AI SDK and
3829
+ enables provider-specific results that can be fully encapsulated in the provider.
3830
+ */
3831
+ readonly providerMetadata: Promise<ProviderMetadata | undefined>;
3832
+ /**
3833
+ @deprecated Use `providerMetadata` instead.
3834
+ */
3835
+ readonly experimental_providerMetadata: Promise<ProviderMetadata | undefined>;
3836
+ /**
3837
+ The full text that has been generated by the last step.
3838
+
3839
+ Resolved when the response is finished.
3840
+ */
3841
+ readonly text: Promise<string>;
3842
+ /**
3843
+ The reasoning that has been generated by the last step.
3844
+
3845
+ Resolved when the response is finished.
3846
+ */
3847
+ readonly reasoning: Promise<string | undefined>;
3848
+ /**
3849
+ The full reasoning that the model has generated.
3850
+
3851
+ Resolved when the response is finished.
3852
+ */
3853
+ readonly reasoningDetails: Promise<Array<ReasoningDetail>>;
3854
+ /**
3855
+ The tool calls that have been executed in the last step.
3856
+
3857
+ Resolved when the response is finished.
3858
+ */
3859
+ readonly toolCalls: Promise<ToolCallUnion<TOOLS>[]>;
3860
+ /**
3861
+ The tool results that have been generated in the last step.
3862
+
3863
+ Resolved when all tool executions are finished.
3864
+ */
3865
+ readonly toolResults: Promise<ToolResultUnion<TOOLS>[]>;
3866
+ /**
3867
+ Details for all steps.
3868
+ You can use this to get information about intermediate steps,
3869
+ such as the tool calls or the response headers.
3870
+ */
3871
+ readonly steps: Promise<Array<StepResult<TOOLS>>>;
3872
+ /**
3873
+ Additional request information from the last step.
3874
+ */
3875
+ readonly request: Promise<LanguageModelRequestMetadata>;
3876
+ /**
3877
+ Additional response information from the last step.
3878
+ */
3879
+ readonly response: Promise<
3880
+ LanguageModelResponseMetadata & {
3881
+ /**
3882
+ The response messages that were generated during the call. It consists of an assistant message,
3883
+ potentially containing tool calls.
3884
+
3885
+ When there are tool results, there is an additional tool message with the tool results that are available.
3886
+ If there are tools that do not have execute functions, they are not included in the tool results and
3887
+ need to be added separately.
3888
+ */
3889
+ messages: Array<ResponseMessage>;
3890
+ }
3891
+ >;
3892
+ /**
3893
+ A text stream that returns only the generated text deltas. You can use it
3894
+ as either an AsyncIterable or a ReadableStream. When an error occurs, the
3895
+ stream will throw the error.
3896
+ */
3897
+ readonly textStream: AsyncIterableStream<string>;
3898
+ /**
3899
+ A stream with all events, including text deltas, tool calls, tool results, and
3900
+ errors.
3901
+ You can use it as either an AsyncIterable or a ReadableStream.
3902
+ Only errors that stop the stream, such as network errors, are thrown.
3903
+ */
3904
+ readonly fullStream: AsyncIterableStream<TextStreamPart<TOOLS>>;
3905
+ /**
3906
+ A stream of partial outputs. It uses the `experimental_output` specification.
3907
+ */
3908
+ readonly experimental_partialOutputStream: AsyncIterableStream<PARTIAL_OUTPUT>;
3909
+ /**
3910
+ Consumes the stream without processing the parts.
3911
+ This is useful to force the stream to finish.
3912
+ It effectively removes the backpressure and allows the stream to finish,
3913
+ triggering the `onFinish` callback and the promise resolution.
3914
+
3915
+ If an error occurs, it is passed to the optional `onError` callback.
3916
+ */
3917
+ consumeStream(options?: ConsumeStreamOptions): Promise<void>;
3918
+ /**
3919
+ Converts the result to a data stream.
3920
+
3921
+ @param data an optional StreamData object that will be merged into the stream.
3922
+ @param getErrorMessage an optional function that converts an error to an error message.
3923
+ @param sendUsage whether to send the usage information to the client. Defaults to true.
3924
+ @param sendReasoning whether to send the reasoning information to the client. Defaults to false.
3925
+ @return A data stream.
3926
+ */
3927
+ toDataStream(
3928
+ options?: {
3929
+ data?: StreamData;
3930
+ getErrorMessage?: (error: unknown) => string;
3931
+ } & DataStreamOptions,
3932
+ ): ReadableStream<Uint8Array>;
3933
+ /**
3934
+ * Merges the result as a data stream into another data stream.
3935
+ *
3936
+ * @param dataStream A data stream writer.
3937
+ * @param options.sendUsage Whether to send the usage information to the client. Defaults to true.
3938
+ * @param options.sendReasoning Whether to send the reasoning information to the client. Defaults to false.
3939
+ */
3940
+ mergeIntoDataStream(dataStream: DataStreamWriter, options?: DataStreamOptions): void;
3941
+ /**
3942
+ Writes data stream output to a Node.js response-like object.
3943
+
3944
+ @param response A Node.js response-like object (ServerResponse).
3945
+ @param options.status The status code.
3946
+ @param options.statusText The status text.
3947
+ @param options.headers The headers.
3948
+ @param options.data The stream data.
3949
+ @param options.getErrorMessage An optional function that converts an error to an error message.
3950
+ @param options.sendUsage Whether to send the usage information to the client. Defaults to true.
3951
+ @param options.sendReasoning Whether to send the reasoning information to the client. Defaults to false.
3952
+ */
3953
+ pipeDataStreamToResponse(
3954
+ response: ServerResponse_2,
3955
+ options?: ResponseInit & {
3956
+ data?: StreamData;
3957
+ getErrorMessage?: (error: unknown) => string;
3958
+ } & DataStreamOptions,
3959
+ ): void;
3960
+ /**
3961
+ Writes text delta output to a Node.js response-like object.
3962
+ It sets a `Content-Type` header to `text/plain; charset=utf-8` and
3963
+ writes each text delta as a separate chunk.
3964
+
3965
+ @param response A Node.js response-like object (ServerResponse).
3966
+ @param init Optional headers, status code, and status text.
3967
+ */
3968
+ pipeTextStreamToResponse(response: ServerResponse_2, init?: ResponseInit): void;
3969
+ /**
3970
+ Converts the result to a streamed response object with a stream data part stream.
3971
+ It can be used with the `useChat` and `useCompletion` hooks.
3972
+
3973
+ @param options.status The status code.
3974
+ @param options.statusText The status text.
3975
+ @param options.headers The headers.
3976
+ @param options.data The stream data.
3977
+ @param options.getErrorMessage An optional function that converts an error to an error message.
3978
+ @param options.sendUsage Whether to send the usage information to the client. Defaults to true.
3979
+ @param options.sendReasoning Whether to send the reasoning information to the client. Defaults to false.
3980
+
3981
+ @return A response object.
3982
+ */
3983
+ toDataStreamResponse(
3984
+ options?: ResponseInit & {
3985
+ data?: StreamData;
3986
+ getErrorMessage?: (error: unknown) => string;
3987
+ } & DataStreamOptions,
3988
+ ): Response;
3989
+ /**
3990
+ Creates a simple text stream response.
3991
+ Each text delta is encoded as UTF-8 and sent as a separate chunk.
3992
+ Non-text-delta events are ignored.
3993
+
3994
+ @param init Optional headers, status code, and status text.
3995
+ */
3996
+ toTextStreamResponse(init?: ResponseInit): Response;
3997
+ }
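A sketch of consuming a `StreamTextResult` in a fetch-style route handler: the promise-valued fields resolve once the response finishes, while `toDataStreamResponse` streams incrementally to the client. The handler shape, import paths, and model id are assumptions, not something this file prescribes.

```ts
import { streamText } from 'ai';
import { openai } from '@ai-sdk/openai';

export async function POST(req: Request): Promise<Response> {
  const { prompt } = (await req.json()) as { prompt: string };

  const result = streamText({
    model: openai('gpt-4o-mini'),
    prompt,
  });

  // These promises resolve when the response is finished; logging them does not block streaming.
  result.usage.then(usage => console.log('tokens used:', usage.totalTokens));
  result.finishReason.then(reason => console.log('finished because:', reason));

  // Stream to the client using the data stream protocol consumed by useChat/useCompletion.
  return result.toDataStreamResponse({ sendUsage: true });
}
```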
3998
+
3999
+ /**
4000
+ A transformation that is applied to the stream.
4001
+
4002
+ @param stopStream - A function that stops the source stream.
4003
+ @param tools - The tools that are accessible to and can be called by the model. The model needs to support calling tools.
4004
+ */
4005
+ declare type StreamTextTransform<TOOLS extends ToolSet> = (options: {
4006
+ tools: TOOLS;
4007
+ stopStream: () => void;
4008
+ }) => TransformStream<TextStreamPart<TOOLS>, TextStreamPart<TOOLS>>;
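Because a transform must emit the same `TextStreamPart` shapes it receives, a pass-through `TransformStream` that only rewrites `text-delta` parts is the simplest safe example (a sketch only; upper-casing is just a placeholder effect, imports assumed as before):

```ts
import { streamText } from 'ai';
import { openai } from '@ai-sdk/openai';

const result = streamText({
  model: openai('gpt-4o-mini'),
  prompt: 'Tell me a short joke.',
  // Upper-case text deltas and forward every other part unchanged, so the
  // stream structure that streamText depends on is preserved.
  experimental_transform: () =>
    new TransformStream({
      transform(part, controller) {
        controller.enqueue(
          part.type === 'text-delta' ? { ...part, textDelta: part.textDelta.toUpperCase() } : part,
        );
      },
    }),
});
```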
4009
+
4010
+ declare const symbol$1: unique symbol;
4011
+
4012
+ declare const symbol$7: unique symbol;
4013
+
4014
+ declare const symbol$d: unique symbol;
4015
+
4016
+ declare const symbol$e: unique symbol;
4017
+
4018
+ declare const symbol$f: unique symbol;
4019
+
4020
+ /**
4021
+ * Telemetry configuration.
4022
+ */
4023
+ declare type TelemetrySettings = {
4024
+ /**
4025
+ * Enable or disable telemetry. Disabled by default while experimental.
4026
+ */
4027
+ isEnabled?: boolean;
4028
+ /**
4029
+ * Enable or disable input recording. Enabled by default.
4030
+ *
4031
+ * You might want to disable input recording to avoid recording sensitive
4032
+ * information, to reduce data transfers, or to increase performance.
4033
+ */
4034
+ recordInputs?: boolean;
4035
+ /**
4036
+ * Enable or disable output recording. Enabled by default.
4037
+ *
4038
+ * You might want to disable output recording to avoid recording sensitive
4039
+ * information, to reduce data transfers, or to increase performance.
4040
+ */
4041
+ recordOutputs?: boolean;
4042
+ /**
4043
+ * Identifier for this function. Used to group telemetry data by function.
4044
+ */
4045
+ functionId?: string;
4046
+ /**
4047
+ * Additional information to include in the telemetry data.
4048
+ */
4049
+ metadata?: Record<string, AttributeValue>;
4050
+ /**
4051
+ * A custom tracer to use for the telemetry data.
4052
+ */
4053
+ tracer?: Tracer;
4054
+ };
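A sketch of enabling the experimental telemetry with these settings; the function id and metadata values are arbitrary examples:

```ts
import { streamText } from 'ai';
import { openai } from '@ai-sdk/openai';

const result = streamText({
  model: openai('gpt-4o-mini'),
  prompt: 'Draft a short release announcement.',
  experimental_telemetry: {
    isEnabled: true, // telemetry is disabled by default while experimental
    functionId: 'release-notes-draft',
    recordInputs: false, // avoid recording potentially sensitive prompts
    metadata: { environment: 'staging' },
  },
});
```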
4055
+
4056
+ /**
4057
+ Text content part of a prompt. It contains a string of text.
4058
+ */
4059
+ export declare interface TextPart {
4060
+ type: 'text';
4061
+ /**
4062
+ The text content.
4063
+ */
4064
+ text: string;
4065
+ /**
4066
+ Additional provider-specific metadata. They are passed through
4067
+ to the provider from the AI SDK and enable provider-specific
4068
+ functionality that can be fully encapsulated in the provider.
4069
+ */
4070
+ providerOptions?: ProviderOptions;
4071
+ /**
4072
+ @deprecated Use `providerOptions` instead.
4073
+ */
4074
+ experimental_providerMetadata?: ProviderMetadata;
4075
+ }
4076
+
4077
+ export declare type TextStreamPart<TOOLS extends ToolSet> =
4078
+ | {
4079
+ type: 'text-delta';
4080
+ textDelta: string;
4081
+ }
4082
+ | {
4083
+ type: 'reasoning';
4084
+ textDelta: string;
4085
+ }
4086
+ | {
4087
+ type: 'reasoning-signature';
4088
+ signature: string;
4089
+ }
4090
+ | {
4091
+ type: 'redacted-reasoning';
4092
+ data: string;
4093
+ }
4094
+ | {
4095
+ type: 'source';
4096
+ source: Source;
4097
+ }
4098
+ | ({
4099
+ type: 'file';
4100
+ } & GeneratedFile)
4101
+ | ({
4102
+ type: 'tool-call';
4103
+ } & ToolCallUnion<TOOLS>)
4104
+ | {
4105
+ type: 'tool-call-streaming-start';
4106
+ toolCallId: string;
4107
+ toolName: string;
4108
+ }
4109
+ | {
4110
+ type: 'tool-call-delta';
4111
+ toolCallId: string;
4112
+ toolName: string;
4113
+ argsTextDelta: string;
4114
+ }
4115
+ | ({
4116
+ type: 'tool-result';
4117
+ } & ToolResultUnion<TOOLS>)
4118
+ | {
4119
+ type: 'step-start';
4120
+ messageId: string;
4121
+ request: LanguageModelRequestMetadata;
4122
+ warnings: CallWarning[];
4123
+ }
4124
+ | {
4125
+ type: 'step-finish';
4126
+ messageId: string;
4127
+ logprobs?: LogProbs;
4128
+ request: LanguageModelRequestMetadata;
4129
+ warnings: CallWarning[] | undefined;
4130
+ response: LanguageModelResponseMetadata;
4131
+ usage: LanguageModelUsage;
4132
+ finishReason: FinishReason;
4133
+ providerMetadata: ProviderMetadata | undefined;
4134
+ /**
4135
+ * @deprecated Use `providerMetadata` instead.
4136
+ */
4137
+ experimental_providerMetadata?: ProviderMetadata;
4138
+ isContinued: boolean;
4139
+ }
4140
+ | {
4141
+ type: 'finish';
4142
+ finishReason: FinishReason;
4143
+ usage: LanguageModelUsage;
4144
+ providerMetadata: ProviderMetadata | undefined;
4145
+ /**
4146
+ * @deprecated Use `providerMetadata` instead.
4147
+ */
4148
+ experimental_providerMetadata?: ProviderMetadata;
4149
+ /**
4150
+ * @deprecated will be moved into provider metadata
4151
+ */
4152
+ logprobs?: LogProbs;
4153
+ /**
4154
+ * @deprecated use response on step-finish instead
4155
+ */
4156
+ response: LanguageModelResponseMetadata;
4157
+ }
4158
+ | {
4159
+ type: 'error';
4160
+ error: unknown;
4161
+ };
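Handling `fullStream` usually means switching on `part.type`; a minimal sketch covering a few of the variants above (same assumed imports as earlier):

```ts
import { streamText } from 'ai';
import { openai } from '@ai-sdk/openai';

async function main() {
  const result = streamText({
    model: openai('gpt-4o-mini'),
    prompt: 'Summarize the plot of Hamlet in two sentences.',
  });

  for await (const part of result.fullStream) {
    switch (part.type) {
      case 'text-delta':
        process.stdout.write(part.textDelta);
        break;
      case 'step-finish':
        console.log('\n[step finished]', part.finishReason);
        break;
      case 'error':
        console.error('stream error:', part.error);
        break;
      // the remaining variants (reasoning, source, file, tool-*) can be handled the same way
    }
  }
}

main().catch(console.error);
```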
4162
+
4163
+ /**
4164
+ * A text part of a message.
4165
+ */
4166
+ declare type TextUIPart = {
4167
+ type: 'text';
4168
+ /**
4169
+ * The text content.
4170
+ */
4171
+ text: string;
4172
+ };
4173
+
4174
+ /**
4175
+ * Defines TimeInput.
4176
+ *
4177
+ * hrtime, epoch milliseconds, performance.now() or Date
4178
+ */
4179
+ declare type TimeInput = HrTime | number | Date;
4180
+
4181
+ /**
4182
+ A tool contains the description and the schema of the input that the tool expects.
4183
+ This enables the language model to generate the input.
4184
+
4185
+ The tool can also contain an optional execute function that performs the actual execution of the tool.
4186
+ */
4187
+ export declare type Tool<PARAMETERS extends ToolParameters = any, RESULT = any> = {
4188
+ /**
4189
+ The schema of the input that the tool expects. The language model will use this to generate the input.
4190
+ It is also used to validate the output of the language model.
4191
+ Use descriptions to make the input understandable for the language model.
4192
+ */
4193
+ parameters: PARAMETERS;
4194
+ /**
4195
+ An optional description of what the tool does.
4196
+ Will be used by the language model to decide whether to use the tool.
4197
+ Not used for provider-defined tools.
4198
+ */
4199
+ description?: string;
4200
+ /**
4201
+ Optional conversion function that maps the tool result to multi-part tool content for LLMs.
4202
+ */
4203
+ experimental_toToolResultContent?: (result: RESULT) => ToolResultContent;
4204
+ /**
4205
+ An async function that is called with the arguments from the tool call and produces a result.
4206
+ If not provided, the tool will not be executed automatically.
4207
+
4208
+ @args is the input of the tool call.
4209
+ @options.abortSignal is a signal that can be used to abort the tool call.
4210
+ */
4211
+ execute?: (args: inferParameters<PARAMETERS>, options: ToolExecutionOptions) => PromiseLike<RESULT>;
4212
+ } & (
4213
+ | {
4214
+ /**
4215
+ Function tool.
4216
+ */
4217
+ type?: undefined | 'function';
4218
+ }
4219
+ | {
4220
+ /**
4221
+ Provider-defined tool.
4222
+ */
4223
+ type: 'provider-defined';
4224
+ /**
4225
+ The ID of the tool. Should follow the format `<provider-name>.<tool-name>`.
4226
+ */
4227
+ id: `${string}.${string}`;
4228
+ /**
4229
+ The arguments for configuring the tool. Must match the expected arguments defined by the provider for this tool.
4230
+ */
4231
+ args: Record<string, unknown>;
4232
+ }
4233
+ );
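A sketch of a plain function tool matching this shape, written with the `tool` helper and a `zod` schema; the endpoint URL is a placeholder and the import paths are assumptions:

```ts
import { tool } from 'ai';
import { z } from 'zod';

// `parameters` drives both argument generation and validation; `execute`
// receives the parsed args plus call options such as the abort signal.
export const getStockPrice = tool({
  description: 'Look up the latest price for a stock ticker symbol',
  parameters: z.object({
    symbol: z.string().describe('Ticker symbol, e.g. AAPL'),
  }),
  execute: async ({ symbol }, { abortSignal }) => {
    const res = await fetch(`https://example.invalid/price/${symbol}`, { signal: abortSignal });
    return (await res.json()) as { symbol: string; price: number };
  },
});
```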
4234
+
4235
+ /**
4236
+ Typed tool call that is returned by generateText and streamText.
4237
+ It contains the tool call ID, the tool name, and the tool arguments.
4238
+ */
4239
+ declare interface ToolCall<NAME extends string, ARGS> {
4240
+ /**
4241
+ ID of the tool call. This ID is used to match the tool call with the tool result.
4242
+ */
4243
+ toolCallId: string;
4244
+ /**
4245
+ Name of the tool that is being called.
4246
+ */
4247
+ toolName: NAME;
4248
+ /**
4249
+ Arguments of the tool call. This is a JSON-serializable object that matches the tool's input schema.
4250
+ */
4251
+ args: ARGS;
4252
+ }
4253
+
4254
+ declare type ToolCallArray<TOOLS extends ToolSet> = Array<ToolCallUnion<TOOLS>>;
4255
+
4256
+ /**
4257
+ Tool call content part of a prompt. It contains a tool call (usually generated by the AI model).
4258
+ */
4259
+ declare interface ToolCallPart {
4260
+ type: 'tool-call';
4261
+ /**
4262
+ ID of the tool call. This ID is used to match the tool call with the tool result.
4263
+ */
4264
+ toolCallId: string;
4265
+ /**
4266
+ Name of the tool that is being called.
4267
+ */
4268
+ toolName: string;
4269
+ /**
4270
+ Arguments of the tool call. This is a JSON-serializable object that matches the tool's input schema.
4271
+ */
4272
+ args: unknown;
4273
+ /**
4274
+ Additional provider-specific metadata. They are passed through
4275
+ to the provider from the AI SDK and enable provider-specific
4276
+ functionality that can be fully encapsulated in the provider.
4277
+ */
4278
+ providerOptions?: ProviderOptions;
4279
+ /**
4280
+ @deprecated Use `providerOptions` instead.
4281
+ */
4282
+ experimental_providerMetadata?: ProviderMetadata;
4283
+ }
4284
+
4285
+ /**
4286
+ * A function that attempts to repair a tool call that failed to parse.
4287
+ *
4288
+ * It receives the error and the context as arguments and returns the repair
4289
+ * tool call JSON as text.
4290
+ *
4291
+ * @param options.system - The system prompt.
4292
+ * @param options.messages - The messages in the current generation step.
4293
+ * @param options.toolCall - The tool call that failed to parse.
4294
+ * @param options.tools - The tools that are available.
4295
+ * @param options.parameterSchema - A function that returns the JSON Schema for a tool.
4296
+ * @param options.error - The error that occurred while parsing the tool call.
4297
+ */
4298
+ declare type ToolCallRepairFunction<TOOLS extends ToolSet> = (options: {
4299
+ system: string | undefined;
4300
+ messages: CoreMessage[];
4301
+ toolCall: LanguageModelV1FunctionToolCall;
4302
+ tools: TOOLS;
4303
+ parameterSchema: (options: { toolName: string }) => JSONSchema7;
4304
+ error: NoSuchToolError | InvalidToolArgumentsError;
4305
+ }) => Promise<LanguageModelV1FunctionToolCall | null>;
4306
+
4307
+ declare type ToolCallUnion<TOOLS extends ToolSet> = ValueOf<{
4308
+ [NAME in keyof TOOLS]: {
4309
+ type: 'tool-call';
4310
+ toolCallId: string;
4311
+ toolName: NAME & string;
4312
+ args: inferParameters<TOOLS[NAME]['parameters']>;
4313
+ };
4314
+ }>;
4315
+
4316
+ /**
4317
+ Tool choice for the generation. It supports the following settings:
4318
+
4319
+ - `auto` (default): the model can choose whether and which tools to call.
4320
+ - `required`: the model must call a tool. It can choose which tool to call.
4321
+ - `none`: the model must not call tools.
4322
+ - `{ type: 'tool', toolName: string (typed) }`: the model must call the specified tool.
4323
+ */
4324
+ declare type ToolChoice<TOOLS extends Record<string, unknown>> =
4325
+ | 'auto'
4326
+ | 'none'
4327
+ | 'required'
4328
+ | {
4329
+ type: 'tool';
4330
+ toolName: keyof TOOLS;
4331
+ };
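For instance, forcing the model to call a specific tool rather than answering directly might look like this (tool body stubbed, imports assumed as before):

```ts
import { streamText, tool } from 'ai';
import { openai } from '@ai-sdk/openai';
import { z } from 'zod';

const result = streamText({
  model: openai('gpt-4o-mini'),
  prompt: 'Check the price of AAPL.',
  tools: {
    getStockPrice: tool({
      description: 'Look up a stock price',
      parameters: z.object({ symbol: z.string() }),
      execute: async ({ symbol }) => ({ symbol, price: 123.45 }), // stubbed
    }),
  },
  // 'auto' is the default; this forces a call to the named tool.
  toolChoice: { type: 'tool', toolName: 'getStockPrice' },
});
```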
4332
+
4333
+ /**
4334
+ Content of a tool message. It is an array of tool result parts.
4335
+ */
4336
+ export declare type ToolContent = Array<ToolResultPart>;
4337
+
4338
+ export declare interface ToolExecutionOptions {
4339
+ /**
4340
+ * The ID of the tool call. You can use it e.g. when sending tool-call related information with stream data.
4341
+ */
4342
+ toolCallId: string;
4343
+ /**
4344
+ * Messages that were sent to the language model to initiate the response that contained the tool call.
4345
+ * The messages **do not** include the system prompt nor the assistant response that contained the tool call.
4346
+ */
4347
+ messages: CoreMessage[];
4348
+ /**
4349
+ * An optional abort signal that indicates that the overall operation should be aborted.
4350
+ */
4351
+ abortSignal?: AbortSignal;
4352
+ }
4353
+
4354
+ /**
4355
+ Tool invocations are either tool calls or tool results. For each assistant tool call,
4356
+ there is one tool invocation. While the call is in progress, the invocation is a tool call.
4357
+ Once the call is complete, the invocation is a tool result.
4358
+
4359
+ The step is used to track how to map an assistant UI message with many tool invocations
4360
+ back to a sequence of LLM assistant/tool result message pairs.
4361
+ It is optional for backwards compatibility.
4362
+ */
4363
+ export declare type ToolInvocation =
4364
+ | ({
4365
+ state: 'partial-call';
4366
+ step?: number;
4367
+ } & ToolCall<string, any>)
4368
+ | ({
4369
+ state: 'call';
4370
+ step?: number;
4371
+ } & ToolCall<string, any>)
4372
+ | ({
4373
+ state: 'result';
4374
+ step?: number;
4375
+ } & ToolResult<string, any, any>);
4376
+
4377
+ /**
4378
+ * A tool invocation part of a message.
4379
+ */
4380
+ declare type ToolInvocationUIPart = {
4381
+ type: 'tool-invocation';
4382
+ /**
4383
+ * The tool invocation.
4384
+ */
4385
+ toolInvocation: ToolInvocation;
4386
+ };
4387
+
4388
+ declare type ToolParameters = z.ZodTypeAny | Schema<any>;
4389
+
4390
+ /**
4391
+ Typed tool result that is returned by `generateText` and `streamText`.
4392
+ It contains the tool call ID, the tool name, the tool arguments, and the tool result.
4393
+ */
4394
+ declare interface ToolResult<NAME extends string, ARGS, RESULT> {
4395
+ /**
4396
+ ID of the tool call. This ID is used to match the tool call with the tool result.
4397
+ */
4398
+ toolCallId: string;
4399
+ /**
4400
+ Name of the tool that was called.
4401
+ */
4402
+ toolName: NAME;
4403
+ /**
4404
+ Arguments of the tool call. This is a JSON-serializable object that matches the tool's input schema.
4405
+ */
4406
+ args: ARGS;
4407
+ /**
4408
+ Result of the tool call. This is the result of the tool's execution.
4409
+ */
4410
+ result: RESULT;
4411
+ }
4412
+
4413
+ declare type ToolResultArray<TOOLS extends ToolSet> = Array<ToolResultUnion<TOOLS>>;
4414
+
4415
+ declare type ToolResultContent = Array<
4416
+ | {
4417
+ type: 'text';
4418
+ text: string;
4419
+ }
4420
+ | {
4421
+ type: 'image';
4422
+ data: string;
4423
+ mimeType?: string;
4424
+ }
4425
+ >;
4426
+
4427
+ /**
4428
+ Tool result content part of a prompt. It contains the result of the tool call with the matching ID.
4429
+ */
4430
+ export declare interface ToolResultPart {
4431
+ type: 'tool-result';
4432
+ /**
4433
+ ID of the tool call that this result is associated with.
4434
+ */
4435
+ toolCallId: string;
4436
+ /**
4437
+ Name of the tool that generated this result.
4438
+ */
4439
+ toolName: string;
4440
+ /**
4441
+ Result of the tool call. This is a JSON-serializable object.
4442
+ */
4443
+ result: unknown;
4444
+ /**
4445
+ Multi-part content of the tool result. Only for tools that support multipart results.
4446
+ */
4447
+ experimental_content?: ToolResultContent;
4448
+ /**
4449
+ Optional flag indicating whether the result is an error or an error message.
4450
+ */
4451
+ isError?: boolean;
4452
+ /**
4453
+ Additional provider-specific metadata. They are passed through
4454
+ to the provider from the AI SDK and enable provider-specific
4455
+ functionality that can be fully encapsulated in the provider.
4456
+ */
4457
+ providerOptions?: ProviderOptions;
4458
+ /**
4459
+ @deprecated Use `providerOptions` instead.
4460
+ */
4461
+ experimental_providerMetadata?: ProviderMetadata;
4462
+ }
4463
+
4464
+ declare type ToolResultUnion<TOOLS extends ToolSet> = ToToolResultObject<
4465
+ ToToolsWithDefinedExecute<ToToolsWithExecute<TOOLS>>
4466
+ >;
4467
+
4468
+ export declare type ToolSet = Record<string, Tool>;
4469
+
4470
+ declare type ToToolResultObject<TOOLS extends ToolSet> = ValueOf<{
4471
+ [NAME in keyof TOOLS]: {
4472
+ type: 'tool-result';
4473
+ toolCallId: string;
4474
+ toolName: NAME & string;
4475
+ args: inferParameters<TOOLS[NAME]['parameters']>;
4476
+ result: Awaited<ReturnType<Exclude<TOOLS[NAME]['execute'], undefined>>>;
4477
+ };
4478
+ }>;
4479
+
4480
+ declare type ToToolsWithDefinedExecute<TOOLS extends ToolSet> = {
4481
+ [K in keyof TOOLS as TOOLS[K]['execute'] extends undefined ? never : K]: TOOLS[K];
4482
+ };
4483
+
4484
+ declare type ToToolsWithExecute<TOOLS extends ToolSet> = {
4485
+ [K in keyof TOOLS as TOOLS[K] extends {
4486
+ execute: any;
4487
+ }
4488
+ ? K
4489
+ : never]: TOOLS[K];
4490
+ };
4491
+
4492
+ /**
4493
+ * Tracer provides an interface for creating {@link Span}s.
4494
+ */
4495
+ declare interface Tracer {
4496
+ /**
4497
+ * Starts a new {@link Span}. Start the span without setting it on context.
4498
+ *
4499
+ * This method does NOT modify the current Context.
4500
+ *
4501
+ * @param name The name of the span
4502
+ * @param [options] SpanOptions used for span creation
4503
+ * @param [context] Context to use to extract parent
4504
+ * @returns Span The newly created span
4505
+ * @example
4506
+ * const span = tracer.startSpan('op');
4507
+ * span.setAttribute('key', 'value');
4508
+ * span.end();
4509
+ */
4510
+ startSpan(name: string, options?: SpanOptions, context?: Context): Span;
4511
+ /**
4512
+ * Starts a new {@link Span} and calls the given function passing it the
4513
+ * created span as first argument.
4514
+ * Additionally the new span gets set in context and this context is activated
4515
+ * for the duration of the function call.
4516
+ *
4517
+ * @param name The name of the span
4518
+ * @param [options] SpanOptions used for span creation
4519
+ * @param [context] Context to use to extract parent
4520
+ * @param fn function called in the context of the span and receives the newly created span as an argument
4521
+ * @returns return value of fn
4522
+ * @example
4523
+ * const something = tracer.startActiveSpan('op', span => {
4524
+ * try {
4525
+ * do some work
4526
+ * span.setStatus({code: SpanStatusCode.OK});
4527
+ * return something;
4528
+ * } catch (err) {
4529
+ * span.setStatus({
4530
+ * code: SpanStatusCode.ERROR,
4531
+ * message: err.message,
4532
+ * });
4533
+ * throw err;
4534
+ * } finally {
4535
+ * span.end();
4536
+ * }
4537
+ * });
4538
+ *
4539
+ * @example
4540
+ * const span = tracer.startActiveSpan('op', span => {
4541
+ * try {
4542
+ * do some work
4543
+ * return span;
4544
+ * } catch (err) {
4545
+ * span.setStatus({
4546
+ * code: SpanStatusCode.ERROR,
4547
+ * message: err.message,
4548
+ * });
4549
+ * throw err;
4550
+ * }
4551
+ * });
4552
+ * do some more work
4553
+ * span.end();
4554
+ */
4555
+ startActiveSpan<F extends (span: Span) => unknown>(name: string, fn: F): ReturnType<F>;
4556
+ startActiveSpan<F extends (span: Span) => unknown>(name: string, options: SpanOptions, fn: F): ReturnType<F>;
4557
+ startActiveSpan<F extends (span: Span) => unknown>(
4558
+ name: string,
4559
+ options: SpanOptions,
4560
+ context: Context,
4561
+ fn: F,
4562
+ ): ReturnType<F>;
4563
+ }
4564
+
4565
+ declare interface TraceState {
4566
+ /**
4567
+ * Create a new TraceState which inherits from this TraceState and has the
4568
+ * given key set.
4569
+ * The new entry will always be added in the front of the list of states.
4570
+ *
4571
+ * @param key key of the TraceState entry.
4572
+ * @param value value of the TraceState entry.
4573
+ */
4574
+ set(key: string, value: string): TraceState;
4575
+ /**
4576
+ * Return a new TraceState which inherits from this TraceState but does not
4577
+ * contain the given key.
4578
+ *
4579
+ * @param key the key for the TraceState entry to be removed.
4580
+ */
4581
+ unset(key: string): TraceState;
4582
+ /**
4583
+ * Returns the value to which the specified key is mapped, or `undefined` if
4584
+ * this map contains no mapping for the key.
4585
+ *
4586
+ * @param key with which the specified value is to be associated.
4587
+ * @returns the value to which the specified key is mapped, or `undefined` if
4588
+ * this map contains no mapping for the key.
4589
+ */
4590
+ get(key: string): string | undefined;
4591
+ /**
4592
+ * Serializes the TraceState to a `list` as defined below. The `list` is a
4593
+ * series of `list-members` separated by commas `,`, and a list-member is a
4594
+ * key/value pair separated by an equals sign `=`. Spaces and horizontal tabs
4595
+ * surrounding `list-members` are ignored. There can be a maximum of 32
4596
+ * `list-members` in a `list`.
4597
+ *
4598
+ * @returns the serialized string.
4599
+ */
4600
+ serialize(): string;
4601
+ }
4602
+
4603
+ declare class TypeValidationError extends AISDKError {
4604
+ private readonly [symbol$1];
4605
+ readonly value: unknown;
4606
+ constructor({ value, cause }: { value: unknown; cause: unknown });
4607
+ static isInstance(error: unknown): error is TypeValidationError;
4608
+ /**
4609
+ * Wraps an error into a TypeValidationError.
4610
+ * If the cause is already a TypeValidationError with the same value, it returns the cause.
4611
+ * Otherwise, it creates a new TypeValidationError.
4612
+ *
4613
+ * @param {Object} params - The parameters for wrapping the error.
4614
+ * @param {unknown} params.value - The value that failed validation.
4615
+ * @param {unknown} params.cause - The original error or cause of the validation failure.
4616
+ * @returns {TypeValidationError} A TypeValidationError instance.
4617
+ */
4618
+ static wrap({ value, cause }: { value: unknown; cause: unknown }): TypeValidationError;
4619
+ }
4620
+
4621
+ export declare type UIMessage = Message & {
4622
+ /**
4623
+ * The parts of the message. Use this for rendering the message in the UI.
4624
+ *
4625
+ * Assistant messages can have text, reasoning and tool invocation parts.
4626
+ * User messages can have text parts.
4627
+ */
4628
+ parts: Array<TextUIPart | ReasoningUIPart | ToolInvocationUIPart | SourceUIPart | FileUIPart | StepStartUIPart>;
4629
+ };
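A sketch of walking `message.parts` when rendering, limited to the part kinds declared in this file and assuming `UIMessage` is available from the package's exports (plain string output stands in for real UI components):

```ts
import type { UIMessage } from 'ai';

function renderMessage(message: UIMessage): string {
  return message.parts
    .map(part => {
      switch (part.type) {
        case 'text':
          return part.text;
        case 'tool-invocation':
          // state is 'partial-call' | 'call' | 'result'; only 'result' carries a result value.
          return part.toolInvocation.state === 'result'
            ? `[${part.toolInvocation.toolName} → ${JSON.stringify(part.toolInvocation.result)}]`
            : `[calling ${part.toolInvocation.toolName}…]`;
        default:
          return '';
      }
    })
    .join('');
}
```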
4630
+
4631
+ /**
4632
+ Content of a user message. It can be a string or an array of text and image parts.
4633
+ */
4634
+ export declare type UserContent = string | Array<TextPart | ImagePart | FilePart>;
4635
+
4636
+ declare type ValidationResult<OBJECT> =
4637
+ | {
4638
+ success: true;
4639
+ value: OBJECT;
4640
+ }
4641
+ | {
4642
+ success: false;
4643
+ error: Error;
4644
+ };
4645
+
4646
+ declare type Validator<OBJECT = unknown> = {
4647
+ /**
4648
+ * Used to mark validator functions so we can support both Zod and custom schemas.
4649
+ */
4650
+ [validatorSymbol]: true;
4651
+ /**
4652
+ * Optional. Validates that the structure of a value matches this schema,
4653
+ * and returns a typed version of the value if it does.
4654
+ */
4655
+ readonly validate?: (value: unknown) => ValidationResult<OBJECT>;
4656
+ };
4657
+
4658
+ /**
4659
+ * Used to mark validator functions so we can support both Zod and custom schemas.
4660
+ */
4661
+ declare const validatorSymbol: unique symbol;
4662
+
4663
+ /**
4664
+ Create a union of the given object's values, and optionally specify which keys to get the values from.
4665
+
4666
+ Please upvote [this issue](https://github.com/microsoft/TypeScript/issues/31438) if you want to have this type as a built-in in TypeScript.
4667
+
4668
+ @example
4669
+ ```
4670
+ // data.json
4671
+ {
4672
+ 'foo': 1,
4673
+ 'bar': 2,
4674
+ 'biz': 3
4675
+ }
4676
+
4677
+ // main.ts
4678
+ import type {ValueOf} from 'type-fest';
4679
+ import data = require('./data.json');
4680
+
4681
+ export function getData(name: string): ValueOf<typeof data> {
4682
+ return data[name];
4683
+ }
4684
+
4685
+ export function onlyBar(name: string): ValueOf<typeof data, 'bar'> {
4686
+ return data[name];
4687
+ }
4688
+
4689
+ // file.ts
4690
+ import {getData, onlyBar} from './main';
4691
+
4692
+ getData('foo');
4693
+ //=> 1
4694
+
4695
+ onlyBar('foo');
4696
+ //=> TypeError ...
4697
+
4698
+ onlyBar('bar');
4699
+ //=> 2
4700
+ ```
4701
+ * @see https://github.com/sindresorhus/type-fest/blob/main/source/value-of.d.ts
4702
+ */
4703
+ declare type ValueOf<ObjectType, ValueType extends keyof ObjectType = keyof ObjectType> = ObjectType[ValueType];
4704
+
4705
+ export {};