@fondation-io/ai 7.0.0-beta.45

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (536)
  1. package/CHANGELOG.md +7687 -0
  2. package/README.md +238 -0
  3. package/dist/index.d.mts +7056 -0
  4. package/dist/index.d.ts +7056 -0
  5. package/dist/index.js +14607 -0
  6. package/dist/index.js.map +1 -0
  7. package/dist/index.mjs +14578 -0
  8. package/dist/index.mjs.map +1 -0
  9. package/dist/internal/index.d.mts +303 -0
  10. package/dist/internal/index.d.ts +303 -0
  11. package/dist/internal/index.js +1352 -0
  12. package/dist/internal/index.js.map +1 -0
  13. package/dist/internal/index.mjs +1336 -0
  14. package/dist/internal/index.mjs.map +1 -0
  15. package/dist/test/index.d.mts +265 -0
  16. package/dist/test/index.d.ts +265 -0
  17. package/dist/test/index.js +509 -0
  18. package/dist/test/index.js.map +1 -0
  19. package/dist/test/index.mjs +472 -0
  20. package/dist/test/index.mjs.map +1 -0
  21. package/docs/00-introduction/index.mdx +76 -0
  22. package/docs/02-foundations/01-overview.mdx +43 -0
  23. package/docs/02-foundations/02-providers-and-models.mdx +160 -0
  24. package/docs/02-foundations/03-prompts.mdx +616 -0
  25. package/docs/02-foundations/04-tools.mdx +251 -0
  26. package/docs/02-foundations/05-streaming.mdx +62 -0
  27. package/docs/02-foundations/06-provider-options.mdx +345 -0
  28. package/docs/02-foundations/index.mdx +49 -0
  29. package/docs/02-getting-started/00-choosing-a-provider.mdx +110 -0
  30. package/docs/02-getting-started/01-navigating-the-library.mdx +85 -0
  31. package/docs/02-getting-started/02-nextjs-app-router.mdx +559 -0
  32. package/docs/02-getting-started/03-nextjs-pages-router.mdx +542 -0
  33. package/docs/02-getting-started/04-svelte.mdx +627 -0
  34. package/docs/02-getting-started/05-nuxt.mdx +566 -0
  35. package/docs/02-getting-started/06-nodejs.mdx +512 -0
  36. package/docs/02-getting-started/07-expo.mdx +766 -0
  37. package/docs/02-getting-started/08-tanstack-start.mdx +583 -0
  38. package/docs/02-getting-started/09-coding-agents.mdx +179 -0
  39. package/docs/02-getting-started/index.mdx +44 -0
  40. package/docs/03-agents/01-overview.mdx +96 -0
  41. package/docs/03-agents/02-building-agents.mdx +449 -0
  42. package/docs/03-agents/03-workflows.mdx +386 -0
  43. package/docs/03-agents/04-loop-control.mdx +394 -0
  44. package/docs/03-agents/05-configuring-call-options.mdx +286 -0
  45. package/docs/03-agents/06-memory.mdx +222 -0
  46. package/docs/03-agents/06-subagents.mdx +362 -0
  47. package/docs/03-agents/index.mdx +46 -0
  48. package/docs/03-ai-sdk-core/01-overview.mdx +31 -0
  49. package/docs/03-ai-sdk-core/05-generating-text.mdx +707 -0
  50. package/docs/03-ai-sdk-core/10-generating-structured-data.mdx +498 -0
  51. package/docs/03-ai-sdk-core/15-tools-and-tool-calling.mdx +1148 -0
  52. package/docs/03-ai-sdk-core/16-mcp-tools.mdx +383 -0
  53. package/docs/03-ai-sdk-core/20-prompt-engineering.mdx +146 -0
  54. package/docs/03-ai-sdk-core/25-settings.mdx +216 -0
  55. package/docs/03-ai-sdk-core/26-reasoning.mdx +190 -0
  56. package/docs/03-ai-sdk-core/30-embeddings.mdx +236 -0
  57. package/docs/03-ai-sdk-core/31-reranking.mdx +218 -0
  58. package/docs/03-ai-sdk-core/35-image-generation.mdx +341 -0
  59. package/docs/03-ai-sdk-core/36-transcription.mdx +227 -0
  60. package/docs/03-ai-sdk-core/37-speech.mdx +169 -0
  61. package/docs/03-ai-sdk-core/38-video-generation.mdx +366 -0
  62. package/docs/03-ai-sdk-core/40-middleware.mdx +485 -0
  63. package/docs/03-ai-sdk-core/45-provider-management.mdx +349 -0
  64. package/docs/03-ai-sdk-core/50-error-handling.mdx +149 -0
  65. package/docs/03-ai-sdk-core/55-testing.mdx +219 -0
  66. package/docs/03-ai-sdk-core/60-telemetry.mdx +391 -0
  67. package/docs/03-ai-sdk-core/65-devtools.mdx +107 -0
  68. package/docs/03-ai-sdk-core/65-event-listeners.mdx +1303 -0
  69. package/docs/03-ai-sdk-core/index.mdx +99 -0
  70. package/docs/04-ai-sdk-ui/01-overview.mdx +44 -0
  71. package/docs/04-ai-sdk-ui/02-chatbot.mdx +1320 -0
  72. package/docs/04-ai-sdk-ui/03-chatbot-message-persistence.mdx +534 -0
  73. package/docs/04-ai-sdk-ui/03-chatbot-resume-streams.mdx +263 -0
  74. package/docs/04-ai-sdk-ui/03-chatbot-tool-usage.mdx +682 -0
  75. package/docs/04-ai-sdk-ui/04-generative-user-interfaces.mdx +389 -0
  76. package/docs/04-ai-sdk-ui/05-completion.mdx +181 -0
  77. package/docs/04-ai-sdk-ui/08-object-generation.mdx +344 -0
  78. package/docs/04-ai-sdk-ui/20-streaming-data.mdx +397 -0
  79. package/docs/04-ai-sdk-ui/21-error-handling.mdx +190 -0
  80. package/docs/04-ai-sdk-ui/21-transport.mdx +174 -0
  81. package/docs/04-ai-sdk-ui/24-reading-ui-message-streams.mdx +104 -0
  82. package/docs/04-ai-sdk-ui/25-message-metadata.mdx +152 -0
  83. package/docs/04-ai-sdk-ui/50-stream-protocol.mdx +503 -0
  84. package/docs/04-ai-sdk-ui/index.mdx +64 -0
  85. package/docs/05-ai-sdk-rsc/01-overview.mdx +45 -0
  86. package/docs/05-ai-sdk-rsc/02-streaming-react-components.mdx +209 -0
  87. package/docs/05-ai-sdk-rsc/03-generative-ui-state.mdx +279 -0
  88. package/docs/05-ai-sdk-rsc/03-saving-and-restoring-states.mdx +105 -0
  89. package/docs/05-ai-sdk-rsc/04-multistep-interfaces.mdx +282 -0
  90. package/docs/05-ai-sdk-rsc/05-streaming-values.mdx +157 -0
  91. package/docs/05-ai-sdk-rsc/06-loading-state.mdx +273 -0
  92. package/docs/05-ai-sdk-rsc/08-error-handling.mdx +94 -0
  93. package/docs/05-ai-sdk-rsc/09-authentication.mdx +42 -0
  94. package/docs/05-ai-sdk-rsc/10-migrating-to-ui.mdx +722 -0
  95. package/docs/05-ai-sdk-rsc/index.mdx +63 -0
  96. package/docs/06-advanced/01-prompt-engineering.mdx +96 -0
  97. package/docs/06-advanced/02-stopping-streams.mdx +184 -0
  98. package/docs/06-advanced/03-backpressure.mdx +173 -0
  99. package/docs/06-advanced/04-caching.mdx +169 -0
  100. package/docs/06-advanced/05-multiple-streamables.mdx +68 -0
  101. package/docs/06-advanced/06-rate-limiting.mdx +60 -0
  102. package/docs/06-advanced/07-rendering-ui-with-language-models.mdx +225 -0
  103. package/docs/06-advanced/08-model-as-router.mdx +120 -0
  104. package/docs/06-advanced/09-multistep-interfaces.mdx +115 -0
  105. package/docs/06-advanced/09-sequential-generations.mdx +55 -0
  106. package/docs/06-advanced/10-vercel-deployment-guide.mdx +117 -0
  107. package/docs/06-advanced/index.mdx +11 -0
  108. package/docs/07-reference/01-ai-sdk-core/01-generate-text.mdx +2785 -0
  109. package/docs/07-reference/01-ai-sdk-core/02-stream-text.mdx +3752 -0
  110. package/docs/07-reference/01-ai-sdk-core/05-embed.mdx +332 -0
  111. package/docs/07-reference/01-ai-sdk-core/06-embed-many.mdx +330 -0
  112. package/docs/07-reference/01-ai-sdk-core/06-rerank.mdx +323 -0
  113. package/docs/07-reference/01-ai-sdk-core/10-generate-image.mdx +251 -0
  114. package/docs/07-reference/01-ai-sdk-core/11-transcribe.mdx +152 -0
  115. package/docs/07-reference/01-ai-sdk-core/12-generate-speech.mdx +221 -0
  116. package/docs/07-reference/01-ai-sdk-core/13-generate-video.mdx +264 -0
  117. package/docs/07-reference/01-ai-sdk-core/15-agent.mdx +235 -0
  118. package/docs/07-reference/01-ai-sdk-core/16-tool-loop-agent.mdx +973 -0
  119. package/docs/07-reference/01-ai-sdk-core/17-create-agent-ui-stream.mdx +154 -0
  120. package/docs/07-reference/01-ai-sdk-core/18-create-agent-ui-stream-response.mdx +173 -0
  121. package/docs/07-reference/01-ai-sdk-core/18-pipe-agent-ui-stream-to-response.mdx +150 -0
  122. package/docs/07-reference/01-ai-sdk-core/20-tool.mdx +209 -0
  123. package/docs/07-reference/01-ai-sdk-core/22-dynamic-tool.mdx +223 -0
  124. package/docs/07-reference/01-ai-sdk-core/23-create-mcp-client.mdx +423 -0
  125. package/docs/07-reference/01-ai-sdk-core/24-mcp-stdio-transport.mdx +68 -0
  126. package/docs/07-reference/01-ai-sdk-core/25-json-schema.mdx +94 -0
  127. package/docs/07-reference/01-ai-sdk-core/26-zod-schema.mdx +109 -0
  128. package/docs/07-reference/01-ai-sdk-core/27-valibot-schema.mdx +58 -0
  129. package/docs/07-reference/01-ai-sdk-core/28-output.mdx +342 -0
  130. package/docs/07-reference/01-ai-sdk-core/30-model-message.mdx +435 -0
  131. package/docs/07-reference/01-ai-sdk-core/31-ui-message.mdx +264 -0
  132. package/docs/07-reference/01-ai-sdk-core/32-validate-ui-messages.mdx +101 -0
  133. package/docs/07-reference/01-ai-sdk-core/33-safe-validate-ui-messages.mdx +113 -0
  134. package/docs/07-reference/01-ai-sdk-core/40-provider-registry.mdx +198 -0
  135. package/docs/07-reference/01-ai-sdk-core/42-custom-provider.mdx +157 -0
  136. package/docs/07-reference/01-ai-sdk-core/50-cosine-similarity.mdx +52 -0
  137. package/docs/07-reference/01-ai-sdk-core/60-wrap-language-model.mdx +59 -0
  138. package/docs/07-reference/01-ai-sdk-core/61-wrap-image-model.mdx +64 -0
  139. package/docs/07-reference/01-ai-sdk-core/65-language-model-v2-middleware.mdx +74 -0
  140. package/docs/07-reference/01-ai-sdk-core/66-extract-reasoning-middleware.mdx +68 -0
  141. package/docs/07-reference/01-ai-sdk-core/67-simulate-streaming-middleware.mdx +71 -0
  142. package/docs/07-reference/01-ai-sdk-core/68-default-settings-middleware.mdx +80 -0
  143. package/docs/07-reference/01-ai-sdk-core/69-add-tool-input-examples-middleware.mdx +155 -0
  144. package/docs/07-reference/01-ai-sdk-core/70-extract-json-middleware.mdx +147 -0
  145. package/docs/07-reference/01-ai-sdk-core/70-step-count-is.mdx +84 -0
  146. package/docs/07-reference/01-ai-sdk-core/71-has-tool-call.mdx +120 -0
  147. package/docs/07-reference/01-ai-sdk-core/75-simulate-readable-stream.mdx +94 -0
  148. package/docs/07-reference/01-ai-sdk-core/80-smooth-stream.mdx +145 -0
  149. package/docs/07-reference/01-ai-sdk-core/90-generate-id.mdx +30 -0
  150. package/docs/07-reference/01-ai-sdk-core/91-create-id-generator.mdx +89 -0
  151. package/docs/07-reference/01-ai-sdk-core/92-default-generated-file.mdx +68 -0
  152. package/docs/07-reference/01-ai-sdk-core/index.mdx +160 -0
  153. package/docs/07-reference/02-ai-sdk-ui/01-use-chat.mdx +493 -0
  154. package/docs/07-reference/02-ai-sdk-ui/02-use-completion.mdx +185 -0
  155. package/docs/07-reference/02-ai-sdk-ui/03-use-object.mdx +196 -0
  156. package/docs/07-reference/02-ai-sdk-ui/31-convert-to-model-messages.mdx +231 -0
  157. package/docs/07-reference/02-ai-sdk-ui/32-prune-messages.mdx +108 -0
  158. package/docs/07-reference/02-ai-sdk-ui/40-create-ui-message-stream.mdx +162 -0
  159. package/docs/07-reference/02-ai-sdk-ui/41-create-ui-message-stream-response.mdx +119 -0
  160. package/docs/07-reference/02-ai-sdk-ui/42-pipe-ui-message-stream-to-response.mdx +77 -0
  161. package/docs/07-reference/02-ai-sdk-ui/43-read-ui-message-stream.mdx +57 -0
  162. package/docs/07-reference/02-ai-sdk-ui/46-infer-ui-tools.mdx +99 -0
  163. package/docs/07-reference/02-ai-sdk-ui/47-infer-ui-tool.mdx +75 -0
  164. package/docs/07-reference/02-ai-sdk-ui/50-direct-chat-transport.mdx +333 -0
  165. package/docs/07-reference/02-ai-sdk-ui/index.mdx +89 -0
  166. package/docs/07-reference/03-ai-sdk-rsc/01-stream-ui.mdx +767 -0
  167. package/docs/07-reference/03-ai-sdk-rsc/02-create-ai.mdx +90 -0
  168. package/docs/07-reference/03-ai-sdk-rsc/03-create-streamable-ui.mdx +91 -0
  169. package/docs/07-reference/03-ai-sdk-rsc/04-create-streamable-value.mdx +78 -0
  170. package/docs/07-reference/03-ai-sdk-rsc/05-read-streamable-value.mdx +79 -0
  171. package/docs/07-reference/03-ai-sdk-rsc/06-get-ai-state.mdx +50 -0
  172. package/docs/07-reference/03-ai-sdk-rsc/07-get-mutable-ai-state.mdx +70 -0
  173. package/docs/07-reference/03-ai-sdk-rsc/08-use-ai-state.mdx +26 -0
  174. package/docs/07-reference/03-ai-sdk-rsc/09-use-actions.mdx +42 -0
  175. package/docs/07-reference/03-ai-sdk-rsc/10-use-ui-state.mdx +35 -0
  176. package/docs/07-reference/03-ai-sdk-rsc/11-use-streamable-value.mdx +46 -0
  177. package/docs/07-reference/03-ai-sdk-rsc/20-render.mdx +266 -0
  178. package/docs/07-reference/03-ai-sdk-rsc/index.mdx +67 -0
  179. package/docs/07-reference/05-ai-sdk-errors/ai-api-call-error.mdx +31 -0
  180. package/docs/07-reference/05-ai-sdk-errors/ai-download-error.mdx +28 -0
  181. package/docs/07-reference/05-ai-sdk-errors/ai-empty-response-body-error.mdx +24 -0
  182. package/docs/07-reference/05-ai-sdk-errors/ai-invalid-argument-error.mdx +26 -0
  183. package/docs/07-reference/05-ai-sdk-errors/ai-invalid-data-content-error.mdx +26 -0
  184. package/docs/07-reference/05-ai-sdk-errors/ai-invalid-message-role-error.mdx +25 -0
  185. package/docs/07-reference/05-ai-sdk-errors/ai-invalid-prompt-error.mdx +47 -0
  186. package/docs/07-reference/05-ai-sdk-errors/ai-invalid-response-data-error.mdx +25 -0
  187. package/docs/07-reference/05-ai-sdk-errors/ai-invalid-tool-approval-error.mdx +24 -0
  188. package/docs/07-reference/05-ai-sdk-errors/ai-invalid-tool-input-error.mdx +27 -0
  189. package/docs/07-reference/05-ai-sdk-errors/ai-json-parse-error.mdx +25 -0
  190. package/docs/07-reference/05-ai-sdk-errors/ai-load-api-key-error.mdx +24 -0
  191. package/docs/07-reference/05-ai-sdk-errors/ai-load-setting-error.mdx +24 -0
  192. package/docs/07-reference/05-ai-sdk-errors/ai-message-conversion-error.mdx +25 -0
  193. package/docs/07-reference/05-ai-sdk-errors/ai-no-content-generated-error.mdx +24 -0
  194. package/docs/07-reference/05-ai-sdk-errors/ai-no-image-generated-error.mdx +36 -0
  195. package/docs/07-reference/05-ai-sdk-errors/ai-no-object-generated-error.mdx +43 -0
  196. package/docs/07-reference/05-ai-sdk-errors/ai-no-output-generated-error.mdx +25 -0
  197. package/docs/07-reference/05-ai-sdk-errors/ai-no-speech-generated-error.mdx +24 -0
  198. package/docs/07-reference/05-ai-sdk-errors/ai-no-such-model-error.mdx +26 -0
  199. package/docs/07-reference/05-ai-sdk-errors/ai-no-such-provider-error.mdx +28 -0
  200. package/docs/07-reference/05-ai-sdk-errors/ai-no-such-tool-error.mdx +26 -0
  201. package/docs/07-reference/05-ai-sdk-errors/ai-no-transcript-generated-error.mdx +24 -0
  202. package/docs/07-reference/05-ai-sdk-errors/ai-no-video-generated-error.mdx +39 -0
  203. package/docs/07-reference/05-ai-sdk-errors/ai-retry-error.mdx +27 -0
  204. package/docs/07-reference/05-ai-sdk-errors/ai-too-many-embedding-values-for-call-error.mdx +27 -0
  205. package/docs/07-reference/05-ai-sdk-errors/ai-tool-call-not-found-for-approval-error.mdx +25 -0
  206. package/docs/07-reference/05-ai-sdk-errors/ai-tool-call-repair-error.mdx +28 -0
  207. package/docs/07-reference/05-ai-sdk-errors/ai-type-validation-error.mdx +25 -0
  208. package/docs/07-reference/05-ai-sdk-errors/ai-ui-message-stream-error.mdx +67 -0
  209. package/docs/07-reference/05-ai-sdk-errors/ai-unsupported-functionality-error.mdx +25 -0
  210. package/docs/07-reference/05-ai-sdk-errors/index.mdx +39 -0
  211. package/docs/07-reference/index.mdx +28 -0
  212. package/docs/08-migration-guides/00-versioning.mdx +46 -0
  213. package/docs/08-migration-guides/23-migration-guide-7-0.mdx +95 -0
  214. package/docs/08-migration-guides/24-migration-guide-6-0.mdx +823 -0
  215. package/docs/08-migration-guides/25-migration-guide-5-0-data.mdx +882 -0
  216. package/docs/08-migration-guides/26-migration-guide-5-0.mdx +3427 -0
  217. package/docs/08-migration-guides/27-migration-guide-4-2.mdx +99 -0
  218. package/docs/08-migration-guides/28-migration-guide-4-1.mdx +14 -0
  219. package/docs/08-migration-guides/29-migration-guide-4-0.mdx +1157 -0
  220. package/docs/08-migration-guides/36-migration-guide-3-4.mdx +14 -0
  221. package/docs/08-migration-guides/37-migration-guide-3-3.mdx +64 -0
  222. package/docs/08-migration-guides/38-migration-guide-3-2.mdx +46 -0
  223. package/docs/08-migration-guides/39-migration-guide-3-1.mdx +168 -0
  224. package/docs/08-migration-guides/index.mdx +22 -0
  225. package/docs/09-troubleshooting/01-azure-stream-slow.mdx +33 -0
  226. package/docs/09-troubleshooting/03-server-actions-in-client-components.mdx +40 -0
  227. package/docs/09-troubleshooting/04-strange-stream-output.mdx +36 -0
  228. package/docs/09-troubleshooting/05-streamable-ui-errors.mdx +16 -0
  229. package/docs/09-troubleshooting/05-tool-invocation-missing-result.mdx +106 -0
  230. package/docs/09-troubleshooting/06-streaming-not-working-when-deployed.mdx +31 -0
  231. package/docs/09-troubleshooting/06-streaming-not-working-when-proxied.mdx +31 -0
  232. package/docs/09-troubleshooting/06-timeout-on-vercel.mdx +60 -0
  233. package/docs/09-troubleshooting/07-unclosed-streams.mdx +34 -0
  234. package/docs/09-troubleshooting/08-use-chat-failed-to-parse-stream.mdx +26 -0
  235. package/docs/09-troubleshooting/09-client-stream-error.mdx +25 -0
  236. package/docs/09-troubleshooting/10-use-chat-tools-no-response.mdx +32 -0
  237. package/docs/09-troubleshooting/11-use-chat-custom-request-options.mdx +149 -0
  238. package/docs/09-troubleshooting/12-typescript-performance-zod.mdx +46 -0
  239. package/docs/09-troubleshooting/12-use-chat-an-error-occurred.mdx +59 -0
  240. package/docs/09-troubleshooting/13-repeated-assistant-messages.mdx +73 -0
  241. package/docs/09-troubleshooting/14-stream-abort-handling.mdx +73 -0
  242. package/docs/09-troubleshooting/14-tool-calling-with-structured-outputs.mdx +48 -0
  243. package/docs/09-troubleshooting/15-abort-breaks-resumable-streams.mdx +55 -0
  244. package/docs/09-troubleshooting/15-stream-text-not-working.mdx +33 -0
  245. package/docs/09-troubleshooting/16-streaming-status-delay.mdx +63 -0
  246. package/docs/09-troubleshooting/17-use-chat-stale-body-data.mdx +141 -0
  247. package/docs/09-troubleshooting/18-ontoolcall-type-narrowing.mdx +66 -0
  248. package/docs/09-troubleshooting/19-unsupported-model-version.mdx +50 -0
  249. package/docs/09-troubleshooting/20-no-object-generated-content-filter.mdx +76 -0
  250. package/docs/09-troubleshooting/21-missing-tool-results-error.mdx +82 -0
  251. package/docs/09-troubleshooting/30-model-is-not-assignable-to-type.mdx +21 -0
  252. package/docs/09-troubleshooting/40-typescript-cannot-find-namespace-jsx.mdx +24 -0
  253. package/docs/09-troubleshooting/50-react-maximum-update-depth-exceeded.mdx +39 -0
  254. package/docs/09-troubleshooting/60-jest-cannot-find-module-ai-rsc.mdx +22 -0
  255. package/docs/09-troubleshooting/70-high-memory-usage-with-images.mdx +108 -0
  256. package/docs/09-troubleshooting/index.mdx +11 -0
  257. package/internal.d.ts +1 -0
  258. package/package.json +120 -0
  259. package/src/agent/agent.ts +156 -0
  260. package/src/agent/create-agent-ui-stream-response.ts +61 -0
  261. package/src/agent/create-agent-ui-stream.ts +84 -0
  262. package/src/agent/index.ts +37 -0
  263. package/src/agent/infer-agent-tools.ts +7 -0
  264. package/src/agent/infer-agent-ui-message.ts +11 -0
  265. package/src/agent/pipe-agent-ui-stream-to-response.ts +64 -0
  266. package/src/agent/tool-loop-agent-settings.ts +252 -0
  267. package/src/agent/tool-loop-agent.ts +205 -0
  268. package/src/embed/embed-events.ts +181 -0
  269. package/src/embed/embed-many-result.ts +53 -0
  270. package/src/embed/embed-many.ts +428 -0
  271. package/src/embed/embed-result.ts +50 -0
  272. package/src/embed/embed.ts +266 -0
  273. package/src/embed/index.ts +5 -0
  274. package/src/error/index.ts +37 -0
  275. package/src/error/invalid-argument-error.ts +34 -0
  276. package/src/error/invalid-stream-part-error.ts +28 -0
  277. package/src/error/invalid-tool-approval-error.ts +26 -0
  278. package/src/error/invalid-tool-input-error.ts +33 -0
  279. package/src/error/missing-tool-result-error.ts +28 -0
  280. package/src/error/no-image-generated-error.ts +39 -0
  281. package/src/error/no-object-generated-error.ts +70 -0
  282. package/src/error/no-output-generated-error.ts +26 -0
  283. package/src/error/no-speech-generated-error.ts +28 -0
  284. package/src/error/no-such-tool-error.ts +35 -0
  285. package/src/error/no-transcript-generated-error.ts +30 -0
  286. package/src/error/no-video-generated-error.ts +57 -0
  287. package/src/error/tool-call-not-found-for-approval-error.ts +32 -0
  288. package/src/error/tool-call-repair-error.ts +30 -0
  289. package/src/error/ui-message-stream-error.ts +48 -0
  290. package/src/error/unsupported-model-version-error.ts +23 -0
  291. package/src/error/verify-no-object-generated-error.ts +27 -0
  292. package/src/generate-image/generate-image-result.ts +42 -0
  293. package/src/generate-image/generate-image.ts +361 -0
  294. package/src/generate-image/index.ts +18 -0
  295. package/src/generate-object/generate-object-result.ts +67 -0
  296. package/src/generate-object/generate-object.ts +514 -0
  297. package/src/generate-object/index.ts +9 -0
  298. package/src/generate-object/inject-json-instruction.ts +30 -0
  299. package/src/generate-object/output-strategy.ts +415 -0
  300. package/src/generate-object/parse-and-validate-object-result.ts +111 -0
  301. package/src/generate-object/repair-text.ts +12 -0
  302. package/src/generate-object/stream-object-result.ts +120 -0
  303. package/src/generate-object/stream-object.ts +984 -0
  304. package/src/generate-object/validate-object-generation-input.ts +144 -0
  305. package/src/generate-speech/generate-speech-result.ts +30 -0
  306. package/src/generate-speech/generate-speech.ts +191 -0
  307. package/src/generate-speech/generated-audio-file.ts +65 -0
  308. package/src/generate-speech/index.ts +3 -0
  309. package/src/generate-text/collect-tool-approvals.ts +116 -0
  310. package/src/generate-text/content-part.ts +31 -0
  311. package/src/generate-text/core-events.ts +390 -0
  312. package/src/generate-text/create-execute-tools-transformation.ts +144 -0
  313. package/src/generate-text/execute-tool-call.ts +190 -0
  314. package/src/generate-text/extract-reasoning-content.ts +17 -0
  315. package/src/generate-text/extract-text-content.ts +15 -0
  316. package/src/generate-text/generate-text-result.ts +168 -0
  317. package/src/generate-text/generate-text.ts +1445 -0
  318. package/src/generate-text/generated-file.ts +70 -0
  319. package/src/generate-text/index.ts +78 -0
  320. package/src/generate-text/invoke-tool-callbacks-from-stream.ts +81 -0
  321. package/src/generate-text/is-approval-needed.ts +29 -0
  322. package/src/generate-text/output-utils.ts +23 -0
  323. package/src/generate-text/output.ts +590 -0
  324. package/src/generate-text/parse-tool-call.ts +188 -0
  325. package/src/generate-text/prepare-step.ts +103 -0
  326. package/src/generate-text/prune-messages.ts +167 -0
  327. package/src/generate-text/reasoning-output.ts +99 -0
  328. package/src/generate-text/reasoning.ts +10 -0
  329. package/src/generate-text/response-message.ts +10 -0
  330. package/src/generate-text/smooth-stream.ts +162 -0
  331. package/src/generate-text/step-result.ts +310 -0
  332. package/src/generate-text/stop-condition.ts +29 -0
  333. package/src/generate-text/stream-model-call.ts +418 -0
  334. package/src/generate-text/stream-text-result.ts +536 -0
  335. package/src/generate-text/stream-text.ts +2696 -0
  336. package/src/generate-text/to-response-messages.ts +195 -0
  337. package/src/generate-text/tool-approval-request-output.ts +21 -0
  338. package/src/generate-text/tool-call-repair-function.ts +27 -0
  339. package/src/generate-text/tool-call.ts +47 -0
  340. package/src/generate-text/tool-error.ts +34 -0
  341. package/src/generate-text/tool-output-denied.ts +21 -0
  342. package/src/generate-text/tool-output.ts +7 -0
  343. package/src/generate-text/tool-result.ts +36 -0
  344. package/src/generate-text/tool-set.ts +14 -0
  345. package/src/generate-video/generate-video-result.ts +36 -0
  346. package/src/generate-video/generate-video.ts +402 -0
  347. package/src/generate-video/index.ts +3 -0
  348. package/src/global.ts +36 -0
  349. package/src/index.ts +49 -0
  350. package/src/logger/index.ts +6 -0
  351. package/src/logger/log-warnings.ts +140 -0
  352. package/src/middleware/add-tool-input-examples-middleware.ts +90 -0
  353. package/src/middleware/default-embedding-settings-middleware.ts +22 -0
  354. package/src/middleware/default-settings-middleware.ts +33 -0
  355. package/src/middleware/extract-json-middleware.ts +197 -0
  356. package/src/middleware/extract-reasoning-middleware.ts +249 -0
  357. package/src/middleware/index.ts +10 -0
  358. package/src/middleware/simulate-streaming-middleware.ts +79 -0
  359. package/src/middleware/wrap-embedding-model.ts +89 -0
  360. package/src/middleware/wrap-image-model.ts +92 -0
  361. package/src/middleware/wrap-language-model.ts +108 -0
  362. package/src/middleware/wrap-provider.ts +51 -0
  363. package/src/model/as-embedding-model-v3.ts +24 -0
  364. package/src/model/as-embedding-model-v4.ts +25 -0
  365. package/src/model/as-image-model-v3.ts +24 -0
  366. package/src/model/as-image-model-v4.ts +21 -0
  367. package/src/model/as-language-model-v3.ts +103 -0
  368. package/src/model/as-language-model-v4.ts +25 -0
  369. package/src/model/as-provider-v3.ts +36 -0
  370. package/src/model/as-provider-v4.ts +47 -0
  371. package/src/model/as-reranking-model-v4.ts +16 -0
  372. package/src/model/as-speech-model-v3.ts +24 -0
  373. package/src/model/as-speech-model-v4.ts +21 -0
  374. package/src/model/as-transcription-model-v3.ts +24 -0
  375. package/src/model/as-transcription-model-v4.ts +25 -0
  376. package/src/model/as-video-model-v4.ts +19 -0
  377. package/src/model/resolve-model.ts +172 -0
  378. package/src/prompt/call-settings.ts +169 -0
  379. package/src/prompt/content-part.ts +236 -0
  380. package/src/prompt/convert-to-language-model-prompt.ts +548 -0
  381. package/src/prompt/create-tool-model-output.ts +34 -0
  382. package/src/prompt/data-content.ts +134 -0
  383. package/src/prompt/index.ts +27 -0
  384. package/src/prompt/invalid-data-content-error.ts +29 -0
  385. package/src/prompt/invalid-message-role-error.ts +27 -0
  386. package/src/prompt/message-conversion-error.ts +28 -0
  387. package/src/prompt/message.ts +72 -0
  388. package/src/prompt/prepare-call-settings.ts +110 -0
  389. package/src/prompt/prepare-tools-and-tool-choice.ts +86 -0
  390. package/src/prompt/prompt.ts +43 -0
  391. package/src/prompt/split-data-url.ts +17 -0
  392. package/src/prompt/standardize-prompt.ts +99 -0
  393. package/src/prompt/wrap-gateway-error.ts +29 -0
  394. package/src/registry/custom-provider.ts +210 -0
  395. package/src/registry/index.ts +7 -0
  396. package/src/registry/no-such-provider-error.ts +41 -0
  397. package/src/registry/provider-registry.ts +331 -0
  398. package/src/rerank/index.ts +8 -0
  399. package/src/rerank/rerank-events.ts +189 -0
  400. package/src/rerank/rerank-result.ts +70 -0
  401. package/src/rerank/rerank.ts +348 -0
  402. package/src/telemetry/assemble-operation-name.ts +21 -0
  403. package/src/telemetry/get-base-telemetry-attributes.ts +45 -0
  404. package/src/telemetry/get-global-telemetry-integration.ts +126 -0
  405. package/src/telemetry/get-tracer.ts +20 -0
  406. package/src/telemetry/index.ts +4 -0
  407. package/src/telemetry/noop-tracer.ts +69 -0
  408. package/src/telemetry/open-telemetry-integration.ts +875 -0
  409. package/src/telemetry/record-span.ts +75 -0
  410. package/src/telemetry/select-telemetry-attributes.ts +78 -0
  411. package/src/telemetry/stringify-for-telemetry.ts +33 -0
  412. package/src/telemetry/telemetry-integration-registry.ts +22 -0
  413. package/src/telemetry/telemetry-integration.ts +139 -0
  414. package/src/telemetry/telemetry-settings.ts +55 -0
  415. package/src/test/mock-embedding-model-v2.ts +35 -0
  416. package/src/test/mock-embedding-model-v3.ts +48 -0
  417. package/src/test/mock-embedding-model-v4.ts +48 -0
  418. package/src/test/mock-image-model-v2.ts +28 -0
  419. package/src/test/mock-image-model-v3.ts +28 -0
  420. package/src/test/mock-image-model-v4.ts +28 -0
  421. package/src/test/mock-language-model-v2.ts +72 -0
  422. package/src/test/mock-language-model-v3.ts +77 -0
  423. package/src/test/mock-language-model-v4.ts +77 -0
  424. package/src/test/mock-provider-v2.ts +68 -0
  425. package/src/test/mock-provider-v3.ts +80 -0
  426. package/src/test/mock-provider-v4.ts +80 -0
  427. package/src/test/mock-reranking-model-v3.ts +25 -0
  428. package/src/test/mock-reranking-model-v4.ts +25 -0
  429. package/src/test/mock-server-response.ts +69 -0
  430. package/src/test/mock-speech-model-v2.ts +24 -0
  431. package/src/test/mock-speech-model-v3.ts +24 -0
  432. package/src/test/mock-speech-model-v4.ts +24 -0
  433. package/src/test/mock-tracer.ts +156 -0
  434. package/src/test/mock-transcription-model-v2.ts +24 -0
  435. package/src/test/mock-transcription-model-v3.ts +24 -0
  436. package/src/test/mock-transcription-model-v4.ts +24 -0
  437. package/src/test/mock-values.ts +4 -0
  438. package/src/test/mock-video-model-v3.ts +28 -0
  439. package/src/test/mock-video-model-v4.ts +28 -0
  440. package/src/test/not-implemented.ts +3 -0
  441. package/src/text-stream/create-text-stream-response.ts +30 -0
  442. package/src/text-stream/index.ts +2 -0
  443. package/src/text-stream/pipe-text-stream-to-response.ts +38 -0
  444. package/src/transcribe/index.ts +2 -0
  445. package/src/transcribe/transcribe-result.ts +60 -0
  446. package/src/transcribe/transcribe.ts +187 -0
  447. package/src/types/embedding-model-middleware.ts +15 -0
  448. package/src/types/embedding-model.ts +20 -0
  449. package/src/types/image-model-middleware.ts +15 -0
  450. package/src/types/image-model-response-metadata.ts +16 -0
  451. package/src/types/image-model.ts +19 -0
  452. package/src/types/index.ts +29 -0
  453. package/src/types/json-value.ts +15 -0
  454. package/src/types/language-model-middleware.ts +15 -0
  455. package/src/types/language-model-request-metadata.ts +6 -0
  456. package/src/types/language-model-response-metadata.ts +21 -0
  457. package/src/types/language-model.ts +106 -0
  458. package/src/types/provider-metadata.ts +16 -0
  459. package/src/types/provider.ts +55 -0
  460. package/src/types/reranking-model.ts +6 -0
  461. package/src/types/speech-model-response-metadata.ts +21 -0
  462. package/src/types/speech-model.ts +10 -0
  463. package/src/types/transcription-model-response-metadata.ts +16 -0
  464. package/src/types/transcription-model.ts +14 -0
  465. package/src/types/usage.ts +200 -0
  466. package/src/types/video-model-response-metadata.ts +28 -0
  467. package/src/types/video-model.ts +15 -0
  468. package/src/types/warning.ts +7 -0
  469. package/src/ui/call-completion-api.ts +157 -0
  470. package/src/ui/chat-transport.ts +83 -0
  471. package/src/ui/chat.ts +786 -0
  472. package/src/ui/convert-file-list-to-file-ui-parts.ts +36 -0
  473. package/src/ui/convert-to-model-messages.ts +403 -0
  474. package/src/ui/default-chat-transport.ts +36 -0
  475. package/src/ui/direct-chat-transport.ts +117 -0
  476. package/src/ui/http-chat-transport.ts +273 -0
  477. package/src/ui/index.ts +76 -0
  478. package/src/ui/last-assistant-message-is-complete-with-approval-responses.ts +44 -0
  479. package/src/ui/last-assistant-message-is-complete-with-tool-calls.ts +39 -0
  480. package/src/ui/process-text-stream.ts +16 -0
  481. package/src/ui/process-ui-message-stream.ts +858 -0
  482. package/src/ui/text-stream-chat-transport.ts +23 -0
  483. package/src/ui/transform-text-to-ui-message-stream.ts +27 -0
  484. package/src/ui/ui-messages.ts +602 -0
  485. package/src/ui/use-completion.ts +84 -0
  486. package/src/ui/validate-ui-messages.ts +521 -0
  487. package/src/ui-message-stream/create-ui-message-stream-response.ts +44 -0
  488. package/src/ui-message-stream/create-ui-message-stream.ts +145 -0
  489. package/src/ui-message-stream/get-response-ui-message-id.ts +35 -0
  490. package/src/ui-message-stream/handle-ui-message-stream-finish.ts +170 -0
  491. package/src/ui-message-stream/index.ts +14 -0
  492. package/src/ui-message-stream/json-to-sse-transform-stream.ts +17 -0
  493. package/src/ui-message-stream/pipe-ui-message-stream-to-response.ts +51 -0
  494. package/src/ui-message-stream/read-ui-message-stream.ts +87 -0
  495. package/src/ui-message-stream/ui-message-chunks.ts +372 -0
  496. package/src/ui-message-stream/ui-message-stream-headers.ts +7 -0
  497. package/src/ui-message-stream/ui-message-stream-on-finish-callback.ts +32 -0
  498. package/src/ui-message-stream/ui-message-stream-on-step-finish-callback.ts +25 -0
  499. package/src/ui-message-stream/ui-message-stream-response-init.ts +14 -0
  500. package/src/ui-message-stream/ui-message-stream-writer.ts +24 -0
  501. package/src/util/as-array.ts +3 -0
  502. package/src/util/async-iterable-stream.ts +94 -0
  503. package/src/util/consume-stream.ts +31 -0
  504. package/src/util/cosine-similarity.ts +46 -0
  505. package/src/util/create-resolvable-promise.ts +30 -0
  506. package/src/util/create-stitchable-stream.ts +112 -0
  507. package/src/util/data-url.ts +17 -0
  508. package/src/util/deep-partial.ts +84 -0
  509. package/src/util/detect-media-type.ts +226 -0
  510. package/src/util/download/create-download.ts +13 -0
  511. package/src/util/download/download-function.ts +45 -0
  512. package/src/util/download/download.ts +74 -0
  513. package/src/util/error-handler.ts +1 -0
  514. package/src/util/fix-json.ts +401 -0
  515. package/src/util/get-potential-start-index.ts +39 -0
  516. package/src/util/index.ts +12 -0
  517. package/src/util/is-deep-equal-data.ts +48 -0
  518. package/src/util/is-non-empty-object.ts +5 -0
  519. package/src/util/job.ts +1 -0
  520. package/src/util/log-v2-compatibility-warning.ts +21 -0
  521. package/src/util/merge-abort-signals.ts +43 -0
  522. package/src/util/merge-objects.ts +79 -0
  523. package/src/util/notify.ts +22 -0
  524. package/src/util/now.ts +4 -0
  525. package/src/util/parse-partial-json.ts +30 -0
  526. package/src/util/prepare-headers.ts +14 -0
  527. package/src/util/prepare-retries.ts +47 -0
  528. package/src/util/retry-error.ts +41 -0
  529. package/src/util/retry-with-exponential-backoff.ts +154 -0
  530. package/src/util/serial-job-executor.ts +36 -0
  531. package/src/util/simulate-readable-stream.ts +39 -0
  532. package/src/util/split-array.ts +20 -0
  533. package/src/util/value-of.ts +65 -0
  534. package/src/util/write-to-server-response.ts +49 -0
  535. package/src/version.ts +5 -0
  536. package/test.d.ts +1 -0
@@ -0,0 +1,2696 @@
1
+ import {
2
+ getErrorMessage,
3
+ LanguageModelV4,
4
+ SharedV4Warning,
5
+ UnsupportedFunctionalityError,
6
+ } from '@ai-sdk/provider';
7
+ import {
8
+ createIdGenerator,
9
+ DelayedPromise,
10
+ IdGenerator,
11
+ isAbortError,
12
+ ProviderOptions,
13
+ ToolApprovalResponse,
14
+ ToolContent,
15
+ } from '@ai-sdk/provider-utils';
16
+ import { ServerResponse } from 'node:http';
17
+ import { NoOutputGeneratedError } from '../error';
18
+ import { logWarnings } from '../logger/log-warnings';
19
+ import { resolveLanguageModel } from '../model/resolve-model';
20
+ import {
21
+ CallSettings,
22
+ getChunkTimeoutMs,
23
+ getStepTimeoutMs,
24
+ getTotalTimeoutMs,
25
+ TimeoutConfiguration,
26
+ } from '../prompt/call-settings';
27
+ import { createToolModelOutput } from '../prompt/create-tool-model-output';
28
+ import { prepareCallSettings } from '../prompt/prepare-call-settings';
29
+ import { prepareToolsAndToolChoice } from '../prompt/prepare-tools-and-tool-choice';
30
+ import { Prompt } from '../prompt/prompt';
31
+ import { standardizePrompt } from '../prompt/standardize-prompt';
32
+ import { wrapGatewayError } from '../prompt/wrap-gateway-error';
33
+ import { getGlobalTelemetryIntegration } from '../telemetry/get-global-telemetry-integration';
34
+ import { TelemetrySettings } from '../telemetry/telemetry-settings';
35
+ import { createTextStreamResponse } from '../text-stream/create-text-stream-response';
36
+ import { pipeTextStreamToResponse } from '../text-stream/pipe-text-stream-to-response';
37
+ import { LanguageModelRequestMetadata } from '../types';
38
+ import {
39
+ CallWarning,
40
+ FinishReason,
41
+ LanguageModel,
42
+ ToolChoice,
43
+ } from '../types/language-model';
44
+ import { ProviderMetadata } from '../types/provider-metadata';
45
+ import {
46
+ addLanguageModelUsage,
47
+ createNullLanguageModelUsage,
48
+ LanguageModelUsage,
49
+ } from '../types/usage';
50
+ import { UIMessage } from '../ui';
51
+ import { createUIMessageStreamResponse } from '../ui-message-stream/create-ui-message-stream-response';
52
+ import { getResponseUIMessageId } from '../ui-message-stream/get-response-ui-message-id';
53
+ import { handleUIMessageStreamFinish } from '../ui-message-stream/handle-ui-message-stream-finish';
54
+ import { pipeUIMessageStreamToResponse } from '../ui-message-stream/pipe-ui-message-stream-to-response';
55
+ import {
56
+ InferUIMessageChunk,
57
+ UIMessageChunk,
58
+ } from '../ui-message-stream/ui-message-chunks';
59
+ import { UIMessageStreamResponseInit } from '../ui-message-stream/ui-message-stream-response-init';
60
+ import { InferUIMessageData, InferUIMessageMetadata } from '../ui/ui-messages';
61
+ import { asArray } from '../util/as-array';
62
+ import {
63
+ AsyncIterableStream,
64
+ createAsyncIterableStream,
65
+ } from '../util/async-iterable-stream';
66
+ import { consumeStream } from '../util/consume-stream';
67
+ import { createStitchableStream } from '../util/create-stitchable-stream';
68
+ import { DownloadFunction } from '../util/download/download-function';
69
+ import { mergeAbortSignals } from '../util/merge-abort-signals';
70
+ import { mergeObjects } from '../util/merge-objects';
71
+ import { notify } from '../util/notify';
72
+ import { now as originalNow } from '../util/now';
73
+ import { prepareRetries } from '../util/prepare-retries';
74
+ import { collectToolApprovals } from './collect-tool-approvals';
75
+ import { ContentPart } from './content-part';
76
+ import type {
77
+ OnFinishEvent,
78
+ OnStartEvent,
79
+ OnStepFinishEvent,
80
+ OnStepStartEvent,
81
+ OnToolCallFinishEvent,
82
+ OnToolCallStartEvent,
83
+ } from './core-events';
84
+ import { createExecuteToolsTransformation } from './create-execute-tools-transformation';
85
+ import { executeToolCall } from './execute-tool-call';
86
+ import { invokeToolCallbacksFromStream } from './invoke-tool-callbacks-from-stream';
87
+ import { Output, text } from './output';
88
+ import {
89
+ InferCompleteOutput,
90
+ InferElementOutput,
91
+ InferPartialOutput,
92
+ } from './output-utils';
93
+ import { PrepareStepFunction } from './prepare-step';
94
+ import { convertToReasoningOutputs } from './reasoning-output';
95
+ import { ResponseMessage } from './response-message';
96
+ import { DefaultStepResult, StepResult } from './step-result';
97
+ import {
98
+ isStopConditionMet,
99
+ stepCountIs,
100
+ StopCondition,
101
+ } from './stop-condition';
102
+ import { ModelCallStreamPart, streamModelCall } from './stream-model-call';
103
+ import {
104
+ ConsumeStreamOptions,
105
+ StreamTextResult,
106
+ TextStreamPart,
107
+ UIMessageStreamOptions,
108
+ } from './stream-text-result';
109
+ import { toResponseMessages } from './to-response-messages';
110
+ import { TypedToolCall } from './tool-call';
111
+ import { ToolCallRepairFunction } from './tool-call-repair-function';
112
+ import { ToolOutput } from './tool-output';
113
+ import { StaticToolOutputDenied } from './tool-output-denied';
114
+ import { ToolSet } from './tool-set';
115
+
116
+ const originalGenerateId = createIdGenerator({
117
+ prefix: 'aitxt',
118
+ size: 24,
119
+ });
120
+
121
+ const originalGenerateCallId = createIdGenerator({
122
+ prefix: 'call',
123
+ size: 24,
124
+ });
125
+
126
+ /**
127
+ * A transformation that is applied to the stream.
128
+ *
129
+ * @param stopStream - A function that stops the source stream.
130
+ * @param tools - The tools that are accessible to and can be called by the model. The model needs to support calling tools.
131
+ */
132
+ export type StreamTextTransform<TOOLS extends ToolSet> = (options: {
133
+ tools: TOOLS; // for type inference
134
+ stopStream: () => void;
135
+ }) => TransformStream<TextStreamPart<TOOLS>, TextStreamPart<TOOLS>>;
136
+
137
+ /**
138
+ * Callback that is set using the `onError` option.
139
+ *
140
+ * @param event - The event that is passed to the callback.
141
+ */
142
+ export type StreamTextOnErrorCallback = (event: {
143
+ error: unknown;
144
+ }) => PromiseLike<void> | void;
145
+
146
+ /**
147
+ * Callback that is set using the `onStepFinish` option.
148
+ *
149
+ * @param stepResult - The result of the step.
150
+ */
151
+ export type StreamTextOnStepFinishCallback<TOOLS extends ToolSet> = (
152
+ event: OnStepFinishEvent<TOOLS>,
153
+ ) => PromiseLike<void> | void;
154
+
155
+ /**
156
+ * Callback that is set using the `onChunk` option.
157
+ *
158
+ * @param event - The event that is passed to the callback.
159
+ */
160
+ export type StreamTextOnChunkCallback<TOOLS extends ToolSet> = (event: {
161
+ chunk: Extract<
162
+ TextStreamPart<TOOLS>,
163
+ {
164
+ type:
165
+ | 'text-delta'
166
+ | 'reasoning-delta'
167
+ | 'custom'
168
+ | 'source'
169
+ | 'tool-call'
170
+ | 'tool-input-start'
171
+ | 'tool-input-delta'
172
+ | 'tool-result'
173
+ | 'raw';
174
+ }
175
+ >;
176
+ }) => PromiseLike<void> | void;
177
+
178
+ /**
179
+ * Callback that is set using the `onFinish` option.
180
+ *
181
+ * @param event - The event that is passed to the callback.
182
+ */
183
+ export type StreamTextOnFinishCallback<TOOLS extends ToolSet> = (
184
+ event: OnFinishEvent<TOOLS>,
185
+ ) => PromiseLike<void> | void;
186
+
187
+ /**
188
+ * Callback that is set using the `onAbort` option.
189
+ *
190
+ * @param event - The event that is passed to the callback.
191
+ */
192
+ export type StreamTextOnAbortCallback<TOOLS extends ToolSet> = (event: {
193
+ /**
194
+ * Details for all previously finished steps.
195
+ */
196
+ readonly steps: StepResult<TOOLS>[];
197
+ }) => PromiseLike<void> | void;
198
+
199
+ /**
200
+ * Include settings for streamText (requestBody only).
201
+ */
202
+ type StreamTextIncludeSettings = { requestBody?: boolean };
203
+
204
+ /**
205
+ * Callback that is set using the `experimental_onStart` option.
206
+ *
207
+ * Called when the streamText operation begins, before any LLM calls.
208
+ * Use this callback for logging, analytics, or initializing state at the
209
+ * start of a generation.
210
+ *
211
+ * @param event - The event object containing generation configuration.
212
+ */
213
+ export type StreamTextOnStartCallback<
214
+ TOOLS extends ToolSet = ToolSet,
215
+ OUTPUT extends Output = Output,
216
+ > = (
217
+ event: OnStartEvent<TOOLS, OUTPUT, StreamTextIncludeSettings>,
218
+ ) => PromiseLike<void> | void;
219
+
220
+ /**
221
+ * Callback that is set using the `experimental_onStepStart` option.
222
+ *
223
+ * Called when a step (LLM call) begins, before the provider is called.
224
+ * Each step represents a single LLM invocation. Multiple steps occur when
225
+ * using tool calls (the model may be called multiple times in a loop).
226
+ *
227
+ * @param event - The event object containing step configuration.
228
+ */
229
+ export type StreamTextOnStepStartCallback<
230
+ TOOLS extends ToolSet = ToolSet,
231
+ OUTPUT extends Output = Output,
232
+ > = (
233
+ event: OnStepStartEvent<TOOLS, OUTPUT, StreamTextIncludeSettings>,
234
+ ) => PromiseLike<void> | void;
235
+
236
+ export type StreamTextOnToolCallStartCallback<TOOLS extends ToolSet = ToolSet> =
237
+ (event: OnToolCallStartEvent<TOOLS>) => PromiseLike<void> | void;
238
+
239
+ export type StreamTextOnToolCallFinishCallback<
240
+ TOOLS extends ToolSet = ToolSet,
241
+ > = (event: OnToolCallFinishEvent<TOOLS>) => PromiseLike<void> | void;
242
+
243
+ /**
244
+ * Generate a text and call tools for a given prompt using a language model.
245
+ *
246
+ * This function streams the output. If you do not want to stream the output, use `generateText` instead.
247
+ *
248
+ * @param model - The language model to use.
249
+ * @param tools - Tools that are accessible to and can be called by the model. The model needs to support calling tools.
250
+ *
251
+ * @param system - A system message that will be part of the prompt.
252
+ * @param prompt - A simple text prompt. You can either use `prompt` or `messages` but not both.
253
+ * @param messages - A list of messages. You can either use `prompt` or `messages` but not both.
254
+ *
255
+ * @param maxOutputTokens - Maximum number of tokens to generate.
256
+ * @param temperature - Temperature setting.
257
+ * The value is passed through to the provider. The range depends on the provider and model.
258
+ * It is recommended to set either `temperature` or `topP`, but not both.
259
+ * @param topP - Nucleus sampling.
260
+ * The value is passed through to the provider. The range depends on the provider and model.
261
+ * It is recommended to set either `temperature` or `topP`, but not both.
262
+ * @param topK - Only sample from the top K options for each subsequent token.
263
+ * Used to remove "long tail" low probability responses.
264
+ * Recommended for advanced use cases only. You usually only need to use temperature.
265
+ * @param presencePenalty - Presence penalty setting.
266
+ * It affects the likelihood of the model to repeat information that is already in the prompt.
267
+ * The value is passed through to the provider. The range depends on the provider and model.
268
+ * @param frequencyPenalty - Frequency penalty setting.
269
+ * It affects the likelihood of the model to repeatedly use the same words or phrases.
270
+ * The value is passed through to the provider. The range depends on the provider and model.
271
+ * @param stopSequences - Stop sequences.
272
+ * If set, the model will stop generating text when one of the stop sequences is generated.
273
+ * @param seed - The seed (integer) to use for random sampling.
274
+ * If set and supported by the model, calls will generate deterministic results.
275
+ *
276
+ * @param maxRetries - Maximum number of retries. Set to 0 to disable retries. Default: 2.
277
+ * @param abortSignal - An optional abort signal that can be used to cancel the call.
278
+ * @param timeout - An optional timeout in milliseconds. The call will be aborted if it takes longer than the specified timeout.
279
+ * @param headers - Additional HTTP headers to be sent with the request. Only applicable for HTTP-based providers.
280
+ *
281
+ * @param onChunk - Callback that is called for each chunk of the stream. The stream processing will pause until the callback promise is resolved.
282
+ * @param onError - Callback that is called when an error occurs during streaming. You can use it to log errors.
283
+ * @param onStepFinish - Callback that is called when each step (LLM call) is finished, including intermediate steps.
284
+ * @param onFinish - Callback that is called when all steps are finished and the response is complete.
285
+ *
286
+ * @returns
287
+ * A result object for accessing different stream types and additional information.
288
+ */
289
+ export function streamText<
290
+ TOOLS extends ToolSet,
291
+ OUTPUT extends Output = Output<string, string, never>,
292
+ >({
293
+ model,
294
+ tools,
295
+ toolChoice,
296
+ system,
297
+ prompt,
298
+ messages,
299
+ maxRetries,
300
+ abortSignal,
301
+ timeout,
302
+ headers,
303
+ stopWhen = stepCountIs(1),
304
+ experimental_output,
305
+ output = experimental_output,
306
+ experimental_telemetry: telemetry,
307
+ prepareStep,
308
+ providerOptions,
309
+ experimental_activeTools,
310
+ activeTools = experimental_activeTools,
311
+ experimental_repairToolCall: repairToolCall,
312
+ experimental_transform: transform,
313
+ experimental_download: download,
314
+ includeRawChunks = false,
315
+ onChunk,
316
+ onError = ({ error }) => {
317
+ console.error(error);
318
+ },
319
+ onFinish,
320
+ onAbort,
321
+ onStepFinish,
322
+ experimental_onStart: onStart,
323
+ experimental_onStepStart: onStepStart,
324
+ experimental_onToolCallStart: onToolCallStart,
325
+ experimental_onToolCallFinish: onToolCallFinish,
326
+ experimental_context,
327
+ experimental_include: include,
328
+ experimental_fireAndForgetToolNames,
329
+ _internal: {
330
+ now = originalNow,
331
+ generateId = originalGenerateId,
332
+ generateCallId = originalGenerateCallId,
333
+ } = {},
334
+ ...settings
335
+ }: CallSettings &
336
+ Prompt & {
337
+ /**
338
+ * The language model to use.
339
+ */
340
+ model: LanguageModel;
341
+
342
+ /**
343
+ * Timeout in milliseconds. The call will be aborted if it takes longer
344
+ * than the specified timeout. Can be used alongside abortSignal.
345
+ *
346
+ * Can be specified as a number (milliseconds) or as an object with `totalMs`.
347
+ */
348
+ timeout?: TimeoutConfiguration<TOOLS>;
349
+
350
+ /**
351
+ * The tools that the model can call. The model needs to support calling tools.
352
+ */
353
+ tools?: TOOLS;
354
+
355
+ /**
356
+ * The tool choice strategy. Default: 'auto'.
357
+ */
358
+ toolChoice?: ToolChoice<TOOLS>;
359
+
360
+ /**
361
+ * Condition for stopping the generation when there are tool results in the last step.
362
+ * When the condition is an array, any of the conditions can be met to stop the generation.
363
+ *
364
+ * @default stepCountIs(1)
365
+ */
366
+ stopWhen?:
367
+ | StopCondition<NoInfer<TOOLS>>
368
+ | Array<StopCondition<NoInfer<TOOLS>>>;
369
+
370
+ /**
371
+ * Optional telemetry configuration (experimental).
372
+ */
373
+ experimental_telemetry?: TelemetrySettings;
374
+
375
+ /**
376
+ * Additional provider-specific options. They are passed through
377
+ * to the provider from the AI SDK and enable provider-specific
378
+ * functionality that can be fully encapsulated in the provider.
379
+ */
380
+ providerOptions?: ProviderOptions;
381
+
382
+ /**
383
+ * @deprecated Use `activeTools` instead.
384
+ */
385
+ experimental_activeTools?: Array<keyof NoInfer<TOOLS>>;
386
+
387
+ /**
388
+ * Limits the tools that are available for the model to call without
389
+ * changing the tool call and result types in the result.
390
+ */
391
+ activeTools?: Array<keyof NoInfer<TOOLS>>;
392
+
393
+ /**
394
+ * Optional specification for parsing structured outputs from the LLM response.
395
+ */
396
+ output?: OUTPUT;
397
+
398
+ /**
399
+ * Optional specification for parsing structured outputs from the LLM response.
400
+ *
401
+ * @deprecated Use `output` instead.
402
+ */
403
+ experimental_output?: OUTPUT;
404
+
405
+ /**
406
+ * Optional function that you can use to provide different settings for a step.
407
+ *
408
+ * @param options - The options for the step.
409
+ * @param options.steps - The steps that have been executed so far.
410
+ * @param options.stepNumber - The number of the step that is being executed.
411
+ * @param options.model - The model that is being used.
412
+ *
413
+ * @returns An object that contains the settings for the step.
414
+ * If you return undefined (or for undefined settings), the settings from the outer level will be used.
415
+ */
416
+ prepareStep?: PrepareStepFunction<NoInfer<TOOLS>>;
417
+
418
+ /**
419
+ * A function that attempts to repair a tool call that failed to parse.
420
+ */
421
+ experimental_repairToolCall?: ToolCallRepairFunction<TOOLS>;
422
+
423
+ /**
424
+ * Optional stream transformations.
425
+ * They are applied in the order they are provided.
426
+ * The stream transformations must maintain the stream structure for streamText to work correctly.
427
+ */
428
+ experimental_transform?:
429
+ | StreamTextTransform<TOOLS>
430
+ | Array<StreamTextTransform<TOOLS>>;
431
+
432
+ /**
433
+ * Custom download function to use for URLs.
434
+ *
435
+ * By default, files are downloaded if the model does not support the URL for the given media type.
436
+ */
437
+ experimental_download?: DownloadFunction | undefined;
438
+
439
+ /**
440
+ * Whether to include raw chunks from the provider in the stream.
441
+ * When enabled, you will receive raw chunks with type 'raw' that contain the unprocessed data from the provider.
442
+ * This allows access to cutting-edge provider features not yet wrapped by the AI SDK.
443
+ * Defaults to false.
444
+ */
445
+ includeRawChunks?: boolean;
446
+
447
+ /**
448
+ * Callback that is called for each chunk of the stream.
449
+ * The stream processing will pause until the callback promise is resolved.
450
+ */
451
+ onChunk?: StreamTextOnChunkCallback<TOOLS>;
452
+
453
+ /**
454
+ * Callback that is invoked when an error occurs during streaming.
455
+ * You can use it to log errors.
456
+ * The stream processing will pause until the callback promise is resolved.
457
+ */
458
+ onError?: StreamTextOnErrorCallback;
459
+
460
+ /**
461
+ * Callback that is called when the LLM response and all request tool executions
462
+ * (for tools that have an `execute` function) are finished.
463
+ *
464
+ * The usage is the combined usage of all steps.
465
+ */
466
+ onFinish?: StreamTextOnFinishCallback<TOOLS>;
467
+
468
+ onAbort?: StreamTextOnAbortCallback<TOOLS>;
469
+
470
+ /**
471
+ * Callback that is called when each step (LLM call) is finished, including intermediate steps.
472
+ */
473
+ onStepFinish?: StreamTextOnStepFinishCallback<TOOLS>;
474
+
475
+ /**
476
+ * Callback that is called when the streamText operation begins,
477
+ * before any LLM calls are made.
478
+ */
479
+ experimental_onStart?: StreamTextOnStartCallback<NoInfer<TOOLS>, OUTPUT>;
480
+
481
+ /**
482
+ * Callback that is called when a step (LLM call) begins,
483
+ * before the provider is called.
484
+ */
485
+ experimental_onStepStart?: StreamTextOnStepStartCallback<
486
+ NoInfer<TOOLS>,
487
+ OUTPUT
488
+ >;
489
+
490
+ /**
491
+ * Callback that is called right before a tool's execute function runs.
492
+ */
493
+ experimental_onToolCallStart?: StreamTextOnToolCallStartCallback<
494
+ NoInfer<TOOLS>
495
+ >;
496
+
497
+ /**
498
+ * Callback that is called right after a tool's execute function completes (or errors).
499
+ */
500
+ experimental_onToolCallFinish?: StreamTextOnToolCallFinishCallback<
501
+ NoInfer<TOOLS>
502
+ >;
503
+
504
+ /**
505
+ * Context that is passed into tool execution.
506
+ *
507
+ * Experimental (can break in patch releases).
508
+ *
509
+ * @default undefined
510
+ */
511
+ experimental_context?: unknown;
512
+
513
+ /**
514
+ * Tool names that are treated as fire-and-forget (action tools).
515
+ * These tools are still executed for their side effects, but:
516
+ * - Their calls do not trigger a follow-up generation step
517
+ * - Their tool-call and tool-result are stripped from messages
518
+ * sent to the model in subsequent steps
519
+ *
520
+ * Use this for tools that perform side effects (e.g. setting a title,
521
+ * logging, notifications) whose results do not need to be incorporated
522
+ * into the model's response.
523
+ */
524
+ experimental_fireAndForgetToolNames?: Array<keyof NoInfer<TOOLS>>;
525
+
526
+ /**
527
+ * Settings for controlling what data is included in step results.
528
+ * Disabling inclusion can help reduce memory usage when processing
529
+ * large payloads like images.
530
+ *
531
+ * By default, all data is included for backwards compatibility.
532
+ */
533
+ experimental_include?: {
534
+ /**
535
+ * Whether to retain the request body in step results.
536
+ * The request body can be large when sending images or files.
537
+ * @default true
538
+ */
539
+ requestBody?: boolean;
540
+ };
541
+
542
+ /**
543
+ * Internal. For test use only. May change without notice.
544
+ */
545
+ _internal?: {
546
+ now?: () => number;
547
+ generateId?: IdGenerator;
548
+ generateCallId?: IdGenerator;
549
+ };
550
+ }): StreamTextResult<TOOLS, OUTPUT> {
551
+ const totalTimeoutMs = getTotalTimeoutMs(timeout);
552
+ const stepTimeoutMs = getStepTimeoutMs(timeout);
553
+ const chunkTimeoutMs = getChunkTimeoutMs(timeout);
554
+ const stepAbortController =
555
+ stepTimeoutMs != null ? new AbortController() : undefined;
556
+ const chunkAbortController =
557
+ chunkTimeoutMs != null ? new AbortController() : undefined;
558
+ return new DefaultStreamTextResult<TOOLS, OUTPUT>({
559
+ model: resolveLanguageModel(model),
560
+ telemetry,
561
+ headers,
562
+ settings,
563
+ maxRetries,
564
+ abortSignal: mergeAbortSignals(
565
+ abortSignal,
566
+ totalTimeoutMs != null ? AbortSignal.timeout(totalTimeoutMs) : undefined,
567
+ stepAbortController?.signal,
568
+ chunkAbortController?.signal,
569
+ ),
570
+ stepTimeoutMs,
571
+ stepAbortController,
572
+ chunkTimeoutMs,
573
+ chunkAbortController,
574
+ system,
575
+ prompt,
576
+ messages,
577
+ tools,
578
+ toolChoice,
579
+ transforms: asArray(transform),
580
+ activeTools,
581
+ repairToolCall,
582
+ stopConditions: asArray(stopWhen),
583
+ output,
584
+ providerOptions,
585
+ prepareStep,
586
+ includeRawChunks,
587
+ timeout,
588
+ stopWhen,
589
+ originalAbortSignal: abortSignal,
590
+ onChunk,
591
+ onError,
592
+ onFinish,
593
+ onAbort,
594
+ onStepFinish,
595
+ onStart,
596
+ onStepStart,
597
+ onToolCallStart,
598
+ onToolCallFinish,
599
+ now,
600
+ generateId,
601
+ generateCallId,
602
+ experimental_context,
603
+ download,
604
+ include,
605
+ fireAndForgetTools: new Set(
606
+ experimental_fireAndForgetToolNames?.map(String) ?? [],
607
+ ),
608
+ });
609
+ }
610
+
611
+ export type EnrichedStreamPart<TOOLS extends ToolSet, PARTIAL_OUTPUT> = {
612
+ part: TextStreamPart<TOOLS>;
613
+ partialOutput: PARTIAL_OUTPUT | undefined;
614
+ };
615
+
616
+ function createOutputTransformStream<
617
+ TOOLS extends ToolSet,
618
+ OUTPUT extends Output,
619
+ >(
620
+ output: OUTPUT,
621
+ ): TransformStream<
622
+ TextStreamPart<TOOLS>,
623
+ EnrichedStreamPart<TOOLS, InferPartialOutput<OUTPUT>>
624
+ > {
625
+ let firstTextChunkId: string | undefined = undefined;
626
+ let text = '';
627
+ let textChunk = '';
628
+ let textProviderMetadata: ProviderMetadata | undefined = undefined;
629
+ let lastPublishedJson = '';
630
+
631
+ function publishTextChunk({
632
+ controller,
633
+ partialOutput = undefined,
634
+ }: {
635
+ controller: TransformStreamDefaultController<
636
+ EnrichedStreamPart<TOOLS, InferPartialOutput<OUTPUT>>
637
+ >;
638
+ partialOutput?: InferPartialOutput<OUTPUT>;
639
+ }) {
640
+ controller.enqueue({
641
+ part: {
642
+ type: 'text-delta',
643
+ id: firstTextChunkId!,
644
+ text: textChunk,
645
+ providerMetadata: textProviderMetadata,
646
+ },
647
+ partialOutput,
648
+ });
649
+ textChunk = '';
650
+ }
651
+
652
+ return new TransformStream<
653
+ TextStreamPart<TOOLS>,
654
+ EnrichedStreamPart<TOOLS, InferPartialOutput<OUTPUT>>
655
+ >({
656
+ async transform(chunk, controller) {
657
+ // ensure that we publish the last text chunk before the step finish:
658
+ if (chunk.type === 'finish-step' && textChunk.length > 0) {
659
+ publishTextChunk({ controller });
660
+ }
661
+
662
+ if (
663
+ chunk.type !== 'text-delta' &&
664
+ chunk.type !== 'text-start' &&
665
+ chunk.type !== 'text-end'
666
+ ) {
667
+ controller.enqueue({ part: chunk, partialOutput: undefined });
668
+ return;
669
+ }
670
+
671
+ // we have to pick a text chunk which contains the json text
672
+ // since we are streaming, we have to pick the first text chunk
673
+ if (firstTextChunkId == null) {
674
+ firstTextChunkId = chunk.id;
675
+ } else if (chunk.id !== firstTextChunkId) {
676
+ controller.enqueue({ part: chunk, partialOutput: undefined });
677
+ return;
678
+ }
679
+
680
+ if (chunk.type === 'text-start') {
681
+ controller.enqueue({ part: chunk, partialOutput: undefined });
682
+ return;
683
+ }
684
+
685
+ if (chunk.type === 'text-end') {
686
+ if (textChunk.length > 0) {
687
+ publishTextChunk({ controller });
688
+ }
689
+ controller.enqueue({ part: chunk, partialOutput: undefined });
690
+ return;
691
+ }
692
+
693
+ text += chunk.text;
694
+ textChunk += chunk.text;
695
+ textProviderMetadata = chunk.providerMetadata ?? textProviderMetadata;
696
+
697
+ // only publish if partial json can be parsed:
698
+ const result = await output.parsePartialOutput({ text });
699
+
700
+ // null should be allowed (valid JSON value) but undefined should not:
701
+ if (result !== undefined) {
702
+ // only send new json if it has changed:
703
+ const currentJson = JSON.stringify(result.partial);
704
+ if (currentJson !== lastPublishedJson) {
705
+ publishTextChunk({ controller, partialOutput: result.partial });
706
+ lastPublishedJson = currentJson;
707
+ }
708
+ }
709
+ },
710
+ });
711
+ }
712
+
713
+ class DefaultStreamTextResult<
714
+ TOOLS extends ToolSet,
715
+ OUTPUT extends Output,
716
+ > implements StreamTextResult<TOOLS, OUTPUT> {
717
+ private readonly _totalUsage = new DelayedPromise<
718
+ Awaited<StreamTextResult<TOOLS, OUTPUT>['usage']>
719
+ >();
720
+ private readonly _finishReason = new DelayedPromise<
721
+ Awaited<StreamTextResult<TOOLS, OUTPUT>['finishReason']>
722
+ >();
723
+ private readonly _rawFinishReason = new DelayedPromise<
724
+ Awaited<StreamTextResult<TOOLS, OUTPUT>['rawFinishReason']>
725
+ >();
726
+ private readonly _steps = new DelayedPromise<
727
+ Awaited<StreamTextResult<TOOLS, OUTPUT>['steps']>
728
+ >();
729
+
730
+ private readonly addStream: (
731
+ stream: ReadableStream<TextStreamPart<TOOLS>>,
732
+ ) => void;
733
+
734
+ private readonly closeStream: () => void;
735
+
736
+ private baseStream: ReadableStream<
737
+ EnrichedStreamPart<TOOLS, InferPartialOutput<OUTPUT>>
738
+ >;
739
+
740
+ private outputSpecification: OUTPUT | undefined;
741
+
742
+ private includeRawChunks: boolean;
743
+
744
+ private tools: TOOLS | undefined;
745
+
746
+ constructor({
747
+ model,
748
+ telemetry,
749
+ headers,
750
+ settings,
751
+ maxRetries: maxRetriesArg,
752
+ abortSignal,
753
+ stepTimeoutMs,
754
+ stepAbortController,
755
+ chunkTimeoutMs,
756
+ chunkAbortController,
757
+ system,
758
+ prompt,
759
+ messages,
760
+ tools,
761
+ toolChoice,
762
+ transforms,
763
+ activeTools,
764
+ repairToolCall,
765
+ stopConditions,
766
+ output,
767
+ providerOptions,
768
+ prepareStep,
769
+ includeRawChunks,
770
+ now,
771
+ generateId,
772
+ generateCallId,
773
+ timeout,
774
+ stopWhen,
775
+ originalAbortSignal,
776
+ onChunk,
777
+ onError,
778
+ onFinish,
779
+ onAbort,
780
+ onStepFinish,
781
+ onStart,
782
+ onStepStart,
783
+ onToolCallStart,
784
+ onToolCallFinish,
785
+ experimental_context,
786
+ download,
787
+ include,
788
+ fireAndForgetTools,
789
+ }: {
790
+ model: LanguageModelV4;
791
+ telemetry: TelemetrySettings | undefined;
792
+ headers: Record<string, string | undefined> | undefined;
793
+ settings: Omit<CallSettings, 'abortSignal' | 'headers'>;
794
+ maxRetries: number | undefined;
795
+ abortSignal: AbortSignal | undefined;
796
+ stepTimeoutMs: number | undefined;
797
+ stepAbortController: AbortController | undefined;
798
+ chunkTimeoutMs: number | undefined;
799
+ chunkAbortController: AbortController | undefined;
800
+ system: Prompt['system'];
801
+ prompt: Prompt['prompt'];
802
+ messages: Prompt['messages'];
803
+ tools: TOOLS | undefined;
804
+ toolChoice: ToolChoice<TOOLS> | undefined;
805
+ transforms: Array<StreamTextTransform<TOOLS>>;
806
+ activeTools: Array<keyof TOOLS> | undefined;
807
+ repairToolCall: ToolCallRepairFunction<TOOLS> | undefined;
808
+ stopConditions: Array<StopCondition<NoInfer<TOOLS>>>;
809
+ output: OUTPUT | undefined;
810
+ providerOptions: ProviderOptions | undefined;
811
+ prepareStep: PrepareStepFunction<NoInfer<TOOLS>> | undefined;
812
+ includeRawChunks: boolean;
813
+ now: () => number;
814
+ generateId: () => string;
815
+ generateCallId: () => string;
816
+ timeout: TimeoutConfiguration<TOOLS> | undefined;
817
+ stopWhen:
818
+ | StopCondition<NoInfer<TOOLS>>
819
+ | Array<StopCondition<NoInfer<TOOLS>>>
820
+ | undefined;
821
+ originalAbortSignal: AbortSignal | undefined;
822
+ experimental_context: unknown;
823
+ download: DownloadFunction | undefined;
824
+ include: { requestBody?: boolean } | undefined;
825
+ fireAndForgetTools: Set<string>;
826
+
827
+ // callbacks:
828
+ onChunk: undefined | StreamTextOnChunkCallback<TOOLS>;
829
+ onError: StreamTextOnErrorCallback;
830
+ onFinish: undefined | StreamTextOnFinishCallback<TOOLS>;
831
+ onAbort: undefined | StreamTextOnAbortCallback<TOOLS>;
832
+ onStepFinish: undefined | StreamTextOnStepFinishCallback<TOOLS>;
833
+ onStart: undefined | StreamTextOnStartCallback<TOOLS, OUTPUT>;
834
+ onStepStart: undefined | StreamTextOnStepStartCallback<TOOLS, OUTPUT>;
835
+ onToolCallStart: undefined | StreamTextOnToolCallStartCallback<TOOLS>;
836
+ onToolCallFinish: undefined | StreamTextOnToolCallFinishCallback<TOOLS>;
837
+ }) {
838
+ this.outputSpecification = output;
839
+ this.includeRawChunks = includeRawChunks;
840
+ this.tools = tools;
841
+
842
+ const createGlobalTelemetry = getGlobalTelemetryIntegration<
843
+ TOOLS,
844
+ OUTPUT
845
+ >();
846
+ const globalTelemetry = createGlobalTelemetry({
847
+ integrations: telemetry?.integrations,
848
+ });
849
+
850
+ // promise to ensure that the step has been fully processed by the event processor
851
+ // before a new step is started. This is required because the continuation condition
852
+ // needs the updated steps to determine if another step is needed.
853
+ let stepFinish!: DelayedPromise<void>;
854
+
855
+ let recordedContent: Array<ContentPart<TOOLS>> = [];
856
+ const recordedResponseMessages: Array<ResponseMessage> = [];
857
+ let recordedFinishReason: FinishReason | undefined = undefined;
858
+ let recordedRawFinishReason: string | undefined = undefined;
859
+ let recordedTotalUsage: LanguageModelUsage | undefined = undefined;
860
+ let recordedRequest: LanguageModelRequestMetadata = {};
861
+ let recordedWarnings: Array<CallWarning> = [];
862
+ const recordedSteps: StepResult<TOOLS>[] = [];
863
+
864
+ // Track provider-executed tool calls that support deferred results
865
+ // (e.g., code_execution in programmatic tool calling scenarios).
866
+ // These tools may not return their results in the same turn as their call.
867
+ const pendingDeferredToolCalls = new Map<string, { toolName: string }>();
868
+
869
+ let activeTextContent: Record<
870
+ string,
871
+ {
872
+ type: 'text';
873
+ text: string;
874
+ providerMetadata: ProviderMetadata | undefined;
875
+ }
876
+ > = {};
877
+
878
+ let activeReasoningContent: Record<
879
+ string,
880
+ {
881
+ type: 'reasoning';
882
+ text: string;
883
+ providerMetadata: ProviderMetadata | undefined;
884
+ }
885
+ > = {};
886
+
887
+ const eventProcessor = new TransformStream<
888
+ EnrichedStreamPart<TOOLS, InferPartialOutput<OUTPUT>>,
889
+ EnrichedStreamPart<TOOLS, InferPartialOutput<OUTPUT>>
890
+ >({
891
+ async transform(chunk, controller) {
892
+ controller.enqueue(chunk); // forward the chunk to the next stream
893
+
894
+ const { part } = chunk;
895
+
896
+ if (
897
+ part.type === 'text-delta' ||
898
+ part.type === 'reasoning-delta' ||
899
+ part.type === 'custom' ||
900
+ part.type === 'source' ||
901
+ part.type === 'tool-call' ||
902
+ part.type === 'tool-result' ||
903
+ part.type === 'tool-input-start' ||
904
+ part.type === 'tool-input-delta' ||
905
+ part.type === 'raw'
906
+ ) {
907
+ await onChunk?.({ chunk: part });
908
+ }
909
+
910
+ if (part.type === 'error') {
911
+ await onError({ error: wrapGatewayError(part.error) });
912
+ }
913
+
914
+ if (part.type === 'text-start') {
915
+ activeTextContent[part.id] = {
916
+ type: 'text',
917
+ text: '',
918
+ providerMetadata: part.providerMetadata,
919
+ };
920
+
921
+ recordedContent.push(activeTextContent[part.id]);
922
+ }
923
+
924
+ if (part.type === 'text-delta') {
925
+ const activeText = activeTextContent[part.id];
926
+
927
+ if (activeText == null) {
928
+ controller.enqueue({
929
+ part: {
930
+ type: 'error',
931
+ error: `text part ${part.id} not found`,
932
+ },
933
+ partialOutput: undefined,
934
+ });
935
+ return;
936
+ }
937
+
938
+ activeText.text += part.text;
939
+ activeText.providerMetadata =
940
+ part.providerMetadata ?? activeText.providerMetadata;
941
+ }
942
+
943
+ if (part.type === 'text-end') {
944
+ const activeText = activeTextContent[part.id];
945
+
946
+ if (activeText == null) {
947
+ controller.enqueue({
948
+ part: {
949
+ type: 'error',
950
+ error: `text part ${part.id} not found`,
951
+ },
952
+ partialOutput: undefined,
953
+ });
954
+ return;
955
+ }
956
+
957
+ activeText.providerMetadata =
958
+ part.providerMetadata ?? activeText.providerMetadata;
959
+
960
+ delete activeTextContent[part.id];
961
+ }
962
+
963
+ if (part.type === 'reasoning-start') {
964
+ activeReasoningContent[part.id] = {
965
+ type: 'reasoning',
966
+ text: '',
967
+ providerMetadata: part.providerMetadata,
968
+ };
969
+
970
+ recordedContent.push(activeReasoningContent[part.id]);
971
+ }
972
+
973
+ if (part.type === 'reasoning-delta') {
974
+ const activeReasoning = activeReasoningContent[part.id];
975
+
976
+ if (activeReasoning == null) {
977
+ controller.enqueue({
978
+ part: {
979
+ type: 'error',
980
+ error: `reasoning part ${part.id} not found`,
981
+ },
982
+ partialOutput: undefined,
983
+ });
984
+ return;
985
+ }
986
+
987
+ activeReasoning.text += part.text;
988
+ activeReasoning.providerMetadata =
989
+ part.providerMetadata ?? activeReasoning.providerMetadata;
990
+ }
991
+
992
+ if (part.type === 'reasoning-end') {
993
+ const activeReasoning = activeReasoningContent[part.id];
994
+
995
+ if (activeReasoning == null) {
996
+ controller.enqueue({
997
+ part: {
998
+ type: 'error',
999
+ error: `reasoning part ${part.id} not found`,
1000
+ },
1001
+ partialOutput: undefined,
1002
+ });
1003
+ return;
1004
+ }
1005
+
1006
+ activeReasoning.providerMetadata =
1007
+ part.providerMetadata ?? activeReasoning.providerMetadata;
1008
+
1009
+ delete activeReasoningContent[part.id];
1010
+ }
1011
+
1012
+ if (part.type === 'file' || part.type === 'reasoning-file') {
1013
+ recordedContent.push({
1014
+ type: part.type,
1015
+ file: part.file,
1016
+ ...(part.providerMetadata != null
1017
+ ? { providerMetadata: part.providerMetadata }
1018
+ : {}),
1019
+ });
1020
+ }
1021
+
1022
+ if (part.type === 'custom') {
1023
+ recordedContent.push(part);
1024
+ }
1025
+
1026
+ if (part.type === 'source') {
1027
+ recordedContent.push(part);
1028
+ }
1029
+
1030
+ if (part.type === 'tool-call') {
1031
+ recordedContent.push(part);
1032
+ }
1033
+
1034
+ if (part.type === 'tool-result' && !part.preliminary) {
1035
+ recordedContent.push(part);
1036
+ }
1037
+
1038
+ if (part.type === 'tool-approval-request') {
1039
+ recordedContent.push(part);
1040
+ }
1041
+
1042
+ if (part.type === 'tool-error') {
1043
+ recordedContent.push(part);
1044
+ }
1045
+
1046
+ if (part.type === 'start-step') {
1047
+ // reset the recorded data when a new step starts:
1048
+ recordedContent = [];
1049
+ activeReasoningContent = {};
1050
+ activeTextContent = {};
1051
+
1052
+ recordedRequest = part.request;
1053
+ recordedWarnings = part.warnings;
1054
+ }
1055
+
1056
+ if (part.type === 'finish-step') {
1057
+ const stepMessages = await toResponseMessages({
1058
+ content: recordedContent,
1059
+ tools,
1060
+ fireAndForgetTools,
1061
+ });
1062
+
1063
+ // Add step information (after response messages are updated):
1064
+ const currentStepResult: StepResult<TOOLS> = new DefaultStepResult({
1065
+ callId,
1066
+ stepNumber: recordedSteps.length,
1067
+ provider: model.provider,
1068
+ modelId: model.modelId,
1069
+ ...callbackTelemetryProps,
1070
+ experimental_context,
1071
+ content: recordedContent,
1072
+ finishReason: part.finishReason,
1073
+ rawFinishReason: part.rawFinishReason,
1074
+ usage: part.usage,
1075
+ warnings: recordedWarnings,
1076
+ request: recordedRequest,
1077
+ response: {
1078
+ ...part.response,
1079
+ messages: [...recordedResponseMessages, ...stepMessages],
1080
+ },
1081
+ providerMetadata: part.providerMetadata,
1082
+ });
1083
+
1084
+ await notify({
1085
+ event: currentStepResult,
1086
+ callbacks: [onStepFinish, globalTelemetry.onStepFinish],
1087
+ });
1088
+
1089
+ logWarnings({
1090
+ warnings: recordedWarnings,
1091
+ provider: model.provider,
1092
+ model: model.modelId,
1093
+ });
1094
+
1095
+ recordedSteps.push(currentStepResult);
1096
+
1097
+ recordedResponseMessages.push(...stepMessages);
1098
+
1099
+ // resolve the promise to signal that the step has been fully processed
1100
+ // by the event processor:
1101
+ stepFinish.resolve();
1102
+ }
1103
+
1104
+ if (part.type === 'finish') {
1105
+ recordedTotalUsage = part.totalUsage;
1106
+ recordedFinishReason = part.finishReason;
1107
+ recordedRawFinishReason = part.rawFinishReason;
1108
+ }
1109
+ },
1110
+
1111
+ async flush(controller) {
1112
+ try {
1113
+ if (recordedSteps.length === 0) {
1114
+ const error = abortSignal?.aborted
1115
+ ? abortSignal.reason
1116
+ : new NoOutputGeneratedError({
1117
+ message: 'No output generated. Check the stream for errors.',
1118
+ });
1119
+
1120
+ self._finishReason.reject(error);
1121
+ self._rawFinishReason.reject(error);
1122
+ self._totalUsage.reject(error);
1123
+ self._steps.reject(error);
1124
+
1125
+ return; // no steps recorded (e.g. in error scenario)
1126
+ }
1127
+
1128
+ // derived:
1129
+ const finishReason = recordedFinishReason ?? 'other';
1130
+ const totalUsage =
1131
+ recordedTotalUsage ?? createNullLanguageModelUsage();
1132
+
1133
+ // from finish:
1134
+ self._finishReason.resolve(finishReason);
1135
+ self._rawFinishReason.resolve(recordedRawFinishReason);
1136
+ self._totalUsage.resolve(totalUsage);
1137
+
1138
+ // aggregate results:
1139
+ self._steps.resolve(recordedSteps);
1140
+
1141
+ // call onFinish callback:
1142
+ const finalStep = recordedSteps[recordedSteps.length - 1];
1143
+
1144
+ await notify({
1145
+ event: {
1146
+ callId,
1147
+ stepNumber: finalStep.stepNumber,
1148
+ model: finalStep.model,
1149
+ functionId: finalStep.functionId,
1150
+ metadata: finalStep.metadata,
1151
+ experimental_context: finalStep.experimental_context,
1152
+ finishReason: finalStep.finishReason,
1153
+ rawFinishReason: finalStep.rawFinishReason,
1154
+ totalUsage,
1155
+ usage: finalStep.usage,
1156
+ content: finalStep.content,
1157
+ text: finalStep.text,
1158
+ reasoningText: finalStep.reasoningText,
1159
+ reasoning: finalStep.reasoning,
1160
+ files: finalStep.files,
1161
+ sources: finalStep.sources,
1162
+ toolCalls: finalStep.toolCalls,
1163
+ staticToolCalls: finalStep.staticToolCalls,
1164
+ dynamicToolCalls: finalStep.dynamicToolCalls,
1165
+ toolResults: finalStep.toolResults,
1166
+ staticToolResults: finalStep.staticToolResults,
1167
+ dynamicToolResults: finalStep.dynamicToolResults,
1168
+ request: finalStep.request,
1169
+ response: finalStep.response,
1170
+ warnings: finalStep.warnings,
1171
+ providerMetadata: finalStep.providerMetadata,
1172
+ steps: recordedSteps,
1173
+ },
1174
+ callbacks: [
1175
+ onFinish,
1176
+ globalTelemetry.onFinish as
1177
+ | undefined
1178
+ | StreamTextOnFinishCallback<TOOLS>,
1179
+ ],
1180
+ });
1181
+ } catch (error) {
1182
+ controller.error(error);
1183
+ }
1184
+ },
1185
+ });
1186
+
1187
+ // initialize the stitchable stream and the transformed stream:
1188
+ const stitchableStream = createStitchableStream<TextStreamPart<TOOLS>>();
1189
+ this.addStream = stitchableStream.addStream;
1190
+ this.closeStream = stitchableStream.close;
1191
+
1192
+ // resilient stream that handles abort signals and errors:
1193
+ const reader = stitchableStream.stream.getReader();
1194
+ let stream = new ReadableStream<TextStreamPart<TOOLS>>({
1195
+ async start(controller) {
1196
+ // send start event:
1197
+ controller.enqueue({ type: 'start' });
1198
+ },
1199
+
1200
+ async pull(controller) {
1201
+ // abort handling:
1202
+ function abort() {
1203
+ onAbort?.({ steps: recordedSteps });
1204
+ controller.enqueue({
1205
+ type: 'abort',
1206
+ // The `reason` is usually of type DOMException, but it can also be of any type,
1207
+ // so we use getErrorMessage for serialization because it is already designed to accept values of the unknown type.
1208
+ // See: https://developer.mozilla.org/en-US/docs/Web/API/AbortSignal/reason
1209
+ ...(abortSignal?.reason !== undefined
1210
+ ? { reason: getErrorMessage(abortSignal.reason) }
1211
+ : {}),
1212
+ });
1213
+ controller.close();
1214
+ }
1215
+
1216
+ try {
1217
+ const { done, value } = await reader.read();
1218
+
1219
+ if (done) {
1220
+ controller.close();
1221
+ return;
1222
+ }
1223
+
1224
+ if (abortSignal?.aborted) {
1225
+ abort();
1226
+ return;
1227
+ }
1228
+
1229
+ controller.enqueue(value);
1230
+ } catch (error) {
1231
+ if (isAbortError(error) && abortSignal?.aborted) {
1232
+ abort();
1233
+ } else {
1234
+ controller.error(error);
1235
+ }
1236
+ }
1237
+ },
1238
+
1239
+ cancel(reason) {
1240
+ return stitchableStream.stream.cancel(reason);
1241
+ },
1242
+ });
1243
+
1244
+ // introduce a gate that prevent further tokens from
1245
+ // being emitted after a transform calls stopStream
1246
+ let isRunning = true;
1247
+ stream = stream.pipeThrough(
1248
+ new TransformStream({
1249
+ async transform(chunk, controller) {
1250
+ if (isRunning) {
1251
+ controller.enqueue(chunk);
1252
+ }
1253
+ },
1254
+ }),
1255
+ );
1256
+
1257
+ // transform the stream before output parsing
1258
+ // to enable replacement of stream segments:
1259
+ for (const transform of transforms) {
1260
+ stream = stream.pipeThrough(
1261
+ transform({
1262
+ tools: tools as TOOLS,
1263
+ stopStream() {
1264
+ stitchableStream.terminate();
1265
+ isRunning = false;
1266
+ },
1267
+ }),
1268
+ );
1269
+ }
1270
+
1271
+ this.baseStream = stream
1272
+ .pipeThrough(createOutputTransformStream(output ?? text()))
1273
+ .pipeThrough(eventProcessor);
1274
+
1275
+ const { maxRetries } = prepareRetries({
1276
+ maxRetries: maxRetriesArg,
1277
+ abortSignal,
1278
+ });
1279
+
1280
+ const callSettings = prepareCallSettings(settings);
1281
+
1282
+ const self = this;
1283
+
1284
+ const callId = generateCallId();
1285
+ const callbackTelemetryProps = {
1286
+ functionId: telemetry?.functionId,
1287
+ metadata: telemetry?.metadata as Record<string, unknown> | undefined,
1288
+ };
1289
+ const onStartTelemetryProps = {
1290
+ isEnabled: telemetry?.isEnabled,
1291
+ recordInputs: telemetry?.recordInputs,
1292
+ recordOutputs: telemetry?.recordOutputs,
1293
+ functionId: telemetry?.functionId,
1294
+ metadata: telemetry?.metadata,
1295
+ };
1296
+
1297
+ (async () => {
1298
+ const initialPrompt = await standardizePrompt({
1299
+ system,
1300
+ prompt,
1301
+ messages,
1302
+ } as Prompt);
1303
+
1304
+ await notify({
1305
+ event: {
1306
+ callId,
1307
+ operationId: 'ai.streamText',
1308
+ provider: model.provider,
1309
+ modelId: model.modelId,
1310
+ system,
1311
+ prompt,
1312
+ messages,
1313
+ tools,
1314
+ toolChoice,
1315
+ activeTools,
1316
+ maxOutputTokens: callSettings.maxOutputTokens,
1317
+ temperature: callSettings.temperature,
1318
+ topP: callSettings.topP,
1319
+ topK: callSettings.topK,
1320
+ presencePenalty: callSettings.presencePenalty,
1321
+ frequencyPenalty: callSettings.frequencyPenalty,
1322
+ stopSequences: callSettings.stopSequences,
1323
+ seed: callSettings.seed,
1324
+ reasoning: callSettings.reasoning,
1325
+ maxRetries,
1326
+ timeout,
1327
+ headers,
1328
+ providerOptions,
1329
+ stopWhen,
1330
+ output,
1331
+ abortSignal: originalAbortSignal,
1332
+ include,
1333
+ ...onStartTelemetryProps,
1334
+ experimental_context,
1335
+ },
1336
+ callbacks: [
1337
+ onStart,
1338
+ globalTelemetry.onStart as
1339
+ | undefined
1340
+ | StreamTextOnStartCallback<TOOLS, OUTPUT>,
1341
+ ],
1342
+ });
1343
+
1344
+ const initialMessages = initialPrompt.messages;
1345
+ const initialResponseMessages: Array<ResponseMessage> = [];
1346
+
1347
+ const { approvedToolApprovals, deniedToolApprovals } =
1348
+ collectToolApprovals<TOOLS>({ messages: initialMessages });
1349
+
1350
+ // initial tool execution step stream
1351
+ if (deniedToolApprovals.length > 0 || approvedToolApprovals.length > 0) {
1352
+ const providerExecutedToolApprovals = [
1353
+ ...approvedToolApprovals,
1354
+ ...deniedToolApprovals,
1355
+ ].filter(toolApproval => toolApproval.toolCall.providerExecuted);
1356
+
1357
+ const localApprovedToolApprovals = approvedToolApprovals.filter(
1358
+ toolApproval => !toolApproval.toolCall.providerExecuted,
1359
+ );
1360
+ const localDeniedToolApprovals = deniedToolApprovals.filter(
1361
+ toolApproval => !toolApproval.toolCall.providerExecuted,
1362
+ );
1363
+
1364
+ const deniedProviderExecutedToolApprovals = deniedToolApprovals.filter(
1365
+ toolApproval => toolApproval.toolCall.providerExecuted,
1366
+ );
1367
+
1368
+ let toolExecutionStepStreamController:
1369
+ | ReadableStreamDefaultController<TextStreamPart<TOOLS>>
1370
+ | undefined;
1371
+ const toolExecutionStepStream = new ReadableStream<
1372
+ TextStreamPart<TOOLS>
1373
+ >({
1374
+ start(controller) {
1375
+ toolExecutionStepStreamController = controller;
1376
+ },
1377
+ });
1378
+
1379
+ self.addStream(toolExecutionStepStream);
1380
+
1381
+ try {
1382
+ for (const toolApproval of [
1383
+ ...localDeniedToolApprovals,
1384
+ ...deniedProviderExecutedToolApprovals,
1385
+ ]) {
1386
+ toolExecutionStepStreamController?.enqueue({
1387
+ type: 'tool-output-denied',
1388
+ toolCallId: toolApproval.toolCall.toolCallId,
1389
+ toolName: toolApproval.toolCall.toolName,
1390
+ } as StaticToolOutputDenied<TOOLS>);
1391
+ }
1392
+
1393
+ const toolOutputs: Array<ToolOutput<TOOLS>> = [];
1394
+
1395
+ await Promise.all(
1396
+ localApprovedToolApprovals.map(async toolApproval => {
1397
+ const result = await executeToolCall({
1398
+ toolCall: toolApproval.toolCall,
1399
+ tools,
1400
+ telemetry,
1401
+ callId,
1402
+ messages: initialMessages,
1403
+ abortSignal,
1404
+ timeout,
1405
+ experimental_context,
1406
+ stepNumber: recordedSteps.length,
1407
+ provider: model.provider,
1408
+ modelId: model.modelId,
1409
+ onToolCallStart: [
1410
+ onToolCallStart,
1411
+ globalTelemetry.onToolCallStart as
1412
+ | undefined
1413
+ | StreamTextOnToolCallStartCallback<TOOLS>,
1414
+ ],
1415
+ onToolCallFinish: [
1416
+ onToolCallFinish,
1417
+ globalTelemetry.onToolCallFinish,
1418
+ ],
1419
+ executeToolInTelemetryContext: globalTelemetry.executeTool,
1420
+ onPreliminaryToolResult: result => {
1421
+ toolExecutionStepStreamController?.enqueue(result);
1422
+ },
1423
+ });
1424
+
1425
+ if (result != null) {
1426
+ toolExecutionStepStreamController?.enqueue(result);
1427
+ toolOutputs.push(result);
1428
+ }
1429
+ }),
1430
+ );
1431
+
1432
+ // forward provider-executed approval responses to the provider (do not execute locally):
1433
+ if (providerExecutedToolApprovals.length > 0) {
1434
+ initialResponseMessages.push({
1435
+ role: 'tool',
1436
+ content: providerExecutedToolApprovals.map(
1437
+ toolApproval =>
1438
+ ({
1439
+ type: 'tool-approval-response',
1440
+ approvalId: toolApproval.approvalResponse.approvalId,
1441
+ approved: toolApproval.approvalResponse.approved,
1442
+ reason: toolApproval.approvalResponse.reason,
1443
+ providerExecuted: true,
1444
+ }) satisfies ToolApprovalResponse,
1445
+ ),
1446
+ });
1447
+ }
1448
+
1449
+ // Local tool results (approved + denied) are sent as tool results:
1450
+ if (toolOutputs.length > 0 || localDeniedToolApprovals.length > 0) {
1451
+ const localToolContent: ToolContent = [];
1452
+
1453
+ // add regular tool results for approved tool calls:
1454
+ for (const output of toolOutputs) {
1455
+ localToolContent.push({
1456
+ type: 'tool-result' as const,
1457
+ toolCallId: output.toolCallId,
1458
+ toolName: output.toolName,
1459
+ output: await createToolModelOutput({
1460
+ toolCallId: output.toolCallId,
1461
+ input: output.input,
1462
+ tool: tools?.[output.toolName],
1463
+ output:
1464
+ output.type === 'tool-result'
1465
+ ? output.output
1466
+ : output.error,
1467
+ errorMode: output.type === 'tool-error' ? 'text' : 'none',
1468
+ }),
1469
+ });
1470
+ }
1471
+
1472
+ // add execution denied tool results for denied local tool approvals:
1473
+ for (const toolApproval of localDeniedToolApprovals) {
1474
+ localToolContent.push({
1475
+ type: 'tool-result' as const,
1476
+ toolCallId: toolApproval.toolCall.toolCallId,
1477
+ toolName: toolApproval.toolCall.toolName,
1478
+ output: {
1479
+ type: 'execution-denied' as const,
1480
+ reason: toolApproval.approvalResponse.reason,
1481
+ },
1482
+ });
1483
+ }
1484
+
1485
+ initialResponseMessages.push({
1486
+ role: 'tool',
1487
+ content: localToolContent,
1488
+ });
1489
+ }
1490
+ } finally {
1491
+ toolExecutionStepStreamController?.close();
1492
+ }
1493
+ }
1494
+
1495
+ recordedResponseMessages.push(...initialResponseMessages);
1496
+
1497
+ async function streamStep({
1498
+ currentStep,
1499
+ responseMessages,
1500
+ usage,
1501
+ }: {
1502
+ currentStep: number;
1503
+ responseMessages: Array<ResponseMessage>;
1504
+ usage: LanguageModelUsage;
1505
+ }) {
1506
+ const includeRawChunks = self.includeRawChunks;
1507
+
1508
+ // Set up step timeout if configured
1509
+ const stepTimeoutId =
1510
+ stepTimeoutMs != null
1511
+ ? setTimeout(() => stepAbortController!.abort(), stepTimeoutMs)
1512
+ : undefined;
1513
+
1514
+ // Set up chunk timeout tracking (will be reset on each chunk)
1515
+ let chunkTimeoutId: ReturnType<typeof setTimeout> | undefined =
1516
+ undefined;
1517
+
1518
+ function resetChunkTimeout() {
1519
+ if (chunkTimeoutMs != null) {
1520
+ if (chunkTimeoutId != null) {
1521
+ clearTimeout(chunkTimeoutId);
1522
+ }
1523
+ chunkTimeoutId = setTimeout(
1524
+ () => chunkAbortController!.abort(),
1525
+ chunkTimeoutMs,
1526
+ );
1527
+ }
1528
+ }
1529
+
1530
+ function clearChunkTimeout() {
1531
+ if (chunkTimeoutId != null) {
1532
+ clearTimeout(chunkTimeoutId);
1533
+ chunkTimeoutId = undefined;
1534
+ }
1535
+ }
1536
+
1537
+ function clearStepTimeout() {
1538
+ if (stepTimeoutId != null) {
1539
+ clearTimeout(stepTimeoutId);
1540
+ }
1541
+ }
1542
+
1543
+ try {
1544
+ stepFinish = new DelayedPromise<void>();
1545
+
1546
+ const stepInputMessages = [...initialMessages, ...responseMessages];
1547
+
1548
+ const prepareStepResult = await prepareStep?.({
1549
+ model,
1550
+ steps: recordedSteps,
1551
+ stepNumber: recordedSteps.length,
1552
+ messages: stepInputMessages,
1553
+ experimental_context,
1554
+ });
1555
+
1556
+ const stepModel = resolveLanguageModel(
1557
+ prepareStepResult?.model ?? model,
1558
+ );
1559
+
1560
+ const stepActiveTools = prepareStepResult?.activeTools ?? activeTools;
1561
+
1562
+ const { toolChoice: stepToolChoice, tools: stepTools } =
1563
+ await prepareToolsAndToolChoice({
1564
+ tools,
1565
+ toolChoice: prepareStepResult?.toolChoice ?? toolChoice,
1566
+ activeTools: stepActiveTools,
1567
+ });
1568
+
1569
+ experimental_context =
1570
+ prepareStepResult?.experimental_context ?? experimental_context;
1571
+
1572
+ const stepMessages = prepareStepResult?.messages ?? stepInputMessages;
1573
+ const stepSystem = prepareStepResult?.system ?? initialPrompt.system;
1574
+
1575
+ const stepProviderOptions = mergeObjects(
1576
+ providerOptions,
1577
+ prepareStepResult?.providerOptions,
1578
+ );
1579
+
1580
+ const stepStartTimestampMs = now();
1581
+
1582
+ const {
1583
+ stream: languageModelStream,
1584
+ request,
1585
+ response,
1586
+ } = await streamModelCall({
1587
+ model: prepareStepResult?.model ?? model,
1588
+ tools,
1589
+ activeTools: prepareStepResult?.activeTools ?? activeTools,
1590
+ toolChoice: prepareStepResult?.toolChoice ?? toolChoice,
1591
+ system: stepSystem,
1592
+ messages: stepMessages,
1593
+ repairToolCall,
1594
+ abortSignal,
1595
+ headers,
1596
+ includeRawChunks,
1597
+ providerOptions: stepProviderOptions,
1598
+ download,
1599
+ maxRetries,
1600
+ output,
1601
+ onStart: async ({ promptMessages }) => {
1602
+ await notify({
1603
+ event: {
1604
+ callId,
1605
+ stepNumber: recordedSteps.length,
1606
+ provider: stepModel.provider,
1607
+ modelId: stepModel.modelId,
1608
+ system: stepSystem,
1609
+ messages: stepMessages,
1610
+ tools,
1611
+ toolChoice: stepToolChoice,
1612
+ activeTools: stepActiveTools,
1613
+ steps: [...recordedSteps],
1614
+ providerOptions: stepProviderOptions,
1615
+ timeout,
1616
+ headers,
1617
+ stopWhen,
1618
+ output,
1619
+ abortSignal: originalAbortSignal,
1620
+ include,
1621
+ ...callbackTelemetryProps,
1622
+ experimental_context,
1623
+ promptMessages,
1624
+ stepTools,
1625
+ stepToolChoice,
1626
+ },
1627
+ callbacks: [
1628
+ onStepStart,
1629
+ globalTelemetry.onStepStart as
1630
+ | undefined
1631
+ | StreamTextOnStepStartCallback<TOOLS, OUTPUT>,
1632
+ ],
1633
+ });
1634
+ },
1635
+ ...callSettings,
1636
+ });
1637
+
1638
+ const stream2 = invokeToolCallbacksFromStream({
1639
+ stream: languageModelStream,
1640
+ tools,
1641
+ stepInputMessages,
1642
+ abortSignal,
1643
+ experimental_context,
1644
+ });
1645
+
1646
+ const streamWithToolResults = stream2.pipeThrough(
1647
+ createExecuteToolsTransformation({
1648
+ tools,
1649
+ telemetry,
1650
+ callId,
1651
+ messages: stepInputMessages,
1652
+ abortSignal,
1653
+ timeout,
1654
+ experimental_context,
1655
+ generateId,
1656
+ stepNumber: recordedSteps.length,
1657
+ provider: stepModel.provider,
1658
+ modelId: stepModel.modelId,
1659
+ onToolCallStart: [
1660
+ onToolCallStart,
1661
+ globalTelemetry.onToolCallStart as
1662
+ | undefined
1663
+ | StreamTextOnToolCallStartCallback<TOOLS>,
1664
+ ],
1665
+ onToolCallFinish: [
1666
+ onToolCallFinish,
1667
+ globalTelemetry.onToolCallFinish,
1668
+ ],
1669
+ executeToolInTelemetryContext: globalTelemetry.executeTool,
1670
+ }),
1671
+ );
1672
+
1673
+ // Conditionally include request.body based on include settings.
1674
+ // Large payloads (e.g., base64-encoded images) can cause memory issues.
1675
+ const stepRequest: LanguageModelRequestMetadata =
1676
+ (include?.requestBody ?? true)
1677
+ ? (request ?? {})
1678
+ : { ...request, body: undefined };
1679
+ const stepToolCalls: TypedToolCall<TOOLS>[] = [];
1680
+ const stepToolOutputs: ToolOutput<TOOLS>[] = [];
1681
+ let warnings: SharedV4Warning[] | undefined;
1682
+
1683
+ let stepFinishReason: FinishReason = 'other';
1684
+ let stepRawFinishReason: string | undefined = undefined;
1685
+
1686
+ let stepUsage: LanguageModelUsage = createNullLanguageModelUsage();
1687
+ let stepProviderMetadata: ProviderMetadata | undefined;
1688
+ let stepFirstChunk = true;
1689
+ let stepResponse: { id: string; timestamp: Date; modelId: string } = {
1690
+ id: generateId(),
1691
+ timestamp: new Date(),
1692
+ modelId: model.modelId,
1693
+ };
1694
+
1695
+ self.addStream(
1696
+ streamWithToolResults.pipeThrough(
1697
+ new TransformStream<
1698
+ ModelCallStreamPart<TOOLS>,
1699
+ TextStreamPart<TOOLS>
1700
+ >({
1701
+ async transform(chunk, controller): Promise<void> {
1702
+ resetChunkTimeout();
1703
+
1704
+ if (chunk.type === 'model-call-start') {
1705
+ warnings = chunk.warnings;
1706
+ return; // stream start chunks are sent immediately and do not count as first chunk
1707
+ }
1708
+
1709
+ if (stepFirstChunk) {
1710
+ const msToFirstChunk = now() - stepStartTimestampMs;
1711
+ stepFirstChunk = false;
1712
+
1713
+ // Step start:
1714
+ controller.enqueue({
1715
+ type: 'start-step',
1716
+ request: stepRequest,
1717
+ warnings: warnings ?? [],
1718
+ });
1719
+
1720
+ // TODO considering changing to onStreamPart listener
1721
+ // which receives all stream parts as they are
1722
+ // (and add necessary information to the stream parts
1723
+ // where needed)
1724
+ void globalTelemetry.onChunk?.({
1725
+ chunk: {
1726
+ type: 'ai.stream.firstChunk',
1727
+ callId,
1728
+ stepNumber: recordedSteps.length,
1729
+ attributes: {
1730
+ 'ai.response.msToFirstChunk': msToFirstChunk,
1731
+ },
1732
+ },
1733
+ });
1734
+ }
1735
+
1736
+ const chunkType = chunk.type;
1737
+ switch (chunkType) {
1738
+ case 'tool-approval-request':
1739
+ case 'text-start':
1740
+ case 'text-end': {
1741
+ controller.enqueue(chunk);
1742
+ break;
1743
+ }
1744
+
1745
+ case 'text-delta': {
1746
+ if (chunk.text.length > 0) {
1747
+ controller.enqueue(chunk);
1748
+ }
1749
+ break;
1750
+ }
1751
+
1752
+ case 'reasoning-start':
1753
+ case 'reasoning-end': {
1754
+ controller.enqueue(chunk);
1755
+ break;
1756
+ }
1757
+
1758
+ case 'custom': {
1759
+ controller.enqueue(chunk);
1760
+ break;
1761
+ }
1762
+
1763
+ case 'reasoning-delta': {
1764
+ controller.enqueue(chunk);
1765
+ break;
1766
+ }
1767
+
1768
+ case 'tool-call': {
1769
+ controller.enqueue(chunk);
1770
+ // store tool calls for onFinish callback and toolCalls promise:
1771
+ stepToolCalls.push(chunk);
1772
+ break;
1773
+ }
1774
+
1775
+ case 'tool-result': {
1776
+ controller.enqueue(chunk);
1777
+
1778
+ if (!chunk.preliminary) {
1779
+ stepToolOutputs.push(chunk);
1780
+ }
1781
+
1782
+ break;
1783
+ }
1784
+
1785
+ case 'tool-error': {
1786
+ controller.enqueue(chunk);
1787
+ stepToolOutputs.push(chunk);
1788
+ break;
1789
+ }
1790
+
1791
+ case 'model-call-response-metadata': {
1792
+ stepResponse = {
1793
+ id: chunk.id ?? stepResponse.id,
1794
+ timestamp: chunk.timestamp ?? stepResponse.timestamp,
1795
+ modelId: chunk.modelId ?? stepResponse.modelId,
1796
+ };
1797
+ break;
1798
+ }
1799
+
1800
+ case 'model-call-end': {
1801
+ // Note: tool executions might not be finished yet when the finish event is emitted.
1802
+ // store usage and finish reason for promises and onFinish callback:
1803
+ stepUsage = chunk.usage;
1804
+ stepFinishReason = chunk.finishReason;
1805
+ stepRawFinishReason = chunk.rawFinishReason;
1806
+ stepProviderMetadata = chunk.providerMetadata;
1807
+ const msToFinish = now() - stepStartTimestampMs;
1808
+ void globalTelemetry.onChunk?.({
1809
+ chunk: {
1810
+ type: 'ai.stream.finish',
1811
+ callId,
1812
+ stepNumber: recordedSteps.length,
1813
+ attributes: {
1814
+ 'ai.response.msToFinish': msToFinish,
1815
+ 'ai.response.avgOutputTokensPerSecond':
1816
+ (1000 * (stepUsage.outputTokens ?? 0)) /
1817
+ msToFinish,
1818
+ },
1819
+ },
1820
+ });
1821
+
1822
+ break;
1823
+ }
1824
+
1825
+ case 'file':
1826
+ case 'reasoning-file': {
1827
+ controller.enqueue(chunk);
1828
+ break;
1829
+ }
1830
+
1831
+ case 'source': {
1832
+ controller.enqueue(chunk);
1833
+ break;
1834
+ }
1835
+
1836
+ case 'tool-input-start':
1837
+ case 'tool-input-end':
1838
+ case 'tool-input-delta': {
1839
+ controller.enqueue(chunk);
1840
+ break;
1841
+ }
1842
+
1843
+ case 'error': {
1844
+ controller.enqueue(chunk);
1845
+ stepFinishReason = 'error';
1846
+ break;
1847
+ }
1848
+
1849
+ case 'raw': {
1850
+ if (includeRawChunks) {
1851
+ controller.enqueue(chunk);
1852
+ }
1853
+ break;
1854
+ }
1855
+
1856
+ default: {
1857
+ const exhaustiveCheck: never = chunkType;
1858
+ throw new Error(`Unknown chunk type: ${exhaustiveCheck}`);
1859
+ }
1860
+ }
1861
+ },
1862
+
1863
+ // invoke onFinish callback and resolve toolResults promise when the stream is about to close:
1864
+ async flush(controller) {
1865
+ controller.enqueue({
1866
+ type: 'finish-step',
1867
+ finishReason: stepFinishReason,
1868
+ rawFinishReason: stepRawFinishReason,
1869
+ usage: stepUsage,
1870
+ providerMetadata: stepProviderMetadata,
1871
+ response: {
1872
+ ...stepResponse,
1873
+ headers: response?.headers,
1874
+ },
1875
+ });
1876
+
1877
+ const combinedUsage = addLanguageModelUsage(usage, stepUsage);
1878
+
1879
+ // wait for the step to be fully processed by the event processor
1880
+ // to ensure that the recorded steps are complete:
1881
+ await stepFinish.promise;
1882
+
1883
+ const clientToolCalls = stepToolCalls.filter(
1884
+ toolCall => toolCall.providerExecuted !== true,
1885
+ );
1886
+ const clientToolOutputs = stepToolOutputs.filter(
1887
+ toolOutput => toolOutput.providerExecuted !== true,
1888
+ );
1889
+
1890
+ // continuable tool calls exclude fire-and-forget tools from loop continuation:
1891
+ const continuableToolCalls = clientToolCalls.filter(
1892
+ toolCall => !fireAndForgetTools.has(toolCall.toolName),
1893
+ );
1894
+
1895
+ // Track provider-executed tool calls that support deferred results.
1896
+ // In programmatic tool calling, a server tool (e.g., code_execution) may
1897
+ // trigger a client tool, and the server tool's result is deferred until
1898
+ // the client tool's result is sent back.
1899
+ for (const toolCall of stepToolCalls) {
1900
+ if (toolCall.providerExecuted !== true) continue;
1901
+ const tool = tools?.[toolCall.toolName];
1902
+ if (
1903
+ tool?.type === 'provider' &&
1904
+ tool.supportsDeferredResults
1905
+ ) {
1906
+ // Check if this tool call already has a result in the current step
1907
+ const hasResultInStep = stepToolOutputs.some(
1908
+ output =>
1909
+ (output.type === 'tool-result' ||
1910
+ output.type === 'tool-error') &&
1911
+ output.toolCallId === toolCall.toolCallId,
1912
+ );
1913
+ if (!hasResultInStep) {
1914
+ pendingDeferredToolCalls.set(toolCall.toolCallId, {
1915
+ toolName: toolCall.toolName,
1916
+ });
1917
+ }
1918
+ }
1919
+ }
1920
+
1921
+ // Mark deferred tool calls as resolved when we receive their results
1922
+ for (const output of stepToolOutputs) {
1923
+ if (
1924
+ output.type === 'tool-result' ||
1925
+ output.type === 'tool-error'
1926
+ ) {
1927
+ pendingDeferredToolCalls.delete(output.toolCallId);
1928
+ }
1929
+ }
1930
+
1931
+ // Clear the step and chunk timeouts before the next step is started
1932
+ clearStepTimeout();
1933
+ clearChunkTimeout();
1934
+
1935
+ if (
1936
+ // Continue if:
1937
+ // 1. There are continuable (non-fire-and-forget) client tool calls
1938
+ // that have all been executed, OR
1939
+ // 2. There are pending deferred results from provider-executed tools
1940
+ ((continuableToolCalls.length > 0 &&
1941
+ clientToolOutputs.filter(
1942
+ o => !fireAndForgetTools.has(o.toolName),
1943
+ ).length === continuableToolCalls.length) ||
1944
+ pendingDeferredToolCalls.size > 0) &&
1945
+ // continue until a stop condition is met:
1946
+ !(await isStopConditionMet({
1947
+ stopConditions,
1948
+ steps: recordedSteps,
1949
+ }))
1950
+ ) {
1951
+ // append to messages for the next step:
1952
+ responseMessages.push(
1953
+ ...(await toResponseMessages({
1954
+ content:
1955
+ // use transformed content to create the messages for the next step:
1956
+ recordedSteps[recordedSteps.length - 1].content,
1957
+ tools,
1958
+ fireAndForgetTools,
1959
+ })),
1960
+ );
1961
+
1962
+ try {
1963
+ await streamStep({
1964
+ currentStep: currentStep + 1,
1965
+ responseMessages,
1966
+ usage: combinedUsage,
1967
+ });
1968
+ } catch (error) {
1969
+ controller.enqueue({
1970
+ type: 'error',
1971
+ error,
1972
+ });
1973
+
1974
+ self.closeStream();
1975
+ }
1976
+ } else {
1977
+ controller.enqueue({
1978
+ type: 'finish',
1979
+ finishReason: stepFinishReason,
1980
+ rawFinishReason: stepRawFinishReason,
1981
+ totalUsage: combinedUsage,
1982
+ });
1983
+
1984
+ self.closeStream(); // close the stitchable stream
1985
+ }
1986
+ },
1987
+ }),
1988
+ ),
1989
+ );
1990
+ } finally {
1991
+ clearStepTimeout();
1992
+ clearChunkTimeout();
1993
+ }
1994
+ }
1995
+
1996
+ // add the initial stream to the stitchable stream
1997
+ await streamStep({
1998
+ currentStep: 0,
1999
+ responseMessages: initialResponseMessages,
2000
+ usage: createNullLanguageModelUsage(),
2001
+ });
2002
+ })().catch(async error => {
2003
+ await globalTelemetry.onError?.({ callId, error });
2004
+
2005
+ // add an error stream part and close the streams:
2006
+ self.addStream(
2007
+ new ReadableStream({
2008
+ start(controller) {
2009
+ controller.enqueue({ type: 'error', error });
2010
+ controller.close();
2011
+ },
2012
+ }),
2013
+ );
2014
+ self.closeStream();
2015
+ });
2016
+ }
2017
+
2018
+ get steps() {
2019
+ // when any of the promises are accessed, the stream is consumed
2020
+ // so it resolves without needing to consume the stream separately
2021
+ this.consumeStream();
2022
+
2023
+ return this._steps.promise;
2024
+ }
2025
+
2026
+ private get finalStep() {
2027
+ return this.steps.then(steps => steps[steps.length - 1]);
2028
+ }
2029
+
2030
+ get content() {
2031
+ return this.finalStep.then(step => step.content);
2032
+ }
2033
+
2034
+ get warnings() {
2035
+ return this.finalStep.then(step => step.warnings);
2036
+ }
2037
+
2038
+ get providerMetadata() {
2039
+ return this.finalStep.then(step => step.providerMetadata);
2040
+ }
2041
+
2042
+ get text() {
2043
+ return this.finalStep.then(step => step.text);
2044
+ }
2045
+
2046
+ get reasoningText() {
2047
+ return this.finalStep.then(step => step.reasoningText);
2048
+ }
2049
+
2050
+ get reasoning() {
2051
+ return this.finalStep.then(step =>
2052
+ convertToReasoningOutputs(step.reasoning),
2053
+ );
2054
+ }
2055
+
2056
+ get sources() {
2057
+ return this.finalStep.then(step => step.sources);
2058
+ }
2059
+
2060
+ get files() {
2061
+ return this.finalStep.then(step => step.files);
2062
+ }
2063
+
2064
+ get toolCalls() {
2065
+ return this.finalStep.then(step => step.toolCalls);
2066
+ }
2067
+
2068
+ get staticToolCalls() {
2069
+ return this.finalStep.then(step => step.staticToolCalls);
2070
+ }
2071
+
2072
+ get dynamicToolCalls() {
2073
+ return this.finalStep.then(step => step.dynamicToolCalls);
2074
+ }
2075
+
2076
+ get toolResults() {
2077
+ return this.finalStep.then(step => step.toolResults);
2078
+ }
2079
+
2080
+ get staticToolResults() {
2081
+ return this.finalStep.then(step => step.staticToolResults);
2082
+ }
2083
+
2084
+ get dynamicToolResults() {
2085
+ return this.finalStep.then(step => step.dynamicToolResults);
2086
+ }
2087
+
2088
+ get usage() {
2089
+ return this.finalStep.then(step => step.usage);
2090
+ }
2091
+
2092
+ get request() {
2093
+ return this.finalStep.then(step => step.request);
2094
+ }
2095
+
2096
+ get response() {
2097
+ return this.finalStep.then(step => step.response);
2098
+ }
2099
+
2100
+ get totalUsage() {
2101
+ // when any of the promises are accessed, the stream is consumed
2102
+ // so it resolves without needing to consume the stream separately
2103
+ this.consumeStream();
2104
+
2105
+ return this._totalUsage.promise;
2106
+ }
2107
+
2108
+ get finishReason() {
2109
+ // when any of the promises are accessed, the stream is consumed
2110
+ // so it resolves without needing to consume the stream separately
2111
+ this.consumeStream();
2112
+
2113
+ return this._finishReason.promise;
2114
+ }
2115
+
2116
+ get rawFinishReason() {
2117
+ // when any of the promises are accessed, the stream is consumed
2118
+ // so it resolves without needing to consume the stream separately
2119
+ this.consumeStream();
2120
+
2121
+ return this._rawFinishReason.promise;
2122
+ }
2123
+
2124
+ /**
2125
+ * Split out a new stream from the original stream.
2126
+ * The original stream is replaced to allow for further splitting,
2127
+ * since we do not know how many times the stream will be split.
2128
+ *
2129
+ * Note: this leads to buffering the stream content on the server.
2130
+ * However, the LLM results are expected to be small enough to not cause issues.
2131
+ */
2132
+ private teeStream() {
2133
+ const [stream1, stream2] = this.baseStream.tee();
2134
+ this.baseStream = stream2;
2135
+ return stream1;
2136
+ }
2137
+
2138
+ get textStream(): AsyncIterableStream<string> {
2139
+ return createAsyncIterableStream(
2140
+ this.teeStream().pipeThrough(
2141
+ new TransformStream<
2142
+ EnrichedStreamPart<TOOLS, InferPartialOutput<OUTPUT>>,
2143
+ string
2144
+ >({
2145
+ transform({ part }, controller) {
2146
+ if (part.type === 'text-delta') {
2147
+ controller.enqueue(part.text);
2148
+ }
2149
+ },
2150
+ }),
2151
+ ),
2152
+ );
2153
+ }
2154
+
2155
+ get fullStream(): AsyncIterableStream<TextStreamPart<TOOLS>> {
2156
+ return createAsyncIterableStream(
2157
+ this.teeStream().pipeThrough(
2158
+ new TransformStream<
2159
+ EnrichedStreamPart<TOOLS, InferPartialOutput<OUTPUT>>,
2160
+ TextStreamPart<TOOLS>
2161
+ >({
2162
+ transform({ part }, controller) {
2163
+ controller.enqueue(part);
2164
+ },
2165
+ }),
2166
+ ),
2167
+ );
2168
+ }
2169
+
2170
+ async consumeStream(options?: ConsumeStreamOptions): Promise<void> {
2171
+ try {
2172
+ await consumeStream({
2173
+ stream: this.fullStream,
2174
+ onError: options?.onError,
2175
+ });
2176
+ } catch (error) {
2177
+ options?.onError?.(error);
2178
+ }
2179
+ }
2180
+
2181
+ get experimental_partialOutputStream(): AsyncIterableStream<
2182
+ InferPartialOutput<OUTPUT>
2183
+ > {
2184
+ return this.partialOutputStream;
2185
+ }
2186
+
2187
+ get partialOutputStream(): AsyncIterableStream<InferPartialOutput<OUTPUT>> {
2188
+ return createAsyncIterableStream(
2189
+ this.teeStream().pipeThrough(
2190
+ new TransformStream<
2191
+ EnrichedStreamPart<TOOLS, InferPartialOutput<OUTPUT>>,
2192
+ InferPartialOutput<OUTPUT>
2193
+ >({
2194
+ transform({ partialOutput }, controller) {
2195
+ if (partialOutput != null) {
2196
+ controller.enqueue(partialOutput);
2197
+ }
2198
+ },
2199
+ }),
2200
+ ),
2201
+ );
2202
+ }
2203
+
2204
+ get elementStream(): AsyncIterableStream<InferElementOutput<OUTPUT>> {
2205
+ const transform = this.outputSpecification?.createElementStreamTransform();
2206
+
2207
+ if (transform == null) {
2208
+ throw new UnsupportedFunctionalityError({
2209
+ functionality: `element streams in ${this.outputSpecification?.name ?? 'text'} mode`,
2210
+ });
2211
+ }
2212
+
2213
+ return createAsyncIterableStream(this.teeStream().pipeThrough(transform));
2214
+ }
2215
+
2216
+ get output(): Promise<InferCompleteOutput<OUTPUT>> {
2217
+ return this.finalStep.then(step => {
2218
+ const output = this.outputSpecification ?? text();
2219
+ return output.parseCompleteOutput(
2220
+ { text: step.text },
2221
+ {
2222
+ response: step.response,
2223
+ usage: step.usage,
2224
+ finishReason: step.finishReason,
2225
+ },
2226
+ );
2227
+ });
2228
+ }
2229
+
2230
+ toUIMessageStream<UI_MESSAGE extends UIMessage>({
2231
+ originalMessages,
2232
+ generateMessageId,
2233
+ onFinish,
2234
+ messageMetadata,
2235
+ sendReasoning = true,
2236
+ sendSources = false,
2237
+ sendStart = true,
2238
+ sendFinish = true,
2239
+ onError = getErrorMessage,
2240
+ }: UIMessageStreamOptions<UI_MESSAGE> = {}): AsyncIterableStream<
2241
+ InferUIMessageChunk<UI_MESSAGE>
2242
+ > {
2243
+ const responseMessageId =
2244
+ generateMessageId != null
2245
+ ? getResponseUIMessageId({
2246
+ originalMessages,
2247
+ responseMessageId: generateMessageId,
2248
+ })
2249
+ : undefined;
2250
+
2251
+ // TODO simplify once dynamic is no longer needed for invalid tool inputs
2252
+ const isDynamic = (part: { toolName: string; dynamic?: boolean }) => {
2253
+ const tool = this.tools?.[part.toolName];
2254
+
2255
+ // provider-executed, dynamic tools are not listed in the tools object
2256
+ if (tool == null) {
2257
+ return part.dynamic;
2258
+ }
2259
+
2260
+ return tool?.type === 'dynamic' ? true : undefined;
2261
+ };
2262
+
2263
+ const baseStream = this.fullStream.pipeThrough(
2264
+ new TransformStream<
2265
+ TextStreamPart<TOOLS>,
2266
+ UIMessageChunk<
2267
+ InferUIMessageMetadata<UI_MESSAGE>,
2268
+ InferUIMessageData<UI_MESSAGE>
2269
+ >
2270
+ >({
2271
+ transform: async (part, controller) => {
2272
+ const messageMetadataValue = messageMetadata?.({ part });
2273
+
2274
+ const partType = part.type;
2275
+ switch (partType) {
2276
+ case 'text-start': {
2277
+ controller.enqueue({
2278
+ type: 'text-start',
2279
+ id: part.id,
2280
+ ...(part.providerMetadata != null
2281
+ ? { providerMetadata: part.providerMetadata }
2282
+ : {}),
2283
+ });
2284
+ break;
2285
+ }
2286
+
2287
+ case 'text-delta': {
2288
+ controller.enqueue({
2289
+ type: 'text-delta',
2290
+ id: part.id,
2291
+ delta: part.text,
2292
+ ...(part.providerMetadata != null
2293
+ ? { providerMetadata: part.providerMetadata }
2294
+ : {}),
2295
+ });
2296
+ break;
2297
+ }
2298
+
2299
+ case 'text-end': {
2300
+ controller.enqueue({
2301
+ type: 'text-end',
2302
+ id: part.id,
2303
+ ...(part.providerMetadata != null
2304
+ ? { providerMetadata: part.providerMetadata }
2305
+ : {}),
2306
+ });
2307
+ break;
2308
+ }
2309
+
2310
+ case 'reasoning-start': {
2311
+ controller.enqueue({
2312
+ type: 'reasoning-start',
2313
+ id: part.id,
2314
+ ...(part.providerMetadata != null
2315
+ ? { providerMetadata: part.providerMetadata }
2316
+ : {}),
2317
+ });
2318
+ break;
2319
+ }
2320
+
2321
+ case 'reasoning-delta': {
2322
+ if (sendReasoning) {
2323
+ controller.enqueue({
2324
+ type: 'reasoning-delta',
2325
+ id: part.id,
2326
+ delta: part.text,
2327
+ ...(part.providerMetadata != null
2328
+ ? { providerMetadata: part.providerMetadata }
2329
+ : {}),
2330
+ });
2331
+ }
2332
+ break;
2333
+ }
2334
+
2335
+ case 'reasoning-end': {
2336
+ controller.enqueue({
2337
+ type: 'reasoning-end',
2338
+ id: part.id,
2339
+ ...(part.providerMetadata != null
2340
+ ? { providerMetadata: part.providerMetadata }
2341
+ : {}),
2342
+ });
2343
+ break;
2344
+ }
2345
+
2346
+ case 'file':
2347
+ case 'reasoning-file': {
2348
+ if (partType !== 'reasoning-file' || sendReasoning) {
2349
+ controller.enqueue({
2350
+ type: part.type,
2351
+ mediaType: part.file.mediaType,
2352
+ url: `data:${part.file.mediaType};base64,${part.file.base64}`,
2353
+ ...(part.providerMetadata != null
2354
+ ? { providerMetadata: part.providerMetadata }
2355
+ : {}),
2356
+ });
2357
+ }
2358
+ break;
2359
+ }
2360
+
2361
+ case 'source': {
2362
+ if (sendSources && part.sourceType === 'url') {
2363
+ controller.enqueue({
2364
+ type: 'source-url',
2365
+ sourceId: part.id,
2366
+ url: part.url,
2367
+ title: part.title,
2368
+ ...(part.providerMetadata != null
2369
+ ? { providerMetadata: part.providerMetadata }
2370
+ : {}),
2371
+ });
2372
+ }
2373
+
2374
+ if (sendSources && part.sourceType === 'document') {
2375
+ controller.enqueue({
2376
+ type: 'source-document',
2377
+ sourceId: part.id,
2378
+ mediaType: part.mediaType,
2379
+ title: part.title,
2380
+ filename: part.filename,
2381
+ ...(part.providerMetadata != null
2382
+ ? { providerMetadata: part.providerMetadata }
2383
+ : {}),
2384
+ });
2385
+ }
2386
+ break;
2387
+ }
2388
+
2389
+ case 'custom': {
2390
+ controller.enqueue({
2391
+ type: 'custom',
2392
+ kind: part.kind,
2393
+ ...(part.providerMetadata != null
2394
+ ? { providerMetadata: part.providerMetadata }
2395
+ : {}),
2396
+ });
2397
+ break;
2398
+ }
2399
+
2400
+ case 'tool-input-start': {
2401
+ const dynamic = isDynamic(part);
2402
+
2403
+ controller.enqueue({
2404
+ type: 'tool-input-start',
2405
+ toolCallId: part.id,
2406
+ toolName: part.toolName,
2407
+ ...(part.providerExecuted != null
2408
+ ? { providerExecuted: part.providerExecuted }
2409
+ : {}),
2410
+ ...(part.providerMetadata != null
2411
+ ? { providerMetadata: part.providerMetadata }
2412
+ : {}),
2413
+ ...(dynamic != null ? { dynamic } : {}),
2414
+ ...(part.title != null ? { title: part.title } : {}),
2415
+ });
2416
+ break;
2417
+ }
2418
+
2419
+ case 'tool-input-delta': {
2420
+ controller.enqueue({
2421
+ type: 'tool-input-delta',
2422
+ toolCallId: part.id,
2423
+ inputTextDelta: part.delta,
2424
+ });
2425
+ break;
2426
+ }
2427
+
2428
+ case 'tool-call': {
2429
+ const dynamic = isDynamic(part);
2430
+
2431
+ if (part.invalid) {
2432
+ controller.enqueue({
2433
+ type: 'tool-input-error',
2434
+ toolCallId: part.toolCallId,
2435
+ toolName: part.toolName,
2436
+ input: part.input,
2437
+ ...(part.providerExecuted != null
2438
+ ? { providerExecuted: part.providerExecuted }
2439
+ : {}),
2440
+ ...(part.providerMetadata != null
2441
+ ? { providerMetadata: part.providerMetadata }
2442
+ : {}),
2443
+ ...(dynamic != null ? { dynamic } : {}),
2444
+ errorText: onError(part.error),
2445
+ ...(part.title != null ? { title: part.title } : {}),
2446
+ });
2447
+ } else {
2448
+ controller.enqueue({
2449
+ type: 'tool-input-available',
2450
+ toolCallId: part.toolCallId,
2451
+ toolName: part.toolName,
2452
+ input: part.input,
2453
+ ...(part.providerExecuted != null
2454
+ ? { providerExecuted: part.providerExecuted }
2455
+ : {}),
2456
+ ...(part.providerMetadata != null
2457
+ ? { providerMetadata: part.providerMetadata }
2458
+ : {}),
2459
+ ...(dynamic != null ? { dynamic } : {}),
2460
+ ...(part.title != null ? { title: part.title } : {}),
2461
+ });
2462
+ }
2463
+
2464
+ break;
2465
+ }
2466
+
2467
+ case 'tool-approval-request': {
2468
+ controller.enqueue({
2469
+ type: 'tool-approval-request',
2470
+ approvalId: part.approvalId,
2471
+ toolCallId: part.toolCall.toolCallId,
2472
+ });
2473
+ break;
2474
+ }
2475
+
2476
+ case 'tool-result': {
2477
+ const dynamic = isDynamic(part);
2478
+
2479
+ controller.enqueue({
2480
+ type: 'tool-output-available',
2481
+ toolCallId: part.toolCallId,
2482
+ output: part.output,
2483
+ ...(part.providerExecuted != null
2484
+ ? { providerExecuted: part.providerExecuted }
2485
+ : {}),
2486
+ ...(part.providerMetadata != null
2487
+ ? { providerMetadata: part.providerMetadata }
2488
+ : {}),
2489
+ ...(part.preliminary != null
2490
+ ? { preliminary: part.preliminary }
2491
+ : {}),
2492
+ ...(dynamic != null ? { dynamic } : {}),
2493
+ });
2494
+ break;
2495
+ }
2496
+
2497
+ case 'tool-error': {
2498
+ const dynamic = isDynamic(part);
2499
+
2500
+ controller.enqueue({
2501
+ type: 'tool-output-error',
2502
+ toolCallId: part.toolCallId,
2503
+ errorText: part.providerExecuted
2504
+ ? typeof part.error === 'string'
2505
+ ? part.error
2506
+ : JSON.stringify(part.error)
2507
+ : onError(part.error),
2508
+ ...(part.providerExecuted != null
2509
+ ? { providerExecuted: part.providerExecuted }
2510
+ : {}),
2511
+ ...(part.providerMetadata != null
2512
+ ? { providerMetadata: part.providerMetadata }
2513
+ : {}),
2514
+ ...(dynamic != null ? { dynamic } : {}),
2515
+ });
2516
+ break;
2517
+ }
2518
+
2519
+ case 'tool-output-denied': {
2520
+ controller.enqueue({
2521
+ type: 'tool-output-denied',
2522
+ toolCallId: part.toolCallId,
2523
+ });
2524
+ break;
2525
+ }
2526
+
2527
+ case 'error': {
2528
+ controller.enqueue({
2529
+ type: 'error',
2530
+ errorText: onError(part.error),
2531
+ });
2532
+ break;
2533
+ }
2534
+
2535
+ case 'start-step': {
2536
+ controller.enqueue({ type: 'start-step' });
2537
+ break;
2538
+ }
2539
+
2540
+ case 'finish-step': {
2541
+ controller.enqueue({ type: 'finish-step' });
2542
+ break;
2543
+ }
2544
+
2545
+ case 'start': {
2546
+ if (sendStart) {
2547
+ controller.enqueue({
2548
+ type: 'start',
2549
+ ...(messageMetadataValue != null
2550
+ ? { messageMetadata: messageMetadataValue }
2551
+ : {}),
2552
+ ...(responseMessageId != null
2553
+ ? { messageId: responseMessageId }
2554
+ : {}),
2555
+ });
2556
+ }
2557
+ break;
2558
+ }
2559
+
2560
+ case 'finish': {
2561
+ if (sendFinish) {
2562
+ controller.enqueue({
2563
+ type: 'finish',
2564
+ finishReason: part.finishReason,
2565
+ ...(messageMetadataValue != null
2566
+ ? { messageMetadata: messageMetadataValue }
2567
+ : {}),
2568
+ });
2569
+ }
2570
+ break;
2571
+ }
2572
+
2573
+ case 'abort': {
2574
+ controller.enqueue(part);
2575
+ break;
2576
+ }
2577
+
2578
+ case 'tool-input-end': {
2579
+ break;
2580
+ }
2581
+
2582
+ case 'raw': {
2583
+ // Raw chunks are not included in UI message streams
2584
+ // as they contain provider-specific data for developer use
2585
+ break;
2586
+ }
2587
+
2588
+ default: {
2589
+ const exhaustiveCheck: never = partType;
2590
+ throw new Error(`Unknown chunk type: ${exhaustiveCheck}`);
2591
+ }
2592
+ }
2593
+
2594
+ // start and finish events already have metadata
2595
+ // so we only need to send metadata for other parts
2596
+ if (
2597
+ messageMetadataValue != null &&
2598
+ partType !== 'start' &&
2599
+ partType !== 'finish'
2600
+ ) {
2601
+ controller.enqueue({
2602
+ type: 'message-metadata',
2603
+ messageMetadata: messageMetadataValue,
2604
+ });
2605
+ }
2606
+ },
2607
+ }),
2608
+ );
2609
+
2610
+ return createAsyncIterableStream(
2611
+ handleUIMessageStreamFinish<UI_MESSAGE>({
2612
+ stream: baseStream,
2613
+ messageId: responseMessageId ?? generateMessageId?.(),
2614
+ originalMessages,
2615
+ onFinish,
2616
+ onError,
2617
+ }),
2618
+ );
2619
+ }
2620
+
2621
+ pipeUIMessageStreamToResponse<UI_MESSAGE extends UIMessage>(
2622
+ response: ServerResponse,
2623
+ {
2624
+ originalMessages,
2625
+ generateMessageId,
2626
+ onFinish,
2627
+ messageMetadata,
2628
+ sendReasoning,
2629
+ sendSources,
2630
+ sendFinish,
2631
+ sendStart,
2632
+ onError,
2633
+ ...init
2634
+ }: UIMessageStreamResponseInit & UIMessageStreamOptions<UI_MESSAGE> = {},
2635
+ ) {
2636
+ pipeUIMessageStreamToResponse({
2637
+ response,
2638
+ stream: this.toUIMessageStream({
2639
+ originalMessages,
2640
+ generateMessageId,
2641
+ onFinish,
2642
+ messageMetadata,
2643
+ sendReasoning,
2644
+ sendSources,
2645
+ sendFinish,
2646
+ sendStart,
2647
+ onError,
2648
+ }),
2649
+ ...init,
2650
+ });
2651
+ }
2652
+
2653
+ pipeTextStreamToResponse(response: ServerResponse, init?: ResponseInit) {
2654
+ pipeTextStreamToResponse({
2655
+ response,
2656
+ textStream: this.textStream,
2657
+ ...init,
2658
+ });
2659
+ }
2660
+
2661
+ toUIMessageStreamResponse<UI_MESSAGE extends UIMessage>({
2662
+ originalMessages,
2663
+ generateMessageId,
2664
+ onFinish,
2665
+ messageMetadata,
2666
+ sendReasoning,
2667
+ sendSources,
2668
+ sendFinish,
2669
+ sendStart,
2670
+ onError,
2671
+ ...init
2672
+ }: UIMessageStreamResponseInit &
2673
+ UIMessageStreamOptions<UI_MESSAGE> = {}): Response {
2674
+ return createUIMessageStreamResponse({
2675
+ stream: this.toUIMessageStream({
2676
+ originalMessages,
2677
+ generateMessageId,
2678
+ onFinish,
2679
+ messageMetadata,
2680
+ sendReasoning,
2681
+ sendSources,
2682
+ sendFinish,
2683
+ sendStart,
2684
+ onError,
2685
+ }),
2686
+ ...init,
2687
+ });
2688
+ }
2689
+
2690
+ toTextStreamResponse(init?: ResponseInit): Response {
2691
+ return createTextStreamResponse({
2692
+ textStream: this.textStream,
2693
+ ...init,
2694
+ });
2695
+ }
2696
+ }