ai 6.0.30 → 6.0.32

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (250)
  1. package/CHANGELOG.md +13 -0
  2. package/dist/index.js +1 -1
  3. package/dist/index.mjs +1 -1
  4. package/dist/internal/index.js +1 -1
  5. package/dist/internal/index.mjs +1 -1
  6. package/docs/00-introduction/index.mdx +76 -0
  7. package/docs/02-foundations/01-overview.mdx +43 -0
  8. package/docs/02-foundations/02-providers-and-models.mdx +163 -0
  9. package/docs/02-foundations/03-prompts.mdx +620 -0
  10. package/docs/02-foundations/04-tools.mdx +160 -0
  11. package/docs/02-foundations/05-streaming.mdx +62 -0
  12. package/docs/02-foundations/index.mdx +43 -0
  13. package/docs/02-getting-started/00-choosing-a-provider.mdx +110 -0
  14. package/docs/02-getting-started/01-navigating-the-library.mdx +85 -0
  15. package/docs/02-getting-started/02-nextjs-app-router.mdx +556 -0
  16. package/docs/02-getting-started/03-nextjs-pages-router.mdx +542 -0
  17. package/docs/02-getting-started/04-svelte.mdx +627 -0
  18. package/docs/02-getting-started/05-nuxt.mdx +566 -0
  19. package/docs/02-getting-started/06-nodejs.mdx +512 -0
  20. package/docs/02-getting-started/07-expo.mdx +766 -0
  21. package/docs/02-getting-started/08-tanstack-start.mdx +583 -0
  22. package/docs/02-getting-started/index.mdx +44 -0
  23. package/docs/03-agents/01-overview.mdx +96 -0
  24. package/docs/03-agents/02-building-agents.mdx +367 -0
  25. package/docs/03-agents/03-workflows.mdx +370 -0
  26. package/docs/03-agents/04-loop-control.mdx +350 -0
  27. package/docs/03-agents/05-configuring-call-options.mdx +286 -0
  28. package/docs/03-agents/index.mdx +40 -0
  29. package/docs/03-ai-sdk-core/01-overview.mdx +33 -0
  30. package/docs/03-ai-sdk-core/05-generating-text.mdx +600 -0
  31. package/docs/03-ai-sdk-core/10-generating-structured-data.mdx +662 -0
  32. package/docs/03-ai-sdk-core/15-tools-and-tool-calling.mdx +1102 -0
  33. package/docs/03-ai-sdk-core/16-mcp-tools.mdx +375 -0
  34. package/docs/03-ai-sdk-core/20-prompt-engineering.mdx +144 -0
  35. package/docs/03-ai-sdk-core/25-settings.mdx +198 -0
  36. package/docs/03-ai-sdk-core/30-embeddings.mdx +247 -0
  37. package/docs/03-ai-sdk-core/31-reranking.mdx +218 -0
  38. package/docs/03-ai-sdk-core/35-image-generation.mdx +341 -0
  39. package/docs/03-ai-sdk-core/36-transcription.mdx +173 -0
  40. package/docs/03-ai-sdk-core/37-speech.mdx +167 -0
  41. package/docs/03-ai-sdk-core/40-middleware.mdx +480 -0
  42. package/docs/03-ai-sdk-core/45-provider-management.mdx +349 -0
  43. package/docs/03-ai-sdk-core/50-error-handling.mdx +149 -0
  44. package/docs/03-ai-sdk-core/55-testing.mdx +218 -0
  45. package/docs/03-ai-sdk-core/60-telemetry.mdx +313 -0
  46. package/docs/03-ai-sdk-core/65-devtools.mdx +107 -0
  47. package/docs/03-ai-sdk-core/index.mdx +88 -0
  48. package/docs/04-ai-sdk-ui/01-overview.mdx +44 -0
  49. package/docs/04-ai-sdk-ui/02-chatbot.mdx +1313 -0
  50. package/docs/04-ai-sdk-ui/03-chatbot-message-persistence.mdx +535 -0
  51. package/docs/04-ai-sdk-ui/03-chatbot-resume-streams.mdx +263 -0
  52. package/docs/04-ai-sdk-ui/03-chatbot-tool-usage.mdx +682 -0
  53. package/docs/04-ai-sdk-ui/04-generative-user-interfaces.mdx +389 -0
  54. package/docs/04-ai-sdk-ui/05-completion.mdx +186 -0
  55. package/docs/04-ai-sdk-ui/08-object-generation.mdx +344 -0
  56. package/docs/04-ai-sdk-ui/20-streaming-data.mdx +397 -0
  57. package/docs/04-ai-sdk-ui/21-error-handling.mdx +190 -0
  58. package/docs/04-ai-sdk-ui/21-transport.mdx +174 -0
  59. package/docs/04-ai-sdk-ui/24-reading-ui-message-streams.mdx +104 -0
  60. package/docs/04-ai-sdk-ui/25-message-metadata.mdx +152 -0
  61. package/docs/04-ai-sdk-ui/50-stream-protocol.mdx +477 -0
  62. package/docs/04-ai-sdk-ui/index.mdx +64 -0
  63. package/docs/05-ai-sdk-rsc/01-overview.mdx +45 -0
  64. package/docs/05-ai-sdk-rsc/02-streaming-react-components.mdx +209 -0
  65. package/docs/05-ai-sdk-rsc/03-generative-ui-state.mdx +279 -0
  66. package/docs/05-ai-sdk-rsc/03-saving-and-restoring-states.mdx +105 -0
  67. package/docs/05-ai-sdk-rsc/04-multistep-interfaces.mdx +282 -0
  68. package/docs/05-ai-sdk-rsc/05-streaming-values.mdx +158 -0
  69. package/docs/05-ai-sdk-rsc/06-loading-state.mdx +273 -0
  70. package/docs/05-ai-sdk-rsc/08-error-handling.mdx +96 -0
  71. package/docs/05-ai-sdk-rsc/09-authentication.mdx +42 -0
  72. package/docs/05-ai-sdk-rsc/10-migrating-to-ui.mdx +722 -0
  73. package/docs/05-ai-sdk-rsc/index.mdx +58 -0
  74. package/docs/06-advanced/01-prompt-engineering.mdx +96 -0
  75. package/docs/06-advanced/02-stopping-streams.mdx +184 -0
  76. package/docs/06-advanced/03-backpressure.mdx +173 -0
  77. package/docs/06-advanced/04-caching.mdx +169 -0
  78. package/docs/06-advanced/05-multiple-streamables.mdx +68 -0
  79. package/docs/06-advanced/06-rate-limiting.mdx +60 -0
  80. package/docs/06-advanced/07-rendering-ui-with-language-models.mdx +213 -0
  81. package/docs/06-advanced/08-model-as-router.mdx +120 -0
  82. package/docs/06-advanced/09-multistep-interfaces.mdx +115 -0
  83. package/docs/06-advanced/09-sequential-generations.mdx +55 -0
  84. package/docs/06-advanced/10-vercel-deployment-guide.mdx +117 -0
  85. package/docs/06-advanced/index.mdx +11 -0
  86. package/docs/07-reference/01-ai-sdk-core/01-generate-text.mdx +2142 -0
  87. package/docs/07-reference/01-ai-sdk-core/02-stream-text.mdx +3215 -0
  88. package/docs/07-reference/01-ai-sdk-core/03-generate-object.mdx +780 -0
  89. package/docs/07-reference/01-ai-sdk-core/04-stream-object.mdx +1140 -0
  90. package/docs/07-reference/01-ai-sdk-core/05-embed.mdx +190 -0
  91. package/docs/07-reference/01-ai-sdk-core/06-embed-many.mdx +171 -0
  92. package/docs/07-reference/01-ai-sdk-core/06-rerank.mdx +309 -0
  93. package/docs/07-reference/01-ai-sdk-core/10-generate-image.mdx +227 -0
  94. package/docs/07-reference/01-ai-sdk-core/11-transcribe.mdx +138 -0
  95. package/docs/07-reference/01-ai-sdk-core/12-generate-speech.mdx +214 -0
  96. package/docs/07-reference/01-ai-sdk-core/15-agent.mdx +203 -0
  97. package/docs/07-reference/01-ai-sdk-core/16-tool-loop-agent.mdx +449 -0
  98. package/docs/07-reference/01-ai-sdk-core/17-create-agent-ui-stream.mdx +148 -0
  99. package/docs/07-reference/01-ai-sdk-core/18-create-agent-ui-stream-response.mdx +168 -0
  100. package/docs/07-reference/01-ai-sdk-core/18-pipe-agent-ui-stream-to-response.mdx +144 -0
  101. package/docs/07-reference/01-ai-sdk-core/20-tool.mdx +196 -0
  102. package/docs/07-reference/01-ai-sdk-core/22-dynamic-tool.mdx +175 -0
  103. package/docs/07-reference/01-ai-sdk-core/23-create-mcp-client.mdx +410 -0
  104. package/docs/07-reference/01-ai-sdk-core/24-mcp-stdio-transport.mdx +68 -0
  105. package/docs/07-reference/01-ai-sdk-core/25-json-schema.mdx +94 -0
  106. package/docs/07-reference/01-ai-sdk-core/26-zod-schema.mdx +109 -0
  107. package/docs/07-reference/01-ai-sdk-core/27-valibot-schema.mdx +55 -0
  108. package/docs/07-reference/01-ai-sdk-core/28-output.mdx +342 -0
  109. package/docs/07-reference/01-ai-sdk-core/30-model-message.mdx +415 -0
  110. package/docs/07-reference/01-ai-sdk-core/31-ui-message.mdx +246 -0
  111. package/docs/07-reference/01-ai-sdk-core/32-validate-ui-messages.mdx +101 -0
  112. package/docs/07-reference/01-ai-sdk-core/33-safe-validate-ui-messages.mdx +113 -0
  113. package/docs/07-reference/01-ai-sdk-core/40-provider-registry.mdx +182 -0
  114. package/docs/07-reference/01-ai-sdk-core/42-custom-provider.mdx +121 -0
  115. package/docs/07-reference/01-ai-sdk-core/50-cosine-similarity.mdx +52 -0
  116. package/docs/07-reference/01-ai-sdk-core/60-wrap-language-model.mdx +59 -0
  117. package/docs/07-reference/01-ai-sdk-core/61-wrap-image-model.mdx +64 -0
  118. package/docs/07-reference/01-ai-sdk-core/65-language-model-v2-middleware.mdx +46 -0
  119. package/docs/07-reference/01-ai-sdk-core/66-extract-reasoning-middleware.mdx +68 -0
  120. package/docs/07-reference/01-ai-sdk-core/67-simulate-streaming-middleware.mdx +71 -0
  121. package/docs/07-reference/01-ai-sdk-core/68-default-settings-middleware.mdx +80 -0
  122. package/docs/07-reference/01-ai-sdk-core/69-add-tool-input-examples-middleware.mdx +155 -0
  123. package/docs/07-reference/01-ai-sdk-core/70-extract-json-middleware.mdx +147 -0
  124. package/docs/07-reference/01-ai-sdk-core/70-step-count-is.mdx +84 -0
  125. package/docs/07-reference/01-ai-sdk-core/71-has-tool-call.mdx +120 -0
  126. package/docs/07-reference/01-ai-sdk-core/75-simulate-readable-stream.mdx +94 -0
  127. package/docs/07-reference/01-ai-sdk-core/80-smooth-stream.mdx +145 -0
  128. package/docs/07-reference/01-ai-sdk-core/90-generate-id.mdx +43 -0
  129. package/docs/07-reference/01-ai-sdk-core/91-create-id-generator.mdx +89 -0
  130. package/docs/07-reference/01-ai-sdk-core/index.mdx +159 -0
  131. package/docs/07-reference/02-ai-sdk-ui/01-use-chat.mdx +446 -0
  132. package/docs/07-reference/02-ai-sdk-ui/02-use-completion.mdx +179 -0
  133. package/docs/07-reference/02-ai-sdk-ui/03-use-object.mdx +178 -0
  134. package/docs/07-reference/02-ai-sdk-ui/31-convert-to-model-messages.mdx +230 -0
  135. package/docs/07-reference/02-ai-sdk-ui/32-prune-messages.mdx +108 -0
  136. package/docs/07-reference/02-ai-sdk-ui/40-create-ui-message-stream.mdx +151 -0
  137. package/docs/07-reference/02-ai-sdk-ui/41-create-ui-message-stream-response.mdx +113 -0
  138. package/docs/07-reference/02-ai-sdk-ui/42-pipe-ui-message-stream-to-response.mdx +73 -0
  139. package/docs/07-reference/02-ai-sdk-ui/43-read-ui-message-stream.mdx +57 -0
  140. package/docs/07-reference/02-ai-sdk-ui/46-infer-ui-tools.mdx +99 -0
  141. package/docs/07-reference/02-ai-sdk-ui/47-infer-ui-tool.mdx +75 -0
  142. package/docs/07-reference/02-ai-sdk-ui/50-direct-chat-transport.mdx +333 -0
  143. package/docs/07-reference/02-ai-sdk-ui/index.mdx +89 -0
  144. package/docs/07-reference/03-ai-sdk-rsc/01-stream-ui.mdx +767 -0
  145. package/docs/07-reference/03-ai-sdk-rsc/02-create-ai.mdx +90 -0
  146. package/docs/07-reference/03-ai-sdk-rsc/03-create-streamable-ui.mdx +91 -0
  147. package/docs/07-reference/03-ai-sdk-rsc/04-create-streamable-value.mdx +48 -0
  148. package/docs/07-reference/03-ai-sdk-rsc/05-read-streamable-value.mdx +78 -0
  149. package/docs/07-reference/03-ai-sdk-rsc/06-get-ai-state.mdx +50 -0
  150. package/docs/07-reference/03-ai-sdk-rsc/07-get-mutable-ai-state.mdx +70 -0
  151. package/docs/07-reference/03-ai-sdk-rsc/08-use-ai-state.mdx +26 -0
  152. package/docs/07-reference/03-ai-sdk-rsc/09-use-actions.mdx +42 -0
  153. package/docs/07-reference/03-ai-sdk-rsc/10-use-ui-state.mdx +35 -0
  154. package/docs/07-reference/03-ai-sdk-rsc/11-use-streamable-value.mdx +46 -0
  155. package/docs/07-reference/03-ai-sdk-rsc/20-render.mdx +262 -0
  156. package/docs/07-reference/03-ai-sdk-rsc/index.mdx +67 -0
  157. package/docs/07-reference/04-stream-helpers/01-ai-stream.mdx +89 -0
  158. package/docs/07-reference/04-stream-helpers/02-streaming-text-response.mdx +79 -0
  159. package/docs/07-reference/04-stream-helpers/05-stream-to-response.mdx +108 -0
  160. package/docs/07-reference/04-stream-helpers/07-openai-stream.mdx +77 -0
  161. package/docs/07-reference/04-stream-helpers/08-anthropic-stream.mdx +79 -0
  162. package/docs/07-reference/04-stream-helpers/09-aws-bedrock-stream.mdx +91 -0
  163. package/docs/07-reference/04-stream-helpers/10-aws-bedrock-anthropic-stream.mdx +96 -0
  164. package/docs/07-reference/04-stream-helpers/10-aws-bedrock-messages-stream.mdx +96 -0
  165. package/docs/07-reference/04-stream-helpers/11-aws-bedrock-cohere-stream.mdx +93 -0
  166. package/docs/07-reference/04-stream-helpers/12-aws-bedrock-llama-2-stream.mdx +93 -0
  167. package/docs/07-reference/04-stream-helpers/13-cohere-stream.mdx +78 -0
  168. package/docs/07-reference/04-stream-helpers/14-google-generative-ai-stream.mdx +85 -0
  169. package/docs/07-reference/04-stream-helpers/15-hugging-face-stream.mdx +84 -0
  170. package/docs/07-reference/04-stream-helpers/16-langchain-adapter.mdx +98 -0
  171. package/docs/07-reference/04-stream-helpers/16-llamaindex-adapter.mdx +70 -0
  172. package/docs/07-reference/04-stream-helpers/17-mistral-stream.mdx +81 -0
  173. package/docs/07-reference/04-stream-helpers/18-replicate-stream.mdx +83 -0
  174. package/docs/07-reference/04-stream-helpers/19-inkeep-stream.mdx +80 -0
  175. package/docs/07-reference/04-stream-helpers/index.mdx +103 -0
  176. package/docs/07-reference/05-ai-sdk-errors/ai-api-call-error.mdx +30 -0
  177. package/docs/07-reference/05-ai-sdk-errors/ai-download-error.mdx +27 -0
  178. package/docs/07-reference/05-ai-sdk-errors/ai-empty-response-body-error.mdx +24 -0
  179. package/docs/07-reference/05-ai-sdk-errors/ai-invalid-argument-error.mdx +26 -0
  180. package/docs/07-reference/05-ai-sdk-errors/ai-invalid-data-content-error.mdx +25 -0
  181. package/docs/07-reference/05-ai-sdk-errors/ai-invalid-data-content.mdx +26 -0
  182. package/docs/07-reference/05-ai-sdk-errors/ai-invalid-message-role-error.mdx +25 -0
  183. package/docs/07-reference/05-ai-sdk-errors/ai-invalid-prompt-error.mdx +47 -0
  184. package/docs/07-reference/05-ai-sdk-errors/ai-invalid-response-data-error.mdx +25 -0
  185. package/docs/07-reference/05-ai-sdk-errors/ai-invalid-tool-approval-error.mdx +25 -0
  186. package/docs/07-reference/05-ai-sdk-errors/ai-invalid-tool-input-error.mdx +27 -0
  187. package/docs/07-reference/05-ai-sdk-errors/ai-json-parse-error.mdx +25 -0
  188. package/docs/07-reference/05-ai-sdk-errors/ai-load-api-key-error.mdx +24 -0
  189. package/docs/07-reference/05-ai-sdk-errors/ai-load-setting-error.mdx +24 -0
  190. package/docs/07-reference/05-ai-sdk-errors/ai-message-conversion-error.mdx +25 -0
  191. package/docs/07-reference/05-ai-sdk-errors/ai-no-content-generated-error.mdx +24 -0
  192. package/docs/07-reference/05-ai-sdk-errors/ai-no-image-generated-error.mdx +36 -0
  193. package/docs/07-reference/05-ai-sdk-errors/ai-no-object-generated-error.mdx +43 -0
  194. package/docs/07-reference/05-ai-sdk-errors/ai-no-speech-generated-error.mdx +25 -0
  195. package/docs/07-reference/05-ai-sdk-errors/ai-no-such-model-error.mdx +26 -0
  196. package/docs/07-reference/05-ai-sdk-errors/ai-no-such-provider-error.mdx +28 -0
  197. package/docs/07-reference/05-ai-sdk-errors/ai-no-such-tool-error.mdx +26 -0
  198. package/docs/07-reference/05-ai-sdk-errors/ai-no-transcript-generated-error.mdx +25 -0
  199. package/docs/07-reference/05-ai-sdk-errors/ai-retry-error.mdx +27 -0
  200. package/docs/07-reference/05-ai-sdk-errors/ai-too-many-embedding-values-for-call-error.mdx +27 -0
  201. package/docs/07-reference/05-ai-sdk-errors/ai-tool-call-not-found-for-approval-error.mdx +26 -0
  202. package/docs/07-reference/05-ai-sdk-errors/ai-tool-call-repair-error.mdx +28 -0
  203. package/docs/07-reference/05-ai-sdk-errors/ai-type-validation-error.mdx +25 -0
  204. package/docs/07-reference/05-ai-sdk-errors/ai-unsupported-functionality-error.mdx +25 -0
  205. package/docs/07-reference/05-ai-sdk-errors/index.mdx +38 -0
  206. package/docs/07-reference/index.mdx +34 -0
  207. package/docs/08-migration-guides/00-versioning.mdx +46 -0
  208. package/docs/08-migration-guides/24-migration-guide-6-0.mdx +823 -0
  209. package/docs/08-migration-guides/25-migration-guide-5-0-data.mdx +882 -0
  210. package/docs/08-migration-guides/26-migration-guide-5-0.mdx +3427 -0
  211. package/docs/08-migration-guides/27-migration-guide-4-2.mdx +99 -0
  212. package/docs/08-migration-guides/28-migration-guide-4-1.mdx +14 -0
  213. package/docs/08-migration-guides/29-migration-guide-4-0.mdx +1157 -0
  214. package/docs/08-migration-guides/36-migration-guide-3-4.mdx +14 -0
  215. package/docs/08-migration-guides/37-migration-guide-3-3.mdx +64 -0
  216. package/docs/08-migration-guides/38-migration-guide-3-2.mdx +46 -0
  217. package/docs/08-migration-guides/39-migration-guide-3-1.mdx +168 -0
  218. package/docs/08-migration-guides/index.mdx +22 -0
  219. package/docs/09-troubleshooting/01-azure-stream-slow.mdx +33 -0
  220. package/docs/09-troubleshooting/02-client-side-function-calls-not-invoked.mdx +22 -0
  221. package/docs/09-troubleshooting/03-server-actions-in-client-components.mdx +40 -0
  222. package/docs/09-troubleshooting/04-strange-stream-output.mdx +36 -0
  223. package/docs/09-troubleshooting/05-streamable-ui-errors.mdx +16 -0
  224. package/docs/09-troubleshooting/05-tool-invocation-missing-result.mdx +106 -0
  225. package/docs/09-troubleshooting/06-streaming-not-working-when-deployed.mdx +31 -0
  226. package/docs/09-troubleshooting/06-streaming-not-working-when-proxied.mdx +31 -0
  227. package/docs/09-troubleshooting/06-timeout-on-vercel.mdx +60 -0
  228. package/docs/09-troubleshooting/07-unclosed-streams.mdx +34 -0
  229. package/docs/09-troubleshooting/08-use-chat-failed-to-parse-stream.mdx +26 -0
  230. package/docs/09-troubleshooting/09-client-stream-error.mdx +25 -0
  231. package/docs/09-troubleshooting/10-use-chat-tools-no-response.mdx +32 -0
  232. package/docs/09-troubleshooting/11-use-chat-custom-request-options.mdx +149 -0
  233. package/docs/09-troubleshooting/12-typescript-performance-zod.mdx +46 -0
  234. package/docs/09-troubleshooting/12-use-chat-an-error-occurred.mdx +59 -0
  235. package/docs/09-troubleshooting/13-repeated-assistant-messages.mdx +73 -0
  236. package/docs/09-troubleshooting/14-stream-abort-handling.mdx +73 -0
  237. package/docs/09-troubleshooting/14-tool-calling-with-structured-outputs.mdx +48 -0
  238. package/docs/09-troubleshooting/15-abort-breaks-resumable-streams.mdx +55 -0
  239. package/docs/09-troubleshooting/15-stream-text-not-working.mdx +33 -0
  240. package/docs/09-troubleshooting/16-streaming-status-delay.mdx +63 -0
  241. package/docs/09-troubleshooting/17-use-chat-stale-body-data.mdx +141 -0
  242. package/docs/09-troubleshooting/18-ontoolcall-type-narrowing.mdx +66 -0
  243. package/docs/09-troubleshooting/19-unsupported-model-version.mdx +50 -0
  244. package/docs/09-troubleshooting/20-no-object-generated-content-filter.mdx +72 -0
  245. package/docs/09-troubleshooting/30-model-is-not-assignable-to-type.mdx +21 -0
  246. package/docs/09-troubleshooting/40-typescript-cannot-find-namespace-jsx.mdx +24 -0
  247. package/docs/09-troubleshooting/50-react-maximum-update-depth-exceeded.mdx +39 -0
  248. package/docs/09-troubleshooting/60-jest-cannot-find-module-ai-rsc.mdx +22 -0
  249. package/docs/09-troubleshooting/index.mdx +11 -0
  250. package/package.json +7 -3
@@ -0,0 +1,3427 @@
1
+ ---
2
+ title: Migrate AI SDK 4.x to 5.0
3
+ description: Learn how to upgrade AI SDK 4.x to 5.0.
4
+ ---
5
+
6
+ # Migrate AI SDK 4.x to 5.0
7
+
8
+ ## Recommended Migration Process
9
+
10
+ 1. Back up your project. If you use a version control system, make sure all current changes are committed.
11
+ 1. Upgrade to AI SDK 5.0.
12
+ 1. Automatically migrate your code using one of these approaches:
13
+ - Use the [AI SDK 5 Migration MCP Server](#ai-sdk-5-migration-mcp-server) for AI-assisted migration in Cursor or other MCP-compatible coding agents
14
+ - Use [codemods](#codemods) to automatically transform your code
15
+ 1. Follow the breaking changes guide below.
16
+ 1. Verify your project is working as expected.
17
+ 1. Commit your changes.
18
+
19
+ ## AI SDK 5 Migration MCP Server
20
+
21
+ The [AI SDK 5 Migration Model Context Protocol (MCP) Server](https://github.com/vercel-labs/ai-sdk-5-migration-mcp-server) provides an automated way to migrate your project using a coding agent. This server has been designed for Cursor, but should work with any coding agent that supports MCP.
22
+
23
+ To get started, create or edit `.cursor/mcp.json` in your project:
24
+
25
+ ```json
26
+ {
27
+ "mcpServers": {
28
+ "ai-sdk-5-migration": {
29
+ "url": "https://ai-sdk-5-migration-mcp-server.vercel.app/api/mcp"
30
+ }
31
+ }
32
+ }
33
+ ```
34
+
35
+ After saving, open the command palette (Cmd+Shift+P on macOS, Ctrl+Shift+P on Windows/Linux) and search for "View: Open MCP Settings". Verify the new server appears and is toggled on.
36
+
37
+ Then use this prompt:
38
+
39
+ ```
40
+ Please migrate this project to AI SDK 5 using the ai-sdk-5-migration mcp server. Start by creating a checklist.
41
+ ```
42
+
43
+ For more information, see the [AI SDK 5 Migration MCP Server repository](https://github.com/vercel-labs/ai-sdk-5-migration-mcp-server).
44
+
45
+ ## AI SDK 5.0 Package Versions
46
+
47
+ Update the following packages to these versions in your `package.json` file(s):
48
+
49
+ - `ai` package: `5.0.0`
50
+ - `@ai-sdk/provider` package: `2.0.0`
51
+ - `@ai-sdk/provider-utils` package: `3.0.0`
52
+ - `@ai-sdk/*` packages: `2.0.0` (other `@ai-sdk` packages)
53
+
54
+ Additionally, you need to update the following peer dependencies:
55
+
56
+ - `zod` package: `4.1.8` or later (recommended to avoid TypeScript performance issues)
57
+
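+ For reference, a minimal sketch of the resulting `package.json` dependencies (the provider and UI packages shown are illustrative; include whichever `@ai-sdk/*` packages your project actually uses):
+
+ ```json
+ {
+   "dependencies": {
+     "ai": "^5.0.0",
+     "@ai-sdk/openai": "^2.0.0",
+     "@ai-sdk/react": "^2.0.0",
+     "zod": "^4.1.8"
+   }
+ }
+ ```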
58
+ An example upgrade command would be:
59
+
60
+ ```
61
+ npm install ai @ai-sdk/react @ai-sdk/openai zod@^4.1.8
62
+ ```
63
+
64
+ <Note>
65
+ If you encounter TypeScript performance issues after upgrading, ensure you're
66
+ using Zod 4.1.8 or later. If the issue persists, update your `tsconfig.json`
67
+ to use `moduleResolution: "nodenext"`. See the [TypeScript performance
68
+ troubleshooting guide](/docs/troubleshooting/typescript-performance-zod) for
69
+ more details.
70
+ </Note>
71
+
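+ A minimal sketch of that `tsconfig.json` change (keep the rest of your existing compiler options; `module` must also be set to `nodenext` when using this resolution mode):
+
+ ```json
+ {
+   "compilerOptions": {
+     "module": "nodenext",
+     "moduleResolution": "nodenext"
+   }
+ }
+ ```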
72
+ ## Codemods
73
+
74
+ The AI SDK provides Codemod transformations to help upgrade your codebase when a
75
+ feature is deprecated, removed, or otherwise changed.
76
+
77
+ Codemods are transformations that run on your codebase automatically. They
78
+ allow you to easily apply many changes without having to manually go through
79
+ every file.
80
+
81
+ <Note>
82
+ Codemods are intended as a tool to help you with the upgrade process. They may
83
+ not cover all of the changes you need to make. You may need to make additional
84
+ changes manually.
85
+ </Note>
86
+
87
+ You can run all codemods provided as part of the 5.0 upgrade process by running
88
+ the following command from the root of your project:
89
+
90
+ ```sh
91
+ npx @ai-sdk/codemod upgrade
92
+ ```
93
+
94
+ To run only the v5 codemods (v4 → v5 migration):
95
+
96
+ ```sh
97
+ npx @ai-sdk/codemod v5
98
+ ```
99
+
100
+ Individual codemods can be run by specifying the name of the codemod:
101
+
102
+ ```sh
103
+ npx @ai-sdk/codemod <codemod-name> <path>
104
+ ```
105
+
106
+ For example, to run a specific v5 codemod:
107
+
108
+ ```sh
109
+ npx @ai-sdk/codemod v5/rename-format-stream-part src/
110
+ ```
111
+
112
+ See also the [table of codemods](#codemod-table). In addition, the latest set of
113
+ codemods can be found in the
114
+ [`@ai-sdk/codemod`](https://github.com/vercel/ai/tree/main/packages/codemod/src/codemods)
115
+ repository.
116
+
117
+ ## AI SDK Core Changes
118
+
119
+ ### generateText and streamText Changes
120
+
121
+ #### Maximum Output Tokens
122
+
123
+ The `maxTokens` parameter has been renamed to `maxOutputTokens` for clarity.
124
+
125
+ ```tsx filename="AI SDK 4.0"
126
+ const result = await generateText({
127
+ model: __MODEL__,
128
+ maxTokens: 1024,
129
+ prompt: 'Hello, world!',
130
+ });
131
+ ```
132
+
133
+ ```tsx filename="AI SDK 5.0"
134
+ const result = await generateText({
135
+ model: __MODEL__,
136
+ maxOutputTokens: 1024,
137
+ prompt: 'Hello, world!',
138
+ });
139
+ ```
140
+
141
+ ### Message and Type System Changes
142
+
143
+ #### Core Type Renames
144
+
145
+ ##### `CoreMessage` → `ModelMessage`
146
+
147
+ ```tsx filename="AI SDK 4.0"
148
+ import { CoreMessage } from 'ai';
149
+ ```
150
+
151
+ ```tsx filename="AI SDK 5.0"
152
+ import { ModelMessage } from 'ai';
153
+ ```
154
+
155
+ ##### `Message` → `UIMessage`
156
+
157
+ ```tsx filename="AI SDK 4.0"
158
+ import { Message, CreateMessage } from 'ai';
159
+ ```
160
+
161
+ ```tsx filename="AI SDK 5.0"
162
+ import { UIMessage, CreateUIMessage } from 'ai';
163
+ ```
164
+
165
+ ##### `convertToCoreMessages` → `convertToModelMessages`
166
+
167
+ ```tsx filename="AI SDK 4.0"
168
+ import { convertToCoreMessages, streamText } from 'ai';
169
+
170
+ const result = await streamText({
171
+ model: __MODEL__,
172
+ messages: convertToCoreMessages(messages),
173
+ });
174
+ ```
175
+
176
+ ```tsx filename="AI SDK 5.0"
177
+ import { convertToModelMessages, streamText } from 'ai';
178
+
179
+ const result = await streamText({
180
+ model: __MODEL__,
181
+ messages: convertToModelMessages(messages),
182
+ });
183
+ ```
184
+
185
+ <Note>
186
+ For more information about model messages, see the [Model Message
187
+ reference](/docs/reference/ai-sdk-core/model-message).
188
+ </Note>
189
+
190
+ ### UIMessage Changes
191
+
192
+ #### Content → Parts Array
193
+
194
+ For `UIMessage`s (previously called `Message`), the `.content` property has been replaced with a `parts` array structure.
195
+
196
+ ```tsx filename="AI SDK 4.0"
197
+ import { type Message } from 'ai'; // v4 Message type
198
+
199
+ // Messages (useChat) - had content property
200
+ const message: Message = {
201
+ id: '1',
202
+ role: 'user',
203
+ content: 'Bonjour!',
204
+ };
205
+ ```
206
+
207
+ ```tsx filename="AI SDK 5.0"
208
+ import { type UIMessage, type ModelMessage } from 'ai';
209
+
210
+ // UIMessages (useChat) - now use parts array
211
+ const uiMessage: UIMessage = {
212
+ id: '1',
213
+ role: 'user',
214
+ parts: [{ type: 'text', text: 'Bonjour!' }],
215
+ };
216
+ ```
217
+
218
+ #### Data Role Removed
219
+
220
+ The `data` role has been removed from UI messages.
221
+
222
+ ```tsx filename="AI SDK 4.0"
223
+ const message = {
224
+ role: 'data',
225
+ content: 'Some content',
226
+ data: { customField: 'value' },
227
+ };
228
+ ```
229
+
230
+ ```tsx filename="AI SDK 5.0"
231
+ // V5: Use UI message streams with custom data parts
232
+ const stream = createUIMessageStream({
233
+ execute({ writer }) {
234
+ // Write custom data instead of message annotations
235
+ writer.write({
236
+ type: 'data-custom',
237
+ id: 'custom-1',
238
+ data: { customField: 'value' },
239
+ });
240
+ },
241
+ });
242
+ ```
243
+
244
+ #### UIMessage Reasoning Structure
245
+
246
+ The reasoning property on UI messages has been moved to parts.
247
+
248
+ ```tsx filename="AI SDK 4.0"
249
+ const message: Message = {
250
+ role: 'assistant',
251
+ content: 'Hello',
252
+ reasoning: 'I will greet the user',
253
+ };
254
+ ```
255
+
256
+ ```tsx filename="AI SDK 5.0"
257
+ const message: UIMessage = {
258
+ role: 'assistant',
259
+ parts: [
260
+ {
261
+ type: 'reasoning',
262
+ text: 'I will greet the user',
263
+ },
264
+ {
265
+ type: 'text',
266
+ text: 'Hello',
267
+ },
268
+ ],
269
+ };
270
+ ```
271
+
272
+ #### Reasoning Part Property Rename
273
+
274
+ The `reasoning` property on reasoning UI parts has been renamed to `text`.
275
+
276
+ ```tsx filename="AI SDK 4.0"
277
+ {
278
+ message.parts.map((part, index) => {
279
+ if (part.type === 'reasoning') {
280
+ return (
281
+ <div key={index} className="reasoning-display">
282
+ {part.reasoning}
283
+ </div>
284
+ );
285
+ }
286
+ });
287
+ }
288
+ ```
289
+
290
+ ```tsx filename="AI SDK 5.0"
291
+ {
292
+ message.parts.map((part, index) => {
293
+ if (part.type === 'reasoning') {
294
+ return (
295
+ <div key={index} className="reasoning-display">
296
+ {part.text}
297
+ </div>
298
+ );
299
+ }
300
+ });
301
+ }
302
+ ```
303
+
304
+ ### File Part Changes
305
+
306
+ File parts now use `.url` and `.mediaType` instead of `.data` and `.mimeType`.
307
+
308
+ ```tsx filename="AI SDK 4.0"
309
+ {
310
+ messages.map(message => (
311
+ <div key={message.id}>
312
+ {message.parts.map((part, index) => {
313
+ if (part.type === 'text') {
314
+ return <div key={index}>{part.text}</div>;
315
+ } else if (part.type === 'file' && part.mimeType.startsWith('image/')) {
316
+ return (
317
+ <img
318
+ key={index}
319
+ src={`data:${part.mimeType};base64,${part.data}`}
320
+ />
321
+ );
322
+ }
323
+ })}
324
+ </div>
325
+ ));
326
+ }
327
+ ```
328
+
329
+ ```tsx filename="AI SDK 5.0"
330
+ {
331
+ messages.map(message => (
332
+ <div key={message.id}>
333
+ {message.parts.map((part, index) => {
334
+ if (part.type === 'text') {
335
+ return <div key={index}>{part.text}</div>;
336
+ } else if (
337
+ part.type === 'file' &&
338
+ part.mediaType.startsWith('image/')
339
+ ) {
340
+ return <img key={index} src={part.url} />;
341
+ }
342
+ })}
343
+ </div>
344
+ ));
345
+ }
346
+ ```
347
+
348
+ ### Stream Data Removal
349
+
350
+ The `StreamData` class has been completely removed and replaced with UI message streams for custom data.
351
+
352
+ ```tsx filename="AI SDK 4.0"
353
+ import { StreamData } from 'ai';
354
+
355
+ const streamData = new StreamData();
356
+ streamData.append('custom-data');
357
+ streamData.close();
358
+ ```
359
+
360
+ ```tsx filename="AI SDK 5.0"
361
+ import { createUIMessageStream, createUIMessageStreamResponse } from 'ai';
362
+
363
+ const stream = createUIMessageStream({
364
+ execute({ writer }) {
365
+ // Write custom data parts
366
+ writer.write({
367
+ type: 'data-custom',
368
+ id: 'custom-1',
369
+ data: 'custom-data',
370
+ });
371
+
372
+ // Can merge with LLM streams
373
+ const result = streamText({
374
+ model: __MODEL__,
375
+ messages,
376
+ });
377
+
378
+ writer.merge(result.toUIMessageStream());
379
+ },
380
+ });
381
+
382
+ return createUIMessageStreamResponse({ stream });
383
+ ```
384
+
385
+ ### Custom Data Streaming: writeMessageAnnotation/writeData Removed
386
+
387
+ The `writeMessageAnnotation` and `writeData` methods from `DataStreamWriter` have been removed. Instead, use custom data parts with the new `UIMessage` stream architecture.
388
+
389
+ ```tsx filename="AI SDK 4.0"
390
+ import { createDataStreamResponse, streamText } from 'ai';
391
+
392
+ export async function POST(req: Request) {
393
+ const { messages } = await req.json();
394
+
395
+ return createDataStreamResponse({
396
+ execute: dataStream => {
397
+ // Write general data
398
+ dataStream.writeData('call started');
399
+
400
+ const result = streamText({
401
+ model: __MODEL__,
402
+ messages,
403
+ onChunk() {
404
+ // Write message annotations
405
+ dataStream.writeMessageAnnotation({
406
+ status: 'streaming',
407
+ timestamp: Date.now(),
408
+ });
409
+ },
410
+ onFinish() {
411
+ // Write final annotations
412
+ dataStream.writeMessageAnnotation({
413
+ id: generateId(),
414
+ completed: true,
415
+ });
416
+
417
+ dataStream.writeData('call completed');
418
+ },
419
+ });
420
+
421
+ result.mergeIntoDataStream(dataStream);
422
+ },
423
+ });
424
+ }
425
+ ```
426
+
427
+ ```tsx filename="AI SDK 5.0"
428
+ import {
429
+ createUIMessageStream,
430
+ createUIMessageStreamResponse,
431
+ streamText,
432
+ generateId,
433
+ } from 'ai';
434
+
435
+ export async function POST(req: Request) {
436
+ const { messages } = await req.json();
437
+
438
+ const stream = createUIMessageStream({
439
+ execute: ({ writer }) => {
440
+ const statusId = generateId();
441
+
442
+ // Write general status data (later writes reuse the same id to update this part)
443
+ writer.write({
444
+ type: 'data-status',
445
+ id: statusId,
446
+ data: { status: 'call started' },
447
+ });
448
+
449
+ const result = streamText({
450
+ model: __MODEL__,
451
+ messages,
452
+ onChunk() {
453
+ // Write data parts that update during streaming
454
+ writer.write({
455
+ type: 'data-status',
456
+ id: statusId,
457
+ data: {
458
+ status: 'streaming',
459
+ timestamp: Date.now(),
460
+ },
461
+ });
462
+ },
463
+ onFinish() {
464
+ // Write final data parts
465
+ writer.write({
466
+ type: 'data-status',
467
+ id: statusId,
468
+ data: {
469
+ status: 'completed',
470
+ },
471
+ });
472
+ },
473
+ });
474
+
475
+ writer.merge(result.toUIMessageStream());
476
+ },
477
+ });
478
+
479
+ return createUIMessageStreamResponse({ stream });
480
+ }
481
+ ```
482
+
483
+ <Note>
484
+ For more detailed information about streaming custom data in v5, see the
485
+ [Streaming Data guide](/docs/ai-sdk-ui/streaming-data).
486
+ </Note>
487
+
488
+ #### Provider Metadata → Provider Options
489
+
490
+ The `providerMetadata` input parameter has been renamed to `providerOptions`. Note that the returned metadata in results is still called `providerMetadata`.
491
+
492
+ ```tsx filename="AI SDK 4.0"
493
+ const result = await generateText({
494
+ model: 'openai/gpt-5',
495
+ prompt: 'Hello',
496
+ providerMetadata: {
497
+ openai: { store: false },
498
+ },
499
+ });
500
+ ```
501
+
502
+ ```tsx filename="AI SDK 5.0"
503
+ const result = await generateText({
504
+ model: 'openai/gpt-5',
505
+ prompt: 'Hello',
506
+ providerOptions: {
507
+ // Input parameter renamed
508
+ openai: { store: false },
509
+ },
510
+ });
511
+
512
+ // Returned metadata still uses providerMetadata:
513
+ console.log(result.providerMetadata?.openai);
514
+ ```
515
+
516
+ #### Tool Definition Changes (parameters → inputSchema)
517
+
518
+ Tool definitions now use `inputSchema` instead of `parameters`, and the related error classes have been renamed to match.
519
+
520
+ ```tsx filename="AI SDK 4.0"
521
+ import { tool } from 'ai';
522
+
523
+ const weatherTool = tool({
524
+ description: 'Get the weather for a city',
525
+ parameters: z.object({
526
+ city: z.string(),
527
+ }),
528
+ execute: async ({ city }) => {
529
+ return `Weather in ${city}`;
530
+ },
531
+ });
532
+ ```
533
+
534
+ ```tsx filename="AI SDK 5.0"
535
+ import { tool } from 'ai';
536
+
537
+ const weatherTool = tool({
538
+ description: 'Get the weather for a city',
539
+ inputSchema: z.object({
540
+ city: z.string(),
541
+ }),
542
+ execute: async ({ city }) => {
543
+ return `Weather in ${city}`;
544
+ },
545
+ });
546
+ ```
547
+
548
+ #### Tool Result Content: experimental_toToolResultContent → toModelOutput
549
+
550
+ The `experimental_toToolResultContent` option has been renamed to `toModelOutput` and is no longer experimental.
551
+
552
+ ```tsx filename="AI SDK 4.0"
553
+ const screenshotTool = tool({
554
+ description: 'Take a screenshot',
555
+ parameters: z.object({}),
556
+ execute: async () => {
557
+ const imageData = await takeScreenshot();
558
+ return imageData; // base64 string
559
+ },
560
+ experimental_toToolResultContent: result => [{ type: 'image', data: result }],
561
+ });
562
+ ```
563
+
564
+ ```tsx filename="AI SDK 5.0"
565
+ const screenshotTool = tool({
566
+ description: 'Take a screenshot',
567
+ inputSchema: z.object({}),
568
+ execute: async () => {
569
+ const imageData = await takeScreenshot();
570
+ return imageData;
571
+ },
572
+ toModelOutput: result => ({
573
+ type: 'content',
574
+ value: [{ type: 'media', mediaType: 'image/png', data: result }],
575
+ }),
576
+ });
577
+ ```
578
+
579
+ ### Tool Property Changes (args/result → input/output)
580
+
581
+ Tool call and result properties have been renamed (`args` → `input`, `result` → `output`) for consistency with the `inputSchema` naming.
582
+
583
+ ```tsx filename="AI SDK 4.0"
584
+ // Tool calls used "args" and "result"
585
+ for await (const part of result.fullStream) {
586
+ switch (part.type) {
587
+ case 'tool-call':
588
+ console.log('Tool args:', part.args);
589
+ break;
590
+ case 'tool-result':
591
+ console.log('Tool result:', part.result);
592
+ break;
593
+ }
594
+ }
595
+ ```
596
+
597
+ ```tsx filename="AI SDK 5.0"
598
+ // Tool calls now use "input" and "output"
599
+ for await (const part of result.fullStream) {
600
+ switch (part.type) {
601
+ case 'tool-call':
602
+ console.log('Tool input:', part.input);
603
+ break;
604
+ case 'tool-result':
605
+ console.log('Tool output:', part.output);
606
+ break;
607
+ }
608
+ }
609
+ ```
610
+
611
+ ### Tool Execution Error Handling
612
+
613
+ The `ToolExecutionError` class has been removed. Tool execution errors now appear as `tool-error` content parts in the result steps, enabling automated LLM roundtrips in multi-step scenarios.
614
+
615
+ ```tsx filename="AI SDK 4.0"
616
+ import { ToolExecutionError } from 'ai';
617
+
618
+ try {
619
+ const result = await generateText({
620
+ // ...
621
+ });
622
+ } catch (error) {
623
+ if (error instanceof ToolExecutionError) {
624
+ console.log('Tool execution failed:', error.message);
625
+ console.log('Tool name:', error.toolName);
626
+ console.log('Tool input:', error.toolInput);
627
+ }
628
+ }
629
+ ```
630
+
631
+ ```tsx filename="AI SDK 5.0"
632
+ // Tool execution errors now appear in result steps
633
+ const { steps } = await generateText({
634
+ // ...
635
+ });
636
+
637
+ // check for tool errors in the steps
638
+ const toolErrors = steps.flatMap(step =>
639
+ step.content.filter(part => part.type === 'tool-error'),
640
+ );
641
+
642
+ toolErrors.forEach(toolError => {
643
+ console.log('Tool error:', toolError.error);
644
+ console.log('Tool name:', toolError.toolName);
645
+ console.log('Tool input:', toolError.input);
646
+ });
647
+ ```
648
+
649
+ For streaming scenarios, tool execution errors appear as `tool-error` parts in the stream, while other errors appear as `error` parts.
650
+
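+ A minimal sketch of handling both part types when iterating the full stream (the `tool-error` fields shown mirror the step content parts above; `error` parts carry the underlying error):
+
+ ```tsx filename="AI SDK 5.0"
+ for await (const part of result.fullStream) {
+   switch (part.type) {
+     case 'tool-error':
+       // a tool's execute function failed; the stream continues
+       console.error('Tool failed:', part.toolName, part.input, part.error);
+       break;
+     case 'error':
+       // any other error, e.g. from the provider
+       console.error('Stream error:', part.error);
+       break;
+   }
+ }
+ ```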
651
+ ### Tool Call Streaming Now Default (toolCallStreaming Removed)
652
+
653
+ The `toolCallStreaming` option has been removed in AI SDK 5.0. Tool call streaming is now always enabled by default.
654
+
655
+ ```tsx filename="AI SDK 4.0"
656
+ const result = streamText({
657
+ model: __MODEL__,
658
+ messages,
659
+ toolCallStreaming: true, // Optional parameter to enable streaming
660
+ tools: {
661
+ weatherTool,
662
+ searchTool,
663
+ },
664
+ });
665
+ ```
666
+
667
+ ```tsx filename="AI SDK 5.0"
668
+ const result = streamText({
669
+ model: __MODEL__,
670
+ messages: convertToModelMessages(messages),
671
+ // toolCallStreaming removed - streaming is always enabled
672
+ tools: {
673
+ weatherTool,
674
+ searchTool,
675
+ },
676
+ });
677
+ ```
678
+
679
+ ### Tool Part Type Changes (UIMessage)
680
+
681
+ In v5, UI tool parts use typed naming: `tool-${toolName}` instead of generic types.
682
+
683
+ ```tsx filename="AI SDK 4.0"
684
+ // Generic tool-invocation type
685
+ {
686
+ message.parts.map(part => {
687
+ if (part.type === 'tool-invocation') {
688
+ return <div>{part.toolInvocation.toolName}</div>;
689
+ }
690
+ });
691
+ }
692
+ ```
693
+
694
+ ```tsx filename="AI SDK 5.0"
695
+ // Type-safe tool parts with specific names
696
+ {
697
+ message.parts.map(part => {
698
+ switch (part.type) {
699
+ case 'tool-getWeatherInformation':
700
+ return <div>Getting weather...</div>;
701
+ case 'tool-askForConfirmation':
702
+ return <div>Asking for confirmation...</div>;
703
+ }
704
+ });
705
+ }
706
+ ```
707
+
708
+ ### Dynamic Tools Support
709
+
710
+ AI SDK 5.0 introduces dynamic tools for handling tools with unknown types at development time, such as MCP tools without schemas or user-defined functions at runtime.
711
+
712
+ #### New dynamicTool Helper
713
+
714
+ The new `dynamicTool` helper function allows you to define tools where the input and output types are not known at compile time.
715
+
716
+ ```tsx filename="AI SDK 5.0"
717
+ import { dynamicTool } from 'ai';
718
+ import { z } from 'zod';
719
+
720
+ // Define a dynamic tool
721
+ const runtimeTool = dynamicTool({
722
+ description: 'A tool defined at runtime',
723
+ inputSchema: z.object({}),
724
+ execute: async input => {
725
+ // Input and output are typed as 'unknown'
726
+ return { result: `Processed: ${JSON.stringify(input)}` };
727
+ },
728
+ });
729
+ ```
730
+
731
+ #### MCP Tools Without Schemas
732
+
733
+ MCP tools that don't provide schemas are now automatically treated as dynamic tools:
734
+
735
+ ```tsx filename="AI SDK 5.0"
736
+ import { MCPClient } from 'ai';
737
+
738
+ const client = new MCPClient({
739
+ /* ... */
740
+ });
741
+ const tools = await client.getTools();
742
+
743
+ // Tools without schemas are now 'dynamic' type
744
+ // and won't break type inference when mixed with static tools
745
+ ```
746
+
747
+ #### Type-Safe Handling with Mixed Tools
748
+
749
+ When using both static and dynamic tools together, use the `dynamic` flag for type narrowing:
750
+
751
+ ```tsx filename="AI SDK 5.0"
752
+ const result = await generateText({
753
+ model: __MODEL__,
754
+ tools: {
755
+ // Static tool with known types
756
+ weather: weatherTool,
757
+ // Dynamic tool with unknown types
758
+ customDynamicTool: dynamicTool({
759
+ /* ... */
760
+ }),
761
+ },
762
+ onStepFinish: step => {
763
+ // Handle tool calls with type safety
764
+ for (const toolCall of step.toolCalls) {
765
+ if (toolCall.dynamic) {
766
+ // Dynamic tool: input/output are 'unknown'
767
+ console.log('Dynamic tool called:', toolCall.toolName);
768
+ continue;
769
+ }
770
+
771
+ // Static tools have full type inference
772
+ switch (toolCall.toolName) {
773
+ case 'weather':
774
+ // TypeScript knows the exact types
775
+ console.log(toolCall.input.location); // string
776
+ break;
777
+ }
778
+ }
779
+ },
780
+ });
781
+ ```
782
+
783
+ #### New dynamic-tool UI Part
784
+
785
+ UI messages now include a `dynamic-tool` part type for rendering dynamic tool invocations:
786
+
787
+ ```tsx filename="AI SDK 5.0"
788
+ {
789
+ message.parts.map((part, index) => {
790
+ switch (part.type) {
791
+ // Static tools use specific types
792
+ case 'tool-weather':
793
+ return <div>Weather: {part.input.city}</div>;
794
+
795
+ // Dynamic tools use the generic dynamic-tool type
796
+ case 'dynamic-tool':
797
+ return (
798
+ <div>
799
+ Dynamic tool: {part.toolName}
800
+ <pre>{JSON.stringify(part.input, null, 2)}</pre>
801
+ </div>
802
+ );
803
+ }
804
+ });
805
+ }
806
+ ```
807
+
808
+ #### Breaking Change: Type Narrowing Required for Tool Calls and Results
809
+
810
+ When iterating over `toolCalls` and `toolResults`, you now need to check the `dynamic` flag first for proper type narrowing:
811
+
812
+ ```tsx filename="AI SDK 4.0"
813
+ // Direct type checking worked without dynamic flag
814
+ onStepFinish: step => {
815
+ for (const toolCall of step.toolCalls) {
816
+ switch (toolCall.toolName) {
817
+ case 'weather':
818
+ console.log(toolCall.input.location); // typed as string
819
+ break;
820
+ case 'search':
821
+ console.log(toolCall.input.query); // typed as string
822
+ break;
823
+ }
824
+ }
825
+ };
826
+ ```
827
+
828
+ ```tsx filename="AI SDK 5.0"
829
+ // Must check dynamic flag first for type narrowing
830
+ onStepFinish: step => {
831
+ for (const toolCall of step.toolCalls) {
832
+ // Check if it's a dynamic tool first
833
+ if (toolCall.dynamic) {
834
+ console.log('Dynamic tool:', toolCall.toolName);
835
+ console.log('Input:', toolCall.input); // typed as unknown
836
+ continue;
837
+ }
838
+
839
+ // Now TypeScript knows it's a static tool
840
+ switch (toolCall.toolName) {
841
+ case 'weather':
842
+ console.log(toolCall.input.location); // typed as string
843
+ break;
844
+ case 'search':
845
+ console.log(toolCall.input.query); // typed as string
846
+ break;
847
+ }
848
+ }
849
+ };
850
+ ```
851
+
852
+ ### Tool UI Part State Changes
853
+
854
+ Tool UI parts now use more granular states that better represent the streaming lifecycle and error handling.
855
+
856
+ ```tsx filename="AI SDK 4.0"
857
+ // Old states
858
+ {
859
+ message.parts.map(part => {
860
+ if (part.type === 'tool-invocation') {
861
+ switch (part.toolInvocation.state) {
862
+ case 'partial-call':
863
+ return <div>Loading...</div>;
864
+ case 'call':
865
+ return (
866
+ <div>
867
+ Tool called with {JSON.stringify(part.toolInvocation.args)}
868
+ </div>
869
+ );
870
+ case 'result':
871
+ return <div>Result: {part.toolInvocation.result}</div>;
872
+ }
873
+ }
874
+ });
875
+ }
876
+ ```
877
+
878
+ ```tsx filename="AI SDK 5.0"
879
+ // New granular states
880
+ {
881
+ message.parts.map(part => {
882
+ switch (part.type) {
883
+ case 'tool-getWeatherInformation':
884
+ switch (part.state) {
885
+ case 'input-streaming':
886
+ return <pre>{JSON.stringify(part.input, null, 2)}</pre>;
887
+ case 'input-available':
888
+ return <div>Getting weather for {part.input.city}...</div>;
889
+ case 'output-available':
890
+ return <div>Weather: {part.output}</div>;
891
+ case 'output-error':
892
+ return <div>Error: {part.errorText}</div>;
893
+ }
894
+ }
895
+ });
896
+ }
897
+ ```
898
+
899
+ **State Changes:**
900
+
901
+ - `partial-call` → `input-streaming` (tool input being streamed)
902
+ - `call` → `input-available` (tool input complete, ready to execute)
903
+ - `result` → `output-available` (tool execution successful)
904
+ - New: `output-error` (tool execution failed)
905
+
906
+ #### Rendering Tool Invocations (Catch-All Pattern)
907
+
908
+ In v4, you typically rendered tool invocations using a catch-all `tool-invocation` type. In v5, the **recommended approach is to handle each tool specifically using its typed part name (e.g., `tool-getWeather`)**. However, if you need a catch-all pattern for rendering all tool invocations the same way, you can use the `isToolUIPart` and `getToolName` helper functions as a fallback.
909
+
910
+ ```tsx filename="AI SDK 4.0"
911
+ {
912
+ message.parts.map((part, index) => {
913
+ switch (part.type) {
914
+ case 'text':
915
+ return <div key={index}>{part.text}</div>;
916
+ case 'tool-invocation':
917
+ const { toolInvocation } = part;
918
+ return (
919
+ <details key={`tool-${toolInvocation.toolCallId}`}>
920
+ <summary>
921
+ <span>{toolInvocation.toolName}</span>
922
+ {toolInvocation.state === 'result' ? (
923
+ <span>Click to expand</span>
924
+ ) : (
925
+ <span>calling...</span>
926
+ )}
927
+ </summary>
928
+ {toolInvocation.state === 'result' ? (
929
+ <div>
930
+ <pre>{JSON.stringify(toolInvocation.result, null, 2)}</pre>
931
+ </div>
932
+ ) : null}
933
+ </details>
934
+ );
935
+ }
936
+ });
937
+ }
938
+ ```
939
+
940
+ ```tsx filename="AI SDK 5.0"
941
+ import { isToolUIPart, getToolName } from 'ai';
942
+
943
+ {
944
+ message.parts.map((part, index) => {
945
+ switch (part.type) {
946
+ case 'text':
947
+ return <div key={index}>{part.text}</div>;
948
+ default:
949
+ if (isToolUIPart(part)) {
950
+ const toolInvocation = part;
951
+ return (
952
+ <details key={`tool-${toolInvocation.toolCallId}`}>
953
+ <summary>
954
+ <span>{getToolName(toolInvocation)}</span>
955
+ {toolInvocation.state === 'output-available' ? (
956
+ <span>Click to expand</span>
957
+ ) : (
958
+ <span>calling...</span>
959
+ )}
960
+ </summary>
961
+ {toolInvocation.state === 'output-available' ? (
962
+ <div>
963
+ <pre>{JSON.stringify(toolInvocation.output, null, 2)}</pre>
964
+ </div>
965
+ ) : null}
966
+ </details>
967
+ );
968
+ }
969
+ }
970
+ });
971
+ }
972
+ ```
973
+
974
+ #### Media Type Standardization
975
+
976
+ `mimeType` has been renamed to `mediaType` for consistency. Both image and file types are supported in model messages.
977
+
978
+ ```tsx filename="AI SDK 4.0"
979
+ const result = await generateText({
980
+ model: someModel,
981
+ messages: [
982
+ {
983
+ role: 'user',
984
+ content: [
985
+ { type: 'text', text: 'What do you see?' },
986
+ {
987
+ type: 'image',
988
+ image: new Uint8Array([0, 1, 2, 3]),
989
+ mimeType: 'image/png',
990
+ },
991
+ {
992
+ type: 'file',
993
+ data: contents,
994
+ mimeType: 'application/pdf',
995
+ },
996
+ ],
997
+ },
998
+ ],
999
+ });
1000
+ ```
1001
+
1002
+ ```tsx filename="AI SDK 5.0"
1003
+ const result = await generateText({
1004
+ model: someModel,
1005
+ messages: [
1006
+ {
1007
+ role: 'user',
1008
+ content: [
1009
+ { type: 'text', text: 'What do you see?' },
1010
+ {
1011
+ type: 'image',
1012
+ image: new Uint8Array([0, 1, 2, 3]),
1013
+ mediaType: 'image/png',
1014
+ },
1015
+ {
1016
+ type: 'file',
1017
+ data: contents,
1018
+ mediaType: 'application/pdf',
1019
+ },
1020
+ ],
1021
+ },
1022
+ ],
1023
+ });
1024
+ ```
1025
+
1026
+ ### Reasoning Support
1027
+
1028
+ #### Reasoning Text Property Rename
1029
+
1030
+ The `.reasoning` property has been renamed to `.reasoningText` for multi-step generations.
1031
+
1032
+ ```tsx filename="AI SDK 4.0"
1033
+ for (const step of steps) {
1034
+ console.log(step.reasoning);
1035
+ }
1036
+ ```
1037
+
1038
+ ```tsx filename="AI SDK 5.0"
1039
+ for (const step of steps) {
1040
+ console.log(step.reasoningText);
1041
+ }
1042
+ ```
1043
+
1044
+ #### Generate Text Reasoning Property Changes
1045
+
1046
+ In `generateText()` and `streamText()` results, reasoning properties have been renamed.
1047
+
1048
+ ```tsx filename="AI SDK 4.0"
1049
+ const result = await generateText({
1050
+ model: anthropic('claude-sonnet-4-20250514'),
1051
+ prompt: 'Explain your reasoning',
1052
+ });
1053
+
1054
+ console.log(result.reasoning); // String reasoning text
1055
+ console.log(result.reasoningDetails); // Array of reasoning details
1056
+ ```
1057
+
1058
+ ```tsx filename="AI SDK 5.0"
1059
+ const result = await generateText({
1060
+ model: anthropic('claude-sonnet-4-20250514'),
1061
+ prompt: 'Explain your reasoning',
1062
+ });
1063
+
1064
+ console.log(result.reasoningText); // String reasoning text
1065
+ console.log(result.reasoning); // Array of reasoning details
1066
+ ```
1067
+
1068
+ ### Continuation Steps Removal
1069
+
1070
+ The `experimental_continueSteps` option has been removed from `generateText()`.
1071
+
1072
+ ```tsx filename="AI SDK 4.0"
1073
+ const result = await generateText({
1074
+ experimental_continueSteps: true,
1075
+ // ...
1076
+ });
1077
+ ```
1078
+
1079
+ ```tsx filename="AI SDK 5.0"
1080
+ const result = await generateText({
1081
+ // experimental_continueSteps has been removed
1082
+ // Use newer models with higher output token limits instead
1083
+ // ...
1084
+ });
1085
+ ```
1086
+
1087
+ ### Image Generation Changes
1088
+
1089
+ Image model settings have been moved out of the model factory call: `maxImagesPerCall` is now a top-level `generateImage` option, and provider-specific settings such as `pollIntervalMillis` move to `providerOptions`.
1090
+
1091
+ ```tsx filename="AI SDK 4.0"
1092
+ await generateImage({
1093
+ model: luma.image('photon-flash-1', {
1094
+ maxImagesPerCall: 5,
1095
+ pollIntervalMillis: 500,
1096
+ }),
1097
+ prompt,
1098
+ n: 10,
1099
+ });
1100
+ ```
1101
+
1102
+ ```tsx filename="AI SDK 5.0"
1103
+ await generateImage({
1104
+ model: luma.image('photon-flash-1'),
1105
+ prompt,
1106
+ n: 10,
1107
+ maxImagesPerCall: 5,
1108
+ providerOptions: {
1109
+ luma: { pollIntervalMillis: 500 },
1110
+ },
1111
+ });
1112
+ ```
1113
+
1114
+ ### Step Result Changes
1115
+
1116
+ #### Step Type Removal
1117
+
1118
+ The `stepType` property has been removed from step results.
1119
+
1120
+ ```tsx filename="AI SDK 4.0"
1121
+ steps.forEach(step => {
1122
+ switch (step.stepType) {
1123
+ case 'initial':
1124
+ console.log('Initial step');
1125
+ break;
1126
+ case 'tool-result':
1127
+ console.log('Tool result step');
1128
+ break;
1129
+ case 'done':
1130
+ console.log('Final step');
1131
+ break;
1132
+ }
1133
+ });
1134
+ ```
1135
+
1136
+ ```tsx filename="AI SDK 5.0"
1137
+ steps.forEach((step, index) => {
1138
+ if (index === 0) {
1139
+ console.log('Initial step');
1140
+ } else if (step.toolResults.length > 0) {
1141
+ console.log('Tool result step');
1142
+ } else {
1143
+ console.log('Final step');
1144
+ }
1145
+ });
1146
+ ```
1147
+
1148
+ ### Step Control: maxSteps → stopWhen
1149
+
1150
+ For core functions like `generateText` and `streamText`, the `maxSteps` parameter has been replaced with `stopWhen`, which provides more flexible control over multi-step execution. The `stopWhen` parameter defines conditions for stopping the generation **when the last step contains tool results**. When multiple conditions are provided as an array, the generation stops if any condition is met.
1151
+
1152
+ ```tsx filename="AI SDK 4.0"
1153
+ // V4: Simple numeric limit
1154
+ const result = await generateText({
1155
+ model: __MODEL__,
1156
+ messages,
1157
+ maxSteps: 5, // Stop after a maximum of 5 steps
1158
+ });
1159
+
1160
+ // useChat with maxSteps
1161
+ const { messages } = useChat({
1162
+ maxSteps: 3, // Stop after a maximum of 3 steps
1163
+ });
1164
+ ```
1165
+
1166
+ ```tsx filename="AI SDK 5.0"
1167
+ import { stepCountIs, hasToolCall } from 'ai';
1168
+
1169
+ // V5: Server-side - flexible stopping conditions with stopWhen
1170
+ const result = await generateText({
1171
+ model: __MODEL__,
1172
+ messages,
1173
+ // Only triggers when last step has tool results
1174
+ stopWhen: stepCountIs(5), // Stop at step 5 if tools were called
1175
+ });
1176
+
1177
+ // Server-side - stop when specific tool is called
1178
+ const result = await generateText({
1179
+ model: __MODEL__,
1180
+ messages,
1181
+ stopWhen: hasToolCall('finalizeTask'), // Stop when finalizeTask tool is called
1182
+ });
1183
+ ```
1184
+
1185
+ **Common stopping patterns:**
1186
+
1187
+ ```tsx filename="AI SDK 5.0"
1188
+ // Stop after N steps (equivalent to old maxSteps)
1189
+ // Note: Only applies when the last step has tool results
1190
+ stopWhen: stepCountIs(5);
1191
+
1192
+ // Stop when specific tool is called
1193
+ stopWhen: hasToolCall('finalizeTask');
1194
+
1195
+ // Multiple conditions (stops if ANY condition is met)
1196
+ stopWhen: [
1197
+ stepCountIs(10), // Maximum 10 steps
1198
+ hasToolCall('submitOrder'), // Or when order is submitted
1199
+ ];
1200
+
1201
+ // Custom condition based on step content
1202
+ stopWhen: ({ steps }) => {
1203
+ const lastStep = steps[steps.length - 1];
1204
+ // Custom logic - only triggers if last step has tool results
1205
+ return lastStep?.text?.includes('COMPLETE');
1206
+ };
1207
+ ```
1208
+
1209
+ **Important:** The `stopWhen` conditions are only evaluated when the last step contains tool results.
1210
+
1211
+ #### Usage vs Total Usage
1212
+
1213
+ Usage properties now distinguish between single step and total usage.
1214
+
1215
+ ```tsx filename="AI SDK 4.0"
1216
+ // usage contained total token usage across all steps
1217
+ console.log(result.usage);
1218
+ ```
1219
+
1220
+ ```tsx filename="AI SDK 5.0"
1221
+ // usage contains token usage from the final step only
1222
+ console.log(result.usage);
1223
+ // totalUsage contains total token usage across all steps
1224
+ console.log(result.totalUsage);
1225
+ ```
1226
+
1227
+ ## AI SDK UI Changes
1228
+
1229
+ ### Package Structure Changes
1230
+
1231
+ ### `@ai-sdk/rsc` Package Extraction
1232
+
1233
+ The `ai/rsc` export has been extracted to a separate package `@ai-sdk/rsc`.
1234
+
1235
+ ```tsx filename="AI SDK 4.0"
1236
+ import { createStreamableValue } from 'ai/rsc';
1237
+ ```
1238
+
1239
+ ```tsx filename="AI SDK 5.0"
1240
+ import { createStreamableValue } from '@ai-sdk/rsc';
1241
+ ```
1242
+
1243
+ <Note>Don't forget to install the new package: `npm install @ai-sdk/rsc`</Note>
1244
+
1245
+ ### React UI Hooks Moved to `@ai-sdk/react`
1246
+
1247
+ The deprecated `ai/react` export has been removed in favor of `@ai-sdk/react`.
1248
+
1249
+ ```tsx filename="AI SDK 4.0"
1250
+ import { useChat } from 'ai/react';
1251
+ ```
1252
+
1253
+ ```tsx filename="AI SDK 5.0"
1254
+ import { useChat } from '@ai-sdk/react';
1255
+ ```
1256
+
1257
+ <Note>
1258
+ Don't forget to install the new package: `npm install @ai-sdk/react`
1259
+ </Note>
1260
+
1261
+ ### useChat Changes
1262
+
1263
+ The `useChat` hook has undergone significant changes in v5, including a new transport architecture, the removal of managed input state, and more.
1264
+
1265
+ #### maxSteps Removal
1266
+
1267
+ The `maxSteps` parameter has been removed from `useChat`. Use server-side `stopWhen` conditions to control multi-step tool execution, and submit client-side tool results either automatically via `sendAutomaticallyWhen` or by manually triggering a new message.
1268
+
1269
+ ```tsx filename="AI SDK 4.0"
1270
+ const { messages, sendMessage } = useChat({
1271
+ maxSteps: 5, // Automatic tool result submission
1272
+ });
1273
+ ```
1274
+
1275
+ ```tsx filename="AI SDK 5.0"
1276
+ // Server-side: Use stopWhen for multi-step control
1277
+ import { streamText, convertToModelMessages, stepCountIs } from 'ai';
1278
+ __PROVIDER_IMPORT__;
1279
+
1280
+ const result = await streamText({
1281
+ model: __MODEL__,
1282
+ messages: convertToModelMessages(messages),
1283
+ stopWhen: stepCountIs(5), // Stop after 5 steps with tool calls
1284
+ });
1285
+
1286
+ // Client-side: Configure automatic submission
1287
+ import { useChat } from '@ai-sdk/react';
1288
+ import {
1289
+ DefaultChatTransport,
1290
+ lastAssistantMessageIsCompleteWithToolCalls,
1291
+ } from 'ai';
1292
+
1293
+ const { messages, sendMessage, addToolOutput } = useChat({
1294
+ // Automatically submit when all tool results are available
1295
+ sendAutomaticallyWhen: lastAssistantMessageIsCompleteWithToolCalls,
1296
+
1297
+ async onToolCall({ toolCall }) {
1298
+ const result = await executeToolCall(toolCall);
1299
+
1300
+ // Important: Don't await addToolOutput inside onToolCall to avoid deadlocks
1301
+ addToolOutput({
1302
+ tool: toolCall.toolName,
1303
+ toolCallId: toolCall.toolCallId,
1304
+ output: result,
1305
+ });
1306
+ },
1307
+ });
1308
+ ```
1309
+
1310
+ <Note>
1311
+ Important: When using `sendAutomaticallyWhen`, don't use `await` with
1312
+ `addToolOutput` inside `onToolCall` as it can cause deadlocks. The `await` is
1313
+ useful when you're not using automatic submission and need to ensure the
1314
+ messages are updated before manually calling `sendMessage()`.
1315
+ </Note>
1316
+
1317
+ This change provides more flexibility for handling tool calls and aligns client behavior with server-side multi-step execution patterns.
1318
+
1319
+ For more details on the new tool submission approach, see the [Tool Result Submission Changes](#tool-result-submission-changes) section below.
1320
+
1321
+ #### Initial Messages Renamed
1322
+
1323
+ The `initialMessages` option has been renamed to `messages`.
1324
+
1325
+ ```tsx filename="AI SDK 4.0"
1326
+ import { useChat, type Message } from '@ai-sdk/react';
1327
+
1328
+ function ChatComponent({ initialMessages }: { initialMessages: Message[] }) {
1329
+ const { messages } = useChat({
1330
+ initialMessages: initialMessages,
1331
+ // ...
1332
+ });
1333
+
1334
+ // your component
1335
+ }
1336
+ ```
1337
+
1338
+ ```tsx filename="AI SDK 5.0"
1339
+ import { useChat, type UIMessage } from '@ai-sdk/react';
1340
+
1341
+ function ChatComponent({ initialMessages }: { initialMessages: UIMessage[] }) {
1342
+ const { messages } = useChat({
1343
+ messages: initialMessages,
1344
+ // ...
1345
+ });
1346
+
1347
+ // your component
1348
+ }
1349
+ ```
1350
+
1351
+ #### Sharing Chat Instances
1352
+
1353
+ In v4, you could share chat state between components by using the same `id` parameter in multiple `useChat` hooks.
1354
+
1355
+ ```tsx filename="AI SDK 4.0"
1356
+ // Component A
1357
+ const { messages } = useChat({
1358
+ id: 'shared-chat',
1359
+ api: '/api/chat',
1360
+ });
1361
+
1362
+ // Component B - would share the same chat state
1363
+ const { messages } = useChat({
1364
+ id: 'shared-chat',
1365
+ api: '/api/chat',
1366
+ });
1367
+ ```
1368
+
1369
+ In v5, you need to explicitly share chat instances by passing a shared `Chat` instance.
1370
+
1371
+ ```tsx filename="AI SDK 5.0"
1372
+ // e.g. Store Chat instance in React Context and create a custom hook
1373
+
1374
+ // Component A
1375
+ const { chat } = useSharedChat(); // Custom hook that accesses shared Chat from context
1376
+
1377
+ const { messages, sendMessage } = useChat({
1378
+ chat, // Pass the shared chat instance
1379
+ });
1380
+
1381
+ // Component B - shares the same chat instance
1382
+ const { chat } = useSharedChat(); // Same hook to access shared Chat from context
1383
+
1384
+ const { messages } = useChat({
1385
+ chat, // Same shared chat instance
1386
+ });
1387
+ ```
1388
+
1389
+ For a complete example of sharing chat state across components, see the [Share Chat State Across Components](/docs/cookbook/use-shared-chat-context) recipe.
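+
+ As a rough sketch of that pattern, the shared instance can live in React context. The `ChatProvider` and `useSharedChat` names below are illustrative application code (not SDK exports), assuming the `Chat` class exported by `@ai-sdk/react`:
+
+ ```tsx
+ 'use client';
+
+ import { createContext, useContext, useState, type ReactNode } from 'react';
+ import { Chat } from '@ai-sdk/react';
+ import { DefaultChatTransport, type UIMessage } from 'ai';
+
+ const ChatContext = createContext<Chat<UIMessage> | null>(null);
+
+ export function ChatProvider({ children }: { children: ReactNode }) {
+   // Create a single Chat instance and share it with the whole subtree
+   const [chat] = useState(
+     () =>
+       new Chat<UIMessage>({
+         transport: new DefaultChatTransport({ api: '/api/chat' }),
+       }),
+   );
+   return <ChatContext.Provider value={chat}>{children}</ChatContext.Provider>;
+ }
+
+ export function useSharedChat() {
+   const chat = useContext(ChatContext);
+   if (!chat) throw new Error('useSharedChat must be used inside <ChatProvider>');
+   return { chat };
+ }
+ ```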
1390
+
1391
+ #### Chat Transport Architecture
1392
+
1393
+ Configuration is now handled through transport objects instead of direct API options.
1394
+
1395
+ ```tsx filename="AI SDK 4.0"
1396
+ import { useChat } from '@ai-sdk/react';
1397
+
1398
+ const { messages } = useChat({
1399
+ api: '/api/chat',
1400
+ credentials: 'include',
1401
+ headers: { 'Custom-Header': 'value' },
1402
+ });
1403
+ ```
1404
+
1405
+ ```tsx filename="AI SDK 5.0"
1406
+ import { useChat } from '@ai-sdk/react';
1407
+ import { DefaultChatTransport } from 'ai';
1408
+
1409
+ const { messages } = useChat({
1410
+ transport: new DefaultChatTransport({
1411
+ api: '/api/chat',
1412
+ credentials: 'include',
1413
+ headers: { 'Custom-Header': 'value' },
1414
+ }),
1415
+ });
1416
+ ```
1417
+
1418
+ #### Removed Managed Input State
1419
+
1420
+ The `useChat` hook no longer manages input state internally. You must now manage input state manually.
1421
+
1422
+ ```tsx filename="AI SDK 4.0"
1423
+ import { useChat } from '@ai-sdk/react';
1424
+
1425
+ export default function Page() {
1426
+ const { messages, input, handleInputChange, handleSubmit } = useChat({
1427
+ api: '/api/chat',
1428
+ });
1429
+
1430
+ return (
1431
+ <form onSubmit={handleSubmit}>
1432
+ <input value={input} onChange={handleInputChange} />
1433
+ <button type="submit">Send</button>
1434
+ </form>
1435
+ );
1436
+ }
1437
+ ```
1438
+
1439
+ ```tsx filename="AI SDK 5.0"
1440
+ import { useChat } from '@ai-sdk/react';
1441
+ import { DefaultChatTransport } from 'ai';
1442
+ import { useState } from 'react';
1443
+
1444
+ export default function Page() {
1445
+ const [input, setInput] = useState('');
1446
+ const { messages, sendMessage } = useChat({
1447
+ transport: new DefaultChatTransport({ api: '/api/chat' }),
1448
+ });
1449
+
1450
+ const handleSubmit = e => {
1451
+ e.preventDefault();
1452
+ sendMessage({ text: input });
1453
+ setInput('');
1454
+ };
1455
+
1456
+ return (
1457
+ <form onSubmit={handleSubmit}>
1458
+ <input value={input} onChange={e => setInput(e.target.value)} />
1459
+ <button type="submit">Send</button>
1460
+ </form>
1461
+ );
1462
+ }
1463
+ ```
1464
+
1465
+ #### Message Sending: `append` → `sendMessage`
1466
+
1467
+ The `append` function has been replaced with `sendMessage` and requires structured message format.
1468
+
1469
+ ```tsx filename="AI SDK 4.0"
1470
+ const { append } = useChat();
1471
+
1472
+ // Simple text message
1473
+ append({ role: 'user', content: 'Hello' });
1474
+
1475
+ // With custom body
1476
+ append(
1477
+ {
1478
+ role: 'user',
1479
+ content: 'Hello',
1480
+ },
1481
+ { body: { imageUrl: 'https://...' } },
1482
+ );
1483
+ ```
1484
+
1485
+ ```tsx filename="AI SDK 5.0"
1486
+ const { sendMessage } = useChat();
1487
+
1488
+ // Simple text message (most common usage)
1489
+ sendMessage({ text: 'Hello' });
1490
+
1491
+ // Or with explicit parts array
1492
+ sendMessage({
1493
+ parts: [{ type: 'text', text: 'Hello' }],
1494
+ });
1495
+
1496
+ // With custom body (via request options)
1497
+ sendMessage(
1498
+ { role: 'user', parts: [{ type: 'text', text: 'Hello' }] },
1499
+ { body: { imageUrl: 'https://...' } },
1500
+ );
1501
+ ```
1502
+
1503
+ #### Message Regeneration: `reload` → `regenerate`
1504
+
1505
+ The `reload` function has been renamed to `regenerate` with enhanced functionality.
1506
+
1507
+ ```tsx filename="AI SDK 4.0"
1508
+ const { reload } = useChat();
1509
+
1510
+ // Regenerate last message
1511
+ reload();
1512
+ ```
1513
+
1514
+ ```tsx filename="AI SDK 5.0"
1515
+ const { regenerate } = useChat();
1516
+
1517
+ // Regenerate last message
1518
+ regenerate();
1519
+
1520
+ // Regenerate specific message
1521
+ regenerate({ messageId: 'message-123' });
1522
+ ```
1523
+
1524
+ #### onResponse Removal
1525
+
1526
+ The `onResponse` callback has been removed from `useChat` and `useCompletion`.
1527
+
1528
+ ```tsx filename="AI SDK 4.0"
1529
+ const { messages } = useChat({
1530
+ onResponse(response) {
1531
+ // handle response
1532
+ },
1533
+ });
1534
+ ```
1535
+
1536
+ ```tsx filename="AI SDK 5.0"
1537
+ const { messages } = useChat({
1538
+ // onResponse is no longer available
1539
+ });
1540
+ ```
1541
+
1542
+ #### Send Extra Message Fields Default
1543
+
1544
+ The `sendExtraMessageFields` option has been removed and is now the default behavior.
1545
+
1546
+ ```tsx filename="AI SDK 4.0"
1547
+ const { messages } = useChat({
1548
+ sendExtraMessageFields: true,
1549
+ });
1550
+ ```
1551
+
1552
+ ```tsx filename="AI SDK 5.0"
1553
+ const { messages } = useChat({
1554
+ // sendExtraMessageFields is now the default
1555
+ });
1556
+ ```
1557
+
1558
+ #### Keep Last Message on Error Removal
1559
+
1560
+ The `keepLastMessageOnError` option has been removed as it's no longer needed.
1561
+
1562
+ ```tsx filename="AI SDK 4.0"
1563
+ const { messages } = useChat({
1564
+ keepLastMessageOnError: true,
1565
+ });
1566
+ ```
1567
+
1568
+ ```tsx filename="AI SDK 5.0"
1569
+ const { messages } = useChat({
1570
+ // keepLastMessageOnError is no longer needed
1571
+ });
1572
+ ```
1573
+
1574
+ #### Chat Request Options Changes
1575
+
1576
+ The `data` and `allowEmptySubmit` options have been removed from `ChatRequestOptions`.
1577
+
1578
+ ```tsx filename="AI SDK 4.0"
1579
+ handleSubmit(e, {
1580
+ data: { imageUrl: 'https://...' },
1581
+ body: { custom: 'value' },
1582
+ allowEmptySubmit: true,
1583
+ });
1584
+ ```
1585
+
1586
+ ```tsx filename="AI SDK 5.0"
1587
+ sendMessage(
1588
+ {
1589
+ /* yourMessage */
1590
+ },
1591
+ {
1592
+ body: {
1593
+ custom: 'value',
1594
+ imageUrl: 'https://...', // Move data to body
1595
+ },
1596
+ },
1597
+ );
1598
+ ```
1599
+
1600
+ #### Request Options Type Rename
1601
+
1602
+ `RequestOptions` has been renamed to `CompletionRequestOptions`.
1603
+
1604
+ ```tsx filename="AI SDK 4.0"
1605
+ import type { RequestOptions } from 'ai';
1606
+ ```
1607
+
1608
+ ```tsx filename="AI SDK 5.0"
1609
+ import type { CompletionRequestOptions } from 'ai';
1610
+ ```
1611
+
1612
+ #### addToolResult Renamed to addToolOutput
1613
+
1614
+ The `addToolResult` method has been renamed to `addToolOutput`. Additionally, the `result` parameter has been renamed to `output` for consistency with other tool-related APIs.
1615
+
1616
+ ```tsx filename="AI SDK 4.0"
1617
+ const { addToolResult } = useChat();
1618
+
1619
+ // Add tool result with 'result' parameter
1620
+ addToolResult({
1621
+ toolCallId: 'tool-call-123',
1622
+ result: 'Weather: 72°F, sunny',
1623
+ });
1624
+ ```
1625
+
1626
+ ```tsx filename="AI SDK 5.0"
1627
+ const { addToolOutput } = useChat();
1628
+
1629
+ // Add tool output with 'output' parameter and 'tool' name for type safety
1630
+ addToolOutput({
1631
+ tool: 'getWeather',
1632
+ toolCallId: 'tool-call-123',
1633
+ output: 'Weather: 72°F, sunny',
1634
+ });
1635
+ ```
1636
+
1637
+ <Note>
1638
+ `addToolResult` is still available but deprecated. It will be removed in
1639
+ version 6.
1640
+ </Note>
1641
+
1642
+ #### Tool Result Submission Changes
1643
+
1644
+ The automatic tool result submission behavior has been updated in `useChat` and the `Chat` component. You now have more control and flexibility over when tool results are submitted.
1645
+
1646
+ - `onToolCall` no longer supports returning values to automatically submit tool results
1647
+ - You must explicitly call `addToolOutput` to provide tool results
1648
+ - Use `sendAutomaticallyWhen` with `lastAssistantMessageIsCompleteWithToolCalls` helper for automatic submission
1649
+ - Important: Don't use `await` with `addToolOutput` inside `onToolCall` to avoid deadlocks
1650
+ - The `maxSteps` parameter has been removed from the `Chat` component and `useChat` hook
1651
+ - For multi-step tool execution, use server-side `stopWhen` conditions instead (see [maxSteps Removal](#maxsteps-removal))
1652
+
1653
+ ```tsx filename="AI SDK 4.0"
1654
+ const { messages, sendMessage, addToolResult } = useChat({
1655
+ maxSteps: 5, // Removed in v5
1656
+
1657
+ // Automatic submission by returning a value
1658
+ async onToolCall({ toolCall }) {
1659
+ if (toolCall.toolName === 'getLocation') {
1660
+ const cities = ['New York', 'Los Angeles', 'Chicago', 'San Francisco'];
1661
+ return cities[Math.floor(Math.random() * cities.length)];
1662
+ }
1663
+ },
1664
+ });
1665
+ ```
1666
+
1667
+ ```tsx filename="AI SDK 5.0"
1668
+ import { useChat } from '@ai-sdk/react';
1669
+ import {
1670
+ DefaultChatTransport,
1671
+ lastAssistantMessageIsCompleteWithToolCalls,
1672
+ } from 'ai';
1673
+
1674
+ const { messages, sendMessage, addToolOutput } = useChat({
1675
+ // Automatic submission with helper
1676
+ sendAutomaticallyWhen: lastAssistantMessageIsCompleteWithToolCalls,
1677
+
1678
+ async onToolCall({ toolCall }) {
1679
+ if (toolCall.toolName === 'getLocation') {
1680
+ const cities = ['New York', 'Los Angeles', 'Chicago', 'San Francisco'];
1681
+
1682
+ // Important: Don't await inside onToolCall to avoid deadlocks
1683
+ addToolOutput({
1684
+ tool: 'getLocation',
1685
+ toolCallId: toolCall.toolCallId,
1686
+ output: cities[Math.floor(Math.random() * cities.length)],
1687
+ });
1688
+ }
1689
+ },
1690
+ });
1691
+ ```
1692
+
1693
+ #### Loading State Changes
1694
+
1695
+ The deprecated `isLoading` helper has been removed in favor of `status`.
1696
+
1697
+ ```tsx filename="AI SDK 4.0"
1698
+ const { isLoading } = useChat();
1699
+ ```
1700
+
1701
+ ```tsx filename="AI SDK 5.0"
1702
+ const { status } = useChat();
1703
+ // Use status instead of isLoading for more granular control
1704
+ ```
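+
+ If you still want an `isLoading`-style boolean, you can derive one from `status` (a small sketch, assuming the v5 status values `'submitted' | 'streaming' | 'ready' | 'error'`):
+
+ ```tsx
+ const { status } = useChat();
+
+ // Derive a loading flag from the more granular status values
+ const isLoading = status === 'submitted' || status === 'streaming';
+ ```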
1705
+
1706
+ #### Resume Stream Support
1707
+
1708
+ The resume functionality has been moved from `experimental_resume` to `resumeStream`.
1709
+
1710
+ ```tsx filename="AI SDK 4.0"
1711
+ // Resume was experimental
1712
+ const { messages } = useChat({
1713
+ experimental_resume: true,
1714
+ });
1715
+ ```
1716
+
1717
+ ```tsx filename="AI SDK 5.0"
1718
+ const { messages } = useChat({
1719
+ resumeStream: true, // Resume interrupted streams
1720
+ });
1721
+ ```
1722
+
1723
+ #### Dynamic Body Values
1724
+
1725
+ In v4, the `body` option in useChat configuration would dynamically update with component state changes. In v5, the `body` value is only captured at the first render and remains static throughout the component lifecycle.
1726
+
1727
+ ```tsx filename="AI SDK 4.0"
1728
+ const [temperature, setTemperature] = useState(0.7);
1729
+
1730
+ const { messages } = useChat({
1731
+ api: '/api/chat',
1732
+ body: {
1733
+ temperature, // This would update dynamically in v4
1734
+ },
1735
+ });
1736
+ ```
1737
+
1738
+ ```tsx filename="AI SDK 5.0"
1739
+ const [temperature, setTemperature] = useState(0.7);
1740
+
1741
+ // Option 1: Use request-level configuration (Recommended)
1742
+ const { messages, sendMessage } = useChat({
1743
+ transport: new DefaultChatTransport({ api: '/api/chat' }),
1744
+ });
1745
+
1746
+ // Pass dynamic values at request time
1747
+ sendMessage(
1748
+ { text: input },
1749
+ {
1750
+ body: {
1751
+ temperature, // Current temperature value at request time
1752
+ },
1753
+ },
1754
+ );
1755
+
1756
+ // Option 2: Use function configuration with useRef
1757
+ const temperatureRef = useRef(temperature);
1758
+ temperatureRef.current = temperature;
1759
+
1760
+ const { messages } = useChat({
1761
+ transport: new DefaultChatTransport({
1762
+ api: '/api/chat',
1763
+ body: () => ({
1764
+ temperature: temperatureRef.current,
1765
+ }),
1766
+ }),
1767
+ });
1768
+ ```
1769
+
1770
+ For more details on request configuration, see the [Chatbot guide](/docs/ai-sdk-ui/chatbot#request-configuration).
1771
+
1772
+ #### Usage Information
1773
+
1774
+ In v4, usage information was directly accessible through the `onFinish` callback's options parameter. In v5, usage data is attached as metadata to individual messages using the `messageMetadata` function in `toUIMessageStreamResponse`.
1775
+
1776
+ ```tsx filename="AI SDK 4.0"
1777
+ const { messages } = useChat({
1778
+ onFinish(message, options) {
1779
+ const usage = options.usage;
1780
+ console.log('Usage:', usage);
1781
+ },
1782
+ });
1783
+ ```
1784
+
1785
+ ```tsx filename="AI SDK 5.0"
1786
+ import {
1787
+ convertToModelMessages,
1788
+ streamText,
1789
+ UIMessage,
1790
+ type LanguageModelUsage,
1791
+ } from 'ai';
1792
+ __PROVIDER_IMPORT__;
1793
+
1794
+ // Create a new metadata type (optional for type-safety)
1795
+ type MyMetadata = {
1796
+ totalUsage: LanguageModelUsage;
1797
+ };
1798
+
1799
+ // Create a new custom message type with your own metadata
1800
+ export type MyUIMessage = UIMessage<MyMetadata>;
1801
+
1802
+ export async function POST(req: Request) {
1803
+ const { messages }: { messages: MyUIMessage[] } = await req.json();
1804
+
1805
+ const result = streamText({
1806
+ model: __MODEL__,
1807
+ messages: convertToModelMessages(messages),
1808
+ });
1809
+
1810
+ return result.toUIMessageStreamResponse({
1811
+ originalMessages: messages,
1812
+ messageMetadata: ({ part }) => {
1813
+ // Send total usage when generation is finished
1814
+ if (part.type === 'finish') {
1815
+ return { totalUsage: part.totalUsage };
1816
+ }
1817
+ },
1818
+ });
1819
+ }
1820
+ ```
1821
+
1822
+ Then, on the client, you can access the message-level metadata.
1823
+
1824
+ ```tsx filename="AI SDK 5.0 - Client"
1825
+ 'use client';
1826
+
1827
+ import { useChat } from '@ai-sdk/react';
1828
+ import type { MyUIMessage } from './api/chat/route';
1829
+ import { DefaultChatTransport } from 'ai';
1830
+
1831
+ export default function Chat() {
1832
+ // Use custom message type defined on the server (optional for type-safety)
1833
+ const { messages } = useChat<MyUIMessage>({
1834
+ transport: new DefaultChatTransport({
1835
+ api: '/api/chat',
1836
+ }),
1837
+ });
1838
+
1839
+ return (
1840
+ <div className="flex flex-col w-full max-w-md py-24 mx-auto stretch">
1841
+ {messages.map(m => (
1842
+ <div key={m.id} className="whitespace-pre-wrap">
1843
+ {m.role === 'user' ? 'User: ' : 'AI: '}
1844
+ {m.parts.map(part => {
1845
+ if (part.type === 'text') {
1846
+ return part.text;
1847
+ }
1848
+ })}
1849
+ {/* Render usage via metadata */}
1850
+ {m.metadata?.totalUsage && (
1851
+ <div>Total usage: {m.metadata?.totalUsage.totalTokens} tokens</div>
1852
+ )}
1853
+ </div>
1854
+ ))}
1855
+ </div>
1856
+ );
1857
+ }
1858
+ ```
1859
+
1860
+ You can also access your metadata from the `onFinish` callback of `useChat`:
1861
+
1862
+ ```tsx filename="AI SDK 5.0 - onFinish"
1863
+ 'use client';
1864
+
1865
+ import { useChat } from '@ai-sdk/react';
1866
+ import type { MyUIMessage } from './api/chat/route';
1867
+ import { DefaultChatTransport } from 'ai';
1868
+
1869
+ export default function Chat() {
1870
+ // Use custom message type defined on the server (optional for type-safety)
1871
+ const { messages } = useChat<MyUIMessage>({
1872
+ transport: new DefaultChatTransport({
1873
+ api: '/api/chat',
1874
+ }),
1875
+ onFinish: ({ message }) => {
1876
+ // Access message metadata via onFinish callback
1877
+ console.log(message.metadata?.totalUsage);
1878
+ },
1879
+ });
1880
+ }
1881
+ ```
1882
+
1883
+ #### Request Body Preparation: experimental_prepareRequestBody → prepareSendMessagesRequest
1884
+
1885
+ The `experimental_prepareRequestBody` option has been replaced with `prepareSendMessagesRequest` in the transport configuration.
1886
+
1887
+ ```tsx filename="AI SDK 4.0"
1888
+ import { useChat } from '@ai-sdk/react';
1889
+
1890
+ const { messages } = useChat({
1891
+ api: '/api/chat',
1892
+ // Only send the last message to the server:
1893
+ experimental_prepareRequestBody({ messages, id }) {
1894
+ return { message: messages[messages.length - 1], id };
1895
+ },
1896
+ });
1897
+ ```
1898
+
1899
+ ```tsx filename="AI SDK 5.0"
1900
+ import { useChat } from '@ai-sdk/react';
1901
+ import { DefaultChatTransport } from 'ai';
1902
+
1903
+ const { messages } = useChat({
1904
+ transport: new DefaultChatTransport({
1905
+ api: '/api/chat',
1906
+ // Only send the last message to the server:
1907
+ prepareSendMessagesRequest({ messages, id }) {
1908
+ return { body: { message: messages[messages.length - 1], id } };
1909
+ },
1910
+ }),
1911
+ });
1912
+ ```
1913
+
1914
+ ### `@ai-sdk/vue` Changes
1915
+
1916
+ The Vue.js integration has been completely restructured, replacing the `useChat` composable with a `Chat` class.
1917
+
1918
+ #### useChat Replaced with Chat Class
1919
+
1920
+ ```typescript filename="@ai-sdk/vue v1"
1921
+ <script setup>
1922
+ import { useChat } from '@ai-sdk/vue';
1923
+
1924
+ const { messages, input, handleSubmit } = useChat({
1925
+ api: '/api/chat',
1926
+ });
1927
+ </script>
1928
+ ```
1929
+
1930
+ ```typescript filename="@ai-sdk/vue v2"
1931
+ <script setup>
1932
+ import { Chat } from '@ai-sdk/vue';
1933
+ import { DefaultChatTransport } from 'ai';
1934
+ import { ref } from 'vue';
1935
+
1936
+ const input = ref('');
1937
+ const chat = new Chat({
1938
+ transport: new DefaultChatTransport({ api: '/api/chat' }),
1939
+ });
1940
+
1941
+ const handleSubmit = (e: Event) => {
1942
+ e.preventDefault();
1943
+ chat.sendMessage({ text: input.value });
1944
+ input.value = '';
1945
+ };
1946
+ </script>
1947
+ ```
1948
+
1949
+ #### Message Structure Changes
1950
+
1951
+ Messages now use a `parts` array instead of a `content` string.
1952
+
1953
+ ```typescript filename="@ai-sdk/vue v1"
1954
+ <template>
1955
+ <div v-for="message in messages" :key="message.id">
1956
+ <div>{{ message.role }}: {{ message.content }}</div>
1957
+ </div>
1958
+ </template>
1959
+ ```
1960
+
1961
+ ```typescript filename="@ai-sdk/vue v2"
1962
+ <template>
1963
+ <div v-for="message in chat.messages" :key="message.id">
1964
+ <div>{{ message.role }}:</div>
1965
+ <div v-for="part in message.parts" :key="part.type">
1966
+ <span v-if="part.type === 'text'">{{ part.text }}</span>
1967
+ </div>
1968
+ </div>
1969
+ </template>
1970
+ ```
1971
+
1972
+ ### `@ai-sdk/svelte` Changes
1973
+
1974
+ The Svelte integration has also been updated with new constructor patterns and readonly properties.
1975
+
1976
+ #### Constructor API Changes
1977
+
1978
+ ```js filename="@ai-sdk/svelte v1"
1979
+ import { Chat } from '@ai-sdk/svelte';
1980
+
1981
+ const chatInstance = new Chat({
1982
+ api: '/api/chat',
1983
+ });
1984
+ ```
1985
+
1986
+ ```js filename="@ai-sdk/svelte v2"
1987
+ import { Chat } from '@ai-sdk/svelte';
1988
+ import { DefaultChatTransport } from 'ai';
1989
+
1990
+ const chatInstance = new Chat(() => ({
1991
+ transport: new DefaultChatTransport({ api: '/api/chat' }),
1992
+ }));
1993
+ ```
1994
+
1995
+ ##### Properties Made Readonly
1996
+
1997
+ Properties are now readonly and must be updated using setter methods.
1998
+
1999
+ ```js filename="@ai-sdk/svelte v1"
2000
+ // Direct property mutation was allowed
2001
+ chatInstance.messages = [...chatInstance.messages, newMessage];
2002
+ ```
2003
+
2004
+ ```js filename="@ai-sdk/svelte v2"
2005
+ // Must use setter methods
2006
+ chatInstance.setMessages([...chatInstance.messages, newMessage]);
2007
+ ```
2008
+
2009
+ ##### Removed Managed Input
2010
+
2011
+ As with React and Vue, input management has been removed from the Svelte integration.
2012
+
2013
+ ```js filename="@ai-sdk/svelte v1"
2014
+ // Input was managed internally
2015
+ const { messages, input, handleSubmit } = chatInstance;
2016
+ ```
2017
+
2018
+ ```js filename="@ai-sdk/svelte v2"
2019
+ // Must manage input state manually
2020
+ let input = '';
2021
+ const { messages, sendMessage } = chatInstance;
2022
+
2023
+ const handleSubmit = () => {
2024
+ sendMessage({ text: input });
2025
+ input = '';
2026
+ };
2027
+ ```
2028
+
2029
+ #### `@ai-sdk/ui-utils` Package Removal
2030
+
2031
+ The `@ai-sdk/ui-utils` package has been removed and its exports moved to the main `ai` package.
2032
+
2033
+ ```tsx filename="AI SDK 4.0"
2034
+ import { getTextFromDataUrl } from '@ai-sdk/ui-utils';
2035
+ ```
2036
+
2037
+ ```tsx filename="AI SDK 5.0"
2038
+ import { getTextFromDataUrl } from 'ai';
2039
+ ```
2040
+
2041
+ **Note**: `processDataStream` was removed entirely in v5.0. Use `readUIMessageStream` instead for processing UI message streams, or use the more configurable Chat/useChat APIs for most use cases.
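+
+ A rough sketch of the `readUIMessageStream` replacement, assuming you want to consume a `streamText` result as UI messages on the server or in a script:
+
+ ```tsx
+ import { readUIMessageStream, streamText } from 'ai';
+ __PROVIDER_IMPORT__;
+
+ const result = streamText({
+   model: __MODEL__,
+   prompt: 'Write a haiku about streams.',
+ });
+
+ // Iterate over progressively updated UIMessage snapshots
+ for await (const uiMessage of readUIMessageStream({
+   stream: result.toUIMessageStream(),
+ })) {
+   console.log(uiMessage.parts);
+ }
+ ```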
2042
+
2043
+ ### useCompletion Changes
2044
+
2045
+ The `data` property has been removed from the `useCompletion` hook.
2046
+
2047
+ ```tsx filename="AI SDK 4.0"
2048
+ const {
2049
+ completion,
2050
+ handleSubmit,
2051
+ data, // No longer available
2052
+ } = useCompletion();
2053
+ ```
2054
+
2055
+ ```tsx filename="AI SDK 5.0"
2056
+ const {
2057
+ completion,
2058
+ handleSubmit,
2059
+ // data property removed entirely
2060
+ } = useCompletion();
2061
+ ```
2062
+
2063
+ ### useAssistant Removal
2064
+
2065
+ The `useAssistant` hook has been removed.
2066
+
2067
+ ```tsx filename="AI SDK 4.0"
2068
+ import { useAssistant } from '@ai-sdk/react';
2069
+ ```
2070
+
2071
+ ```tsx filename="AI SDK 5.0"
2072
+ // useAssistant has been removed
2073
+ // Use useChat with appropriate configuration instead
2074
+ ```
2075
+
2076
+ For an implementation of the assistant functionality with AI SDK v5, see this [example repository](https://github.com/vercel-labs/ai-sdk-openai-assistants-api).
2077
+
2078
+ #### Attachments → File Parts
2079
+
2080
+ The `experimental_attachments` property has been replaced with the parts array.
2081
+
2082
+ ```tsx filename="AI SDK 4.0"
2083
+ {
2084
+ messages.map(message => (
2085
+ <div className="flex flex-col gap-2">
2086
+ {message.content}
2087
+
2088
+ <div className="flex flex-row gap-2">
2089
+ {message.experimental_attachments?.map((attachment, index) =>
2090
+ attachment.contentType?.includes('image/') ? (
2091
+ <img src={attachment.url} alt={attachment.name} />
2092
+ ) : attachment.contentType?.includes('text/') ? (
2093
+ <div className="w-32 h-24 p-2 overflow-hidden text-xs border rounded-md ellipsis text-zinc-500">
2094
+ {getTextFromDataUrl(attachment.url)}
2095
+ </div>
2096
+ ) : null,
2097
+ )}
2098
+ </div>
2099
+ </div>
2100
+ ));
2101
+ }
2102
+ ```
2103
+
2104
+ ```tsx filename="AI SDK 5.0"
2105
+ {
2106
+ messages.map(message => (
2107
+ <div>
2108
+ {message.parts.map((part, index) => {
2109
+ if (part.type === 'text') {
2110
+ return <div key={index}>{part.text}</div>;
2111
+ }
2112
+
2113
+ if (part.type === 'file' && part.mediaType?.startsWith('image/')) {
2114
+ return (
2115
+ <div key={index}>
2116
+ <img src={part.url} />
2117
+ </div>
2118
+ );
2119
+ }
2120
+ })}
2121
+ </div>
2122
+ ));
2123
+ }
2124
+ ```
2125
+
2126
+ <Note>
2127
+ Some models do not support text files (text/plain, text/markdown, text/csv,
2128
+   etc.) as file parts. For text files, you can read the file and send its content as a text part
2129
+ instead:
2130
+
2131
+ ```tsx
2132
+ // Instead of this:
2133
+ { type: 'file', data: buffer, mediaType: 'text/plain' }
2134
+
2135
+ // Do this:
2136
+ { type: 'text', text: buffer.toString('utf-8') }
2137
+ ```
2138
+
2139
+ </Note>
2140
+
2141
+ ### Embedding Changes
2142
+
2143
+ #### Provider Options for Embeddings
2144
+
2145
+ Embedding model settings now use provider options instead of model parameters.
2146
+
2147
+ ```tsx filename="AI SDK 4.0"
2148
+ const { embedding } = await embed({
2149
+ model: openai('text-embedding-3-small', {
2150
+ dimensions: 10,
2151
+ }),
2152
+ });
2153
+ ```
2154
+
2155
+ ```tsx filename="AI SDK 5.0"
2156
+ const { embedding } = await embed({
2157
+ model: openai('text-embedding-3-small'),
2158
+ providerOptions: {
2159
+ openai: {
2160
+ dimensions: 10,
2161
+ },
2162
+ },
2163
+ });
2164
+ ```
2165
+
2166
+ #### Raw Response → Response
2167
+
2168
+ The `rawResponse` property has been renamed to `response`.
2169
+
2170
+ ```tsx filename="AI SDK 4.0"
2171
+ const { rawResponse } = await embed(/* */);
2172
+ ```
2173
+
2174
+ ```tsx filename="AI SDK 5.0"
2175
+ const { response } = await embed(/* */);
2176
+ ```
2177
+
2178
+ #### Parallel Requests in embedMany
2179
+
2180
+ `embedMany` now makes parallel requests with a configurable `maxParallelCalls` option.
2181
+
2182
+ ```tsx filename="AI SDK 5.0"
2183
+ const { embeddings, usage } = await embedMany({
2184
+ maxParallelCalls: 2, // Limit parallel requests
2185
+ model: 'openai/text-embedding-3-small',
2186
+ values: [
2187
+ 'sunny day at the beach',
2188
+ 'rainy afternoon in the city',
2189
+ 'snowy night in the mountains',
2190
+ ],
2191
+ });
2192
+ ```
2193
+
2194
+ #### LangChain Adapter Moved to `@ai-sdk/langchain`
2195
+
2196
+ The `LangChainAdapter` has been moved to `@ai-sdk/langchain` and the API has been updated to use UI message streams.
2197
+
2198
+ ```tsx filename="AI SDK 4.0"
2199
+ import { LangChainAdapter } from 'ai';
2200
+
2201
+ const response = LangChainAdapter.toDataStreamResponse(stream);
2202
+ ```
2203
+
2204
+ ```tsx filename="AI SDK 5.0"
2205
+ import { toUIMessageStream } from '@ai-sdk/langchain';
2206
+ import { createUIMessageStreamResponse } from 'ai';
2207
+
2208
+ const response = createUIMessageStreamResponse({
2209
+ stream: toUIMessageStream(stream),
2210
+ });
2211
+ ```
2212
+
2213
+ <Note>
2214
+ Don't forget to install the new package: `npm install @ai-sdk/langchain`
2215
+ </Note>
2216
+
2217
+ #### LlamaIndex Adapter Moved to `@ai-sdk/llamaindex`
2218
+
2219
+ The `LlamaIndexAdapter` has been extracted to a separate package `@ai-sdk/llamaindex` and follows the same UI message stream pattern.
2220
+
2221
+ ```tsx filename="AI SDK 4.0"
2222
+ import { LlamaIndexAdapter } from 'ai';
2223
+
2224
+ const response = LlamaIndexAdapter.toDataStreamResponse(stream);
2225
+ ```
2226
+
2227
+ ```tsx filename="AI SDK 5.0"
2228
+ import { toUIMessageStream } from '@ai-sdk/llamaindex';
2229
+ import { createUIMessageStreamResponse } from 'ai';
2230
+
2231
+ const response = createUIMessageStreamResponse({
2232
+ stream: toUIMessageStream(stream),
2233
+ });
2234
+ ```
2235
+
2236
+ <Note>
2237
+ Don't forget to install the new package: `npm install @ai-sdk/llamaindex`
2238
+ </Note>
2239
+
2240
+ ## Streaming Architecture
2241
+
2242
+ The streaming architecture has been completely redesigned in v5 to support better content differentiation, concurrent streaming of multiple parts, and improved real-time UX.
2243
+
2244
+ ### Stream Protocol Changes
2245
+
2246
+ #### Stream Protocol: Single Chunks → Start/Delta/End Pattern
2247
+
2248
+ The fundamental streaming pattern has changed from single chunks to a three-phase pattern with unique IDs for each content block.
2249
+
2250
+ ```tsx filename="AI SDK 4.0"
2251
+ for await (const chunk of result.fullStream) {
2252
+ switch (chunk.type) {
2253
+ case 'text-delta': {
2254
+ process.stdout.write(chunk.textDelta);
2255
+ break;
2256
+ }
2257
+ }
2258
+ }
2259
+ ```
2260
+
2261
+ ```tsx filename="AI SDK 5.0"
2262
+ for await (const chunk of result.fullStream) {
2263
+ switch (chunk.type) {
2264
+ case 'text-start': {
2265
+ // New: Initialize a text block with unique ID
2266
+ console.log(`Starting text block: ${chunk.id}`);
2267
+ break;
2268
+ }
2269
+ case 'text-delta': {
2270
+ // Changed: Now includes ID and uses 'delta' property
2271
+ process.stdout.write(chunk.delta); // Changed from 'textDelta'
2272
+ break;
2273
+ }
2274
+ case 'text-end': {
2275
+ // New: Finalize the text block
2276
+ console.log(`Completed text block: ${chunk.id}`);
2277
+ break;
2278
+ }
2279
+ }
2280
+ }
2281
+ ```
2282
+
2283
+ #### Reasoning Streaming Pattern
2284
+
2285
+ Reasoning content now follows the same start/delta/end pattern:
2286
+
2287
+ ```tsx filename="AI SDK 4.0"
2288
+ for await (const chunk of result.fullStream) {
2289
+ switch (chunk.type) {
2290
+ case 'reasoning': {
2291
+ // Single chunk with full reasoning text
2292
+ console.log('Reasoning:', chunk.text);
2293
+ break;
2294
+ }
2295
+ }
2296
+ }
2297
+ ```
2298
+
2299
+ ```tsx filename="AI SDK 5.0"
2300
+ for await (const chunk of result.fullStream) {
2301
+ switch (chunk.type) {
2302
+ case 'reasoning-start': {
2303
+ console.log(`Starting reasoning block: ${chunk.id}`);
2304
+ break;
2305
+ }
2306
+ case 'reasoning-delta': {
2307
+ process.stdout.write(chunk.delta);
2308
+ break;
2309
+ }
2310
+ case 'reasoning-end': {
2311
+ console.log(`Completed reasoning block: ${chunk.id}`);
2312
+ break;
2313
+ }
2314
+ }
2315
+ }
2316
+ ```
2317
+
2318
+ #### Tool Input Streaming
2319
+
2320
+ Tool inputs can now be streamed as they're being generated:
2321
+
2322
+ ```tsx filename="AI SDK 5.0"
2323
+ for await (const chunk of result.fullStream) {
2324
+ switch (chunk.type) {
2325
+ case 'tool-input-start': {
2326
+ console.log(`Starting tool input for ${chunk.toolName}: ${chunk.id}`);
2327
+ break;
2328
+ }
2329
+ case 'tool-input-delta': {
2330
+ // Stream the JSON input as it's being generated
2331
+ process.stdout.write(chunk.delta);
2332
+ break;
2333
+ }
2334
+ case 'tool-input-end': {
2335
+ console.log(`Completed tool input: ${chunk.id}`);
2336
+ break;
2337
+ }
2338
+ case 'tool-call': {
2339
+ // Final tool call with complete input
2340
+ console.log('Tool call:', chunk.toolName, chunk.input);
2341
+ break;
2342
+ }
2343
+ }
2344
+ }
2345
+ ```
2346
+
2347
+ #### onChunk Callback Changes
2348
+
2349
+ The `onChunk` callback now receives the new streaming chunk types with IDs and the start/delta/end pattern.
2350
+
2351
+ ```tsx filename="AI SDK 4.0"
2352
+ const result = streamText({
2353
+ model: __MODEL__,
2354
+ prompt: 'Write a story',
2355
+ onChunk({ chunk }) {
2356
+ switch (chunk.type) {
2357
+ case 'text-delta': {
2358
+ // Single property with text content
2359
+ console.log('Text delta:', chunk.textDelta);
2360
+ break;
2361
+ }
2362
+ }
2363
+ },
2364
+ });
2365
+ ```
2366
+
2367
+ ```tsx filename="AI SDK 5.0"
2368
+ const result = streamText({
2369
+ model: __MODEL__,
2370
+ prompt: 'Write a story',
2371
+ onChunk({ chunk }) {
2372
+ switch (chunk.type) {
2373
+ case 'text-delta': {
2374
+ // Text chunks now use single 'text' type
2375
+ console.log('Text chunk:', chunk.text);
2376
+ break;
2377
+ }
2378
+ case 'reasoning': {
2379
+ // Reasoning chunks use single 'reasoning' type
2380
+ console.log('Reasoning chunk:', chunk.text);
2381
+ break;
2382
+ }
2383
+ case 'source': {
2384
+ console.log('Source chunk:', chunk);
2385
+ break;
2386
+ }
2387
+ case 'tool-call': {
2388
+ console.log('Tool call:', chunk.toolName, chunk.input);
2389
+ break;
2390
+ }
2391
+ case 'tool-input-start': {
2392
+ console.log(
2393
+ `Tool input started for ${chunk.toolName}:`,
2394
+ chunk.toolCallId,
2395
+ );
2396
+ break;
2397
+ }
2398
+ case 'tool-input-delta': {
2399
+ console.log(`Tool input delta for ${chunk.toolCallId}:`, chunk.delta);
2400
+ break;
2401
+ }
2402
+ case 'tool-result': {
2403
+ console.log('Tool result:', chunk.output);
2404
+ break;
2405
+ }
2406
+ case 'raw': {
2407
+ console.log('Raw chunk:', chunk);
2408
+ break;
2409
+ }
2410
+ }
2411
+ },
2412
+ });
2413
+ ```
2414
+
2415
+ #### File Stream Parts Restructure
2416
+
2417
+ File parts in streams have been flattened.
2418
+
2419
+ ```tsx filename="AI SDK 4.0"
2420
+ for await (const chunk of result.fullStream) {
2421
+ switch (chunk.type) {
2422
+ case 'file': {
2423
+ console.log('Media type:', chunk.file.mediaType);
2424
+ console.log('File data:', chunk.file.data);
2425
+ break;
2426
+ }
2427
+ }
2428
+ }
2429
+ ```
2430
+
2431
+ ```tsx filename="AI SDK 5.0"
2432
+ for await (const chunk of result.fullStream) {
2433
+ switch (chunk.type) {
2434
+ case 'file': {
2435
+ console.log('Media type:', chunk.mediaType);
2436
+ console.log('File data:', chunk.data);
2437
+ break;
2438
+ }
2439
+ }
2440
+ }
2441
+ ```
2442
+
2443
+ #### Source Stream Parts Restructure
2444
+
2445
+ Source stream parts have been flattened.
2446
+
2447
+ ```tsx filename="AI SDK 4.0"
2448
+ for await (const part of result.fullStream) {
2449
+ if (part.type === 'source' && part.source.sourceType === 'url') {
2450
+ console.log('ID:', part.source.id);
2451
+ console.log('Title:', part.source.title);
2452
+ console.log('URL:', part.source.url);
2453
+ }
2454
+ }
2455
+ ```
2456
+
2457
+ ```tsx filename="AI SDK 5.0"
2458
+ for await (const part of result.fullStream) {
2459
+ if (part.type === 'source' && part.sourceType === 'url') {
2460
+ console.log('ID:', part.id);
2461
+ console.log('Title:', part.title);
2462
+ console.log('URL:', part.url);
2463
+ }
2464
+ }
2465
+ ```
2466
+
2467
+ #### Finish Event Changes
2468
+
2469
+ Stream finish events have been renamed for consistency.
2470
+
2471
+ ```tsx filename="AI SDK 4.0"
2472
+ for await (const part of result.fullStream) {
2473
+ switch (part.type) {
2474
+ case 'step-finish': {
2475
+ console.log('Step finished:', part.finishReason);
2476
+ break;
2477
+ }
2478
+ case 'finish': {
2479
+ console.log('Usage:', part.usage);
2480
+ break;
2481
+ }
2482
+ }
2483
+ }
2484
+ ```
2485
+
2486
+ ```tsx filename="AI SDK 5.0"
2487
+ for await (const part of result.fullStream) {
2488
+ switch (part.type) {
2489
+ case 'finish-step': {
2490
+ // Renamed from 'step-finish'
2491
+ console.log('Step finished:', part.finishReason);
2492
+ break;
2493
+ }
2494
+ case 'finish': {
2495
+ console.log('Total Usage:', part.totalUsage); // Changed from 'usage'
2496
+ break;
2497
+ }
2498
+ }
2499
+ }
2500
+ ```
2501
+
2502
+ ### Data Stream Protocol Changes
2503
+
2504
+ #### Proprietary Protocol → Server-Sent Events
2505
+
2506
+ The data stream protocol has been updated to use Server-Sent Events.
2507
+
2508
+ ```tsx filename="AI SDK 4.0"
2509
+ import { createDataStream, formatDataStreamPart } from 'ai';
2510
+
2511
+ const dataStream = createDataStream({
2512
+ execute: writer => {
2513
+ writer.writeData('initialized call');
2514
+ writer.write(formatDataStreamPart('text', 'Hello'));
2515
+ writer.writeSource({
2516
+ type: 'source',
2517
+ sourceType: 'url',
2518
+ id: 'source-1',
2519
+ url: 'https://example.com',
2520
+ title: 'Example Source',
2521
+ });
2522
+ },
2523
+ });
2524
+ ```
2525
+
2526
+ ```tsx filename="AI SDK 5.0"
2527
+ import { createUIMessageStream } from 'ai';
2528
+
2529
+ const stream = createUIMessageStream({
2530
+ execute: ({ writer }) => {
2531
+ writer.write({ type: 'data', value: ['initialized call'] });
2532
+ writer.write({ type: 'text', value: 'Hello' });
2533
+ writer.write({
2534
+ type: 'source-url',
2535
+ value: {
2536
+ type: 'source',
2537
+ id: 'source-1',
2538
+ url: 'https://example.com',
2539
+ title: 'Example Source',
2540
+ },
2541
+ });
2542
+ },
2543
+ });
2544
+ ```
2545
+
2546
+ #### Data Stream Response Helper Functions Renamed
2547
+
2548
+ The streaming API has been completely restructured from data streams to UI message streams.
2549
+
2550
+ ```tsx filename="AI SDK 4.0"
2551
+ // Express/Node.js servers
2552
+ app.post('/stream', async (req, res) => {
2553
+ const result = streamText({
2554
+ model: __MODEL__,
2555
+ prompt: 'Generate content',
2556
+ });
2557
+
2558
+ result.pipeDataStreamToResponse(res);
2559
+ });
2560
+
2561
+ // Next.js API routes
2562
+ const result = streamText({
2563
+ model: __MODEL__,
2564
+ prompt: 'Generate content',
2565
+ });
2566
+
2567
+ return result.toDataStreamResponse();
2568
+ ```
2569
+
2570
+ ```tsx filename="AI SDK 5.0"
2571
+ // Express/Node.js servers
2572
+ app.post('/stream', async (req, res) => {
2573
+ const result = streamText({
2574
+ model: __MODEL__,
2575
+ prompt: 'Generate content',
2576
+ });
2577
+
2578
+ result.pipeUIMessageStreamToResponse(res);
2579
+ });
2580
+
2581
+ // Next.js API routes
2582
+ const result = streamText({
2583
+ model: __MODEL__,
2584
+ prompt: 'Generate content',
2585
+ });
2586
+
2587
+ return result.toUIMessageStreamResponse();
2588
+ ```
2589
+
2590
+ #### Stream Transform Function Renaming
2591
+
2592
+ Various stream-related functions have been renamed for consistency.
2593
+
2594
+ ```tsx filename="AI SDK 4.0"
2595
+ import { DataStreamToSSETransformStream } from 'ai';
2596
+ ```
2597
+
2598
+ ```tsx filename="AI SDK 5.0"
2599
+ import { JsonToSseTransformStream } from 'ai';
2600
+ ```
2601
+
2602
+ #### Error Handling: getErrorMessage → onError
2603
+
2604
+ The `getErrorMessage` option in `toDataStreamResponse` has been replaced with `onError` in `toUIMessageStreamResponse`, providing more control over error forwarding to the client.
2605
+
2606
+ By default, error messages are NOT sent to the client to prevent leaking sensitive information. The `onError` callback allows you to explicitly control what error information is forwarded to the client.
2607
+
2608
+ ```tsx filename="AI SDK 4.0"
2609
+ return result.toDataStreamResponse({
2610
+ getErrorMessage: error => {
2611
+ // Return sanitized error data to send to client
2612
+ // Only return what you want the client to see!
2613
+ return {
2614
+ errorCode: 'STREAM_ERROR',
2615
+ message: 'An error occurred while processing your request',
2616
+ // In production, avoid sending error.message directly to prevent information leakage
2617
+ };
2618
+ },
2619
+ });
2620
+ ```
2621
+
2622
+ ```tsx filename="AI SDK 5.0"
2623
+ return result.toUIMessageStreamResponse({
2624
+ onError: error => {
2625
+ // Return sanitized error data to send to client
2626
+ // Only return what you want the client to see!
2627
+ return {
2628
+ errorCode: 'STREAM_ERROR',
2629
+ message: 'An error occurred while processing your request',
2630
+ // In production, avoid sending error.message directly to prevent information leakage
2631
+ };
2632
+ },
2633
+ });
2634
+ ```
2635
+
2636
+ ### Utility Changes
2637
+
2638
+ #### ID Generation Changes
2639
+
2640
+ The `createIdGenerator()` function now takes the `size` option at creation time; the returned generator no longer accepts a size argument when called.
2641
+
2642
+ ```tsx filename="AI SDK 4.0"
2643
+ const generator = createIdGenerator({ prefix: 'msg' });
2644
+ const id = generator(16); // Custom size at call time
2645
+ ```
2646
+
2647
+ ```tsx filename="AI SDK 5.0"
2648
+ const generator = createIdGenerator({ prefix: 'msg', size: 16 });
2649
+ const id = generator(); // Fixed size from creation
2650
+ ```
2651
+
2652
+ #### IDGenerator → IdGenerator
2653
+
2654
+ The type name has been updated.
2655
+
2656
+ ```tsx filename="AI SDK 4.0"
2657
+ import { IDGenerator } from 'ai';
2658
+ ```
2659
+
2660
+ ```tsx filename="AI SDK 5.0"
2661
+ import { IdGenerator } from 'ai';
2662
+ ```
2663
+
2664
+ ### Provider Interface Changes
2665
+
2666
+ #### Language Model V3 Import
2667
+
2668
+ `LanguageModelV3` must now be imported from `@ai-sdk/provider`.
2669
+
2670
+ ```tsx filename="AI SDK 4.0"
2671
+ import { LanguageModelV3 } from 'ai';
2672
+ ```
2673
+
2674
+ ```tsx filename="AI SDK 5.0"
2675
+ import { LanguageModelV3 } from '@ai-sdk/provider';
2676
+ ```
2677
+
2678
+ #### Middleware Rename
2679
+
2680
+ `LanguageModelV1Middleware` has been renamed and moved.
2681
+
2682
+ ```tsx filename="AI SDK 4.0"
2683
+ import { LanguageModelV1Middleware } from 'ai';
2684
+ ```
2685
+
2686
+ ```tsx filename="AI SDK 5.0"
2687
+ import { LanguageModelV3Middleware } from '@ai-sdk/provider';
2688
+ ```
2689
+
2690
+ #### Usage Token Properties
2691
+
2692
+ Token usage properties have been renamed for consistency.
2693
+
2694
+ ```tsx filename="AI SDK 4.0"
2695
+ // In language model implementations
2696
+ {
2697
+ usage: {
2698
+ promptTokens: 10,
2699
+ completionTokens: 20
2700
+ }
2701
+ }
2702
+ ```
2703
+
2704
+ ```tsx filename="AI SDK 5.0"
2705
+ // In language model implementations
2706
+ {
2707
+ usage: {
2708
+ inputTokens: 10,
2709
+ outputTokens: 20,
2710
+ totalTokens: 30 // Now required
2711
+ }
2712
+ }
2713
+ ```
2714
+
2715
+ #### Stream Part Type Changes
2716
+
2717
+ The `LanguageModelV3StreamPart` type has been expanded to support the new streaming architecture with start/delta/end patterns and IDs.
2718
+
2719
+ ```tsx filename="AI SDK 4.0"
2720
+ // V4: Simple stream parts
2721
+ type LanguageModelV3StreamPart =
2722
+ | { type: 'text-delta'; textDelta: string }
2723
+ | { type: 'reasoning'; text: string }
2724
+ | { type: 'tool-call'; toolCallId: string; toolName: string; input: string };
2725
+ ```
2726
+
2727
+ ```tsx filename="AI SDK 5.0"
2728
+ // V5: Enhanced stream parts with IDs and lifecycle events
2729
+ type LanguageModelV3StreamPart =
2730
+ // Text blocks with start/delta/end pattern
2731
+ | {
2732
+ type: 'text-start';
2733
+ id: string;
2734
+ providerMetadata?: SharedV2ProviderMetadata;
2735
+ }
2736
+ | {
2737
+ type: 'text-delta';
2738
+ id: string;
2739
+ delta: string;
2740
+ providerMetadata?: SharedV2ProviderMetadata;
2741
+ }
2742
+ | {
2743
+ type: 'text-end';
2744
+ id: string;
2745
+ providerMetadata?: SharedV2ProviderMetadata;
2746
+ }
2747
+
2748
+ // Reasoning blocks with start/delta/end pattern
2749
+ | {
2750
+ type: 'reasoning-start';
2751
+ id: string;
2752
+ providerMetadata?: SharedV2ProviderMetadata;
2753
+ }
2754
+ | {
2755
+ type: 'reasoning-delta';
2756
+ id: string;
2757
+ delta: string;
2758
+ providerMetadata?: SharedV2ProviderMetadata;
2759
+ }
2760
+ | {
2761
+ type: 'reasoning-end';
2762
+ id: string;
2763
+ providerMetadata?: SharedV2ProviderMetadata;
2764
+ }
2765
+
2766
+ // Tool input streaming
2767
+ | {
2768
+ type: 'tool-input-start';
2769
+ id: string;
2770
+ toolName: string;
2771
+ providerMetadata?: SharedV2ProviderMetadata;
2772
+ }
2773
+ | {
2774
+ type: 'tool-input-delta';
2775
+ id: string;
2776
+ delta: string;
2777
+ providerMetadata?: SharedV2ProviderMetadata;
2778
+ }
2779
+ | {
2780
+ type: 'tool-input-end';
2781
+ id: string;
2782
+ providerMetadata?: SharedV2ProviderMetadata;
2783
+ }
2784
+
2785
+ // Enhanced tool calls
2786
+ | {
2787
+ type: 'tool-call';
2788
+ toolCallId: string;
2789
+ toolName: string;
2790
+ input: string;
2791
+ providerMetadata?: SharedV2ProviderMetadata;
2792
+ }
2793
+
2794
+ // Stream lifecycle events
2795
+ | { type: 'stream-start'; warnings: Array<SharedV3Warning> }
2796
+ | {
2797
+ type: 'finish';
2798
+ usage: LanguageModelV3Usage;
2799
+ finishReason: LanguageModelV3FinishReason;
2800
+ providerMetadata?: SharedV2ProviderMetadata;
2801
+ };
2802
+ ```
2803
+
2804
+ #### Raw Response → Response
2805
+
2806
+ Provider response objects have been updated.
2807
+
2808
+ ```tsx filename="AI SDK 4.0"
2809
+ // In language model implementations
2810
+ {
2811
+ rawResponse: {
2812
+ /* ... */
2813
+ }
2814
+ }
2815
+ ```
2816
+
2817
+ ```tsx filename="AI SDK 5.0"
2818
+ // In language model implementations
2819
+ {
2820
+ response: {
2821
+ /* ... */
2822
+ }
2823
+ }
2824
+ ```
2825
+
2826
+ #### `wrapLanguageModel` now stable
2827
+
2828
+ ```tsx filename="AI SDK 4.0"
2829
+ import { experimental_wrapLanguageModel } from 'ai';
2830
+ ```
2831
+
2832
+ ```tsx filename="AI SDK 5.0"
2833
+ import { wrapLanguageModel } from 'ai';
2834
+ ```
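+
+ For example, wrapping a model with one of the built-in middlewares (a small sketch using `extractReasoningMiddleware`; substitute your own middleware as needed):
+
+ ```tsx
+ import { wrapLanguageModel, extractReasoningMiddleware } from 'ai';
+
+ const model = wrapLanguageModel({
+   model: __MODEL__,
+   // Pull <think>...</think> content out into reasoning parts
+   middleware: extractReasoningMiddleware({ tagName: 'think' }),
+ });
+ ```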
2835
+
2836
+ #### `activeTools` No Longer Experimental
2837
+
2838
+ ```tsx filename="AI SDK 4.0"
2839
+ const result = await generateText({
2840
+ model: __MODEL__,
2841
+ messages,
2842
+ tools: { weatherTool, locationTool },
2843
+ experimental_activeTools: ['weatherTool'],
2844
+ });
2845
+ ```
2846
+
2847
+ ```tsx filename="AI SDK 5.0"
2848
+ const result = await generateText({
2849
+ model: __MODEL__,
2850
+ messages,
2851
+ tools: { weatherTool, locationTool },
2852
+ activeTools: ['weatherTool'], // No longer experimental
2853
+ });
2854
+ ```
2855
+
2856
+ #### `prepareStep` No Longer Experimental
2857
+
2858
+ The `experimental_prepareStep` option has been promoted and no longer requires the experimental prefix.
2859
+
2860
+ ```tsx filename="AI SDK 4.0"
2861
+ const result = await generateText({
2862
+ model: __MODEL__,
2863
+ messages,
2864
+ tools: { weatherTool, locationTool },
2865
+ experimental_prepareStep: ({ steps, stepNumber, model }) => {
2866
+ console.log('Preparing step:', stepNumber);
2867
+ return {
2868
+ activeTools: ['weatherTool'],
2869
+ system: 'Be helpful and concise.',
2870
+ };
2871
+ },
2872
+ });
2873
+ ```
2874
+
2875
+ ```tsx filename="AI SDK 5.0"
2876
+ const result = await generateText({
2877
+ model: __MODEL__,
2878
+ messages,
2879
+ tools: { weatherTool, locationTool },
2880
+ prepareStep: ({ steps, stepNumber, model }) => {
2881
+ console.log('Preparing step:', stepNumber);
2882
+ return {
2883
+ activeTools: ['weatherTool'],
2884
+ system: 'Be helpful and concise.',
2885
+ // Can also configure toolChoice, model, etc.
2886
+ };
2887
+ },
2888
+ });
2889
+ ```
2890
+
2891
+ The `prepareStep` function receives `{ steps, stepNumber, model }` and can return:
2892
+
2893
+ - `model`: Different model for this step
2894
+ - `activeTools`: Which tools to make available
2895
+ - `toolChoice`: Tool selection strategy
2896
+ - `system`: System message for this step
2897
+ - `undefined`: Use default settings
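+
+ A short sketch that combines several of these return values; forcing a tool on the first step is purely illustrative:
+
+ ```tsx
+ const result = await generateText({
+   model: __MODEL__,
+   messages,
+   tools: { weatherTool, locationTool },
+   prepareStep: ({ stepNumber }) => {
+     // Force the location tool on the first step, then fall back to defaults
+     if (stepNumber === 0) {
+       return {
+         activeTools: ['locationTool'],
+         toolChoice: { type: 'tool', toolName: 'locationTool' },
+       };
+     }
+     return undefined; // use default settings for the remaining steps
+   },
+ });
+ ```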
2898
+
2899
+ ### Temperature Default Removal
2900
+
2901
+ Temperature is no longer set to `0` by default.
2902
+
2903
+ ```tsx filename="AI SDK 4.0"
2904
+ await generateText({
2905
+ model: __MODEL__,
2906
+ prompt: 'Write a creative story',
2907
+ // Implicitly temperature: 0
2908
+ });
2909
+ ```
2910
+
2911
+ ```tsx filename="AI SDK 5.0"
2912
+ await generateText({
2913
+ model: __MODEL__,
2914
+ prompt: 'Write a creative story',
2915
+ temperature: 0, // Must explicitly set
2916
+ });
2917
+ ```
2918
+
2919
+ ## Message Persistence Changes
2920
+
2921
+ <Note>
2922
+ If you have persisted messages in a database, see the [Data Migration
2923
+ Guide](/docs/migration-guides/migration-guide-5-0-data) for comprehensive
2924
+ guidance on migrating your stored message data to the v5 format.
2925
+ </Note>
2926
+
2927
+ In v4, you would typically use helper functions like `appendResponseMessages` or `appendClientMessage` to format messages in the `onFinish` callback of `streamText`:
2928
+
2929
+ ```tsx filename="AI SDK 4.0"
2930
+ import {
2931
+ streamText,
2932
2933
+ appendClientMessage,
2934
+ appendResponseMessages,
2935
+ } from 'ai';
2936
+
2937
+ const updatedMessages = appendClientMessage({
2938
+ messages,
2939
+ message: lastUserMessage,
2940
+ });
2941
+
2942
+ const result = streamText({
2943
+ model: __MODEL__,
2944
+ messages: updatedMessages,
2945
+ experimental_generateMessageId: () => generateId(), // ID generation on streamText
2946
+ onFinish: async ({ responseMessages, usage }) => {
2947
+ // Use helper functions to format messages
2948
+ const finalMessages = appendResponseMessages({
2949
+ messages: updatedMessages,
2950
+ responseMessages,
2951
+ });
2952
+
2953
+ // Save formatted messages to database
2954
+ await saveMessages(finalMessages);
2955
+ },
2956
+ });
2957
+ ```
2958
+
2959
+ In v5, message persistence is now handled through the `toUIMessageStreamResponse` method, which automatically formats response messages in the `UIMessage` format:
2960
+
2961
+ ```tsx filename="AI SDK 5.0"
2962
+ import { streamText, convertToModelMessages, UIMessage } from 'ai';
2963
+
2964
+ const messages: UIMessage[] = [
2965
+ // Your existing messages in UIMessage format
2966
+ ];
2967
+
2968
+ const result = streamText({
2969
+ model: __MODEL__,
2970
+ messages: convertToModelMessages(messages),
2971
+ // experimental_generateMessageId removed from here
2972
+ });
2973
+
2974
+ return result.toUIMessageStreamResponse({
2975
+ originalMessages: messages, // IMPORTANT: Required to prevent duplicate messages
2976
+ generateMessageId: () => generateId(), // IMPORTANT: Required for proper message ID generation
2977
+ onFinish: ({ messages, responseMessage }) => {
2978
+ // messages contains all messages (original + response) in UIMessage format
2979
+ saveChat({ chatId, messages });
2980
+
2981
+ // responseMessage contains just the generated message in UIMessage format
2982
+ saveMessage({ chatId, message: responseMessage });
2983
+ },
2984
+ });
2985
+ ```
2986
+
2987
+ <Note>
2988
+ **Important:** When using `toUIMessageStreamResponse`, you should always
2989
+ provide both `originalMessages` and `generateMessageId` parameters. Without
2990
+ these, you may experience duplicate or repeated assistant messages in your UI.
2991
+ For more details, see [Troubleshooting: Repeated Assistant
2992
+ Messages](/docs/troubleshooting/repeated-assistant-messages).
2993
+ </Note>
2994
+
2995
+ ### Message ID Generation
2996
+
2997
+ The `experimental_generateMessageId` option has been moved from `streamText` configuration to `toUIMessageStreamResponse`, as it's designed for use with `UIMessage`s rather than `ModelMessage`s.
2998
+
2999
+ ```tsx filename="AI SDK 4.0"
3000
+ const result = streamText({
3001
+ model: __MODEL__,
3002
+ messages,
3003
+ experimental_generateMessageId: () => generateId(),
3004
+ });
3005
+ ```
3006
+
3007
+ ```tsx filename="AI SDK 5.0"
3008
+ const result = streamText({
3009
+ model: __MODEL__,
3010
+ messages: convertToModelMessages(messages),
3011
+ });
3012
+
3013
+ return result.toUIMessageStreamResponse({
3014
+ generateMessageId: () => generateId(), // No longer experimental
3015
+ // ...
3016
+ });
3017
+ ```
3018
+
3019
+ For more details on message IDs and persistence, see the [Chatbot Message Persistence guide](/docs/ai-sdk-ui/chatbot-message-persistence#message-ids).
3020
+
3021
+ ### Using createUIMessageStream
3022
+
3023
+ For more complex scenarios, especially when working with data parts, you can use `createUIMessageStream`:
3024
+
3025
+ ```tsx filename="AI SDK 5.0 - Advanced"
3026
+ import {
3027
+ createUIMessageStream,
3028
+ createUIMessageStreamResponse,
3029
+ streamText,
3030
+ convertToModelMessages,
3031
+ UIMessage,
3032
+ } from 'ai';
3033
+
3034
+ const stream = createUIMessageStream({
3035
+ originalMessages: messages,
3036
+ generateId: generateId, // Required for proper message ID generation
3037
+ execute: ({ writer }) => {
3038
+ // Write custom data parts
3039
+ writer.write({
3040
+ type: 'data',
3041
+ data: { status: 'processing', timestamp: Date.now() },
3042
+ });
3043
+
3044
+ // Stream the AI response
3045
+ const result = streamText({
3046
+ model: __MODEL__,
3047
+ messages: convertToModelMessages(messages),
3048
+ });
3049
+
3050
+ writer.merge(result.toUIMessageStream());
3051
+ },
3052
+ onFinish: ({ messages }) => {
3053
+ // messages contains all messages (original + response + data parts) in UIMessage format
3054
+ saveChat({ chatId, messages });
3055
+ },
3056
+ });
3057
+
3058
+ return createUIMessageStreamResponse({ stream });
3059
+ ```
3060
+
3061
+ ## Provider & Model Changes
3062
+
3063
+ ### OpenAI
3064
+
3065
+ #### Default Provider Instance Uses Responses API
3066
+
3067
+ In AI SDK 5, the default OpenAI provider instance uses the Responses API, while AI SDK 4 used the Chat Completions API. The Chat Completions API remains fully supported and you can use it with `openai.chat(...)`.
3068
+
3069
+ ```tsx filename="AI SDK 4.0"
3070
+ import { openai } from '@ai-sdk/openai';
3071
+
3072
+ const defaultModel = openai('gpt-4.1-mini'); // Chat Completions API
3073
+ ```
3074
+
3075
+ ```tsx filename="AI SDK 5.0"
3076
+ import { openai } from '@ai-sdk/openai';
3077
+
3078
+ const defaultModel = openai('gpt-4.1-mini'); // Responses API
3079
+
3080
+ // Specify a specific API when needed:
3081
+ const chatCompletionsModel = openai.chat('gpt-4.1-mini');
3082
+ const responsesModel = openai.responses('gpt-4.1-mini');
3083
+ ```
3084
+
3085
+ <Note>
3086
+ The Responses and Chat Completions APIs have different behavior and defaults.
3087
+ If you depend on the Chat Completions API, switch your model instance to
3088
+ `openai.chat(...)` and audit your configuration.
3089
+ </Note>
3090
+
3091
+ #### Strict Schemas (`strictSchemas`) with Responses API
3092
+
3093
+ In AI SDK 4.0, you could set the `strictSchemas` option on Responses models (which defaulted to `true`). This option has been renamed to `strictJsonSchema` in AI SDK 5.0 and now defaults to `false`.
3094
+
3095
+ ```tsx filename="AI SDK 4.0"
3096
+ import { z } from 'zod';
3097
+ import { generateObject } from 'ai';
3098
+ import { openai, type OpenAIResponsesProviderOptions } from '@ai-sdk/openai';
3099
+
3100
+ const result = await generateObject({
3101
+ model: openai.responses('gpt-4.1'),
3102
+ schema: z.object({
3103
+ // ...
3104
+ }),
3105
+ providerOptions: {
3106
+ openai: {
3107
+ strictSchemas: true, // default behaviour in AI SDK 4
3108
+ } satisfies OpenAIResponsesProviderOptions,
3109
+ },
3110
+ });
3111
+ ```
3112
+
3113
+ ```tsx filename="AI SDK 5.0"
3114
+ import { z } from 'zod';
3115
+ import { generateObject } from 'ai';
3116
+ import { openai, type OpenAIResponsesProviderOptions } from '@ai-sdk/openai';
3117
+
3118
+ const result = await generateObject({
3119
+   model: openai('gpt-4.1'), // uses Responses API
3120
+ schema: z.object({
3121
+ // ...
3122
+ }),
3123
+ providerOptions: {
3124
+ openai: {
3125
+ strictJsonSchema: true, // defaults to false, opt back in to the AI SDK 4 strict behaviour
3126
+ } satisfies OpenAIResponsesProviderOptions,
3127
+ },
3128
+ });
3129
+ ```
3130
+
3131
+ If you call `openai.chat(...)` to use the Chat Completions API directly, you can type it with `OpenAIChatLanguageModelOptions`. AI SDK 5 adds the same `strictJsonSchema` option there as well.
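+
+ A minimal sketch of that combination, mirroring the Responses example above:
+
+ ```tsx
+ import { z } from 'zod';
+ import { generateObject } from 'ai';
+ import { openai, type OpenAIChatLanguageModelOptions } from '@ai-sdk/openai';
+
+ const result = await generateObject({
+   model: openai.chat('gpt-4.1'), // Chat Completions API
+   schema: z.object({
+     // ...
+   }),
+   providerOptions: {
+     openai: {
+       strictJsonSchema: true, // opt in to strict JSON schema validation
+     } satisfies OpenAIChatLanguageModelOptions,
+   },
+ });
+ ```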
3132
+
3133
+ #### Structured Outputs
3134
+
3135
+ The `structuredOutputs` option is now configured using provider options rather than as a setting on the model instance.
3136
+
3137
+ ```tsx filename="AI SDK 4.0"
3138
+ import { z } from 'zod';
3139
+ import { generateObject } from 'ai';
3140
+ import { openai } from '@ai-sdk/openai';
3141
+
3142
+ const result = await generateObject({
3143
+ model: openai('gpt-4.1', { structuredOutputs: true }), // use Chat Completions API
3144
+ schema: z.object({ name: z.string() }),
3145
+ });
3146
+ ```
3147
+
3148
+ ```tsx filename="AI SDK 5.0 (Chat Completions API)"
3149
+ import { z } from 'zod';
3150
+ import { generateObject } from 'ai';
3151
+ import { openai, type OpenAIChatLanguageModelOptions } from '@ai-sdk/openai';
3152
+
3153
+ const result = await generateObject({
3154
+ model: openai.chat('gpt-4.1'), // use Chat Completions API
3155
+ schema: z.object({ name: z.string() }),
3156
+ providerOptions: {
3157
+ openai: {
3158
+ structuredOutputs: true,
3159
+ } satisfies OpenAIChatLanguageModelOptions,
3160
+ },
3161
+ });
3162
+ ```
3163
+
3164
+ #### Compatibility Option Removal
3165
+
3166
+ The `compatibility` option has been removed; strict compatibility mode is now the default.
3167
+
3168
+ ```tsx filename="AI SDK 4.0"
3169
+ const openai = createOpenAI({
3170
+ compatibility: 'strict',
3171
+ });
3172
+ ```
3173
+
3174
+ ```tsx filename="AI SDK 5.0"
3175
+ import { createOpenAI } from '@ai-sdk/openai';
+
+ const openai = createOpenAI({
3176
+ // strict compatibility is now the default
3177
+ });
3178
+ ```
3179
+
3180
+ #### Legacy Function Calls Removal
3181
+
3182
+ The `useLegacyFunctionCalls` option has been removed.
3183
+
3184
+ ```tsx filename="AI SDK 4.0"
3185
+ const result = streamText({
3186
+ model: openai('gpt-4.1', { useLegacyFunctionCalls: true }),
3187
+ });
3188
+ ```
3189
+
3190
+ ```tsx filename="AI SDK 5.0"
3191
+ const result = streamText({
3192
+ model: openai('gpt-4.1'),
3193
+ });
3194
+ ```
3195
+
3196
+ #### Simulate Streaming
3197
+
3198
+ The `simulateStreaming` model option has been replaced by the `simulateStreamingMiddleware` language model middleware.
3199
+
3200
+ ```tsx filename="AI SDK 4.0"
3201
+ const result = await generateText({
3202
+ model: openai('gpt-4.1', { simulateStreaming: true }),
3203
+ prompt: 'Hello, world!',
3204
+ });
3205
+ ```
3206
+
3207
+ ```tsx filename="AI SDK 5.0"
3208
+ import { generateText, simulateStreamingMiddleware, wrapLanguageModel } from 'ai';
+ import { openai } from '@ai-sdk/openai';
3209
+
3210
+ const model = wrapLanguageModel({
3211
+ model: openai('gpt-4.1'),
3212
+ middleware: simulateStreamingMiddleware(),
3213
+ });
3214
+
3215
+ const result = await generateText({
3216
+ model,
3217
+ prompt: 'Hello, world!',
3218
+ });
3219
+ ```
3220
+
3221
+ ### Google
3222
+
3223
+ #### Search Grounding Is Now a Provider-Defined Tool
3224
+
3225
+ Search Grounding is now called "Google Search" and is exposed as a provider-defined tool.
3226
+
3227
+ ```tsx filename="AI SDK 4.0"
3228
+ const { text, providerMetadata } = await generateText({
3229
+ model: google('gemini-1.5-pro', {
3230
+ useSearchGrounding: true,
3231
+ }),
3232
+ prompt: 'List the top 5 San Francisco news from the past week.',
3233
+ });
3234
+ ```
3235
+
3236
+ ```tsx filename="AI SDK 5.0"
3237
+ import { google } from '@ai-sdk/google';
+
3238
+ const { text, sources, providerMetadata } = await generateText({
3239
+ model: google('gemini-1.5-pro'),
3240
+ prompt:
3241
+ 'List the top 5 San Francisco news from the past week.',
3242
+ tools: {
3243
+ google_search: google.tools.googleSearch({}),
3244
+ },
3245
+ });
3246
+ ```
3247
+
3248
+ ### Amazon Bedrock
3249
+
3250
+ #### Snake Case → Camel Case
3251
+
3252
+ Amazon Bedrock provider options now use camelCase instead of snake_case.
3253
+
3254
+ ```tsx filename="AI SDK 4.0"
3255
+ const result = await generateText({
3256
+ model: bedrock('amazon.titan-tg1-large'),
3257
+ prompt: 'Hello, world!',
3258
+ providerOptions: {
3259
+ bedrock: {
3260
+ reasoning_config: {
3261
+ /* ... */
3262
+ },
3263
+ },
3264
+ },
3265
+ });
3266
+ ```
3267
+
3268
+ ```tsx filename="AI SDK 5.0"
3269
+ const result = await generateText({
3270
+ model: bedrock('amazon.titan-tg1-large'),
3271
+ prompt: 'Hello, world!',
3272
+ providerOptions: {
3273
+ bedrock: {
3274
+ reasoningConfig: {
3275
+ /* ... */
3276
+ },
3277
+ },
3278
+ },
3279
+ });
3280
+ ```
3281
+
3282
+ ### Provider-Utils Changes
3283
+
3284
+ Deprecated `CoreTool*` types have been removed.
3285
+
3286
+ ```tsx filename="AI SDK 4.0"
3287
+ import {
3288
+ CoreToolCall,
3289
+ CoreToolResult,
3290
+ CoreToolResultUnion,
3291
+ CoreToolCallUnion,
3292
+ CoreToolChoice,
3293
+ } from '@ai-sdk/provider-utils';
3294
+ ```
3295
+
3296
+ ```tsx filename="AI SDK 5.0"
3297
+ import {
3298
+ ToolCall,
3299
+ ToolResult,
3300
+ TypedToolResult,
3301
+ TypedToolCall,
3302
+ ToolChoice,
3303
+ } from '@ai-sdk/provider-utils';
3304
+ ```
3305
+
3306
+ ## Troubleshooting
3307
+
3308
+ ### TypeScript Performance Issues with Zod
3309
+
3310
+ If you experience TypeScript server crashes, slow type checking, or errors like "Type instantiation is excessively deep and possibly infinite" when using Zod with AI SDK 5.0:
3311
+
3312
+ 1. **First, ensure you're using Zod 4.1.8 or later.** This version includes a fix for the module resolution issues that cause TypeScript performance problems.
3313
+
3314
+ 2. If the issue persists, update your `tsconfig.json` to use `moduleResolution: "nodenext"`:
3315
+
3316
+ ```json
3317
+ {
3318
+ "compilerOptions": {
3319
+ "moduleResolution": "nodenext"
3320
+ // ... other options
3321
+ }
3322
+ }
3323
+ ```
3324
+
3325
+ This resolves the TypeScript performance issues while letting you keep the standard Zod import. If the problem persists, you can try a version-specific import path instead. For detailed troubleshooting steps, see [TypeScript performance issues with Zod](/docs/troubleshooting/typescript-performance-zod).
3326
+
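+ As a rough sketch of that alternative, one common version-specific import is the `zod/v4` subpath (available in Zod 3.25 and later; treat the exact path as an assumption and verify it against the linked troubleshooting page):
+
+ ```tsx
+ // Version-specific Zod import path (assumes Zod 3.25+ ships the `zod/v4` subpath)
+ import { z } from 'zod/v4';
+ import { generateObject } from 'ai';
+ import { openai } from '@ai-sdk/openai';
+
+ const result = await generateObject({
+   model: openai('gpt-4.1'),
+   schema: z.object({ name: z.string() }),
+ });
+ ```
+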
3327
+ ## Codemod Table
3328
+
3329
+ The following table lists available codemods for the AI SDK 5.0 upgrade
3330
+ process.
3331
+ For more information, see the [Codemods](#codemods) section.
3332
+
3333
+ | Change | Codemod |
3334
+ | ------------------------------------------------ | ----------------------------------------------------- |
3335
+ | **AI SDK Core Changes** | |
3336
+ | Flatten streamText file properties | `v5/flatten-streamtext-file-properties` |
3337
+ | ID Generation Changes | `v5/require-createIdGenerator-size-argument` |
3338
+ | IDGenerator → IdGenerator | `v5/rename-IDGenerator-to-IdGenerator` |
3339
+ | Import LanguageModelV3 from provider package | `v5/import-LanguageModelV3-from-provider-package` |
3340
+ | Migrate to data stream protocol v2 | `v5/migrate-to-data-stream-protocol-v2` |
3341
+ | Move image model maxImagesPerCall | `v5/move-image-model-maxImagesPerCall` |
3342
+ | Move LangChain adapter | `v5/move-langchain-adapter` |
3343
+ | Move maxSteps to stopWhen | `v5/move-maxsteps-to-stopwhen` |
3344
+ | Move provider options | `v5/move-provider-options` |
3345
+ | Move React to AI SDK | `v5/move-react-to-ai-sdk` |
3346
+ | Move UI utils to AI | `v5/move-ui-utils-to-ai` |
3347
+ | Remove experimental wrap language model | `v5/remove-experimental-wrap-language-model` |
3348
+ | Remove experimental activeTools | `v5/remove-experimental-activetools` |
3349
+ | Remove experimental prepareStep | `v5/remove-experimental-preparestep` |
3350
+ | Remove experimental continueSteps | `v5/remove-experimental-continuesteps` |
3351
+ | Remove experimental temperature | `v5/remove-experimental-temperature` |
3352
+ | Remove experimental truncate | `v5/remove-experimental-truncate` |
3353
+ | Remove experimental OpenAI compatibility | `v5/remove-experimental-openai-compatibility` |
3354
+ | Remove experimental OpenAI legacy function calls | `v5/remove-experimental-openai-legacy-function-calls` |
3355
+ | Remove experimental OpenAI structured outputs | `v5/remove-experimental-openai-structured-outputs` |
3356
+ | Remove experimental OpenAI store | `v5/remove-experimental-openai-store` |
3357
+ | Remove experimental OpenAI user | `v5/remove-experimental-openai-user` |
3358
+ | Remove experimental OpenAI parallel tool calls | `v5/remove-experimental-openai-parallel-tool-calls` |
3359
+ | Remove experimental OpenAI response format | `v5/remove-experimental-openai-response-format` |
3360
+ | Remove experimental OpenAI logit bias | `v5/remove-experimental-openai-logit-bias` |
3361
+ | Remove experimental OpenAI logprobs | `v5/remove-experimental-openai-logprobs` |
3362
+ | Remove experimental OpenAI seed | `v5/remove-experimental-openai-seed` |
3363
+ | Remove experimental OpenAI service tier | `v5/remove-experimental-openai-service-tier` |
3364
+ | Remove experimental OpenAI top logprobs | `v5/remove-experimental-openai-top-logprobs` |
3365
+ | Remove experimental OpenAI transform | `v5/remove-experimental-openai-transform` |
3366
+ | Remove experimental OpenAI stream options | `v5/remove-experimental-openai-stream-options` |
3367
+ | Remove experimental OpenAI prediction | `v5/remove-experimental-openai-prediction` |
3368
+ | Remove experimental Anthropic caching | `v5/remove-experimental-anthropic-caching` |
3369
+ | Remove experimental Anthropic computer use | `v5/remove-experimental-anthropic-computer-use` |
3370
+ | Remove experimental Anthropic PDF support | `v5/remove-experimental-anthropic-pdf-support` |
3371
+ | Remove experimental Anthropic prompt caching | `v5/remove-experimental-anthropic-prompt-caching` |
3372
+ | Remove experimental Google search grounding | `v5/remove-experimental-google-search-grounding` |
3373
+ | Remove experimental Google code execution | `v5/remove-experimental-google-code-execution` |
3374
+ | Remove experimental Google cached content | `v5/remove-experimental-google-cached-content` |
3375
+ | Remove experimental Google custom headers | `v5/remove-experimental-google-custom-headers` |
3376
+ | Rename format stream part | `v5/rename-format-stream-part` |
3377
+ | Rename parse stream part | `v5/rename-parse-stream-part` |
3378
+ | Replace image type with file type | `v5/replace-image-type-with-file-type` |
3379
+ | Replace LlamaIndex adapter | `v5/replace-llamaindex-adapter` |
3380
+ | Replace onCompletion with onFinal | `v5/replace-oncompletion-with-onfinal` |
3381
+ | Replace provider metadata with provider options | `v5/replace-provider-metadata-with-provider-options` |
3382
+ | Replace rawResponse with response | `v5/replace-rawresponse-with-response` |
3383
+ | Replace redacted reasoning type | `v5/replace-redacted-reasoning-type` |
3384
+ | Replace simulate streaming | `v5/replace-simulate-streaming` |
3385
+ | Replace textDelta with text | `v5/replace-textdelta-with-text` |
3386
+ | Replace usage token properties | `v5/replace-usage-token-properties` |
3387
+ | Restructure file stream parts | `v5/restructure-file-stream-parts` |
3388
+ | Restructure source stream parts | `v5/restructure-source-stream-parts` |
3389
+ | RSC package | `v5/rsc-package` |
3390
+
3391
+ ## Changes Between v5 Beta Versions
3392
+
3393
+ This section documents breaking changes between different beta versions of AI SDK 5.0. If you're upgrading from an earlier v5 beta version to a later one, check this section for any changes that might affect your code.
3394
+
3395
+ ### fullStream Type Rename: text/reasoning → text-delta/reasoning-delta
3396
+
3397
+ The chunk types in `fullStream` have been renamed for consistency with UI streams and language model streams.
3398
+
3399
+ ```tsx filename="AI SDK 5.0 (before beta.26)"
3400
+ for await (const chunk of result.fullStream) {
3401
+ switch (chunk.type) {
3402
+ case 'text': {
3403
+ process.stdout.write(chunk.text);
3404
+ break;
3405
+ }
3406
+ case 'reasoning': {
3407
+ console.log('Reasoning:', chunk.text);
3408
+ break;
3409
+ }
3410
+ }
3411
+ }
3412
+ ```
3413
+
3414
+ ```tsx filename="AI SDK 5.0 (beta.26 and later)"
3415
+ for await (const chunk of result.fullStream) {
3416
+ switch (chunk.type) {
3417
+ case 'text-delta': {
3418
+ process.stdout.write(chunk.text);
3419
+ break;
3420
+ }
3421
+ case 'reasoning-delta': {
3422
+ console.log('Reasoning:', chunk.text);
3423
+ break;
3424
+ }
3425
+ }
3426
+ }
3427
+ ```