ai 6.0.31 → 6.0.32

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (250)
  1. package/CHANGELOG.md +6 -0
  2. package/dist/index.js +1 -1
  3. package/dist/index.mjs +1 -1
  4. package/dist/internal/index.js +1 -1
  5. package/dist/internal/index.mjs +1 -1
  6. package/docs/00-introduction/index.mdx +76 -0
  7. package/docs/02-foundations/01-overview.mdx +43 -0
  8. package/docs/02-foundations/02-providers-and-models.mdx +163 -0
  9. package/docs/02-foundations/03-prompts.mdx +620 -0
  10. package/docs/02-foundations/04-tools.mdx +160 -0
  11. package/docs/02-foundations/05-streaming.mdx +62 -0
  12. package/docs/02-foundations/index.mdx +43 -0
  13. package/docs/02-getting-started/00-choosing-a-provider.mdx +110 -0
  14. package/docs/02-getting-started/01-navigating-the-library.mdx +85 -0
  15. package/docs/02-getting-started/02-nextjs-app-router.mdx +556 -0
  16. package/docs/02-getting-started/03-nextjs-pages-router.mdx +542 -0
  17. package/docs/02-getting-started/04-svelte.mdx +627 -0
  18. package/docs/02-getting-started/05-nuxt.mdx +566 -0
  19. package/docs/02-getting-started/06-nodejs.mdx +512 -0
  20. package/docs/02-getting-started/07-expo.mdx +766 -0
  21. package/docs/02-getting-started/08-tanstack-start.mdx +583 -0
  22. package/docs/02-getting-started/index.mdx +44 -0
  23. package/docs/03-agents/01-overview.mdx +96 -0
  24. package/docs/03-agents/02-building-agents.mdx +367 -0
  25. package/docs/03-agents/03-workflows.mdx +370 -0
  26. package/docs/03-agents/04-loop-control.mdx +350 -0
  27. package/docs/03-agents/05-configuring-call-options.mdx +286 -0
  28. package/docs/03-agents/index.mdx +40 -0
  29. package/docs/03-ai-sdk-core/01-overview.mdx +33 -0
  30. package/docs/03-ai-sdk-core/05-generating-text.mdx +600 -0
  31. package/docs/03-ai-sdk-core/10-generating-structured-data.mdx +662 -0
  32. package/docs/03-ai-sdk-core/15-tools-and-tool-calling.mdx +1102 -0
  33. package/docs/03-ai-sdk-core/16-mcp-tools.mdx +375 -0
  34. package/docs/03-ai-sdk-core/20-prompt-engineering.mdx +144 -0
  35. package/docs/03-ai-sdk-core/25-settings.mdx +198 -0
  36. package/docs/03-ai-sdk-core/30-embeddings.mdx +247 -0
  37. package/docs/03-ai-sdk-core/31-reranking.mdx +218 -0
  38. package/docs/03-ai-sdk-core/35-image-generation.mdx +341 -0
  39. package/docs/03-ai-sdk-core/36-transcription.mdx +173 -0
  40. package/docs/03-ai-sdk-core/37-speech.mdx +167 -0
  41. package/docs/03-ai-sdk-core/40-middleware.mdx +480 -0
  42. package/docs/03-ai-sdk-core/45-provider-management.mdx +349 -0
  43. package/docs/03-ai-sdk-core/50-error-handling.mdx +149 -0
  44. package/docs/03-ai-sdk-core/55-testing.mdx +218 -0
  45. package/docs/03-ai-sdk-core/60-telemetry.mdx +313 -0
  46. package/docs/03-ai-sdk-core/65-devtools.mdx +107 -0
  47. package/docs/03-ai-sdk-core/index.mdx +88 -0
  48. package/docs/04-ai-sdk-ui/01-overview.mdx +44 -0
  49. package/docs/04-ai-sdk-ui/02-chatbot.mdx +1313 -0
  50. package/docs/04-ai-sdk-ui/03-chatbot-message-persistence.mdx +535 -0
  51. package/docs/04-ai-sdk-ui/03-chatbot-resume-streams.mdx +263 -0
  52. package/docs/04-ai-sdk-ui/03-chatbot-tool-usage.mdx +682 -0
  53. package/docs/04-ai-sdk-ui/04-generative-user-interfaces.mdx +389 -0
  54. package/docs/04-ai-sdk-ui/05-completion.mdx +186 -0
  55. package/docs/04-ai-sdk-ui/08-object-generation.mdx +344 -0
  56. package/docs/04-ai-sdk-ui/20-streaming-data.mdx +397 -0
  57. package/docs/04-ai-sdk-ui/21-error-handling.mdx +190 -0
  58. package/docs/04-ai-sdk-ui/21-transport.mdx +174 -0
  59. package/docs/04-ai-sdk-ui/24-reading-ui-message-streams.mdx +104 -0
  60. package/docs/04-ai-sdk-ui/25-message-metadata.mdx +152 -0
  61. package/docs/04-ai-sdk-ui/50-stream-protocol.mdx +477 -0
  62. package/docs/04-ai-sdk-ui/index.mdx +64 -0
  63. package/docs/05-ai-sdk-rsc/01-overview.mdx +45 -0
  64. package/docs/05-ai-sdk-rsc/02-streaming-react-components.mdx +209 -0
  65. package/docs/05-ai-sdk-rsc/03-generative-ui-state.mdx +279 -0
  66. package/docs/05-ai-sdk-rsc/03-saving-and-restoring-states.mdx +105 -0
  67. package/docs/05-ai-sdk-rsc/04-multistep-interfaces.mdx +282 -0
  68. package/docs/05-ai-sdk-rsc/05-streaming-values.mdx +158 -0
  69. package/docs/05-ai-sdk-rsc/06-loading-state.mdx +273 -0
  70. package/docs/05-ai-sdk-rsc/08-error-handling.mdx +96 -0
  71. package/docs/05-ai-sdk-rsc/09-authentication.mdx +42 -0
  72. package/docs/05-ai-sdk-rsc/10-migrating-to-ui.mdx +722 -0
  73. package/docs/05-ai-sdk-rsc/index.mdx +58 -0
  74. package/docs/06-advanced/01-prompt-engineering.mdx +96 -0
  75. package/docs/06-advanced/02-stopping-streams.mdx +184 -0
  76. package/docs/06-advanced/03-backpressure.mdx +173 -0
  77. package/docs/06-advanced/04-caching.mdx +169 -0
  78. package/docs/06-advanced/05-multiple-streamables.mdx +68 -0
  79. package/docs/06-advanced/06-rate-limiting.mdx +60 -0
  80. package/docs/06-advanced/07-rendering-ui-with-language-models.mdx +213 -0
  81. package/docs/06-advanced/08-model-as-router.mdx +120 -0
  82. package/docs/06-advanced/09-multistep-interfaces.mdx +115 -0
  83. package/docs/06-advanced/09-sequential-generations.mdx +55 -0
  84. package/docs/06-advanced/10-vercel-deployment-guide.mdx +117 -0
  85. package/docs/06-advanced/index.mdx +11 -0
  86. package/docs/07-reference/01-ai-sdk-core/01-generate-text.mdx +2142 -0
  87. package/docs/07-reference/01-ai-sdk-core/02-stream-text.mdx +3215 -0
  88. package/docs/07-reference/01-ai-sdk-core/03-generate-object.mdx +780 -0
  89. package/docs/07-reference/01-ai-sdk-core/04-stream-object.mdx +1140 -0
  90. package/docs/07-reference/01-ai-sdk-core/05-embed.mdx +190 -0
  91. package/docs/07-reference/01-ai-sdk-core/06-embed-many.mdx +171 -0
  92. package/docs/07-reference/01-ai-sdk-core/06-rerank.mdx +309 -0
  93. package/docs/07-reference/01-ai-sdk-core/10-generate-image.mdx +227 -0
  94. package/docs/07-reference/01-ai-sdk-core/11-transcribe.mdx +138 -0
  95. package/docs/07-reference/01-ai-sdk-core/12-generate-speech.mdx +214 -0
  96. package/docs/07-reference/01-ai-sdk-core/15-agent.mdx +203 -0
  97. package/docs/07-reference/01-ai-sdk-core/16-tool-loop-agent.mdx +449 -0
  98. package/docs/07-reference/01-ai-sdk-core/17-create-agent-ui-stream.mdx +148 -0
  99. package/docs/07-reference/01-ai-sdk-core/18-create-agent-ui-stream-response.mdx +168 -0
  100. package/docs/07-reference/01-ai-sdk-core/18-pipe-agent-ui-stream-to-response.mdx +144 -0
  101. package/docs/07-reference/01-ai-sdk-core/20-tool.mdx +196 -0
  102. package/docs/07-reference/01-ai-sdk-core/22-dynamic-tool.mdx +175 -0
  103. package/docs/07-reference/01-ai-sdk-core/23-create-mcp-client.mdx +410 -0
  104. package/docs/07-reference/01-ai-sdk-core/24-mcp-stdio-transport.mdx +68 -0
  105. package/docs/07-reference/01-ai-sdk-core/25-json-schema.mdx +94 -0
  106. package/docs/07-reference/01-ai-sdk-core/26-zod-schema.mdx +109 -0
  107. package/docs/07-reference/01-ai-sdk-core/27-valibot-schema.mdx +55 -0
  108. package/docs/07-reference/01-ai-sdk-core/28-output.mdx +342 -0
  109. package/docs/07-reference/01-ai-sdk-core/30-model-message.mdx +415 -0
  110. package/docs/07-reference/01-ai-sdk-core/31-ui-message.mdx +246 -0
  111. package/docs/07-reference/01-ai-sdk-core/32-validate-ui-messages.mdx +101 -0
  112. package/docs/07-reference/01-ai-sdk-core/33-safe-validate-ui-messages.mdx +113 -0
  113. package/docs/07-reference/01-ai-sdk-core/40-provider-registry.mdx +182 -0
  114. package/docs/07-reference/01-ai-sdk-core/42-custom-provider.mdx +121 -0
  115. package/docs/07-reference/01-ai-sdk-core/50-cosine-similarity.mdx +52 -0
  116. package/docs/07-reference/01-ai-sdk-core/60-wrap-language-model.mdx +59 -0
  117. package/docs/07-reference/01-ai-sdk-core/61-wrap-image-model.mdx +64 -0
  118. package/docs/07-reference/01-ai-sdk-core/65-language-model-v2-middleware.mdx +46 -0
  119. package/docs/07-reference/01-ai-sdk-core/66-extract-reasoning-middleware.mdx +68 -0
  120. package/docs/07-reference/01-ai-sdk-core/67-simulate-streaming-middleware.mdx +71 -0
  121. package/docs/07-reference/01-ai-sdk-core/68-default-settings-middleware.mdx +80 -0
  122. package/docs/07-reference/01-ai-sdk-core/69-add-tool-input-examples-middleware.mdx +155 -0
  123. package/docs/07-reference/01-ai-sdk-core/70-extract-json-middleware.mdx +147 -0
  124. package/docs/07-reference/01-ai-sdk-core/70-step-count-is.mdx +84 -0
  125. package/docs/07-reference/01-ai-sdk-core/71-has-tool-call.mdx +120 -0
  126. package/docs/07-reference/01-ai-sdk-core/75-simulate-readable-stream.mdx +94 -0
  127. package/docs/07-reference/01-ai-sdk-core/80-smooth-stream.mdx +145 -0
  128. package/docs/07-reference/01-ai-sdk-core/90-generate-id.mdx +43 -0
  129. package/docs/07-reference/01-ai-sdk-core/91-create-id-generator.mdx +89 -0
  130. package/docs/07-reference/01-ai-sdk-core/index.mdx +159 -0
  131. package/docs/07-reference/02-ai-sdk-ui/01-use-chat.mdx +446 -0
  132. package/docs/07-reference/02-ai-sdk-ui/02-use-completion.mdx +179 -0
  133. package/docs/07-reference/02-ai-sdk-ui/03-use-object.mdx +178 -0
  134. package/docs/07-reference/02-ai-sdk-ui/31-convert-to-model-messages.mdx +230 -0
  135. package/docs/07-reference/02-ai-sdk-ui/32-prune-messages.mdx +108 -0
  136. package/docs/07-reference/02-ai-sdk-ui/40-create-ui-message-stream.mdx +151 -0
  137. package/docs/07-reference/02-ai-sdk-ui/41-create-ui-message-stream-response.mdx +113 -0
  138. package/docs/07-reference/02-ai-sdk-ui/42-pipe-ui-message-stream-to-response.mdx +73 -0
  139. package/docs/07-reference/02-ai-sdk-ui/43-read-ui-message-stream.mdx +57 -0
  140. package/docs/07-reference/02-ai-sdk-ui/46-infer-ui-tools.mdx +99 -0
  141. package/docs/07-reference/02-ai-sdk-ui/47-infer-ui-tool.mdx +75 -0
  142. package/docs/07-reference/02-ai-sdk-ui/50-direct-chat-transport.mdx +333 -0
  143. package/docs/07-reference/02-ai-sdk-ui/index.mdx +89 -0
  144. package/docs/07-reference/03-ai-sdk-rsc/01-stream-ui.mdx +767 -0
  145. package/docs/07-reference/03-ai-sdk-rsc/02-create-ai.mdx +90 -0
  146. package/docs/07-reference/03-ai-sdk-rsc/03-create-streamable-ui.mdx +91 -0
  147. package/docs/07-reference/03-ai-sdk-rsc/04-create-streamable-value.mdx +48 -0
  148. package/docs/07-reference/03-ai-sdk-rsc/05-read-streamable-value.mdx +78 -0
  149. package/docs/07-reference/03-ai-sdk-rsc/06-get-ai-state.mdx +50 -0
  150. package/docs/07-reference/03-ai-sdk-rsc/07-get-mutable-ai-state.mdx +70 -0
  151. package/docs/07-reference/03-ai-sdk-rsc/08-use-ai-state.mdx +26 -0
  152. package/docs/07-reference/03-ai-sdk-rsc/09-use-actions.mdx +42 -0
  153. package/docs/07-reference/03-ai-sdk-rsc/10-use-ui-state.mdx +35 -0
  154. package/docs/07-reference/03-ai-sdk-rsc/11-use-streamable-value.mdx +46 -0
  155. package/docs/07-reference/03-ai-sdk-rsc/20-render.mdx +262 -0
  156. package/docs/07-reference/03-ai-sdk-rsc/index.mdx +67 -0
  157. package/docs/07-reference/04-stream-helpers/01-ai-stream.mdx +89 -0
  158. package/docs/07-reference/04-stream-helpers/02-streaming-text-response.mdx +79 -0
  159. package/docs/07-reference/04-stream-helpers/05-stream-to-response.mdx +108 -0
  160. package/docs/07-reference/04-stream-helpers/07-openai-stream.mdx +77 -0
  161. package/docs/07-reference/04-stream-helpers/08-anthropic-stream.mdx +79 -0
  162. package/docs/07-reference/04-stream-helpers/09-aws-bedrock-stream.mdx +91 -0
  163. package/docs/07-reference/04-stream-helpers/10-aws-bedrock-anthropic-stream.mdx +96 -0
  164. package/docs/07-reference/04-stream-helpers/10-aws-bedrock-messages-stream.mdx +96 -0
  165. package/docs/07-reference/04-stream-helpers/11-aws-bedrock-cohere-stream.mdx +93 -0
  166. package/docs/07-reference/04-stream-helpers/12-aws-bedrock-llama-2-stream.mdx +93 -0
  167. package/docs/07-reference/04-stream-helpers/13-cohere-stream.mdx +78 -0
  168. package/docs/07-reference/04-stream-helpers/14-google-generative-ai-stream.mdx +85 -0
  169. package/docs/07-reference/04-stream-helpers/15-hugging-face-stream.mdx +84 -0
  170. package/docs/07-reference/04-stream-helpers/16-langchain-adapter.mdx +98 -0
  171. package/docs/07-reference/04-stream-helpers/16-llamaindex-adapter.mdx +70 -0
  172. package/docs/07-reference/04-stream-helpers/17-mistral-stream.mdx +81 -0
  173. package/docs/07-reference/04-stream-helpers/18-replicate-stream.mdx +83 -0
  174. package/docs/07-reference/04-stream-helpers/19-inkeep-stream.mdx +80 -0
  175. package/docs/07-reference/04-stream-helpers/index.mdx +103 -0
  176. package/docs/07-reference/05-ai-sdk-errors/ai-api-call-error.mdx +30 -0
  177. package/docs/07-reference/05-ai-sdk-errors/ai-download-error.mdx +27 -0
  178. package/docs/07-reference/05-ai-sdk-errors/ai-empty-response-body-error.mdx +24 -0
  179. package/docs/07-reference/05-ai-sdk-errors/ai-invalid-argument-error.mdx +26 -0
  180. package/docs/07-reference/05-ai-sdk-errors/ai-invalid-data-content-error.mdx +25 -0
  181. package/docs/07-reference/05-ai-sdk-errors/ai-invalid-data-content.mdx +26 -0
  182. package/docs/07-reference/05-ai-sdk-errors/ai-invalid-message-role-error.mdx +25 -0
  183. package/docs/07-reference/05-ai-sdk-errors/ai-invalid-prompt-error.mdx +47 -0
  184. package/docs/07-reference/05-ai-sdk-errors/ai-invalid-response-data-error.mdx +25 -0
  185. package/docs/07-reference/05-ai-sdk-errors/ai-invalid-tool-approval-error.mdx +25 -0
  186. package/docs/07-reference/05-ai-sdk-errors/ai-invalid-tool-input-error.mdx +27 -0
  187. package/docs/07-reference/05-ai-sdk-errors/ai-json-parse-error.mdx +25 -0
  188. package/docs/07-reference/05-ai-sdk-errors/ai-load-api-key-error.mdx +24 -0
  189. package/docs/07-reference/05-ai-sdk-errors/ai-load-setting-error.mdx +24 -0
  190. package/docs/07-reference/05-ai-sdk-errors/ai-message-conversion-error.mdx +25 -0
  191. package/docs/07-reference/05-ai-sdk-errors/ai-no-content-generated-error.mdx +24 -0
  192. package/docs/07-reference/05-ai-sdk-errors/ai-no-image-generated-error.mdx +36 -0
  193. package/docs/07-reference/05-ai-sdk-errors/ai-no-object-generated-error.mdx +43 -0
  194. package/docs/07-reference/05-ai-sdk-errors/ai-no-speech-generated-error.mdx +25 -0
  195. package/docs/07-reference/05-ai-sdk-errors/ai-no-such-model-error.mdx +26 -0
  196. package/docs/07-reference/05-ai-sdk-errors/ai-no-such-provider-error.mdx +28 -0
  197. package/docs/07-reference/05-ai-sdk-errors/ai-no-such-tool-error.mdx +26 -0
  198. package/docs/07-reference/05-ai-sdk-errors/ai-no-transcript-generated-error.mdx +25 -0
  199. package/docs/07-reference/05-ai-sdk-errors/ai-retry-error.mdx +27 -0
  200. package/docs/07-reference/05-ai-sdk-errors/ai-too-many-embedding-values-for-call-error.mdx +27 -0
  201. package/docs/07-reference/05-ai-sdk-errors/ai-tool-call-not-found-for-approval-error.mdx +26 -0
  202. package/docs/07-reference/05-ai-sdk-errors/ai-tool-call-repair-error.mdx +28 -0
  203. package/docs/07-reference/05-ai-sdk-errors/ai-type-validation-error.mdx +25 -0
  204. package/docs/07-reference/05-ai-sdk-errors/ai-unsupported-functionality-error.mdx +25 -0
  205. package/docs/07-reference/05-ai-sdk-errors/index.mdx +38 -0
  206. package/docs/07-reference/index.mdx +34 -0
  207. package/docs/08-migration-guides/00-versioning.mdx +46 -0
  208. package/docs/08-migration-guides/24-migration-guide-6-0.mdx +823 -0
  209. package/docs/08-migration-guides/25-migration-guide-5-0-data.mdx +882 -0
  210. package/docs/08-migration-guides/26-migration-guide-5-0.mdx +3427 -0
  211. package/docs/08-migration-guides/27-migration-guide-4-2.mdx +99 -0
  212. package/docs/08-migration-guides/28-migration-guide-4-1.mdx +14 -0
  213. package/docs/08-migration-guides/29-migration-guide-4-0.mdx +1157 -0
  214. package/docs/08-migration-guides/36-migration-guide-3-4.mdx +14 -0
  215. package/docs/08-migration-guides/37-migration-guide-3-3.mdx +64 -0
  216. package/docs/08-migration-guides/38-migration-guide-3-2.mdx +46 -0
  217. package/docs/08-migration-guides/39-migration-guide-3-1.mdx +168 -0
  218. package/docs/08-migration-guides/index.mdx +22 -0
  219. package/docs/09-troubleshooting/01-azure-stream-slow.mdx +33 -0
  220. package/docs/09-troubleshooting/02-client-side-function-calls-not-invoked.mdx +22 -0
  221. package/docs/09-troubleshooting/03-server-actions-in-client-components.mdx +40 -0
  222. package/docs/09-troubleshooting/04-strange-stream-output.mdx +36 -0
  223. package/docs/09-troubleshooting/05-streamable-ui-errors.mdx +16 -0
  224. package/docs/09-troubleshooting/05-tool-invocation-missing-result.mdx +106 -0
  225. package/docs/09-troubleshooting/06-streaming-not-working-when-deployed.mdx +31 -0
  226. package/docs/09-troubleshooting/06-streaming-not-working-when-proxied.mdx +31 -0
  227. package/docs/09-troubleshooting/06-timeout-on-vercel.mdx +60 -0
  228. package/docs/09-troubleshooting/07-unclosed-streams.mdx +34 -0
  229. package/docs/09-troubleshooting/08-use-chat-failed-to-parse-stream.mdx +26 -0
  230. package/docs/09-troubleshooting/09-client-stream-error.mdx +25 -0
  231. package/docs/09-troubleshooting/10-use-chat-tools-no-response.mdx +32 -0
  232. package/docs/09-troubleshooting/11-use-chat-custom-request-options.mdx +149 -0
  233. package/docs/09-troubleshooting/12-typescript-performance-zod.mdx +46 -0
  234. package/docs/09-troubleshooting/12-use-chat-an-error-occurred.mdx +59 -0
  235. package/docs/09-troubleshooting/13-repeated-assistant-messages.mdx +73 -0
  236. package/docs/09-troubleshooting/14-stream-abort-handling.mdx +73 -0
  237. package/docs/09-troubleshooting/14-tool-calling-with-structured-outputs.mdx +48 -0
  238. package/docs/09-troubleshooting/15-abort-breaks-resumable-streams.mdx +55 -0
  239. package/docs/09-troubleshooting/15-stream-text-not-working.mdx +33 -0
  240. package/docs/09-troubleshooting/16-streaming-status-delay.mdx +63 -0
  241. package/docs/09-troubleshooting/17-use-chat-stale-body-data.mdx +141 -0
  242. package/docs/09-troubleshooting/18-ontoolcall-type-narrowing.mdx +66 -0
  243. package/docs/09-troubleshooting/19-unsupported-model-version.mdx +50 -0
  244. package/docs/09-troubleshooting/20-no-object-generated-content-filter.mdx +72 -0
  245. package/docs/09-troubleshooting/30-model-is-not-assignable-to-type.mdx +21 -0
  246. package/docs/09-troubleshooting/40-typescript-cannot-find-namespace-jsx.mdx +24 -0
  247. package/docs/09-troubleshooting/50-react-maximum-update-depth-exceeded.mdx +39 -0
  248. package/docs/09-troubleshooting/60-jest-cannot-find-module-ai-rsc.mdx +22 -0
  249. package/docs/09-troubleshooting/index.mdx +11 -0
  250. package/package.json +8 -4
package/docs/05-ai-sdk-rsc/index.mdx
@@ -0,0 +1,58 @@
+ ---
+ title: AI SDK RSC
+ description: Learn about AI SDK RSC.
+ collapsed: true
+ ---
+
+ # AI SDK RSC
+
+ <Note type="warning">
+   AI SDK RSC is currently experimental. We recommend using [AI SDK
+   UI](/docs/ai-sdk-ui/overview) for production. For guidance on migrating from
+   RSC to UI, see our [migration guide](/docs/ai-sdk-rsc/migrating-to-ui).
+ </Note>
+
+ <IndexCards
+   cards={[
+     {
+       title: 'Overview',
+       description: 'Learn about AI SDK RSC.',
+       href: '/docs/ai-sdk-rsc/overview',
+     },
+     {
+       title: 'Streaming React Components',
+       description: 'Learn how to stream React components.',
+       href: '/docs/ai-sdk-rsc/streaming-react-components',
+     },
+     {
+       title: 'Managing Generative UI State',
+       description: 'Learn how to manage generative UI state.',
+       href: '/docs/ai-sdk-rsc/generative-ui-state',
+     },
+     {
+       title: 'Saving and Restoring States',
+       description: 'Learn how to save and restore states.',
+       href: '/docs/ai-sdk-rsc/saving-and-restoring-states',
+     },
+     {
+       title: 'Multi-step Interfaces',
+       description: 'Learn how to build multi-step interfaces.',
+       href: '/docs/ai-sdk-rsc/multistep-interfaces',
+     },
+     {
+       title: 'Streaming Values',
+       description: 'Learn how to stream values with AI SDK RSC.',
+       href: '/docs/ai-sdk-rsc/streaming-values',
+     },
+     {
+       title: 'Error Handling',
+       description: 'Learn how to handle errors.',
+       href: '/docs/ai-sdk-rsc/error-handling',
+     },
+     {
+       title: 'Authentication',
+       description: 'Learn how to authenticate users.',
+       href: '/docs/ai-sdk-rsc/authentication',
+     },
+   ]}
+ />
package/docs/06-advanced/01-prompt-engineering.mdx
@@ -0,0 +1,96 @@
+ ---
+ title: Prompt Engineering
+ description: Learn how to engineer prompts for LLMs with the AI SDK
+ ---
+
+ # Prompt Engineering
+
+ ## What is a Large Language Model (LLM)?
+
+ A Large Language Model is essentially a prediction engine that takes a sequence of words as input and aims to predict the most likely sequence to follow. It does this by assigning probabilities to potential next sequences and then selecting one. The model continues to generate sequences until it meets a specified stopping criterion.
+
+ These models learn by training on massive text corpuses, which means they will be better suited to some use cases than others. For example, a model trained on GitHub data would understand the probabilities of sequences in source code particularly well. However, it's crucial to understand that the generated sequences, while often seeming plausible, can sometimes be random and not grounded in reality. As these models become more accurate, many surprising abilities and applications emerge.
+
+ ## What is a prompt?
+
+ Prompts are the starting points for LLMs. They are the inputs that trigger the model to generate text. The scope of prompt engineering involves not just crafting these prompts but also understanding related concepts such as hidden prompts, tokens, token limits, and the potential for prompt hacking, which includes phenomena like jailbreaks and leaks.
+
+ ## Why is prompt engineering needed?
+
+ Prompt engineering currently plays a pivotal role in shaping the responses of LLMs. It allows us to tweak the model to respond more effectively to a broader range of queries. This includes the use of techniques like semantic search, command grammars, and the ReActive model architecture. The performance, context window, and cost of LLMs vary between models and model providers, which adds further constraints to the mix. For example, the GPT-4 model is more expensive than GPT-3.5-turbo and significantly slower, but it can also be more effective at certain tasks. And so, like many things in software engineering, there is a trade-off between cost and performance.
+
+ To assist with comparing and tweaking LLMs, we've built an AI playground that allows you to compare the performance of different models side-by-side online. When you're ready, you can even generate code with the AI SDK to quickly integrate your prompt and your selected model into your own applications.
+
+ ## Example: Build a Slogan Generator
+
+ ### Start with an instruction
+
+ Imagine you want to build a slogan generator for marketing campaigns. Creating catchy slogans isn't always straightforward!
+
+ First, you'll need a prompt that makes it clear what you want. Let's start with an instruction. Submit this prompt to generate your first completion.
+
+ <InlinePrompt initialInput="Create a slogan for a coffee shop." />
+
+ Not bad! Now, try making your instruction more specific.
+
+ <InlinePrompt initialInput="Create a slogan for an organic coffee shop." />
+
+ Introducing a single descriptive term to our prompt influences the completion. Essentially, crafting your prompt is the means by which you "instruct" or "program" the model.
+
+ ### Include examples
+
+ Clear instructions are key for quality outcomes, but that might not always be enough. Let's try to enhance your instruction further.
+
+ <InlinePrompt initialInput="Create three slogans for a coffee shop with live music." />
+
+ These slogans are fine, but they could be even better. It appears the model overlooked the 'live music' part of our prompt. Let's change it slightly to generate more appropriate suggestions.
+
+ Often, it's beneficial to both demonstrate and tell the model your requirements. Incorporating examples in your prompt can help convey patterns or subtleties. Try this prompt, which includes a few examples.
+
+ <InlinePrompt
+   initialInput={`Create three slogans for a business with unique features.
+
+ Business: Bookstore with cats
+ Slogans: "Purr-fect Pages", "Books and Whiskers", "Novels and Nuzzles"
+ Business: Gym with rock climbing
+ Slogans: "Peak Performance", "Reach New Heights", "Climb Your Way Fit"
+ Business: Coffee shop with live music
+ Slogans:`}
+ />
+
+ Great! Incorporating examples of expected output for a given input prompted the model to generate the kind of slogans we aimed for.
+
+ ### Tweak your settings
+
+ Apart from designing prompts, you can influence completions by tweaking model settings. A crucial setting is the **temperature**.
+
+ You might have seen that the same prompt, when repeated, yielded the same or nearly the same completions. This happens when your temperature is at 0.
+
+ Try re-submitting the identical prompt a few times with the temperature set to 1.
+
+ <InlinePrompt
+   initialInput={`Create three slogans for a business with unique features.
+
+ Business: Bookstore with cats
+ Slogans: "Purr-fect Pages", "Books and Whiskers", "Novels and Nuzzles"
+ Business: Gym with rock climbing
+ Slogans: "Peak Performance", "Reach New Heights", "Climb Your Way Fit"
+ Business: Coffee shop with live music
+ Slogans:`}
+   showTemp={true}
+   initialTemperature={1}
+ />
+
+ Notice the difference? With a temperature above 0, the same prompt delivers varied completions each time.
+
+ Keep in mind that the model forecasts the text most likely to follow the preceding text. Temperature, a value from 0 to 1, essentially governs the model's confidence level in making these predictions. A lower temperature means the model takes fewer risks, leading to more precise and deterministic completions. A higher temperature yields a broader range of completions.
+
+ For your slogan generator, you might want a large pool of slogan suggestions. A moderate temperature of 0.6 should serve well.
+
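+ The same setting applies when you call a model through the SDK rather than the playground. Here is a minimal sketch using the `generateText` function (the provider import and model are placeholders, as elsewhere in these docs):
+
+ ```ts
+ import { generateText } from 'ai';
+ __PROVIDER_IMPORT__;
+
+ // A moderate temperature yields varied but still on-topic slogans.
+ const { text } = await generateText({
+   model: __MODEL__,
+   prompt: 'Create three slogans for a coffee shop with live music.',
+   temperature: 0.6,
+ });
+
+ console.log(text);
+ ```
+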
+ ## Recommended Resources
+
+ Prompt Engineering is evolving rapidly, with new methods and research papers surfacing every week. Here are some resources that we've found useful for learning about and experimenting with prompt engineering:
+
+ - [The Vercel AI Playground](/playground)
+ - [Brex Prompt Engineering](https://github.com/brexhq/prompt-engineering)
+ - [Prompt Engineering Guide by Dair AI](https://www.promptingguide.ai/)
package/docs/06-advanced/02-stopping-streams.mdx
@@ -0,0 +1,184 @@
+ ---
+ title: Stopping Streams
+ description: Learn how to cancel streams with the AI SDK
+ ---
+
+ # Stopping Streams
+
+ Cancelling ongoing streams is often needed.
+ For example, users might want to stop a stream when they realize that the response is not what they want.
+
+ The different parts of the AI SDK support cancelling streams in different ways.
+
+ ## AI SDK Core
+
+ The AI SDK functions have an `abortSignal` argument that you can use to cancel a stream.
+ You would use this if you want to cancel a stream from the server side to the LLM API, e.g. by
+ forwarding the `abortSignal` from the request.
+
+ ```tsx highlight="10,11,12-16"
+ import { streamText } from 'ai';
+ __PROVIDER_IMPORT__;
+
+ export async function POST(req: Request) {
+   const { prompt } = await req.json();
+
+   const result = streamText({
+     model: __MODEL__,
+     prompt,
+     // forward the abort signal:
+     abortSignal: req.signal,
+     onAbort: ({ steps }) => {
+       // Handle cleanup when stream is aborted
+       console.log('Stream aborted after', steps.length, 'steps');
+       // Persist partial results to database
+     },
+   });
+
+   return result.toTextStreamResponse();
+ }
+ ```
+
+ ## AI SDK UI
+
+ The hooks, e.g. `useChat` or `useCompletion`, provide a `stop` helper function that can be used to cancel a stream.
+ This will cancel the stream from the client side to the server.
+
+ <Note type="warning">
+   Stream abort functionality is not compatible with stream resumption. If you're
+   using `resume: true` in `useChat`, the abort functionality will break the
+   resumption mechanism. Choose either abort or resume functionality, but not
+   both.
+ </Note>
+
+ ```tsx file="app/page.tsx" highlight="9,18-20"
+ 'use client';
+
+ import { useCompletion } from '@ai-sdk/react';
+
+ export default function Chat() {
+   const { input, completion, stop, status, handleSubmit, handleInputChange } =
+     useCompletion();
+
+   return (
+     <div>
+       {(status === 'submitted' || status === 'streaming') && (
+         <button type="button" onClick={() => stop()}>
+           Stop
+         </button>
+       )}
+       {completion}
+       <form onSubmit={handleSubmit}>
+         <input value={input} onChange={handleInputChange} />
+       </form>
+     </div>
+   );
+ }
+ ```
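+
+ The same pattern works with `useChat`. Here is a minimal sketch (assuming the default transport and the `parts`-based message shape; the message submission wiring is omitted):
+
+ ```tsx
+ 'use client';
+
+ import { useChat } from '@ai-sdk/react';
+
+ export default function Chat() {
+   const { messages, stop, status } = useChat();
+
+   return (
+     <div>
+       {(status === 'submitted' || status === 'streaming') && (
+         <button type="button" onClick={() => stop()}>
+           Stop
+         </button>
+       )}
+       {messages.map(message => (
+         <div key={message.id}>
+           {message.parts.map((part, index) =>
+             part.type === 'text' ? <span key={index}>{part.text}</span> : null,
+           )}
+         </div>
+       ))}
+     </div>
+   );
+ }
+ ```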
+
+ ## Handling stream abort cleanup
+
+ When streams are aborted, you may need to perform cleanup operations such as persisting partial results or cleaning up resources. The `onAbort` callback provides a way to handle these scenarios on the server side.
+
+ Unlike `onFinish`, which is called when a stream completes normally, `onAbort` is specifically called when a stream is aborted via `AbortSignal`. This distinction allows you to handle normal completion and aborted streams differently.
+
+ <Note>
+   For UI message streams (`toUIMessageStreamResponse`), the `onFinish` callback
+   also receives an `isAborted` parameter that indicates whether the stream was
+   aborted. This allows you to handle both completion and abort scenarios in a
+   single callback.
+ </Note>
+
+ ```tsx highlight="8-12"
+ import { streamText } from 'ai';
+ __PROVIDER_IMPORT__;
+
+ const controller = new AbortController();
+
+ const result = streamText({
+   model: __MODEL__,
+   prompt: 'Write a long story...',
+   abortSignal: controller.signal,
+   onAbort: async ({ steps }) => {
+     // Called when stream is aborted - persist partial results
+     await savePartialResults(steps);
+     await logAbortEvent(steps.length);
+   },
+   onFinish: async ({ steps, totalUsage }) => {
+     // Called when stream completes normally
+     await saveFinalResults(steps, totalUsage);
+   },
+ });
+ ```
+
+ The `onAbort` callback receives:
+
+ - `steps`: Array of all completed steps before the abort occurred
+
+ This is particularly useful for:
+
+ - Persisting partial conversation history to database
+ - Saving partial progress for later continuation
+ - Cleaning up server-side resources or connections
+ - Logging abort events for analytics
+
+ You can also handle abort events directly in the stream using the `abort` stream part:
+
+ ```tsx highlight="8-12"
+ for await (const part of result.fullStream) {
+   switch (part.type) {
+     case 'text-delta':
+       // Handle text delta content
+       break;
+     case 'abort':
+       // Handle abort event directly in stream
+       console.log('Stream was aborted');
+       break;
+     // ... other cases
+   }
+ }
+ ```
+
+ ## UI Message Streams
+
+ When using `toUIMessageStreamResponse`, you need to handle stream abortion slightly differently. The `onFinish` callback receives an `isAborted` parameter, and you should pass the `consumeStream` function to ensure proper abort handling:
+
+ ```tsx highlight="5,19,20-24,26"
+ import {
+   consumeStream,
+   convertToModelMessages,
+   streamText,
+   UIMessage,
+ } from 'ai';
+ __PROVIDER_IMPORT__;
+
+ export async function POST(req: Request) {
+   const { messages }: { messages: UIMessage[] } = await req.json();
+
+   const result = streamText({
+     model: __MODEL__,
+     messages: await convertToModelMessages(messages),
+     abortSignal: req.signal,
+   });
+
+   return result.toUIMessageStreamResponse({
+     onFinish: async ({ isAborted }) => {
+       if (isAborted) {
+         console.log('Stream was aborted');
+         // Handle abort-specific cleanup
+       } else {
+         console.log('Stream completed normally');
+         // Handle normal completion
+       }
+     },
+     consumeSseStream: consumeStream,
+   });
+ }
+ ```
+
+ The `consumeStream` function is necessary for proper abort handling in UI message streams. It ensures that the stream is properly consumed even when aborted, preventing potential memory leaks or hanging connections.
+
+ ## AI SDK RSC
+
+ <Note type="warning">
+   The AI SDK RSC does not currently support stopping streams.
+ </Note>
package/docs/06-advanced/03-backpressure.mdx
@@ -0,0 +1,173 @@
+ ---
+ title: Backpressure
+ description: How to handle backpressure and cancellation when working with the AI SDK
+ ---
+
+ # Stream Back-pressure and Cancellation
+
+ This page focuses on understanding back-pressure and cancellation when working with streams. You do not need to know this information to use the AI SDK, but for those interested, it offers a deeper dive on why and how the SDK optimally streams responses.
+
+ In the following sections, we'll explore back-pressure and cancellation in the context of a simple example program. We'll discuss the issues that can arise from an eager approach and demonstrate how a lazy approach can resolve them.
+
+ ## Back-pressure and Cancellation with Streams
+
+ Let's begin by setting up a simple example program:
+
+ ```jsx
+ // A generator that will yield positive integers
+ async function* integers() {
+   let i = 1;
+   while (true) {
+     console.log(`yielding ${i}`);
+     yield i++;
+
+     await sleep(100);
+   }
+ }
+ function sleep(ms) {
+   return new Promise(resolve => setTimeout(resolve, ms));
+ }
+
+ // Wraps a generator into a ReadableStream
+ function createStream(iterator) {
+   return new ReadableStream({
+     async start(controller) {
+       for await (const v of iterator) {
+         controller.enqueue(v);
+       }
+       controller.close();
+     },
+   });
+ }
+
+ // Collect data from stream
+ async function run() {
+   // Set up a stream of integers
+   const stream = createStream(integers());
+
+   // Read values from our stream
+   const reader = stream.getReader();
+   for (let i = 0; i < 10_000; i++) {
+     // we know our stream is infinite, so there's no need to check `done`.
+     const { value } = await reader.read();
+     console.log(`read ${value}`);
+
+     await sleep(1_000);
+   }
+ }
+ run();
+ ```
+
+ In this example, we create an async-generator that yields positive integers, a `ReadableStream` that wraps our integer generator, and a reader which will read values out of our stream. Notice, too, that our integer generator logs out `"yielding ${i}"`, and our reader logs out `"read ${value}"`. Both take an arbitrary amount of time to process data, represented with a 100ms sleep in our generator, and a 1-second sleep in our reader.
+
+ ## Back-pressure
+
+ If you were to run this program, you'd notice something funny. We'll see roughly 10 "yield" logs for every "read" log. This might seem obvious: the generator can push values 10x faster than the reader can pull them out. But it represents a problem: our `stream` has to maintain an ever-expanding queue of items that have been pushed in but not pulled out.
+
+ The problem stems from the way we wrap our generator into a stream. Notice the use of `for await (…)` inside our `start` handler. This is an **eager** for-loop, and it is constantly running to get the next value from our generator to be enqueued in our stream. This means our stream does not respect back-pressure: the signal from the consumer to the producer that more values aren't needed _yet_. We've essentially spawned a thread that will perpetually push more data into the stream, one that runs as fast as possible to push new data immediately. Worse, there's no way to signal to this thread to stop running when we don't need additional data.
+
+ To fix this, `ReadableStream` allows a `pull` handler. `pull` is called every time the consumer attempts to read more data from our stream (if there's no data already queued internally). But it's not enough to just move the `for await (…)` into `pull`; we also need to convert from an eager enqueuing to a **lazy** one. By making these two changes, we'll be able to react to the consumer. If they need more data, we can easily produce it, and if they don't, then we don't need to spend any time doing unnecessary work.
+
+ ```jsx
+ function createStream(iterator) {
+   return new ReadableStream({
+     async pull(controller) {
+       const { value, done } = await iterator.next();
+
+       if (done) {
+         controller.close();
+       } else {
+         controller.enqueue(value);
+       }
+     },
+   });
+ }
+ ```
+
+ Our `createStream` is a little more verbose now, but the new code is important. First, we need to manually call our `iterator.next()` method. This returns a `Promise` for an object with the type signature `{ done: boolean, value: T }`. If `done` is `true`, then we know that our iterator won't yield any more values and we must `close` the stream (this allows the consumer to know that the stream has finished producing values). Otherwise, we need to `enqueue` our newly produced value.
+
+ When we run this program, we see that our "yield" and "read" logs are now paired. We're no longer yielding 10x integers for every read! And, our stream now only needs to maintain 1 item in its internal buffer. We've essentially given control to the consumer, so that it's responsible for producing new values as it needs them. Neato!
+
+ ## Cancellation
+
+ Let's go back to our initial eager example, with one small edit. Now instead of reading 10,000 integers, we're only going to read 3:
+
+ ```jsx
+ // A generator that will yield positive integers
+ async function* integers() {
+   let i = 1;
+   while (true) {
+     console.log(`yielding ${i}`);
+     yield i++;
+
+     await sleep(100);
+   }
+ }
+ function sleep(ms) {
+   return new Promise(resolve => setTimeout(resolve, ms));
+ }
+
+ // Wraps a generator into a ReadableStream
+ function createStream(iterator) {
+   return new ReadableStream({
+     async start(controller) {
+       for await (const v of iterator) {
+         controller.enqueue(v);
+       }
+       controller.close();
+     },
+   });
+ }
+
+ // Collect data from stream
+ async function run() {
+   // Set up a stream of integers
+   const stream = createStream(integers());
+
+   // Read values from our stream
+   const reader = stream.getReader();
+   // We're only reading 3 items this time:
+   for (let i = 0; i < 3; i++) {
+     // we know our stream is infinite, so there's no need to check `done`.
+     const { value } = await reader.read();
+     console.log(`read ${value}`);
+
+     await sleep(1000);
+   }
+ }
+ run();
+ ```
+
+ We're back to yielding 10x the number of values read. But notice now, after we've read 3 values, we're continuing to yield new values. We know that our reader will never read another value, but our stream doesn't! The eager `for await (…)` will continue forever, endlessly enqueuing new values into our stream's buffer and increasing our memory usage until it consumes all available program memory.
+
+ The fix to this is exactly the same: use `pull` and manual iteration. By producing values _**lazily**_, we tie the lifetime of our integer generator to the lifetime of the reader. Once the reads stop, the yields will stop too:
+
+ ```jsx
+ // Wraps a generator into a ReadableStream
+ function createStream(iterator) {
+   return new ReadableStream({
+     async pull(controller) {
+       const { value, done } = await iterator.next();
+
+       if (done) {
+         controller.close();
+       } else {
+         controller.enqueue(value);
+       }
+     },
+   });
+ }
+ ```
+
+ Since the solution is the same as implementing back-pressure, it shows that they're just two facets of the same problem: pushing values into a stream should be done **lazily**; doing it eagerly leads to exactly the problems we've seen.
+
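+ As a refinement (an addition beyond the example above, not something the SDK requires), you can also give the stream a `cancel` handler. When the reader cancels the stream early, `pull` simply stops being called; calling `iterator.return()` in `cancel` additionally lets the generator run any `finally`-block cleanup it may have:
+
+ ```jsx
+ function createStream(iterator) {
+   return new ReadableStream({
+     async pull(controller) {
+       const { value, done } = await iterator.next();
+
+       if (done) {
+         controller.close();
+       } else {
+         controller.enqueue(value);
+       }
+     },
+     async cancel(reason) {
+       // Politely stop the generator so its cleanup logic runs.
+       await iterator.return?.(reason);
+     },
+   });
+ }
+ ```
+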
+ ## Tying Stream Laziness to AI Responses
+
+ Now let's imagine you're integrating the AIBot service into your product. Users will be able to prompt "count from 1 to infinity", the browser will fetch your AI API endpoint, and your servers connect to AIBot to get a response. But "infinity" is, well, infinite. The response will never end!
+
+ After a few seconds, the user gets bored and navigates away. Or maybe you're doing local development and a hot-module reload refreshes your page. The browser will have ended its connection to the API endpoint, but will your server end its connection with AIBot?
+
+ If you used the eager `for await (...)` approach, then the connection is still running and your server is asking for more and more data from AIBot. Our server spawned a "thread" and there's no signal when we can end the eager pulls. Eventually, the server is going to run out of memory (remember, there's no active fetch connection to read the buffering responses and free them).
+
+ {/* When we started writing the streaming code for the AI SDK, we confirmed that aborting a fetch will end a streamed response from Next.js */}
+
+ With the lazy approach, this is taken care of for you. Because the stream will only request new data from AIBot when the consumer requests it, navigating away from the page naturally frees all resources. The fetch connection aborts and the server can clean up the response. The `ReadableStream` tied to that response can now be garbage collected. When that happens, the connection it holds to AIBot can then be freed.
package/docs/06-advanced/04-caching.mdx
@@ -0,0 +1,169 @@
+ ---
+ title: Caching
+ description: How to handle caching when working with the AI SDK
+ ---
+
+ # Caching Responses
+
+ Depending on the type of application you're building, you may want to cache the responses you receive from your AI provider, at least temporarily.
+
+ ## Using Language Model Middleware (Recommended)
+
+ The recommended approach to caching responses is using [language model middleware](/docs/ai-sdk-core/middleware)
+ and the [`simulateReadableStream`](/docs/reference/ai-sdk-core/simulate-readable-stream) function.
+
+ Language model middleware is a way to enhance the behavior of language models by intercepting and modifying the calls to the language model.
+ Let's see how you can use language model middleware to cache responses.
+
+ ```ts filename="ai/middleware.ts"
+ import { Redis } from '@upstash/redis';
+ import {
+   type LanguageModelV3,
+   type LanguageModelV3Middleware,
+   type LanguageModelV3StreamPart,
+   simulateReadableStream,
+ } from 'ai';
+
+ const redis = new Redis({
+   url: process.env.KV_URL,
+   token: process.env.KV_TOKEN,
+ });
+
+ export const cacheMiddleware: LanguageModelV3Middleware = {
+   wrapGenerate: async ({ doGenerate, params }) => {
+     const cacheKey = JSON.stringify(params);
+
+     const cached = (await redis.get(cacheKey)) as Awaited<
+       ReturnType<LanguageModelV3['doGenerate']>
+     > | null;
+
+     if (cached !== null) {
+       return {
+         ...cached,
+         response: {
+           ...cached.response,
+           timestamp: cached?.response?.timestamp
+             ? new Date(cached?.response?.timestamp)
+             : undefined,
+         },
+       };
+     }
+
+     const result = await doGenerate();
+
+     redis.set(cacheKey, result);
+
+     return result;
+   },
+   wrapStream: async ({ doStream, params }) => {
+     const cacheKey = JSON.stringify(params);
+
+     // Check if the result is in the cache
+     const cached = await redis.get(cacheKey);
+
+     // If cached, return a simulated ReadableStream that yields the cached result
+     if (cached !== null) {
+       // Format the timestamps in the cached response
+       const formattedChunks = (cached as LanguageModelV3StreamPart[]).map(p => {
+         if (p.type === 'response-metadata' && p.timestamp) {
+           return { ...p, timestamp: new Date(p.timestamp) };
+         } else return p;
+       });
+       return {
+         stream: simulateReadableStream({
+           initialDelayInMs: 0,
+           chunkDelayInMs: 10,
+           chunks: formattedChunks,
+         }),
+       };
+     }
+
+     // If not cached, proceed with streaming
+     const { stream, ...rest } = await doStream();
+
+     const fullResponse: LanguageModelV3StreamPart[] = [];
+
+     const transformStream = new TransformStream<
+       LanguageModelV3StreamPart,
+       LanguageModelV3StreamPart
+     >({
+       transform(chunk, controller) {
+         fullResponse.push(chunk);
+         controller.enqueue(chunk);
+       },
+       flush() {
+         // Store the full response in the cache after streaming is complete
+         redis.set(cacheKey, fullResponse);
+       },
+     });
+
+     return {
+       stream: stream.pipeThrough(transformStream),
+       ...rest,
+     };
+   },
+ };
+ ```
+
+ <Note>
+   This example uses `@upstash/redis` to store and retrieve the assistant's
+   responses, but you can use any KV storage provider you would like.
+ </Note>
+
+ A `LanguageModelV3Middleware` has two methods: `wrapGenerate` and `wrapStream`. `wrapGenerate` is called when using [`generateText`](/docs/reference/ai-sdk-core/generate-text) and [`generateObject`](/docs/reference/ai-sdk-core/generate-object), while `wrapStream` is called when using [`streamText`](/docs/reference/ai-sdk-core/stream-text) and [`streamObject`](/docs/reference/ai-sdk-core/stream-object).
+
+ For `wrapGenerate`, you can cache the response directly. For `wrapStream`, on the other hand, you cache an array of the stream parts, which can then be passed to the [`simulateReadableStream`](/docs/ai-sdk-core/testing#simulate-data-stream-protocol-responses) function to create a simulated `ReadableStream` that returns the cached response. In this way, the cached response is returned chunk-by-chunk as if it were being generated by the model. You can control the initial delay and the delay between chunks by adjusting the `initialDelayInMs` and `chunkDelayInMs` parameters of `simulateReadableStream`.
+
+ You can see a full example of caching with Redis in a Next.js application in our [Caching Middleware Recipe](/cookbook/next/caching-middleware).
+
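+ To apply the middleware, wrap your model with it. A minimal sketch (the import path for `cacheMiddleware` is assumed to match the `ai/middleware.ts` file above; the provider import and model are placeholders, as elsewhere in these docs):
+
+ ```ts
+ import { wrapLanguageModel } from 'ai';
+ import { cacheMiddleware } from '@/ai/middleware';
+ __PROVIDER_IMPORT__;
+
+ // All generateText/streamText calls made with `cachedModel`
+ // now go through the cache first.
+ const cachedModel = wrapLanguageModel({
+   model: __MODEL__,
+   middleware: cacheMiddleware,
+ });
+ ```
+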
+ ## Using Lifecycle Callbacks
+
+ Alternatively, each AI SDK Core function has special lifecycle callbacks you can use. The one of interest is likely `onFinish`, which is called when the generation is complete. This is where you can cache the full response.
+
+ Here's an example that uses [Upstash Redis](https://upstash.com/docs/redis/overall/getstarted) and Next.js to cache the response for 1 hour:
+
+ ```tsx filename="app/api/chat/route.ts"
+ import {
+   convertToModelMessages,
+   formatDataStreamPart,
+   streamText,
+   UIMessage,
+ } from 'ai';
+ __PROVIDER_IMPORT__;
+ import { Redis } from '@upstash/redis';
+
+ // Allow streaming responses up to 30 seconds
+ export const maxDuration = 30;
+
+ const redis = new Redis({
+   url: process.env.KV_URL,
+   token: process.env.KV_TOKEN,
+ });
+
+ export async function POST(req: Request) {
+   const { messages }: { messages: UIMessage[] } = await req.json();
+
+   // come up with a key based on the request:
+   const key = JSON.stringify(messages);
+
+   // Check if we have a cached response
+   const cached = await redis.get(key);
+   if (cached != null) {
+     return new Response(formatDataStreamPart('text', cached as string), {
+       status: 200,
+       headers: { 'Content-Type': 'text/plain' },
+     });
+   }
+
+   // Call the language model:
+   const result = streamText({
+     model: __MODEL__,
+     messages: await convertToModelMessages(messages),
+     async onFinish({ text }) {
+       // Cache the response text:
+       await redis.set(key, text);
+       await redis.expire(key, 60 * 60);
+     },
+   });
+
+   // Respond with the stream
+   return result.toUIMessageStreamResponse();
+ }
+ ```