ai 6.0.30 → 6.0.32

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (250)
  1. package/CHANGELOG.md +13 -0
  2. package/dist/index.js +1 -1
  3. package/dist/index.mjs +1 -1
  4. package/dist/internal/index.js +1 -1
  5. package/dist/internal/index.mjs +1 -1
  6. package/docs/00-introduction/index.mdx +76 -0
  7. package/docs/02-foundations/01-overview.mdx +43 -0
  8. package/docs/02-foundations/02-providers-and-models.mdx +163 -0
  9. package/docs/02-foundations/03-prompts.mdx +620 -0
  10. package/docs/02-foundations/04-tools.mdx +160 -0
  11. package/docs/02-foundations/05-streaming.mdx +62 -0
  12. package/docs/02-foundations/index.mdx +43 -0
  13. package/docs/02-getting-started/00-choosing-a-provider.mdx +110 -0
  14. package/docs/02-getting-started/01-navigating-the-library.mdx +85 -0
  15. package/docs/02-getting-started/02-nextjs-app-router.mdx +556 -0
  16. package/docs/02-getting-started/03-nextjs-pages-router.mdx +542 -0
  17. package/docs/02-getting-started/04-svelte.mdx +627 -0
  18. package/docs/02-getting-started/05-nuxt.mdx +566 -0
  19. package/docs/02-getting-started/06-nodejs.mdx +512 -0
  20. package/docs/02-getting-started/07-expo.mdx +766 -0
  21. package/docs/02-getting-started/08-tanstack-start.mdx +583 -0
  22. package/docs/02-getting-started/index.mdx +44 -0
  23. package/docs/03-agents/01-overview.mdx +96 -0
  24. package/docs/03-agents/02-building-agents.mdx +367 -0
  25. package/docs/03-agents/03-workflows.mdx +370 -0
  26. package/docs/03-agents/04-loop-control.mdx +350 -0
  27. package/docs/03-agents/05-configuring-call-options.mdx +286 -0
  28. package/docs/03-agents/index.mdx +40 -0
  29. package/docs/03-ai-sdk-core/01-overview.mdx +33 -0
  30. package/docs/03-ai-sdk-core/05-generating-text.mdx +600 -0
  31. package/docs/03-ai-sdk-core/10-generating-structured-data.mdx +662 -0
  32. package/docs/03-ai-sdk-core/15-tools-and-tool-calling.mdx +1102 -0
  33. package/docs/03-ai-sdk-core/16-mcp-tools.mdx +375 -0
  34. package/docs/03-ai-sdk-core/20-prompt-engineering.mdx +144 -0
  35. package/docs/03-ai-sdk-core/25-settings.mdx +198 -0
  36. package/docs/03-ai-sdk-core/30-embeddings.mdx +247 -0
  37. package/docs/03-ai-sdk-core/31-reranking.mdx +218 -0
  38. package/docs/03-ai-sdk-core/35-image-generation.mdx +341 -0
  39. package/docs/03-ai-sdk-core/36-transcription.mdx +173 -0
  40. package/docs/03-ai-sdk-core/37-speech.mdx +167 -0
  41. package/docs/03-ai-sdk-core/40-middleware.mdx +480 -0
  42. package/docs/03-ai-sdk-core/45-provider-management.mdx +349 -0
  43. package/docs/03-ai-sdk-core/50-error-handling.mdx +149 -0
  44. package/docs/03-ai-sdk-core/55-testing.mdx +218 -0
  45. package/docs/03-ai-sdk-core/60-telemetry.mdx +313 -0
  46. package/docs/03-ai-sdk-core/65-devtools.mdx +107 -0
  47. package/docs/03-ai-sdk-core/index.mdx +88 -0
  48. package/docs/04-ai-sdk-ui/01-overview.mdx +44 -0
  49. package/docs/04-ai-sdk-ui/02-chatbot.mdx +1313 -0
  50. package/docs/04-ai-sdk-ui/03-chatbot-message-persistence.mdx +535 -0
  51. package/docs/04-ai-sdk-ui/03-chatbot-resume-streams.mdx +263 -0
  52. package/docs/04-ai-sdk-ui/03-chatbot-tool-usage.mdx +682 -0
  53. package/docs/04-ai-sdk-ui/04-generative-user-interfaces.mdx +389 -0
  54. package/docs/04-ai-sdk-ui/05-completion.mdx +186 -0
  55. package/docs/04-ai-sdk-ui/08-object-generation.mdx +344 -0
  56. package/docs/04-ai-sdk-ui/20-streaming-data.mdx +397 -0
  57. package/docs/04-ai-sdk-ui/21-error-handling.mdx +190 -0
  58. package/docs/04-ai-sdk-ui/21-transport.mdx +174 -0
  59. package/docs/04-ai-sdk-ui/24-reading-ui-message-streams.mdx +104 -0
  60. package/docs/04-ai-sdk-ui/25-message-metadata.mdx +152 -0
  61. package/docs/04-ai-sdk-ui/50-stream-protocol.mdx +477 -0
  62. package/docs/04-ai-sdk-ui/index.mdx +64 -0
  63. package/docs/05-ai-sdk-rsc/01-overview.mdx +45 -0
  64. package/docs/05-ai-sdk-rsc/02-streaming-react-components.mdx +209 -0
  65. package/docs/05-ai-sdk-rsc/03-generative-ui-state.mdx +279 -0
  66. package/docs/05-ai-sdk-rsc/03-saving-and-restoring-states.mdx +105 -0
  67. package/docs/05-ai-sdk-rsc/04-multistep-interfaces.mdx +282 -0
  68. package/docs/05-ai-sdk-rsc/05-streaming-values.mdx +158 -0
  69. package/docs/05-ai-sdk-rsc/06-loading-state.mdx +273 -0
  70. package/docs/05-ai-sdk-rsc/08-error-handling.mdx +96 -0
  71. package/docs/05-ai-sdk-rsc/09-authentication.mdx +42 -0
  72. package/docs/05-ai-sdk-rsc/10-migrating-to-ui.mdx +722 -0
  73. package/docs/05-ai-sdk-rsc/index.mdx +58 -0
  74. package/docs/06-advanced/01-prompt-engineering.mdx +96 -0
  75. package/docs/06-advanced/02-stopping-streams.mdx +184 -0
  76. package/docs/06-advanced/03-backpressure.mdx +173 -0
  77. package/docs/06-advanced/04-caching.mdx +169 -0
  78. package/docs/06-advanced/05-multiple-streamables.mdx +68 -0
  79. package/docs/06-advanced/06-rate-limiting.mdx +60 -0
  80. package/docs/06-advanced/07-rendering-ui-with-language-models.mdx +213 -0
  81. package/docs/06-advanced/08-model-as-router.mdx +120 -0
  82. package/docs/06-advanced/09-multistep-interfaces.mdx +115 -0
  83. package/docs/06-advanced/09-sequential-generations.mdx +55 -0
  84. package/docs/06-advanced/10-vercel-deployment-guide.mdx +117 -0
  85. package/docs/06-advanced/index.mdx +11 -0
  86. package/docs/07-reference/01-ai-sdk-core/01-generate-text.mdx +2142 -0
  87. package/docs/07-reference/01-ai-sdk-core/02-stream-text.mdx +3215 -0
  88. package/docs/07-reference/01-ai-sdk-core/03-generate-object.mdx +780 -0
  89. package/docs/07-reference/01-ai-sdk-core/04-stream-object.mdx +1140 -0
  90. package/docs/07-reference/01-ai-sdk-core/05-embed.mdx +190 -0
  91. package/docs/07-reference/01-ai-sdk-core/06-embed-many.mdx +171 -0
  92. package/docs/07-reference/01-ai-sdk-core/06-rerank.mdx +309 -0
  93. package/docs/07-reference/01-ai-sdk-core/10-generate-image.mdx +227 -0
  94. package/docs/07-reference/01-ai-sdk-core/11-transcribe.mdx +138 -0
  95. package/docs/07-reference/01-ai-sdk-core/12-generate-speech.mdx +214 -0
  96. package/docs/07-reference/01-ai-sdk-core/15-agent.mdx +203 -0
  97. package/docs/07-reference/01-ai-sdk-core/16-tool-loop-agent.mdx +449 -0
  98. package/docs/07-reference/01-ai-sdk-core/17-create-agent-ui-stream.mdx +148 -0
  99. package/docs/07-reference/01-ai-sdk-core/18-create-agent-ui-stream-response.mdx +168 -0
  100. package/docs/07-reference/01-ai-sdk-core/18-pipe-agent-ui-stream-to-response.mdx +144 -0
  101. package/docs/07-reference/01-ai-sdk-core/20-tool.mdx +196 -0
  102. package/docs/07-reference/01-ai-sdk-core/22-dynamic-tool.mdx +175 -0
  103. package/docs/07-reference/01-ai-sdk-core/23-create-mcp-client.mdx +410 -0
  104. package/docs/07-reference/01-ai-sdk-core/24-mcp-stdio-transport.mdx +68 -0
  105. package/docs/07-reference/01-ai-sdk-core/25-json-schema.mdx +94 -0
  106. package/docs/07-reference/01-ai-sdk-core/26-zod-schema.mdx +109 -0
  107. package/docs/07-reference/01-ai-sdk-core/27-valibot-schema.mdx +55 -0
  108. package/docs/07-reference/01-ai-sdk-core/28-output.mdx +342 -0
  109. package/docs/07-reference/01-ai-sdk-core/30-model-message.mdx +415 -0
  110. package/docs/07-reference/01-ai-sdk-core/31-ui-message.mdx +246 -0
  111. package/docs/07-reference/01-ai-sdk-core/32-validate-ui-messages.mdx +101 -0
  112. package/docs/07-reference/01-ai-sdk-core/33-safe-validate-ui-messages.mdx +113 -0
  113. package/docs/07-reference/01-ai-sdk-core/40-provider-registry.mdx +182 -0
  114. package/docs/07-reference/01-ai-sdk-core/42-custom-provider.mdx +121 -0
  115. package/docs/07-reference/01-ai-sdk-core/50-cosine-similarity.mdx +52 -0
  116. package/docs/07-reference/01-ai-sdk-core/60-wrap-language-model.mdx +59 -0
  117. package/docs/07-reference/01-ai-sdk-core/61-wrap-image-model.mdx +64 -0
  118. package/docs/07-reference/01-ai-sdk-core/65-language-model-v2-middleware.mdx +46 -0
  119. package/docs/07-reference/01-ai-sdk-core/66-extract-reasoning-middleware.mdx +68 -0
  120. package/docs/07-reference/01-ai-sdk-core/67-simulate-streaming-middleware.mdx +71 -0
  121. package/docs/07-reference/01-ai-sdk-core/68-default-settings-middleware.mdx +80 -0
  122. package/docs/07-reference/01-ai-sdk-core/69-add-tool-input-examples-middleware.mdx +155 -0
  123. package/docs/07-reference/01-ai-sdk-core/70-extract-json-middleware.mdx +147 -0
  124. package/docs/07-reference/01-ai-sdk-core/70-step-count-is.mdx +84 -0
  125. package/docs/07-reference/01-ai-sdk-core/71-has-tool-call.mdx +120 -0
  126. package/docs/07-reference/01-ai-sdk-core/75-simulate-readable-stream.mdx +94 -0
  127. package/docs/07-reference/01-ai-sdk-core/80-smooth-stream.mdx +145 -0
  128. package/docs/07-reference/01-ai-sdk-core/90-generate-id.mdx +43 -0
  129. package/docs/07-reference/01-ai-sdk-core/91-create-id-generator.mdx +89 -0
  130. package/docs/07-reference/01-ai-sdk-core/index.mdx +159 -0
  131. package/docs/07-reference/02-ai-sdk-ui/01-use-chat.mdx +446 -0
  132. package/docs/07-reference/02-ai-sdk-ui/02-use-completion.mdx +179 -0
  133. package/docs/07-reference/02-ai-sdk-ui/03-use-object.mdx +178 -0
  134. package/docs/07-reference/02-ai-sdk-ui/31-convert-to-model-messages.mdx +230 -0
  135. package/docs/07-reference/02-ai-sdk-ui/32-prune-messages.mdx +108 -0
  136. package/docs/07-reference/02-ai-sdk-ui/40-create-ui-message-stream.mdx +151 -0
  137. package/docs/07-reference/02-ai-sdk-ui/41-create-ui-message-stream-response.mdx +113 -0
  138. package/docs/07-reference/02-ai-sdk-ui/42-pipe-ui-message-stream-to-response.mdx +73 -0
  139. package/docs/07-reference/02-ai-sdk-ui/43-read-ui-message-stream.mdx +57 -0
  140. package/docs/07-reference/02-ai-sdk-ui/46-infer-ui-tools.mdx +99 -0
  141. package/docs/07-reference/02-ai-sdk-ui/47-infer-ui-tool.mdx +75 -0
  142. package/docs/07-reference/02-ai-sdk-ui/50-direct-chat-transport.mdx +333 -0
  143. package/docs/07-reference/02-ai-sdk-ui/index.mdx +89 -0
  144. package/docs/07-reference/03-ai-sdk-rsc/01-stream-ui.mdx +767 -0
  145. package/docs/07-reference/03-ai-sdk-rsc/02-create-ai.mdx +90 -0
  146. package/docs/07-reference/03-ai-sdk-rsc/03-create-streamable-ui.mdx +91 -0
  147. package/docs/07-reference/03-ai-sdk-rsc/04-create-streamable-value.mdx +48 -0
  148. package/docs/07-reference/03-ai-sdk-rsc/05-read-streamable-value.mdx +78 -0
  149. package/docs/07-reference/03-ai-sdk-rsc/06-get-ai-state.mdx +50 -0
  150. package/docs/07-reference/03-ai-sdk-rsc/07-get-mutable-ai-state.mdx +70 -0
  151. package/docs/07-reference/03-ai-sdk-rsc/08-use-ai-state.mdx +26 -0
  152. package/docs/07-reference/03-ai-sdk-rsc/09-use-actions.mdx +42 -0
  153. package/docs/07-reference/03-ai-sdk-rsc/10-use-ui-state.mdx +35 -0
  154. package/docs/07-reference/03-ai-sdk-rsc/11-use-streamable-value.mdx +46 -0
  155. package/docs/07-reference/03-ai-sdk-rsc/20-render.mdx +262 -0
  156. package/docs/07-reference/03-ai-sdk-rsc/index.mdx +67 -0
  157. package/docs/07-reference/04-stream-helpers/01-ai-stream.mdx +89 -0
  158. package/docs/07-reference/04-stream-helpers/02-streaming-text-response.mdx +79 -0
  159. package/docs/07-reference/04-stream-helpers/05-stream-to-response.mdx +108 -0
  160. package/docs/07-reference/04-stream-helpers/07-openai-stream.mdx +77 -0
  161. package/docs/07-reference/04-stream-helpers/08-anthropic-stream.mdx +79 -0
  162. package/docs/07-reference/04-stream-helpers/09-aws-bedrock-stream.mdx +91 -0
  163. package/docs/07-reference/04-stream-helpers/10-aws-bedrock-anthropic-stream.mdx +96 -0
  164. package/docs/07-reference/04-stream-helpers/10-aws-bedrock-messages-stream.mdx +96 -0
  165. package/docs/07-reference/04-stream-helpers/11-aws-bedrock-cohere-stream.mdx +93 -0
  166. package/docs/07-reference/04-stream-helpers/12-aws-bedrock-llama-2-stream.mdx +93 -0
  167. package/docs/07-reference/04-stream-helpers/13-cohere-stream.mdx +78 -0
  168. package/docs/07-reference/04-stream-helpers/14-google-generative-ai-stream.mdx +85 -0
  169. package/docs/07-reference/04-stream-helpers/15-hugging-face-stream.mdx +84 -0
  170. package/docs/07-reference/04-stream-helpers/16-langchain-adapter.mdx +98 -0
  171. package/docs/07-reference/04-stream-helpers/16-llamaindex-adapter.mdx +70 -0
  172. package/docs/07-reference/04-stream-helpers/17-mistral-stream.mdx +81 -0
  173. package/docs/07-reference/04-stream-helpers/18-replicate-stream.mdx +83 -0
  174. package/docs/07-reference/04-stream-helpers/19-inkeep-stream.mdx +80 -0
  175. package/docs/07-reference/04-stream-helpers/index.mdx +103 -0
  176. package/docs/07-reference/05-ai-sdk-errors/ai-api-call-error.mdx +30 -0
  177. package/docs/07-reference/05-ai-sdk-errors/ai-download-error.mdx +27 -0
  178. package/docs/07-reference/05-ai-sdk-errors/ai-empty-response-body-error.mdx +24 -0
  179. package/docs/07-reference/05-ai-sdk-errors/ai-invalid-argument-error.mdx +26 -0
  180. package/docs/07-reference/05-ai-sdk-errors/ai-invalid-data-content-error.mdx +25 -0
  181. package/docs/07-reference/05-ai-sdk-errors/ai-invalid-data-content.mdx +26 -0
  182. package/docs/07-reference/05-ai-sdk-errors/ai-invalid-message-role-error.mdx +25 -0
  183. package/docs/07-reference/05-ai-sdk-errors/ai-invalid-prompt-error.mdx +47 -0
  184. package/docs/07-reference/05-ai-sdk-errors/ai-invalid-response-data-error.mdx +25 -0
  185. package/docs/07-reference/05-ai-sdk-errors/ai-invalid-tool-approval-error.mdx +25 -0
  186. package/docs/07-reference/05-ai-sdk-errors/ai-invalid-tool-input-error.mdx +27 -0
  187. package/docs/07-reference/05-ai-sdk-errors/ai-json-parse-error.mdx +25 -0
  188. package/docs/07-reference/05-ai-sdk-errors/ai-load-api-key-error.mdx +24 -0
  189. package/docs/07-reference/05-ai-sdk-errors/ai-load-setting-error.mdx +24 -0
  190. package/docs/07-reference/05-ai-sdk-errors/ai-message-conversion-error.mdx +25 -0
  191. package/docs/07-reference/05-ai-sdk-errors/ai-no-content-generated-error.mdx +24 -0
  192. package/docs/07-reference/05-ai-sdk-errors/ai-no-image-generated-error.mdx +36 -0
  193. package/docs/07-reference/05-ai-sdk-errors/ai-no-object-generated-error.mdx +43 -0
  194. package/docs/07-reference/05-ai-sdk-errors/ai-no-speech-generated-error.mdx +25 -0
  195. package/docs/07-reference/05-ai-sdk-errors/ai-no-such-model-error.mdx +26 -0
  196. package/docs/07-reference/05-ai-sdk-errors/ai-no-such-provider-error.mdx +28 -0
  197. package/docs/07-reference/05-ai-sdk-errors/ai-no-such-tool-error.mdx +26 -0
  198. package/docs/07-reference/05-ai-sdk-errors/ai-no-transcript-generated-error.mdx +25 -0
  199. package/docs/07-reference/05-ai-sdk-errors/ai-retry-error.mdx +27 -0
  200. package/docs/07-reference/05-ai-sdk-errors/ai-too-many-embedding-values-for-call-error.mdx +27 -0
  201. package/docs/07-reference/05-ai-sdk-errors/ai-tool-call-not-found-for-approval-error.mdx +26 -0
  202. package/docs/07-reference/05-ai-sdk-errors/ai-tool-call-repair-error.mdx +28 -0
  203. package/docs/07-reference/05-ai-sdk-errors/ai-type-validation-error.mdx +25 -0
  204. package/docs/07-reference/05-ai-sdk-errors/ai-unsupported-functionality-error.mdx +25 -0
  205. package/docs/07-reference/05-ai-sdk-errors/index.mdx +38 -0
  206. package/docs/07-reference/index.mdx +34 -0
  207. package/docs/08-migration-guides/00-versioning.mdx +46 -0
  208. package/docs/08-migration-guides/24-migration-guide-6-0.mdx +823 -0
  209. package/docs/08-migration-guides/25-migration-guide-5-0-data.mdx +882 -0
  210. package/docs/08-migration-guides/26-migration-guide-5-0.mdx +3427 -0
  211. package/docs/08-migration-guides/27-migration-guide-4-2.mdx +99 -0
  212. package/docs/08-migration-guides/28-migration-guide-4-1.mdx +14 -0
  213. package/docs/08-migration-guides/29-migration-guide-4-0.mdx +1157 -0
  214. package/docs/08-migration-guides/36-migration-guide-3-4.mdx +14 -0
  215. package/docs/08-migration-guides/37-migration-guide-3-3.mdx +64 -0
  216. package/docs/08-migration-guides/38-migration-guide-3-2.mdx +46 -0
  217. package/docs/08-migration-guides/39-migration-guide-3-1.mdx +168 -0
  218. package/docs/08-migration-guides/index.mdx +22 -0
  219. package/docs/09-troubleshooting/01-azure-stream-slow.mdx +33 -0
  220. package/docs/09-troubleshooting/02-client-side-function-calls-not-invoked.mdx +22 -0
  221. package/docs/09-troubleshooting/03-server-actions-in-client-components.mdx +40 -0
  222. package/docs/09-troubleshooting/04-strange-stream-output.mdx +36 -0
  223. package/docs/09-troubleshooting/05-streamable-ui-errors.mdx +16 -0
  224. package/docs/09-troubleshooting/05-tool-invocation-missing-result.mdx +106 -0
  225. package/docs/09-troubleshooting/06-streaming-not-working-when-deployed.mdx +31 -0
  226. package/docs/09-troubleshooting/06-streaming-not-working-when-proxied.mdx +31 -0
  227. package/docs/09-troubleshooting/06-timeout-on-vercel.mdx +60 -0
  228. package/docs/09-troubleshooting/07-unclosed-streams.mdx +34 -0
  229. package/docs/09-troubleshooting/08-use-chat-failed-to-parse-stream.mdx +26 -0
  230. package/docs/09-troubleshooting/09-client-stream-error.mdx +25 -0
  231. package/docs/09-troubleshooting/10-use-chat-tools-no-response.mdx +32 -0
  232. package/docs/09-troubleshooting/11-use-chat-custom-request-options.mdx +149 -0
  233. package/docs/09-troubleshooting/12-typescript-performance-zod.mdx +46 -0
  234. package/docs/09-troubleshooting/12-use-chat-an-error-occurred.mdx +59 -0
  235. package/docs/09-troubleshooting/13-repeated-assistant-messages.mdx +73 -0
  236. package/docs/09-troubleshooting/14-stream-abort-handling.mdx +73 -0
  237. package/docs/09-troubleshooting/14-tool-calling-with-structured-outputs.mdx +48 -0
  238. package/docs/09-troubleshooting/15-abort-breaks-resumable-streams.mdx +55 -0
  239. package/docs/09-troubleshooting/15-stream-text-not-working.mdx +33 -0
  240. package/docs/09-troubleshooting/16-streaming-status-delay.mdx +63 -0
  241. package/docs/09-troubleshooting/17-use-chat-stale-body-data.mdx +141 -0
  242. package/docs/09-troubleshooting/18-ontoolcall-type-narrowing.mdx +66 -0
  243. package/docs/09-troubleshooting/19-unsupported-model-version.mdx +50 -0
  244. package/docs/09-troubleshooting/20-no-object-generated-content-filter.mdx +72 -0
  245. package/docs/09-troubleshooting/30-model-is-not-assignable-to-type.mdx +21 -0
  246. package/docs/09-troubleshooting/40-typescript-cannot-find-namespace-jsx.mdx +24 -0
  247. package/docs/09-troubleshooting/50-react-maximum-update-depth-exceeded.mdx +39 -0
  248. package/docs/09-troubleshooting/60-jest-cannot-find-module-ai-rsc.mdx +22 -0
  249. package/docs/09-troubleshooting/index.mdx +11 -0
  250. package/package.json +7 -3
@@ -0,0 +1,68 @@
+ ---
+ title: Multiple Streamables
+ description: Learn to handle multiple streamables in your application.
+ ---
+
+ # Multiple Streams
+
+ ## Multiple Streamable UIs
+
+ The AI SDK RSC APIs allow you to compose and return any number of streamable UIs, along with other data, in a single request. This can be useful when you want to decouple the UI into smaller components and stream them separately.
+
+ ```tsx file='app/actions.tsx'
+ 'use server';
+
+ import { createStreamableUI } from '@ai-sdk/rsc';
+
+ export async function getWeather() {
+   const weatherUI = createStreamableUI();
+   const forecastUI = createStreamableUI();
+
+   weatherUI.update(<div>Loading weather...</div>);
+   forecastUI.update(<div>Loading forecast...</div>);
+
+   getWeatherData().then(weatherData => {
+     weatherUI.done(<div>{weatherData}</div>);
+   });
+
+   getForecastData().then(forecastData => {
+     forecastUI.done(<div>{forecastData}</div>);
+   });
+
+   // Return both streamable UIs and other data fields.
+   return {
+     requestedAt: Date.now(),
+     weather: weatherUI.value,
+     forecast: forecastUI.value,
+   };
+ }
+ ```
+
+ The client side code is similar to the previous example, but the [tool call](/docs/ai-sdk-core/tools-and-tool-calling) will return the new data structure with the weather and forecast UIs. Depending on the speed of getting weather and forecast data, these two components might be updated independently.
+
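+ As a minimal sketch (calling the action directly from a client component rather than through a tool call; the file name and markup are illustrative), the returned streamable values can be rendered like any other React nodes and keep updating as the server calls `update`/`done`:
+
+ ```tsx file='app/page.tsx'
+ 'use client';
+
+ import { useState, type ReactNode } from 'react';
+ import { getWeather } from './actions';
+
+ export default function Page() {
+   // Streamable UI values are React nodes that update as the server streams.
+   const [weather, setWeather] = useState<ReactNode>(null);
+   const [forecast, setForecast] = useState<ReactNode>(null);
+
+   return (
+     <div>
+       <button
+         onClick={async () => {
+           const result = await getWeather();
+           setWeather(result.weather);
+           setForecast(result.forecast);
+         }}
+       >
+         Get weather
+       </button>
+       {weather}
+       {forecast}
+     </div>
+   );
+ }
+ ```
+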
+ ## Nested Streamable UIs
+
+ You can stream UI components within other UI components. This allows you to create complex UIs that are built up from smaller, reusable components. In the example below, we pass a `historyChart` streamable as a prop to a `StockCard` component. The StockCard can render the `historyChart` streamable, and it will automatically update as the server responds with new data.
+
+ ```tsx file='app/actions.tsx'
+ async function getStockHistoryChart({ symbol }: { symbol: string }) {
+   'use server';
+
+   const ui = createStreamableUI(<Spinner />);
+
+   // We need to wrap this in an async IIFE to avoid blocking.
+   (async () => {
+     const price = await getStockPrice({ symbol });
+
+     // Show a spinner as the history chart for now.
+     const historyChart = createStreamableUI(<Spinner />);
+     ui.done(<StockCard historyChart={historyChart.value} price={price} />);
+
+     // Get the history data and then update that part of the UI.
+     const historyData = await fetch('https://my-stock-data-api.com').then(res =>
+       res.json(),
+     );
+     historyChart.done(<HistoryChart data={historyData} />);
+   })();
+
+   return ui;
+ }
+ ```
@@ -0,0 +1,60 @@
+ ---
+ title: Rate Limiting
+ description: Learn how to rate limit your application.
+ ---
+
+ # Rate Limiting
+
+ Rate limiting helps you protect your APIs from abuse. It involves setting a
+ maximum threshold on the number of requests a client can make within a
+ specified timeframe. This simple technique acts as a gatekeeper,
+ preventing excessive usage that can degrade service performance and incur
+ unnecessary costs.
+
+ ## Rate Limiting with Vercel KV and Upstash Ratelimit
+
+ In this example, you will protect an API endpoint using [Vercel KV](https://vercel.com/storage/kv)
+ and [Upstash Ratelimit](https://github.com/upstash/ratelimit).
+
+ ```tsx filename='app/api/generate/route.ts'
+ import kv from '@vercel/kv';
+ import { streamText } from 'ai';
+ __PROVIDER_IMPORT__;
+ import { Ratelimit } from '@upstash/ratelimit';
+ import { NextRequest } from 'next/server';
+
+ // Allow streaming responses up to 30 seconds
+ export const maxDuration = 30;
+
+ // Create a rate limiter
+ const ratelimit = new Ratelimit({
+   redis: kv,
+   limiter: Ratelimit.fixedWindow(5, '30s'),
+ });
+
+ export async function POST(req: NextRequest) {
+   // Call the rate limiter with the request IP
+   const ip = req.ip ?? 'ip';
+   const { success, remaining } = await ratelimit.limit(ip);
+
+   // Block the request if unsuccessful
+   if (!success) {
+     return new Response('Ratelimited!', { status: 429 });
+   }
+
+   const { messages } = await req.json();
+
+   const result = streamText({
+     model: __MODEL__,
+     messages,
+   });
+
+   return result.toUIMessageStreamResponse();
+ }
+ ```
+
+ ## Simplify API Protection
+
+ With Vercel KV and Upstash Ratelimit, it is possible to protect your APIs
+ from abuse with ease. To learn more about how Ratelimit works and
+ how it can be configured to your needs, see the [Ratelimit Documentation](https://upstash.com/docs/oss/sdks/ts/ratelimit/overview).
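+
+ As a minimal sketch of an alternative configuration (the exact options are described in the Upstash docs), you can swap the fixed window for a sliding window and scope the limit per user instead of per IP:
+
+ ```tsx
+ import kv from '@vercel/kv';
+ import { Ratelimit } from '@upstash/ratelimit';
+
+ // Sliding window: at most 20 requests per key within any rolling 60-second window.
+ const ratelimit = new Ratelimit({
+   redis: kv,
+   limiter: Ratelimit.slidingWindow(20, '60 s'),
+   prefix: 'chat', // namespace the keys so different routes can use separate limits
+ });
+
+ // Inside the route handler, rate limit by an authenticated user id (assumed to
+ // come from your own auth layer) instead of the request IP:
+ // const { success } = await ratelimit.limit(`user:${userId}`);
+ ```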
@@ -0,0 +1,213 @@
+ ---
+ title: Rendering UI with Language Models
+ description: Rendering UI with Language Models
+ ---
+
+ # Rendering User Interfaces with Language Models
+
+ Language models generate text, so at first it may seem like you would only need to render text in your application.
+
+ ```tsx highlight="16" filename="app/actions.tsx"
+ const { text } = await generateText({
+   model: __MODEL__,
+   system: 'You are a friendly assistant',
+   prompt: 'What is the weather in SF?',
+   tools: {
+     getWeather: {
+       description: 'Get the weather for a location',
+       parameters: z.object({
+         city: z.string().describe('The city to get the weather for'),
+         unit: z
+           .enum(['C', 'F'])
+           .describe('The unit to display the temperature in'),
+       }),
+       execute: async ({ city, unit }) => {
+         const weather = getWeather({ city, unit });
+         return `It is currently ${weather.value}°${unit} and ${weather.description} in ${city}!`;
+       },
+     },
+   },
+ });
+ ```
+
+ Above, the language model is passed a [tool](/docs/ai-sdk-core/tools-and-tool-calling) called `getWeather` that returns the weather information as text. However, if you instead return a JSON object that represents the weather information, you can use it to render a React component.
+
+ ```tsx highlight="18-23" filename="app/action.ts"
+ const { text } = await generateText({
+   model: __MODEL__,
+   system: 'You are a friendly assistant',
+   prompt: 'What is the weather in SF?',
+   tools: {
+     getWeather: {
+       description: 'Get the weather for a location',
+       parameters: z.object({
+         city: z.string().describe('The city to get the weather for'),
+         unit: z
+           .enum(['C', 'F'])
+           .describe('The unit to display the temperature in'),
+       }),
+       execute: async ({ city, unit }) => {
+         const weather = getWeather({ city, unit });
+         const { temperature, description, forecast } = weather;
+
+         return {
+           temperature,
+           unit,
+           description,
+           forecast,
+         };
+       },
+     },
+   },
+ });
+ ```
+
+ Now you can use the object returned by the `getWeather` function to conditionally render a React component `<WeatherCard/>` that displays the weather information by passing the object as props.
+
+ ```tsx filename="app/page.tsx"
+ return (
+   <div>
+     {messages.map(message => {
+       if (message.role === 'function') {
+         const { name, content } = message;
+         const { temperature, unit, description, forecast } = content;
+
+         return (
+           <WeatherCard
+             weather={{
+               temperature,
+               unit,
+               description,
+               forecast,
+             }}
+           />
+         );
+       }
+     })}
+   </div>
+ );
+ ```
+
+ Here's a little preview of what that might look like.
+
+ <div className="not-prose flex flex-col2">
+   <CardPlayer
+     type="weather"
+     title="Weather"
+     description="An example of an assistant that renders the weather information in a streamed component."
+   />
+ </div>
+
+ Rendering interfaces as part of language model generations elevates the user experience of your application, allowing people to interact with language models beyond text.
+
+ They also make it easier for you to interpret [sequential tool calls](/docs/ai-sdk-rsc/multistep-interfaces) that take place across multiple steps, and help you identify and debug where the model reasoned incorrectly.
+
+ ## Rendering Multiple User Interfaces
+
+ To recap, an application has to go through the following steps to render user interfaces as part of model generations:
+
+ 1. The user prompts the language model.
+ 2. The language model generates a response that includes a tool call.
+ 3. The tool call returns a JSON object that represents the user interface.
+ 4. The response is sent to the client.
+ 5. The client receives the response and checks if the latest message was a tool call.
+ 6. If it was a tool call, the client renders the user interface based on the JSON object returned by the tool call.
+
+ Most applications have multiple tools that are called by the language model, and each tool can return a different user interface.
+
+ For example, a tool that searches for courses can return a list of courses, while a tool that searches for people can return a list of people. As this list grows, the complexity of your application will grow as well, and it can become increasingly difficult to manage these user interfaces.
+
+ ```tsx filename='app/page.tsx'
+ {
+   message.role === 'tool' ? (
+     message.name === 'api-search-course' ? (
+       <Courses courses={message.content} />
+     ) : message.name === 'api-search-profile' ? (
+       <People people={message.content} />
+     ) : message.name === 'api-meetings' ? (
+       <Meetings meetings={message.content} />
+     ) : message.name === 'api-search-building' ? (
+       <Buildings buildings={message.content} />
+     ) : message.name === 'api-events' ? (
+       <Events events={message.content} />
+     ) : message.name === 'api-meals' ? (
+       <Meals meals={message.content} />
+     ) : null
+   ) : (
+     <div>{message.content}</div>
+   );
+ }
+ ```
+
+ ## Rendering User Interfaces on the Server
+
+ The **AI SDK RSC (`@ai-sdk/rsc`)** takes advantage of RSCs to solve the problem of managing all your React components on the client side, allowing you to render React components on the server and stream them to the client.
+
+ Rather than conditionally rendering user interfaces on the client based on the data returned by the language model, you can directly stream them from the server during a model generation.
+
+ ```tsx highlight="3,22-31,38" filename="app/action.ts"
+ import { createStreamableUI } from '@ai-sdk/rsc';
+
+ const uiStream = createStreamableUI();
+
+ const { text } = await generateText({
+   model: __MODEL__,
+   system: 'You are a friendly assistant',
+   prompt: 'What is the weather in SF?',
+   tools: {
+     getWeather: {
+       description: 'Get the weather for a location',
+       parameters: z.object({
+         city: z.string().describe('The city to get the weather for'),
+         unit: z
+           .enum(['C', 'F'])
+           .describe('The unit to display the temperature in'),
+       }),
+       execute: async ({ city, unit }) => {
+         const weather = getWeather({ city, unit });
+         const { temperature, description, forecast } = weather;
+
+         uiStream.done(
+           <WeatherCard
+             weather={{
+               temperature,
+               unit,
+               description,
+               forecast,
+             }}
+           />,
+         );
+       },
+     },
+   },
+ });
+
+ return {
+   display: uiStream.value,
+ };
+ ```
+
+ The [`createStreamableUI`](/docs/reference/ai-sdk-rsc/create-streamable-ui) function belongs to the `@ai-sdk/rsc` module and creates a stream that can send React components to the client.
+
+ On the server, you render the `<WeatherCard/>` component with the props passed to it, and then stream it to the client. On the client side, you only need to render the UI that is streamed from the server.
+
+ ```tsx filename="app/page.tsx" highlight="4"
+ return (
+   <div>
+     {messages.map(message => (
+       <div>{message.display}</div>
+     ))}
+   </div>
+ );
+ ```
+
+ Now the steps involved are simplified:
+
+ 1. The user prompts the language model.
+ 2. The language model generates a response that includes a tool call.
+ 3. The tool call renders a React component along with relevant props that represent the user interface.
+ 4. The response is streamed to the client and rendered directly.
+
+ > **Note:** You can also render text on the server and stream it to the client using React Server Components. This way, all operations from language model generation to UI rendering can be done on the server, while the client only needs to render the UI that is streamed from the server.
+
+ Check out this [example](/examples/next-app/interface/stream-component-updates) for a full illustration of how to stream component updates with React Server Components in Next.js App Router.
@@ -0,0 +1,120 @@
+ ---
+ title: Language Models as Routers
+ description: Generative User Interfaces and Language Models as Routers
+ ---
+
+ # Generative User Interfaces
+
+ Since language models can render user interfaces as part of their generations, the resulting model generations are referred to as generative user interfaces.
+
+ In this section, we will learn more about generative user interfaces and their impact on the way AI applications are built.
+
+ ## Deterministic Routes and Probabilistic Routing
+
+ Generative user interfaces are not deterministic because they depend on the model's generation output. Since these generations are probabilistic, it is possible for every user query to result in a different user interface.
+
+ Users expect their experience using your application to be predictable, so non-deterministic user interfaces can sound like a bad idea at first. However, language models can be set up to limit their generations to a particular set of outputs using their ability to call functions.
+
+ When language models are provided with a set of function definitions and instructed to execute any of them based on the user query, they do one of the following:
+
+ - Execute the function that is most relevant to the user query.
+ - Not execute any function if the user query is outside the scope of the functions available to them.
+
+ ```tsx filename='app/actions.ts'
+ const sendMessage = (prompt: string) =>
+   generateText({
+     model: __MODEL__,
+     system: 'you are a friendly weather assistant!',
+     prompt,
+     tools: {
+       getWeather: {
+         description: 'Get the weather in a location',
+         parameters: z.object({
+           location: z.string().describe('The location to get the weather for'),
+         }),
+         execute: async ({ location }: { location: string }) => ({
+           location,
+           temperature: 72 + Math.floor(Math.random() * 21) - 10,
+         }),
+       },
+     },
+   });
+
+ sendMessage('What is the weather in San Francisco?'); // getWeather is called
+ sendMessage('What is the weather in New York?'); // getWeather is called
+ sendMessage('What events are happening in London?'); // No function is called
+ ```
+
+ This way, it is possible to ensure that the generations result in deterministic outputs, while the choice the model makes remains probabilistic.
+
+ This emergent ability of a language model to choose whether a function needs to be executed based on a user query is believed to be the model emulating "reasoning".
+
+ As a result, the ability of language models to both reason about which function to execute and render user interfaces at the same time gives you the ability to build applications where a language model can be used as a router.
+
+ ## Language Models as Routers
+
+ Historically, developers had to write routing logic that connected different parts of an application to be navigable by a user and complete a specific task.
+
+ In web applications today, most of the routing logic takes place in the form of routes:
+
+ - `/login` would navigate you to a page with a login form.
+ - `/user/john` would navigate you to a page with profile details about John.
+ - `/api/events?limit=5` would display the five most recent events from an events database.
+
+ While routes help you build web applications that connect different parts of an application into a seamless user experience, they can also become a burden to manage as the complexity of an application grows.
+
+ Next.js has helped reduce complexity in developing with routes by introducing:
+
+ - File-based routing system
+ - Dynamic routing
+ - API routes
+ - Middleware
+ - App Router, and so on
+
+ With language models becoming better at reasoning, we believe that there is a future where developers only write core application-specific components while models take care of routing them based on the user's state in an application.
+
+ With generative user interfaces, the language model decides which user interface to render based on the user's state in the application, giving users the flexibility to interact with your application in a conversational manner instead of navigating through a series of predefined routes.
+
+ ### Routing by parameters
+
+ For routes like:
+
+ - `/profile/[username]`
+ - `/search?q=[query]`
+ - `/media/[id]`
+
+ that have segments dependent on dynamic data, the language model can generate the correct parameters and render the user interface.
+
+ For example, when you're in a search application, you can ask the language model to search for artworks from different artists. The language model will call the search function with the artist's name as a parameter and render the search results.
+
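+ As a rough sketch of what such a tool could look like with [`streamUI`](/docs/reference/ai-sdk-rsc/stream-ui) (the `searchArtworks` helper and the `ArtworkGrid` and `Spinner` components are illustrative, not part of the SDK):
+
+ ```tsx
+ import { streamUI } from '@ai-sdk/rsc';
+ import { z } from 'zod';
+
+ const result = await streamUI({
+   model: __MODEL__,
+   prompt: 'Show me artworks by Van Gogh',
+   text: ({ content }) => <p>{content}</p>,
+   tools: {
+     searchArtworks: {
+       description: 'Search for artworks by a given artist',
+       parameters: z.object({
+         artist: z.string().describe('The name of the artist'),
+       }),
+       generate: async function* ({ artist }) {
+         yield <Spinner />; // shown while the search is running
+         const artworks = await searchArtworks(artist);
+         return <ArtworkGrid artworks={artworks} />;
+       },
+     },
+   },
+ });
+
+ // result.value is the streamed UI that can be returned to the client.
+ ```
+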
+ <div className="not-prose">
+   <CardPlayer
+     type="media-search"
+     title="Media Search"
+     description="Let your users see more than words can say by rendering components directly within your search experience."
+   />
+ </div>
+
+ ### Routing by sequence
+
+ For actions that require a sequence of steps to be completed by navigating through different routes, the language model can generate the correct sequence of routes to complete in order to fulfill the user's request.
+
+ For example, when you're in a calendar application, you can ask the language model to schedule a happy hour evening with your friends. The language model will then understand your request and perform the right sequence of [tool calls](/docs/ai-sdk-core/tools-and-tool-calling) to:
+
+ 1. Look up your calendar
+ 2. Look up your friends' calendars
+ 3. Determine the best time for everyone
+ 4. Search for nearby happy hour spots
+ 5. Create an event and send out invites to your friends
+
+ <div className="not-prose">
+   <CardPlayer
+     type="event-planning"
+     title="Planning an Event"
+     description="The model calls functions and generates interfaces based on user intent, acting like a router."
+   />
+ </div>
+
+ Just by defining functions to look up contacts, pull events from a calendar, and search for nearby locations, the model is able to sequentially navigate the routes for you.
+
+ To learn more, check out these [examples](/examples/next-app/interface) using the `streamUI` function to stream generative user interfaces to the client based on the response from the language model.
@@ -0,0 +1,115 @@
+ ---
+ title: Multistep Interfaces
+ description: Concepts behind building multistep interfaces
+ ---
+
+ # Multistep Interfaces
+
+ Multistep interfaces refer to user interfaces that require multiple independent steps to be executed in order to complete a specific task.
+
+ In order to understand multistep interfaces, it is important to understand two concepts:
+
+ - Tool composition
+ - Application context
+
+ **Tool composition** is the process of combining multiple [tools](/docs/ai-sdk-core/tools-and-tool-calling) to create a new tool. This is a powerful concept that allows you to break down complex tasks into smaller, more manageable steps.
+
+ **Application context** refers to the state of the application at any given point in time. This includes the user's input, the output of the language model, and any other relevant information.
+
+ When designing multistep interfaces, you need to consider how the tools in your application can be composed together to form a coherent user experience, as well as how the application context changes as the user progresses through the interface.
+
+ ## Application Context
+
+ The application context can be thought of as the conversation history between the user and the language model. The richer the context, the more information the model has to generate relevant responses.
+
+ In the context of multistep interfaces, the application context becomes even more important. This is because **the user's input in one step may affect the output of the model in the next step**.
+
+ For example, consider a meal logging application that helps users track their daily food intake. The language model is provided with the following tools:
+
+ - `log_meal` takes in parameters like the name of the food, the quantity, and the time of consumption to log a meal.
+ - `delete_meal` takes in the name of the meal to be deleted.
+
+ When the user logs a meal, the model generates a response confirming the meal has been logged.
+
+ ```txt highlight="2"
+ User: Log a chicken shawarma for lunch.
+ Tool: log_meal("chicken shawarma", "250g", "12:00 PM")
+ Model: Chicken shawarma has been logged for lunch.
+ ```
+
+ Now when the user decides to delete the meal, the model should be able to reference the previous step to identify the meal to be deleted.
+
+ ```txt highlight="7"
+ User: Log a chicken shawarma for lunch.
+ Tool: log_meal("chicken shawarma", "250g", "12:00 PM")
+ Model: Chicken shawarma has been logged for lunch.
+ ...
+ ...
+ User: I skipped lunch today, can you update my log?
+ Tool: delete_meal("chicken shawarma")
+ Model: Chicken shawarma has been deleted from your log.
+ ```
+
+ In this example, managing the application context is important for the model to generate the correct response. The model needs to have information about the previous actions in order to generate the parameters for the `delete_meal` tool.
+
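+ In practice, this usually means keeping the full conversation, including tool calls and tool results, and sending it back to the model on every turn. A minimal sketch (the `conversationHistory` array and the `log_meal`/`delete_meal` tool definitions are assumed to exist elsewhere in your application):
+
+ ```tsx
+ import { generateText } from 'ai';
+
+ const result = await generateText({
+   model: __MODEL__,
+   system: 'You are a meal tracking assistant.',
+   // All prior user, assistant, and tool messages, so the model can resolve
+   // references like "the meal I logged earlier".
+   messages: conversationHistory,
+   tools: { log_meal, delete_meal },
+ });
+
+ // Append the newly generated messages so the next turn has the full context.
+ conversationHistory.push(...result.response.messages);
+ ```
+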
+ ## Tool Composition
+
+ Tool composition is the process of combining multiple tools to create a new tool. This involves defining the inputs and outputs of each tool, as well as how they interact with each other.
+
+ The design of how these tools can be composed together to form a multistep interface is crucial to both the user experience of your application and the model's ability to generate the correct output.
+
+ For example, consider a flight booking assistant that can help users book flights. The assistant can be designed to have the following tools:
+
+ - `searchFlights`: Searches for flights based on the user's query.
+ - `lookupFlight`: Looks up details of a specific flight based on the flight number.
+ - `bookFlight`: Books a flight based on the user's selection.
+
+ The `searchFlights` tool is called when the user wants to look up flights for a specific route. This would typically mean the tool should be able to take in parameters like the origin and destination of the flight.
+
+ The `lookupFlight` tool is called when the user wants to get more details about a specific flight. This would typically mean the tool should be able to take in parameters like the flight number and return information about seat availability.
+
+ The `bookFlight` tool is called when the user decides to book a flight. In order to identify the flight to book, the tool should be able to take in parameters like the flight number, trip date, and passenger details.
+
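+ As a rough sketch, the three tools could be defined like this (the `findFlights`, `getFlightDetails`, and `createBooking` helpers called inside `execute` are assumed to exist in your application, not the SDK):
+
+ ```tsx
+ import { z } from 'zod';
+
+ export const tools = {
+   searchFlights: {
+     description: 'Search for flights between two cities',
+     parameters: z.object({
+       origin: z.string().describe('The city of departure'),
+       destination: z.string().describe('The city of arrival'),
+     }),
+     execute: async ({ origin, destination }) =>
+       findFlights({ origin, destination }),
+   },
+   lookupFlight: {
+     description: 'Look up details and seat availability for a flight number',
+     parameters: z.object({
+       flightNumber: z.string().describe('The flight number'),
+     }),
+     execute: async ({ flightNumber }) => getFlightDetails({ flightNumber }),
+   },
+   bookFlight: {
+     description: 'Book a flight for the given passengers',
+     parameters: z.object({
+       flightNumber: z.string().describe('The flight number'),
+       date: z.string().describe('The trip date'),
+       passengers: z.array(z.string()).describe('The passenger names'),
+     }),
+     execute: async ({ flightNumber, date, passengers }) =>
+       createBooking({ flightNumber, date, passengers }),
+   },
+ };
+ ```
+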
+ So the conversation between the user and the model could look like this:
+
+ ```txt highlight="8"
+ User: I want to book a flight from New York to London.
+ Tool: searchFlights("New York", "London")
+ Model: Here are the available flights from New York to London.
+ User: I want to book flight number BA123 on 12th December for myself and my wife.
+ Tool: lookupFlight("BA123") -> "4 seats available"
+ Model: Sure, there are seats available! Can you provide the names of the passengers?
+ User: John Doe and Jane Doe.
+ Tool: bookFlight("BA123", "12th December", ["John Doe", "Jane Doe"])
+ Model: Your flight has been booked!
+ ```
+
+ In this conversation, the model cannot call the `bookFlight` tool right away because the passenger details are not available in the application context. As a result, it asks the user to provide the passenger details before proceeding with the booking.
+
+ Looking up passenger information could have been another tool that the model calls before calling the `bookFlight` tool, assuming the user is logged into the application. This way, the model does not have to ask the user for the passenger details and can proceed with the booking.
+
+ ```txt highlight="5,6"
+ User: I want to book a flight from New York to London.
+ Tool: searchFlights("New York", "London")
+ Model: Here are the available flights from New York to London.
+ User: I want to book flight number BA123 on 12th December for myself and my wife.
+ Tool: lookupContacts() -> ["John Doe", "Jane Doe"]
+ Tool: bookFlight("BA123", "12th December", ["John Doe", "Jane Doe"])
+ Model: Your flight has been booked!
+ ```
+
+ The `lookupContacts` tool is called before the `bookFlight` tool to ensure that the passenger details are available in the application context when booking the flight. This way, the model can reduce the number of steps required from the user by calling tools that populate its context and then using that information to complete the booking process.
+
+ Now, let's introduce another tool called `lookupBooking` that can be used to show booking details by taking in the name of the passenger as a parameter. This tool can be composed with the existing tools to provide a more complete user experience.
+
+ ```txt highlight="2-4"
+ User: What's the status of my wife's upcoming flight?
+ Tool: lookupContacts() -> ["John Doe", "Jane Doe"]
+ Tool: lookupBooking("Jane Doe") -> "BA123 confirmed"
+ Tool: lookupFlight("BA123") -> "Flight BA123 is scheduled to depart on 12th December."
+ Model: Your wife's flight BA123 is confirmed and scheduled to depart on 12th December.
+ ```
+
+ In this example, the `lookupBooking` tool is used to provide the user with the status of their wife's upcoming flight. By composing this tool with the existing tools, the model is able to generate a response that includes the booking status and the departure date of the flight without requiring the user to provide additional information.
+
+ As a result, the more tools you design that can be composed together, the more complex and powerful your application can become.
@@ -0,0 +1,55 @@
+ ---
+ title: Sequential Generations
+ description: Learn how to implement sequential generations ("chains") with the AI SDK
+ ---
+
+ # Sequential Generations
+
+ When working with the AI SDK, you may want to create sequences of generations (often referred to as "chains" or "pipes"), where the output of one becomes the input for the next. This can be useful for creating more complex AI-powered workflows or for breaking down larger tasks into smaller, more manageable steps.
+
+ ## Example
+
+ In a sequential chain, the output of one generation is directly used as input for the next generation. This allows you to create a series of dependent generations, where each step builds upon the previous one.
+
+ Here's an example of how you can implement sequential actions:
+
+ ```typescript
+ import { generateText } from 'ai';
+ __PROVIDER_IMPORT__;
+
+ async function sequentialActions() {
+   // Generate blog post ideas
+   const { text: ideasGeneration } = await generateText({
+     model: __MODEL__,
+     prompt: 'Generate 10 ideas for a blog post about making spaghetti.',
+   });
+
+   console.log('Generated Ideas:\n', ideasGeneration);
+
+   // Pick the best idea
+   const { text: bestIdeaGeneration } = await generateText({
+     model: __MODEL__,
+     prompt: `Here are some blog post ideas about making spaghetti:
+ ${ideasGeneration}
+
+ Pick the best idea from the list above and explain why it's the best.`,
+   });
+
+   console.log('\nBest Idea:\n', bestIdeaGeneration);
+
+   // Generate an outline
+   const { text: outlineGeneration } = await generateText({
+     model: __MODEL__,
+     prompt: `We've chosen the following blog post idea about making spaghetti:
+ ${bestIdeaGeneration}
+
+ Create a detailed outline for a blog post based on this idea.`,
+   });
+
+   console.log('\nBlog Post Outline:\n', outlineGeneration);
+ }
+
+ sequentialActions().catch(console.error);
+ ```
+
+ In this example, we first generate ideas for a blog post, then pick the best idea, and finally create an outline based on that idea. Each step uses the output from the previous step as input for the next generation.