ai 6.0.31 → 6.0.33

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (252)
  1. package/CHANGELOG.md +12 -0
  2. package/dist/index.js +12 -2
  3. package/dist/index.js.map +1 -1
  4. package/dist/index.mjs +12 -2
  5. package/dist/index.mjs.map +1 -1
  6. package/dist/internal/index.js +1 -1
  7. package/dist/internal/index.mjs +1 -1
  8. package/docs/00-introduction/index.mdx +76 -0
  9. package/docs/02-foundations/01-overview.mdx +43 -0
  10. package/docs/02-foundations/02-providers-and-models.mdx +163 -0
  11. package/docs/02-foundations/03-prompts.mdx +620 -0
  12. package/docs/02-foundations/04-tools.mdx +160 -0
  13. package/docs/02-foundations/05-streaming.mdx +62 -0
  14. package/docs/02-foundations/index.mdx +43 -0
  15. package/docs/02-getting-started/00-choosing-a-provider.mdx +110 -0
  16. package/docs/02-getting-started/01-navigating-the-library.mdx +85 -0
  17. package/docs/02-getting-started/02-nextjs-app-router.mdx +556 -0
  18. package/docs/02-getting-started/03-nextjs-pages-router.mdx +542 -0
  19. package/docs/02-getting-started/04-svelte.mdx +627 -0
  20. package/docs/02-getting-started/05-nuxt.mdx +566 -0
  21. package/docs/02-getting-started/06-nodejs.mdx +512 -0
  22. package/docs/02-getting-started/07-expo.mdx +766 -0
  23. package/docs/02-getting-started/08-tanstack-start.mdx +583 -0
  24. package/docs/02-getting-started/index.mdx +44 -0
  25. package/docs/03-agents/01-overview.mdx +96 -0
  26. package/docs/03-agents/02-building-agents.mdx +367 -0
  27. package/docs/03-agents/03-workflows.mdx +370 -0
  28. package/docs/03-agents/04-loop-control.mdx +350 -0
  29. package/docs/03-agents/05-configuring-call-options.mdx +286 -0
  30. package/docs/03-agents/index.mdx +40 -0
  31. package/docs/03-ai-sdk-core/01-overview.mdx +33 -0
  32. package/docs/03-ai-sdk-core/05-generating-text.mdx +600 -0
  33. package/docs/03-ai-sdk-core/10-generating-structured-data.mdx +662 -0
  34. package/docs/03-ai-sdk-core/15-tools-and-tool-calling.mdx +1102 -0
  35. package/docs/03-ai-sdk-core/16-mcp-tools.mdx +375 -0
  36. package/docs/03-ai-sdk-core/20-prompt-engineering.mdx +144 -0
  37. package/docs/03-ai-sdk-core/25-settings.mdx +198 -0
  38. package/docs/03-ai-sdk-core/30-embeddings.mdx +247 -0
  39. package/docs/03-ai-sdk-core/31-reranking.mdx +218 -0
  40. package/docs/03-ai-sdk-core/35-image-generation.mdx +341 -0
  41. package/docs/03-ai-sdk-core/36-transcription.mdx +173 -0
  42. package/docs/03-ai-sdk-core/37-speech.mdx +167 -0
  43. package/docs/03-ai-sdk-core/40-middleware.mdx +480 -0
  44. package/docs/03-ai-sdk-core/45-provider-management.mdx +349 -0
  45. package/docs/03-ai-sdk-core/50-error-handling.mdx +149 -0
  46. package/docs/03-ai-sdk-core/55-testing.mdx +218 -0
  47. package/docs/03-ai-sdk-core/60-telemetry.mdx +313 -0
  48. package/docs/03-ai-sdk-core/65-devtools.mdx +107 -0
  49. package/docs/03-ai-sdk-core/index.mdx +88 -0
  50. package/docs/04-ai-sdk-ui/01-overview.mdx +44 -0
  51. package/docs/04-ai-sdk-ui/02-chatbot.mdx +1313 -0
  52. package/docs/04-ai-sdk-ui/03-chatbot-message-persistence.mdx +535 -0
  53. package/docs/04-ai-sdk-ui/03-chatbot-resume-streams.mdx +263 -0
  54. package/docs/04-ai-sdk-ui/03-chatbot-tool-usage.mdx +682 -0
  55. package/docs/04-ai-sdk-ui/04-generative-user-interfaces.mdx +389 -0
  56. package/docs/04-ai-sdk-ui/05-completion.mdx +186 -0
  57. package/docs/04-ai-sdk-ui/08-object-generation.mdx +344 -0
  58. package/docs/04-ai-sdk-ui/20-streaming-data.mdx +397 -0
  59. package/docs/04-ai-sdk-ui/21-error-handling.mdx +190 -0
  60. package/docs/04-ai-sdk-ui/21-transport.mdx +174 -0
  61. package/docs/04-ai-sdk-ui/24-reading-ui-message-streams.mdx +104 -0
  62. package/docs/04-ai-sdk-ui/25-message-metadata.mdx +152 -0
  63. package/docs/04-ai-sdk-ui/50-stream-protocol.mdx +477 -0
  64. package/docs/04-ai-sdk-ui/index.mdx +64 -0
  65. package/docs/05-ai-sdk-rsc/01-overview.mdx +45 -0
  66. package/docs/05-ai-sdk-rsc/02-streaming-react-components.mdx +209 -0
  67. package/docs/05-ai-sdk-rsc/03-generative-ui-state.mdx +279 -0
  68. package/docs/05-ai-sdk-rsc/03-saving-and-restoring-states.mdx +105 -0
  69. package/docs/05-ai-sdk-rsc/04-multistep-interfaces.mdx +282 -0
  70. package/docs/05-ai-sdk-rsc/05-streaming-values.mdx +158 -0
  71. package/docs/05-ai-sdk-rsc/06-loading-state.mdx +273 -0
  72. package/docs/05-ai-sdk-rsc/08-error-handling.mdx +96 -0
  73. package/docs/05-ai-sdk-rsc/09-authentication.mdx +42 -0
  74. package/docs/05-ai-sdk-rsc/10-migrating-to-ui.mdx +722 -0
  75. package/docs/05-ai-sdk-rsc/index.mdx +58 -0
  76. package/docs/06-advanced/01-prompt-engineering.mdx +96 -0
  77. package/docs/06-advanced/02-stopping-streams.mdx +184 -0
  78. package/docs/06-advanced/03-backpressure.mdx +173 -0
  79. package/docs/06-advanced/04-caching.mdx +169 -0
  80. package/docs/06-advanced/05-multiple-streamables.mdx +68 -0
  81. package/docs/06-advanced/06-rate-limiting.mdx +60 -0
  82. package/docs/06-advanced/07-rendering-ui-with-language-models.mdx +213 -0
  83. package/docs/06-advanced/08-model-as-router.mdx +120 -0
  84. package/docs/06-advanced/09-multistep-interfaces.mdx +115 -0
  85. package/docs/06-advanced/09-sequential-generations.mdx +55 -0
  86. package/docs/06-advanced/10-vercel-deployment-guide.mdx +117 -0
  87. package/docs/06-advanced/index.mdx +11 -0
  88. package/docs/07-reference/01-ai-sdk-core/01-generate-text.mdx +2142 -0
  89. package/docs/07-reference/01-ai-sdk-core/02-stream-text.mdx +3215 -0
  90. package/docs/07-reference/01-ai-sdk-core/03-generate-object.mdx +780 -0
  91. package/docs/07-reference/01-ai-sdk-core/04-stream-object.mdx +1140 -0
  92. package/docs/07-reference/01-ai-sdk-core/05-embed.mdx +190 -0
  93. package/docs/07-reference/01-ai-sdk-core/06-embed-many.mdx +171 -0
  94. package/docs/07-reference/01-ai-sdk-core/06-rerank.mdx +309 -0
  95. package/docs/07-reference/01-ai-sdk-core/10-generate-image.mdx +227 -0
  96. package/docs/07-reference/01-ai-sdk-core/11-transcribe.mdx +138 -0
  97. package/docs/07-reference/01-ai-sdk-core/12-generate-speech.mdx +214 -0
  98. package/docs/07-reference/01-ai-sdk-core/15-agent.mdx +203 -0
  99. package/docs/07-reference/01-ai-sdk-core/16-tool-loop-agent.mdx +449 -0
  100. package/docs/07-reference/01-ai-sdk-core/17-create-agent-ui-stream.mdx +148 -0
  101. package/docs/07-reference/01-ai-sdk-core/18-create-agent-ui-stream-response.mdx +168 -0
  102. package/docs/07-reference/01-ai-sdk-core/18-pipe-agent-ui-stream-to-response.mdx +144 -0
  103. package/docs/07-reference/01-ai-sdk-core/20-tool.mdx +196 -0
  104. package/docs/07-reference/01-ai-sdk-core/22-dynamic-tool.mdx +175 -0
  105. package/docs/07-reference/01-ai-sdk-core/23-create-mcp-client.mdx +410 -0
  106. package/docs/07-reference/01-ai-sdk-core/24-mcp-stdio-transport.mdx +68 -0
  107. package/docs/07-reference/01-ai-sdk-core/25-json-schema.mdx +94 -0
  108. package/docs/07-reference/01-ai-sdk-core/26-zod-schema.mdx +109 -0
  109. package/docs/07-reference/01-ai-sdk-core/27-valibot-schema.mdx +55 -0
  110. package/docs/07-reference/01-ai-sdk-core/28-output.mdx +342 -0
  111. package/docs/07-reference/01-ai-sdk-core/30-model-message.mdx +415 -0
  112. package/docs/07-reference/01-ai-sdk-core/31-ui-message.mdx +246 -0
  113. package/docs/07-reference/01-ai-sdk-core/32-validate-ui-messages.mdx +101 -0
  114. package/docs/07-reference/01-ai-sdk-core/33-safe-validate-ui-messages.mdx +113 -0
  115. package/docs/07-reference/01-ai-sdk-core/40-provider-registry.mdx +182 -0
  116. package/docs/07-reference/01-ai-sdk-core/42-custom-provider.mdx +121 -0
  117. package/docs/07-reference/01-ai-sdk-core/50-cosine-similarity.mdx +52 -0
  118. package/docs/07-reference/01-ai-sdk-core/60-wrap-language-model.mdx +59 -0
  119. package/docs/07-reference/01-ai-sdk-core/61-wrap-image-model.mdx +64 -0
  120. package/docs/07-reference/01-ai-sdk-core/65-language-model-v2-middleware.mdx +46 -0
  121. package/docs/07-reference/01-ai-sdk-core/66-extract-reasoning-middleware.mdx +68 -0
  122. package/docs/07-reference/01-ai-sdk-core/67-simulate-streaming-middleware.mdx +71 -0
  123. package/docs/07-reference/01-ai-sdk-core/68-default-settings-middleware.mdx +80 -0
  124. package/docs/07-reference/01-ai-sdk-core/69-add-tool-input-examples-middleware.mdx +155 -0
  125. package/docs/07-reference/01-ai-sdk-core/70-extract-json-middleware.mdx +147 -0
  126. package/docs/07-reference/01-ai-sdk-core/70-step-count-is.mdx +84 -0
  127. package/docs/07-reference/01-ai-sdk-core/71-has-tool-call.mdx +120 -0
  128. package/docs/07-reference/01-ai-sdk-core/75-simulate-readable-stream.mdx +94 -0
  129. package/docs/07-reference/01-ai-sdk-core/80-smooth-stream.mdx +145 -0
  130. package/docs/07-reference/01-ai-sdk-core/90-generate-id.mdx +43 -0
  131. package/docs/07-reference/01-ai-sdk-core/91-create-id-generator.mdx +89 -0
  132. package/docs/07-reference/01-ai-sdk-core/index.mdx +159 -0
  133. package/docs/07-reference/02-ai-sdk-ui/01-use-chat.mdx +446 -0
  134. package/docs/07-reference/02-ai-sdk-ui/02-use-completion.mdx +179 -0
  135. package/docs/07-reference/02-ai-sdk-ui/03-use-object.mdx +178 -0
  136. package/docs/07-reference/02-ai-sdk-ui/31-convert-to-model-messages.mdx +230 -0
  137. package/docs/07-reference/02-ai-sdk-ui/32-prune-messages.mdx +108 -0
  138. package/docs/07-reference/02-ai-sdk-ui/40-create-ui-message-stream.mdx +151 -0
  139. package/docs/07-reference/02-ai-sdk-ui/41-create-ui-message-stream-response.mdx +113 -0
  140. package/docs/07-reference/02-ai-sdk-ui/42-pipe-ui-message-stream-to-response.mdx +73 -0
  141. package/docs/07-reference/02-ai-sdk-ui/43-read-ui-message-stream.mdx +57 -0
  142. package/docs/07-reference/02-ai-sdk-ui/46-infer-ui-tools.mdx +99 -0
  143. package/docs/07-reference/02-ai-sdk-ui/47-infer-ui-tool.mdx +75 -0
  144. package/docs/07-reference/02-ai-sdk-ui/50-direct-chat-transport.mdx +333 -0
  145. package/docs/07-reference/02-ai-sdk-ui/index.mdx +89 -0
  146. package/docs/07-reference/03-ai-sdk-rsc/01-stream-ui.mdx +767 -0
  147. package/docs/07-reference/03-ai-sdk-rsc/02-create-ai.mdx +90 -0
  148. package/docs/07-reference/03-ai-sdk-rsc/03-create-streamable-ui.mdx +91 -0
  149. package/docs/07-reference/03-ai-sdk-rsc/04-create-streamable-value.mdx +48 -0
  150. package/docs/07-reference/03-ai-sdk-rsc/05-read-streamable-value.mdx +78 -0
  151. package/docs/07-reference/03-ai-sdk-rsc/06-get-ai-state.mdx +50 -0
  152. package/docs/07-reference/03-ai-sdk-rsc/07-get-mutable-ai-state.mdx +70 -0
  153. package/docs/07-reference/03-ai-sdk-rsc/08-use-ai-state.mdx +26 -0
  154. package/docs/07-reference/03-ai-sdk-rsc/09-use-actions.mdx +42 -0
  155. package/docs/07-reference/03-ai-sdk-rsc/10-use-ui-state.mdx +35 -0
  156. package/docs/07-reference/03-ai-sdk-rsc/11-use-streamable-value.mdx +46 -0
  157. package/docs/07-reference/03-ai-sdk-rsc/20-render.mdx +262 -0
  158. package/docs/07-reference/03-ai-sdk-rsc/index.mdx +67 -0
  159. package/docs/07-reference/04-stream-helpers/01-ai-stream.mdx +89 -0
  160. package/docs/07-reference/04-stream-helpers/02-streaming-text-response.mdx +79 -0
  161. package/docs/07-reference/04-stream-helpers/05-stream-to-response.mdx +108 -0
  162. package/docs/07-reference/04-stream-helpers/07-openai-stream.mdx +77 -0
  163. package/docs/07-reference/04-stream-helpers/08-anthropic-stream.mdx +79 -0
  164. package/docs/07-reference/04-stream-helpers/09-aws-bedrock-stream.mdx +91 -0
  165. package/docs/07-reference/04-stream-helpers/10-aws-bedrock-anthropic-stream.mdx +96 -0
  166. package/docs/07-reference/04-stream-helpers/10-aws-bedrock-messages-stream.mdx +96 -0
  167. package/docs/07-reference/04-stream-helpers/11-aws-bedrock-cohere-stream.mdx +93 -0
  168. package/docs/07-reference/04-stream-helpers/12-aws-bedrock-llama-2-stream.mdx +93 -0
  169. package/docs/07-reference/04-stream-helpers/13-cohere-stream.mdx +78 -0
  170. package/docs/07-reference/04-stream-helpers/14-google-generative-ai-stream.mdx +85 -0
  171. package/docs/07-reference/04-stream-helpers/15-hugging-face-stream.mdx +84 -0
  172. package/docs/07-reference/04-stream-helpers/16-langchain-adapter.mdx +98 -0
  173. package/docs/07-reference/04-stream-helpers/16-llamaindex-adapter.mdx +70 -0
  174. package/docs/07-reference/04-stream-helpers/17-mistral-stream.mdx +81 -0
  175. package/docs/07-reference/04-stream-helpers/18-replicate-stream.mdx +83 -0
  176. package/docs/07-reference/04-stream-helpers/19-inkeep-stream.mdx +80 -0
  177. package/docs/07-reference/04-stream-helpers/index.mdx +103 -0
  178. package/docs/07-reference/05-ai-sdk-errors/ai-api-call-error.mdx +30 -0
  179. package/docs/07-reference/05-ai-sdk-errors/ai-download-error.mdx +27 -0
  180. package/docs/07-reference/05-ai-sdk-errors/ai-empty-response-body-error.mdx +24 -0
  181. package/docs/07-reference/05-ai-sdk-errors/ai-invalid-argument-error.mdx +26 -0
  182. package/docs/07-reference/05-ai-sdk-errors/ai-invalid-data-content-error.mdx +25 -0
  183. package/docs/07-reference/05-ai-sdk-errors/ai-invalid-data-content.mdx +26 -0
  184. package/docs/07-reference/05-ai-sdk-errors/ai-invalid-message-role-error.mdx +25 -0
  185. package/docs/07-reference/05-ai-sdk-errors/ai-invalid-prompt-error.mdx +47 -0
  186. package/docs/07-reference/05-ai-sdk-errors/ai-invalid-response-data-error.mdx +25 -0
  187. package/docs/07-reference/05-ai-sdk-errors/ai-invalid-tool-approval-error.mdx +25 -0
  188. package/docs/07-reference/05-ai-sdk-errors/ai-invalid-tool-input-error.mdx +27 -0
  189. package/docs/07-reference/05-ai-sdk-errors/ai-json-parse-error.mdx +25 -0
  190. package/docs/07-reference/05-ai-sdk-errors/ai-load-api-key-error.mdx +24 -0
  191. package/docs/07-reference/05-ai-sdk-errors/ai-load-setting-error.mdx +24 -0
  192. package/docs/07-reference/05-ai-sdk-errors/ai-message-conversion-error.mdx +25 -0
  193. package/docs/07-reference/05-ai-sdk-errors/ai-no-content-generated-error.mdx +24 -0
  194. package/docs/07-reference/05-ai-sdk-errors/ai-no-image-generated-error.mdx +36 -0
  195. package/docs/07-reference/05-ai-sdk-errors/ai-no-object-generated-error.mdx +43 -0
  196. package/docs/07-reference/05-ai-sdk-errors/ai-no-speech-generated-error.mdx +25 -0
  197. package/docs/07-reference/05-ai-sdk-errors/ai-no-such-model-error.mdx +26 -0
  198. package/docs/07-reference/05-ai-sdk-errors/ai-no-such-provider-error.mdx +28 -0
  199. package/docs/07-reference/05-ai-sdk-errors/ai-no-such-tool-error.mdx +26 -0
  200. package/docs/07-reference/05-ai-sdk-errors/ai-no-transcript-generated-error.mdx +25 -0
  201. package/docs/07-reference/05-ai-sdk-errors/ai-retry-error.mdx +27 -0
  202. package/docs/07-reference/05-ai-sdk-errors/ai-too-many-embedding-values-for-call-error.mdx +27 -0
  203. package/docs/07-reference/05-ai-sdk-errors/ai-tool-call-not-found-for-approval-error.mdx +26 -0
  204. package/docs/07-reference/05-ai-sdk-errors/ai-tool-call-repair-error.mdx +28 -0
  205. package/docs/07-reference/05-ai-sdk-errors/ai-type-validation-error.mdx +25 -0
  206. package/docs/07-reference/05-ai-sdk-errors/ai-unsupported-functionality-error.mdx +25 -0
  207. package/docs/07-reference/05-ai-sdk-errors/index.mdx +38 -0
  208. package/docs/07-reference/index.mdx +34 -0
  209. package/docs/08-migration-guides/00-versioning.mdx +46 -0
  210. package/docs/08-migration-guides/24-migration-guide-6-0.mdx +823 -0
  211. package/docs/08-migration-guides/25-migration-guide-5-0-data.mdx +882 -0
  212. package/docs/08-migration-guides/26-migration-guide-5-0.mdx +3427 -0
  213. package/docs/08-migration-guides/27-migration-guide-4-2.mdx +99 -0
  214. package/docs/08-migration-guides/28-migration-guide-4-1.mdx +14 -0
  215. package/docs/08-migration-guides/29-migration-guide-4-0.mdx +1157 -0
  216. package/docs/08-migration-guides/36-migration-guide-3-4.mdx +14 -0
  217. package/docs/08-migration-guides/37-migration-guide-3-3.mdx +64 -0
  218. package/docs/08-migration-guides/38-migration-guide-3-2.mdx +46 -0
  219. package/docs/08-migration-guides/39-migration-guide-3-1.mdx +168 -0
  220. package/docs/08-migration-guides/index.mdx +22 -0
  221. package/docs/09-troubleshooting/01-azure-stream-slow.mdx +33 -0
  222. package/docs/09-troubleshooting/02-client-side-function-calls-not-invoked.mdx +22 -0
  223. package/docs/09-troubleshooting/03-server-actions-in-client-components.mdx +40 -0
  224. package/docs/09-troubleshooting/04-strange-stream-output.mdx +36 -0
  225. package/docs/09-troubleshooting/05-streamable-ui-errors.mdx +16 -0
  226. package/docs/09-troubleshooting/05-tool-invocation-missing-result.mdx +106 -0
  227. package/docs/09-troubleshooting/06-streaming-not-working-when-deployed.mdx +31 -0
  228. package/docs/09-troubleshooting/06-streaming-not-working-when-proxied.mdx +31 -0
  229. package/docs/09-troubleshooting/06-timeout-on-vercel.mdx +60 -0
  230. package/docs/09-troubleshooting/07-unclosed-streams.mdx +34 -0
  231. package/docs/09-troubleshooting/08-use-chat-failed-to-parse-stream.mdx +26 -0
  232. package/docs/09-troubleshooting/09-client-stream-error.mdx +25 -0
  233. package/docs/09-troubleshooting/10-use-chat-tools-no-response.mdx +32 -0
  234. package/docs/09-troubleshooting/11-use-chat-custom-request-options.mdx +149 -0
  235. package/docs/09-troubleshooting/12-typescript-performance-zod.mdx +46 -0
  236. package/docs/09-troubleshooting/12-use-chat-an-error-occurred.mdx +59 -0
  237. package/docs/09-troubleshooting/13-repeated-assistant-messages.mdx +73 -0
  238. package/docs/09-troubleshooting/14-stream-abort-handling.mdx +73 -0
  239. package/docs/09-troubleshooting/14-tool-calling-with-structured-outputs.mdx +48 -0
  240. package/docs/09-troubleshooting/15-abort-breaks-resumable-streams.mdx +55 -0
  241. package/docs/09-troubleshooting/15-stream-text-not-working.mdx +33 -0
  242. package/docs/09-troubleshooting/16-streaming-status-delay.mdx +63 -0
  243. package/docs/09-troubleshooting/17-use-chat-stale-body-data.mdx +141 -0
  244. package/docs/09-troubleshooting/18-ontoolcall-type-narrowing.mdx +66 -0
  245. package/docs/09-troubleshooting/19-unsupported-model-version.mdx +50 -0
  246. package/docs/09-troubleshooting/20-no-object-generated-content-filter.mdx +72 -0
  247. package/docs/09-troubleshooting/30-model-is-not-assignable-to-type.mdx +21 -0
  248. package/docs/09-troubleshooting/40-typescript-cannot-find-namespace-jsx.mdx +24 -0
  249. package/docs/09-troubleshooting/50-react-maximum-update-depth-exceeded.mdx +39 -0
  250. package/docs/09-troubleshooting/60-jest-cannot-find-module-ai-rsc.mdx +22 -0
  251. package/docs/09-troubleshooting/index.mdx +11 -0
  252. package/package.json +8 -4
package/docs/04-ai-sdk-ui/02-chatbot.mdx (new file)
@@ -0,0 +1,1313 @@
1
+ ---
2
+ title: Chatbot
3
+ description: Learn how to use the useChat hook.
4
+ ---
5
+
6
+ # Chatbot
7
+
8
+ The `useChat` hook makes it effortless to create a conversational user interface for your chatbot application. It enables the streaming of chat messages from your AI provider, manages the chat state, and updates the UI automatically as new messages arrive.
9
+
10
+ To summarize, the `useChat` hook provides the following features:
11
+
12
+ - **Message Streaming**: All the messages from the AI provider are streamed to the chat UI in real-time.
13
+ - **Managed States**: The hook manages the messages, status, error, and other chat states for you.
14
+ - **Seamless Integration**: Easily integrate your chat AI into any design or layout with minimal effort.
15
+
16
+ In this guide, you will learn how to use the `useChat` hook to create a chatbot application with real-time message streaming.
17
+ Check out our [chatbot with tools guide](/docs/ai-sdk-ui/chatbot-with-tool-calling) to learn how to use tools in your chatbot.
18
+ Let's start with the following example.
19
+
20
+ ## Example
21
+
22
+ ```tsx filename='app/page.tsx'
23
+ 'use client';
24
+
25
+ import { useChat } from '@ai-sdk/react';
26
+ import { DefaultChatTransport } from 'ai';
27
+ import { useState } from 'react';
28
+
29
+ export default function Page() {
30
+ const { messages, sendMessage, status } = useChat({
31
+ transport: new DefaultChatTransport({
32
+ api: '/api/chat',
33
+ }),
34
+ });
35
+ const [input, setInput] = useState('');
36
+
37
+ return (
38
+ <>
39
+ {messages.map(message => (
40
+ <div key={message.id}>
41
+ {message.role === 'user' ? 'User: ' : 'AI: '}
42
+ {message.parts.map((part, index) =>
43
+ part.type === 'text' ? <span key={index}>{part.text}</span> : null,
44
+ )}
45
+ </div>
46
+ ))}
47
+
48
+ <form
49
+ onSubmit={e => {
50
+ e.preventDefault();
51
+ if (input.trim()) {
52
+ sendMessage({ text: input });
53
+ setInput('');
54
+ }
55
+ }}
56
+ >
57
+ <input
58
+ value={input}
59
+ onChange={e => setInput(e.target.value)}
60
+ disabled={status !== 'ready'}
61
+ placeholder="Say something..."
62
+ />
63
+ <button type="submit" disabled={status !== 'ready'}>
64
+ Submit
65
+ </button>
66
+ </form>
67
+ </>
68
+ );
69
+ }
70
+ ```
71
+
72
+ ```ts filename='app/api/chat/route.ts'
73
+ import { convertToModelMessages, streamText, UIMessage } from 'ai';
74
+ __PROVIDER_IMPORT__;
75
+
76
+ // Allow streaming responses up to 30 seconds
77
+ export const maxDuration = 30;
78
+
79
+ export async function POST(req: Request) {
80
+ const { messages }: { messages: UIMessage[] } = await req.json();
81
+
82
+ const result = streamText({
83
+ model: __MODEL__,
84
+ system: 'You are a helpful assistant.',
85
+ messages: await convertToModelMessages(messages),
86
+ });
87
+
88
+ return result.toUIMessageStreamResponse();
89
+ }
90
+ ```
91
+
92
+ <Note>
93
+ The UI messages have a new `parts` property that contains the message parts.
94
+ We recommend rendering the messages using the `parts` property instead of the
95
+ `content` property. The parts property supports different message types,
96
+ including text, tool invocation, and tool result, and allows for more flexible
97
+ and complex chat UIs.
98
+ </Note>
99
+
100
+ In the `Page` component, the `useChat` hook sends a request to your AI provider endpoint whenever the user submits a message using `sendMessage`.
101
+ The messages are then streamed back in real-time and displayed in the chat UI.
102
+
103
+ This enables a seamless chat experience where the user can see the AI response as soon as it is available,
104
+ without having to wait for the entire response to be received.
105
+
106
+ ## Customized UI
107
+
108
+ `useChat` also provides ways to manage the chat message states via code, show status, and update messages without being triggered by user interactions.
109
+
110
+ ### Status
111
+
112
+ The `useChat` hook returns a `status`. It has the following possible values:
113
+
114
+ - `submitted`: The message has been sent to the API and we're awaiting the start of the response stream.
115
+ - `streaming`: The response is actively streaming in from the API, receiving chunks of data.
116
+ - `ready`: The full response has been received and processed; a new user message can be submitted.
117
+ - `error`: An error occurred during the API request, preventing successful completion.
118
+
119
+ You can use `status` for purposes such as the following:
120
+
121
+ - To show a loading spinner while the chatbot is processing the user's message.
122
+ - To show a "Stop" button to abort the current message.
123
+ - To disable the submit button.
124
+
125
+ ```tsx filename='app/page.tsx' highlight="6,22-29,36"
126
+ 'use client';
127
+
128
+ import { useChat } from '@ai-sdk/react';
129
+ import { DefaultChatTransport } from 'ai';
130
+ import { useState } from 'react';
131
+
132
+ export default function Page() {
133
+ const { messages, sendMessage, status, stop } = useChat({
134
+ transport: new DefaultChatTransport({
135
+ api: '/api/chat',
136
+ }),
137
+ });
138
+ const [input, setInput] = useState('');
139
+
140
+ return (
141
+ <>
142
+ {messages.map(message => (
143
+ <div key={message.id}>
144
+ {message.role === 'user' ? 'User: ' : 'AI: '}
145
+ {message.parts.map((part, index) =>
146
+ part.type === 'text' ? <span key={index}>{part.text}</span> : null,
147
+ )}
148
+ </div>
149
+ ))}
150
+
151
+ {(status === 'submitted' || status === 'streaming') && (
152
+ <div>
153
+ {status === 'submitted' && <Spinner />}
154
+ <button type="button" onClick={() => stop()}>
155
+ Stop
156
+ </button>
157
+ </div>
158
+ )}
159
+
160
+ <form
161
+ onSubmit={e => {
162
+ e.preventDefault();
163
+ if (input.trim()) {
164
+ sendMessage({ text: input });
165
+ setInput('');
166
+ }
167
+ }}
168
+ >
169
+ <input
170
+ value={input}
171
+ onChange={e => setInput(e.target.value)}
172
+ disabled={status !== 'ready'}
173
+ placeholder="Say something..."
174
+ />
175
+ <button type="submit" disabled={status !== 'ready'}>
176
+ Submit
177
+ </button>
178
+ </form>
179
+ </>
180
+ );
181
+ }
182
+ ```
183
+
184
+ ### Error State
185
+
186
+ Similarly, the `error` state reflects the error object thrown during the fetch request.
187
+ It can be used to display an error message, disable the submit button, or show a retry button:
188
+
189
+ <Note>
190
+ We recommend showing a generic error message to the user, such as "Something
191
+ went wrong." This is a good practice to avoid leaking information from the
192
+ server.
193
+ </Note>
194
+
195
+ ```tsx filename="app/page.tsx" highlight="6,20-27,33"
196
+ 'use client';
197
+
198
+ import { useChat } from '@ai-sdk/react';
199
+ import { DefaultChatTransport } from 'ai';
200
+ import { useState } from 'react';
201
+
202
+ export default function Chat() {
203
+ const { messages, sendMessage, error, regenerate } = useChat({
204
+ transport: new DefaultChatTransport({
205
+ api: '/api/chat',
206
+ }),
207
+ });
208
+ const [input, setInput] = useState('');
209
+
210
+ return (
211
+ <div>
212
+ {messages.map(m => (
213
+ <div key={m.id}>
214
+ {m.role}:{' '}
215
+ {m.parts.map((part, index) =>
216
+ part.type === 'text' ? <span key={index}>{part.text}</span> : null,
217
+ )}
218
+ </div>
219
+ ))}
220
+
221
+ {error && (
222
+ <>
223
+ <div>An error occurred.</div>
224
+ <button type="button" onClick={() => regenerate()}>
225
+ Retry
226
+ </button>
227
+ </>
228
+ )}
229
+
230
+ <form
231
+ onSubmit={e => {
232
+ e.preventDefault();
233
+ if (input.trim()) {
234
+ sendMessage({ text: input });
235
+ setInput('');
236
+ }
237
+ }}
238
+ >
239
+ <input
240
+ value={input}
241
+ onChange={e => setInput(e.target.value)}
242
+ disabled={error != null}
243
+ />
244
+ </form>
245
+ </div>
246
+ );
247
+ }
248
+ ```
249
+
250
+ Please also see the [error handling](/docs/ai-sdk-ui/error-handling) guide for more information.
251
+
252
+ ### Modify messages
253
+
254
+ Sometimes, you may want to directly modify some existing messages. For example, a delete button can be added to each message to allow users to remove them from the chat history.
255
+
256
+ The `setMessages` function can help you achieve these tasks:
257
+
258
+ ```tsx
259
+ const { messages, setMessages } = useChat()
260
+
261
+ const handleDelete = (id) => {
262
+ setMessages(messages.filter(message => message.id !== id))
263
+ }
264
+
265
+ return <>
266
+ {messages.map(message => (
267
+ <div key={message.id}>
268
+ {message.role === 'user' ? 'User: ' : 'AI: '}
269
+ {message.parts.map((part, index) => (
270
+ part.type === 'text' ? (
271
+ <span key={index}>{part.text}</span>
272
+ ) : null
273
+ ))}
274
+ <button onClick={() => handleDelete(message.id)}>Delete</button>
275
+ </div>
276
+ ))}
277
+ ...
278
+ ```
279
+
280
+ You can think of `messages` and `setMessages` as a pair of `state` and `setState` in React.
281
+
282
+ ### Cancellation and regeneration
283
+
284
+ It's also a common use case to abort the response message while it's still streaming back from the AI provider. You can do this by calling the `stop` function returned by the `useChat` hook.
285
+
286
+ ```tsx
287
+ const { stop, status } = useChat()
288
+
289
+ return <>
290
+ <button onClick={stop} disabled={!(status === 'streaming' || status === 'submitted')}>Stop</button>
291
+ ...
292
+ ```
293
+
294
+ When the user clicks the "Stop" button, the fetch request will be aborted. This avoids consuming unnecessary resources and improves the UX of your chatbot application.
295
+
296
+ Similarly, you can also request the AI provider to reprocess the last message by calling the `regenerate` function returned by the `useChat` hook:
297
+
298
+ ```tsx
299
+ const { regenerate, status } = useChat();
300
+
301
+ return (
302
+ <>
303
+ <button
304
+ onClick={regenerate}
305
+ disabled={!(status === 'ready' || status === 'error')}
306
+ >
307
+ Regenerate
308
+ </button>
309
+ ...
310
+ </>
311
+ );
312
+ ```
313
+
314
+ When the user clicks the "Regenerate" button, the AI provider will regenerate the last message and replace the current one correspondingly.
315
+
316
+ ### Throttling UI Updates
317
+
318
+ <Note>This feature is currently only available for React.</Note>
319
+
320
+ By default, the `useChat` hook will trigger a render every time a new chunk is received.
321
+ You can throttle the UI updates with the `experimental_throttle` option.
322
+
323
+ ```tsx filename="page.tsx" highlight="2-3"
324
+ const { messages, ... } = useChat({
325
+ // Throttle the messages and data updates to 50ms:
326
+ experimental_throttle: 50
327
+ })
328
+ ```
329
+
330
+ ## Event Callbacks
331
+
332
+ `useChat` provides optional event callbacks that you can use to handle different stages of the chatbot lifecycle:
333
+
334
+ - `onFinish`: Called when the assistant response is completed. The event includes the response message, all messages, and flags for abort, disconnect, and errors.
335
+ - `onError`: Called when an error occurs during the fetch request.
336
+ - `onData`: Called whenever a data part is received.
337
+
338
+ These callbacks can be used to trigger additional actions, such as logging, analytics, or custom UI updates.
339
+
340
+ ```tsx
341
+ import { UIMessage } from 'ai';
342
+
343
+ const {
344
+ /* ... */
345
+ } = useChat({
346
+ onFinish: ({ message, messages, isAbort, isDisconnect, isError }) => {
347
+ // use information to e.g. update other UI states
348
+ },
349
+ onError: error => {
350
+ console.error('An error occurred:', error);
351
+ },
352
+ onData: data => {
353
+ console.log('Received data part from server:', data);
354
+ },
355
+ });
356
+ ```
357
+
358
+ It's worth noting that you can abort the processing by throwing an error in the `onData` callback. This will trigger the `onError` callback and stop the message from being appended to the chat UI. This can be useful for handling unexpected responses from the AI provider.
359
+
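+ For example, here is a minimal sketch that rejects a hypothetical `data-error` part (the part name is illustrative and assumes your server streams such a data part):
+
+ ```tsx
+ const { messages } = useChat({
+   onData: dataPart => {
+     // Hypothetical custom data part; adjust the name to whatever your server streams.
+     if (dataPart.type === 'data-error') {
+       // Throwing here stops processing and triggers onError instead.
+       throw new Error('The server reported an error.');
+     }
+   },
+   onError: error => {
+     console.error('An error occurred:', error);
+   },
+ });
+ ```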
360
+ ## Request Configuration
361
+
362
+ ### Custom headers, body, and credentials
363
+
364
+ By default, the `useChat` hook sends an HTTP POST request to the `/api/chat` endpoint with the message list as the request body. You can customize the request in two ways:
365
+
366
+ #### Hook-Level Configuration (Applied to all requests)
367
+
368
+ You can configure transport-level options that will be applied to all requests made by the hook:
369
+
370
+ ```tsx
371
+ import { useChat } from '@ai-sdk/react';
372
+ import { DefaultChatTransport } from 'ai';
373
+
374
+ const { messages, sendMessage } = useChat({
375
+ transport: new DefaultChatTransport({
376
+ api: '/api/custom-chat',
377
+ headers: {
378
+ Authorization: 'your_token',
379
+ },
380
+ body: {
381
+ user_id: '123',
382
+ },
383
+ credentials: 'same-origin',
384
+ }),
385
+ });
386
+ ```
387
+
388
+ #### Dynamic Hook-Level Configuration
389
+
390
+ You can also provide functions that return configuration values. This is useful for authentication tokens that need to be refreshed, or for configuration that depends on runtime conditions:
391
+
392
+ ```tsx
393
+ import { useChat } from '@ai-sdk/react';
394
+ import { DefaultChatTransport } from 'ai';
395
+
396
+ const { messages, sendMessage } = useChat({
397
+ transport: new DefaultChatTransport({
398
+ api: '/api/custom-chat',
399
+ headers: () => ({
400
+ Authorization: `Bearer ${getAuthToken()}`,
401
+ 'X-User-ID': getCurrentUserId(),
402
+ }),
403
+ body: () => ({
404
+ sessionId: getCurrentSessionId(),
405
+ preferences: getUserPreferences(),
406
+ }),
407
+ credentials: () => 'include',
408
+ }),
409
+ });
410
+ ```
411
+
412
+ <Note>
413
+ For component state that changes over time, use `useRef` to store the current
414
+ value and reference `ref.current` in your configuration function, or prefer
415
+ request-level options (see next section) for better reliability.
416
+ </Note>
417
+
418
+ #### Request-Level Configuration (Recommended)
419
+
420
+ <Note>
421
+ **Recommended**: Use request-level options for better flexibility and control.
422
+ Request-level options take precedence over hook-level options and allow you to
423
+ customize each request individually.
424
+ </Note>
425
+
426
+ ```tsx
427
+ // Pass options as the second parameter to sendMessage
428
+ sendMessage(
429
+ { text: input },
430
+ {
431
+ headers: {
432
+ Authorization: 'Bearer token123',
433
+ 'X-Custom-Header': 'custom-value',
434
+ },
435
+ body: {
436
+ temperature: 0.7,
437
+ max_tokens: 100,
438
+ user_id: '123',
439
+ },
440
+ metadata: {
441
+ userId: 'user123',
442
+ sessionId: 'session456',
443
+ },
444
+ },
445
+ );
446
+ ```
447
+
448
+ The request-level options are merged with hook-level options, with request-level options taking precedence. On your server side, you can handle the request with this additional information.
449
+
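+ For example, here is a sketch of a route that reads the extra fields sent above (the `temperature` and `user_id` keys are just the illustrative names from the previous snippet):
+
+ ```ts
+ import { convertToModelMessages, streamText, UIMessage } from 'ai';
+ __PROVIDER_IMPORT__;
+
+ export async function POST(req: Request) {
+   // Custom request-level body fields arrive alongside the messages.
+   const {
+     messages,
+     temperature,
+     user_id,
+   }: { messages: UIMessage[]; temperature?: number; user_id?: string } =
+     await req.json();
+
+   // `user_id` could be used for logging, rate limiting, etc.
+   const result = streamText({
+     model: __MODEL__,
+     temperature,
+     messages: await convertToModelMessages(messages),
+   });
+
+   return result.toUIMessageStreamResponse();
+ }
+ ```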
450
+ ### Setting custom body fields per request
451
+
452
+ You can configure custom `body` fields on a per-request basis using the second parameter of the `sendMessage` function.
453
+ This is useful if you want to pass in additional information to your backend that is not part of the message list.
454
+
455
+ ```tsx filename="app/page.tsx" highlight="20-25"
456
+ 'use client';
457
+
458
+ import { useChat } from '@ai-sdk/react';
459
+ import { useState } from 'react';
460
+
461
+ export default function Chat() {
462
+ const { messages, sendMessage } = useChat();
463
+ const [input, setInput] = useState('');
464
+
465
+ return (
466
+ <div>
467
+ {messages.map(m => (
468
+ <div key={m.id}>
469
+ {m.role}:{' '}
470
+ {m.parts.map((part, index) =>
471
+ part.type === 'text' ? <span key={index}>{part.text}</span> : null,
472
+ )}
473
+ </div>
474
+ ))}
475
+
476
+ <form
477
+ onSubmit={event => {
478
+ event.preventDefault();
479
+ if (input.trim()) {
480
+ sendMessage(
481
+ { text: input },
482
+ {
483
+ body: {
484
+ customKey: 'customValue',
485
+ },
486
+ },
487
+ );
488
+ setInput('');
489
+ }
490
+ }}
491
+ >
492
+ <input value={input} onChange={e => setInput(e.target.value)} />
493
+ </form>
494
+ </div>
495
+ );
496
+ }
497
+ ```
498
+
499
+ You can retrieve these custom fields on your server side by destructuring the request body:
500
+
501
+ ```ts filename="app/api/chat/route.ts" highlight="3,4"
502
+ export async function POST(req: Request) {
503
+ // Extract additional information ("customKey") from the body of the request:
504
+ const { messages, customKey }: { messages: UIMessage[]; customKey: string } =
505
+ await req.json();
506
+ //...
507
+ }
508
+ ```
509
+
510
+ ## Message Metadata
511
+
512
+ You can attach custom metadata to messages for tracking information like timestamps, model details, and token usage.
513
+
514
+ ```ts
515
+ // Server: Send metadata about the message
516
+ return result.toUIMessageStreamResponse({
517
+ messageMetadata: ({ part }) => {
518
+ if (part.type === 'start') {
519
+ return {
520
+ createdAt: Date.now(),
521
+ model: 'gpt-5.1',
522
+ };
523
+ }
524
+
525
+ if (part.type === 'finish') {
526
+ return {
527
+ totalTokens: part.totalUsage.totalTokens,
528
+ };
529
+ }
530
+ },
531
+ });
532
+ ```
533
+
534
+ ```tsx
535
+ // Client: Access metadata via message.metadata
536
+ {
537
+ messages.map(message => (
538
+ <div key={message.id}>
539
+ {message.role}:{' '}
540
+ {message.metadata?.createdAt &&
541
+ new Date(message.metadata.createdAt).toLocaleTimeString()}
542
+ {/* Render message content */}
543
+ {message.parts.map((part, index) =>
544
+ part.type === 'text' ? <span key={index}>{part.text}</span> : null,
545
+ )}
546
+ {/* Show token count if available */}
547
+ {message.metadata?.totalTokens && (
548
+ <span>{message.metadata.totalTokens} tokens</span>
549
+ )}
550
+ </div>
551
+ ));
552
+ }
553
+ ```
554
+
555
+ For complete examples with type safety and advanced use cases, see the [Message Metadata documentation](/docs/ai-sdk-ui/message-metadata).
556
+
557
+ ## Transport Configuration
558
+
559
+ You can configure custom transport behavior using the `transport` option to customize how messages are sent to your API:
560
+
561
+ ```tsx filename="app/page.tsx"
562
+ import { useChat } from '@ai-sdk/react';
563
+ import { DefaultChatTransport } from 'ai';
564
+
565
+ export default function Chat() {
566
+ const { messages, sendMessage } = useChat({
567
+ id: 'my-chat',
568
+ transport: new DefaultChatTransport({
569
+ prepareSendMessagesRequest: ({ id, messages }) => {
570
+ return {
571
+ body: {
572
+ id,
573
+ message: messages[messages.length - 1],
574
+ },
575
+ };
576
+ },
577
+ }),
578
+ });
579
+
580
+ // ... rest of your component
581
+ }
582
+ ```
583
+
584
+ The corresponding API route receives the custom request format:
585
+
586
+ ```ts filename="app/api/chat/route.ts"
587
+ import { convertToModelMessages, streamText } from 'ai';
+ __PROVIDER_IMPORT__;
+
+ export async function POST(req: Request) {
588
+ const { id, message } = await req.json();
589
+
590
+ // Load existing messages and add the new one
591
+ const messages = await loadMessages(id);
592
+ messages.push(message);
593
+
594
+ const result = streamText({
595
+ model: __MODEL__,
596
+ messages: await convertToModelMessages(messages),
597
+ });
598
+
599
+ return result.toUIMessageStreamResponse();
600
+ }
601
+ ```
602
+
603
+ ### Advanced: Trigger-based routing
604
+
605
+ For more complex scenarios like message regeneration, you can use trigger-based routing:
606
+
607
+ ```tsx filename="app/page.tsx"
608
+ import { useChat } from '@ai-sdk/react';
609
+ import { DefaultChatTransport } from 'ai';
610
+
611
+ export default function Chat() {
612
+ const { messages, sendMessage, regenerate } = useChat({
613
+ id: 'my-chat',
614
+ transport: new DefaultChatTransport({
615
+ prepareSendMessagesRequest: ({ id, messages, trigger, messageId }) => {
616
+ if (trigger === 'submit-user-message') {
617
+ return {
618
+ body: {
619
+ trigger: 'submit-user-message',
620
+ id,
621
+ message: messages[messages.length - 1],
622
+ messageId,
623
+ },
624
+ };
625
+ } else if (trigger === 'regenerate-assistant-message') {
626
+ return {
627
+ body: {
628
+ trigger: 'regenerate-assistant-message',
629
+ id,
630
+ messageId,
631
+ },
632
+ };
633
+ }
634
+ throw new Error(`Unsupported trigger: ${trigger}`);
635
+ },
636
+ }),
637
+ });
638
+
639
+ // ... rest of your component
640
+ }
641
+ ```
642
+
643
+ The corresponding API route would handle different triggers:
644
+
645
+ ```ts filename="app/api/chat/route.ts"
646
+ import { convertToModelMessages, streamText } from 'ai';
+ __PROVIDER_IMPORT__;
+
+ export async function POST(req: Request) {
647
+ const { trigger, id, message, messageId } = await req.json();
648
+
649
+ const chat = await readChat(id);
650
+ let messages = chat.messages;
651
+
652
+ if (trigger === 'submit-user-message') {
653
+ // Handle new user message
654
+ messages = [...messages, message];
655
+ } else if (trigger === 'regenerate-assistant-message') {
656
+ // Handle message regeneration - remove messages after messageId
657
+ const messageIndex = messages.findIndex(m => m.id === messageId);
658
+ if (messageIndex !== -1) {
659
+ messages = messages.slice(0, messageIndex);
660
+ }
661
+ }
662
+
663
+ const result = streamText({
664
+ model: __MODEL__,
665
+ messages: await convertToModelMessages(messages),
666
+ });
667
+
668
+ return result.toUIMessageStreamResponse();
669
+ }
670
+ ```
671
+
672
+ To learn more about building custom transports, refer to the [Transport API documentation](/docs/ai-sdk-ui/transport).
673
+
674
+ ### Direct Agent Transport
675
+
676
+ For scenarios where you want to communicate directly with an Agent without going through HTTP, you can use `DirectChatTransport`. This is useful for:
677
+
678
+ - Server-side rendering scenarios
679
+ - Testing without network
680
+ - Single-process applications
681
+
682
+ ```tsx filename="app/page.tsx"
683
+ import { useChat } from '@ai-sdk/react';
684
+ import { DirectChatTransport, ToolLoopAgent } from 'ai';
685
+ __PROVIDER_IMPORT__;
686
+
687
+ const agent = new ToolLoopAgent({
688
+ model: __MODEL__,
689
+ instructions: 'You are a helpful assistant.',
690
+ });
691
+
692
+ export default function Chat() {
693
+ const { messages, sendMessage, status } = useChat({
694
+ transport: new DirectChatTransport({ agent }),
695
+ });
696
+
697
+ return (
698
+ <>
699
+ {messages.map(message => (
700
+ <div key={message.id}>
701
+ {message.role === 'user' ? 'User: ' : 'AI: '}
702
+ {message.parts.map((part, index) =>
703
+ part.type === 'text' ? <span key={index}>{part.text}</span> : null,
704
+ )}
705
+ </div>
706
+ ))}
707
+
708
+ <button
709
+ onClick={() => sendMessage({ text: 'Hello!' })}
710
+ disabled={status !== 'ready'}
711
+ >
712
+ Send
713
+ </button>
714
+ </>
715
+ );
716
+ }
717
+ ```
718
+
719
+ The `DirectChatTransport` invokes the agent's `stream()` method directly, converting UI messages to model messages and streaming the response back as UI message chunks.
720
+
721
+ For more details, see the [DirectChatTransport reference](/docs/reference/ai-sdk-ui/direct-chat-transport).
722
+
723
+ ## Controlling the response stream
724
+
725
+ With `streamText`, you can control how error messages and usage information are sent back to the client.
726
+
727
+ ### Error Messages
728
+
729
+ By default, the error message is masked for security reasons.
730
+ The default error message is "An error occurred."
731
+ You can forward error messages or send your own error message by providing an `onError` function to `toUIMessageStreamResponse`:
732
+
733
+ ```ts filename="app/api/chat/route.ts" highlight="13-27"
734
+ import { convertToModelMessages, streamText, UIMessage } from 'ai';
735
+ __PROVIDER_IMPORT__;
736
+
737
+ export async function POST(req: Request) {
738
+ const { messages }: { messages: UIMessage[] } = await req.json();
739
+
740
+ const result = streamText({
741
+ model: __MODEL__,
742
+ messages: await convertToModelMessages(messages),
743
+ });
744
+
745
+ return result.toUIMessageStreamResponse({
746
+ onError: error => {
747
+ if (error == null) {
748
+ return 'unknown error';
749
+ }
750
+
751
+ if (typeof error === 'string') {
752
+ return error;
753
+ }
754
+
755
+ if (error instanceof Error) {
756
+ return error.message;
757
+ }
758
+
759
+ return JSON.stringify(error);
760
+ },
761
+ });
762
+ }
763
+ ```
764
+
765
+ ### Usage Information
766
+
767
+ Track token consumption and resource usage with [message metadata](/docs/ai-sdk-ui/message-metadata):
768
+
769
+ 1. Define a custom metadata type with usage fields (optional, for type safety)
770
+ 2. Attach usage data using `messageMetadata` in your response
771
+ 3. Display usage metrics in your UI components
772
+
773
+ Usage data is attached as metadata to messages and becomes available once the model completes its response generation.
774
+
775
+ ```ts
777
+ import {
778
+ convertToModelMessages,
779
+ streamText,
780
+ UIMessage,
781
+ type LanguageModelUsage,
782
+ } from 'ai';
783
+ __PROVIDER_IMPORT__;
784
+
785
+ // Create a new metadata type (optional for type-safety)
786
+ type MyMetadata = {
787
+ totalUsage: LanguageModelUsage;
788
+ };
789
+
790
+ // Create a new custom message type with your own metadata
791
+ export type MyUIMessage = UIMessage<MyMetadata>;
792
+
793
+ export async function POST(req: Request) {
794
+ const { messages }: { messages: MyUIMessage[] } = await req.json();
795
+
796
+ const result = streamText({
797
+ model: __MODEL__,
798
+ messages: await convertToModelMessages(messages),
799
+ });
800
+
801
+ return result.toUIMessageStreamResponse({
802
+ originalMessages: messages,
803
+ messageMetadata: ({ part }) => {
804
+ // Send total usage when generation is finished
805
+ if (part.type === 'finish') {
806
+ return { totalUsage: part.totalUsage };
807
+ }
808
+ },
809
+ });
810
+ }
811
+ ```
812
+
813
+ Then, on the client, you can access the message-level metadata.
814
+
815
+ ```tsx
816
+ 'use client';
817
+
818
+ import { useChat } from '@ai-sdk/react';
819
+ import type { MyUIMessage } from './api/chat/route';
820
+ import { DefaultChatTransport } from 'ai';
821
+
822
+ export default function Chat() {
823
+ // Use custom message type defined on the server (optional for type-safety)
824
+ const { messages } = useChat<MyUIMessage>({
825
+ transport: new DefaultChatTransport({
826
+ api: '/api/chat',
827
+ }),
828
+ });
829
+
830
+ return (
831
+ <div className="flex flex-col w-full max-w-md py-24 mx-auto stretch">
832
+ {messages.map(m => (
833
+ <div key={m.id} className="whitespace-pre-wrap">
834
+ {m.role === 'user' ? 'User: ' : 'AI: '}
835
+ {m.parts.map(part => {
836
+ if (part.type === 'text') {
837
+ return part.text;
838
+ }
839
+ })}
840
+ {/* Render usage via metadata */}
841
+ {m.metadata?.totalUsage && (
842
+ <div>Total usage: {m.metadata?.totalUsage.totalTokens} tokens</div>
843
+ )}
844
+ </div>
845
+ ))}
846
+ </div>
847
+ );
848
+ }
849
+ ```
850
+
851
+ You can also access your metadata from the `onFinish` callback of `useChat`:
852
+
853
+ ```tsx
854
+ 'use client';
855
+
856
+ import { useChat } from '@ai-sdk/react';
857
+ import type { MyUIMessage } from './api/chat/route';
858
+ import { DefaultChatTransport } from 'ai';
859
+
860
+ export default function Chat() {
861
+ // Use custom message type defined on the server (optional for type-safety)
862
+ const { messages } = useChat<MyUIMessage>({
863
+ transport: new DefaultChatTransport({
864
+ api: '/api/chat',
865
+ }),
866
+ onFinish: ({ message }) => {
867
+ // Access message metadata via onFinish callback
868
+ console.log(message.metadata?.totalUsage);
869
+ },
870
+ });
871
+ }
872
+ ```
873
+
874
+ ### Text Streams
875
+
876
+ `useChat` can handle plain text streams by using the `TextStreamChatTransport`:
877
+
878
+ ```tsx filename="app/page.tsx" highlight="7"
879
+ 'use client';
880
+
881
+ import { useChat } from '@ai-sdk/react';
882
+ import { TextStreamChatTransport } from 'ai';
883
+
884
+ export default function Chat() {
885
+ const { messages } = useChat({
886
+ transport: new TextStreamChatTransport({
887
+ api: '/api/chat',
888
+ }),
889
+ });
890
+
891
+ return <>...</>;
892
+ }
893
+ ```
894
+
895
+ This configuration also works with other backend servers that stream plain text.
896
+ Check out the [stream protocol guide](/docs/ai-sdk-ui/stream-protocol) for more information.
897
+
898
+ <Note>
899
+ When using `TextStreamChatTransport`, tool calls, usage information and finish
900
+ reasons are not available.
901
+ </Note>
902
+
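+ On the server side, any endpoint that streams plain text will work. With `streamText`, a minimal sketch uses `toTextStreamResponse`:
+
+ ```ts filename="app/api/chat/route.ts"
+ import { convertToModelMessages, streamText, UIMessage } from 'ai';
+ __PROVIDER_IMPORT__;
+
+ export async function POST(req: Request) {
+   const { messages }: { messages: UIMessage[] } = await req.json();
+
+   const result = streamText({
+     model: __MODEL__,
+     messages: await convertToModelMessages(messages),
+   });
+
+   // Stream plain text instead of the UI message stream protocol.
+   return result.toTextStreamResponse();
+ }
+ ```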
903
+ ## Reasoning
904
+
905
+ Some models such as DeepSeek `deepseek-r1`
906
+ and Anthropic `claude-3-7-sonnet-20250219` support reasoning tokens.
907
+ These tokens are typically sent before the message content.
908
+ You can forward them to the client with the `sendReasoning` option:
909
+
910
+ ```ts filename="app/api/chat/route.ts" highlight="13"
911
+ import { convertToModelMessages, streamText, UIMessage } from 'ai';
912
+
913
+ export async function POST(req: Request) {
914
+ const { messages }: { messages: UIMessage[] } = await req.json();
915
+
916
+ const result = streamText({
917
+ model: 'deepseek/deepseek-r1',
918
+ messages: await convertToModelMessages(messages),
919
+ });
920
+
921
+ return result.toUIMessageStreamResponse({
922
+ sendReasoning: true,
923
+ });
924
+ }
925
+ ```
926
+
927
+ On the client side, you can access the reasoning parts of the message object.
928
+
929
+ Reasoning parts have a `text` property that contains the reasoning content.
930
+
931
+ ```tsx filename="app/page.tsx"
932
+ messages.map(message => (
933
+ <div key={message.id}>
934
+ {message.role === 'user' ? 'User: ' : 'AI: '}
935
+ {message.parts.map((part, index) => {
936
+ // text parts:
937
+ if (part.type === 'text') {
938
+ return <div key={index}>{part.text}</div>;
939
+ }
940
+
941
+ // reasoning parts:
942
+ if (part.type === 'reasoning') {
943
+ return <pre key={index}>{part.text}</pre>;
944
+ }
945
+ })}
946
+ </div>
947
+ ));
948
+ ```
949
+
950
+ ## Sources
951
+
952
+ Some providers such as [Perplexity](/providers/ai-sdk-providers/perplexity#sources) and
953
+ [Google Generative AI](/providers/ai-sdk-providers/google-generative-ai#sources) include sources in the response.
954
+
955
+ Currently sources are limited to web pages that ground the response.
956
+ You can forward them to the client with the `sendSources` option:
957
+
958
+ ```ts filename="app/api/chat/route.ts" highlight="13"
959
+ import { convertToModelMessages, streamText, UIMessage } from 'ai';
960
+
961
+ export async function POST(req: Request) {
962
+ const { messages }: { messages: UIMessage[] } = await req.json();
963
+
964
+ const result = streamText({
965
+ model: 'perplexity/sonar-pro',
966
+ messages: await convertToModelMessages(messages),
967
+ });
968
+
969
+ return result.toUIMessageStreamResponse({
970
+ sendSources: true,
971
+ });
972
+ }
973
+ ```
974
+
975
+ On the client side, you can access source parts of the message object.
976
+ There are two types of sources: `source-url` for web pages and `source-document` for documents.
977
+ Here is an example that renders both types of sources:
978
+
979
+ ```tsx filename="app/page.tsx"
980
+ messages.map(message => (
981
+ <div key={message.id}>
982
+ {message.role === 'user' ? 'User: ' : 'AI: '}
983
+
984
+ {/* Render URL sources */}
985
+ {message.parts
986
+ .filter(part => part.type === 'source-url')
987
+ .map(part => (
988
+ <span key={`source-${part.id}`}>
989
+ [
990
+ <a href={part.url} target="_blank">
991
+ {part.title ?? new URL(part.url).hostname}
992
+ </a>
993
+ ]
994
+ </span>
995
+ ))}
996
+
997
+ {/* Render document sources */}
998
+ {message.parts
999
+ .filter(part => part.type === 'source-document')
1000
+ .map(part => (
1001
+ <span key={`source-${part.id}`}>
1002
+ [<span>{part.title ?? `Document ${part.id}`}</span>]
1003
+ </span>
1004
+ ))}
1005
+ </div>
1006
+ ));
1007
+ ```
1008
+
1009
+ ## Image Generation
1010
+
1011
+ Some models such as Google `gemini-2.5-flash-image-preview` support image generation.
1012
+ When images are generated, they are exposed as files to the client.
1013
+ On the client side, you can access file parts of the message object
1014
+ and render them as images.
1015
+
1016
+ ```tsx filename="app/page.tsx"
1017
+ messages.map(message => (
1018
+ <div key={message.id}>
1019
+ {message.role === 'user' ? 'User: ' : 'AI: '}
1020
+ {message.parts.map((part, index) => {
1021
+ if (part.type === 'text') {
1022
+ return <div key={index}>{part.text}</div>;
1023
+ } else if (part.type === 'file' && part.mediaType.startsWith('image/')) {
1024
+ return <img key={index} src={part.url} alt="Generated image" />;
1025
+ }
1026
+ })}
1027
+ </div>
1028
+ ));
1029
+ ```
1030
+
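+ On the server, the route follows the same pattern as the earlier examples in this guide; here is a sketch that assumes the gateway model id for the image-capable model mentioned above:
+
+ ```ts filename="app/api/chat/route.ts"
+ import { convertToModelMessages, streamText, UIMessage } from 'ai';
+
+ export async function POST(req: Request) {
+   const { messages }: { messages: UIMessage[] } = await req.json();
+
+   const result = streamText({
+     // Assumed gateway id for the image-capable model mentioned above.
+     model: 'google/gemini-2.5-flash-image-preview',
+     messages: await convertToModelMessages(messages),
+   });
+
+   // Generated images are streamed to the client as file parts.
+   return result.toUIMessageStreamResponse();
+ }
+ ```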
1031
+ ## Attachments
1032
+
1033
+ The `useChat` hook supports sending file attachments along with a message as well as rendering them on the client. This can be useful for building applications that involve sending images, files, or other media content to the AI provider.
1034
+
1035
+ There are two ways to send files with a message: using a `FileList` object from file inputs or using an array of file objects.
1036
+
1037
+ ### FileList
1038
+
1039
+ By using `FileList`, you can send multiple files as attachments along with a message using the file input element. The `useChat` hook will automatically convert them into data URLs and send them to the AI provider.
1040
+
1041
+ <Note>
1042
+ Currently, only `image/*` and `text/*` content types get automatically
1043
+ converted into [multi-modal content
1044
+ parts](/docs/foundations/prompts#multi-modal-messages). You will need to
1045
+ handle other content types manually.
1046
+ </Note>
1047
+
1048
+ ```tsx filename="app/page.tsx"
1049
+ 'use client';
1050
+
1051
+ import { useChat } from '@ai-sdk/react';
1052
+ import { useRef, useState } from 'react';
1053
+
1054
+ export default function Page() {
1055
+ const { messages, sendMessage, status } = useChat();
1056
+
1057
+ const [input, setInput] = useState('');
1058
+ const [files, setFiles] = useState<FileList | undefined>(undefined);
1059
+ const fileInputRef = useRef<HTMLInputElement>(null);
1060
+
1061
+ return (
1062
+ <div>
1063
+ <div>
1064
+ {messages.map(message => (
1065
+ <div key={message.id}>
1066
+ <div>{`${message.role}: `}</div>
1067
+
1068
+ <div>
1069
+ {message.parts.map((part, index) => {
1070
+ if (part.type === 'text') {
1071
+ return <span key={index}>{part.text}</span>;
1072
+ }
1073
+
1074
+ if (
1075
+ part.type === 'file' &&
1076
+ part.mediaType?.startsWith('image/')
1077
+ ) {
1078
+ return <img key={index} src={part.url} alt={part.filename} />;
1079
+ }
1080
+
1081
+ return null;
1082
+ })}
1083
+ </div>
1084
+ </div>
1085
+ ))}
1086
+ </div>
1087
+
1088
+ <form
1089
+ onSubmit={event => {
1090
+ event.preventDefault();
1091
+ if (input.trim()) {
1092
+ sendMessage({
1093
+ text: input,
1094
+ files,
1095
+ });
1096
+ setInput('');
1097
+ setFiles(undefined);
1098
+
1099
+ if (fileInputRef.current) {
1100
+ fileInputRef.current.value = '';
1101
+ }
1102
+ }
1103
+ }}
1104
+ >
1105
+ <input
1106
+ type="file"
1107
+ onChange={event => {
1108
+ if (event.target.files) {
1109
+ setFiles(event.target.files);
1110
+ }
1111
+ }}
1112
+ multiple
1113
+ ref={fileInputRef}
1114
+ />
1115
+ <input
1116
+ value={input}
1117
+ placeholder="Send message..."
1118
+ onChange={e => setInput(e.target.value)}
1119
+ disabled={status !== 'ready'}
1120
+ />
1121
+ </form>
1122
+ </div>
1123
+ );
1124
+ }
1125
+ ```
1126
+
1127
+ ### File Objects
1128
+
1129
+ You can also send files as objects along with a message. This can be useful for sending pre-uploaded files or data URLs.
1130
+
1131
+ ```tsx filename="app/page.tsx"
1132
+ 'use client';
1133
+
1134
+ import { useChat } from '@ai-sdk/react';
1135
+ import { useState } from 'react';
1136
+ import { FileUIPart } from 'ai';
1137
+
1138
+ export default function Page() {
1139
+ const { messages, sendMessage, status } = useChat();
1140
+
1141
+ const [input, setInput] = useState('');
1142
+ const [files] = useState<FileUIPart[]>([
1143
+ {
1144
+ type: 'file',
1145
+ filename: 'earth.png',
1146
+ mediaType: 'image/png',
1147
+ url: 'https://example.com/earth.png',
1148
+ },
1149
+ {
1150
+ type: 'file',
1151
+ filename: 'moon.png',
1152
+ mediaType: 'image/png',
1153
+ url: 'data:image/png;base64,iVBORw0KGgo...',
1154
+ },
1155
+ ]);
1156
+
1157
+ return (
1158
+ <div>
1159
+ <div>
1160
+ {messages.map(message => (
1161
+ <div key={message.id}>
1162
+ <div>{`${message.role}: `}</div>
1163
+
1164
+ <div>
1165
+ {message.parts.map((part, index) => {
1166
+ if (part.type === 'text') {
1167
+ return <span key={index}>{part.text}</span>;
1168
+ }
1169
+
1170
+ if (
1171
+ part.type === 'file' &&
1172
+ part.mediaType?.startsWith('image/')
1173
+ ) {
1174
+ return <img key={index} src={part.url} alt={part.filename} />;
1175
+ }
1176
+
1177
+ return null;
1178
+ })}
1179
+ </div>
1180
+ </div>
1181
+ ))}
1182
+ </div>
1183
+
1184
+ <form
1185
+ onSubmit={event => {
1186
+ event.preventDefault();
1187
+ if (input.trim()) {
1188
+ sendMessage({
1189
+ text: input,
1190
+ files,
1191
+ });
1192
+ setInput('');
1193
+ }
1194
+ }}
1195
+ >
1196
+ <input
1197
+ value={input}
1198
+ placeholder="Send message..."
1199
+ onChange={e => setInput(e.target.value)}
1200
+ disabled={status !== 'ready'}
1201
+ />
1202
+ </form>
1203
+ </div>
1204
+ );
1205
+ }
1206
+ ```
1207
+
1208
+ ## Type Inference for Tools
1209
+
1210
+ When working with tools in TypeScript, AI SDK UI provides type inference helpers to ensure type safety for your tool inputs and outputs.
1211
+
1212
+ ### InferUITool
1213
+
1214
+ The `InferUITool` type helper infers the input and output types of a single tool for use in UI messages:
1215
+
1216
+ ```tsx
1217
+ import { InferUITool } from 'ai';
1218
+ import { z } from 'zod';
1219
+
1220
+ const weatherTool = {
1221
+ description: 'Get the current weather',
1222
+ inputSchema: z.object({
1223
+ location: z.string().describe('The city and state'),
1224
+ }),
1225
+ execute: async ({ location }) => {
1226
+ return `The weather in ${location} is sunny.`;
1227
+ },
1228
+ };
1229
+
1230
+ // Infer the types from the tool
1231
+ type WeatherUITool = InferUITool<typeof weatherTool>;
1232
+ // This creates a type with:
1233
+ // {
1234
+ // input: { location: string };
1235
+ // output: string;
1236
+ // }
1237
+ ```
1238
+
1239
+ ### InferUITools
1240
+
1241
+ The `InferUITools` type helper infers the input and output types of a `ToolSet`:
1242
+
1243
+ ```tsx
1244
+ import { InferUITools, ToolSet } from 'ai';
1245
+ import { z } from 'zod';
1246
+
1247
+ const tools = {
1248
+ weather: {
1249
+ description: 'Get the current weather',
1250
+ inputSchema: z.object({
1251
+ location: z.string().describe('The city and state'),
1252
+ }),
1253
+ execute: async ({ location }) => {
1254
+ return `The weather in ${location} is sunny.`;
1255
+ },
1256
+ },
1257
+ calculator: {
1258
+ description: 'Perform basic arithmetic',
1259
+ inputSchema: z.object({
1260
+ operation: z.enum(['add', 'subtract', 'multiply', 'divide']),
1261
+ a: z.number(),
1262
+ b: z.number(),
1263
+ }),
1264
+ execute: async ({ operation, a, b }) => {
1265
+ switch (operation) {
1266
+ case 'add':
1267
+ return a + b;
1268
+ case 'subtract':
1269
+ return a - b;
1270
+ case 'multiply':
1271
+ return a * b;
1272
+ case 'divide':
1273
+ return a / b;
1274
+ }
1275
+ },
1276
+ },
1277
+ } satisfies ToolSet;
1278
+
1279
+ // Infer the types from the tool set
1280
+ type MyUITools = InferUITools<typeof tools>;
1281
+ // This creates a type with:
1282
+ // {
1283
+ // weather: { input: { location: string }; output: string };
1284
+ // calculator: { input: { operation: 'add' | 'subtract' | 'multiply' | 'divide'; a: number; b: number }; output: number };
1285
+ // }
1286
+ ```
1287
+
1288
+ ### Using Inferred Types
1289
+
1290
+ You can use these inferred types to create a custom UIMessage type and pass it to various AI SDK UI functions:
1291
+
1292
+ ```tsx
1293
+ import { InferUITools, UIMessage, UIDataTypes } from 'ai';
1294
+
1295
+ type MyUITools = InferUITools<typeof tools>;
1296
+ type MyUIMessage = UIMessage<never, UIDataTypes, MyUITools>;
1297
+ ```
1298
+
1299
+ Pass the custom type to `useChat` or `createUIMessageStream`:
1300
+
1301
+ ```tsx
1302
+ import { useChat } from '@ai-sdk/react';
1303
+ import { createUIMessageStream } from 'ai';
1304
+ import type { MyUIMessage } from './types';
1305
+
1306
+ // With useChat
1307
+ const { messages } = useChat<MyUIMessage>();
1308
+
1309
+ // With createUIMessageStream
1310
+ const stream = createUIMessageStream<MyUIMessage>(/* ... */);
1311
+ ```
1312
+
1313
+ This provides full type safety for tool inputs and outputs on the client and server.
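+
+ As a sketch of what this enables in the UI, tool parts in `message.parts` are then typed per tool name (this assumes the `weather` tool from the examples above and the `./types` module shown earlier):
+
+ ```tsx
+ 'use client';
+
+ import { useChat } from '@ai-sdk/react';
+ import type { MyUIMessage } from './types';
+
+ export default function Chat() {
+   const { messages } = useChat<MyUIMessage>();
+
+   return (
+     <>
+       {messages.map(message => (
+         <div key={message.id}>
+           {message.parts.map((part, index) => {
+             if (part.type === 'text') {
+               return <span key={index}>{part.text}</span>;
+             }
+             // Tool parts are named `tool-<toolName>` and carry typed input/output.
+             if (part.type === 'tool-weather') {
+               return (
+                 <div key={index}>
+                   {part.state === 'output-available'
+                     ? part.output
+                     : 'Getting the weather...'}
+                 </div>
+               );
+             }
+             return null;
+           })}
+         </div>
+       ))}
+     </>
+   );
+ }
+ ```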