ai 6.0.30 → 6.0.32

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (250) hide show
  1. package/CHANGELOG.md +13 -0
  2. package/dist/index.js +1 -1
  3. package/dist/index.mjs +1 -1
  4. package/dist/internal/index.js +1 -1
  5. package/dist/internal/index.mjs +1 -1
  6. package/docs/00-introduction/index.mdx +76 -0
  7. package/docs/02-foundations/01-overview.mdx +43 -0
  8. package/docs/02-foundations/02-providers-and-models.mdx +163 -0
  9. package/docs/02-foundations/03-prompts.mdx +620 -0
  10. package/docs/02-foundations/04-tools.mdx +160 -0
  11. package/docs/02-foundations/05-streaming.mdx +62 -0
  12. package/docs/02-foundations/index.mdx +43 -0
  13. package/docs/02-getting-started/00-choosing-a-provider.mdx +110 -0
  14. package/docs/02-getting-started/01-navigating-the-library.mdx +85 -0
  15. package/docs/02-getting-started/02-nextjs-app-router.mdx +556 -0
  16. package/docs/02-getting-started/03-nextjs-pages-router.mdx +542 -0
  17. package/docs/02-getting-started/04-svelte.mdx +627 -0
  18. package/docs/02-getting-started/05-nuxt.mdx +566 -0
  19. package/docs/02-getting-started/06-nodejs.mdx +512 -0
  20. package/docs/02-getting-started/07-expo.mdx +766 -0
  21. package/docs/02-getting-started/08-tanstack-start.mdx +583 -0
  22. package/docs/02-getting-started/index.mdx +44 -0
  23. package/docs/03-agents/01-overview.mdx +96 -0
  24. package/docs/03-agents/02-building-agents.mdx +367 -0
  25. package/docs/03-agents/03-workflows.mdx +370 -0
  26. package/docs/03-agents/04-loop-control.mdx +350 -0
  27. package/docs/03-agents/05-configuring-call-options.mdx +286 -0
  28. package/docs/03-agents/index.mdx +40 -0
  29. package/docs/03-ai-sdk-core/01-overview.mdx +33 -0
  30. package/docs/03-ai-sdk-core/05-generating-text.mdx +600 -0
  31. package/docs/03-ai-sdk-core/10-generating-structured-data.mdx +662 -0
  32. package/docs/03-ai-sdk-core/15-tools-and-tool-calling.mdx +1102 -0
  33. package/docs/03-ai-sdk-core/16-mcp-tools.mdx +375 -0
  34. package/docs/03-ai-sdk-core/20-prompt-engineering.mdx +144 -0
  35. package/docs/03-ai-sdk-core/25-settings.mdx +198 -0
  36. package/docs/03-ai-sdk-core/30-embeddings.mdx +247 -0
  37. package/docs/03-ai-sdk-core/31-reranking.mdx +218 -0
  38. package/docs/03-ai-sdk-core/35-image-generation.mdx +341 -0
  39. package/docs/03-ai-sdk-core/36-transcription.mdx +173 -0
  40. package/docs/03-ai-sdk-core/37-speech.mdx +167 -0
  41. package/docs/03-ai-sdk-core/40-middleware.mdx +480 -0
  42. package/docs/03-ai-sdk-core/45-provider-management.mdx +349 -0
  43. package/docs/03-ai-sdk-core/50-error-handling.mdx +149 -0
  44. package/docs/03-ai-sdk-core/55-testing.mdx +218 -0
  45. package/docs/03-ai-sdk-core/60-telemetry.mdx +313 -0
  46. package/docs/03-ai-sdk-core/65-devtools.mdx +107 -0
  47. package/docs/03-ai-sdk-core/index.mdx +88 -0
  48. package/docs/04-ai-sdk-ui/01-overview.mdx +44 -0
  49. package/docs/04-ai-sdk-ui/02-chatbot.mdx +1313 -0
  50. package/docs/04-ai-sdk-ui/03-chatbot-message-persistence.mdx +535 -0
  51. package/docs/04-ai-sdk-ui/03-chatbot-resume-streams.mdx +263 -0
  52. package/docs/04-ai-sdk-ui/03-chatbot-tool-usage.mdx +682 -0
  53. package/docs/04-ai-sdk-ui/04-generative-user-interfaces.mdx +389 -0
  54. package/docs/04-ai-sdk-ui/05-completion.mdx +186 -0
  55. package/docs/04-ai-sdk-ui/08-object-generation.mdx +344 -0
  56. package/docs/04-ai-sdk-ui/20-streaming-data.mdx +397 -0
  57. package/docs/04-ai-sdk-ui/21-error-handling.mdx +190 -0
  58. package/docs/04-ai-sdk-ui/21-transport.mdx +174 -0
  59. package/docs/04-ai-sdk-ui/24-reading-ui-message-streams.mdx +104 -0
  60. package/docs/04-ai-sdk-ui/25-message-metadata.mdx +152 -0
  61. package/docs/04-ai-sdk-ui/50-stream-protocol.mdx +477 -0
  62. package/docs/04-ai-sdk-ui/index.mdx +64 -0
  63. package/docs/05-ai-sdk-rsc/01-overview.mdx +45 -0
  64. package/docs/05-ai-sdk-rsc/02-streaming-react-components.mdx +209 -0
  65. package/docs/05-ai-sdk-rsc/03-generative-ui-state.mdx +279 -0
  66. package/docs/05-ai-sdk-rsc/03-saving-and-restoring-states.mdx +105 -0
  67. package/docs/05-ai-sdk-rsc/04-multistep-interfaces.mdx +282 -0
  68. package/docs/05-ai-sdk-rsc/05-streaming-values.mdx +158 -0
  69. package/docs/05-ai-sdk-rsc/06-loading-state.mdx +273 -0
  70. package/docs/05-ai-sdk-rsc/08-error-handling.mdx +96 -0
  71. package/docs/05-ai-sdk-rsc/09-authentication.mdx +42 -0
  72. package/docs/05-ai-sdk-rsc/10-migrating-to-ui.mdx +722 -0
  73. package/docs/05-ai-sdk-rsc/index.mdx +58 -0
  74. package/docs/06-advanced/01-prompt-engineering.mdx +96 -0
  75. package/docs/06-advanced/02-stopping-streams.mdx +184 -0
  76. package/docs/06-advanced/03-backpressure.mdx +173 -0
  77. package/docs/06-advanced/04-caching.mdx +169 -0
  78. package/docs/06-advanced/05-multiple-streamables.mdx +68 -0
  79. package/docs/06-advanced/06-rate-limiting.mdx +60 -0
  80. package/docs/06-advanced/07-rendering-ui-with-language-models.mdx +213 -0
  81. package/docs/06-advanced/08-model-as-router.mdx +120 -0
  82. package/docs/06-advanced/09-multistep-interfaces.mdx +115 -0
  83. package/docs/06-advanced/09-sequential-generations.mdx +55 -0
  84. package/docs/06-advanced/10-vercel-deployment-guide.mdx +117 -0
  85. package/docs/06-advanced/index.mdx +11 -0
  86. package/docs/07-reference/01-ai-sdk-core/01-generate-text.mdx +2142 -0
  87. package/docs/07-reference/01-ai-sdk-core/02-stream-text.mdx +3215 -0
  88. package/docs/07-reference/01-ai-sdk-core/03-generate-object.mdx +780 -0
  89. package/docs/07-reference/01-ai-sdk-core/04-stream-object.mdx +1140 -0
  90. package/docs/07-reference/01-ai-sdk-core/05-embed.mdx +190 -0
  91. package/docs/07-reference/01-ai-sdk-core/06-embed-many.mdx +171 -0
  92. package/docs/07-reference/01-ai-sdk-core/06-rerank.mdx +309 -0
  93. package/docs/07-reference/01-ai-sdk-core/10-generate-image.mdx +227 -0
  94. package/docs/07-reference/01-ai-sdk-core/11-transcribe.mdx +138 -0
  95. package/docs/07-reference/01-ai-sdk-core/12-generate-speech.mdx +214 -0
  96. package/docs/07-reference/01-ai-sdk-core/15-agent.mdx +203 -0
  97. package/docs/07-reference/01-ai-sdk-core/16-tool-loop-agent.mdx +449 -0
  98. package/docs/07-reference/01-ai-sdk-core/17-create-agent-ui-stream.mdx +148 -0
  99. package/docs/07-reference/01-ai-sdk-core/18-create-agent-ui-stream-response.mdx +168 -0
  100. package/docs/07-reference/01-ai-sdk-core/18-pipe-agent-ui-stream-to-response.mdx +144 -0
  101. package/docs/07-reference/01-ai-sdk-core/20-tool.mdx +196 -0
  102. package/docs/07-reference/01-ai-sdk-core/22-dynamic-tool.mdx +175 -0
  103. package/docs/07-reference/01-ai-sdk-core/23-create-mcp-client.mdx +410 -0
  104. package/docs/07-reference/01-ai-sdk-core/24-mcp-stdio-transport.mdx +68 -0
  105. package/docs/07-reference/01-ai-sdk-core/25-json-schema.mdx +94 -0
  106. package/docs/07-reference/01-ai-sdk-core/26-zod-schema.mdx +109 -0
  107. package/docs/07-reference/01-ai-sdk-core/27-valibot-schema.mdx +55 -0
  108. package/docs/07-reference/01-ai-sdk-core/28-output.mdx +342 -0
  109. package/docs/07-reference/01-ai-sdk-core/30-model-message.mdx +415 -0
  110. package/docs/07-reference/01-ai-sdk-core/31-ui-message.mdx +246 -0
  111. package/docs/07-reference/01-ai-sdk-core/32-validate-ui-messages.mdx +101 -0
  112. package/docs/07-reference/01-ai-sdk-core/33-safe-validate-ui-messages.mdx +113 -0
  113. package/docs/07-reference/01-ai-sdk-core/40-provider-registry.mdx +182 -0
  114. package/docs/07-reference/01-ai-sdk-core/42-custom-provider.mdx +121 -0
  115. package/docs/07-reference/01-ai-sdk-core/50-cosine-similarity.mdx +52 -0
  116. package/docs/07-reference/01-ai-sdk-core/60-wrap-language-model.mdx +59 -0
  117. package/docs/07-reference/01-ai-sdk-core/61-wrap-image-model.mdx +64 -0
  118. package/docs/07-reference/01-ai-sdk-core/65-language-model-v2-middleware.mdx +46 -0
  119. package/docs/07-reference/01-ai-sdk-core/66-extract-reasoning-middleware.mdx +68 -0
  120. package/docs/07-reference/01-ai-sdk-core/67-simulate-streaming-middleware.mdx +71 -0
  121. package/docs/07-reference/01-ai-sdk-core/68-default-settings-middleware.mdx +80 -0
  122. package/docs/07-reference/01-ai-sdk-core/69-add-tool-input-examples-middleware.mdx +155 -0
  123. package/docs/07-reference/01-ai-sdk-core/70-extract-json-middleware.mdx +147 -0
  124. package/docs/07-reference/01-ai-sdk-core/70-step-count-is.mdx +84 -0
  125. package/docs/07-reference/01-ai-sdk-core/71-has-tool-call.mdx +120 -0
  126. package/docs/07-reference/01-ai-sdk-core/75-simulate-readable-stream.mdx +94 -0
  127. package/docs/07-reference/01-ai-sdk-core/80-smooth-stream.mdx +145 -0
  128. package/docs/07-reference/01-ai-sdk-core/90-generate-id.mdx +43 -0
  129. package/docs/07-reference/01-ai-sdk-core/91-create-id-generator.mdx +89 -0
  130. package/docs/07-reference/01-ai-sdk-core/index.mdx +159 -0
  131. package/docs/07-reference/02-ai-sdk-ui/01-use-chat.mdx +446 -0
  132. package/docs/07-reference/02-ai-sdk-ui/02-use-completion.mdx +179 -0
  133. package/docs/07-reference/02-ai-sdk-ui/03-use-object.mdx +178 -0
  134. package/docs/07-reference/02-ai-sdk-ui/31-convert-to-model-messages.mdx +230 -0
  135. package/docs/07-reference/02-ai-sdk-ui/32-prune-messages.mdx +108 -0
  136. package/docs/07-reference/02-ai-sdk-ui/40-create-ui-message-stream.mdx +151 -0
  137. package/docs/07-reference/02-ai-sdk-ui/41-create-ui-message-stream-response.mdx +113 -0
  138. package/docs/07-reference/02-ai-sdk-ui/42-pipe-ui-message-stream-to-response.mdx +73 -0
  139. package/docs/07-reference/02-ai-sdk-ui/43-read-ui-message-stream.mdx +57 -0
  140. package/docs/07-reference/02-ai-sdk-ui/46-infer-ui-tools.mdx +99 -0
  141. package/docs/07-reference/02-ai-sdk-ui/47-infer-ui-tool.mdx +75 -0
  142. package/docs/07-reference/02-ai-sdk-ui/50-direct-chat-transport.mdx +333 -0
  143. package/docs/07-reference/02-ai-sdk-ui/index.mdx +89 -0
  144. package/docs/07-reference/03-ai-sdk-rsc/01-stream-ui.mdx +767 -0
  145. package/docs/07-reference/03-ai-sdk-rsc/02-create-ai.mdx +90 -0
  146. package/docs/07-reference/03-ai-sdk-rsc/03-create-streamable-ui.mdx +91 -0
  147. package/docs/07-reference/03-ai-sdk-rsc/04-create-streamable-value.mdx +48 -0
  148. package/docs/07-reference/03-ai-sdk-rsc/05-read-streamable-value.mdx +78 -0
  149. package/docs/07-reference/03-ai-sdk-rsc/06-get-ai-state.mdx +50 -0
  150. package/docs/07-reference/03-ai-sdk-rsc/07-get-mutable-ai-state.mdx +70 -0
  151. package/docs/07-reference/03-ai-sdk-rsc/08-use-ai-state.mdx +26 -0
  152. package/docs/07-reference/03-ai-sdk-rsc/09-use-actions.mdx +42 -0
  153. package/docs/07-reference/03-ai-sdk-rsc/10-use-ui-state.mdx +35 -0
  154. package/docs/07-reference/03-ai-sdk-rsc/11-use-streamable-value.mdx +46 -0
  155. package/docs/07-reference/03-ai-sdk-rsc/20-render.mdx +262 -0
  156. package/docs/07-reference/03-ai-sdk-rsc/index.mdx +67 -0
  157. package/docs/07-reference/04-stream-helpers/01-ai-stream.mdx +89 -0
  158. package/docs/07-reference/04-stream-helpers/02-streaming-text-response.mdx +79 -0
  159. package/docs/07-reference/04-stream-helpers/05-stream-to-response.mdx +108 -0
  160. package/docs/07-reference/04-stream-helpers/07-openai-stream.mdx +77 -0
  161. package/docs/07-reference/04-stream-helpers/08-anthropic-stream.mdx +79 -0
  162. package/docs/07-reference/04-stream-helpers/09-aws-bedrock-stream.mdx +91 -0
  163. package/docs/07-reference/04-stream-helpers/10-aws-bedrock-anthropic-stream.mdx +96 -0
  164. package/docs/07-reference/04-stream-helpers/10-aws-bedrock-messages-stream.mdx +96 -0
  165. package/docs/07-reference/04-stream-helpers/11-aws-bedrock-cohere-stream.mdx +93 -0
  166. package/docs/07-reference/04-stream-helpers/12-aws-bedrock-llama-2-stream.mdx +93 -0
  167. package/docs/07-reference/04-stream-helpers/13-cohere-stream.mdx +78 -0
  168. package/docs/07-reference/04-stream-helpers/14-google-generative-ai-stream.mdx +85 -0
  169. package/docs/07-reference/04-stream-helpers/15-hugging-face-stream.mdx +84 -0
  170. package/docs/07-reference/04-stream-helpers/16-langchain-adapter.mdx +98 -0
  171. package/docs/07-reference/04-stream-helpers/16-llamaindex-adapter.mdx +70 -0
  172. package/docs/07-reference/04-stream-helpers/17-mistral-stream.mdx +81 -0
  173. package/docs/07-reference/04-stream-helpers/18-replicate-stream.mdx +83 -0
  174. package/docs/07-reference/04-stream-helpers/19-inkeep-stream.mdx +80 -0
  175. package/docs/07-reference/04-stream-helpers/index.mdx +103 -0
  176. package/docs/07-reference/05-ai-sdk-errors/ai-api-call-error.mdx +30 -0
  177. package/docs/07-reference/05-ai-sdk-errors/ai-download-error.mdx +27 -0
  178. package/docs/07-reference/05-ai-sdk-errors/ai-empty-response-body-error.mdx +24 -0
  179. package/docs/07-reference/05-ai-sdk-errors/ai-invalid-argument-error.mdx +26 -0
  180. package/docs/07-reference/05-ai-sdk-errors/ai-invalid-data-content-error.mdx +25 -0
  181. package/docs/07-reference/05-ai-sdk-errors/ai-invalid-data-content.mdx +26 -0
  182. package/docs/07-reference/05-ai-sdk-errors/ai-invalid-message-role-error.mdx +25 -0
  183. package/docs/07-reference/05-ai-sdk-errors/ai-invalid-prompt-error.mdx +47 -0
  184. package/docs/07-reference/05-ai-sdk-errors/ai-invalid-response-data-error.mdx +25 -0
  185. package/docs/07-reference/05-ai-sdk-errors/ai-invalid-tool-approval-error.mdx +25 -0
  186. package/docs/07-reference/05-ai-sdk-errors/ai-invalid-tool-input-error.mdx +27 -0
  187. package/docs/07-reference/05-ai-sdk-errors/ai-json-parse-error.mdx +25 -0
  188. package/docs/07-reference/05-ai-sdk-errors/ai-load-api-key-error.mdx +24 -0
  189. package/docs/07-reference/05-ai-sdk-errors/ai-load-setting-error.mdx +24 -0
  190. package/docs/07-reference/05-ai-sdk-errors/ai-message-conversion-error.mdx +25 -0
  191. package/docs/07-reference/05-ai-sdk-errors/ai-no-content-generated-error.mdx +24 -0
  192. package/docs/07-reference/05-ai-sdk-errors/ai-no-image-generated-error.mdx +36 -0
  193. package/docs/07-reference/05-ai-sdk-errors/ai-no-object-generated-error.mdx +43 -0
  194. package/docs/07-reference/05-ai-sdk-errors/ai-no-speech-generated-error.mdx +25 -0
  195. package/docs/07-reference/05-ai-sdk-errors/ai-no-such-model-error.mdx +26 -0
  196. package/docs/07-reference/05-ai-sdk-errors/ai-no-such-provider-error.mdx +28 -0
  197. package/docs/07-reference/05-ai-sdk-errors/ai-no-such-tool-error.mdx +26 -0
  198. package/docs/07-reference/05-ai-sdk-errors/ai-no-transcript-generated-error.mdx +25 -0
  199. package/docs/07-reference/05-ai-sdk-errors/ai-retry-error.mdx +27 -0
  200. package/docs/07-reference/05-ai-sdk-errors/ai-too-many-embedding-values-for-call-error.mdx +27 -0
  201. package/docs/07-reference/05-ai-sdk-errors/ai-tool-call-not-found-for-approval-error.mdx +26 -0
  202. package/docs/07-reference/05-ai-sdk-errors/ai-tool-call-repair-error.mdx +28 -0
  203. package/docs/07-reference/05-ai-sdk-errors/ai-type-validation-error.mdx +25 -0
  204. package/docs/07-reference/05-ai-sdk-errors/ai-unsupported-functionality-error.mdx +25 -0
  205. package/docs/07-reference/05-ai-sdk-errors/index.mdx +38 -0
  206. package/docs/07-reference/index.mdx +34 -0
  207. package/docs/08-migration-guides/00-versioning.mdx +46 -0
  208. package/docs/08-migration-guides/24-migration-guide-6-0.mdx +823 -0
  209. package/docs/08-migration-guides/25-migration-guide-5-0-data.mdx +882 -0
  210. package/docs/08-migration-guides/26-migration-guide-5-0.mdx +3427 -0
  211. package/docs/08-migration-guides/27-migration-guide-4-2.mdx +99 -0
  212. package/docs/08-migration-guides/28-migration-guide-4-1.mdx +14 -0
  213. package/docs/08-migration-guides/29-migration-guide-4-0.mdx +1157 -0
  214. package/docs/08-migration-guides/36-migration-guide-3-4.mdx +14 -0
  215. package/docs/08-migration-guides/37-migration-guide-3-3.mdx +64 -0
  216. package/docs/08-migration-guides/38-migration-guide-3-2.mdx +46 -0
  217. package/docs/08-migration-guides/39-migration-guide-3-1.mdx +168 -0
  218. package/docs/08-migration-guides/index.mdx +22 -0
  219. package/docs/09-troubleshooting/01-azure-stream-slow.mdx +33 -0
  220. package/docs/09-troubleshooting/02-client-side-function-calls-not-invoked.mdx +22 -0
  221. package/docs/09-troubleshooting/03-server-actions-in-client-components.mdx +40 -0
  222. package/docs/09-troubleshooting/04-strange-stream-output.mdx +36 -0
  223. package/docs/09-troubleshooting/05-streamable-ui-errors.mdx +16 -0
  224. package/docs/09-troubleshooting/05-tool-invocation-missing-result.mdx +106 -0
  225. package/docs/09-troubleshooting/06-streaming-not-working-when-deployed.mdx +31 -0
  226. package/docs/09-troubleshooting/06-streaming-not-working-when-proxied.mdx +31 -0
  227. package/docs/09-troubleshooting/06-timeout-on-vercel.mdx +60 -0
  228. package/docs/09-troubleshooting/07-unclosed-streams.mdx +34 -0
  229. package/docs/09-troubleshooting/08-use-chat-failed-to-parse-stream.mdx +26 -0
  230. package/docs/09-troubleshooting/09-client-stream-error.mdx +25 -0
  231. package/docs/09-troubleshooting/10-use-chat-tools-no-response.mdx +32 -0
  232. package/docs/09-troubleshooting/11-use-chat-custom-request-options.mdx +149 -0
  233. package/docs/09-troubleshooting/12-typescript-performance-zod.mdx +46 -0
  234. package/docs/09-troubleshooting/12-use-chat-an-error-occurred.mdx +59 -0
  235. package/docs/09-troubleshooting/13-repeated-assistant-messages.mdx +73 -0
  236. package/docs/09-troubleshooting/14-stream-abort-handling.mdx +73 -0
  237. package/docs/09-troubleshooting/14-tool-calling-with-structured-outputs.mdx +48 -0
  238. package/docs/09-troubleshooting/15-abort-breaks-resumable-streams.mdx +55 -0
  239. package/docs/09-troubleshooting/15-stream-text-not-working.mdx +33 -0
  240. package/docs/09-troubleshooting/16-streaming-status-delay.mdx +63 -0
  241. package/docs/09-troubleshooting/17-use-chat-stale-body-data.mdx +141 -0
  242. package/docs/09-troubleshooting/18-ontoolcall-type-narrowing.mdx +66 -0
  243. package/docs/09-troubleshooting/19-unsupported-model-version.mdx +50 -0
  244. package/docs/09-troubleshooting/20-no-object-generated-content-filter.mdx +72 -0
  245. package/docs/09-troubleshooting/30-model-is-not-assignable-to-type.mdx +21 -0
  246. package/docs/09-troubleshooting/40-typescript-cannot-find-namespace-jsx.mdx +24 -0
  247. package/docs/09-troubleshooting/50-react-maximum-update-depth-exceeded.mdx +39 -0
  248. package/docs/09-troubleshooting/60-jest-cannot-find-module-ai-rsc.mdx +22 -0
  249. package/docs/09-troubleshooting/index.mdx +11 -0
  250. package/package.json +7 -3
@@ -0,0 +1,766 @@
1
+ ---
2
+ title: Expo
3
+ description: Learn how to build your first agent with the AI SDK and Expo.
4
+ ---
5
+
6
+ # Expo Quickstart
7
+
8
+ In this quickstart tutorial, you'll build a simple agent with a streaming chat user interface with [Expo](https://expo.dev/). Along the way, you'll learn key concepts and techniques that are fundamental to using the SDK in your own projects.
9
+
10
+ If you are unfamiliar with the concepts of [Prompt Engineering](/docs/advanced/prompt-engineering) and [HTTP Streaming](/docs/advanced/why-streaming), you can optionally read these documents first.
11
+
12
+ ## Prerequisites
13
+
14
+ To follow this quickstart, you'll need:
15
+
16
+ - Node.js 18+ and pnpm installed on your local development machine.
17
+ - A [Vercel AI Gateway](https://vercel.com/ai-gateway) API key.
18
+
19
+ If you haven't obtained your Vercel AI Gateway API key, you can do so by [signing up](https://vercel.com/d?to=%2F%5Bteam%5D%2F%7E%2Fai&title=Go+to+AI+Gateway) on the Vercel website.
20
+
21
+ ## Create Your Application
22
+
23
+ Start by creating a new Expo application. This command will create a new directory named `my-ai-app` and set up a basic Expo application inside it.
24
+
25
+ <Snippet text="pnpm create expo-app@latest my-ai-app" />
26
+
27
+ Navigate to the newly created directory:
28
+
29
+ <Snippet text="cd my-ai-app" />
30
+
31
+ <Note>This guide requires Expo 52 or higher.</Note>
32
+
33
+ ### Install dependencies
34
+
35
+ Install `ai` and `@ai-sdk/react`, the AI package and AI SDK's React hooks. The AI SDK's [Vercel AI Gateway provider](/providers/ai-sdk-providers/ai-gateway) ships with the `ai` package. You'll also install `zod`, a schema validation library used for defining tool inputs.
36
+
37
+ <Note>
38
+ This guide uses the Vercel AI Gateway provider so you can access hundreds of
39
+ models from different providers with one API key, but you can switch to any
40
+ provider or model by installing its package. Check out available [AI SDK
41
+ providers](/providers/ai-sdk-providers) for more information.
42
+ </Note>
43
+
44
+ <div className="my-4">
45
+ <Tabs items={['pnpm', 'npm', 'yarn', 'bun']}>
46
+ <Tab>
47
+ <Snippet text="pnpm add ai @ai-sdk/react zod" dark />
48
+ </Tab>
49
+ <Tab>
50
+ <Snippet text="npm install ai @ai-sdk/react zod" dark />
51
+ </Tab>
52
+ <Tab>
53
+ <Snippet text="yarn add ai @ai-sdk/react zod" dark />
54
+ </Tab>
55
+ <Tab>
56
+ <Snippet text="bun add ai @ai-sdk/react zod" dark />
57
+ </Tab>
58
+ </Tabs>
59
+ </div>
60
+
61
+ ### Configure your AI Gateway API key
62
+
63
+ Create a `.env.local` file in your project root and add your AI Gateway API key. This key authenticates your application with the Vercel AI Gateway.
64
+
65
+ <Snippet text="touch .env.local" />
66
+
67
+ Edit the `.env.local` file:
68
+
69
+ ```env filename=".env.local"
70
+ AI_GATEWAY_API_KEY=xxxxxxxxx
71
+ ```
72
+
73
+ Replace `xxxxxxxxx` with your actual Vercel AI Gateway API key.
74
+
75
+ <Note className="mb-4">
76
+ The AI SDK's Vercel AI Gateway Provider will default to using the
77
+ `AI_GATEWAY_API_KEY` environment variable.
78
+ </Note>
79
+
80
+ ## Create an API Route
81
+
82
+ Create a route handler, `app/api/chat+api.ts` and add the following code:
83
+
84
+ ```tsx filename="app/api/chat+api.ts"
85
+ import { streamText, UIMessage, convertToModelMessages } from 'ai';
86
+ __PROVIDER_IMPORT__;
87
+
88
+ export async function POST(req: Request) {
89
+ const { messages }: { messages: UIMessage[] } = await req.json();
90
+
91
+ const result = streamText({
92
+ model: __MODEL__,
93
+ messages: await convertToModelMessages(messages),
94
+ });
95
+
96
+ return result.toUIMessageStreamResponse({
97
+ headers: {
98
+ 'Content-Type': 'application/octet-stream',
99
+ 'Content-Encoding': 'none',
100
+ },
101
+ });
102
+ }
103
+ ```
104
+
105
+ Let's take a look at what is happening in this code:
106
+
107
+ 1. Define an asynchronous `POST` request handler and extract `messages` from the body of the request. The `messages` variable contains a history of the conversation between you and the chatbot and provides the chatbot with the necessary context to make the next generation.
108
+ 2. Call [`streamText`](/docs/reference/ai-sdk-core/stream-text), which is imported from the `ai` package. This function accepts a configuration object that contains a `model` provider (imported from `ai`) and `messages` (defined in step 1). You can pass additional [settings](/docs/ai-sdk-core/settings) to further customize the model's behavior.
109
+ 3. The `streamText` function returns a [`StreamTextResult`](/docs/reference/ai-sdk-core/stream-text#result-object). This result object contains the [`toUIMessageStreamResponse`](/docs/reference/ai-sdk-core/stream-text#to-ui-message-stream-response) function which converts the result to a streamed response object.
110
+ 4. Finally, return the result to the client to stream the response.
111
+
112
+ This API route creates a POST request endpoint at `/api/chat`.
113
+
114
+ ## Choosing a Provider
115
+
116
+ The AI SDK supports dozens of model providers through [first-party](/providers/ai-sdk-providers), [OpenAI-compatible](/providers/openai-compatible-providers), and [community](/providers/community-providers) packages.
117
+
118
+ This quickstart uses the [Vercel AI Gateway](https://vercel.com/ai-gateway) provider, which is the default [global provider](/docs/ai-sdk-core/provider-management#global-provider-configuration). This means you can access models using a simple string in the model configuration:
119
+
120
+ ```ts
121
+ model: __MODEL__;
122
+ ```
123
+
124
+ You can also explicitly import and use the gateway provider in two other equivalent ways:
125
+
126
+ ```ts
127
+ // Option 1: Import from 'ai' package (included by default)
128
+ import { gateway } from 'ai';
129
+ model: gateway('anthropic/claude-sonnet-4.5');
130
+
131
+ // Option 2: Install and import from '@ai-sdk/gateway' package
132
+ import { gateway } from '@ai-sdk/gateway';
133
+ model: gateway('anthropic/claude-sonnet-4.5');
134
+ ```
135
+
136
+ ### Using other providers
137
+
138
+ To use a different provider, install its package and create a provider instance. For example, to use OpenAI directly:
139
+
140
+ <div className="my-4">
141
+ <Tabs items={['pnpm', 'npm', 'yarn', 'bun']}>
142
+ <Tab>
143
+ <Snippet text="pnpm add @ai-sdk/openai" dark />
144
+ </Tab>
145
+ <Tab>
146
+ <Snippet text="npm install @ai-sdk/openai" dark />
147
+ </Tab>
148
+ <Tab>
149
+ <Snippet text="yarn add @ai-sdk/openai" dark />
150
+ </Tab>
151
+
152
+ <Tab>
153
+ <Snippet text="bun add @ai-sdk/openai" dark />
154
+ </Tab>
155
+
156
+ </Tabs>
157
+ </div>
158
+
159
+ ```ts
160
+ import { openai } from '@ai-sdk/openai';
161
+
162
+ model: openai('gpt-5.1');
163
+ ```
164
+
165
+ #### Updating the global provider
166
+
167
+ You can change the default global provider so string model references use your preferred provider everywhere in your application. Learn more about [provider management](/docs/ai-sdk-core/provider-management#global-provider-configuration).
168
+
169
+ Pick the approach that best matches how you want to manage providers across your application.
170
+
171
+ ## Wire up the UI
172
+
173
+ Now that you have an API route that can query an LLM, it's time to setup your frontend. The AI SDK's [UI](/docs/ai-sdk-ui) package abstracts the complexity of a chat interface into one hook, [`useChat`](/docs/reference/ai-sdk-ui/use-chat).
174
+
175
+ Update your root page (`app/(tabs)/index.tsx`) with the following code to show a list of chat messages and provide a user message input:
176
+
177
+ ```tsx filename="app/(tabs)/index.tsx"
178
+ import { generateAPIUrl } from '@/utils';
179
+ import { useChat } from '@ai-sdk/react';
180
+ import { DefaultChatTransport } from 'ai';
181
+ import { fetch as expoFetch } from 'expo/fetch';
182
+ import { useState } from 'react';
183
+ import { View, TextInput, ScrollView, Text, SafeAreaView } from 'react-native';
184
+
185
+ export default function App() {
186
+ const [input, setInput] = useState('');
187
+ const { messages, error, sendMessage } = useChat({
188
+ transport: new DefaultChatTransport({
189
+ fetch: expoFetch as unknown as typeof globalThis.fetch,
190
+ api: generateAPIUrl('/api/chat'),
191
+ }),
192
+ onError: error => console.error(error, 'ERROR'),
193
+ });
194
+
195
+ if (error) return <Text>{error.message}</Text>;
196
+
197
+ return (
198
+ <SafeAreaView style={{ height: '100%' }}>
199
+ <View
200
+ style={{
201
+ height: '95%',
202
+ display: 'flex',
203
+ flexDirection: 'column',
204
+ paddingHorizontal: 8,
205
+ }}
206
+ >
207
+ <ScrollView style={{ flex: 1 }}>
208
+ {messages.map(m => (
209
+ <View key={m.id} style={{ marginVertical: 8 }}>
210
+ <View>
211
+ <Text style={{ fontWeight: 700 }}>{m.role}</Text>
212
+ {m.parts.map((part, i) => {
213
+ switch (part.type) {
214
+ case 'text':
215
+ return <Text key={`${m.id}-${i}`}>{part.text}</Text>;
216
+ }
217
+ })}
218
+ </View>
219
+ </View>
220
+ ))}
221
+ </ScrollView>
222
+
223
+ <View style={{ marginTop: 8 }}>
224
+ <TextInput
225
+ style={{ backgroundColor: 'white', padding: 8 }}
226
+ placeholder="Say something..."
227
+ value={input}
228
+ onChange={e => setInput(e.nativeEvent.text)}
229
+ onSubmitEditing={e => {
230
+ e.preventDefault();
231
+ sendMessage({ text: input });
232
+ setInput('');
233
+ }}
234
+ autoFocus={true}
235
+ />
236
+ </View>
237
+ </View>
238
+ </SafeAreaView>
239
+ );
240
+ }
241
+ ```
242
+
243
+ This page utilizes the `useChat` hook, which will, by default, use the `POST` API route you created earlier (`/api/chat`). The hook provides functions and state for handling user input and form submission. The `useChat` hook provides multiple utility functions and state variables:
244
+
245
+ - `messages` - the current chat messages (an array of objects with `id`, `role`, and `parts` properties).
246
+ - `sendMessage` - a function to send a message to the chat API.
247
+
248
+ The component uses local state (`useState`) to manage the input field value, and handles form submission by calling `sendMessage` with the input text and then clearing the input field.
249
+
250
+ The LLM's response is accessed through the message `parts` array. Each message contains an ordered array of `parts` that represents everything the model generated in its response. These parts can include plain text, reasoning tokens, and more that you will see later. The `parts` array preserves the sequence of the model's outputs, allowing you to display or process each component in the order it was generated.
251
+
252
+ <Note>
253
+ You use the expo/fetch function instead of the native node fetch to enable
254
+ streaming of chat responses. This requires Expo 52 or higher.
255
+ </Note>
256
+
257
+ ### Create the API URL Generator
258
+
259
+ Because you're using expo/fetch for streaming responses instead of the native fetch function, you'll need an API URL generator to ensure you are using the correct base url and format depending on the client environment (e.g. web or mobile). Create a new file called `utils.ts` in the root of your project and add the following code:
260
+
261
+ ```ts filename="utils.ts"
262
+ import Constants from 'expo-constants';
263
+
264
+ export const generateAPIUrl = (relativePath: string) => {
265
+ const origin = Constants.experienceUrl.replace('exp://', 'http://');
266
+
267
+ const path = relativePath.startsWith('/') ? relativePath : `/${relativePath}`;
268
+
269
+ if (process.env.NODE_ENV === 'development') {
270
+ return origin.concat(path);
271
+ }
272
+
273
+ if (!process.env.EXPO_PUBLIC_API_BASE_URL) {
274
+ throw new Error(
275
+ 'EXPO_PUBLIC_API_BASE_URL environment variable is not defined',
276
+ );
277
+ }
278
+
279
+ return process.env.EXPO_PUBLIC_API_BASE_URL.concat(path);
280
+ };
281
+ ```
282
+
283
+ This utility function handles URL generation for both development and production environments, ensuring your API calls work correctly across different devices and configurations.
284
+
285
+ <Note>
286
+ Before deploying to production, you must set the `EXPO_PUBLIC_API_BASE_URL`
287
+ environment variable in your production environment. This variable should
288
+ point to the base URL of your API server.
289
+ </Note>
290
+
291
+ ## Running Your Application
292
+
293
+ With that, you have built everything you need for your chatbot! To start your application, use the command:
294
+
295
+ <Snippet text="pnpm expo" />
296
+
297
+ Head to your browser and open http://localhost:8081. You should see an input field. Test it out by entering a message and see the AI chatbot respond in real-time! The AI SDK makes it fast and easy to build AI chat interfaces with Expo.
298
+
299
+ <Note>
300
+ If you experience "Property `structuredClone` doesn't exist" errors on mobile,
301
+ add the [polyfills described below](#polyfills).
302
+ </Note>
303
+
304
+ ## Enhance Your Chatbot with Tools
305
+
306
+ While large language models (LLMs) have incredible generation capabilities, they struggle with discrete tasks (e.g. mathematics) and interacting with the outside world (e.g. getting the weather). This is where [tools](/docs/ai-sdk-core/tools-and-tool-calling) come in.
307
+
308
+ Tools are actions that an LLM can invoke. The results of these actions can be reported back to the LLM to be considered in the next response.
309
+
310
+ For example, if a user asks about the current weather, without tools, the model would only be able to provide general information based on its training data. But with a weather tool, it can fetch and provide up-to-date, location-specific weather information.
311
+
312
+ Let's enhance your chatbot by adding a simple weather tool.
313
+
314
+ ### Update Your API route
315
+
316
+ Modify your `app/api/chat+api.ts` file to include the new weather tool:
317
+
318
+ ```tsx filename="app/api/chat+api.ts" highlight="2,11-25"
319
+ import { streamText, UIMessage, convertToModelMessages, tool } from 'ai';
320
+ __PROVIDER_IMPORT__;
321
+ import { z } from 'zod';
322
+
323
+ export async function POST(req: Request) {
324
+ const { messages }: { messages: UIMessage[] } = await req.json();
325
+
326
+ const result = streamText({
327
+ model: __MODEL__,
328
+ messages: await convertToModelMessages(messages),
329
+ tools: {
330
+ weather: tool({
331
+ description: 'Get the weather in a location (fahrenheit)',
332
+ inputSchema: z.object({
333
+ location: z.string().describe('The location to get the weather for'),
334
+ }),
335
+ execute: async ({ location }) => {
336
+ const temperature = Math.round(Math.random() * (90 - 32) + 32);
337
+ return {
338
+ location,
339
+ temperature,
340
+ };
341
+ },
342
+ }),
343
+ },
344
+ });
345
+
346
+ return result.toUIMessageStreamResponse({
347
+ headers: {
348
+ 'Content-Type': 'application/octet-stream',
349
+ 'Content-Encoding': 'none',
350
+ },
351
+ });
352
+ }
353
+ ```
354
+
355
+ In this updated code:
356
+
357
+ 1. You import the `tool` function from the `ai` package and `z` from `zod` for schema validation.
358
+ 2. You define a `tools` object with a `weather` tool. This tool:
359
+
360
+ - Has a description that helps the model understand when to use it.
361
+ - Defines `inputSchema` using a Zod schema, specifying that it requires a `location` string to execute this tool. The model will attempt to extract this input from the context of the conversation. If it can't, it will ask the user for the missing information.
362
+ - Defines an `execute` function that simulates getting weather data (in this case, it returns a random temperature). This is an asynchronous function running on the server so you can fetch real data from an external API.
363
+
364
+ Now your chatbot can "fetch" weather information for any location the user asks about. When the model determines it needs to use the weather tool, it will generate a tool call with the necessary input. The `execute` function will then be automatically run, and the tool output will be added to the `messages` as a `tool` message.
365
+
366
+ <Note>
367
+ You may need to restart your development server for the changes to take
368
+ effect.
369
+ </Note>
370
+
371
+ Try asking something like "What's the weather in New York?" and see how the model uses the new tool.
372
+
373
+ Notice the blank response in the UI? This is because instead of generating a text response, the model generated a tool call. You can access the tool call and subsequent tool result on the client via the `tool-weather` part of the `message.parts` array.
374
+
375
+ <Note>
376
+ Tool parts are always named `tool-{toolName}`, where `{toolName}` is the key
377
+ you used when defining the tool. In this case, since we defined the tool as
378
+ `weather`, the part type is `tool-weather`.
379
+ </Note>
380
+
381
+ ### Update the UI
382
+
383
+ To display the weather tool invocation in your UI, update your `app/(tabs)/index.tsx` file:
384
+
385
+ ```tsx filename="app/(tabs)/index.tsx" highlight="31-35"
386
+ import { generateAPIUrl } from '@/utils';
387
+ import { useChat } from '@ai-sdk/react';
388
+ import { DefaultChatTransport } from 'ai';
389
+ import { fetch as expoFetch } from 'expo/fetch';
390
+ import { useState } from 'react';
391
+ import { View, TextInput, ScrollView, Text, SafeAreaView } from 'react-native';
392
+
393
+ export default function App() {
394
+ const [input, setInput] = useState('');
395
+ const { messages, error, sendMessage } = useChat({
396
+ transport: new DefaultChatTransport({
397
+ fetch: expoFetch as unknown as typeof globalThis.fetch,
398
+ api: generateAPIUrl('/api/chat'),
399
+ }),
400
+ onError: error => console.error(error, 'ERROR'),
401
+ });
402
+
403
+ if (error) return <Text>{error.message}</Text>;
404
+
405
+ return (
406
+ <SafeAreaView style={{ height: '100%' }}>
407
+ <View
408
+ style={{
409
+ height: '95%',
410
+ display: 'flex',
411
+ flexDirection: 'column',
412
+ paddingHorizontal: 8,
413
+ }}
414
+ >
415
+ <ScrollView style={{ flex: 1 }}>
416
+ {messages.map(m => (
417
+ <View key={m.id} style={{ marginVertical: 8 }}>
418
+ <View>
419
+ <Text style={{ fontWeight: 700 }}>{m.role}</Text>
420
+ {m.parts.map((part, i) => {
421
+ switch (part.type) {
422
+ case 'text':
423
+ return <Text key={`${m.id}-${i}`}>{part.text}</Text>;
424
+ case 'tool-weather':
425
+ return (
426
+ <Text key={`${m.id}-${i}`}>
427
+ {JSON.stringify(part, null, 2)}
428
+ </Text>
429
+ );
430
+ }
431
+ })}
432
+ </View>
433
+ </View>
434
+ ))}
435
+ </ScrollView>
436
+
437
+ <View style={{ marginTop: 8 }}>
438
+ <TextInput
439
+ style={{ backgroundColor: 'white', padding: 8 }}
440
+ placeholder="Say something..."
441
+ value={input}
442
+ onChange={e => setInput(e.nativeEvent.text)}
443
+ onSubmitEditing={e => {
444
+ e.preventDefault();
445
+ sendMessage({ text: input });
446
+ setInput('');
447
+ }}
448
+ autoFocus={true}
449
+ />
450
+ </View>
451
+ </View>
452
+ </SafeAreaView>
453
+ );
454
+ }
455
+ ```
456
+
457
+ <Note>
458
+ You may need to restart your development server for the changes to take
459
+ effect.
460
+ </Note>
461
+
462
+ With this change, you're updating the UI to handle different message parts. For text parts, you display the text content as before. For weather tool invocations, you display a JSON representation of the tool call and its result.
463
+
464
+ Now, when you ask about the weather, you'll see the tool call and its result displayed in your chat interface.
465
+
466
+ ## Enabling Multi-Step Tool Calls
467
+
468
+ You may have noticed that while the tool results are visible in the chat interface, the model isn't using this information to answer your original query. This is because once the model generates a tool call, it has technically completed its generation.
469
+
470
+ To solve this, you can enable multi-step tool calls using `stopWhen`. By default, `stopWhen` is set to `stepCountIs(1)`, which means generation stops after the first step when there are tool results. By changing this condition, you can allow the model to automatically send tool results back to itself to trigger additional generations until your specified stopping condition is met. In this case, you want the model to continue generating so it can use the weather tool results to answer your original question.
471
+
472
+ ### Update Your API Route
473
+
474
+ Modify your `app/api/chat+api.ts` file to include the `stopWhen` condition:
475
+
476
+ ```tsx filename="app/api/chat+api.ts" highlight="10"
477
+ import {
478
+ streamText,
479
+ UIMessage,
480
+ convertToModelMessages,
481
+ tool,
482
+ stepCountIs,
483
+ } from 'ai';
484
+ __PROVIDER_IMPORT__;
485
+ import { z } from 'zod';
486
+
487
+ export async function POST(req: Request) {
488
+ const { messages }: { messages: UIMessage[] } = await req.json();
489
+
490
+ const result = streamText({
491
+ model: __MODEL__,
492
+ messages: await convertToModelMessages(messages),
493
+ stopWhen: stepCountIs(5),
494
+ tools: {
495
+ weather: tool({
496
+ description: 'Get the weather in a location (fahrenheit)',
497
+ inputSchema: z.object({
498
+ location: z.string().describe('The location to get the weather for'),
499
+ }),
500
+ execute: async ({ location }) => {
501
+ const temperature = Math.round(Math.random() * (90 - 32) + 32);
502
+ return {
503
+ location,
504
+ temperature,
505
+ };
506
+ },
507
+ }),
508
+ },
509
+ });
510
+
511
+ return result.toUIMessageStreamResponse({
512
+ headers: {
513
+ 'Content-Type': 'application/octet-stream',
514
+ 'Content-Encoding': 'none',
515
+ },
516
+ });
517
+ }
518
+ ```
519
+
520
+ <Note>
521
+ You may need to restart your development server for the changes to take
522
+ effect.
523
+ </Note>
524
+
525
+ Head back to the Expo app and ask about the weather in a location. You should now see the model using the weather tool results to answer your question.
526
+
527
+ By setting `stopWhen: stepCountIs(5)`, you're allowing the model to use up to 5 "steps" for any given generation. This enables more complex interactions and allows the model to gather and process information over several steps if needed. You can see this in action by adding another tool to convert the temperature from Fahrenheit to Celsius.
528
+
529
+ ### Add More Tools
530
+
531
+ Update your `app/api/chat+api.ts` file to add a new tool to convert the temperature from Fahrenheit to Celsius:
532
+
533
+ ```tsx filename="app/api/chat+api.ts" highlight="28-41"
534
+ import {
535
+ streamText,
536
+ UIMessage,
537
+ convertToModelMessages,
538
+ tool,
539
+ stepCountIs,
540
+ } from 'ai';
541
+ __PROVIDER_IMPORT__;
542
+ import { z } from 'zod';
543
+
544
+ export async function POST(req: Request) {
545
+ const { messages }: { messages: UIMessage[] } = await req.json();
546
+
547
+ const result = streamText({
548
+ model: __MODEL__,
549
+ messages: await convertToModelMessages(messages),
550
+ stopWhen: stepCountIs(5),
551
+ tools: {
552
+ weather: tool({
553
+ description: 'Get the weather in a location (fahrenheit)',
554
+ inputSchema: z.object({
555
+ location: z.string().describe('The location to get the weather for'),
556
+ }),
557
+ execute: async ({ location }) => {
558
+ const temperature = Math.round(Math.random() * (90 - 32) + 32);
559
+ return {
560
+ location,
561
+ temperature,
562
+ };
563
+ },
564
+ }),
565
+ convertFahrenheitToCelsius: tool({
566
+ description: 'Convert a temperature in fahrenheit to celsius',
567
+ inputSchema: z.object({
568
+ temperature: z
569
+ .number()
570
+ .describe('The temperature in fahrenheit to convert'),
571
+ }),
572
+ execute: async ({ temperature }) => {
573
+ const celsius = Math.round((temperature - 32) * (5 / 9));
574
+ return {
575
+ celsius,
576
+ };
577
+ },
578
+ }),
579
+ },
580
+ });
581
+
582
+ return result.toUIMessageStreamResponse({
583
+ headers: {
584
+ 'Content-Type': 'application/octet-stream',
585
+ 'Content-Encoding': 'none',
586
+ },
587
+ });
588
+ }
589
+ ```
590
+
591
+ <Note>
592
+ You may need to restart your development server for the changes to take
593
+ effect.
594
+ </Note>
595
+
596
+ ### Update the UI for the new tool
597
+
598
+ To display the temperature conversion tool invocation in your UI, update your `app/(tabs)/index.tsx` file to handle the new tool part:
599
+
600
+ ```tsx filename="app/(tabs)/index.tsx" highlight="37-42"
601
+ import { generateAPIUrl } from '@/utils';
602
+ import { useChat } from '@ai-sdk/react';
603
+ import { DefaultChatTransport } from 'ai';
604
+ import { fetch as expoFetch } from 'expo/fetch';
605
+ import { useState } from 'react';
606
+ import { View, TextInput, ScrollView, Text, SafeAreaView } from 'react-native';
607
+
608
+ export default function App() {
609
+ const [input, setInput] = useState('');
610
+ const { messages, error, sendMessage } = useChat({
611
+ transport: new DefaultChatTransport({
612
+ fetch: expoFetch as unknown as typeof globalThis.fetch,
613
+ api: generateAPIUrl('/api/chat'),
614
+ }),
615
+ onError: error => console.error(error, 'ERROR'),
616
+ });
617
+
618
+ if (error) return <Text>{error.message}</Text>;
619
+
620
+ return (
621
+ <SafeAreaView style={{ height: '100%' }}>
622
+ <View
623
+ style={{
624
+ height: '95%',
625
+ display: 'flex',
626
+ flexDirection: 'column',
627
+ paddingHorizontal: 8,
628
+ }}
629
+ >
630
+ <ScrollView style={{ flex: 1 }}>
631
+ {messages.map(m => (
632
+ <View key={m.id} style={{ marginVertical: 8 }}>
633
+ <View>
634
+ <Text style={{ fontWeight: 700 }}>{m.role}</Text>
635
+ {m.parts.map((part, i) => {
636
+ switch (part.type) {
637
+ case 'text':
638
+ return <Text key={`${m.id}-${i}`}>{part.text}</Text>;
639
+ case 'tool-weather':
640
+ case 'tool-convertFahrenheitToCelsius':
641
+ return (
642
+ <Text key={`${m.id}-${i}`}>
643
+ {JSON.stringify(part, null, 2)}
644
+ </Text>
645
+ );
646
+ }
647
+ })}
648
+ </View>
649
+ </View>
650
+ ))}
651
+ </ScrollView>
652
+
653
+ <View style={{ marginTop: 8 }}>
654
+ <TextInput
655
+ style={{ backgroundColor: 'white', padding: 8 }}
656
+ placeholder="Say something..."
657
+ value={input}
658
+ onChange={e => setInput(e.nativeEvent.text)}
659
+ onSubmitEditing={e => {
660
+ e.preventDefault();
661
+ sendMessage({ text: input });
662
+ setInput('');
663
+ }}
664
+ autoFocus={true}
665
+ />
666
+ </View>
667
+ </View>
668
+ </SafeAreaView>
669
+ );
670
+ }
671
+ ```
672
+
673
+ <Note>
674
+ You may need to restart your development server for the changes to take
675
+ effect.
676
+ </Note>
677
+
678
+ Now, when you ask "What's the weather in New York in celsius?", you should see a more complete interaction:
679
+
680
+ 1. The model will call the weather tool for New York.
681
+ 2. You'll see the tool result displayed.
682
+ 3. It will then call the temperature conversion tool to convert the temperature from Fahrenheit to Celsius.
683
+ 4. The model will then use that information to provide a natural language response about the weather in New York.
684
+
685
+ This multi-step approach allows the model to gather information and use it to provide more accurate and contextual responses, making your chatbot considerably more useful.
686
+
687
+ This simple example demonstrates how tools can expand your model's capabilities. You can create more complex tools to integrate with real APIs, databases, or any other external systems, allowing the model to access and process real-world data in real-time. Tools bridge the gap between the model's knowledge cutoff and current information.
688
+
689
+ ## Polyfills
690
+
691
+ Several functions that are internally used by the AI SDK might not be available in the Expo runtime depending on your configuration and the target platform.
692
+
693
+ First, install the following packages:
694
+
695
+ <div className="my-4">
696
+ <Tabs items={['pnpm', 'npm', 'yarn', 'bun']}>
697
+ <Tab>
698
+ <Snippet
699
+ text="pnpm add @ungap/structured-clone @stardazed/streams-text-encoding"
700
+ dark
701
+ />
702
+ </Tab>
703
+ <Tab>
704
+ <Snippet
705
+ text="npm install @ungap/structured-clone @stardazed/streams-text-encoding"
706
+ dark
707
+ />
708
+ </Tab>
709
+ <Tab>
710
+ <Snippet
711
+ text="yarn add @ungap/structured-clone @stardazed/streams-text-encoding"
712
+ dark
713
+ />
714
+ </Tab>
715
+ <Tab>
716
+ <Snippet
717
+ text="bun add @ungap/structured-clone @stardazed/streams-text-encoding"
718
+ dark
719
+ />
720
+ </Tab>
721
+ </Tabs>
722
+ </div>
723
+
724
+ Then create a new file in the root of your project with the following polyfills:
725
+
726
+ ```ts filename="polyfills.js"
727
+ import { Platform } from 'react-native';
728
+ import structuredClone from '@ungap/structured-clone';
729
+
730
+ if (Platform.OS !== 'web') {
731
+ const setupPolyfills = async () => {
732
+ const { polyfillGlobal } = await import(
733
+ 'react-native/Libraries/Utilities/PolyfillFunctions'
734
+ );
735
+
736
+ const { TextEncoderStream, TextDecoderStream } = await import(
737
+ '@stardazed/streams-text-encoding'
738
+ );
739
+
740
+ if (!('structuredClone' in global)) {
741
+ polyfillGlobal('structuredClone', () => structuredClone);
742
+ }
743
+
744
+ polyfillGlobal('TextEncoderStream', () => TextEncoderStream);
745
+ polyfillGlobal('TextDecoderStream', () => TextDecoderStream);
746
+ };
747
+
748
+ setupPolyfills();
749
+ }
750
+
751
+ export {};
752
+ ```
753
+
754
+ Finally, import the polyfills in your root `_layout.tsx`:
755
+
756
+ ```ts filename="_layout.tsx"
757
+ import '@/polyfills';
758
+ ```
759
+
760
+ ## Where to Next?
761
+
762
+ You've built an AI chatbot using the AI SDK! From here, you have several paths to explore:
763
+
764
+ - To learn more about the AI SDK, read through the [documentation](/docs).
765
+ - If you're interested in diving deeper with guides, check out the [RAG (retrieval-augmented generation)](/docs/guides/rag-chatbot) and [multi-modal chatbot](/docs/guides/multi-modal-chatbot) guides.
766
+ - To jumpstart your first AI project, explore available [templates](https://vercel.com/templates?type=ai).