ai 6.0.30 → 6.0.32

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (250)
  1. package/CHANGELOG.md +13 -0
  2. package/dist/index.js +1 -1
  3. package/dist/index.mjs +1 -1
  4. package/dist/internal/index.js +1 -1
  5. package/dist/internal/index.mjs +1 -1
  6. package/docs/00-introduction/index.mdx +76 -0
  7. package/docs/02-foundations/01-overview.mdx +43 -0
  8. package/docs/02-foundations/02-providers-and-models.mdx +163 -0
  9. package/docs/02-foundations/03-prompts.mdx +620 -0
  10. package/docs/02-foundations/04-tools.mdx +160 -0
  11. package/docs/02-foundations/05-streaming.mdx +62 -0
  12. package/docs/02-foundations/index.mdx +43 -0
  13. package/docs/02-getting-started/00-choosing-a-provider.mdx +110 -0
  14. package/docs/02-getting-started/01-navigating-the-library.mdx +85 -0
  15. package/docs/02-getting-started/02-nextjs-app-router.mdx +556 -0
  16. package/docs/02-getting-started/03-nextjs-pages-router.mdx +542 -0
  17. package/docs/02-getting-started/04-svelte.mdx +627 -0
  18. package/docs/02-getting-started/05-nuxt.mdx +566 -0
  19. package/docs/02-getting-started/06-nodejs.mdx +512 -0
  20. package/docs/02-getting-started/07-expo.mdx +766 -0
  21. package/docs/02-getting-started/08-tanstack-start.mdx +583 -0
  22. package/docs/02-getting-started/index.mdx +44 -0
  23. package/docs/03-agents/01-overview.mdx +96 -0
  24. package/docs/03-agents/02-building-agents.mdx +367 -0
  25. package/docs/03-agents/03-workflows.mdx +370 -0
  26. package/docs/03-agents/04-loop-control.mdx +350 -0
  27. package/docs/03-agents/05-configuring-call-options.mdx +286 -0
  28. package/docs/03-agents/index.mdx +40 -0
  29. package/docs/03-ai-sdk-core/01-overview.mdx +33 -0
  30. package/docs/03-ai-sdk-core/05-generating-text.mdx +600 -0
  31. package/docs/03-ai-sdk-core/10-generating-structured-data.mdx +662 -0
  32. package/docs/03-ai-sdk-core/15-tools-and-tool-calling.mdx +1102 -0
  33. package/docs/03-ai-sdk-core/16-mcp-tools.mdx +375 -0
  34. package/docs/03-ai-sdk-core/20-prompt-engineering.mdx +144 -0
  35. package/docs/03-ai-sdk-core/25-settings.mdx +198 -0
  36. package/docs/03-ai-sdk-core/30-embeddings.mdx +247 -0
  37. package/docs/03-ai-sdk-core/31-reranking.mdx +218 -0
  38. package/docs/03-ai-sdk-core/35-image-generation.mdx +341 -0
  39. package/docs/03-ai-sdk-core/36-transcription.mdx +173 -0
  40. package/docs/03-ai-sdk-core/37-speech.mdx +167 -0
  41. package/docs/03-ai-sdk-core/40-middleware.mdx +480 -0
  42. package/docs/03-ai-sdk-core/45-provider-management.mdx +349 -0
  43. package/docs/03-ai-sdk-core/50-error-handling.mdx +149 -0
  44. package/docs/03-ai-sdk-core/55-testing.mdx +218 -0
  45. package/docs/03-ai-sdk-core/60-telemetry.mdx +313 -0
  46. package/docs/03-ai-sdk-core/65-devtools.mdx +107 -0
  47. package/docs/03-ai-sdk-core/index.mdx +88 -0
  48. package/docs/04-ai-sdk-ui/01-overview.mdx +44 -0
  49. package/docs/04-ai-sdk-ui/02-chatbot.mdx +1313 -0
  50. package/docs/04-ai-sdk-ui/03-chatbot-message-persistence.mdx +535 -0
  51. package/docs/04-ai-sdk-ui/03-chatbot-resume-streams.mdx +263 -0
  52. package/docs/04-ai-sdk-ui/03-chatbot-tool-usage.mdx +682 -0
  53. package/docs/04-ai-sdk-ui/04-generative-user-interfaces.mdx +389 -0
  54. package/docs/04-ai-sdk-ui/05-completion.mdx +186 -0
  55. package/docs/04-ai-sdk-ui/08-object-generation.mdx +344 -0
  56. package/docs/04-ai-sdk-ui/20-streaming-data.mdx +397 -0
  57. package/docs/04-ai-sdk-ui/21-error-handling.mdx +190 -0
  58. package/docs/04-ai-sdk-ui/21-transport.mdx +174 -0
  59. package/docs/04-ai-sdk-ui/24-reading-ui-message-streams.mdx +104 -0
  60. package/docs/04-ai-sdk-ui/25-message-metadata.mdx +152 -0
  61. package/docs/04-ai-sdk-ui/50-stream-protocol.mdx +477 -0
  62. package/docs/04-ai-sdk-ui/index.mdx +64 -0
  63. package/docs/05-ai-sdk-rsc/01-overview.mdx +45 -0
  64. package/docs/05-ai-sdk-rsc/02-streaming-react-components.mdx +209 -0
  65. package/docs/05-ai-sdk-rsc/03-generative-ui-state.mdx +279 -0
  66. package/docs/05-ai-sdk-rsc/03-saving-and-restoring-states.mdx +105 -0
  67. package/docs/05-ai-sdk-rsc/04-multistep-interfaces.mdx +282 -0
  68. package/docs/05-ai-sdk-rsc/05-streaming-values.mdx +158 -0
  69. package/docs/05-ai-sdk-rsc/06-loading-state.mdx +273 -0
  70. package/docs/05-ai-sdk-rsc/08-error-handling.mdx +96 -0
  71. package/docs/05-ai-sdk-rsc/09-authentication.mdx +42 -0
  72. package/docs/05-ai-sdk-rsc/10-migrating-to-ui.mdx +722 -0
  73. package/docs/05-ai-sdk-rsc/index.mdx +58 -0
  74. package/docs/06-advanced/01-prompt-engineering.mdx +96 -0
  75. package/docs/06-advanced/02-stopping-streams.mdx +184 -0
  76. package/docs/06-advanced/03-backpressure.mdx +173 -0
  77. package/docs/06-advanced/04-caching.mdx +169 -0
  78. package/docs/06-advanced/05-multiple-streamables.mdx +68 -0
  79. package/docs/06-advanced/06-rate-limiting.mdx +60 -0
  80. package/docs/06-advanced/07-rendering-ui-with-language-models.mdx +213 -0
  81. package/docs/06-advanced/08-model-as-router.mdx +120 -0
  82. package/docs/06-advanced/09-multistep-interfaces.mdx +115 -0
  83. package/docs/06-advanced/09-sequential-generations.mdx +55 -0
  84. package/docs/06-advanced/10-vercel-deployment-guide.mdx +117 -0
  85. package/docs/06-advanced/index.mdx +11 -0
  86. package/docs/07-reference/01-ai-sdk-core/01-generate-text.mdx +2142 -0
  87. package/docs/07-reference/01-ai-sdk-core/02-stream-text.mdx +3215 -0
  88. package/docs/07-reference/01-ai-sdk-core/03-generate-object.mdx +780 -0
  89. package/docs/07-reference/01-ai-sdk-core/04-stream-object.mdx +1140 -0
  90. package/docs/07-reference/01-ai-sdk-core/05-embed.mdx +190 -0
  91. package/docs/07-reference/01-ai-sdk-core/06-embed-many.mdx +171 -0
  92. package/docs/07-reference/01-ai-sdk-core/06-rerank.mdx +309 -0
  93. package/docs/07-reference/01-ai-sdk-core/10-generate-image.mdx +227 -0
  94. package/docs/07-reference/01-ai-sdk-core/11-transcribe.mdx +138 -0
  95. package/docs/07-reference/01-ai-sdk-core/12-generate-speech.mdx +214 -0
  96. package/docs/07-reference/01-ai-sdk-core/15-agent.mdx +203 -0
  97. package/docs/07-reference/01-ai-sdk-core/16-tool-loop-agent.mdx +449 -0
  98. package/docs/07-reference/01-ai-sdk-core/17-create-agent-ui-stream.mdx +148 -0
  99. package/docs/07-reference/01-ai-sdk-core/18-create-agent-ui-stream-response.mdx +168 -0
  100. package/docs/07-reference/01-ai-sdk-core/18-pipe-agent-ui-stream-to-response.mdx +144 -0
  101. package/docs/07-reference/01-ai-sdk-core/20-tool.mdx +196 -0
  102. package/docs/07-reference/01-ai-sdk-core/22-dynamic-tool.mdx +175 -0
  103. package/docs/07-reference/01-ai-sdk-core/23-create-mcp-client.mdx +410 -0
  104. package/docs/07-reference/01-ai-sdk-core/24-mcp-stdio-transport.mdx +68 -0
  105. package/docs/07-reference/01-ai-sdk-core/25-json-schema.mdx +94 -0
  106. package/docs/07-reference/01-ai-sdk-core/26-zod-schema.mdx +109 -0
  107. package/docs/07-reference/01-ai-sdk-core/27-valibot-schema.mdx +55 -0
  108. package/docs/07-reference/01-ai-sdk-core/28-output.mdx +342 -0
  109. package/docs/07-reference/01-ai-sdk-core/30-model-message.mdx +415 -0
  110. package/docs/07-reference/01-ai-sdk-core/31-ui-message.mdx +246 -0
  111. package/docs/07-reference/01-ai-sdk-core/32-validate-ui-messages.mdx +101 -0
  112. package/docs/07-reference/01-ai-sdk-core/33-safe-validate-ui-messages.mdx +113 -0
  113. package/docs/07-reference/01-ai-sdk-core/40-provider-registry.mdx +182 -0
  114. package/docs/07-reference/01-ai-sdk-core/42-custom-provider.mdx +121 -0
  115. package/docs/07-reference/01-ai-sdk-core/50-cosine-similarity.mdx +52 -0
  116. package/docs/07-reference/01-ai-sdk-core/60-wrap-language-model.mdx +59 -0
  117. package/docs/07-reference/01-ai-sdk-core/61-wrap-image-model.mdx +64 -0
  118. package/docs/07-reference/01-ai-sdk-core/65-language-model-v2-middleware.mdx +46 -0
  119. package/docs/07-reference/01-ai-sdk-core/66-extract-reasoning-middleware.mdx +68 -0
  120. package/docs/07-reference/01-ai-sdk-core/67-simulate-streaming-middleware.mdx +71 -0
  121. package/docs/07-reference/01-ai-sdk-core/68-default-settings-middleware.mdx +80 -0
  122. package/docs/07-reference/01-ai-sdk-core/69-add-tool-input-examples-middleware.mdx +155 -0
  123. package/docs/07-reference/01-ai-sdk-core/70-extract-json-middleware.mdx +147 -0
  124. package/docs/07-reference/01-ai-sdk-core/70-step-count-is.mdx +84 -0
  125. package/docs/07-reference/01-ai-sdk-core/71-has-tool-call.mdx +120 -0
  126. package/docs/07-reference/01-ai-sdk-core/75-simulate-readable-stream.mdx +94 -0
  127. package/docs/07-reference/01-ai-sdk-core/80-smooth-stream.mdx +145 -0
  128. package/docs/07-reference/01-ai-sdk-core/90-generate-id.mdx +43 -0
  129. package/docs/07-reference/01-ai-sdk-core/91-create-id-generator.mdx +89 -0
  130. package/docs/07-reference/01-ai-sdk-core/index.mdx +159 -0
  131. package/docs/07-reference/02-ai-sdk-ui/01-use-chat.mdx +446 -0
  132. package/docs/07-reference/02-ai-sdk-ui/02-use-completion.mdx +179 -0
  133. package/docs/07-reference/02-ai-sdk-ui/03-use-object.mdx +178 -0
  134. package/docs/07-reference/02-ai-sdk-ui/31-convert-to-model-messages.mdx +230 -0
  135. package/docs/07-reference/02-ai-sdk-ui/32-prune-messages.mdx +108 -0
  136. package/docs/07-reference/02-ai-sdk-ui/40-create-ui-message-stream.mdx +151 -0
  137. package/docs/07-reference/02-ai-sdk-ui/41-create-ui-message-stream-response.mdx +113 -0
  138. package/docs/07-reference/02-ai-sdk-ui/42-pipe-ui-message-stream-to-response.mdx +73 -0
  139. package/docs/07-reference/02-ai-sdk-ui/43-read-ui-message-stream.mdx +57 -0
  140. package/docs/07-reference/02-ai-sdk-ui/46-infer-ui-tools.mdx +99 -0
  141. package/docs/07-reference/02-ai-sdk-ui/47-infer-ui-tool.mdx +75 -0
  142. package/docs/07-reference/02-ai-sdk-ui/50-direct-chat-transport.mdx +333 -0
  143. package/docs/07-reference/02-ai-sdk-ui/index.mdx +89 -0
  144. package/docs/07-reference/03-ai-sdk-rsc/01-stream-ui.mdx +767 -0
  145. package/docs/07-reference/03-ai-sdk-rsc/02-create-ai.mdx +90 -0
  146. package/docs/07-reference/03-ai-sdk-rsc/03-create-streamable-ui.mdx +91 -0
  147. package/docs/07-reference/03-ai-sdk-rsc/04-create-streamable-value.mdx +48 -0
  148. package/docs/07-reference/03-ai-sdk-rsc/05-read-streamable-value.mdx +78 -0
  149. package/docs/07-reference/03-ai-sdk-rsc/06-get-ai-state.mdx +50 -0
  150. package/docs/07-reference/03-ai-sdk-rsc/07-get-mutable-ai-state.mdx +70 -0
  151. package/docs/07-reference/03-ai-sdk-rsc/08-use-ai-state.mdx +26 -0
  152. package/docs/07-reference/03-ai-sdk-rsc/09-use-actions.mdx +42 -0
  153. package/docs/07-reference/03-ai-sdk-rsc/10-use-ui-state.mdx +35 -0
  154. package/docs/07-reference/03-ai-sdk-rsc/11-use-streamable-value.mdx +46 -0
  155. package/docs/07-reference/03-ai-sdk-rsc/20-render.mdx +262 -0
  156. package/docs/07-reference/03-ai-sdk-rsc/index.mdx +67 -0
  157. package/docs/07-reference/04-stream-helpers/01-ai-stream.mdx +89 -0
  158. package/docs/07-reference/04-stream-helpers/02-streaming-text-response.mdx +79 -0
  159. package/docs/07-reference/04-stream-helpers/05-stream-to-response.mdx +108 -0
  160. package/docs/07-reference/04-stream-helpers/07-openai-stream.mdx +77 -0
  161. package/docs/07-reference/04-stream-helpers/08-anthropic-stream.mdx +79 -0
  162. package/docs/07-reference/04-stream-helpers/09-aws-bedrock-stream.mdx +91 -0
  163. package/docs/07-reference/04-stream-helpers/10-aws-bedrock-anthropic-stream.mdx +96 -0
  164. package/docs/07-reference/04-stream-helpers/10-aws-bedrock-messages-stream.mdx +96 -0
  165. package/docs/07-reference/04-stream-helpers/11-aws-bedrock-cohere-stream.mdx +93 -0
  166. package/docs/07-reference/04-stream-helpers/12-aws-bedrock-llama-2-stream.mdx +93 -0
  167. package/docs/07-reference/04-stream-helpers/13-cohere-stream.mdx +78 -0
  168. package/docs/07-reference/04-stream-helpers/14-google-generative-ai-stream.mdx +85 -0
  169. package/docs/07-reference/04-stream-helpers/15-hugging-face-stream.mdx +84 -0
  170. package/docs/07-reference/04-stream-helpers/16-langchain-adapter.mdx +98 -0
  171. package/docs/07-reference/04-stream-helpers/16-llamaindex-adapter.mdx +70 -0
  172. package/docs/07-reference/04-stream-helpers/17-mistral-stream.mdx +81 -0
  173. package/docs/07-reference/04-stream-helpers/18-replicate-stream.mdx +83 -0
  174. package/docs/07-reference/04-stream-helpers/19-inkeep-stream.mdx +80 -0
  175. package/docs/07-reference/04-stream-helpers/index.mdx +103 -0
  176. package/docs/07-reference/05-ai-sdk-errors/ai-api-call-error.mdx +30 -0
  177. package/docs/07-reference/05-ai-sdk-errors/ai-download-error.mdx +27 -0
  178. package/docs/07-reference/05-ai-sdk-errors/ai-empty-response-body-error.mdx +24 -0
  179. package/docs/07-reference/05-ai-sdk-errors/ai-invalid-argument-error.mdx +26 -0
  180. package/docs/07-reference/05-ai-sdk-errors/ai-invalid-data-content-error.mdx +25 -0
  181. package/docs/07-reference/05-ai-sdk-errors/ai-invalid-data-content.mdx +26 -0
  182. package/docs/07-reference/05-ai-sdk-errors/ai-invalid-message-role-error.mdx +25 -0
  183. package/docs/07-reference/05-ai-sdk-errors/ai-invalid-prompt-error.mdx +47 -0
  184. package/docs/07-reference/05-ai-sdk-errors/ai-invalid-response-data-error.mdx +25 -0
  185. package/docs/07-reference/05-ai-sdk-errors/ai-invalid-tool-approval-error.mdx +25 -0
  186. package/docs/07-reference/05-ai-sdk-errors/ai-invalid-tool-input-error.mdx +27 -0
  187. package/docs/07-reference/05-ai-sdk-errors/ai-json-parse-error.mdx +25 -0
  188. package/docs/07-reference/05-ai-sdk-errors/ai-load-api-key-error.mdx +24 -0
  189. package/docs/07-reference/05-ai-sdk-errors/ai-load-setting-error.mdx +24 -0
  190. package/docs/07-reference/05-ai-sdk-errors/ai-message-conversion-error.mdx +25 -0
  191. package/docs/07-reference/05-ai-sdk-errors/ai-no-content-generated-error.mdx +24 -0
  192. package/docs/07-reference/05-ai-sdk-errors/ai-no-image-generated-error.mdx +36 -0
  193. package/docs/07-reference/05-ai-sdk-errors/ai-no-object-generated-error.mdx +43 -0
  194. package/docs/07-reference/05-ai-sdk-errors/ai-no-speech-generated-error.mdx +25 -0
  195. package/docs/07-reference/05-ai-sdk-errors/ai-no-such-model-error.mdx +26 -0
  196. package/docs/07-reference/05-ai-sdk-errors/ai-no-such-provider-error.mdx +28 -0
  197. package/docs/07-reference/05-ai-sdk-errors/ai-no-such-tool-error.mdx +26 -0
  198. package/docs/07-reference/05-ai-sdk-errors/ai-no-transcript-generated-error.mdx +25 -0
  199. package/docs/07-reference/05-ai-sdk-errors/ai-retry-error.mdx +27 -0
  200. package/docs/07-reference/05-ai-sdk-errors/ai-too-many-embedding-values-for-call-error.mdx +27 -0
  201. package/docs/07-reference/05-ai-sdk-errors/ai-tool-call-not-found-for-approval-error.mdx +26 -0
  202. package/docs/07-reference/05-ai-sdk-errors/ai-tool-call-repair-error.mdx +28 -0
  203. package/docs/07-reference/05-ai-sdk-errors/ai-type-validation-error.mdx +25 -0
  204. package/docs/07-reference/05-ai-sdk-errors/ai-unsupported-functionality-error.mdx +25 -0
  205. package/docs/07-reference/05-ai-sdk-errors/index.mdx +38 -0
  206. package/docs/07-reference/index.mdx +34 -0
  207. package/docs/08-migration-guides/00-versioning.mdx +46 -0
  208. package/docs/08-migration-guides/24-migration-guide-6-0.mdx +823 -0
  209. package/docs/08-migration-guides/25-migration-guide-5-0-data.mdx +882 -0
  210. package/docs/08-migration-guides/26-migration-guide-5-0.mdx +3427 -0
  211. package/docs/08-migration-guides/27-migration-guide-4-2.mdx +99 -0
  212. package/docs/08-migration-guides/28-migration-guide-4-1.mdx +14 -0
  213. package/docs/08-migration-guides/29-migration-guide-4-0.mdx +1157 -0
  214. package/docs/08-migration-guides/36-migration-guide-3-4.mdx +14 -0
  215. package/docs/08-migration-guides/37-migration-guide-3-3.mdx +64 -0
  216. package/docs/08-migration-guides/38-migration-guide-3-2.mdx +46 -0
  217. package/docs/08-migration-guides/39-migration-guide-3-1.mdx +168 -0
  218. package/docs/08-migration-guides/index.mdx +22 -0
  219. package/docs/09-troubleshooting/01-azure-stream-slow.mdx +33 -0
  220. package/docs/09-troubleshooting/02-client-side-function-calls-not-invoked.mdx +22 -0
  221. package/docs/09-troubleshooting/03-server-actions-in-client-components.mdx +40 -0
  222. package/docs/09-troubleshooting/04-strange-stream-output.mdx +36 -0
  223. package/docs/09-troubleshooting/05-streamable-ui-errors.mdx +16 -0
  224. package/docs/09-troubleshooting/05-tool-invocation-missing-result.mdx +106 -0
  225. package/docs/09-troubleshooting/06-streaming-not-working-when-deployed.mdx +31 -0
  226. package/docs/09-troubleshooting/06-streaming-not-working-when-proxied.mdx +31 -0
  227. package/docs/09-troubleshooting/06-timeout-on-vercel.mdx +60 -0
  228. package/docs/09-troubleshooting/07-unclosed-streams.mdx +34 -0
  229. package/docs/09-troubleshooting/08-use-chat-failed-to-parse-stream.mdx +26 -0
  230. package/docs/09-troubleshooting/09-client-stream-error.mdx +25 -0
  231. package/docs/09-troubleshooting/10-use-chat-tools-no-response.mdx +32 -0
  232. package/docs/09-troubleshooting/11-use-chat-custom-request-options.mdx +149 -0
  233. package/docs/09-troubleshooting/12-typescript-performance-zod.mdx +46 -0
  234. package/docs/09-troubleshooting/12-use-chat-an-error-occurred.mdx +59 -0
  235. package/docs/09-troubleshooting/13-repeated-assistant-messages.mdx +73 -0
  236. package/docs/09-troubleshooting/14-stream-abort-handling.mdx +73 -0
  237. package/docs/09-troubleshooting/14-tool-calling-with-structured-outputs.mdx +48 -0
  238. package/docs/09-troubleshooting/15-abort-breaks-resumable-streams.mdx +55 -0
  239. package/docs/09-troubleshooting/15-stream-text-not-working.mdx +33 -0
  240. package/docs/09-troubleshooting/16-streaming-status-delay.mdx +63 -0
  241. package/docs/09-troubleshooting/17-use-chat-stale-body-data.mdx +141 -0
  242. package/docs/09-troubleshooting/18-ontoolcall-type-narrowing.mdx +66 -0
  243. package/docs/09-troubleshooting/19-unsupported-model-version.mdx +50 -0
  244. package/docs/09-troubleshooting/20-no-object-generated-content-filter.mdx +72 -0
  245. package/docs/09-troubleshooting/30-model-is-not-assignable-to-type.mdx +21 -0
  246. package/docs/09-troubleshooting/40-typescript-cannot-find-namespace-jsx.mdx +24 -0
  247. package/docs/09-troubleshooting/50-react-maximum-update-depth-exceeded.mdx +39 -0
  248. package/docs/09-troubleshooting/60-jest-cannot-find-module-ai-rsc.mdx +22 -0
  249. package/docs/09-troubleshooting/index.mdx +11 -0
  250. package/package.json +7 -3

@@ -0,0 +1,349 @@ package/docs/03-ai-sdk-core/45-provider-management.mdx
---
title: Provider & Model Management
description: Learn how to work with multiple providers and models
---

# Provider & Model Management

When you work with multiple providers and models, it is often desirable to manage them in a central place
and access the models through simple string IDs.

The AI SDK offers [custom providers](/docs/reference/ai-sdk-core/custom-provider) and
a [provider registry](/docs/reference/ai-sdk-core/provider-registry) for this purpose:

- With **custom providers**, you can pre-configure model settings, provide model name aliases,
  and limit the available models.
- The **provider registry** lets you mix multiple providers and access them through simple string IDs.

You can mix and match custom providers, the provider registry, and [middleware](/docs/ai-sdk-core/middleware) in your application.

## Custom Providers

You can create a [custom provider](/docs/reference/ai-sdk-core/custom-provider) using `customProvider`.

### Example: custom model settings

You might want to override the default model settings for a provider or provide model name aliases
with pre-configured settings.

```ts
import {
  gateway,
  customProvider,
  defaultSettingsMiddleware,
  wrapLanguageModel,
} from 'ai';

// custom provider with different provider options:
export const openai = customProvider({
  languageModels: {
    // replacement model with custom provider options:
    'gpt-5.1': wrapLanguageModel({
      model: gateway('openai/gpt-5.1'),
      middleware: defaultSettingsMiddleware({
        settings: {
          providerOptions: {
            openai: {
              reasoningEffort: 'high',
            },
          },
        },
      }),
    }),
    // alias model with custom provider options:
    'gpt-5.1-high-reasoning': wrapLanguageModel({
      model: gateway('openai/gpt-5.1'),
      middleware: defaultSettingsMiddleware({
        settings: {
          providerOptions: {
            openai: {
              reasoningEffort: 'high',
            },
          },
        },
      }),
    }),
  },
  fallbackProvider: gateway,
});
```

### Example: model name alias

You can also provide model name aliases, so you can update the model version in one place in the future:

```ts
import { customProvider, gateway } from 'ai';

// custom provider with alias names:
export const anthropic = customProvider({
  languageModels: {
    opus: gateway('anthropic/claude-opus-4.1'),
    sonnet: gateway('anthropic/claude-sonnet-4.5'),
    haiku: gateway('anthropic/claude-haiku-4.5'),
  },
  fallbackProvider: gateway,
});
```

### Example: limit available models

You can limit the available models in the system, even if you have multiple providers.

```ts
import {
  customProvider,
  defaultSettingsMiddleware,
  wrapLanguageModel,
  gateway,
} from 'ai';

export const myProvider = customProvider({
  languageModels: {
    'text-medium': gateway('anthropic/claude-3-5-sonnet-20240620'),
    'text-small': gateway('openai/gpt-5-mini'),
    'reasoning-medium': wrapLanguageModel({
      model: gateway('openai/gpt-5.1'),
      middleware: defaultSettingsMiddleware({
        settings: {
          providerOptions: {
            openai: {
              reasoningEffort: 'high',
            },
          },
        },
      }),
    }),
    'reasoning-fast': wrapLanguageModel({
      model: gateway('openai/gpt-5.1'),
      middleware: defaultSettingsMiddleware({
        settings: {
          providerOptions: {
            openai: {
              reasoningEffort: 'low',
            },
          },
        },
      }),
    }),
  },
  embeddingModels: {
    embedding: gateway.embeddingModel('openai/text-embedding-3-small'),
  },
  // no fallback provider
});
```

## Provider Registry

You can create a [provider registry](/docs/reference/ai-sdk-core/provider-registry) with multiple providers and models using `createProviderRegistry`.

### Setup

```ts filename={"registry.ts"}
import { anthropic } from '@ai-sdk/anthropic';
import { openai } from '@ai-sdk/openai';
import { createProviderRegistry, gateway } from 'ai';

export const registry = createProviderRegistry({
  // register provider with prefix and default setup using gateway:
  gateway,

  // register provider with prefix and direct provider import:
  anthropic,
  openai,
});
```

### Setup with Custom Separator

By default, the registry uses `:` as the separator between provider and model IDs. You can customize this separator:

```ts filename={"registry.ts"}
import { anthropic } from '@ai-sdk/anthropic';
import { openai } from '@ai-sdk/openai';
import { createProviderRegistry, gateway } from 'ai';

export const customSeparatorRegistry = createProviderRegistry(
  {
    gateway,
    anthropic,
    openai,
  },
  { separator: ' > ' },
);
```

### Example: Use language models

You can access language models by using the `languageModel` method on the registry.
The provider ID will become the prefix of the model ID: `providerId:modelId`.

```ts highlight={"5"}
import { generateText } from 'ai';
import { registry } from './registry';

const { text } = await generateText({
  model: registry.languageModel('openai:gpt-5.1'), // default separator
  // or with custom separator:
  // model: customSeparatorRegistry.languageModel('openai > gpt-5.1'),
  prompt: 'Invent a new holiday and describe its traditions.',
});
```

### Example: Use text embedding models

You can access text embedding models by using the `embeddingModel` method on the registry.
The provider ID will become the prefix of the model ID: `providerId:modelId`.

```ts highlight={"5"}
import { embed } from 'ai';
import { registry } from './registry';

const { embedding } = await embed({
  model: registry.embeddingModel('openai:text-embedding-3-small'),
  value: 'sunny day at the beach',
});
```

### Example: Use image models

You can access image models by using the `imageModel` method on the registry.
The provider ID will become the prefix of the model ID: `providerId:modelId`.

```ts highlight={"5"}
import { generateImage } from 'ai';
import { registry } from './registry';

const { image } = await generateImage({
  model: registry.imageModel('openai:dall-e-3'),
  prompt: 'A beautiful sunset over a calm ocean',
});
```

## Combining Custom Providers, Provider Registry, and Middleware

The central idea of provider management is to set up a file that contains all the providers and models you want to use.
You may want to pre-configure model settings, provide model name aliases, limit the available models, and more.

Here is an example that implements the following concepts:

- pass through gateway with a namespace prefix (here: `gateway > *`)
- pass through a full provider with a namespace prefix (here: `xai > *`)
- set up an OpenAI-compatible provider with a custom API key and base URL (here: `custom > *`)
- set up model name aliases (here: `anthropic > fast`, `anthropic > writing`, `anthropic > reasoning`)
- pre-configure model settings (here: `anthropic > reasoning`)
- validate the provider-specific options (here: `AnthropicProviderOptions`)
- use a fallback provider (here: `anthropic > *`)
- limit a provider to certain models without a fallback (here: `groq > gemma2-9b-it`, `groq > qwen-qwq-32b`)
- define a custom separator for the provider registry (here: `>`)

```ts
import { anthropic, AnthropicProviderOptions } from '@ai-sdk/anthropic';
import { createOpenAICompatible } from '@ai-sdk/openai-compatible';
import { xai } from '@ai-sdk/xai';
import { groq } from '@ai-sdk/groq';
import {
  createProviderRegistry,
  customProvider,
  defaultSettingsMiddleware,
  gateway,
  wrapLanguageModel,
} from 'ai';

export const registry = createProviderRegistry(
  {
    // pass through gateway with a namespace prefix
    gateway,

    // pass through full providers with namespace prefixes
    xai,

    // access an OpenAI-compatible provider with custom setup
    custom: createOpenAICompatible({
      name: 'provider-name',
      apiKey: process.env.CUSTOM_API_KEY,
      baseURL: 'https://api.custom.com/v1',
    }),

    // set up model name aliases
    anthropic: customProvider({
      languageModels: {
        fast: anthropic('claude-haiku-4-5'),

        // simple model
        writing: anthropic('claude-sonnet-4-5'),

        // extended reasoning model configuration:
        reasoning: wrapLanguageModel({
          model: anthropic('claude-sonnet-4-5'),
          middleware: defaultSettingsMiddleware({
            settings: {
              maxOutputTokens: 100000, // example default setting
              providerOptions: {
                anthropic: {
                  thinking: {
                    type: 'enabled',
                    budgetTokens: 32000,
                  },
                } satisfies AnthropicProviderOptions,
              },
            },
          }),
        }),
      },
      fallbackProvider: anthropic,
    }),

    // limit a provider to certain models without a fallback
    groq: customProvider({
      languageModels: {
        'gemma2-9b-it': groq('gemma2-9b-it'),
        'qwen-qwq-32b': groq('qwen-qwq-32b'),
      },
    }),
  },
  { separator: ' > ' },
);

// usage:
const model = registry.languageModel('anthropic > reasoning');
```

## Global Provider Configuration

AI SDK 5 introduced a global provider feature that allows you to specify a model using just a plain model ID string:

```ts
import { streamText } from 'ai';
__PROVIDER_IMPORT__;

const result = await streamText({
  model: __MODEL__, // Uses the global provider (defaults to gateway)
  prompt: 'Invent a new holiday and describe its traditions.',
});
```

By default, the global provider is set to the Vercel AI Gateway.

### Customizing the Global Provider

You can set your own preferred global provider:

```ts filename="setup.ts"
import { openai } from '@ai-sdk/openai';

// Initialize once during startup:
globalThis.AI_SDK_DEFAULT_PROVIDER = openai;
```

```ts filename="app.ts"
import { streamText } from 'ai';

const result = await streamText({
  model: 'gpt-5.1', // Uses OpenAI provider without prefix
  prompt: 'Invent a new holiday and describe its traditions.',
});
```

This simplifies provider usage and makes it easier to switch between providers without changing your model references throughout your codebase.
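
For example, combining the global provider with a custom provider of aliases keeps all model choices in one file: register the alias provider as the global default, then refer to models by alias everywhere else. This is a hedged sketch, assuming the global provider accepts any provider instance, including one created with `customProvider`:

```ts filename="setup.ts"
import { customProvider, gateway } from 'ai';

// custom provider that exposes only alias names (as in the examples above)
const myProvider = customProvider({
  languageModels: {
    fast: gateway('anthropic/claude-haiku-4.5'),
    reasoning: gateway('openai/gpt-5.1'),
  },
});

// register it as the global default once during startup:
globalThis.AI_SDK_DEFAULT_PROVIDER = myProvider;
```

```ts filename="app.ts"
import { generateText } from 'ai';

const { text } = await generateText({
  model: 'reasoning', // resolves through the global custom provider
  prompt: 'Invent a new holiday and describe its traditions.',
});
```
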
@@ -0,0 +1,149 @@ package/docs/03-ai-sdk-core/50-error-handling.mdx
---
title: Error Handling
description: Learn how to handle errors in the AI SDK Core
---

# Error Handling

## Handling regular errors

Regular errors are thrown and can be handled using the `try/catch` block.

```ts highlight="3,8-10"
import { generateText } from 'ai';
__PROVIDER_IMPORT__;

try {
  const { text } = await generateText({
    model: __MODEL__,
    prompt: 'Write a vegetarian lasagna recipe for 4 people.',
  });
} catch (error) {
  // handle error
}
```

See [Error Types](/docs/reference/ai-sdk-errors) for more information on the different types of errors that may be thrown.

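The AI SDK error classes provide static `isInstance` guards that you can use to branch on specific failure modes after catching. A minimal sketch (assuming the `APICallError` and `RetryError` exports from `ai` and fields such as `statusCode`, `url`, and `reason` from their reference pages):

```ts
import { generateText, APICallError, RetryError } from 'ai';
__PROVIDER_IMPORT__;

try {
  const { text } = await generateText({
    model: __MODEL__,
    prompt: 'Write a vegetarian lasagna recipe for 4 people.',
  });
} catch (error) {
  if (APICallError.isInstance(error)) {
    // the provider API call itself failed (e.g. a 4xx/5xx response)
    console.error('API call failed:', error.statusCode, error.url);
  } else if (RetryError.isInstance(error)) {
    // automatic retries were exhausted
    console.error('Retries exhausted:', error.reason);
  } else {
    throw error; // rethrow anything unexpected
  }
}
```
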
## Handling streaming errors (simple streams)

When errors occur during streams that do not support error chunks,
the error is thrown as a regular error.
You can handle these errors using the `try/catch` block.

```ts highlight="3,12-14"
import { streamText } from 'ai';
__PROVIDER_IMPORT__;

try {
  const { textStream } = streamText({
    model: __MODEL__,
    prompt: 'Write a vegetarian lasagna recipe for 4 people.',
  });

  for await (const textPart of textStream) {
    process.stdout.write(textPart);
  }
} catch (error) {
  // handle error
}
```

## Handling streaming errors (streaming with `error` support)

Full streams support error parts.
You can handle those parts in the same way as other parts.
It is recommended to also add a try-catch block for errors that
happen outside of streaming.

```ts highlight="13-21"
import { streamText } from 'ai';
__PROVIDER_IMPORT__;

try {
  const { fullStream } = streamText({
    model: __MODEL__,
    prompt: 'Write a vegetarian lasagna recipe for 4 people.',
  });

  for await (const part of fullStream) {
    switch (part.type) {
      // ... handle other part types

      case 'error': {
        const error = part.error;
        // handle error
        break;
      }

      case 'abort': {
        // handle stream abort
        break;
      }

      case 'tool-error': {
        const error = part.error;
        // handle error
        break;
      }
    }
  }
} catch (error) {
  // handle error
}
```

## Handling stream aborts

When streams are aborted (e.g., via a chat stop button), you may want to perform cleanup operations such as updating stored messages in your UI. Use the `onAbort` callback to handle these cases.

The `onAbort` callback is called when a stream is aborted via `AbortSignal`, but `onFinish` is not called. This ensures you can still update your UI state appropriately.

```ts highlight="5-9"
import { streamText } from 'ai';
__PROVIDER_IMPORT__;

const { textStream } = streamText({
  model: __MODEL__,
  prompt: 'Write a vegetarian lasagna recipe for 4 people.',
  onAbort: ({ steps }) => {
    // Update stored messages or perform cleanup
    console.log('Stream aborted after', steps.length, 'steps');
  },
  onFinish: ({ steps, totalUsage }) => {
    // This is called on normal completion
    console.log('Stream completed normally');
  },
});

for await (const textPart of textStream) {
  process.stdout.write(textPart);
}
```

The `onAbort` callback receives:

- `steps`: An array of all completed steps before the abort

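For example, you could use `steps` to persist whatever was generated before the abort. This is a sketch only: `savePartialMessage` is a hypothetical helper, and it assumes each completed step exposes the `text` it generated:

```ts
import { streamText } from 'ai';
__PROVIDER_IMPORT__;

const result = streamText({
  model: __MODEL__,
  prompt: 'Write a vegetarian lasagna recipe for 4 people.',
  onAbort: async ({ steps }) => {
    // join the text generated by the completed steps and store it
    const partialText = steps.map(step => step.text).join('');
    await savePartialMessage(partialText); // hypothetical persistence helper
  },
});

// consume result.textStream as shown above
```
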
You can also handle abort events directly in the stream:

```ts highlight="10-13"
import { streamText } from 'ai';
__PROVIDER_IMPORT__;

const { fullStream } = streamText({
  model: __MODEL__,
  prompt: 'Write a vegetarian lasagna recipe for 4 people.',
});

for await (const chunk of fullStream) {
  switch (chunk.type) {
    case 'abort': {
      // Handle abort directly in stream
      console.log('Stream was aborted');
      break;
    }
    // ... handle other part types
  }
}
```

@@ -0,0 +1,218 @@ package/docs/03-ai-sdk-core/55-testing.mdx
---
title: Testing
description: Learn how to use AI SDK Core mock providers for testing.
---

# Testing

Testing language models can be challenging because they are non-deterministic
and calling them is slow and expensive.

To enable you to unit test your code that uses the AI SDK, the AI SDK Core
includes mock providers and test helpers. You can import the following helpers from `ai/test`:

- `MockEmbeddingModelV3`: A mock embedding model using the [embedding model v3 specification](https://github.com/vercel/ai/blob/main/packages/provider/src/embedding-model/v3/embedding-model-v3.ts).
- `MockLanguageModelV3`: A mock language model using the [language model v3 specification](https://github.com/vercel/ai/blob/main/packages/provider/src/language-model/v3/language-model-v3.ts).
- `mockId`: Provides an incrementing integer ID.
- `mockValues`: Iterates over an array of values with each call. Returns the last value when the array is exhausted.
- [`simulateReadableStream`](/docs/reference/ai-sdk-core/simulate-readable-stream): Simulates a readable stream with delays.

With mock providers and test helpers, you can control the output of the AI SDK
and test your code in a repeatable and deterministic way without actually calling
a language model provider.

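For example, `mockId` and `mockValues` make generated IDs and other varying values deterministic in assertions. A minimal sketch (the exact call signatures are assumptions; check the `ai/test` typings in your installed version):

```ts
import { mockId, mockValues } from 'ai/test';

// mockId returns a generator that yields an incrementing id on each call,
// so ids stay stable across test runs.
const generateId = mockId();
const first = generateId();
const second = generateId(); // differs from `first` in a predictable way

// mockValues returns each provided value in turn and then
// keeps returning the last value once the array is exhausted.
const nextValue = mockValues('a', 'b', 'c');
nextValue(); // 'a'
nextValue(); // 'b'
nextValue(); // 'c'
nextValue(); // 'c' (last value repeats)
```
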
## Examples

You can use the test helpers with the AI SDK Core functions in your unit tests:

### generateText

```ts
import { generateText } from 'ai';
import { MockLanguageModelV3 } from 'ai/test';

const result = await generateText({
  model: new MockLanguageModelV3({
    doGenerate: async () => ({
      content: [{ type: 'text', text: `Hello, world!` }],
      finishReason: { unified: 'stop', raw: undefined },
      usage: {
        inputTokens: {
          total: 10,
          noCache: 10,
          cacheRead: undefined,
          cacheWrite: undefined,
        },
        outputTokens: {
          total: 20,
          text: 20,
          reasoning: undefined,
        },
      },
      warnings: [],
    }),
  }),
  prompt: 'Hello, test!',
});
```

### streamText

```ts
import { streamText, simulateReadableStream } from 'ai';
import { MockLanguageModelV3 } from 'ai/test';

const result = streamText({
  model: new MockLanguageModelV3({
    doStream: async () => ({
      stream: simulateReadableStream({
        chunks: [
          { type: 'text-start', id: 'text-1' },
          { type: 'text-delta', id: 'text-1', delta: 'Hello' },
          { type: 'text-delta', id: 'text-1', delta: ', ' },
          { type: 'text-delta', id: 'text-1', delta: 'world!' },
          { type: 'text-end', id: 'text-1' },
          {
            type: 'finish',
            finishReason: { unified: 'stop', raw: undefined },
            logprobs: undefined,
            usage: {
              inputTokens: {
                total: 3,
                noCache: 3,
                cacheRead: undefined,
                cacheWrite: undefined,
              },
              outputTokens: {
                total: 10,
                text: 10,
                reasoning: undefined,
              },
            },
          },
        ],
      }),
    }),
  }),
  prompt: 'Hello, test!',
});
```

### generateObject

```ts
import { generateObject } from 'ai';
import { MockLanguageModelV3 } from 'ai/test';
import { z } from 'zod';

const result = await generateObject({
  model: new MockLanguageModelV3({
    doGenerate: async () => ({
      content: [{ type: 'text', text: `{"content":"Hello, world!"}` }],
      finishReason: { unified: 'stop', raw: undefined },
      usage: {
        inputTokens: {
          total: 10,
          noCache: 10,
          cacheRead: undefined,
          cacheWrite: undefined,
        },
        outputTokens: {
          total: 20,
          text: 20,
          reasoning: undefined,
        },
      },
      warnings: [],
    }),
  }),
  schema: z.object({ content: z.string() }),
  prompt: 'Hello, test!',
});
```

### streamObject

```ts
import { streamObject, simulateReadableStream } from 'ai';
import { MockLanguageModelV3 } from 'ai/test';
import { z } from 'zod';

const result = streamObject({
  model: new MockLanguageModelV3({
    doStream: async () => ({
      stream: simulateReadableStream({
        chunks: [
          { type: 'text-start', id: 'text-1' },
          { type: 'text-delta', id: 'text-1', delta: '{ ' },
          { type: 'text-delta', id: 'text-1', delta: '"content": ' },
          { type: 'text-delta', id: 'text-1', delta: `"Hello, ` },
          { type: 'text-delta', id: 'text-1', delta: `world` },
          { type: 'text-delta', id: 'text-1', delta: `!"` },
          { type: 'text-delta', id: 'text-1', delta: ' }' },
          { type: 'text-end', id: 'text-1' },
          {
            type: 'finish',
            finishReason: { unified: 'stop', raw: undefined },
            logprobs: undefined,
            usage: {
              inputTokens: {
                total: 3,
                noCache: 3,
                cacheRead: undefined,
                cacheWrite: undefined,
              },
              outputTokens: {
                total: 10,
                text: 10,
                reasoning: undefined,
              },
            },
          },
        ],
      }),
    }),
  }),
  schema: z.object({ content: z.string() }),
  prompt: 'Hello, test!',
});
```

### Simulate UI Message Stream Responses

You can also simulate [UI Message Stream](/docs/ai-sdk-ui/stream-protocol#ui-message-stream) responses for testing,
debugging, or demonstration purposes.

Here is a Next.js example:

```ts filename="route.ts"
import { simulateReadableStream } from 'ai';

export async function POST(req: Request) {
  return new Response(
    simulateReadableStream({
      initialDelayInMs: 1000, // Delay before the first chunk
      chunkDelayInMs: 300, // Delay between chunks
      chunks: [
        `data: {"type":"start","messageId":"msg-123"}\n\n`,
        `data: {"type":"text-start","id":"text-1"}\n\n`,
        `data: {"type":"text-delta","id":"text-1","delta":"This"}\n\n`,
        `data: {"type":"text-delta","id":"text-1","delta":" is an"}\n\n`,
        `data: {"type":"text-delta","id":"text-1","delta":" example."}\n\n`,
        `data: {"type":"text-end","id":"text-1"}\n\n`,
        `data: {"type":"finish"}\n\n`,
        `data: [DONE]\n\n`,
      ],
    }).pipeThrough(new TextEncoderStream()),
    {
      status: 200,
      headers: {
        'Content-Type': 'text/event-stream',
        'Cache-Control': 'no-cache',
        Connection: 'keep-alive',
        'x-vercel-ai-ui-message-stream': 'v1',
      },
    },
  );
}
```