ai 6.0.30 → 6.0.32

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (250)
  1. package/CHANGELOG.md +13 -0
  2. package/dist/index.js +1 -1
  3. package/dist/index.mjs +1 -1
  4. package/dist/internal/index.js +1 -1
  5. package/dist/internal/index.mjs +1 -1
  6. package/docs/00-introduction/index.mdx +76 -0
  7. package/docs/02-foundations/01-overview.mdx +43 -0
  8. package/docs/02-foundations/02-providers-and-models.mdx +163 -0
  9. package/docs/02-foundations/03-prompts.mdx +620 -0
  10. package/docs/02-foundations/04-tools.mdx +160 -0
  11. package/docs/02-foundations/05-streaming.mdx +62 -0
  12. package/docs/02-foundations/index.mdx +43 -0
  13. package/docs/02-getting-started/00-choosing-a-provider.mdx +110 -0
  14. package/docs/02-getting-started/01-navigating-the-library.mdx +85 -0
  15. package/docs/02-getting-started/02-nextjs-app-router.mdx +556 -0
  16. package/docs/02-getting-started/03-nextjs-pages-router.mdx +542 -0
  17. package/docs/02-getting-started/04-svelte.mdx +627 -0
  18. package/docs/02-getting-started/05-nuxt.mdx +566 -0
  19. package/docs/02-getting-started/06-nodejs.mdx +512 -0
  20. package/docs/02-getting-started/07-expo.mdx +766 -0
  21. package/docs/02-getting-started/08-tanstack-start.mdx +583 -0
  22. package/docs/02-getting-started/index.mdx +44 -0
  23. package/docs/03-agents/01-overview.mdx +96 -0
  24. package/docs/03-agents/02-building-agents.mdx +367 -0
  25. package/docs/03-agents/03-workflows.mdx +370 -0
  26. package/docs/03-agents/04-loop-control.mdx +350 -0
  27. package/docs/03-agents/05-configuring-call-options.mdx +286 -0
  28. package/docs/03-agents/index.mdx +40 -0
  29. package/docs/03-ai-sdk-core/01-overview.mdx +33 -0
  30. package/docs/03-ai-sdk-core/05-generating-text.mdx +600 -0
  31. package/docs/03-ai-sdk-core/10-generating-structured-data.mdx +662 -0
  32. package/docs/03-ai-sdk-core/15-tools-and-tool-calling.mdx +1102 -0
  33. package/docs/03-ai-sdk-core/16-mcp-tools.mdx +375 -0
  34. package/docs/03-ai-sdk-core/20-prompt-engineering.mdx +144 -0
  35. package/docs/03-ai-sdk-core/25-settings.mdx +198 -0
  36. package/docs/03-ai-sdk-core/30-embeddings.mdx +247 -0
  37. package/docs/03-ai-sdk-core/31-reranking.mdx +218 -0
  38. package/docs/03-ai-sdk-core/35-image-generation.mdx +341 -0
  39. package/docs/03-ai-sdk-core/36-transcription.mdx +173 -0
  40. package/docs/03-ai-sdk-core/37-speech.mdx +167 -0
  41. package/docs/03-ai-sdk-core/40-middleware.mdx +480 -0
  42. package/docs/03-ai-sdk-core/45-provider-management.mdx +349 -0
  43. package/docs/03-ai-sdk-core/50-error-handling.mdx +149 -0
  44. package/docs/03-ai-sdk-core/55-testing.mdx +218 -0
  45. package/docs/03-ai-sdk-core/60-telemetry.mdx +313 -0
  46. package/docs/03-ai-sdk-core/65-devtools.mdx +107 -0
  47. package/docs/03-ai-sdk-core/index.mdx +88 -0
  48. package/docs/04-ai-sdk-ui/01-overview.mdx +44 -0
  49. package/docs/04-ai-sdk-ui/02-chatbot.mdx +1313 -0
  50. package/docs/04-ai-sdk-ui/03-chatbot-message-persistence.mdx +535 -0
  51. package/docs/04-ai-sdk-ui/03-chatbot-resume-streams.mdx +263 -0
  52. package/docs/04-ai-sdk-ui/03-chatbot-tool-usage.mdx +682 -0
  53. package/docs/04-ai-sdk-ui/04-generative-user-interfaces.mdx +389 -0
  54. package/docs/04-ai-sdk-ui/05-completion.mdx +186 -0
  55. package/docs/04-ai-sdk-ui/08-object-generation.mdx +344 -0
  56. package/docs/04-ai-sdk-ui/20-streaming-data.mdx +397 -0
  57. package/docs/04-ai-sdk-ui/21-error-handling.mdx +190 -0
  58. package/docs/04-ai-sdk-ui/21-transport.mdx +174 -0
  59. package/docs/04-ai-sdk-ui/24-reading-ui-message-streams.mdx +104 -0
  60. package/docs/04-ai-sdk-ui/25-message-metadata.mdx +152 -0
  61. package/docs/04-ai-sdk-ui/50-stream-protocol.mdx +477 -0
  62. package/docs/04-ai-sdk-ui/index.mdx +64 -0
  63. package/docs/05-ai-sdk-rsc/01-overview.mdx +45 -0
  64. package/docs/05-ai-sdk-rsc/02-streaming-react-components.mdx +209 -0
  65. package/docs/05-ai-sdk-rsc/03-generative-ui-state.mdx +279 -0
  66. package/docs/05-ai-sdk-rsc/03-saving-and-restoring-states.mdx +105 -0
  67. package/docs/05-ai-sdk-rsc/04-multistep-interfaces.mdx +282 -0
  68. package/docs/05-ai-sdk-rsc/05-streaming-values.mdx +158 -0
  69. package/docs/05-ai-sdk-rsc/06-loading-state.mdx +273 -0
  70. package/docs/05-ai-sdk-rsc/08-error-handling.mdx +96 -0
  71. package/docs/05-ai-sdk-rsc/09-authentication.mdx +42 -0
  72. package/docs/05-ai-sdk-rsc/10-migrating-to-ui.mdx +722 -0
  73. package/docs/05-ai-sdk-rsc/index.mdx +58 -0
  74. package/docs/06-advanced/01-prompt-engineering.mdx +96 -0
  75. package/docs/06-advanced/02-stopping-streams.mdx +184 -0
  76. package/docs/06-advanced/03-backpressure.mdx +173 -0
  77. package/docs/06-advanced/04-caching.mdx +169 -0
  78. package/docs/06-advanced/05-multiple-streamables.mdx +68 -0
  79. package/docs/06-advanced/06-rate-limiting.mdx +60 -0
  80. package/docs/06-advanced/07-rendering-ui-with-language-models.mdx +213 -0
  81. package/docs/06-advanced/08-model-as-router.mdx +120 -0
  82. package/docs/06-advanced/09-multistep-interfaces.mdx +115 -0
  83. package/docs/06-advanced/09-sequential-generations.mdx +55 -0
  84. package/docs/06-advanced/10-vercel-deployment-guide.mdx +117 -0
  85. package/docs/06-advanced/index.mdx +11 -0
  86. package/docs/07-reference/01-ai-sdk-core/01-generate-text.mdx +2142 -0
  87. package/docs/07-reference/01-ai-sdk-core/02-stream-text.mdx +3215 -0
  88. package/docs/07-reference/01-ai-sdk-core/03-generate-object.mdx +780 -0
  89. package/docs/07-reference/01-ai-sdk-core/04-stream-object.mdx +1140 -0
  90. package/docs/07-reference/01-ai-sdk-core/05-embed.mdx +190 -0
  91. package/docs/07-reference/01-ai-sdk-core/06-embed-many.mdx +171 -0
  92. package/docs/07-reference/01-ai-sdk-core/06-rerank.mdx +309 -0
  93. package/docs/07-reference/01-ai-sdk-core/10-generate-image.mdx +227 -0
  94. package/docs/07-reference/01-ai-sdk-core/11-transcribe.mdx +138 -0
  95. package/docs/07-reference/01-ai-sdk-core/12-generate-speech.mdx +214 -0
  96. package/docs/07-reference/01-ai-sdk-core/15-agent.mdx +203 -0
  97. package/docs/07-reference/01-ai-sdk-core/16-tool-loop-agent.mdx +449 -0
  98. package/docs/07-reference/01-ai-sdk-core/17-create-agent-ui-stream.mdx +148 -0
  99. package/docs/07-reference/01-ai-sdk-core/18-create-agent-ui-stream-response.mdx +168 -0
  100. package/docs/07-reference/01-ai-sdk-core/18-pipe-agent-ui-stream-to-response.mdx +144 -0
  101. package/docs/07-reference/01-ai-sdk-core/20-tool.mdx +196 -0
  102. package/docs/07-reference/01-ai-sdk-core/22-dynamic-tool.mdx +175 -0
  103. package/docs/07-reference/01-ai-sdk-core/23-create-mcp-client.mdx +410 -0
  104. package/docs/07-reference/01-ai-sdk-core/24-mcp-stdio-transport.mdx +68 -0
  105. package/docs/07-reference/01-ai-sdk-core/25-json-schema.mdx +94 -0
  106. package/docs/07-reference/01-ai-sdk-core/26-zod-schema.mdx +109 -0
  107. package/docs/07-reference/01-ai-sdk-core/27-valibot-schema.mdx +55 -0
  108. package/docs/07-reference/01-ai-sdk-core/28-output.mdx +342 -0
  109. package/docs/07-reference/01-ai-sdk-core/30-model-message.mdx +415 -0
  110. package/docs/07-reference/01-ai-sdk-core/31-ui-message.mdx +246 -0
  111. package/docs/07-reference/01-ai-sdk-core/32-validate-ui-messages.mdx +101 -0
  112. package/docs/07-reference/01-ai-sdk-core/33-safe-validate-ui-messages.mdx +113 -0
  113. package/docs/07-reference/01-ai-sdk-core/40-provider-registry.mdx +182 -0
  114. package/docs/07-reference/01-ai-sdk-core/42-custom-provider.mdx +121 -0
  115. package/docs/07-reference/01-ai-sdk-core/50-cosine-similarity.mdx +52 -0
  116. package/docs/07-reference/01-ai-sdk-core/60-wrap-language-model.mdx +59 -0
  117. package/docs/07-reference/01-ai-sdk-core/61-wrap-image-model.mdx +64 -0
  118. package/docs/07-reference/01-ai-sdk-core/65-language-model-v2-middleware.mdx +46 -0
  119. package/docs/07-reference/01-ai-sdk-core/66-extract-reasoning-middleware.mdx +68 -0
  120. package/docs/07-reference/01-ai-sdk-core/67-simulate-streaming-middleware.mdx +71 -0
  121. package/docs/07-reference/01-ai-sdk-core/68-default-settings-middleware.mdx +80 -0
  122. package/docs/07-reference/01-ai-sdk-core/69-add-tool-input-examples-middleware.mdx +155 -0
  123. package/docs/07-reference/01-ai-sdk-core/70-extract-json-middleware.mdx +147 -0
  124. package/docs/07-reference/01-ai-sdk-core/70-step-count-is.mdx +84 -0
  125. package/docs/07-reference/01-ai-sdk-core/71-has-tool-call.mdx +120 -0
  126. package/docs/07-reference/01-ai-sdk-core/75-simulate-readable-stream.mdx +94 -0
  127. package/docs/07-reference/01-ai-sdk-core/80-smooth-stream.mdx +145 -0
  128. package/docs/07-reference/01-ai-sdk-core/90-generate-id.mdx +43 -0
  129. package/docs/07-reference/01-ai-sdk-core/91-create-id-generator.mdx +89 -0
  130. package/docs/07-reference/01-ai-sdk-core/index.mdx +159 -0
  131. package/docs/07-reference/02-ai-sdk-ui/01-use-chat.mdx +446 -0
  132. package/docs/07-reference/02-ai-sdk-ui/02-use-completion.mdx +179 -0
  133. package/docs/07-reference/02-ai-sdk-ui/03-use-object.mdx +178 -0
  134. package/docs/07-reference/02-ai-sdk-ui/31-convert-to-model-messages.mdx +230 -0
  135. package/docs/07-reference/02-ai-sdk-ui/32-prune-messages.mdx +108 -0
  136. package/docs/07-reference/02-ai-sdk-ui/40-create-ui-message-stream.mdx +151 -0
  137. package/docs/07-reference/02-ai-sdk-ui/41-create-ui-message-stream-response.mdx +113 -0
  138. package/docs/07-reference/02-ai-sdk-ui/42-pipe-ui-message-stream-to-response.mdx +73 -0
  139. package/docs/07-reference/02-ai-sdk-ui/43-read-ui-message-stream.mdx +57 -0
  140. package/docs/07-reference/02-ai-sdk-ui/46-infer-ui-tools.mdx +99 -0
  141. package/docs/07-reference/02-ai-sdk-ui/47-infer-ui-tool.mdx +75 -0
  142. package/docs/07-reference/02-ai-sdk-ui/50-direct-chat-transport.mdx +333 -0
  143. package/docs/07-reference/02-ai-sdk-ui/index.mdx +89 -0
  144. package/docs/07-reference/03-ai-sdk-rsc/01-stream-ui.mdx +767 -0
  145. package/docs/07-reference/03-ai-sdk-rsc/02-create-ai.mdx +90 -0
  146. package/docs/07-reference/03-ai-sdk-rsc/03-create-streamable-ui.mdx +91 -0
  147. package/docs/07-reference/03-ai-sdk-rsc/04-create-streamable-value.mdx +48 -0
  148. package/docs/07-reference/03-ai-sdk-rsc/05-read-streamable-value.mdx +78 -0
  149. package/docs/07-reference/03-ai-sdk-rsc/06-get-ai-state.mdx +50 -0
  150. package/docs/07-reference/03-ai-sdk-rsc/07-get-mutable-ai-state.mdx +70 -0
  151. package/docs/07-reference/03-ai-sdk-rsc/08-use-ai-state.mdx +26 -0
  152. package/docs/07-reference/03-ai-sdk-rsc/09-use-actions.mdx +42 -0
  153. package/docs/07-reference/03-ai-sdk-rsc/10-use-ui-state.mdx +35 -0
  154. package/docs/07-reference/03-ai-sdk-rsc/11-use-streamable-value.mdx +46 -0
  155. package/docs/07-reference/03-ai-sdk-rsc/20-render.mdx +262 -0
  156. package/docs/07-reference/03-ai-sdk-rsc/index.mdx +67 -0
  157. package/docs/07-reference/04-stream-helpers/01-ai-stream.mdx +89 -0
  158. package/docs/07-reference/04-stream-helpers/02-streaming-text-response.mdx +79 -0
  159. package/docs/07-reference/04-stream-helpers/05-stream-to-response.mdx +108 -0
  160. package/docs/07-reference/04-stream-helpers/07-openai-stream.mdx +77 -0
  161. package/docs/07-reference/04-stream-helpers/08-anthropic-stream.mdx +79 -0
  162. package/docs/07-reference/04-stream-helpers/09-aws-bedrock-stream.mdx +91 -0
  163. package/docs/07-reference/04-stream-helpers/10-aws-bedrock-anthropic-stream.mdx +96 -0
  164. package/docs/07-reference/04-stream-helpers/10-aws-bedrock-messages-stream.mdx +96 -0
  165. package/docs/07-reference/04-stream-helpers/11-aws-bedrock-cohere-stream.mdx +93 -0
  166. package/docs/07-reference/04-stream-helpers/12-aws-bedrock-llama-2-stream.mdx +93 -0
  167. package/docs/07-reference/04-stream-helpers/13-cohere-stream.mdx +78 -0
  168. package/docs/07-reference/04-stream-helpers/14-google-generative-ai-stream.mdx +85 -0
  169. package/docs/07-reference/04-stream-helpers/15-hugging-face-stream.mdx +84 -0
  170. package/docs/07-reference/04-stream-helpers/16-langchain-adapter.mdx +98 -0
  171. package/docs/07-reference/04-stream-helpers/16-llamaindex-adapter.mdx +70 -0
  172. package/docs/07-reference/04-stream-helpers/17-mistral-stream.mdx +81 -0
  173. package/docs/07-reference/04-stream-helpers/18-replicate-stream.mdx +83 -0
  174. package/docs/07-reference/04-stream-helpers/19-inkeep-stream.mdx +80 -0
  175. package/docs/07-reference/04-stream-helpers/index.mdx +103 -0
  176. package/docs/07-reference/05-ai-sdk-errors/ai-api-call-error.mdx +30 -0
  177. package/docs/07-reference/05-ai-sdk-errors/ai-download-error.mdx +27 -0
  178. package/docs/07-reference/05-ai-sdk-errors/ai-empty-response-body-error.mdx +24 -0
  179. package/docs/07-reference/05-ai-sdk-errors/ai-invalid-argument-error.mdx +26 -0
  180. package/docs/07-reference/05-ai-sdk-errors/ai-invalid-data-content-error.mdx +25 -0
  181. package/docs/07-reference/05-ai-sdk-errors/ai-invalid-data-content.mdx +26 -0
  182. package/docs/07-reference/05-ai-sdk-errors/ai-invalid-message-role-error.mdx +25 -0
  183. package/docs/07-reference/05-ai-sdk-errors/ai-invalid-prompt-error.mdx +47 -0
  184. package/docs/07-reference/05-ai-sdk-errors/ai-invalid-response-data-error.mdx +25 -0
  185. package/docs/07-reference/05-ai-sdk-errors/ai-invalid-tool-approval-error.mdx +25 -0
  186. package/docs/07-reference/05-ai-sdk-errors/ai-invalid-tool-input-error.mdx +27 -0
  187. package/docs/07-reference/05-ai-sdk-errors/ai-json-parse-error.mdx +25 -0
  188. package/docs/07-reference/05-ai-sdk-errors/ai-load-api-key-error.mdx +24 -0
  189. package/docs/07-reference/05-ai-sdk-errors/ai-load-setting-error.mdx +24 -0
  190. package/docs/07-reference/05-ai-sdk-errors/ai-message-conversion-error.mdx +25 -0
  191. package/docs/07-reference/05-ai-sdk-errors/ai-no-content-generated-error.mdx +24 -0
  192. package/docs/07-reference/05-ai-sdk-errors/ai-no-image-generated-error.mdx +36 -0
  193. package/docs/07-reference/05-ai-sdk-errors/ai-no-object-generated-error.mdx +43 -0
  194. package/docs/07-reference/05-ai-sdk-errors/ai-no-speech-generated-error.mdx +25 -0
  195. package/docs/07-reference/05-ai-sdk-errors/ai-no-such-model-error.mdx +26 -0
  196. package/docs/07-reference/05-ai-sdk-errors/ai-no-such-provider-error.mdx +28 -0
  197. package/docs/07-reference/05-ai-sdk-errors/ai-no-such-tool-error.mdx +26 -0
  198. package/docs/07-reference/05-ai-sdk-errors/ai-no-transcript-generated-error.mdx +25 -0
  199. package/docs/07-reference/05-ai-sdk-errors/ai-retry-error.mdx +27 -0
  200. package/docs/07-reference/05-ai-sdk-errors/ai-too-many-embedding-values-for-call-error.mdx +27 -0
  201. package/docs/07-reference/05-ai-sdk-errors/ai-tool-call-not-found-for-approval-error.mdx +26 -0
  202. package/docs/07-reference/05-ai-sdk-errors/ai-tool-call-repair-error.mdx +28 -0
  203. package/docs/07-reference/05-ai-sdk-errors/ai-type-validation-error.mdx +25 -0
  204. package/docs/07-reference/05-ai-sdk-errors/ai-unsupported-functionality-error.mdx +25 -0
  205. package/docs/07-reference/05-ai-sdk-errors/index.mdx +38 -0
  206. package/docs/07-reference/index.mdx +34 -0
  207. package/docs/08-migration-guides/00-versioning.mdx +46 -0
  208. package/docs/08-migration-guides/24-migration-guide-6-0.mdx +823 -0
  209. package/docs/08-migration-guides/25-migration-guide-5-0-data.mdx +882 -0
  210. package/docs/08-migration-guides/26-migration-guide-5-0.mdx +3427 -0
  211. package/docs/08-migration-guides/27-migration-guide-4-2.mdx +99 -0
  212. package/docs/08-migration-guides/28-migration-guide-4-1.mdx +14 -0
  213. package/docs/08-migration-guides/29-migration-guide-4-0.mdx +1157 -0
  214. package/docs/08-migration-guides/36-migration-guide-3-4.mdx +14 -0
  215. package/docs/08-migration-guides/37-migration-guide-3-3.mdx +64 -0
  216. package/docs/08-migration-guides/38-migration-guide-3-2.mdx +46 -0
  217. package/docs/08-migration-guides/39-migration-guide-3-1.mdx +168 -0
  218. package/docs/08-migration-guides/index.mdx +22 -0
  219. package/docs/09-troubleshooting/01-azure-stream-slow.mdx +33 -0
  220. package/docs/09-troubleshooting/02-client-side-function-calls-not-invoked.mdx +22 -0
  221. package/docs/09-troubleshooting/03-server-actions-in-client-components.mdx +40 -0
  222. package/docs/09-troubleshooting/04-strange-stream-output.mdx +36 -0
  223. package/docs/09-troubleshooting/05-streamable-ui-errors.mdx +16 -0
  224. package/docs/09-troubleshooting/05-tool-invocation-missing-result.mdx +106 -0
  225. package/docs/09-troubleshooting/06-streaming-not-working-when-deployed.mdx +31 -0
  226. package/docs/09-troubleshooting/06-streaming-not-working-when-proxied.mdx +31 -0
  227. package/docs/09-troubleshooting/06-timeout-on-vercel.mdx +60 -0
  228. package/docs/09-troubleshooting/07-unclosed-streams.mdx +34 -0
  229. package/docs/09-troubleshooting/08-use-chat-failed-to-parse-stream.mdx +26 -0
  230. package/docs/09-troubleshooting/09-client-stream-error.mdx +25 -0
  231. package/docs/09-troubleshooting/10-use-chat-tools-no-response.mdx +32 -0
  232. package/docs/09-troubleshooting/11-use-chat-custom-request-options.mdx +149 -0
  233. package/docs/09-troubleshooting/12-typescript-performance-zod.mdx +46 -0
  234. package/docs/09-troubleshooting/12-use-chat-an-error-occurred.mdx +59 -0
  235. package/docs/09-troubleshooting/13-repeated-assistant-messages.mdx +73 -0
  236. package/docs/09-troubleshooting/14-stream-abort-handling.mdx +73 -0
  237. package/docs/09-troubleshooting/14-tool-calling-with-structured-outputs.mdx +48 -0
  238. package/docs/09-troubleshooting/15-abort-breaks-resumable-streams.mdx +55 -0
  239. package/docs/09-troubleshooting/15-stream-text-not-working.mdx +33 -0
  240. package/docs/09-troubleshooting/16-streaming-status-delay.mdx +63 -0
  241. package/docs/09-troubleshooting/17-use-chat-stale-body-data.mdx +141 -0
  242. package/docs/09-troubleshooting/18-ontoolcall-type-narrowing.mdx +66 -0
  243. package/docs/09-troubleshooting/19-unsupported-model-version.mdx +50 -0
  244. package/docs/09-troubleshooting/20-no-object-generated-content-filter.mdx +72 -0
  245. package/docs/09-troubleshooting/30-model-is-not-assignable-to-type.mdx +21 -0
  246. package/docs/09-troubleshooting/40-typescript-cannot-find-namespace-jsx.mdx +24 -0
  247. package/docs/09-troubleshooting/50-react-maximum-update-depth-exceeded.mdx +39 -0
  248. package/docs/09-troubleshooting/60-jest-cannot-find-module-ai-rsc.mdx +22 -0
  249. package/docs/09-troubleshooting/index.mdx +11 -0
  250. package/package.json +7 -3
package/docs/03-ai-sdk-core/37-speech.mdx
@@ -0,0 +1,167 @@
1
+ ---
2
+ title: Speech
3
+ description: Learn how to generate speech from text with the AI SDK.
4
+ ---
5
+
6
+ # Speech
7
+
8
+ <Note type="warning">Speech is an experimental feature.</Note>
9
+
10
+ The AI SDK provides the [`generateSpeech`](/docs/reference/ai-sdk-core/generate-speech)
11
+ function to generate speech from text using a speech model.
12
+
13
+ ```ts
14
+ import { experimental_generateSpeech as generateSpeech } from 'ai';
15
+ import { openai } from '@ai-sdk/openai';
16
+
17
+ const audio = await generateSpeech({
18
+ model: openai.speech('tts-1'),
19
+ text: 'Hello, world!',
20
+ voice: 'alloy',
21
+ });
22
+ ```
23
+
24
+ ### Language Setting
25
+
26
+ You can specify the language for speech generation (provider support varies):
27
+
28
+ ```ts
29
+ import { experimental_generateSpeech as generateSpeech } from 'ai';
30
+ import { lmnt } from '@ai-sdk/lmnt';
31
+
32
+ const audio = await generateSpeech({
33
+ model: lmnt.speech('aurora'),
34
+ text: 'Hola, mundo!',
35
+ language: 'es', // Spanish
36
+ });
37
+ ```
38
+
39
+ To access the generated audio:
40
+
41
+ ```ts
42
+ const audioData = audio.audioData; // audio data, e.g. a Uint8Array
43
+ ```
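+
+ For example, in a Node.js environment you could write the audio data to a file. A minimal sketch (the file name is illustrative; the output format depends on the chosen model and provider):
+
+ ```ts
+ import { writeFile } from 'node:fs/promises';
+
+ // persist the generated audio data (e.g. a Uint8Array) to disk
+ await writeFile('speech.mp3', audioData);
+ ```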
44
+
45
+ ## Settings
46
+
47
+ ### Provider-Specific Settings
48
+
49
+ You can set model-specific settings with the `providerOptions` parameter.
50
+
51
+ ```ts highlight="7-11"
52
+ import { experimental_generateSpeech as generateSpeech } from 'ai';
53
+ import { openai } from '@ai-sdk/openai';
54
+
55
+ const audio = await generateSpeech({
56
+ model: openai.speech('tts-1'),
57
+ text: 'Hello, world!',
58
+ providerOptions: {
59
+ openai: {
60
+ // ...
61
+ },
62
+ },
63
+ });
64
+ ```
65
+
66
+ ### Abort Signals and Timeouts
67
+
68
+ `generateSpeech` accepts an optional `abortSignal` parameter of
69
+ type [`AbortSignal`](https://developer.mozilla.org/en-US/docs/Web/API/AbortSignal)
70
+ that you can use to abort the speech generation process or set a timeout.
71
+
72
+ ```ts highlight="7"
73
+ import { openai } from '@ai-sdk/openai';
74
+ import { experimental_generateSpeech as generateSpeech } from 'ai';
75
+
76
+ const audio = await generateSpeech({
77
+ model: openai.speech('tts-1'),
78
+ text: 'Hello, world!',
79
+ abortSignal: AbortSignal.timeout(1000), // Abort after 1 second
80
+ });
81
+ ```
82
+
83
+ ### Custom Headers
84
+
85
+ `generateSpeech` accepts an optional `headers` parameter of type `Record<string, string>`
86
+ that you can use to add custom headers to the speech generation request.
87
+
88
+ ```ts highlight="7"
89
+ import { openai } from '@ai-sdk/openai';
90
+ import { experimental_generateSpeech as generateSpeech } from 'ai';
91
+
92
+ const audio = await generateSpeech({
93
+ model: openai.speech('tts-1'),
94
+ text: 'Hello, world!',
95
+ headers: { 'X-Custom-Header': 'custom-value' },
96
+ });
97
+ ```
98
+
99
+ ### Warnings
100
+
101
+ Warnings (e.g. unsupported parameters) are available on the `warnings` property.
102
+
103
+ ```ts
104
+ import { openai } from '@ai-sdk/openai';
105
+ import { experimental_generateSpeech as generateSpeech } from 'ai';
106
+
107
+ const audio = await generateSpeech({
108
+ model: openai.speech('tts-1'),
109
+ text: 'Hello, world!',
110
+ });
111
+
112
+ const warnings = audio.warnings;
113
+ ```
114
+
115
+ ### Error Handling
116
+
117
+ When `generateSpeech` cannot generate valid audio, it throws an [`AI_NoSpeechGeneratedError`](/docs/reference/ai-sdk-errors/ai-no-speech-generated-error).
118
+
119
+ This error can arise for any of the following reasons:
120
+
121
+ - The model failed to generate a response
122
+ - The model generated a response that could not be parsed
123
+
124
+ The error preserves the following information to help you log the issue:
125
+
126
+ - `responses`: Metadata about the speech model responses, including timestamp, model, and headers.
127
+ - `cause`: The cause of the error. You can use this for more detailed error handling.
128
+
129
+ ```ts
130
+ import {
131
+ experimental_generateSpeech as generateSpeech,
132
+ NoSpeechGeneratedError,
133
+ } from 'ai';
134
+ import { openai } from '@ai-sdk/openai';
135
+
136
+ try {
137
+ await generateSpeech({
138
+ model: openai.speech('tts-1'),
139
+ text: 'Hello, world!',
140
+ });
141
+ } catch (error) {
142
+ if (NoSpeechGeneratedError.isInstance(error)) {
143
+ console.log('AI_NoSpeechGeneratedError');
144
+ console.log('Cause:', error.cause);
145
+ console.log('Responses:', error.responses);
146
+ }
147
+ }
148
+ ```
149
+
150
+ ## Speech Models
151
+
152
+ | Provider | Model |
153
+ | ------------------------------------------------------------------ | ------------------------ |
154
+ | [OpenAI](/providers/ai-sdk-providers/openai#speech-models) | `tts-1` |
155
+ | [OpenAI](/providers/ai-sdk-providers/openai#speech-models) | `tts-1-hd` |
156
+ | [OpenAI](/providers/ai-sdk-providers/openai#speech-models) | `gpt-4o-mini-tts` |
157
+ | [ElevenLabs](/providers/ai-sdk-providers/elevenlabs#speech-models) | `eleven_v3` |
158
+ | [ElevenLabs](/providers/ai-sdk-providers/elevenlabs#speech-models) | `eleven_multilingual_v2` |
159
+ | [ElevenLabs](/providers/ai-sdk-providers/elevenlabs#speech-models) | `eleven_flash_v2_5` |
160
+ | [ElevenLabs](/providers/ai-sdk-providers/elevenlabs#speech-models) | `eleven_flash_v2` |
161
+ | [ElevenLabs](/providers/ai-sdk-providers/elevenlabs#speech-models) | `eleven_turbo_v2_5` |
162
+ | [ElevenLabs](/providers/ai-sdk-providers/elevenlabs#speech-models) | `eleven_turbo_v2` |
163
+ | [LMNT](/providers/ai-sdk-providers/lmnt#speech-models) | `aurora` |
164
+ | [LMNT](/providers/ai-sdk-providers/lmnt#speech-models) | `blizzard` |
165
+ | [Hume](/providers/ai-sdk-providers/hume#speech-models) | `default` |
166
+
167
+ The table above lists a small subset of the speech models supported by the AI SDK providers. For more, see the respective provider documentation.
package/docs/03-ai-sdk-core/40-middleware.mdx
@@ -0,0 +1,480 @@
1
+ ---
2
+ title: Language Model Middleware
3
+ description: Learn how to use middleware to enhance the behavior of language models
4
+ ---
5
+
6
+ # Language Model Middleware
7
+
8
+ Language model middleware is a way to enhance the behavior of language models
9
+ by intercepting and modifying the calls to the language model.
10
+
11
+ It can be used to add features like guardrails, RAG, caching, and logging
12
+ in a language model agnostic way. Such middleware can be developed and
13
+ distributed independently from the language models that it is applied to.
14
+
15
+ ## Using Language Model Middleware
16
+
17
+ You can use language model middleware with the `wrapLanguageModel` function.
18
+ It takes a language model and a language model middleware and returns a new
19
+ language model that incorporates the middleware.
20
+
21
+ ```ts
22
+ import { wrapLanguageModel } from 'ai';
23
+
24
+ const wrappedLanguageModel = wrapLanguageModel({
25
+ model: yourModel,
26
+ middleware: yourLanguageModelMiddleware,
27
+ });
28
+ ```
29
+
30
+ The wrapped language model can be used just like any other language model, e.g. in `streamText`:
31
+
32
+ ```ts highlight="2"
33
+ const result = streamText({
34
+ model: wrappedLanguageModel,
35
+ prompt: 'What cities are in the United States?',
36
+ });
37
+ ```
38
+
39
+ ## Multiple middlewares
40
+
41
+ You can provide multiple middlewares to the `wrapLanguageModel` function.
42
+ The middlewares will be applied in the order they are provided.
43
+
44
+ ```ts
45
+ const wrappedLanguageModel = wrapLanguageModel({
46
+ model: yourModel,
47
+ middleware: [firstMiddleware, secondMiddleware],
48
+ });
49
+
50
+ // applied as: firstMiddleware(secondMiddleware(yourModel))
51
+ ```
52
+
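+ For instance, you could combine two of the built-in middlewares described in the next section (a sketch):
+
+ ```ts
+ import {
+   wrapLanguageModel,
+   defaultSettingsMiddleware,
+   extractReasoningMiddleware,
+ } from 'ai';
+
+ const wrappedLanguageModel = wrapLanguageModel({
+   model: yourModel,
+   middleware: [
+     // applies default settings such as temperature to every call
+     defaultSettingsMiddleware({ settings: { temperature: 0.5 } }),
+     // extracts <think> reasoning from the generated text
+     extractReasoningMiddleware({ tagName: 'think' }),
+   ],
+ });
+ ```
+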
53
+ ## Built-in Middleware
54
+
55
+ The AI SDK comes with several built-in middlewares that you can use to configure language models:
56
+
57
+ - `extractReasoningMiddleware`: Extracts reasoning information from the generated text and exposes it as a `reasoning` property on the result.
58
+ - `extractJsonMiddleware`: Extracts JSON from text content by stripping markdown code fences. Useful when using `Output.object()` with models that wrap JSON responses in code blocks.
59
+ - `simulateStreamingMiddleware`: Simulates streaming behavior with responses from non-streaming language models.
60
+ - `defaultSettingsMiddleware`: Applies default settings to a language model.
61
+ - `addToolInputExamplesMiddleware`: Adds tool input examples to tool descriptions for providers that don't natively support the `inputExamples` property.
62
+
63
+ ### Extract Reasoning
64
+
65
+ Some providers and models expose reasoning information in the generated text using special tags,
66
+ e.g. &lt;think&gt; and &lt;/think&gt;.
67
+
68
+ The `extractReasoningMiddleware` function can be used to extract this reasoning information and expose it as a `reasoning` property on the result.
69
+
70
+ ```ts
71
+ import { wrapLanguageModel, extractReasoningMiddleware } from 'ai';
72
+
73
+ const model = wrapLanguageModel({
74
+ model: yourModel,
75
+ middleware: extractReasoningMiddleware({ tagName: 'think' }),
76
+ });
77
+ ```
78
+
79
+ You can then use that enhanced model in functions like `generateText` and `streamText`.
80
+
81
+ The `extractReasoningMiddleware` function also includes a `startWithReasoning` option.
82
+ When set to `true`, the reasoning tag will be prepended to the generated text.
83
+ This is useful for models that do not include the reasoning tag at the beginning of the response.
84
+ For more details, see the [DeepSeek R1 guide](/docs/guides/r1#deepseek-r1-middleware).
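+
+ For example (a sketch of the option described above):
+
+ ```ts
+ import { wrapLanguageModel, extractReasoningMiddleware } from 'ai';
+
+ const model = wrapLanguageModel({
+   model: yourModel,
+   middleware: extractReasoningMiddleware({
+     tagName: 'think',
+     // treat the response as if it opened with a <think> tag
+     startWithReasoning: true,
+   }),
+ });
+ ```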
85
+
86
+ ### Extract JSON
87
+
88
+ Some models wrap JSON responses in markdown code fences (e.g., ` ```json ... ``` `) even when you request structured output.
89
+
90
+ The `extractJsonMiddleware` function strips these code fences from the response, making it compatible with `Output.object()`.
91
+
92
+ ```ts
93
+ import { generateText, wrapLanguageModel, extractJsonMiddleware, Output } from 'ai';
94
+ import { z } from 'zod';
95
+
96
+ const model = wrapLanguageModel({
97
+ model: yourModel,
98
+ middleware: extractJsonMiddleware(),
99
+ });
100
+
101
+ const result = await generateText({
102
+ model,
103
+ output: Output.object({
104
+ schema: z.object({
105
+ name: z.string(),
106
+ ingredients: z.array(z.string()),
107
+ }),
108
+ }),
109
+ prompt: 'Generate a recipe.',
110
+ });
111
+ ```
112
+
113
+ You can also provide a custom transform function for models that use different formatting:
114
+
115
+ ```ts
116
+ const model = wrapLanguageModel({
117
+ model: yourModel,
118
+ middleware: extractJsonMiddleware({
119
+ transform: text => text.replace(/^PREFIX/, '').replace(/SUFFIX$/, ''),
120
+ }),
121
+ });
122
+ ```
123
+
124
+ ### Simulate Streaming
125
+
126
+ The `simulateStreamingMiddleware` function can be used to simulate streaming behavior with responses from non-streaming language models.
127
+ This is useful when you want to maintain a consistent streaming interface even when using models that only provide complete responses.
128
+
129
+ ```ts
130
+ import { wrapLanguageModel, simulateStreamingMiddleware } from 'ai';
131
+
132
+ const model = wrapLanguageModel({
133
+ model: yourModel,
134
+ middleware: simulateStreamingMiddleware(),
135
+ });
136
+ ```
137
+
138
+ ### Default Settings
139
+
140
+ The `defaultSettingsMiddleware` function can be used to apply default settings to a language model.
141
+
142
+ ```ts
143
+ import { wrapLanguageModel, defaultSettingsMiddleware } from 'ai';
144
+
145
+ const model = wrapLanguageModel({
146
+ model: yourModel,
147
+ middleware: defaultSettingsMiddleware({
148
+ settings: {
149
+ temperature: 0.5,
150
+ maxOutputTokens: 800,
151
+ providerOptions: { openai: { store: false } },
152
+ },
153
+ }),
154
+ });
155
+ ```
156
+
157
+ ### Add Tool Input Examples
158
+
159
+ The `addToolInputExamplesMiddleware` function adds tool input examples to tool descriptions.
160
+ This is useful for providers that don't natively support the `inputExamples` property on tools.
161
+ The middleware serializes the examples into the tool's description text so models can still benefit from seeing example inputs.
162
+
163
+ ```ts
164
+ import { wrapLanguageModel, addToolInputExamplesMiddleware } from 'ai';
165
+
166
+ const model = wrapLanguageModel({
167
+ model: yourModel,
168
+ middleware: addToolInputExamplesMiddleware({
169
+ examplesPrefix: 'Input Examples:',
170
+ }),
171
+ });
172
+ ```
173
+
174
+ When you define a tool with `inputExamples`, the middleware will append them to the tool's description:
175
+
176
+ ```ts
177
+ import { generateText, tool } from 'ai';
178
+ import { z } from 'zod';
179
+
180
+ const result = await generateText({
181
+ model, // wrapped model from above
182
+ tools: {
183
+ weather: tool({
184
+ description: 'Get the weather in a location',
185
+ inputSchema: z.object({
186
+ location: z.string(),
187
+ }),
188
+ inputExamples: [
189
+ { input: { location: 'San Francisco' } },
190
+ { input: { location: 'London' } },
191
+ ],
192
+ }),
193
+ },
194
+ prompt: 'What is the weather in Tokyo?',
195
+ });
196
+ ```
197
+
198
+ The tool description will be transformed to:
199
+
200
+ ```
201
+ Get the weather in a location
202
+
203
+ Input Examples:
204
+ {"location":"San Francisco"}
205
+ {"location":"London"}
206
+ ```
207
+
208
+ #### Options
209
+
210
+ - `examplesPrefix` (required): Prefix text inserted before the examples in the tool description.
211
+ - `formatExample` (optional): A custom formatter function for each example. Receives the example object and its index. Default: `JSON.stringify(example.input)`.
212
+ - `removeInputExamples` (optional): Whether to remove the `inputExamples` property from the tool after adding them to the description. Default: `true`.
213
+
214
+ ```ts
215
+ const model = wrapLanguageModel({
216
+ model: yourModel,
217
+ middleware: addToolInputExamplesMiddleware({
218
+ examplesPrefix: 'Input Examples:',
219
+ formatExample: (example, index) =>
220
+ `${index + 1}. ${JSON.stringify(example.input)}`,
221
+ removeInputExamples: true,
222
+ }),
223
+ });
224
+ ```
225
+
226
+ ## Community Middleware
227
+
228
+ The AI SDK provides a Language Model Middleware specification. Community members can develop middleware that adheres to this specification, making it compatible with the AI SDK ecosystem.
229
+
230
+ Here are some community middlewares that you can explore:
231
+
232
+ ### Custom tool call parser
233
+
234
+ The [Custom tool call parser](https://github.com/minpeter/ai-sdk-tool-call-middleware) middleware extends tool call capabilities to models that don't natively support the OpenAI-style `tools` parameter. This includes many self-hosted and third-party models that lack native function calling features.
235
+
236
+ <Note>
237
+ Using this middleware on models that support native function calls may result
238
+ in unintended performance degradation, so check whether your model supports
239
+ native function calls before deciding to use it.
240
+ </Note>
241
+
242
+ This middleware enables function calling by converting function schemas into prompt instructions and parsing the model's responses into structured function calls. It transforms the JSON function definitions into natural language instructions the model can understand, then analyzes the generated text to extract function call attempts. As a result, you can use the same function calling API across different model providers, even with models that don't natively support the OpenAI-style function calling format.
243
+
244
+ The `@ai-sdk-tool/parser` package offers three middleware variants:
245
+
246
+ - `createToolMiddleware`: A flexible function for creating custom tool call middleware tailored to specific models
247
+ - `hermesToolMiddleware`: Ready-to-use middleware for Hermes & Qwen format function calls
248
+ - `gemmaToolMiddleware`: Pre-configured middleware for Gemma 3 model series function call format
249
+
250
+ Here's how you can enable function calls with Gemma models that don't support them natively:
251
+
252
+ ```ts
253
+ import { wrapLanguageModel } from 'ai';
254
+ import { gemmaToolMiddleware } from '@ai-sdk-tool/parser';
255
+
256
+ const model = wrapLanguageModel({
257
+ model: openrouter('google/gemma-3-27b-it'),
258
+ middleware: gemmaToolMiddleware,
259
+ });
260
+ ```
261
+
262
+ Find more examples at this [link](https://github.com/minpeter/ai-sdk-tool-call-middleware/tree/main/examples/core/src).
263
+
264
+ ## Implementing Language Model Middleware
265
+
266
+ <Note>
267
+ Implementing language model middleware is advanced functionality and requires
268
+ a solid understanding of the [language model
269
+ specification](https://github.com/vercel/ai/blob/v5/packages/provider/src/language-model/v2/language-model-v2.ts).
270
+ </Note>
271
+
272
+ You can implement any of the following three functions to modify the behavior of the language model:
273
+
274
+ 1. `transformParams`: Transforms the parameters before they are passed to the language model, for both `doGenerate` and `doStream`.
275
+ 2. `wrapGenerate`: Wraps the `doGenerate` method of the [language model](https://github.com/vercel/ai/blob/v5/packages/provider/src/language-model/v2/language-model-v2.ts).
276
+ You can modify the parameters, call the language model, and modify the result.
277
+ 3. `wrapStream`: Wraps the `doStream` method of the [language model](https://github.com/vercel/ai/blob/v5/packages/provider/src/language-model/v2/language-model-v2.ts).
278
+ You can modify the parameters, call the language model, and modify the result.
279
+
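+ As a point of reference, here is a minimal pass-through middleware that implements all three hooks without changing any behavior (a sketch; realistic implementations follow below):
+
+ ```ts
+ import type { LanguageModelV3Middleware } from '@ai-sdk/provider';
+
+ export const passthroughMiddleware: LanguageModelV3Middleware = {
+   // runs before doGenerate and doStream; returns the (optionally modified) params
+   transformParams: async ({ params }) => params,
+
+   // wraps the non-streaming call
+   wrapGenerate: async ({ doGenerate }) => doGenerate(),
+
+   // wraps the streaming call
+   wrapStream: async ({ doStream }) => doStream(),
+ };
+ ```
+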
280
+ Here are some examples of how to implement language model middleware:
281
+
282
+ ## Examples
283
+
284
+ <Note>
285
+ These examples are not meant to be used in production. They are just to show
286
+ how you can use middleware to enhance the behavior of language models.
287
+ </Note>
288
+
289
+ ### Logging
290
+
291
+ This example shows how to log the parameters and generated text of a language model call.
292
+
293
+ ```ts
294
+ import type {
295
+ LanguageModelV3Middleware,
296
+ LanguageModelV3StreamPart,
297
+ } from '@ai-sdk/provider';
298
+
299
+ export const yourLogMiddleware: LanguageModelV3Middleware = {
300
+ wrapGenerate: async ({ doGenerate, params }) => {
301
+ console.log('doGenerate called');
302
+ console.log(`params: ${JSON.stringify(params, null, 2)}`);
303
+
304
+ const result = await doGenerate();
305
+
306
+ console.log('doGenerate finished');
307
+ console.log(`generated text: ${result.text}`);
308
+
309
+ return result;
310
+ },
311
+
312
+ wrapStream: async ({ doStream, params }) => {
313
+ console.log('doStream called');
314
+ console.log(`params: ${JSON.stringify(params, null, 2)}`);
315
+
316
+ const { stream, ...rest } = await doStream();
317
+
318
+ let generatedText = '';
319
+ const textBlocks = new Map<string, string>();
320
+
321
+ const transformStream = new TransformStream<
322
+ LanguageModelV3StreamPart,
323
+ LanguageModelV3StreamPart
324
+ >({
325
+ transform(chunk, controller) {
326
+ switch (chunk.type) {
327
+ case 'text-start': {
328
+ textBlocks.set(chunk.id, '');
329
+ break;
330
+ }
331
+ case 'text-delta': {
332
+ const existing = textBlocks.get(chunk.id) || '';
333
+ textBlocks.set(chunk.id, existing + chunk.delta);
334
+ generatedText += chunk.delta;
335
+ break;
336
+ }
337
+ case 'text-end': {
338
+ console.log(
339
+ `Text block ${chunk.id} completed:`,
340
+ textBlocks.get(chunk.id),
341
+ );
342
+ break;
343
+ }
344
+ }
345
+
346
+ controller.enqueue(chunk);
347
+ },
348
+
349
+ flush() {
350
+ console.log('doStream finished');
351
+ console.log(`generated text: ${generatedText}`);
352
+ },
353
+ });
354
+
355
+ return {
356
+ stream: stream.pipeThrough(transformStream),
357
+ ...rest,
358
+ };
359
+ },
360
+ };
361
+ ```
362
+
363
+ ### Caching
364
+
365
+ This example shows how to build a simple cache for the generated text of a language model call.
366
+
367
+ ```ts
368
+ import type { LanguageModelV3Middleware } from '@ai-sdk/provider';
369
+
370
+ const cache = new Map<string, any>();
371
+
372
+ export const yourCacheMiddleware: LanguageModelV3Middleware = {
373
+ wrapGenerate: async ({ doGenerate, params }) => {
374
+ const cacheKey = JSON.stringify(params);
375
+
376
+ if (cache.has(cacheKey)) {
377
+ return cache.get(cacheKey);
378
+ }
379
+
380
+ const result = await doGenerate();
381
+
382
+ cache.set(cacheKey, result);
383
+
384
+ return result;
385
+ },
386
+
387
+ // here you would implement the caching logic for streaming
388
+ };
389
+ ```
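+
+ As a rough illustration of that streaming branch, the sketch below records the stream parts as they pass through and replays them with `simulateReadableStream` on a cache hit. It is not production-ready (it ignores concurrent requests and cache invalidation):
+
+ ```ts
+ import { simulateReadableStream } from 'ai';
+ import type {
+   LanguageModelV3Middleware,
+   LanguageModelV3StreamPart,
+ } from '@ai-sdk/provider';
+
+ const streamCache = new Map<string, any>();
+
+ export const yourStreamCacheMiddleware: LanguageModelV3Middleware = {
+   wrapStream: async ({ doStream, params }) => {
+     const cacheKey = JSON.stringify(params);
+
+     // replay a previously recorded stream
+     if (streamCache.has(cacheKey)) {
+       const { parts, ...rest } = streamCache.get(cacheKey);
+       return {
+         stream: simulateReadableStream({ chunks: parts }),
+         ...rest,
+       };
+     }
+
+     const { stream, ...rest } = await doStream();
+     const parts: LanguageModelV3StreamPart[] = [];
+
+     // record stream parts while forwarding them to the caller
+     const recorder = new TransformStream<
+       LanguageModelV3StreamPart,
+       LanguageModelV3StreamPart
+     >({
+       transform(chunk, controller) {
+         parts.push(chunk);
+         controller.enqueue(chunk);
+       },
+       flush() {
+         streamCache.set(cacheKey, { parts, ...rest });
+       },
+     });
+
+     return { stream: stream.pipeThrough(recorder), ...rest };
+   },
+ };
+ ```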
390
+
391
+ ### Retrieval Augmented Generation (RAG)
392
+
393
+ This example shows how to use RAG as middleware.
394
+
395
+ <Note>
396
+ Helper functions like `getLastUserMessageText` and `findSources` are not part
397
+ of the AI SDK. They are just used in this example to illustrate the concept of
398
+ RAG.
399
+ </Note>
400
+
401
+ ```ts
402
+ import type { LanguageModelV3Middleware } from '@ai-sdk/provider';
403
+
404
+ export const yourRagMiddleware: LanguageModelV3Middleware = {
405
+ transformParams: async ({ params }) => {
406
+ const lastUserMessageText = getLastUserMessageText({
407
+ prompt: params.prompt,
408
+ });
409
+
410
+ if (lastUserMessageText == null) {
411
+ return params; // do not use RAG (send unmodified parameters)
412
+ }
413
+
414
+ const instruction =
415
+ 'Use the following information to answer the question:\n' +
416
+ findSources({ text: lastUserMessageText })
417
+ .map(chunk => JSON.stringify(chunk))
418
+ .join('\n');
419
+
420
+ return addToLastUserMessage({ params, text: instruction });
421
+ },
422
+ };
423
+ ```
424
+
425
+ ### Guardrails
426
+
427
+ Guardrails are a way to ensure that the generated text of a language model call
428
+ is safe and appropriate. This example shows how to use guardrails as middleware.
429
+
430
+ ```ts
431
+ import type { LanguageModelV3Middleware } from '@ai-sdk/provider';
432
+
433
+ export const yourGuardrailMiddleware: LanguageModelV3Middleware = {
434
+ wrapGenerate: async ({ doGenerate }) => {
435
+ const { text, ...rest } = await doGenerate();
436
+
437
+ // filtering approach, e.g. for PII or other sensitive information:
438
+ const cleanedText = text?.replace(/badword/g, '<REDACTED>');
439
+
440
+ return { text: cleanedText, ...rest };
441
+ },
442
+
443
+ // here you would implement the guardrail logic for streaming
444
+ // Note: streaming guardrails are difficult to implement, because
445
+ // you do not know the full content of the stream until it's finished.
446
+ };
447
+ ```
448
+
449
+ ## Configuring Per Request Custom Metadata
450
+
451
+ To send and access custom metadata in middleware, you can use `providerOptions`. This is useful when building logging middleware where you want to pass additional context like user IDs, timestamps, or other contextual data that can help with tracking and debugging.
452
+
453
+ ```ts
454
+ import { generateText, wrapLanguageModel } from 'ai';
455
+ __PROVIDER_IMPORT__;
456
+ import type { LanguageModelV3Middleware } from '@ai-sdk/provider';
457
+
458
+ export const yourLogMiddleware: LanguageModelV3Middleware = {
459
+ wrapGenerate: async ({ doGenerate, params }) => {
460
+ console.log('METADATA', params?.providerMetadata?.yourLogMiddleware);
461
+ const result = await doGenerate();
462
+ return result;
463
+ },
464
+ };
465
+
466
+ const { text } = await generateText({
467
+ model: wrapLanguageModel({
468
+ model: __MODEL__,
469
+ middleware: yourLogMiddleware,
470
+ }),
471
+ prompt: 'Invent a new holiday and describe its traditions.',
472
+ providerOptions: {
473
+ yourLogMiddleware: {
474
+ hello: 'world',
475
+ },
476
+ },
477
+ });
478
+
479
+ console.log(text);
480
+ ```