@fondation-io/ai 7.0.0-beta.45
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/CHANGELOG.md +7687 -0
- package/README.md +238 -0
- package/dist/index.d.mts +7056 -0
- package/dist/index.d.ts +7056 -0
- package/dist/index.js +14607 -0
- package/dist/index.js.map +1 -0
- package/dist/index.mjs +14578 -0
- package/dist/index.mjs.map +1 -0
- package/dist/internal/index.d.mts +303 -0
- package/dist/internal/index.d.ts +303 -0
- package/dist/internal/index.js +1352 -0
- package/dist/internal/index.js.map +1 -0
- package/dist/internal/index.mjs +1336 -0
- package/dist/internal/index.mjs.map +1 -0
- package/dist/test/index.d.mts +265 -0
- package/dist/test/index.d.ts +265 -0
- package/dist/test/index.js +509 -0
- package/dist/test/index.js.map +1 -0
- package/dist/test/index.mjs +472 -0
- package/dist/test/index.mjs.map +1 -0
- package/docs/00-introduction/index.mdx +76 -0
- package/docs/02-foundations/01-overview.mdx +43 -0
- package/docs/02-foundations/02-providers-and-models.mdx +160 -0
- package/docs/02-foundations/03-prompts.mdx +616 -0
- package/docs/02-foundations/04-tools.mdx +251 -0
- package/docs/02-foundations/05-streaming.mdx +62 -0
- package/docs/02-foundations/06-provider-options.mdx +345 -0
- package/docs/02-foundations/index.mdx +49 -0
- package/docs/02-getting-started/00-choosing-a-provider.mdx +110 -0
- package/docs/02-getting-started/01-navigating-the-library.mdx +85 -0
- package/docs/02-getting-started/02-nextjs-app-router.mdx +559 -0
- package/docs/02-getting-started/03-nextjs-pages-router.mdx +542 -0
- package/docs/02-getting-started/04-svelte.mdx +627 -0
- package/docs/02-getting-started/05-nuxt.mdx +566 -0
- package/docs/02-getting-started/06-nodejs.mdx +512 -0
- package/docs/02-getting-started/07-expo.mdx +766 -0
- package/docs/02-getting-started/08-tanstack-start.mdx +583 -0
- package/docs/02-getting-started/09-coding-agents.mdx +179 -0
- package/docs/02-getting-started/index.mdx +44 -0
- package/docs/03-agents/01-overview.mdx +96 -0
- package/docs/03-agents/02-building-agents.mdx +449 -0
- package/docs/03-agents/03-workflows.mdx +386 -0
- package/docs/03-agents/04-loop-control.mdx +394 -0
- package/docs/03-agents/05-configuring-call-options.mdx +286 -0
- package/docs/03-agents/06-memory.mdx +222 -0
- package/docs/03-agents/06-subagents.mdx +362 -0
- package/docs/03-agents/index.mdx +46 -0
- package/docs/03-ai-sdk-core/01-overview.mdx +31 -0
- package/docs/03-ai-sdk-core/05-generating-text.mdx +707 -0
- package/docs/03-ai-sdk-core/10-generating-structured-data.mdx +498 -0
- package/docs/03-ai-sdk-core/15-tools-and-tool-calling.mdx +1148 -0
- package/docs/03-ai-sdk-core/16-mcp-tools.mdx +383 -0
- package/docs/03-ai-sdk-core/20-prompt-engineering.mdx +146 -0
- package/docs/03-ai-sdk-core/25-settings.mdx +216 -0
- package/docs/03-ai-sdk-core/26-reasoning.mdx +190 -0
- package/docs/03-ai-sdk-core/30-embeddings.mdx +236 -0
- package/docs/03-ai-sdk-core/31-reranking.mdx +218 -0
- package/docs/03-ai-sdk-core/35-image-generation.mdx +341 -0
- package/docs/03-ai-sdk-core/36-transcription.mdx +227 -0
- package/docs/03-ai-sdk-core/37-speech.mdx +169 -0
- package/docs/03-ai-sdk-core/38-video-generation.mdx +366 -0
- package/docs/03-ai-sdk-core/40-middleware.mdx +485 -0
- package/docs/03-ai-sdk-core/45-provider-management.mdx +349 -0
- package/docs/03-ai-sdk-core/50-error-handling.mdx +149 -0
- package/docs/03-ai-sdk-core/55-testing.mdx +219 -0
- package/docs/03-ai-sdk-core/60-telemetry.mdx +391 -0
- package/docs/03-ai-sdk-core/65-devtools.mdx +107 -0
- package/docs/03-ai-sdk-core/65-event-listeners.mdx +1303 -0
- package/docs/03-ai-sdk-core/index.mdx +99 -0
- package/docs/04-ai-sdk-ui/01-overview.mdx +44 -0
- package/docs/04-ai-sdk-ui/02-chatbot.mdx +1320 -0
- package/docs/04-ai-sdk-ui/03-chatbot-message-persistence.mdx +534 -0
- package/docs/04-ai-sdk-ui/03-chatbot-resume-streams.mdx +263 -0
- package/docs/04-ai-sdk-ui/03-chatbot-tool-usage.mdx +682 -0
- package/docs/04-ai-sdk-ui/04-generative-user-interfaces.mdx +389 -0
- package/docs/04-ai-sdk-ui/05-completion.mdx +181 -0
- package/docs/04-ai-sdk-ui/08-object-generation.mdx +344 -0
- package/docs/04-ai-sdk-ui/20-streaming-data.mdx +397 -0
- package/docs/04-ai-sdk-ui/21-error-handling.mdx +190 -0
- package/docs/04-ai-sdk-ui/21-transport.mdx +174 -0
- package/docs/04-ai-sdk-ui/24-reading-ui-message-streams.mdx +104 -0
- package/docs/04-ai-sdk-ui/25-message-metadata.mdx +152 -0
- package/docs/04-ai-sdk-ui/50-stream-protocol.mdx +503 -0
- package/docs/04-ai-sdk-ui/index.mdx +64 -0
- package/docs/05-ai-sdk-rsc/01-overview.mdx +45 -0
- package/docs/05-ai-sdk-rsc/02-streaming-react-components.mdx +209 -0
- package/docs/05-ai-sdk-rsc/03-generative-ui-state.mdx +279 -0
- package/docs/05-ai-sdk-rsc/03-saving-and-restoring-states.mdx +105 -0
- package/docs/05-ai-sdk-rsc/04-multistep-interfaces.mdx +282 -0
- package/docs/05-ai-sdk-rsc/05-streaming-values.mdx +157 -0
- package/docs/05-ai-sdk-rsc/06-loading-state.mdx +273 -0
- package/docs/05-ai-sdk-rsc/08-error-handling.mdx +94 -0
- package/docs/05-ai-sdk-rsc/09-authentication.mdx +42 -0
- package/docs/05-ai-sdk-rsc/10-migrating-to-ui.mdx +722 -0
- package/docs/05-ai-sdk-rsc/index.mdx +63 -0
- package/docs/06-advanced/01-prompt-engineering.mdx +96 -0
- package/docs/06-advanced/02-stopping-streams.mdx +184 -0
- package/docs/06-advanced/03-backpressure.mdx +173 -0
- package/docs/06-advanced/04-caching.mdx +169 -0
- package/docs/06-advanced/05-multiple-streamables.mdx +68 -0
- package/docs/06-advanced/06-rate-limiting.mdx +60 -0
- package/docs/06-advanced/07-rendering-ui-with-language-models.mdx +225 -0
- package/docs/06-advanced/08-model-as-router.mdx +120 -0
- package/docs/06-advanced/09-multistep-interfaces.mdx +115 -0
- package/docs/06-advanced/09-sequential-generations.mdx +55 -0
- package/docs/06-advanced/10-vercel-deployment-guide.mdx +117 -0
- package/docs/06-advanced/index.mdx +11 -0
- package/docs/07-reference/01-ai-sdk-core/01-generate-text.mdx +2785 -0
- package/docs/07-reference/01-ai-sdk-core/02-stream-text.mdx +3752 -0
- package/docs/07-reference/01-ai-sdk-core/05-embed.mdx +332 -0
- package/docs/07-reference/01-ai-sdk-core/06-embed-many.mdx +330 -0
- package/docs/07-reference/01-ai-sdk-core/06-rerank.mdx +323 -0
- package/docs/07-reference/01-ai-sdk-core/10-generate-image.mdx +251 -0
- package/docs/07-reference/01-ai-sdk-core/11-transcribe.mdx +152 -0
- package/docs/07-reference/01-ai-sdk-core/12-generate-speech.mdx +221 -0
- package/docs/07-reference/01-ai-sdk-core/13-generate-video.mdx +264 -0
- package/docs/07-reference/01-ai-sdk-core/15-agent.mdx +235 -0
- package/docs/07-reference/01-ai-sdk-core/16-tool-loop-agent.mdx +973 -0
- package/docs/07-reference/01-ai-sdk-core/17-create-agent-ui-stream.mdx +154 -0
- package/docs/07-reference/01-ai-sdk-core/18-create-agent-ui-stream-response.mdx +173 -0
- package/docs/07-reference/01-ai-sdk-core/18-pipe-agent-ui-stream-to-response.mdx +150 -0
- package/docs/07-reference/01-ai-sdk-core/20-tool.mdx +209 -0
- package/docs/07-reference/01-ai-sdk-core/22-dynamic-tool.mdx +223 -0
- package/docs/07-reference/01-ai-sdk-core/23-create-mcp-client.mdx +423 -0
- package/docs/07-reference/01-ai-sdk-core/24-mcp-stdio-transport.mdx +68 -0
- package/docs/07-reference/01-ai-sdk-core/25-json-schema.mdx +94 -0
- package/docs/07-reference/01-ai-sdk-core/26-zod-schema.mdx +109 -0
- package/docs/07-reference/01-ai-sdk-core/27-valibot-schema.mdx +58 -0
- package/docs/07-reference/01-ai-sdk-core/28-output.mdx +342 -0
- package/docs/07-reference/01-ai-sdk-core/30-model-message.mdx +435 -0
- package/docs/07-reference/01-ai-sdk-core/31-ui-message.mdx +264 -0
- package/docs/07-reference/01-ai-sdk-core/32-validate-ui-messages.mdx +101 -0
- package/docs/07-reference/01-ai-sdk-core/33-safe-validate-ui-messages.mdx +113 -0
- package/docs/07-reference/01-ai-sdk-core/40-provider-registry.mdx +198 -0
- package/docs/07-reference/01-ai-sdk-core/42-custom-provider.mdx +157 -0
- package/docs/07-reference/01-ai-sdk-core/50-cosine-similarity.mdx +52 -0
- package/docs/07-reference/01-ai-sdk-core/60-wrap-language-model.mdx +59 -0
- package/docs/07-reference/01-ai-sdk-core/61-wrap-image-model.mdx +64 -0
- package/docs/07-reference/01-ai-sdk-core/65-language-model-v2-middleware.mdx +74 -0
- package/docs/07-reference/01-ai-sdk-core/66-extract-reasoning-middleware.mdx +68 -0
- package/docs/07-reference/01-ai-sdk-core/67-simulate-streaming-middleware.mdx +71 -0
- package/docs/07-reference/01-ai-sdk-core/68-default-settings-middleware.mdx +80 -0
- package/docs/07-reference/01-ai-sdk-core/69-add-tool-input-examples-middleware.mdx +155 -0
- package/docs/07-reference/01-ai-sdk-core/70-extract-json-middleware.mdx +147 -0
- package/docs/07-reference/01-ai-sdk-core/70-step-count-is.mdx +84 -0
- package/docs/07-reference/01-ai-sdk-core/71-has-tool-call.mdx +120 -0
- package/docs/07-reference/01-ai-sdk-core/75-simulate-readable-stream.mdx +94 -0
- package/docs/07-reference/01-ai-sdk-core/80-smooth-stream.mdx +145 -0
- package/docs/07-reference/01-ai-sdk-core/90-generate-id.mdx +30 -0
- package/docs/07-reference/01-ai-sdk-core/91-create-id-generator.mdx +89 -0
- package/docs/07-reference/01-ai-sdk-core/92-default-generated-file.mdx +68 -0
- package/docs/07-reference/01-ai-sdk-core/index.mdx +160 -0
- package/docs/07-reference/02-ai-sdk-ui/01-use-chat.mdx +493 -0
- package/docs/07-reference/02-ai-sdk-ui/02-use-completion.mdx +185 -0
- package/docs/07-reference/02-ai-sdk-ui/03-use-object.mdx +196 -0
- package/docs/07-reference/02-ai-sdk-ui/31-convert-to-model-messages.mdx +231 -0
- package/docs/07-reference/02-ai-sdk-ui/32-prune-messages.mdx +108 -0
- package/docs/07-reference/02-ai-sdk-ui/40-create-ui-message-stream.mdx +162 -0
- package/docs/07-reference/02-ai-sdk-ui/41-create-ui-message-stream-response.mdx +119 -0
- package/docs/07-reference/02-ai-sdk-ui/42-pipe-ui-message-stream-to-response.mdx +77 -0
- package/docs/07-reference/02-ai-sdk-ui/43-read-ui-message-stream.mdx +57 -0
- package/docs/07-reference/02-ai-sdk-ui/46-infer-ui-tools.mdx +99 -0
- package/docs/07-reference/02-ai-sdk-ui/47-infer-ui-tool.mdx +75 -0
- package/docs/07-reference/02-ai-sdk-ui/50-direct-chat-transport.mdx +333 -0
- package/docs/07-reference/02-ai-sdk-ui/index.mdx +89 -0
- package/docs/07-reference/03-ai-sdk-rsc/01-stream-ui.mdx +767 -0
- package/docs/07-reference/03-ai-sdk-rsc/02-create-ai.mdx +90 -0
- package/docs/07-reference/03-ai-sdk-rsc/03-create-streamable-ui.mdx +91 -0
- package/docs/07-reference/03-ai-sdk-rsc/04-create-streamable-value.mdx +78 -0
- package/docs/07-reference/03-ai-sdk-rsc/05-read-streamable-value.mdx +79 -0
- package/docs/07-reference/03-ai-sdk-rsc/06-get-ai-state.mdx +50 -0
- package/docs/07-reference/03-ai-sdk-rsc/07-get-mutable-ai-state.mdx +70 -0
- package/docs/07-reference/03-ai-sdk-rsc/08-use-ai-state.mdx +26 -0
- package/docs/07-reference/03-ai-sdk-rsc/09-use-actions.mdx +42 -0
- package/docs/07-reference/03-ai-sdk-rsc/10-use-ui-state.mdx +35 -0
- package/docs/07-reference/03-ai-sdk-rsc/11-use-streamable-value.mdx +46 -0
- package/docs/07-reference/03-ai-sdk-rsc/20-render.mdx +266 -0
- package/docs/07-reference/03-ai-sdk-rsc/index.mdx +67 -0
- package/docs/07-reference/05-ai-sdk-errors/ai-api-call-error.mdx +31 -0
- package/docs/07-reference/05-ai-sdk-errors/ai-download-error.mdx +28 -0
- package/docs/07-reference/05-ai-sdk-errors/ai-empty-response-body-error.mdx +24 -0
- package/docs/07-reference/05-ai-sdk-errors/ai-invalid-argument-error.mdx +26 -0
- package/docs/07-reference/05-ai-sdk-errors/ai-invalid-data-content-error.mdx +26 -0
- package/docs/07-reference/05-ai-sdk-errors/ai-invalid-message-role-error.mdx +25 -0
- package/docs/07-reference/05-ai-sdk-errors/ai-invalid-prompt-error.mdx +47 -0
- package/docs/07-reference/05-ai-sdk-errors/ai-invalid-response-data-error.mdx +25 -0
- package/docs/07-reference/05-ai-sdk-errors/ai-invalid-tool-approval-error.mdx +24 -0
- package/docs/07-reference/05-ai-sdk-errors/ai-invalid-tool-input-error.mdx +27 -0
- package/docs/07-reference/05-ai-sdk-errors/ai-json-parse-error.mdx +25 -0
- package/docs/07-reference/05-ai-sdk-errors/ai-load-api-key-error.mdx +24 -0
- package/docs/07-reference/05-ai-sdk-errors/ai-load-setting-error.mdx +24 -0
- package/docs/07-reference/05-ai-sdk-errors/ai-message-conversion-error.mdx +25 -0
- package/docs/07-reference/05-ai-sdk-errors/ai-no-content-generated-error.mdx +24 -0
- package/docs/07-reference/05-ai-sdk-errors/ai-no-image-generated-error.mdx +36 -0
- package/docs/07-reference/05-ai-sdk-errors/ai-no-object-generated-error.mdx +43 -0
- package/docs/07-reference/05-ai-sdk-errors/ai-no-output-generated-error.mdx +25 -0
- package/docs/07-reference/05-ai-sdk-errors/ai-no-speech-generated-error.mdx +24 -0
- package/docs/07-reference/05-ai-sdk-errors/ai-no-such-model-error.mdx +26 -0
- package/docs/07-reference/05-ai-sdk-errors/ai-no-such-provider-error.mdx +28 -0
- package/docs/07-reference/05-ai-sdk-errors/ai-no-such-tool-error.mdx +26 -0
- package/docs/07-reference/05-ai-sdk-errors/ai-no-transcript-generated-error.mdx +24 -0
- package/docs/07-reference/05-ai-sdk-errors/ai-no-video-generated-error.mdx +39 -0
- package/docs/07-reference/05-ai-sdk-errors/ai-retry-error.mdx +27 -0
- package/docs/07-reference/05-ai-sdk-errors/ai-too-many-embedding-values-for-call-error.mdx +27 -0
- package/docs/07-reference/05-ai-sdk-errors/ai-tool-call-not-found-for-approval-error.mdx +25 -0
- package/docs/07-reference/05-ai-sdk-errors/ai-tool-call-repair-error.mdx +28 -0
- package/docs/07-reference/05-ai-sdk-errors/ai-type-validation-error.mdx +25 -0
- package/docs/07-reference/05-ai-sdk-errors/ai-ui-message-stream-error.mdx +67 -0
- package/docs/07-reference/05-ai-sdk-errors/ai-unsupported-functionality-error.mdx +25 -0
- package/docs/07-reference/05-ai-sdk-errors/index.mdx +39 -0
- package/docs/07-reference/index.mdx +28 -0
- package/docs/08-migration-guides/00-versioning.mdx +46 -0
- package/docs/08-migration-guides/23-migration-guide-7-0.mdx +95 -0
- package/docs/08-migration-guides/24-migration-guide-6-0.mdx +823 -0
- package/docs/08-migration-guides/25-migration-guide-5-0-data.mdx +882 -0
- package/docs/08-migration-guides/26-migration-guide-5-0.mdx +3427 -0
- package/docs/08-migration-guides/27-migration-guide-4-2.mdx +99 -0
- package/docs/08-migration-guides/28-migration-guide-4-1.mdx +14 -0
- package/docs/08-migration-guides/29-migration-guide-4-0.mdx +1157 -0
- package/docs/08-migration-guides/36-migration-guide-3-4.mdx +14 -0
- package/docs/08-migration-guides/37-migration-guide-3-3.mdx +64 -0
- package/docs/08-migration-guides/38-migration-guide-3-2.mdx +46 -0
- package/docs/08-migration-guides/39-migration-guide-3-1.mdx +168 -0
- package/docs/08-migration-guides/index.mdx +22 -0
- package/docs/09-troubleshooting/01-azure-stream-slow.mdx +33 -0
- package/docs/09-troubleshooting/03-server-actions-in-client-components.mdx +40 -0
- package/docs/09-troubleshooting/04-strange-stream-output.mdx +36 -0
- package/docs/09-troubleshooting/05-streamable-ui-errors.mdx +16 -0
- package/docs/09-troubleshooting/05-tool-invocation-missing-result.mdx +106 -0
- package/docs/09-troubleshooting/06-streaming-not-working-when-deployed.mdx +31 -0
- package/docs/09-troubleshooting/06-streaming-not-working-when-proxied.mdx +31 -0
- package/docs/09-troubleshooting/06-timeout-on-vercel.mdx +60 -0
- package/docs/09-troubleshooting/07-unclosed-streams.mdx +34 -0
- package/docs/09-troubleshooting/08-use-chat-failed-to-parse-stream.mdx +26 -0
- package/docs/09-troubleshooting/09-client-stream-error.mdx +25 -0
- package/docs/09-troubleshooting/10-use-chat-tools-no-response.mdx +32 -0
- package/docs/09-troubleshooting/11-use-chat-custom-request-options.mdx +149 -0
- package/docs/09-troubleshooting/12-typescript-performance-zod.mdx +46 -0
- package/docs/09-troubleshooting/12-use-chat-an-error-occurred.mdx +59 -0
- package/docs/09-troubleshooting/13-repeated-assistant-messages.mdx +73 -0
- package/docs/09-troubleshooting/14-stream-abort-handling.mdx +73 -0
- package/docs/09-troubleshooting/14-tool-calling-with-structured-outputs.mdx +48 -0
- package/docs/09-troubleshooting/15-abort-breaks-resumable-streams.mdx +55 -0
- package/docs/09-troubleshooting/15-stream-text-not-working.mdx +33 -0
- package/docs/09-troubleshooting/16-streaming-status-delay.mdx +63 -0
- package/docs/09-troubleshooting/17-use-chat-stale-body-data.mdx +141 -0
- package/docs/09-troubleshooting/18-ontoolcall-type-narrowing.mdx +66 -0
- package/docs/09-troubleshooting/19-unsupported-model-version.mdx +50 -0
- package/docs/09-troubleshooting/20-no-object-generated-content-filter.mdx +76 -0
- package/docs/09-troubleshooting/21-missing-tool-results-error.mdx +82 -0
- package/docs/09-troubleshooting/30-model-is-not-assignable-to-type.mdx +21 -0
- package/docs/09-troubleshooting/40-typescript-cannot-find-namespace-jsx.mdx +24 -0
- package/docs/09-troubleshooting/50-react-maximum-update-depth-exceeded.mdx +39 -0
- package/docs/09-troubleshooting/60-jest-cannot-find-module-ai-rsc.mdx +22 -0
- package/docs/09-troubleshooting/70-high-memory-usage-with-images.mdx +108 -0
- package/docs/09-troubleshooting/index.mdx +11 -0
- package/internal.d.ts +1 -0
- package/package.json +120 -0
- package/src/agent/agent.ts +156 -0
- package/src/agent/create-agent-ui-stream-response.ts +61 -0
- package/src/agent/create-agent-ui-stream.ts +84 -0
- package/src/agent/index.ts +37 -0
- package/src/agent/infer-agent-tools.ts +7 -0
- package/src/agent/infer-agent-ui-message.ts +11 -0
- package/src/agent/pipe-agent-ui-stream-to-response.ts +64 -0
- package/src/agent/tool-loop-agent-settings.ts +252 -0
- package/src/agent/tool-loop-agent.ts +205 -0
- package/src/embed/embed-events.ts +181 -0
- package/src/embed/embed-many-result.ts +53 -0
- package/src/embed/embed-many.ts +428 -0
- package/src/embed/embed-result.ts +50 -0
- package/src/embed/embed.ts +266 -0
- package/src/embed/index.ts +5 -0
- package/src/error/index.ts +37 -0
- package/src/error/invalid-argument-error.ts +34 -0
- package/src/error/invalid-stream-part-error.ts +28 -0
- package/src/error/invalid-tool-approval-error.ts +26 -0
- package/src/error/invalid-tool-input-error.ts +33 -0
- package/src/error/missing-tool-result-error.ts +28 -0
- package/src/error/no-image-generated-error.ts +39 -0
- package/src/error/no-object-generated-error.ts +70 -0
- package/src/error/no-output-generated-error.ts +26 -0
- package/src/error/no-speech-generated-error.ts +28 -0
- package/src/error/no-such-tool-error.ts +35 -0
- package/src/error/no-transcript-generated-error.ts +30 -0
- package/src/error/no-video-generated-error.ts +57 -0
- package/src/error/tool-call-not-found-for-approval-error.ts +32 -0
- package/src/error/tool-call-repair-error.ts +30 -0
- package/src/error/ui-message-stream-error.ts +48 -0
- package/src/error/unsupported-model-version-error.ts +23 -0
- package/src/error/verify-no-object-generated-error.ts +27 -0
- package/src/generate-image/generate-image-result.ts +42 -0
- package/src/generate-image/generate-image.ts +361 -0
- package/src/generate-image/index.ts +18 -0
- package/src/generate-object/generate-object-result.ts +67 -0
- package/src/generate-object/generate-object.ts +514 -0
- package/src/generate-object/index.ts +9 -0
- package/src/generate-object/inject-json-instruction.ts +30 -0
- package/src/generate-object/output-strategy.ts +415 -0
- package/src/generate-object/parse-and-validate-object-result.ts +111 -0
- package/src/generate-object/repair-text.ts +12 -0
- package/src/generate-object/stream-object-result.ts +120 -0
- package/src/generate-object/stream-object.ts +984 -0
- package/src/generate-object/validate-object-generation-input.ts +144 -0
- package/src/generate-speech/generate-speech-result.ts +30 -0
- package/src/generate-speech/generate-speech.ts +191 -0
- package/src/generate-speech/generated-audio-file.ts +65 -0
- package/src/generate-speech/index.ts +3 -0
- package/src/generate-text/collect-tool-approvals.ts +116 -0
- package/src/generate-text/content-part.ts +31 -0
- package/src/generate-text/core-events.ts +390 -0
- package/src/generate-text/create-execute-tools-transformation.ts +144 -0
- package/src/generate-text/execute-tool-call.ts +190 -0
- package/src/generate-text/extract-reasoning-content.ts +17 -0
- package/src/generate-text/extract-text-content.ts +15 -0
- package/src/generate-text/generate-text-result.ts +168 -0
- package/src/generate-text/generate-text.ts +1445 -0
- package/src/generate-text/generated-file.ts +70 -0
- package/src/generate-text/index.ts +78 -0
- package/src/generate-text/invoke-tool-callbacks-from-stream.ts +81 -0
- package/src/generate-text/is-approval-needed.ts +29 -0
- package/src/generate-text/output-utils.ts +23 -0
- package/src/generate-text/output.ts +590 -0
- package/src/generate-text/parse-tool-call.ts +188 -0
- package/src/generate-text/prepare-step.ts +103 -0
- package/src/generate-text/prune-messages.ts +167 -0
- package/src/generate-text/reasoning-output.ts +99 -0
- package/src/generate-text/reasoning.ts +10 -0
- package/src/generate-text/response-message.ts +10 -0
- package/src/generate-text/smooth-stream.ts +162 -0
- package/src/generate-text/step-result.ts +310 -0
- package/src/generate-text/stop-condition.ts +29 -0
- package/src/generate-text/stream-model-call.ts +418 -0
- package/src/generate-text/stream-text-result.ts +536 -0
- package/src/generate-text/stream-text.ts +2696 -0
- package/src/generate-text/to-response-messages.ts +195 -0
- package/src/generate-text/tool-approval-request-output.ts +21 -0
- package/src/generate-text/tool-call-repair-function.ts +27 -0
- package/src/generate-text/tool-call.ts +47 -0
- package/src/generate-text/tool-error.ts +34 -0
- package/src/generate-text/tool-output-denied.ts +21 -0
- package/src/generate-text/tool-output.ts +7 -0
- package/src/generate-text/tool-result.ts +36 -0
- package/src/generate-text/tool-set.ts +14 -0
- package/src/generate-video/generate-video-result.ts +36 -0
- package/src/generate-video/generate-video.ts +402 -0
- package/src/generate-video/index.ts +3 -0
- package/src/global.ts +36 -0
- package/src/index.ts +49 -0
- package/src/logger/index.ts +6 -0
- package/src/logger/log-warnings.ts +140 -0
- package/src/middleware/add-tool-input-examples-middleware.ts +90 -0
- package/src/middleware/default-embedding-settings-middleware.ts +22 -0
- package/src/middleware/default-settings-middleware.ts +33 -0
- package/src/middleware/extract-json-middleware.ts +197 -0
- package/src/middleware/extract-reasoning-middleware.ts +249 -0
- package/src/middleware/index.ts +10 -0
- package/src/middleware/simulate-streaming-middleware.ts +79 -0
- package/src/middleware/wrap-embedding-model.ts +89 -0
- package/src/middleware/wrap-image-model.ts +92 -0
- package/src/middleware/wrap-language-model.ts +108 -0
- package/src/middleware/wrap-provider.ts +51 -0
- package/src/model/as-embedding-model-v3.ts +24 -0
- package/src/model/as-embedding-model-v4.ts +25 -0
- package/src/model/as-image-model-v3.ts +24 -0
- package/src/model/as-image-model-v4.ts +21 -0
- package/src/model/as-language-model-v3.ts +103 -0
- package/src/model/as-language-model-v4.ts +25 -0
- package/src/model/as-provider-v3.ts +36 -0
- package/src/model/as-provider-v4.ts +47 -0
- package/src/model/as-reranking-model-v4.ts +16 -0
- package/src/model/as-speech-model-v3.ts +24 -0
- package/src/model/as-speech-model-v4.ts +21 -0
- package/src/model/as-transcription-model-v3.ts +24 -0
- package/src/model/as-transcription-model-v4.ts +25 -0
- package/src/model/as-video-model-v4.ts +19 -0
- package/src/model/resolve-model.ts +172 -0
- package/src/prompt/call-settings.ts +169 -0
- package/src/prompt/content-part.ts +236 -0
- package/src/prompt/convert-to-language-model-prompt.ts +548 -0
- package/src/prompt/create-tool-model-output.ts +34 -0
- package/src/prompt/data-content.ts +134 -0
- package/src/prompt/index.ts +27 -0
- package/src/prompt/invalid-data-content-error.ts +29 -0
- package/src/prompt/invalid-message-role-error.ts +27 -0
- package/src/prompt/message-conversion-error.ts +28 -0
- package/src/prompt/message.ts +72 -0
- package/src/prompt/prepare-call-settings.ts +110 -0
- package/src/prompt/prepare-tools-and-tool-choice.ts +86 -0
- package/src/prompt/prompt.ts +43 -0
- package/src/prompt/split-data-url.ts +17 -0
- package/src/prompt/standardize-prompt.ts +99 -0
- package/src/prompt/wrap-gateway-error.ts +29 -0
- package/src/registry/custom-provider.ts +210 -0
- package/src/registry/index.ts +7 -0
- package/src/registry/no-such-provider-error.ts +41 -0
- package/src/registry/provider-registry.ts +331 -0
- package/src/rerank/index.ts +8 -0
- package/src/rerank/rerank-events.ts +189 -0
- package/src/rerank/rerank-result.ts +70 -0
- package/src/rerank/rerank.ts +348 -0
- package/src/telemetry/assemble-operation-name.ts +21 -0
- package/src/telemetry/get-base-telemetry-attributes.ts +45 -0
- package/src/telemetry/get-global-telemetry-integration.ts +126 -0
- package/src/telemetry/get-tracer.ts +20 -0
- package/src/telemetry/index.ts +4 -0
- package/src/telemetry/noop-tracer.ts +69 -0
- package/src/telemetry/open-telemetry-integration.ts +875 -0
- package/src/telemetry/record-span.ts +75 -0
- package/src/telemetry/select-telemetry-attributes.ts +78 -0
- package/src/telemetry/stringify-for-telemetry.ts +33 -0
- package/src/telemetry/telemetry-integration-registry.ts +22 -0
- package/src/telemetry/telemetry-integration.ts +139 -0
- package/src/telemetry/telemetry-settings.ts +55 -0
- package/src/test/mock-embedding-model-v2.ts +35 -0
- package/src/test/mock-embedding-model-v3.ts +48 -0
- package/src/test/mock-embedding-model-v4.ts +48 -0
- package/src/test/mock-image-model-v2.ts +28 -0
- package/src/test/mock-image-model-v3.ts +28 -0
- package/src/test/mock-image-model-v4.ts +28 -0
- package/src/test/mock-language-model-v2.ts +72 -0
- package/src/test/mock-language-model-v3.ts +77 -0
- package/src/test/mock-language-model-v4.ts +77 -0
- package/src/test/mock-provider-v2.ts +68 -0
- package/src/test/mock-provider-v3.ts +80 -0
- package/src/test/mock-provider-v4.ts +80 -0
- package/src/test/mock-reranking-model-v3.ts +25 -0
- package/src/test/mock-reranking-model-v4.ts +25 -0
- package/src/test/mock-server-response.ts +69 -0
- package/src/test/mock-speech-model-v2.ts +24 -0
- package/src/test/mock-speech-model-v3.ts +24 -0
- package/src/test/mock-speech-model-v4.ts +24 -0
- package/src/test/mock-tracer.ts +156 -0
- package/src/test/mock-transcription-model-v2.ts +24 -0
- package/src/test/mock-transcription-model-v3.ts +24 -0
- package/src/test/mock-transcription-model-v4.ts +24 -0
- package/src/test/mock-values.ts +4 -0
- package/src/test/mock-video-model-v3.ts +28 -0
- package/src/test/mock-video-model-v4.ts +28 -0
- package/src/test/not-implemented.ts +3 -0
- package/src/text-stream/create-text-stream-response.ts +30 -0
- package/src/text-stream/index.ts +2 -0
- package/src/text-stream/pipe-text-stream-to-response.ts +38 -0
- package/src/transcribe/index.ts +2 -0
- package/src/transcribe/transcribe-result.ts +60 -0
- package/src/transcribe/transcribe.ts +187 -0
- package/src/types/embedding-model-middleware.ts +15 -0
- package/src/types/embedding-model.ts +20 -0
- package/src/types/image-model-middleware.ts +15 -0
- package/src/types/image-model-response-metadata.ts +16 -0
- package/src/types/image-model.ts +19 -0
- package/src/types/index.ts +29 -0
- package/src/types/json-value.ts +15 -0
- package/src/types/language-model-middleware.ts +15 -0
- package/src/types/language-model-request-metadata.ts +6 -0
- package/src/types/language-model-response-metadata.ts +21 -0
- package/src/types/language-model.ts +106 -0
- package/src/types/provider-metadata.ts +16 -0
- package/src/types/provider.ts +55 -0
- package/src/types/reranking-model.ts +6 -0
- package/src/types/speech-model-response-metadata.ts +21 -0
- package/src/types/speech-model.ts +10 -0
- package/src/types/transcription-model-response-metadata.ts +16 -0
- package/src/types/transcription-model.ts +14 -0
- package/src/types/usage.ts +200 -0
- package/src/types/video-model-response-metadata.ts +28 -0
- package/src/types/video-model.ts +15 -0
- package/src/types/warning.ts +7 -0
- package/src/ui/call-completion-api.ts +157 -0
- package/src/ui/chat-transport.ts +83 -0
- package/src/ui/chat.ts +786 -0
- package/src/ui/convert-file-list-to-file-ui-parts.ts +36 -0
- package/src/ui/convert-to-model-messages.ts +403 -0
- package/src/ui/default-chat-transport.ts +36 -0
- package/src/ui/direct-chat-transport.ts +117 -0
- package/src/ui/http-chat-transport.ts +273 -0
- package/src/ui/index.ts +76 -0
- package/src/ui/last-assistant-message-is-complete-with-approval-responses.ts +44 -0
- package/src/ui/last-assistant-message-is-complete-with-tool-calls.ts +39 -0
- package/src/ui/process-text-stream.ts +16 -0
- package/src/ui/process-ui-message-stream.ts +858 -0
- package/src/ui/text-stream-chat-transport.ts +23 -0
- package/src/ui/transform-text-to-ui-message-stream.ts +27 -0
- package/src/ui/ui-messages.ts +602 -0
- package/src/ui/use-completion.ts +84 -0
- package/src/ui/validate-ui-messages.ts +521 -0
- package/src/ui-message-stream/create-ui-message-stream-response.ts +44 -0
- package/src/ui-message-stream/create-ui-message-stream.ts +145 -0
- package/src/ui-message-stream/get-response-ui-message-id.ts +35 -0
- package/src/ui-message-stream/handle-ui-message-stream-finish.ts +170 -0
- package/src/ui-message-stream/index.ts +14 -0
- package/src/ui-message-stream/json-to-sse-transform-stream.ts +17 -0
- package/src/ui-message-stream/pipe-ui-message-stream-to-response.ts +51 -0
- package/src/ui-message-stream/read-ui-message-stream.ts +87 -0
- package/src/ui-message-stream/ui-message-chunks.ts +372 -0
- package/src/ui-message-stream/ui-message-stream-headers.ts +7 -0
- package/src/ui-message-stream/ui-message-stream-on-finish-callback.ts +32 -0
- package/src/ui-message-stream/ui-message-stream-on-step-finish-callback.ts +25 -0
- package/src/ui-message-stream/ui-message-stream-response-init.ts +14 -0
- package/src/ui-message-stream/ui-message-stream-writer.ts +24 -0
- package/src/util/as-array.ts +3 -0
- package/src/util/async-iterable-stream.ts +94 -0
- package/src/util/consume-stream.ts +31 -0
- package/src/util/cosine-similarity.ts +46 -0
- package/src/util/create-resolvable-promise.ts +30 -0
- package/src/util/create-stitchable-stream.ts +112 -0
- package/src/util/data-url.ts +17 -0
- package/src/util/deep-partial.ts +84 -0
- package/src/util/detect-media-type.ts +226 -0
- package/src/util/download/create-download.ts +13 -0
- package/src/util/download/download-function.ts +45 -0
- package/src/util/download/download.ts +74 -0
- package/src/util/error-handler.ts +1 -0
- package/src/util/fix-json.ts +401 -0
- package/src/util/get-potential-start-index.ts +39 -0
- package/src/util/index.ts +12 -0
- package/src/util/is-deep-equal-data.ts +48 -0
- package/src/util/is-non-empty-object.ts +5 -0
- package/src/util/job.ts +1 -0
- package/src/util/log-v2-compatibility-warning.ts +21 -0
- package/src/util/merge-abort-signals.ts +43 -0
- package/src/util/merge-objects.ts +79 -0
- package/src/util/notify.ts +22 -0
- package/src/util/now.ts +4 -0
- package/src/util/parse-partial-json.ts +30 -0
- package/src/util/prepare-headers.ts +14 -0
- package/src/util/prepare-retries.ts +47 -0
- package/src/util/retry-error.ts +41 -0
- package/src/util/retry-with-exponential-backoff.ts +154 -0
- package/src/util/serial-job-executor.ts +36 -0
- package/src/util/simulate-readable-stream.ts +39 -0
- package/src/util/split-array.ts +20 -0
- package/src/util/value-of.ts +65 -0
- package/src/util/write-to-server-response.ts +49 -0
- package/src/version.ts +5 -0
- package/test.d.ts +1 -0
|
@@ -0,0 +1,251 @@
|
|
|
1
|
+
---
|
|
2
|
+
title: Tools
|
|
3
|
+
description: Learn about tools with the AI SDK.
|
|
4
|
+
---
|
|
5
|
+
|
|
6
|
+
# Tools
|
|
7
|
+
|
|
8
|
+
While [large language models (LLMs)](/docs/foundations/overview#large-language-models) have incredible generation capabilities,
|
|
9
|
+
they struggle with discrete tasks (e.g. mathematics) and interacting with the outside world (e.g. getting the weather).
|
|
10
|
+
|
|
11
|
+
Tools are actions that an LLM can invoke.
|
|
12
|
+
The results of these actions can be reported back to the LLM to be considered in the next response.
|
|
13
|
+
|
|
14
|
+
For example, when you ask an LLM for the "weather in London", and there is a weather tool available, it could call a tool
|
|
15
|
+
with London as the argument. The tool would then fetch the weather data and return it to the LLM. The LLM can then use this
|
|
16
|
+
information in its response.
|
|
17
|
+
|
|
18
|
+
## What is a tool?
|
|
19
|
+
|
|
20
|
+
A tool is an object that can be called by the model to perform a specific task.
|
|
21
|
+
You can use tools with [`generateText`](/docs/reference/ai-sdk-core/generate-text)
|
|
22
|
+
and [`streamText`](/docs/reference/ai-sdk-core/stream-text) by passing one or more tools to the `tools` parameter.
|
|
23
|
+
|
|
24
|
+
A tool consists of three properties:
|
|
25
|
+
|
|
26
|
+
- **`description`**: An optional description of the tool that can influence when the tool is picked.
|
|
27
|
+
- **`inputSchema`**: A [Zod schema](/docs/reference/ai-sdk-core/zod-schema) or a [JSON schema](/docs/reference/ai-sdk-core/json-schema) that defines the input required for the tool to run. The schema is consumed by the LLM, and also used to validate the LLM tool calls.
|
|
28
|
+
- **`execute`**: An optional async function that is called with the arguments from the tool call.
|
|
29
|
+
|
|
30
|
+
<Note>
|
|
31
|
+
`streamUI` uses UI generator tools with a `generate` function that can return
|
|
32
|
+
React components.
|
|
33
|
+
</Note>
|
|
34
|
+
|
|
35
|
+
If the LLM decides to use a tool, it will generate a tool call.
|
|
36
|
+
Tools with an `execute` function are run automatically when these calls are generated.
|
|
37
|
+
The outputs of the tool calls are returned using tool result objects.
|
|
38
|
+
|
|
39
|
+
You can automatically pass tool results back to the LLM
|
|
40
|
+
using [multi-step calls](/docs/ai-sdk-core/tools-and-tool-calling#multi-step-calls) with `streamText` and `generateText`.
|
|
41
|
+
|
|
42
|
+
## Types of Tools
|
|
43
|
+
|
|
44
|
+
The AI SDK supports three types of tools, each with different trade-offs:
|
|
45
|
+
|
|
46
|
+
### Custom Tools
|
|
47
|
+
|
|
48
|
+
Custom tools are tools you define entirely yourself, including the description, input schema, and execute function. They are provider-agnostic and give you full control.
|
|
49
|
+
|
|
50
|
+
```ts
|
|
51
|
+
import { tool } from 'ai';
|
|
52
|
+
import { z } from 'zod';
|
|
53
|
+
|
|
54
|
+
const weatherTool = tool({
|
|
55
|
+
description: 'Get the weather in a location',
|
|
56
|
+
inputSchema: z.object({
|
|
57
|
+
location: z.string().describe('The location to get the weather for'),
|
|
58
|
+
}),
|
|
59
|
+
execute: async ({ location }) => {
|
|
60
|
+
// Your implementation
|
|
61
|
+
return { temperature: 72, conditions: 'sunny' };
|
|
62
|
+
},
|
|
63
|
+
});
|
|
64
|
+
```
|
|
65
|
+
|
|
66
|
+
**When to use**: When you need full control, want provider portability, or are implementing application-specific functionality.
|
|
67
|
+
|
|
68
|
+
### Provider-Defined Tools
|
|
69
|
+
|
|
70
|
+
Provider-defined tools are tools where the provider specifies the tool's `inputSchema` and `description`, but you provide the `execute` function. These are sometimes called "client tools" because execution happens on your side.
|
|
71
|
+
|
|
72
|
+
Examples include Anthropic's `bash` and `text_editor` tools. The model has been specifically trained to use these tools effectively, which can result in better performance for supported tasks.
|
|
73
|
+
|
|
74
|
+
```ts
|
|
75
|
+
import { anthropic } from '@ai-sdk/anthropic';
|
|
76
|
+
import { generateText } from 'ai';
|
|
77
|
+
|
|
78
|
+
const result = await generateText({
|
|
79
|
+
model: anthropic('claude-opus-4-5'),
|
|
80
|
+
tools: {
|
|
81
|
+
bash: anthropic.tools.bash_20250124({
|
|
82
|
+
execute: async ({ command }) => {
|
|
83
|
+
// Your implementation to run the command
|
|
84
|
+
return runCommand(command);
|
|
85
|
+
},
|
|
86
|
+
}),
|
|
87
|
+
},
|
|
88
|
+
prompt: 'List files in the current directory',
|
|
89
|
+
});
|
|
90
|
+
```
|
|
91
|
+
|
|
92
|
+
**When to use**: When the provider offers a tool the model is trained to use well, and you want better performance for that specific task.
|
|
93
|
+
|
|
94
|
+
### Provider-Executed Tools
|
|
95
|
+
|
|
96
|
+
Provider-executed tools are tools that run entirely on the provider's servers. You configure them, but the provider handles execution. These are sometimes called "server-side tools".
|
|
97
|
+
|
|
98
|
+
Examples include OpenAI's web search and Anthropic's code execution. These provide out-of-the-box functionality without requiring you to set up infrastructure.
|
|
99
|
+
|
|
100
|
+
```ts
|
|
101
|
+
import { openai } from '@ai-sdk/openai';
|
|
102
|
+
import { generateText } from 'ai';
|
|
103
|
+
|
|
104
|
+
const result = await generateText({
|
|
105
|
+
model: openai('gpt-5.2'),
|
|
106
|
+
tools: {
|
|
107
|
+
web_search: openai.tools.webSearch(),
|
|
108
|
+
},
|
|
109
|
+
prompt: 'What happened in the news today?',
|
|
110
|
+
});
|
|
111
|
+
```
|
|
112
|
+
|
|
113
|
+
**When to use**: When you want powerful functionality (like web search or sandboxed code execution) without managing the infrastructure yourself.
|
|
114
|
+
|
|
115
|
+
### Comparison
|
|
116
|
+
|
|
117
|
+
| Aspect | Custom Tools | Provider-Defined Tools | Provider-Executed Tools |
|
|
118
|
+
| ------------------ | ------------------------ | ---------------------- | ----------------------- |
|
|
119
|
+
| **Execution** | Your code | Your code | Provider's servers |
|
|
120
|
+
| **Schema** | You define | Provider defines | Provider defines |
|
|
121
|
+
| **Portability** | Works with any provider | Provider-specific | Provider-specific |
|
|
122
|
+
| **Model Training** | General tool use | Optimized for the tool | Optimized for the tool |
|
|
123
|
+
| **Setup** | You implement everything | You implement execute | Configuration only |
|
|
124
|
+
|
|
125
|
+
<Note>
|
|
126
|
+
Provider-defined and provider-executed tools are documented in each provider's
|
|
127
|
+
page. See [Anthropic Provider](/providers/ai-sdk-providers/anthropic) and
|
|
128
|
+
[OpenAI Provider](/providers/ai-sdk-providers/openai) for examples.
|
|
129
|
+
</Note>
|
|
130
|
+
|
|
131
|
+
## Schemas
|
|
132
|
+
|
|
133
|
+
Schemas are used to define and validate the [tool input](/docs/ai-sdk-core/tools-and-tool-calling), tool outputs, and structured output generation.
|
|
134
|
+
|
|
135
|
+
The AI SDK supports the following schemas:
|
|
136
|
+
|
|
137
|
+
- [Zod](https://zod.dev/) v3 and v4 directly or via [`zodSchema()`](/docs/reference/ai-sdk-core/zod-schema)
|
|
138
|
+
- [Valibot](https://valibot.dev/) via [`valibotSchema()`](/docs/reference/ai-sdk-core/valibot-schema) from `@ai-sdk/valibot`
|
|
139
|
+
- [Standard JSON Schema](https://standardschema.dev/json-schema) compatible schemas
|
|
140
|
+
- Raw JSON schemas via [`jsonSchema()`](/docs/reference/ai-sdk-core/json-schema)
|
|
141
|
+
|
|
142
|
+
<Note>
|
|
143
|
+
You can also use schemas for structured output generation with
|
|
144
|
+
[`generateText`](/docs/reference/ai-sdk-core/generate-text) and
|
|
145
|
+
[`streamText`](/docs/reference/ai-sdk-core/stream-text) using the `output`
|
|
146
|
+
setting.
|
|
147
|
+
</Note>
|
|
148
|
+
|
|
149
|
+
## Tool Packages
|
|
150
|
+
|
|
151
|
+
Since tools are JavaScript objects, they can be packaged and distributed through npm like any other library. This makes it easy to share reusable tools across projects and with the community.
|
|
152
|
+
|
|
153
|
+
### Using Ready-Made Tool Packages
|
|
154
|
+
|
|
155
|
+
Install a tool package and import the tools you need:
|
|
156
|
+
|
|
157
|
+
```bash
|
|
158
|
+
pnpm add some-tool-package
|
|
159
|
+
```
|
|
160
|
+
|
|
161
|
+
Then pass them directly to `generateText`, `streamText`, or your agent definition:
|
|
162
|
+
|
|
163
|
+
```ts highlight="2, 8"
|
|
164
|
+
import { generateText, stepCountIs } from 'ai';
|
|
165
|
+
import { searchTool } from 'some-tool-package';
|
|
166
|
+
|
|
167
|
+
const { text } = await generateText({
|
|
168
|
+
model: 'anthropic/claude-haiku-4.5',
|
|
169
|
+
prompt: 'When was Vercel Ship AI?',
|
|
170
|
+
tools: {
|
|
171
|
+
webSearch: searchTool,
|
|
172
|
+
},
|
|
173
|
+
stopWhen: stepCountIs(10),
|
|
174
|
+
});
|
|
175
|
+
```
|
|
176
|
+
|
|
177
|
+
### Publishing Your Own Tools
|
|
178
|
+
|
|
179
|
+
You can publish your own tool packages to npm for others to use. Simply export your tool objects from your package:
|
|
180
|
+
|
|
181
|
+
```ts filename="my-tools/index.ts"
|
|
182
|
+
import { tool } from 'ai';
|
|
183
|
+
import { z } from 'zod';
|
|
184
|
+
|
|
185
|
+
export const myTool = tool({
|
|
186
|
+
description: 'A helpful tool',
|
|
187
|
+
inputSchema: z.object({
|
|
188
|
+
query: z.string(),
|
|
189
|
+
}),
|
|
190
|
+
execute: async ({ query }) => {
|
|
191
|
+
// your tool logic
|
|
192
|
+
return result;
|
|
193
|
+
},
|
|
194
|
+
});
|
|
195
|
+
```
|
|
196
|
+
|
|
197
|
+
Anyone can then install and use your tools by importing them.
|
|
198
|
+
|
|
199
|
+
To get started, you can use the [AI SDK Tool Package Template](https://github.com/vercel-labs/ai-sdk-tool-as-package-template) which provides a ready-to-use starting point for publishing your own tools.
|
|
200
|
+
|
|
201
|
+
## Toolsets
|
|
202
|
+
|
|
203
|
+
When you work with tools, you typically need a mix of application-specific tools and general-purpose tools. The community has created various toolsets and resources to help you build and use tools.
|
|
204
|
+
|
|
205
|
+
### Ready-to-Use Tool Packages
|
|
206
|
+
|
|
207
|
+
These packages provide pre-built tools you can install and use immediately:
|
|
208
|
+
|
|
209
|
+
- **[@exalabs/ai-sdk](https://www.npmjs.com/package/@exalabs/ai-sdk)** - Web search tool that lets AI search the web and get real-time information.
|
|
210
|
+
- **[@parallel-web/ai-sdk-tools](https://www.npmjs.com/package/@parallel-web/ai-sdk-tools)** - Web search and extract tools powered by Parallel Web API for real-time information and content extraction.
|
|
211
|
+
- **[@perplexity-ai/ai-sdk](https://www.npmjs.com/package/@perplexity-ai/ai-sdk)** - Search the web with real-time results and advanced filtering powered by Perplexity's Search API.
|
|
212
|
+
- **[@tavily/ai-sdk](https://www.npmjs.com/package/@tavily/ai-sdk)** - Search, extract, crawl, and map tools for enterprise-grade agents to explore the web in real-time.
|
|
213
|
+
- **[Stripe agent tools](https://docs.stripe.com/agents?framework=vercel)** - Tools for interacting with Stripe.
|
|
214
|
+
- **[StackOne ToolSet](https://docs.stackone.com/agents/typescript/frameworks/vercel-ai-sdk)** - Agentic integrations for hundreds of [enterprise SaaS](https://www.stackone.com/integrations) platforms.
|
|
215
|
+
- **[agentic](https://docs.agentic.so/marketplace/ts-sdks/ai-sdk)** - A collection of 20+ tools that connect to external APIs such as [Exa](https://exa.ai/) or [E2B](https://e2b.dev/).
|
|
216
|
+
- **[Amazon Bedrock AgentCore](https://github.com/aws/bedrock-agentcore-sdk-typescript)** - Fully managed AI agent services including [**Browser**](https://docs.aws.amazon.com/bedrock-agentcore/latest/devguide/built-in-tools.html) (a fast and secure cloud-based browser runtime to enable agents to interact with web applications, fill forms, navigate websites, and extract information) and [**Code Interpreter**](https://docs.aws.amazon.com/bedrock-agentcore/latest/devguide/built-in-tools.html) (an isolated sandbox environment for agents to execute code in Python, JavaScript, and TypeScript, enhancing accuracy and expanding ability to solve complex end-to-end tasks).
|
|
217
|
+
- **[@airweave/vercel-ai-sdk](https://www.npmjs.com/package/@airweave/vercel-ai-sdk)** - Unified semantic search across 35+ data sources (Notion, Slack, Google Drive, databases, and more) for AI agents.
|
|
218
|
+
- **[Composio](https://docs.composio.dev/providers/vercel)** - 250+ tools like GitHub, Gmail, Salesforce and [more](https://composio.dev/tools).
|
|
219
|
+
- **[JigsawStack](http://www.jigsawstack.com/docs/integration/vercel)** - Over 30+ small custom fine-tuned models available for specific uses.
|
|
220
|
+
- **[AI Tools Registry](https://ai-tools-registry.vercel.app)** - A Shadcn-compatible tool definitions and components registry for the AI SDK.
|
|
221
|
+
- **[Toolhouse](https://docs.toolhouse.ai/toolhouse/toolhouse-sdk/using-vercel-ai)** - AI function-calling in 3 lines of code for over 25 different actions.
|
|
222
|
+
- **[bash-tool](https://www.npmjs.com/package/bash-tool)** - Provides `bash`, `readFile`, and `writeFile` tools for AI agents. Supports [@vercel/sandbox](https://vercel.com/docs/vercel-sandbox) for full VM isolation.
|
|
223
|
+
|
|
224
|
+
### MCP Tools
|
|
225
|
+
|
|
226
|
+
These are pre-built tools available as MCP servers:
|
|
227
|
+
|
|
228
|
+
- **[Smithery](https://smithery.ai/docs/integrations/vercel_ai_sdk)** - An open marketplace of 6,000+ MCPs, including [Browserbase](https://browserbase.com/) and [Exa](https://exa.ai/).
|
|
229
|
+
- **[Pipedream](https://pipedream.com/docs/connect/mcp/ai-frameworks/vercel-ai-sdk)** - Developer toolkit that lets you easily add 3,000+ integrations to your app or AI agent.
|
|
230
|
+
- **[Apify](https://docs.apify.com/platform/integrations/vercel-ai-sdk)** - Apify provides a [marketplace](https://apify.com/store) of thousands of tools for web scraping, data extraction, and browser automation.
|
|
231
|
+
|
|
232
|
+
### Tool Building Tutorials
|
|
233
|
+
|
|
234
|
+
These tutorials and guides help you build your own tools that integrate with specific services:
|
|
235
|
+
|
|
236
|
+
- **[browserbase](https://docs.browserbase.com/integrations/vercel/introduction#vercel-ai-integration)** - Tutorial for building browser tools that run a headless browser.
|
|
237
|
+
- **[browserless](https://docs.browserless.io/ai-integrations/vercel-ai-sdk)** - Guide for integrating browser automation (self-hosted or cloud-based).
|
|
238
|
+
- **[AI Tool Maker](https://github.com/nihaocami/ai-tool-maker)** - A CLI utility to generate AI SDK tools from OpenAPI specs.
|
|
239
|
+
- **[Interlify](https://www.interlify.com/docs/integrate-with-vercel-ai)** - Guide for converting APIs into tools.
|
|
240
|
+
- **[DeepAgent](https://deepagent.amardeep.space/docs/vercel-ai-sdk)** - A suite of 50+ AI tools and integrations, seamlessly connecting with APIs like Tavily, E2B, Airtable and [more](https://deepagent.amardeep.space/docs).
|
|
241
|
+
|
|
242
|
+
<Note>
|
|
243
|
+
Do you have open source tools or tool libraries that are compatible with the
|
|
244
|
+
AI SDK? Please [file a pull request](https://github.com/vercel/ai/pulls) to
|
|
245
|
+
add them to this list.
|
|
246
|
+
</Note>
|
|
247
|
+
|
|
248
|
+
## Learn more
|
|
249
|
+
|
|
250
|
+
The AI SDK Core [Tool Calling](/docs/ai-sdk-core/tools-and-tool-calling)
|
|
251
|
+
and [Agents](/docs/agents) documentation has more information about tools and tool calling.
|
|
@@ -0,0 +1,62 @@
|
|
|
1
|
+
---
|
|
2
|
+
title: Streaming
|
|
3
|
+
description: Why use streaming for AI applications?
|
|
4
|
+
---
|
|
5
|
+
|
|
6
|
+
# Streaming
|
|
7
|
+
|
|
8
|
+
Streaming conversational text UIs (like ChatGPT) have gained massive popularity over the past few months. This section explores the benefits and drawbacks of streaming and blocking interfaces.
|
|
9
|
+
|
|
10
|
+
[Large language models (LLMs)](/docs/foundations/overview#large-language-models) are extremely powerful. However, when generating long outputs, they can be very slow compared to the latency you're likely used to. If you try to build a traditional blocking UI, your users might easily find themselves staring at loading spinners for 5, 10, even up to 40s waiting for the entire LLM response to be generated. This can lead to a poor user experience, especially in conversational applications like chatbots. Streaming UIs can help mitigate this issue by **displaying parts of the response as they become available**.
|
|
11
|
+
|
|
12
|
+
<div className="grid lg:grid-cols-2 grid-cols-1 gap-4 mt-8">
|
|
13
|
+
<Card
|
|
14
|
+
title="Blocking UI"
|
|
15
|
+
description="Blocking responses wait until the full response is available before displaying it."
|
|
16
|
+
>
|
|
17
|
+
<BrowserIllustration highlight blocking />
|
|
18
|
+
</Card>
|
|
19
|
+
<Card
|
|
20
|
+
title="Streaming UI"
|
|
21
|
+
description="Streaming responses can transmit parts of the response as they become available."
|
|
22
|
+
>
|
|
23
|
+
<BrowserIllustration highlight />
|
|
24
|
+
</Card>
|
|
25
|
+
</div>
|
|
26
|
+
|
|
27
|
+
## Real-world Examples
|
|
28
|
+
|
|
29
|
+
Here are two examples that illustrate how streaming UIs can improve user experiences in a real-world setting – the first uses a blocking UI, while the second uses a streaming UI.
|
|
30
|
+
|
|
31
|
+
### Blocking UI
|
|
32
|
+
|
|
33
|
+
<InlinePrompt
|
|
34
|
+
initialInput="Come up with the first 200 characters of the first book in the Harry Potter series."
|
|
35
|
+
blocking
|
|
36
|
+
/>
|
|
37
|
+
|
|
38
|
+
### Streaming UI
|
|
39
|
+
|
|
40
|
+
<InlinePrompt initialInput="Come up with the first 200 characters of the first book in the Harry Potter series." />
|
|
41
|
+
|
|
42
|
+
As you can see, the streaming UI is able to start displaying the response much faster than the blocking UI. This is because the blocking UI has to wait for the entire response to be generated before it can display anything, while the streaming UI can display parts of the response as they become available.
|
|
43
|
+
|
|
44
|
+
While streaming interfaces can greatly enhance user experiences, especially with larger language models, they aren't always necessary or beneficial. If you can achieve your desired functionality using a smaller, faster model without resorting to streaming, this route can often lead to simpler and more manageable development processes.
|
|
45
|
+
|
|
46
|
+
However, regardless of the speed of your model, the AI SDK is designed to make implementing streaming UIs as simple as possible. In the example below, we stream text generation in under 10 lines of code using the SDK's [`streamText`](/docs/reference/ai-sdk-core/stream-text) function:
|
|
47
|
+
|
|
48
|
+
```ts
|
|
49
|
+
import { streamText } from 'ai';
|
|
50
|
+
__PROVIDER_IMPORT__;
|
|
51
|
+
|
|
52
|
+
const { textStream } = streamText({
|
|
53
|
+
model: __MODEL__,
|
|
54
|
+
prompt: 'Write a poem about embedding models.',
|
|
55
|
+
});
|
|
56
|
+
|
|
57
|
+
for await (const textPart of textStream) {
|
|
58
|
+
console.log(textPart);
|
|
59
|
+
}
|
|
60
|
+
```
|
|
61
|
+
|
|
62
|
+
For an introduction to streaming UIs and the AI SDK, check out our [Getting Started guides](/docs/getting-started).
|
|
@@ -0,0 +1,345 @@
|
|
|
1
|
+
---
|
|
2
|
+
title: Provider Options
|
|
3
|
+
description: Learn how to use provider-specific options to control reasoning, caching, and other advanced features.
|
|
4
|
+
---
|
|
5
|
+
|
|
6
|
+
# Provider Options
|
|
7
|
+
|
|
8
|
+
Provider options let you pass provider-specific configuration that goes beyond the [standard settings](/docs/ai-sdk-core/settings) shared by all providers. They are set via the `providerOptions` property on functions like `generateText` and `streamText`.
|
|
9
|
+
|
|
10
|
+
```ts
|
|
11
|
+
const result = await generateText({
|
|
12
|
+
model: openai('gpt-5.2'),
|
|
13
|
+
prompt: 'Explain quantum entanglement.',
|
|
14
|
+
providerOptions: {
|
|
15
|
+
openai: {
|
|
16
|
+
reasoningEffort: 'low',
|
|
17
|
+
},
|
|
18
|
+
},
|
|
19
|
+
});
|
|
20
|
+
```
|
|
21
|
+
|
|
22
|
+
Provider options are namespaced by the provider name (e.g. `openai`, `anthropic`) so you can even include options for multiple providers in the same call — only the options matching the active provider are used. See [Prompts: Provider Options](/docs/foundations/prompts#provider-options) for details on applying options at the message and message-part level.
|
|
23
|
+
|
|
24
|
+
<Note>
|
|
25
|
+
For controlling reasoning effort, consider using the top-level [`reasoning`
|
|
26
|
+
parameter](/docs/ai-sdk-core/reasoning) instead of provider-specific options.
|
|
27
|
+
It provides a portable setting that works across all providers that support
|
|
28
|
+
reasoning. Use provider-specific options only when you need features like
|
|
29
|
+
exact token budgets.
|
|
30
|
+
</Note>
|
|
31
|
+
|
|
32
|
+
## Common Provider Options
|
|
33
|
+
|
|
34
|
+
The sections below cover the most frequently used provider options, focusing on reasoning and output control for OpenAI and Anthropic. For a complete reference, see the individual provider pages:
|
|
35
|
+
|
|
36
|
+
- [OpenAI provider options](/providers/ai-sdk-providers/openai)
|
|
37
|
+
- [Anthropic provider options](/providers/ai-sdk-providers/anthropic)
|
|
38
|
+
|
|
39
|
+
---
|
|
40
|
+
|
|
41
|
+
## OpenAI
|
|
42
|
+
|
|
43
|
+
### Reasoning Effort
|
|
44
|
+
|
|
45
|
+
For reasoning models (e.g. `o3`, `o4-mini`, `gpt-5.2`), `reasoningEffort` controls how much internal reasoning the model performs before responding. Lower values are faster and cheaper; higher values produce more thorough answers.
|
|
46
|
+
|
|
47
|
+
```ts
|
|
48
|
+
import {
|
|
49
|
+
openai,
|
|
50
|
+
type OpenAILanguageModelResponsesOptions,
|
|
51
|
+
} from '@ai-sdk/openai';
|
|
52
|
+
import { generateText } from 'ai';
|
|
53
|
+
|
|
54
|
+
const { text, usage, providerMetadata } = await generateText({
|
|
55
|
+
model: openai('gpt-5.2'),
|
|
56
|
+
prompt: 'Invent a new holiday and describe its traditions.',
|
|
57
|
+
providerOptions: {
|
|
58
|
+
openai: {
|
|
59
|
+
reasoningEffort: 'low', // 'none' | 'minimal' | 'low' | 'medium' | 'high' | 'xhigh'
|
|
60
|
+
} satisfies OpenAILanguageModelResponsesOptions,
|
|
61
|
+
},
|
|
62
|
+
});
|
|
63
|
+
|
|
64
|
+
console.log('Reasoning tokens:', providerMetadata?.openai?.reasoningTokens);
|
|
65
|
+
```
|
|
66
|
+
|
|
67
|
+
| Value | Behavior |
|
|
68
|
+
| ----------- | ------------------------------------------ |
|
|
69
|
+
| `'none'` | No reasoning (GPT-5.1 models only) |
|
|
70
|
+
| `'minimal'` | Bare-minimum reasoning |
|
|
71
|
+
| `'low'` | Fast, concise reasoning |
|
|
72
|
+
| `'medium'` | Balanced (default) |
|
|
73
|
+
| `'high'` | Thorough reasoning |
|
|
74
|
+
| `'xhigh'` | Maximum reasoning (GPT-5.1-Codex-Max only) |
|
|
75
|
+
|
|
76
|
+
<Note>
|
|
77
|
+
`'none'` and `'xhigh'` are only supported on specific models. Using them with
|
|
78
|
+
unsupported models will result in an error.
|
|
79
|
+
</Note>
|
|
80
|
+
|
|
81
|
+
### Reasoning Summary
|
|
82
|
+
|
|
83
|
+
When working with reasoning models, you may want to see _how_ the model arrived at its answer. The `reasoningSummary` option surfaces the model's thought process.
|
|
84
|
+
|
|
85
|
+
#### Streaming
|
|
86
|
+
|
|
87
|
+
```ts
|
|
88
|
+
import {
|
|
89
|
+
openai,
|
|
90
|
+
type OpenAILanguageModelResponsesOptions,
|
|
91
|
+
} from '@ai-sdk/openai';
|
|
92
|
+
import { streamText } from 'ai';
|
|
93
|
+
|
|
94
|
+
const result = streamText({
|
|
95
|
+
model: openai('gpt-5.2'),
|
|
96
|
+
prompt: 'Tell me about the Mission burrito debate in San Francisco.',
|
|
97
|
+
providerOptions: {
|
|
98
|
+
openai: {
|
|
99
|
+
reasoningSummary: 'detailed', // 'auto' | 'detailed'
|
|
100
|
+
} satisfies OpenAILanguageModelResponsesOptions,
|
|
101
|
+
},
|
|
102
|
+
});
|
|
103
|
+
|
|
104
|
+
for await (const part of result.fullStream) {
|
|
105
|
+
if (part.type === 'reasoning') {
|
|
106
|
+
console.log(`Reasoning: ${part.textDelta}`);
|
|
107
|
+
} else if (part.type === 'text-delta') {
|
|
108
|
+
process.stdout.write(part.textDelta);
|
|
109
|
+
}
|
|
110
|
+
}
|
|
111
|
+
```
|
|
112
|
+
|
|
113
|
+
#### Non-streaming
|
|
114
|
+
|
|
115
|
+
```ts
|
|
116
|
+
import {
|
|
117
|
+
openai,
|
|
118
|
+
type OpenAILanguageModelResponsesOptions,
|
|
119
|
+
} from '@ai-sdk/openai';
|
|
120
|
+
import { generateText } from 'ai';
|
|
121
|
+
|
|
122
|
+
const result = await generateText({
|
|
123
|
+
model: openai('gpt-5.2'),
|
|
124
|
+
prompt: 'Tell me about the Mission burrito debate in San Francisco.',
|
|
125
|
+
providerOptions: {
|
|
126
|
+
openai: {
|
|
127
|
+
reasoningSummary: 'auto',
|
|
128
|
+
} satisfies OpenAILanguageModelResponsesOptions,
|
|
129
|
+
},
|
|
130
|
+
});
|
|
131
|
+
|
|
132
|
+
console.log('Reasoning:', result.reasoning);
|
|
133
|
+
```
|
|
134
|
+
|
|
135
|
+
| Value | Behavior |
|
|
136
|
+
| ------------ | ------------------------------ |
|
|
137
|
+
| `'auto'` | Condensed summary of reasoning |
|
|
138
|
+
| `'detailed'` | Comprehensive reasoning output |
|
|
139
|
+
|
|
140
|
+
### Text Verbosity
|
|
141
|
+
|
|
142
|
+
Control the length and detail of the model's text response independently of reasoning:
|
|
143
|
+
|
|
144
|
+
```ts
|
|
145
|
+
import {
|
|
146
|
+
openai,
|
|
147
|
+
type OpenAILanguageModelResponsesOptions,
|
|
148
|
+
} from '@ai-sdk/openai';
|
|
149
|
+
import { generateText } from 'ai';
|
|
150
|
+
|
|
151
|
+
const result = await generateText({
|
|
152
|
+
model: openai('gpt-5-mini'),
|
|
153
|
+
prompt: 'Write a poem about a boy and his first pet dog.',
|
|
154
|
+
providerOptions: {
|
|
155
|
+
openai: {
|
|
156
|
+
textVerbosity: 'low', // 'low' | 'medium' | 'high'
|
|
157
|
+
} satisfies OpenAILanguageModelResponsesOptions,
|
|
158
|
+
},
|
|
159
|
+
});
|
|
160
|
+
```
|
|
161
|
+
|
|
162
|
+
| Value | Behavior |
|
|
163
|
+
| ---------- | -------------------------------- |
|
|
164
|
+
| `'low'` | Terse, minimal responses |
|
|
165
|
+
| `'medium'` | Balanced detail (default) |
|
|
166
|
+
| `'high'` | Verbose, comprehensive responses |
|
|
167
|
+
|
|
168
|
+
---
|
|
169
|
+
|
|
170
|
+
## Anthropic
|
|
171
|
+
|
|
172
|
+
### Thinking (Extended Reasoning)
|
|
173
|
+
|
|
174
|
+
Anthropic's thinking feature gives Claude models a dedicated "thinking" phase before they respond. You enable it by providing a `thinking` object with a token budget.
|
|
175
|
+
|
|
176
|
+
```ts
|
|
177
|
+
import { anthropic, AnthropicLanguageModelOptions } from '@ai-sdk/anthropic';
|
|
178
|
+
import { generateText } from 'ai';
|
|
179
|
+
|
|
180
|
+
const { text, reasoning, reasoningText } = await generateText({
|
|
181
|
+
model: anthropic('claude-opus-4-20250514'),
|
|
182
|
+
prompt: 'How many people will live in the world in 2040?',
|
|
183
|
+
providerOptions: {
|
|
184
|
+
anthropic: {
|
|
185
|
+
thinking: { type: 'enabled', budgetTokens: 12000 },
|
|
186
|
+
} satisfies AnthropicLanguageModelOptions,
|
|
187
|
+
},
|
|
188
|
+
});
|
|
189
|
+
|
|
190
|
+
console.log('Reasoning:', reasoningText);
|
|
191
|
+
console.log('Answer:', text);
|
|
192
|
+
```
|
|
193
|
+
|
|
194
|
+
The `budgetTokens` value sets the upper limit on how many tokens the model can use for its internal reasoning. Higher budgets allow deeper reasoning but increase latency and cost.
|
|
195
|
+
|
|
196
|
+
<Note>
|
|
197
|
+
Thinking is supported on `claude-opus-4-20250514`, `claude-sonnet-4-20250514`,
|
|
198
|
+
and `claude-sonnet-4-5-20250929` models.
|
|
199
|
+
</Note>
|
|
200
|
+
|
|
201
|
+
### Effort
|
|
202
|
+
|
|
203
|
+
The `effort` option provides a simpler way to control reasoning depth without specifying a token budget. It affects thinking, text responses, and function calls.
|
|
204
|
+
|
|
205
|
+
```ts
|
|
206
|
+
import { anthropic, AnthropicLanguageModelOptions } from '@ai-sdk/anthropic';
|
|
207
|
+
import { generateText } from 'ai';
|
|
208
|
+
|
|
209
|
+
const { text, usage } = await generateText({
|
|
210
|
+
model: anthropic('claude-opus-4-20250514'),
|
|
211
|
+
prompt: 'How many people will live in the world in 2040?',
|
|
212
|
+
providerOptions: {
|
|
213
|
+
anthropic: {
|
|
214
|
+
effort: 'low', // 'low' | 'medium' | 'high'
|
|
215
|
+
} satisfies AnthropicLanguageModelOptions,
|
|
216
|
+
},
|
|
217
|
+
});
|
|
218
|
+
```
|
|
219
|
+
|
|
220
|
+
| Value | Behavior |
|
|
221
|
+
| ---------- | ------------------------------------ |
|
|
222
|
+
| `'low'` | Minimal reasoning, fastest responses |
|
|
223
|
+
| `'medium'` | Balanced reasoning |
|
|
224
|
+
| `'high'` | Thorough reasoning (default) |
|
|
225
|
+
|
|
226
|
+
### Fast Mode
|
|
227
|
+
|
|
228
|
+
For `claude-opus-4-6`, the `speed` option enables approximately 2.5x faster output token speeds:
|
|
229
|
+
|
|
230
|
+
```ts
|
|
231
|
+
import { anthropic, AnthropicLanguageModelOptions } from '@ai-sdk/anthropic';
|
|
232
|
+
import { generateText } from 'ai';
|
|
233
|
+
|
|
234
|
+
const { text } = await generateText({
|
|
235
|
+
model: anthropic('claude-opus-4-6'),
|
|
236
|
+
prompt: 'Write a short poem about the sea.',
|
|
237
|
+
providerOptions: {
|
|
238
|
+
anthropic: {
|
|
239
|
+
speed: 'fast', // 'fast' | 'standard'
|
|
240
|
+
} satisfies AnthropicLanguageModelOptions,
|
|
241
|
+
},
|
|
242
|
+
});
|
|
243
|
+
```
|
|
244
|
+
|
|
245
|
+
---
|
|
246
|
+
|
|
247
|
+
## Combining Options
|
|
248
|
+
|
|
249
|
+
You can combine multiple provider options in a single call. For example, using both reasoning effort and reasoning summaries with OpenAI:
|
|
250
|
+
|
|
251
|
+
```ts
|
|
252
|
+
import {
|
|
253
|
+
openai,
|
|
254
|
+
type OpenAILanguageModelResponsesOptions,
|
|
255
|
+
} from '@ai-sdk/openai';
|
|
256
|
+
import { generateText } from 'ai';
|
|
257
|
+
|
|
258
|
+
const result = await generateText({
|
|
259
|
+
model: openai('gpt-5.2'),
|
|
260
|
+
prompt: 'What are the implications of quantum computing for cryptography?',
|
|
261
|
+
providerOptions: {
|
|
262
|
+
openai: {
|
|
263
|
+
reasoningEffort: 'high',
|
|
264
|
+
reasoningSummary: 'detailed',
|
|
265
|
+
} satisfies OpenAILanguageModelResponsesOptions,
|
|
266
|
+
},
|
|
267
|
+
});
|
|
268
|
+
```
|
|
269
|
+
|
|
270
|
+
Or enabling thinking with a low effort level for Anthropic:
|
|
271
|
+
|
|
272
|
+
```ts
|
|
273
|
+
import { anthropic, AnthropicLanguageModelOptions } from '@ai-sdk/anthropic';
|
|
274
|
+
import { generateText } from 'ai';
|
|
275
|
+
|
|
276
|
+
const result = await generateText({
|
|
277
|
+
model: anthropic('claude-opus-4-20250514'),
|
|
278
|
+
prompt: 'Explain the Riemann hypothesis in simple terms.',
|
|
279
|
+
providerOptions: {
|
|
280
|
+
anthropic: {
|
|
281
|
+
thinking: { type: 'enabled', budgetTokens: 8000 },
|
|
282
|
+
effort: 'medium',
|
|
283
|
+
} satisfies AnthropicLanguageModelOptions,
|
|
284
|
+
},
|
|
285
|
+
});
|
|
286
|
+
```
|
|
287
|
+
|
|
288
|
+
## Using Provider Options with the AI Gateway

Provider options work the same way when using the [Vercel AI Gateway](/providers/ai-sdk-providers/ai-gateway). Use the underlying provider name (e.g. `openai`, `anthropic`) as the key — not `gateway`. The AI Gateway forwards these options to the target provider automatically.

```ts
import type { OpenAILanguageModelResponsesOptions } from '@ai-sdk/openai';
import { generateText } from 'ai';

const result = await generateText({
  model: 'openai/gpt-5.2', // AI Gateway model string
  prompt: 'What are the implications of quantum computing for cryptography?',
  providerOptions: {
    openai: {
      reasoningEffort: 'high',
      reasoningSummary: 'detailed',
    } satisfies OpenAILanguageModelResponsesOptions,
  },
});
```

You can also combine gateway-specific options (like routing and fallbacks) with provider-specific options in the same call:

```ts
import type { AnthropicLanguageModelOptions } from '@ai-sdk/anthropic';
import type { GatewayProviderOptions } from '@ai-sdk/gateway';
import { generateText } from 'ai';

const result = await generateText({
  model: 'anthropic/claude-sonnet-4',
  prompt: 'Explain quantum computing',
  providerOptions: {
    // Gateway-specific: control routing
    gateway: {
      order: ['vertex', 'anthropic'],
    } satisfies GatewayProviderOptions,
    // Provider-specific: enable reasoning
    anthropic: {
      thinking: { type: 'enabled', budgetTokens: 12000 },
    } satisfies AnthropicLanguageModelOptions,
  },
});
```

For more on gateway routing, fallbacks, and other gateway-specific options, see the [AI Gateway provider documentation](/providers/ai-sdk-providers/ai-gateway#provider-options).

## Type Safety

Each provider exports a type for its options, which you can use with `satisfies` to get autocomplete and catch typos at build time:

```ts
import { type OpenAILanguageModelResponsesOptions } from '@ai-sdk/openai';
import { type AnthropicLanguageModelOptions } from '@ai-sdk/anthropic';
```

For a full list of available options, see the provider-specific documentation:

- [OpenAI Provider](/providers/ai-sdk-providers/openai)
- [Anthropic Provider](/providers/ai-sdk-providers/anthropic)