@mastra/mcp-docs-server 1.1.5 → 1.1.6-alpha.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/.docs/docs/agents/adding-voice.md +349 -0
- package/.docs/docs/agents/agent-approval.md +558 -0
- package/.docs/docs/agents/agent-memory.md +209 -0
- package/.docs/docs/agents/guardrails.md +374 -0
- package/.docs/docs/agents/network-approval.md +275 -0
- package/.docs/docs/agents/networks.md +299 -0
- package/.docs/docs/agents/overview.md +304 -0
- package/.docs/docs/agents/processors.md +622 -0
- package/.docs/docs/agents/structured-output.md +273 -0
- package/.docs/docs/agents/supervisor-agents.md +304 -0
- package/.docs/docs/agents/using-tools.md +214 -0
- package/.docs/docs/build-with-ai/mcp-docs-server.md +238 -0
- package/.docs/docs/build-with-ai/skills.md +35 -0
- package/.docs/docs/community/contributing-templates.md +3 -0
- package/.docs/docs/community/discord.md +9 -0
- package/.docs/docs/community/licensing.md +66 -0
- package/.docs/docs/deployment/cloud-providers.md +15 -0
- package/.docs/docs/deployment/mastra-server.md +122 -0
- package/.docs/docs/deployment/monorepo.md +142 -0
- package/.docs/docs/deployment/overview.md +62 -0
- package/.docs/docs/deployment/studio.md +239 -0
- package/.docs/docs/deployment/web-framework.md +52 -0
- package/.docs/docs/deployment/workflow-runners.md +9 -0
- package/.docs/docs/evals/built-in-scorers.md +47 -0
- package/.docs/docs/evals/custom-scorers.md +519 -0
- package/.docs/docs/evals/overview.md +141 -0
- package/.docs/docs/evals/running-in-ci.md +124 -0
- package/.docs/docs/getting-started/build-with-ai.md +68 -0
- package/.docs/docs/getting-started/manual-install.md +226 -0
- package/.docs/docs/getting-started/project-structure.md +60 -0
- package/.docs/docs/getting-started/start.md +28 -0
- package/.docs/docs/getting-started/studio.md +101 -0
- package/.docs/docs/index.md +43 -0
- package/.docs/docs/mastra-cloud/deployment.md +77 -0
- package/.docs/docs/mastra-cloud/observability.md +38 -0
- package/.docs/docs/mastra-cloud/overview.md +23 -0
- package/.docs/docs/mastra-cloud/setup.md +42 -0
- package/.docs/docs/mastra-cloud/studio.md +24 -0
- package/.docs/docs/mastra-code/configuration.md +299 -0
- package/.docs/docs/mastra-code/customization.md +228 -0
- package/.docs/docs/mastra-code/modes.md +104 -0
- package/.docs/docs/mastra-code/overview.md +135 -0
- package/.docs/docs/mastra-code/tools.md +229 -0
- package/.docs/docs/mcp/overview.md +373 -0
- package/.docs/docs/mcp/publishing-mcp-server.md +95 -0
- package/.docs/docs/memory/memory-processors.md +314 -0
- package/.docs/docs/memory/message-history.md +260 -0
- package/.docs/docs/memory/observational-memory.md +248 -0
- package/.docs/docs/memory/overview.md +45 -0
- package/.docs/docs/memory/semantic-recall.md +272 -0
- package/.docs/docs/memory/storage.md +261 -0
- package/.docs/docs/memory/working-memory.md +400 -0
- package/.docs/docs/observability/datasets/overview.md +198 -0
- package/.docs/docs/observability/datasets/running-experiments.md +274 -0
- package/.docs/docs/observability/logging.md +99 -0
- package/.docs/docs/observability/overview.md +70 -0
- package/.docs/docs/observability/tracing/bridges/otel.md +209 -0
- package/.docs/docs/observability/tracing/exporters/arize.md +272 -0
- package/.docs/docs/observability/tracing/exporters/braintrust.md +111 -0
- package/.docs/docs/observability/tracing/exporters/cloud.md +127 -0
- package/.docs/docs/observability/tracing/exporters/datadog.md +187 -0
- package/.docs/docs/observability/tracing/exporters/default.md +209 -0
- package/.docs/docs/observability/tracing/exporters/laminar.md +100 -0
- package/.docs/docs/observability/tracing/exporters/langfuse.md +213 -0
- package/.docs/docs/observability/tracing/exporters/langsmith.md +198 -0
- package/.docs/docs/observability/tracing/exporters/otel.md +476 -0
- package/.docs/docs/observability/tracing/exporters/posthog.md +148 -0
- package/.docs/docs/observability/tracing/exporters/sentry.md +208 -0
- package/.docs/docs/observability/tracing/overview.md +1112 -0
- package/.docs/docs/observability/tracing/processors/sensitive-data-filter.md +300 -0
- package/.docs/docs/rag/chunking-and-embedding.md +183 -0
- package/.docs/docs/rag/graph-rag.md +215 -0
- package/.docs/docs/rag/overview.md +72 -0
- package/.docs/docs/rag/retrieval.md +515 -0
- package/.docs/docs/rag/vector-databases.md +645 -0
- package/.docs/docs/server/auth/auth0.md +220 -0
- package/.docs/docs/server/auth/better-auth.md +203 -0
- package/.docs/docs/server/auth/clerk.md +132 -0
- package/.docs/docs/server/auth/composite-auth.md +234 -0
- package/.docs/docs/server/auth/custom-auth-provider.md +513 -0
- package/.docs/docs/server/auth/firebase.md +272 -0
- package/.docs/docs/server/auth/jwt.md +110 -0
- package/.docs/docs/server/auth/simple-auth.md +180 -0
- package/.docs/docs/server/auth/supabase.md +117 -0
- package/.docs/docs/server/auth/workos.md +186 -0
- package/.docs/docs/server/auth.md +38 -0
- package/.docs/docs/server/custom-adapters.md +378 -0
- package/.docs/docs/server/custom-api-routes.md +267 -0
- package/.docs/docs/server/mastra-client.md +243 -0
- package/.docs/docs/server/mastra-server.md +71 -0
- package/.docs/docs/server/middleware.md +225 -0
- package/.docs/docs/server/request-context.md +471 -0
- package/.docs/docs/server/server-adapters.md +547 -0
- package/.docs/docs/streaming/events.md +237 -0
- package/.docs/docs/streaming/overview.md +175 -0
- package/.docs/docs/streaming/tool-streaming.md +175 -0
- package/.docs/docs/streaming/workflow-streaming.md +109 -0
- package/.docs/docs/voice/overview.md +959 -0
- package/.docs/docs/voice/speech-to-speech.md +102 -0
- package/.docs/docs/voice/speech-to-text.md +79 -0
- package/.docs/docs/voice/text-to-speech.md +83 -0
- package/.docs/docs/workflows/agents-and-tools.md +166 -0
- package/.docs/docs/workflows/control-flow.md +822 -0
- package/.docs/docs/workflows/error-handling.md +360 -0
- package/.docs/docs/workflows/human-in-the-loop.md +215 -0
- package/.docs/docs/workflows/overview.md +370 -0
- package/.docs/docs/workflows/snapshots.md +238 -0
- package/.docs/docs/workflows/suspend-and-resume.md +205 -0
- package/.docs/docs/workflows/time-travel.md +309 -0
- package/.docs/docs/workflows/workflow-state.md +181 -0
- package/.docs/docs/workspace/filesystem.md +164 -0
- package/.docs/docs/workspace/overview.md +239 -0
- package/.docs/docs/workspace/sandbox.md +63 -0
- package/.docs/docs/workspace/search.md +243 -0
- package/.docs/docs/workspace/skills.md +169 -0
- package/.docs/guides/agent-frameworks/ai-sdk.md +140 -0
- package/.docs/guides/build-your-ui/ai-sdk-ui.md +1499 -0
- package/.docs/guides/build-your-ui/assistant-ui.md +156 -0
- package/.docs/guides/build-your-ui/copilotkit.md +289 -0
- package/.docs/guides/deployment/amazon-ec2.md +130 -0
- package/.docs/guides/deployment/aws-lambda.md +248 -0
- package/.docs/guides/deployment/azure-app-services.md +114 -0
- package/.docs/guides/deployment/cloudflare.md +99 -0
- package/.docs/guides/deployment/digital-ocean.md +168 -0
- package/.docs/guides/deployment/inngest.md +682 -0
- package/.docs/guides/deployment/netlify.md +77 -0
- package/.docs/guides/deployment/vercel.md +101 -0
- package/.docs/guides/getting-started/astro.md +398 -0
- package/.docs/guides/getting-started/electron.md +504 -0
- package/.docs/guides/getting-started/express.md +251 -0
- package/.docs/guides/getting-started/hono.md +190 -0
- package/.docs/guides/getting-started/next-js.md +347 -0
- package/.docs/guides/getting-started/nuxt.md +497 -0
- package/.docs/guides/getting-started/quickstart.md +67 -0
- package/.docs/guides/getting-started/sveltekit.md +296 -0
- package/.docs/guides/getting-started/vite-react.md +425 -0
- package/.docs/guides/guide/ai-recruiter.md +226 -0
- package/.docs/guides/guide/chef-michel.md +211 -0
- package/.docs/guides/guide/code-review-bot.md +226 -0
- package/.docs/guides/guide/dev-assistant.md +307 -0
- package/.docs/guides/guide/docs-manager.md +238 -0
- package/.docs/guides/guide/github-actions-pr-description.md +236 -0
- package/.docs/guides/guide/notes-mcp-server.md +416 -0
- package/.docs/guides/guide/research-assistant.md +348 -0
- package/.docs/guides/guide/research-coordinator.md +416 -0
- package/.docs/guides/guide/stock-agent.md +132 -0
- package/.docs/guides/guide/web-search.md +320 -0
- package/.docs/guides/guide/whatsapp-chat-bot.md +405 -0
- package/.docs/guides/index.md +3 -0
- package/.docs/guides/migrations/agentnetwork.md +97 -0
- package/.docs/guides/migrations/ai-sdk-v4-to-v5.md +112 -0
- package/.docs/guides/migrations/network-to-supervisor.md +261 -0
- package/.docs/guides/migrations/upgrade-to-v1/agent.md +404 -0
- package/.docs/guides/migrations/upgrade-to-v1/cli.md +57 -0
- package/.docs/guides/migrations/upgrade-to-v1/client.md +337 -0
- package/.docs/guides/migrations/upgrade-to-v1/deployment.md +37 -0
- package/.docs/guides/migrations/upgrade-to-v1/evals.md +239 -0
- package/.docs/guides/migrations/upgrade-to-v1/mastra.md +143 -0
- package/.docs/guides/migrations/upgrade-to-v1/mcp.md +97 -0
- package/.docs/guides/migrations/upgrade-to-v1/memory.md +285 -0
- package/.docs/guides/migrations/upgrade-to-v1/overview.md +119 -0
- package/.docs/guides/migrations/upgrade-to-v1/processors.md +68 -0
- package/.docs/guides/migrations/upgrade-to-v1/rag.md +42 -0
- package/.docs/guides/migrations/upgrade-to-v1/storage.md +553 -0
- package/.docs/guides/migrations/upgrade-to-v1/tools.md +180 -0
- package/.docs/guides/migrations/upgrade-to-v1/tracing.md +412 -0
- package/.docs/guides/migrations/upgrade-to-v1/vectors.md +87 -0
- package/.docs/guides/migrations/upgrade-to-v1/voice.md +30 -0
- package/.docs/guides/migrations/upgrade-to-v1/workflows.md +341 -0
- package/.docs/guides/migrations/vnext-to-standard-apis.md +362 -0
- package/.docs/models/embeddings.md +161 -0
- package/.docs/models/gateways/azure-openai.md +128 -0
- package/.docs/models/gateways/custom-gateways.md +545 -0
- package/.docs/models/gateways/netlify.md +88 -0
- package/.docs/models/gateways/openrouter.md +219 -0
- package/.docs/models/gateways/vercel.md +225 -0
- package/.docs/models/gateways.md +14 -0
- package/.docs/models/index.md +286 -0
- package/.docs/models/providers/302ai.md +134 -0
- package/.docs/models/providers/abacus.md +125 -0
- package/.docs/models/providers/agentrouter.md +90 -0
- package/.docs/models/providers/aihubmix.md +107 -0
- package/.docs/models/providers/alibaba-cn.md +135 -0
- package/.docs/models/providers/alibaba.md +111 -0
- package/.docs/models/providers/amazon-bedrock.md +33 -0
- package/.docs/models/providers/anthropic.md +153 -0
- package/.docs/models/providers/azure.md +33 -0
- package/.docs/models/providers/bailing.md +72 -0
- package/.docs/models/providers/baseten.md +77 -0
- package/.docs/models/providers/berget.md +78 -0
- package/.docs/models/providers/cerebras.md +101 -0
- package/.docs/models/providers/chutes.md +136 -0
- package/.docs/models/providers/cloudflare-ai-gateway.md +33 -0
- package/.docs/models/providers/cloudflare-workers-ai.md +109 -0
- package/.docs/models/providers/cohere.md +33 -0
- package/.docs/models/providers/cortecs.md +91 -0
- package/.docs/models/providers/deepinfra.md +112 -0
- package/.docs/models/providers/deepseek.md +88 -0
- package/.docs/models/providers/fastrouter.md +84 -0
- package/.docs/models/providers/fireworks-ai.md +89 -0
- package/.docs/models/providers/firmware.md +85 -0
- package/.docs/models/providers/friendli.md +78 -0
- package/.docs/models/providers/github-models.md +125 -0
- package/.docs/models/providers/google-vertex.md +33 -0
- package/.docs/models/providers/google.md +159 -0
- package/.docs/models/providers/groq.md +107 -0
- package/.docs/models/providers/helicone.md +161 -0
- package/.docs/models/providers/huggingface.md +90 -0
- package/.docs/models/providers/iflowcn.md +84 -0
- package/.docs/models/providers/inception.md +72 -0
- package/.docs/models/providers/inference.md +79 -0
- package/.docs/models/providers/io-intelligence.md +87 -0
- package/.docs/models/providers/io-net.md +87 -0
- package/.docs/models/providers/jiekou.md +131 -0
- package/.docs/models/providers/kilo.md +333 -0
- package/.docs/models/providers/kimi-for-coding.md +100 -0
- package/.docs/models/providers/kuae-cloud-coding-plan.md +71 -0
- package/.docs/models/providers/llama.md +77 -0
- package/.docs/models/providers/lmstudio.md +73 -0
- package/.docs/models/providers/lucidquery.md +72 -0
- package/.docs/models/providers/minimax-cn-coding-plan.md +102 -0
- package/.docs/models/providers/minimax-cn.md +102 -0
- package/.docs/models/providers/minimax-coding-plan.md +102 -0
- package/.docs/models/providers/minimax.md +104 -0
- package/.docs/models/providers/mistral.md +124 -0
- package/.docs/models/providers/moark.md +72 -0
- package/.docs/models/providers/modelscope.md +77 -0
- package/.docs/models/providers/moonshotai-cn.md +76 -0
- package/.docs/models/providers/moonshotai.md +76 -0
- package/.docs/models/providers/morph.md +73 -0
- package/.docs/models/providers/nano-gpt.md +103 -0
- package/.docs/models/providers/nebius.md +116 -0
- package/.docs/models/providers/nova.md +72 -0
- package/.docs/models/providers/novita-ai.md +154 -0
- package/.docs/models/providers/nvidia.md +141 -0
- package/.docs/models/providers/ollama-cloud.md +103 -0
- package/.docs/models/providers/ollama.md +33 -0
- package/.docs/models/providers/openai.md +193 -0
- package/.docs/models/providers/opencode.md +100 -0
- package/.docs/models/providers/ovhcloud.md +83 -0
- package/.docs/models/providers/perplexity.md +100 -0
- package/.docs/models/providers/poe.md +183 -0
- package/.docs/models/providers/privatemode-ai.md +75 -0
- package/.docs/models/providers/requesty.md +90 -0
- package/.docs/models/providers/scaleway.md +84 -0
- package/.docs/models/providers/siliconflow-cn.md +138 -0
- package/.docs/models/providers/siliconflow.md +140 -0
- package/.docs/models/providers/stackit.md +78 -0
- package/.docs/models/providers/stepfun.md +73 -0
- package/.docs/models/providers/submodel.md +79 -0
- package/.docs/models/providers/synthetic.md +96 -0
- package/.docs/models/providers/togetherai.md +115 -0
- package/.docs/models/providers/upstage.md +73 -0
- package/.docs/models/providers/venice.md +95 -0
- package/.docs/models/providers/vivgrid.md +106 -0
- package/.docs/models/providers/vultr.md +75 -0
- package/.docs/models/providers/wandb.md +80 -0
- package/.docs/models/providers/xai.md +141 -0
- package/.docs/models/providers/xiaomi.md +71 -0
- package/.docs/models/providers/zai-coding-plan.md +80 -0
- package/.docs/models/providers/zai.md +79 -0
- package/.docs/models/providers/zenmux.md +161 -0
- package/.docs/models/providers/zhipuai-coding-plan.md +79 -0
- package/.docs/models/providers/zhipuai.md +79 -0
- package/.docs/models/providers.md +81 -0
- package/.docs/reference/agents/agent.md +141 -0
- package/.docs/reference/agents/generate.md +186 -0
- package/.docs/reference/agents/generateLegacy.md +173 -0
- package/.docs/reference/agents/getDefaultGenerateOptions.md +36 -0
- package/.docs/reference/agents/getDefaultOptions.md +34 -0
- package/.docs/reference/agents/getDefaultStreamOptions.md +36 -0
- package/.docs/reference/agents/getDescription.md +21 -0
- package/.docs/reference/agents/getInstructions.md +34 -0
- package/.docs/reference/agents/getLLM.md +37 -0
- package/.docs/reference/agents/getMemory.md +34 -0
- package/.docs/reference/agents/getModel.md +34 -0
- package/.docs/reference/agents/getTools.md +29 -0
- package/.docs/reference/agents/getVoice.md +34 -0
- package/.docs/reference/agents/listAgents.md +35 -0
- package/.docs/reference/agents/listScorers.md +34 -0
- package/.docs/reference/agents/listTools.md +34 -0
- package/.docs/reference/agents/listWorkflows.md +34 -0
- package/.docs/reference/agents/network.md +133 -0
- package/.docs/reference/ai-sdk/chat-route.md +82 -0
- package/.docs/reference/ai-sdk/handle-chat-stream.md +53 -0
- package/.docs/reference/ai-sdk/handle-network-stream.md +37 -0
- package/.docs/reference/ai-sdk/handle-workflow-stream.md +55 -0
- package/.docs/reference/ai-sdk/network-route.md +74 -0
- package/.docs/reference/ai-sdk/to-ai-sdk-stream.md +231 -0
- package/.docs/reference/ai-sdk/to-ai-sdk-v4-messages.md +79 -0
- package/.docs/reference/ai-sdk/to-ai-sdk-v5-messages.md +76 -0
- package/.docs/reference/ai-sdk/with-mastra.md +59 -0
- package/.docs/reference/ai-sdk/workflow-route.md +79 -0
- package/.docs/reference/auth/auth0.md +73 -0
- package/.docs/reference/auth/better-auth.md +71 -0
- package/.docs/reference/auth/clerk.md +36 -0
- package/.docs/reference/auth/firebase.md +80 -0
- package/.docs/reference/auth/jwt.md +26 -0
- package/.docs/reference/auth/supabase.md +33 -0
- package/.docs/reference/auth/workos.md +84 -0
- package/.docs/reference/cli/create-mastra.md +137 -0
- package/.docs/reference/cli/mastra.md +336 -0
- package/.docs/reference/client-js/agents.md +437 -0
- package/.docs/reference/client-js/error-handling.md +16 -0
- package/.docs/reference/client-js/logs.md +24 -0
- package/.docs/reference/client-js/mastra-client.md +63 -0
- package/.docs/reference/client-js/memory.md +221 -0
- package/.docs/reference/client-js/observability.md +72 -0
- package/.docs/reference/client-js/telemetry.md +20 -0
- package/.docs/reference/client-js/tools.md +44 -0
- package/.docs/reference/client-js/vectors.md +79 -0
- package/.docs/reference/client-js/workflows.md +199 -0
- package/.docs/reference/configuration.md +752 -0
- package/.docs/reference/core/addGateway.md +42 -0
- package/.docs/reference/core/getAgent.md +21 -0
- package/.docs/reference/core/getAgentById.md +21 -0
- package/.docs/reference/core/getDeployer.md +22 -0
- package/.docs/reference/core/getGateway.md +38 -0
- package/.docs/reference/core/getGatewayById.md +41 -0
- package/.docs/reference/core/getLogger.md +22 -0
- package/.docs/reference/core/getMCPServer.md +47 -0
- package/.docs/reference/core/getMCPServerById.md +55 -0
- package/.docs/reference/core/getMemory.md +50 -0
- package/.docs/reference/core/getScorer.md +54 -0
- package/.docs/reference/core/getScorerById.md +54 -0
- package/.docs/reference/core/getServer.md +22 -0
- package/.docs/reference/core/getStorage.md +22 -0
- package/.docs/reference/core/getStoredAgentById.md +89 -0
- package/.docs/reference/core/getTelemetry.md +22 -0
- package/.docs/reference/core/getVector.md +22 -0
- package/.docs/reference/core/getWorkflow.md +42 -0
- package/.docs/reference/core/listAgents.md +21 -0
- package/.docs/reference/core/listGateways.md +40 -0
- package/.docs/reference/core/listLogs.md +38 -0
- package/.docs/reference/core/listLogsByRunId.md +36 -0
- package/.docs/reference/core/listMCPServers.md +55 -0
- package/.docs/reference/core/listMemory.md +56 -0
- package/.docs/reference/core/listScorers.md +29 -0
- package/.docs/reference/core/listStoredAgents.md +93 -0
- package/.docs/reference/core/listVectors.md +22 -0
- package/.docs/reference/core/listWorkflows.md +21 -0
- package/.docs/reference/core/mastra-class.md +66 -0
- package/.docs/reference/core/mastra-model-gateway.md +153 -0
- package/.docs/reference/core/setLogger.md +26 -0
- package/.docs/reference/core/setStorage.md +27 -0
- package/.docs/reference/datasets/addItem.md +37 -0
- package/.docs/reference/datasets/addItems.md +35 -0
- package/.docs/reference/datasets/compareExperiments.md +52 -0
- package/.docs/reference/datasets/create.md +51 -0
- package/.docs/reference/datasets/dataset.md +82 -0
- package/.docs/reference/datasets/datasets-manager.md +94 -0
- package/.docs/reference/datasets/delete.md +25 -0
- package/.docs/reference/datasets/deleteExperiment.md +27 -0
- package/.docs/reference/datasets/deleteItem.md +27 -0
- package/.docs/reference/datasets/deleteItems.md +29 -0
- package/.docs/reference/datasets/get.md +31 -0
- package/.docs/reference/datasets/getDetails.md +47 -0
- package/.docs/reference/datasets/getExperiment.md +30 -0
- package/.docs/reference/datasets/getItem.md +33 -0
- package/.docs/reference/datasets/getItemHistory.md +31 -0
- package/.docs/reference/datasets/list.md +31 -0
- package/.docs/reference/datasets/listExperimentResults.md +39 -0
- package/.docs/reference/datasets/listExperiments.md +33 -0
- package/.docs/reference/datasets/listItems.md +46 -0
- package/.docs/reference/datasets/listVersions.md +33 -0
- package/.docs/reference/datasets/startExperiment.md +62 -0
- package/.docs/reference/datasets/startExperimentAsync.md +43 -0
- package/.docs/reference/datasets/update.md +48 -0
- package/.docs/reference/datasets/updateItem.md +38 -0
- package/.docs/reference/deployer/cloudflare.md +79 -0
- package/.docs/reference/deployer/netlify.md +80 -0
- package/.docs/reference/deployer/vercel.md +91 -0
- package/.docs/reference/deployer.md +100 -0
- package/.docs/reference/evals/answer-relevancy.md +105 -0
- package/.docs/reference/evals/answer-similarity.md +99 -0
- package/.docs/reference/evals/bias.md +120 -0
- package/.docs/reference/evals/completeness.md +136 -0
- package/.docs/reference/evals/content-similarity.md +101 -0
- package/.docs/reference/evals/context-precision.md +196 -0
- package/.docs/reference/evals/context-relevance.md +531 -0
- package/.docs/reference/evals/create-scorer.md +270 -0
- package/.docs/reference/evals/faithfulness.md +114 -0
- package/.docs/reference/evals/hallucination.md +213 -0
- package/.docs/reference/evals/keyword-coverage.md +128 -0
- package/.docs/reference/evals/mastra-scorer.md +123 -0
- package/.docs/reference/evals/noise-sensitivity.md +675 -0
- package/.docs/reference/evals/prompt-alignment.md +614 -0
- package/.docs/reference/evals/run-evals.md +179 -0
- package/.docs/reference/evals/scorer-utils.md +326 -0
- package/.docs/reference/evals/textual-difference.md +113 -0
- package/.docs/reference/evals/tone-consistency.md +119 -0
- package/.docs/reference/evals/tool-call-accuracy.md +533 -0
- package/.docs/reference/evals/toxicity.md +123 -0
- package/.docs/reference/harness/harness-class.md +708 -0
- package/.docs/reference/index.md +277 -0
- package/.docs/reference/logging/pino-logger.md +117 -0
- package/.docs/reference/mastra-code/createMastraCode.md +108 -0
- package/.docs/reference/memory/clone-utilities.md +199 -0
- package/.docs/reference/memory/cloneThread.md +130 -0
- package/.docs/reference/memory/createThread.md +68 -0
- package/.docs/reference/memory/deleteMessages.md +38 -0
- package/.docs/reference/memory/getThreadById.md +24 -0
- package/.docs/reference/memory/listThreads.md +145 -0
- package/.docs/reference/memory/memory-class.md +147 -0
- package/.docs/reference/memory/observational-memory.md +565 -0
- package/.docs/reference/memory/recall.md +91 -0
- package/.docs/reference/observability/tracing/bridges/otel.md +131 -0
- package/.docs/reference/observability/tracing/configuration.md +178 -0
- package/.docs/reference/observability/tracing/exporters/arize.md +141 -0
- package/.docs/reference/observability/tracing/exporters/braintrust.md +93 -0
- package/.docs/reference/observability/tracing/exporters/cloud-exporter.md +163 -0
- package/.docs/reference/observability/tracing/exporters/console-exporter.md +138 -0
- package/.docs/reference/observability/tracing/exporters/datadog.md +116 -0
- package/.docs/reference/observability/tracing/exporters/default-exporter.md +174 -0
- package/.docs/reference/observability/tracing/exporters/laminar.md +78 -0
- package/.docs/reference/observability/tracing/exporters/langfuse.md +134 -0
- package/.docs/reference/observability/tracing/exporters/langsmith.md +108 -0
- package/.docs/reference/observability/tracing/exporters/otel.md +199 -0
- package/.docs/reference/observability/tracing/exporters/posthog.md +92 -0
- package/.docs/reference/observability/tracing/exporters/sentry.md +184 -0
- package/.docs/reference/observability/tracing/instances.md +107 -0
- package/.docs/reference/observability/tracing/interfaces.md +743 -0
- package/.docs/reference/observability/tracing/processors/sensitive-data-filter.md +144 -0
- package/.docs/reference/observability/tracing/spans.md +224 -0
- package/.docs/reference/processors/batch-parts-processor.md +61 -0
- package/.docs/reference/processors/language-detector.md +82 -0
- package/.docs/reference/processors/message-history-processor.md +85 -0
- package/.docs/reference/processors/moderation-processor.md +104 -0
- package/.docs/reference/processors/pii-detector.md +108 -0
- package/.docs/reference/processors/processor-interface.md +521 -0
- package/.docs/reference/processors/prompt-injection-detector.md +72 -0
- package/.docs/reference/processors/semantic-recall-processor.md +117 -0
- package/.docs/reference/processors/system-prompt-scrubber.md +80 -0
- package/.docs/reference/processors/token-limiter-processor.md +115 -0
- package/.docs/reference/processors/tool-call-filter.md +85 -0
- package/.docs/reference/processors/tool-search-processor.md +111 -0
- package/.docs/reference/processors/unicode-normalizer.md +62 -0
- package/.docs/reference/processors/working-memory-processor.md +152 -0
- package/.docs/reference/rag/chunk.md +221 -0
- package/.docs/reference/rag/database-config.md +261 -0
- package/.docs/reference/rag/document.md +114 -0
- package/.docs/reference/rag/embeddings.md +92 -0
- package/.docs/reference/rag/extract-params.md +168 -0
- package/.docs/reference/rag/graph-rag.md +111 -0
- package/.docs/reference/rag/metadata-filters.md +216 -0
- package/.docs/reference/rag/rerank.md +75 -0
- package/.docs/reference/rag/rerankWithScorer.md +80 -0
- package/.docs/reference/server/create-route.md +262 -0
- package/.docs/reference/server/express-adapter.md +176 -0
- package/.docs/reference/server/fastify-adapter.md +90 -0
- package/.docs/reference/server/hono-adapter.md +162 -0
- package/.docs/reference/server/koa-adapter.md +127 -0
- package/.docs/reference/server/mastra-server.md +298 -0
- package/.docs/reference/server/register-api-route.md +249 -0
- package/.docs/reference/server/routes.md +306 -0
- package/.docs/reference/storage/cloudflare-d1.md +218 -0
- package/.docs/reference/storage/cloudflare.md +88 -0
- package/.docs/reference/storage/composite.md +235 -0
- package/.docs/reference/storage/convex.md +161 -0
- package/.docs/reference/storage/dynamodb.md +282 -0
- package/.docs/reference/storage/lance.md +131 -0
- package/.docs/reference/storage/libsql.md +135 -0
- package/.docs/reference/storage/mongodb.md +262 -0
- package/.docs/reference/storage/mssql.md +157 -0
- package/.docs/reference/storage/overview.md +121 -0
- package/.docs/reference/storage/postgresql.md +526 -0
- package/.docs/reference/storage/upstash.md +160 -0
- package/.docs/reference/streaming/ChunkType.md +292 -0
- package/.docs/reference/streaming/agents/MastraModelOutput.md +182 -0
- package/.docs/reference/streaming/agents/stream.md +221 -0
- package/.docs/reference/streaming/agents/streamLegacy.md +142 -0
- package/.docs/reference/streaming/workflows/observeStream.md +42 -0
- package/.docs/reference/streaming/workflows/resumeStream.md +61 -0
- package/.docs/reference/streaming/workflows/stream.md +88 -0
- package/.docs/reference/streaming/workflows/timeTravelStream.md +142 -0
- package/.docs/reference/templates/overview.md +194 -0
- package/.docs/reference/tools/create-tool.md +237 -0
- package/.docs/reference/tools/document-chunker-tool.md +89 -0
- package/.docs/reference/tools/graph-rag-tool.md +182 -0
- package/.docs/reference/tools/mcp-client.md +954 -0
- package/.docs/reference/tools/mcp-server.md +1271 -0
- package/.docs/reference/tools/vector-query-tool.md +459 -0
- package/.docs/reference/vectors/astra.md +121 -0
- package/.docs/reference/vectors/chroma.md +264 -0
- package/.docs/reference/vectors/convex.md +300 -0
- package/.docs/reference/vectors/couchbase.md +226 -0
- package/.docs/reference/vectors/duckdb.md +318 -0
- package/.docs/reference/vectors/elasticsearch.md +189 -0
- package/.docs/reference/vectors/lance.md +220 -0
- package/.docs/reference/vectors/libsql.md +305 -0
- package/.docs/reference/vectors/mongodb.md +295 -0
- package/.docs/reference/vectors/opensearch.md +99 -0
- package/.docs/reference/vectors/pg.md +408 -0
- package/.docs/reference/vectors/pinecone.md +168 -0
- package/.docs/reference/vectors/qdrant.md +222 -0
- package/.docs/reference/vectors/s3vectors.md +277 -0
- package/.docs/reference/vectors/turbopuffer.md +157 -0
- package/.docs/reference/vectors/upstash.md +294 -0
- package/.docs/reference/vectors/vectorize.md +147 -0
- package/.docs/reference/voice/azure.md +148 -0
- package/.docs/reference/voice/cloudflare.md +83 -0
- package/.docs/reference/voice/composite-voice.md +121 -0
- package/.docs/reference/voice/deepgram.md +79 -0
- package/.docs/reference/voice/elevenlabs.md +98 -0
- package/.docs/reference/voice/google-gemini-live.md +378 -0
- package/.docs/reference/voice/google.md +228 -0
- package/.docs/reference/voice/mastra-voice.md +311 -0
- package/.docs/reference/voice/murf.md +122 -0
- package/.docs/reference/voice/openai-realtime.md +203 -0
- package/.docs/reference/voice/openai.md +88 -0
- package/.docs/reference/voice/playai.md +80 -0
- package/.docs/reference/voice/sarvam.md +126 -0
- package/.docs/reference/voice/speechify.md +75 -0
- package/.docs/reference/voice/voice.addInstructions.md +55 -0
- package/.docs/reference/voice/voice.addTools.md +67 -0
- package/.docs/reference/voice/voice.answer.md +54 -0
- package/.docs/reference/voice/voice.close.md +51 -0
- package/.docs/reference/voice/voice.connect.md +94 -0
- package/.docs/reference/voice/voice.events.md +37 -0
- package/.docs/reference/voice/voice.getSpeakers.md +129 -0
- package/.docs/reference/voice/voice.listen.md +164 -0
- package/.docs/reference/voice/voice.off.md +54 -0
- package/.docs/reference/voice/voice.on.md +111 -0
- package/.docs/reference/voice/voice.send.md +65 -0
- package/.docs/reference/voice/voice.speak.md +157 -0
- package/.docs/reference/voice/voice.updateConfig.md +60 -0
- package/.docs/reference/workflows/run-methods/cancel.md +86 -0
- package/.docs/reference/workflows/run-methods/restart.md +33 -0
- package/.docs/reference/workflows/run-methods/resume.md +59 -0
- package/.docs/reference/workflows/run-methods/start.md +58 -0
- package/.docs/reference/workflows/run-methods/startAsync.md +67 -0
- package/.docs/reference/workflows/run-methods/timeTravel.md +142 -0
- package/.docs/reference/workflows/run.md +59 -0
- package/.docs/reference/workflows/step.md +119 -0
- package/.docs/reference/workflows/workflow-methods/branch.md +25 -0
- package/.docs/reference/workflows/workflow-methods/commit.md +17 -0
- package/.docs/reference/workflows/workflow-methods/create-run.md +63 -0
- package/.docs/reference/workflows/workflow-methods/dountil.md +25 -0
- package/.docs/reference/workflows/workflow-methods/dowhile.md +25 -0
- package/.docs/reference/workflows/workflow-methods/foreach.md +118 -0
- package/.docs/reference/workflows/workflow-methods/map.md +93 -0
- package/.docs/reference/workflows/workflow-methods/parallel.md +21 -0
- package/.docs/reference/workflows/workflow-methods/sleep.md +35 -0
- package/.docs/reference/workflows/workflow-methods/sleepUntil.md +35 -0
- package/.docs/reference/workflows/workflow-methods/then.md +21 -0
- package/.docs/reference/workflows/workflow.md +157 -0
- package/.docs/reference/workspace/e2b-sandbox.md +289 -0
- package/.docs/reference/workspace/filesystem.md +255 -0
- package/.docs/reference/workspace/gcs-filesystem.md +174 -0
- package/.docs/reference/workspace/local-filesystem.md +343 -0
- package/.docs/reference/workspace/local-sandbox.md +301 -0
- package/.docs/reference/workspace/s3-filesystem.md +175 -0
- package/.docs/reference/workspace/sandbox.md +87 -0
- package/.docs/reference/workspace/workspace-class.md +244 -0
- package/CHANGELOG.md +8 -0
- package/package.json +5 -5
|
@@ -0,0 +1,203 @@
|
|
|
1
|
+
# OpenAI Realtime Voice
|
|
2
|
+
|
|
3
|
+
The OpenAIRealtimeVoice class provides real-time voice interaction capabilities using OpenAI's WebSocket-based API. It supports real-time speech-to-speech, voice activity detection, and event-based audio streaming.
|
|
4
|
+
|
|
5
|
+
## Usage Example
|
|
6
|
+
|
|
7
|
+
```typescript
|
|
8
|
+
import { OpenAIRealtimeVoice } from '@mastra/voice-openai-realtime'
|
|
9
|
+
import { playAudio, getMicrophoneStream } from '@mastra/node-audio'
|
|
10
|
+
|
|
11
|
+
// Initialize with default configuration using environment variables
|
|
12
|
+
const voice = new OpenAIRealtimeVoice()
|
|
13
|
+
|
|
14
|
+
// Or initialize with specific configuration
|
|
15
|
+
const voiceWithConfig = new OpenAIRealtimeVoice({
|
|
16
|
+
apiKey: 'your-openai-api-key',
|
|
17
|
+
model: 'gpt-5.1-realtime-preview-2024-12-17',
|
|
18
|
+
speaker: 'alloy', // Default voice
|
|
19
|
+
})
|
|
20
|
+
|
|
21
|
+
voiceWithConfig.updateSession({
|
|
22
|
+
turn_detection: {
|
|
23
|
+
type: 'server_vad',
|
|
24
|
+
threshold: 0.6,
|
|
25
|
+
silence_duration_ms: 1200,
|
|
26
|
+
},
|
|
27
|
+
})
|
|
28
|
+
|
|
29
|
+
// Establish connection
|
|
30
|
+
await voice.connect()
|
|
31
|
+
|
|
32
|
+
// Set up event listeners
|
|
33
|
+
voice.on('speaker', ({ audio }) => {
|
|
34
|
+
// Handle audio data (Int16Array, PCM format by default)
|
|
35
|
+
playAudio(audio)
|
|
36
|
+
})
|
|
37
|
+
|
|
38
|
+
voice.on('writing', ({ text, role }) => {
|
|
39
|
+
// Handle transcribed text
|
|
40
|
+
console.log(`${role}: ${text}`)
|
|
41
|
+
})
|
|
42
|
+
|
|
43
|
+
// Convert text to speech
|
|
44
|
+
await voice.speak('Hello, how can I help you today?', {
|
|
45
|
+
speaker: 'echo', // Override default voice
|
|
46
|
+
})
|
|
47
|
+
|
|
48
|
+
// Process audio input
|
|
49
|
+
const microphoneStream = getMicrophoneStream()
|
|
50
|
+
await voice.send(microphoneStream)
|
|
51
|
+
|
|
52
|
+
// When done, disconnect
|
|
53
|
+
voice.close()
|
|
54
|
+
```
|
|
55
|
+
|
|
56
|
+
## Configuration
|
|
57
|
+
|
|
58
|
+
### Constructor Options
|
|
59
|
+
|
|
60
|
+
**model?:** (`string`): The model ID to use for real-time voice interactions. (Default: `'gpt-5.1-realtime-preview-2024-12-17'`)
|
|
61
|
+
|
|
62
|
+
**apiKey?:** (`string`): OpenAI API key. Falls back to OPENAI\_API\_KEY environment variable.
|
|
63
|
+
|
|
64
|
+
**speaker?:** (`string`): Default voice ID for speech synthesis. (Default: `'alloy'`)
|
|
65
|
+
|
|
66
|
+
### Voice Activity Detection (VAD) Configuration
|
|
67
|
+
|
|
68
|
+
**type?:** (`string`): Type of VAD to use. Server-side VAD provides better accuracy. (Default: `'server_vad'`)
|
|
69
|
+
|
|
70
|
+
**threshold?:** (`number`): Speech detection sensitivity (0.0-1.0). (Default: `0.5`)
|
|
71
|
+
|
|
72
|
+
**prefix\_padding\_ms?:** (`number`): Milliseconds of audio to include before speech is detected. (Default: `1000`)
|
|
73
|
+
|
|
74
|
+
**silence\_duration\_ms?:** (`number`): Milliseconds of silence before ending a turn. (Default: `1000`)
|
|
75
|
+
|
|
76
|
+
## Methods
|
|
77
|
+
|
|
78
|
+
### connect()
|
|
79
|
+
|
|
80
|
+
Establishes a connection to the OpenAI realtime service. Must be called before using speak, listen, or send functions.
|
|
81
|
+
|
|
82
|
+
**returns:** (`Promise<void>`): Promise that resolves when the connection is established.
|
|
83
|
+
|
|
84
|
+
### speak()
|
|
85
|
+
|
|
86
|
+
Emits a speaking event using the configured voice model. Can accept either a string or a readable stream as input.
|
|
87
|
+
|
|
88
|
+
**input:** (`string | NodeJS.ReadableStream`): Text or text stream to convert to speech.
|
|
89
|
+
|
|
90
|
+
**options.speaker?:** (`string`): Voice ID to use for this specific speech request. (Default: `Constructor's speaker value`)
|
|
91
|
+
|
|
92
|
+
Returns: `Promise<void>`
|
|
93
|
+
|
|
94
|
+
### listen()
|
|
95
|
+
|
|
96
|
+
Processes audio input for speech recognition. Takes a readable stream of audio data and emits a 'listening' event with the transcribed text.
|
|
97
|
+
|
|
98
|
+
**audioData:** (`NodeJS.ReadableStream`): Audio stream to transcribe.
|
|
99
|
+
|
|
100
|
+
Returns: `Promise<void>`
|
|
101
|
+
|
|
102
|
+
### send()
|
|
103
|
+
|
|
104
|
+
Streams audio data in real-time to the OpenAI service for continuous audio streaming scenarios like live microphone input.
|
|
105
|
+
|
|
106
|
+
**audioData:** (`NodeJS.ReadableStream`): Audio stream to send to the service.
|
|
107
|
+
|
|
108
|
+
Returns: `Promise<void>`
|
|
109
|
+
|
|
110
|
+
### updateConfig()
|
|
111
|
+
|
|
112
|
+
Updates the session configuration for the voice instance. This can be used to modify voice settings, turn detection, and other parameters.
|
|
113
|
+
|
|
114
|
+
**sessionConfig:** (`Realtime.SessionConfig`): New session configuration to apply.
|
|
115
|
+
|
|
116
|
+
Returns: `void`
|
|
117
|
+
|
|
118
|
+
### addTools()
|
|
119
|
+
|
|
120
|
+
Adds a set of tools to the voice instance. Tools allow the model to perform additional actions during conversations. When OpenAIRealtimeVoice is added to an Agent, any tools configured for the Agent will automatically be available to the voice interface.
|
|
121
|
+
|
|
122
|
+
**tools?:** (`ToolsInput`): Tools configuration to equip.
|
|
123
|
+
|
|
124
|
+
Returns: `void`
|
|
125
|
+
|
|
126
|
+
### close()
|
|
127
|
+
|
|
128
|
+
Disconnects from the OpenAI realtime session and cleans up resources. Should be called when you're done with the voice instance.
|
|
129
|
+
|
|
130
|
+
Returns: `void`
|
|
131
|
+
|
|
132
|
+
### getSpeakers()
|
|
133
|
+
|
|
134
|
+
Returns a list of available voice speakers.
|
|
135
|
+
|
|
136
|
+
Returns: `Promise<Array<{ voiceId: string; [key: string]: any }>>`
|
|
137
|
+
|
|
138
|
+
### on()
|
|
139
|
+
|
|
140
|
+
Registers an event listener for voice events.
|
|
141
|
+
|
|
142
|
+
**event:** (`string`): Name of the event to listen for.
|
|
143
|
+
|
|
144
|
+
**callback:** (`Function`): Function to call when the event occurs.
|
|
145
|
+
|
|
146
|
+
Returns: `void`
|
|
147
|
+
|
|
148
|
+
### off()
|
|
149
|
+
|
|
150
|
+
Removes a previously registered event listener.
|
|
151
|
+
|
|
152
|
+
**event:** (`string`): Name of the event to stop listening to.
|
|
153
|
+
|
|
154
|
+
**callback:** (`Function`): The specific callback function to remove.
|
|
155
|
+
|
|
156
|
+
Returns: `void`
|
|
157
|
+
|
|
158
|
+
## Events
|
|
159
|
+
|
|
160
|
+
The OpenAIRealtimeVoice class emits the following events:
|
|
161
|
+
|
|
162
|
+
**speaker:** (`event`): Emitted when audio data is received from the model. Callback receives { audio: Int16Array }.
|
|
163
|
+
|
|
164
|
+
**writing:** (`event`): Emitted when transcribed text is available. Callback receives { text: string, role: string }.
|
|
165
|
+
|
|
166
|
+
**error:** (`event`): Emitted when an error occurs. Callback receives the error object.
|
|
167
|
+
|
|
168
|
+
### OpenAI Realtime Events
|
|
169
|
+
|
|
170
|
+
You can also listen to [OpenAI Realtime utility events](https://github.com/openai/openai-realtime-api-beta#reference-client-utility-events) by prefixing with 'openAIRealtime:':
|
|
171
|
+
|
|
172
|
+
**openAIRealtime:conversation.created:** (`event`): Emitted when a new conversation is created.
|
|
173
|
+
|
|
174
|
+
**openAIRealtime:conversation.interrupted:** (`event`): Emitted when a conversation is interrupted.
|
|
175
|
+
|
|
176
|
+
**openAIRealtime:conversation.updated:** (`event`): Emitted when a conversation is updated.
|
|
177
|
+
|
|
178
|
+
**openAIRealtime:conversation.item.appended:** (`event`): Emitted when an item is appended to the conversation.
|
|
179
|
+
|
|
180
|
+
**openAIRealtime:conversation.item.completed:** (`event`): Emitted when an item in the conversation is completed.
|
|
181
|
+
|
|
182
|
+
## Available Voices
|
|
183
|
+
|
|
184
|
+
The following voice options are available:
|
|
185
|
+
|
|
186
|
+
- `alloy`: Neutral and balanced
|
|
187
|
+
- `ash`: Clear and precise
|
|
188
|
+
- `ballad`: Melodic and smooth
|
|
189
|
+
- `coral`: Warm and friendly
|
|
190
|
+
- `echo`: Resonant and deep
|
|
191
|
+
- `sage`: Calm and thoughtful
|
|
192
|
+
- `shimmer`: Bright and energetic
|
|
193
|
+
- `verse`: Versatile and expressive
|
|
194
|
+
|
|
195
|
+
## Notes
|
|
196
|
+
|
|
197
|
+
- API keys can be provided via constructor options or the `OPENAI_API_KEY` environment variable
|
|
198
|
+
- The OpenAI Realtime Voice API uses WebSockets for real-time communication
|
|
199
|
+
- Server-side Voice Activity Detection (VAD) provides better accuracy for speech detection
|
|
200
|
+
- All audio data is processed as Int16Array format
|
|
201
|
+
- The voice instance must be connected with `connect()` before using other methods
|
|
202
|
+
- Always call `close()` when done to properly clean up resources
|
|
203
|
+
- Memory management is handled by OpenAI Realtime API
|
|
@@ -0,0 +1,88 @@
|
|
|
1
|
+
# OpenAI
|
|
2
|
+
|
|
3
|
+
The OpenAIVoice class in Mastra provides text-to-speech and speech-to-text capabilities using OpenAI's models.
|
|
4
|
+
|
|
5
|
+
## Usage Example
|
|
6
|
+
|
|
7
|
+
```typescript
|
|
8
|
+
import { OpenAIVoice } from '@mastra/voice-openai'
|
|
9
|
+
|
|
10
|
+
// Initialize with default configuration using environment variables
|
|
11
|
+
const voice = new OpenAIVoice()
|
|
12
|
+
|
|
13
|
+
// Or initialize with specific configuration
|
|
14
|
+
const voiceWithConfig = new OpenAIVoice({
|
|
15
|
+
speechModel: {
|
|
16
|
+
name: 'tts-1-hd',
|
|
17
|
+
apiKey: 'your-openai-api-key',
|
|
18
|
+
},
|
|
19
|
+
listeningModel: {
|
|
20
|
+
name: 'whisper-1',
|
|
21
|
+
apiKey: 'your-openai-api-key',
|
|
22
|
+
},
|
|
23
|
+
speaker: 'alloy', // Default voice
|
|
24
|
+
})
|
|
25
|
+
|
|
26
|
+
// Convert text to speech
|
|
27
|
+
const audioStream = await voice.speak('Hello, how can I help you?', {
|
|
28
|
+
speaker: 'nova', // Override default voice
|
|
29
|
+
speed: 1.2, // Adjust speech speed
|
|
30
|
+
})
|
|
31
|
+
|
|
32
|
+
// Convert speech to text
|
|
33
|
+
const text = await voice.listen(audioStream, {
|
|
34
|
+
filetype: 'mp3',
|
|
35
|
+
})
|
|
36
|
+
```
|
|
37
|
+
|
|
38
|
+
## Configuration
|
|
39
|
+
|
|
40
|
+
### Constructor Options
|
|
41
|
+
|
|
42
|
+
**speechModel?:** (`OpenAIConfig`): Configuration for text-to-speech synthesis. (Default: `{ name: 'tts-1' }`)
|
|
43
|
+
|
|
44
|
+
**listeningModel?:** (`OpenAIConfig`): Configuration for speech-to-text recognition. (Default: `{ name: 'whisper-1' }`)
|
|
45
|
+
|
|
46
|
+
**speaker?:** (`OpenAIVoiceId`): Default voice ID for speech synthesis. (Default: `'alloy'`)
|
|
47
|
+
|
|
48
|
+
### OpenAIConfig
|
|
49
|
+
|
|
50
|
+
**name?:** (`'tts-1' | 'tts-1-hd' | 'whisper-1'`): Model name. Use 'tts-1-hd' for higher quality audio.
|
|
51
|
+
|
|
52
|
+
**apiKey?:** (`string`): OpenAI API key. Falls back to OPENAI\_API\_KEY environment variable.
|
|
53
|
+
|
|
54
|
+
## Methods
|
|
55
|
+
|
|
56
|
+
### speak()
|
|
57
|
+
|
|
58
|
+
Converts text to speech using OpenAI's text-to-speech models.
|
|
59
|
+
|
|
60
|
+
**input:** (`string | NodeJS.ReadableStream`): Text or text stream to convert to speech.
|
|
61
|
+
|
|
62
|
+
**options.speaker?:** (`OpenAIVoiceId`): Voice ID to use for speech synthesis. (Default: `Constructor's speaker value`)
|
|
63
|
+
|
|
64
|
+
**options.speed?:** (`number`): Speech speed multiplier. (Default: `1.0`)
|
|
65
|
+
|
|
66
|
+
Returns: `Promise<NodeJS.ReadableStream>`
|
|
67
|
+
|
|
68
|
+
### listen()
|
|
69
|
+
|
|
70
|
+
Transcribes audio using OpenAI's Whisper model.
|
|
71
|
+
|
|
72
|
+
**audioStream:** (`NodeJS.ReadableStream`): Audio stream to transcribe.
|
|
73
|
+
|
|
74
|
+
**options.filetype?:** (`string`): Audio format of the input stream. (Default: `'mp3'`)
|
|
75
|
+
|
|
76
|
+
Returns: `Promise<string>`
|
|
77
|
+
|
|
78
|
+
### getSpeakers()
|
|
79
|
+
|
|
80
|
+
Returns an array of available voice options, where each object contains:
|
|
81
|
+
|
|
82
|
+
**voiceId:** (`string`): Unique identifier for the voice
|
|
83
|
+
|
|
84
|
+
## Notes
|
|
85
|
+
|
|
86
|
+
- API keys can be provided via constructor options or the `OPENAI_API_KEY` environment variable
|
|
87
|
+
- The `tts-1-hd` model provides higher quality audio but may have slower processing times
|
|
88
|
+
- Speech recognition supports multiple audio formats including mp3, wav, and webm
|
|
@@ -0,0 +1,80 @@
|
|
|
1
|
+
# PlayAI
|
|
2
|
+
|
|
3
|
+
The PlayAI voice implementation in Mastra provides text-to-speech capabilities using PlayAI's API.
|
|
4
|
+
|
|
5
|
+
## Usage Example
|
|
6
|
+
|
|
7
|
+
```typescript
|
|
8
|
+
import { PlayAIVoice } from '@mastra/voice-playai'
|
|
9
|
+
|
|
10
|
+
// Initialize with default configuration (uses PLAYAI_API_KEY environment variable and PLAYAI_USER_ID environment variable)
|
|
11
|
+
const voice = new PlayAIVoice()
|
|
12
|
+
|
|
13
|
+
// Or initialize with a specific configuration
|
|
14
|
+
const voiceWithConfig = new PlayAIVoice({
|
|
15
|
+
speechModel: {
|
|
16
|
+
name: 'PlayDialog',
|
|
17
|
+
apiKey: process.env.PLAYAI_API_KEY,
|
|
18
|
+
userId: process.env.PLAYAI_USER_ID,
|
|
19
|
+
},
|
|
20
|
+
speaker: 'Angelo', // Default voice
|
|
21
|
+
})
|
|
22
|
+
|
|
23
|
+
// Convert text to speech with a specific voice
|
|
24
|
+
const audioStream = await voice.speak('Hello, world!', {
|
|
25
|
+
speaker:
|
|
26
|
+
's3://voice-cloning-zero-shot/b27bc13e-996f-4841-b584-4d35801aea98/original/manifest.json', // Dexter voice
|
|
27
|
+
})
|
|
28
|
+
```
|
|
29
|
+
|
|
30
|
+
## Constructor Parameters
|
|
31
|
+
|
|
32
|
+
**speechModel?:** (`PlayAIConfig`): Configuration for text-to-speech functionality (Default: `{ name: 'PlayDialog' }`)
|
|
33
|
+
|
|
34
|
+
**speaker?:** (`string`): Default voice ID to use for speech synthesis (Default: `First available voice ID`)
|
|
35
|
+
|
|
36
|
+
### PlayAIConfig
|
|
37
|
+
|
|
38
|
+
**name?:** (`'PlayDialog' | 'Play3.0-mini'`): The PlayAI model to use (Default: `'PlayDialog'`)
|
|
39
|
+
|
|
40
|
+
**apiKey?:** (`string`): PlayAI API key. Falls back to PLAYAI\_API\_KEY environment variable
|
|
41
|
+
|
|
42
|
+
**userId?:** (`string`): PlayAI user ID. Falls back to PLAYAI\_USER\_ID environment variable
|
|
43
|
+
|
|
44
|
+
## Methods
|
|
45
|
+
|
|
46
|
+
### speak()
|
|
47
|
+
|
|
48
|
+
Converts text to speech using the configured speech model and voice.
|
|
49
|
+
|
|
50
|
+
**input:** (`string | NodeJS.ReadableStream`): Text to convert to speech. If a stream is provided, it will be converted to text first.
|
|
51
|
+
|
|
52
|
+
**options.speaker?:** (`string`): Override the default speaker for this request (Default: `Constructor's speaker value`)
|
|
53
|
+
|
|
54
|
+
Returns: `Promise<NodeJS.ReadableStream>`.
|
|
55
|
+
|
|
56
|
+
### getSpeakers()
|
|
57
|
+
|
|
58
|
+
Returns an array of available voice options, where each object contains:
|
|
59
|
+
|
|
60
|
+
**name:** (`string`): Name of the voice
|
|
61
|
+
|
|
62
|
+
**accent:** (`string`): Accent of the voice (e.g., 'US', 'British', 'Australian')
|
|
63
|
+
|
|
64
|
+
**gender:** (`'M' | 'F'`): Gender of the voice
|
|
65
|
+
|
|
66
|
+
**age:** (`'Young' | 'Middle' | 'Old'`): Age category of the voice
|
|
67
|
+
|
|
68
|
+
**style:** (`'Conversational' | 'Narrative'`): Speaking style of the voice
|
|
69
|
+
|
|
70
|
+
**voiceId:** (`string`): Unique identifier for the voice
|
|
71
|
+
|
|
72
|
+
### listen()
|
|
73
|
+
|
|
74
|
+
This method is not supported by PlayAI and will throw an error. PlayAI does not provide speech-to-text functionality.
|
|
75
|
+
|
|
76
|
+
## Notes
|
|
77
|
+
|
|
78
|
+
- PlayAI requires both an API key and a user ID for authentication
|
|
79
|
+
- The service offers two models: 'PlayDialog' and 'Play3.0-mini'
|
|
80
|
+
- Each voice has a unique S3 manifest ID that must be used when making API calls
|
|
@@ -0,0 +1,126 @@
|
|
|
1
|
+
# Sarvam
|
|
2
|
+
|
|
3
|
+
The SarvamVoice class in Mastra provides text-to-speech and speech-to-text capabilities using Sarvam AI models.
|
|
4
|
+
|
|
5
|
+
## Usage Example
|
|
6
|
+
|
|
7
|
+
```typescript
|
|
8
|
+
import { SarvamVoice } from '@mastra/voice-sarvam'
|
|
9
|
+
|
|
10
|
+
// Initialize with default configuration using environment variables
|
|
11
|
+
const voice = new SarvamVoice()
|
|
12
|
+
|
|
13
|
+
// Or initialize with specific configuration
|
|
14
|
+
const voiceWithConfig = new SarvamVoice({
|
|
15
|
+
speechModel: {
|
|
16
|
+
model: 'bulbul:v1',
|
|
17
|
+
apiKey: process.env.SARVAM_API_KEY!,
|
|
18
|
+
language: 'en-IN',
|
|
19
|
+
properties: {
|
|
20
|
+
pitch: 0,
|
|
21
|
+
pace: 1.65,
|
|
22
|
+
loudness: 1.5,
|
|
23
|
+
speech_sample_rate: 8000,
|
|
24
|
+
enable_preprocessing: false,
|
|
25
|
+
eng_interpolation_wt: 123,
|
|
26
|
+
},
|
|
27
|
+
},
|
|
28
|
+
listeningModel: {
|
|
29
|
+
model: 'saarika:v2',
|
|
30
|
+
apiKey: process.env.SARVAM_API_KEY!,
|
|
31
|
+
languageCode: 'en-IN',
|
|
32
|
+
filetype: 'wav',
|
|
33
|
+
},
|
|
34
|
+
speaker: 'meera', // Default voice
|
|
35
|
+
})
|
|
36
|
+
|
|
37
|
+
// Convert text to speech
|
|
38
|
+
const audioStream = await voice.speak('Hello, how can I help you?')
|
|
39
|
+
|
|
40
|
+
// Convert speech to text
|
|
41
|
+
const text = await voice.listen(audioStream, {
|
|
42
|
+
filetype: 'wav',
|
|
43
|
+
})
|
|
44
|
+
```
|
|
45
|
+
|
|
46
|
+
### Sarvam API Docs -
|
|
47
|
+
|
|
48
|
+
<https://docs.sarvam.ai/api-reference-docs/endpoints/text-to-speech>
|
|
49
|
+
|
|
50
|
+
## Configuration
|
|
51
|
+
|
|
52
|
+
### Constructor Options
|
|
53
|
+
|
|
54
|
+
**speechModel?:** (`SarvamVoiceConfig`): Configuration for text-to-speech synthesis. (Default: `{ model: 'bulbul:v1', language: 'en-IN' }`)
|
|
55
|
+
|
|
56
|
+
**speaker?:** (`SarvamVoiceId`): The speaker to be used for the output audio. If not provided, Meera will be used as the default. Available options: meera, pavithra, maitreyi, arvind, amol, amartya, diya, neel, misha, vian, arjun, maya (Default: `'meera'`)
|
|
57
|
+
|
|
58
|
+
**listeningModel?:** (`SarvamListenOptions`): Configuration for speech-to-text recognition. (Default: `{ model: 'saarika:v2', language_code: 'unknown' }`)
|
|
59
|
+
|
|
60
|
+
### SarvamVoiceConfig
|
|
61
|
+
|
|
62
|
+
**apiKey?:** (`string`): Sarvam API key. Falls back to SARVAM\_API\_KEY environment variable.
|
|
63
|
+
|
|
64
|
+
**model?:** (`SarvamTTSModel`): Specifies the model to use for text-to-speech conversion. (Default: `'bulbul:v1'`)
|
|
65
|
+
|
|
66
|
+
**language:** (`SarvamTTSLanguage`): Target language for speech synthesis. Available options: hi-IN, bn-IN, kn-IN, ml-IN, mr-IN, od-IN, pa-IN, ta-IN, te-IN, en-IN, gu-IN (Default: `'en-IN'`)
|
|
67
|
+
|
|
68
|
+
**properties?:** (`object`): Additional voice properties for customization.
|
|
69
|
+
|
|
70
|
+
**properties.pitch?:** (`number`): Controls the pitch of the audio. Lower values result in a deeper voice, while higher values make it sharper. The suitable range is between -0.75 and 0.75.
|
|
71
|
+
|
|
72
|
+
**properties.pace?:** (`number`): Controls the speed of the audio. Lower values result in slower speech, while higher values make it faster. The suitable range is between 0.5 and 2.0. Default is 1.0. Required range: 0.3 <= x <= 3
|
|
73
|
+
|
|
74
|
+
**properties.loudness?:** (`number`): Controls the loudness of the audio. Lower values result in quieter audio, while higher values make it louder. The suitable range is between 0.3 and 3.0. Required range: 0 <= x <= 3
|
|
75
|
+
|
|
76
|
+
**properties.speech\_sample\_rate?:** (`8000 | 16000 | 22050`): Audio sample rate in Hz.
|
|
77
|
+
|
|
78
|
+
**properties.enable\_preprocessing?:** (`boolean`): Controls whether normalization of English words and numeric entities (e.g., numbers, dates) is performed. Set to true for better handling of mixed-language text. Default is false.
|
|
79
|
+
|
|
80
|
+
**properties.eng\_interpolation\_wt?:** (`number`): Weight for interpolating with English speaker at encoder.
|
|
81
|
+
|
|
82
|
+
### SarvamListenOptions
|
|
83
|
+
|
|
84
|
+
**apiKey?:** (`string`): Sarvam API key. Falls back to SARVAM\_API\_KEY environment variable.
|
|
85
|
+
|
|
86
|
+
**model?:** (`SarvamSTTModel`): Specifies the model to use for speech-to-text conversion. Available options: saarika:v1, saarika:v2, saarika:flash (Default: `'saarika:v2'`)
|
|
87
|
+
|
|
88
|
+
**languageCode?:** (`SarvamSTTLanguage`): Specifies the language of the input audio. This parameter is required to ensure accurate transcription. For the saarika:v1 model, this parameter is mandatory; for the saarika:v2 model, it is optional. Use `unknown` when the language is not known; the API will detect it automatically. Note: the saarika:v1 model does not support the `unknown` language code. Available options: unknown, hi-IN, bn-IN, kn-IN, ml-IN, mr-IN, od-IN, pa-IN, ta-IN, te-IN, en-IN, gu-IN (Default: `'unknown'`)
|
|
89
|
+
|
|
90
|
+
**filetype?:** (`'mp3' | 'wav'`): Audio format of the input stream.
|
|
91
|
+
|
|
92
|
+
## Methods
|
|
93
|
+
|
|
94
|
+
### speak()
|
|
95
|
+
|
|
96
|
+
Converts text to speech using Sarvam's text-to-speech models.
|
|
97
|
+
|
|
98
|
+
**input:** (`string | NodeJS.ReadableStream`): Text or text stream to convert to speech.
|
|
99
|
+
|
|
100
|
+
**options.speaker?:** (`SarvamVoiceId`): Voice ID to use for speech synthesis. (Default: `Constructor's speaker value`)
|
|
101
|
+
|
|
102
|
+
Returns: `Promise<NodeJS.ReadableStream>`
|
|
103
|
+
|
|
104
|
+
### listen()
|
|
105
|
+
|
|
106
|
+
Transcribes audio using Sarvam's speech recognition models.
|
|
107
|
+
|
|
108
|
+
**input:** (`NodeJS.ReadableStream`): Audio stream to transcribe.
|
|
109
|
+
|
|
110
|
+
**options?:** (`SarvamListenOptions`): Configuration options for speech recognition.
|
|
111
|
+
|
|
112
|
+
Returns: `Promise<string>`
|
|
113
|
+
|
|
114
|
+
### getSpeakers()
|
|
115
|
+
|
|
116
|
+
Returns an array of available voice options.
|
|
117
|
+
|
|
118
|
+
Returns: `Promise<Array<{voiceId: SarvamVoiceId}>>`
|
|
119
|
+
|
|
120
|
+
## Notes
|
|
121
|
+
|
|
122
|
+
- API key can be provided via constructor options or the `SARVAM_API_KEY` environment variable
|
|
123
|
+
- If no API key is provided, the constructor will throw an error
|
|
124
|
+
- The service communicates with the Sarvam AI API at `https://api.sarvam.ai`
|
|
125
|
+
- Audio is returned as a stream containing binary audio data
|
|
126
|
+
- Speech recognition supports mp3 and wav audio formats
|
|
@@ -0,0 +1,75 @@
|
|
|
1
|
+
# Speechify
|
|
2
|
+
|
|
3
|
+
The Speechify voice implementation in Mastra provides text-to-speech capabilities using Speechify's API.
|
|
4
|
+
|
|
5
|
+
## Usage Example
|
|
6
|
+
|
|
7
|
+
```typescript
|
|
8
|
+
import { SpeechifyVoice } from '@mastra/voice-speechify'
|
|
9
|
+
|
|
10
|
+
// Initialize with default configuration (uses SPEECHIFY_API_KEY environment variable)
|
|
11
|
+
const voice = new SpeechifyVoice()
|
|
12
|
+
|
|
13
|
+
// Initialize with custom configuration
|
|
14
|
+
const voiceWithConfig = new SpeechifyVoice({
|
|
15
|
+
speechModel: {
|
|
16
|
+
name: 'simba-english',
|
|
17
|
+
apiKey: 'your-api-key',
|
|
18
|
+
},
|
|
19
|
+
speaker: 'george', // Default voice
|
|
20
|
+
})
|
|
21
|
+
|
|
22
|
+
// Convert text to speech
|
|
23
|
+
const audioStream = await voice.speak('Hello, world!', {
|
|
24
|
+
speaker: 'henry', // Override default voice
|
|
25
|
+
})
|
|
26
|
+
```
|
|
27
|
+
|
|
28
|
+
## Constructor Parameters
|
|
29
|
+
|
|
30
|
+
**speechModel?:** (`SpeechifyConfig`): Configuration for text-to-speech functionality (Default: `{ name: 'simba-english' }`)
|
|
31
|
+
|
|
32
|
+
**speaker?:** (`SpeechifyVoiceId`): Default voice ID to use for speech synthesis (Default: `'george'`)
|
|
33
|
+
|
|
34
|
+
### SpeechifyConfig
|
|
35
|
+
|
|
36
|
+
**name?:** (`VoiceModelName`): The Speechify model to use (Default: `'simba-english'`)
|
|
37
|
+
|
|
38
|
+
**apiKey?:** (`string`): Speechify API key. Falls back to SPEECHIFY\_API\_KEY environment variable
|
|
39
|
+
|
|
40
|
+
## Methods
|
|
41
|
+
|
|
42
|
+
### speak()
|
|
43
|
+
|
|
44
|
+
Converts text to speech using the configured speech model and voice.
|
|
45
|
+
|
|
46
|
+
**input:** (`string | NodeJS.ReadableStream`): Text to convert to speech. If a stream is provided, it will be converted to text first.
|
|
47
|
+
|
|
48
|
+
**options.speaker?:** (`string`): Override the default speaker for this request (Default: `Constructor's speaker value`)
|
|
49
|
+
|
|
50
|
+
**options.model?:** (`VoiceModelName`): Override the default model for this request (Default: `Constructor's model value`)
|
|
51
|
+
|
|
52
|
+
Returns: `Promise<NodeJS.ReadableStream>`
|
|
53
|
+
|
|
54
|
+
### getSpeakers()
|
|
55
|
+
|
|
56
|
+
Returns an array of available voice options, where each object contains:
|
|
57
|
+
|
|
58
|
+
**voiceId:** (`string`): Unique identifier for the voice
|
|
59
|
+
|
|
60
|
+
**name:** (`string`): Display name of the voice
|
|
61
|
+
|
|
62
|
+
**language:** (`string`): Language code for the voice
|
|
63
|
+
|
|
64
|
+
**gender:** (`string`): Gender of the voice
|
|
65
|
+
|
|
66
|
+
### listen()
|
|
67
|
+
|
|
68
|
+
This method is not supported by Speechify and will throw an error. Speechify does not provide speech-to-text functionality.
|
|
69
|
+
|
|
70
|
+
## Notes
|
|
71
|
+
|
|
72
|
+
- Speechify requires an API key for authentication
|
|
73
|
+
- The default model is 'simba-english'
|
|
74
|
+
- Speech-to-text functionality is not supported
|
|
75
|
+
- Additional audio stream options can be passed through the speak() method's options parameter
|
|
@@ -0,0 +1,55 @@
|
|
|
1
|
+
# voice.addInstructions()
|
|
2
|
+
|
|
3
|
+
The `addInstructions()` method equips a voice provider with instructions that guide the model's behavior during real-time interactions. This is particularly useful for real-time voice providers that maintain context across a conversation.
|
|
4
|
+
|
|
5
|
+
## Usage Example
|
|
6
|
+
|
|
7
|
+
```typescript
|
|
8
|
+
import { OpenAIRealtimeVoice } from '@mastra/voice-openai-realtime'
|
|
9
|
+
import { Agent } from '@mastra/core/agent'
|
|
10
|
+
|
|
11
|
+
// Initialize a real-time voice provider
|
|
12
|
+
const voice = new OpenAIRealtimeVoice({
|
|
13
|
+
realtimeConfig: {
|
|
14
|
+
model: 'gpt-5.1-realtime',
|
|
15
|
+
apiKey: process.env.OPENAI_API_KEY,
|
|
16
|
+
},
|
|
17
|
+
})
|
|
18
|
+
|
|
19
|
+
// Create an agent with the voice provider
|
|
20
|
+
const agent = new Agent({
|
|
21
|
+
name: 'Customer Support Agent',
|
|
22
|
+
instructions: 'You are a helpful customer support agent for a software company.',
|
|
23
|
+
model: 'openai/gpt-5.1',
|
|
24
|
+
voice,
|
|
25
|
+
})
|
|
26
|
+
|
|
27
|
+
// Add additional instructions to the voice provider
|
|
28
|
+
voice.addInstructions(`
|
|
29
|
+
When speaking to customers:
|
|
30
|
+
- Always introduce yourself as the customer support agent
|
|
31
|
+
- Speak clearly and concisely
|
|
32
|
+
- Ask clarifying questions when needed
|
|
33
|
+
- Summarize the conversation at the end
|
|
34
|
+
`)
|
|
35
|
+
|
|
36
|
+
// Connect to the real-time service
|
|
37
|
+
await voice.connect()
|
|
38
|
+
```
|
|
39
|
+
|
|
40
|
+
## Parameters
|
|
41
|
+
|
|
42
|
+
**instructions:** (`string`): Instructions to guide the voice model's behavior
|
|
43
|
+
|
|
44
|
+
## Return Value
|
|
45
|
+
|
|
46
|
+
This method does not return a value.
|
|
47
|
+
|
|
48
|
+
## Notes
|
|
49
|
+
|
|
50
|
+
- Instructions are most effective when they are clear, specific, and relevant to the voice interaction
|
|
51
|
+
- This method is primarily used with real-time voice providers that maintain conversation context
|
|
52
|
+
- If called on a voice provider that doesn't support instructions, it will log a warning and do nothing
|
|
53
|
+
- Instructions added with this method are typically combined with any instructions provided by an associated Agent
|
|
54
|
+
- For best results, add instructions before starting a conversation (before calling `connect()`)
|
|
55
|
+
- Multiple calls to `addInstructions()` may either replace or append to existing instructions, depending on the provider implementation
|
|
@@ -0,0 +1,67 @@
|
|
|
1
|
+
# voice.addTools()
|
|
2
|
+
|
|
3
|
+
The `addTools()` method equips a voice provider with tools (functions) that can be called by the model during real-time interactions. This enables voice assistants to perform actions like searching for information, making calculations, or interacting with external systems.
|
|
4
|
+
|
|
5
|
+
## Usage Example
|
|
6
|
+
|
|
7
|
+
```typescript
|
|
8
|
+
import { OpenAIRealtimeVoice } from '@mastra/voice-openai-realtime'
|
|
9
|
+
import { createTool } from '@mastra/core/tools'
|
|
10
|
+
import { z } from 'zod'
|
|
11
|
+
|
|
12
|
+
// Define tools
|
|
13
|
+
const weatherTool = createTool({
|
|
14
|
+
id: 'getWeather',
|
|
15
|
+
description: 'Get the current weather for a location',
|
|
16
|
+
inputSchema: z.object({
|
|
17
|
+
location: z.string().describe('The city and state, e.g. San Francisco, CA'),
|
|
18
|
+
}),
|
|
19
|
+
outputSchema: z.object({
|
|
20
|
+
message: z.string(),
|
|
21
|
+
}),
|
|
22
|
+
execute: async inputData => {
|
|
23
|
+
// Fetch weather data from an API
|
|
24
|
+
const response = await fetch(
|
|
25
|
+
`https://api.weather.com?location=${encodeURIComponent(inputData.location)}`,
|
|
26
|
+
)
|
|
27
|
+
const data = await response.json()
|
|
28
|
+
return {
|
|
29
|
+
message: `The current temperature in ${inputData.location} is ${data.temperature}°F with ${data.conditions}.`,
|
|
30
|
+
}
|
|
31
|
+
},
|
|
32
|
+
})
|
|
33
|
+
|
|
34
|
+
// Initialize a real-time voice provider
|
|
35
|
+
const voice = new OpenAIRealtimeVoice({
|
|
36
|
+
realtimeConfig: {
|
|
37
|
+
model: 'gpt-5.1-realtime',
|
|
38
|
+
apiKey: process.env.OPENAI_API_KEY,
|
|
39
|
+
},
|
|
40
|
+
})
|
|
41
|
+
|
|
42
|
+
// Add tools to the voice provider
|
|
43
|
+
voice.addTools({
|
|
44
|
+
getWeather: weatherTool,
|
|
45
|
+
})
|
|
46
|
+
|
|
47
|
+
// Connect to the real-time service
|
|
48
|
+
await voice.connect()
|
|
49
|
+
```
|
|
50
|
+
|
|
51
|
+
## Parameters
|
|
52
|
+
|
|
53
|
+
**tools:** (`ToolsInput`): Object containing tool definitions that can be called by the voice model
|
|
54
|
+
|
|
55
|
+
## Return Value
|
|
56
|
+
|
|
57
|
+
This method does not return a value.
|
|
58
|
+
|
|
59
|
+
## Notes
|
|
60
|
+
|
|
61
|
+
- Tools must follow the Mastra tool format with name, description, input schema, and execute function
|
|
62
|
+
- This method is primarily used with real-time voice providers that support function calling
|
|
63
|
+
- If called on a voice provider that doesn't support tools, it will log a warning and do nothing
|
|
64
|
+
- Tools added with this method are typically combined with any tools provided by an associated Agent
|
|
65
|
+
- For best results, add tools before starting a conversation (before calling `connect()`)
|
|
66
|
+
- The voice provider will automatically handle the invocation of tool handlers when the model decides to use them
|
|
67
|
+
- Multiple calls to `addTools()` may either replace or merge with existing tools, depending on the provider implementation
|