@mastra/mcp-docs-server 1.1.5 → 1.1.6-alpha.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/.docs/docs/agents/adding-voice.md +349 -0
- package/.docs/docs/agents/agent-approval.md +558 -0
- package/.docs/docs/agents/agent-memory.md +209 -0
- package/.docs/docs/agents/guardrails.md +374 -0
- package/.docs/docs/agents/network-approval.md +275 -0
- package/.docs/docs/agents/networks.md +299 -0
- package/.docs/docs/agents/overview.md +304 -0
- package/.docs/docs/agents/processors.md +622 -0
- package/.docs/docs/agents/structured-output.md +273 -0
- package/.docs/docs/agents/supervisor-agents.md +304 -0
- package/.docs/docs/agents/using-tools.md +214 -0
- package/.docs/docs/build-with-ai/mcp-docs-server.md +238 -0
- package/.docs/docs/build-with-ai/skills.md +35 -0
- package/.docs/docs/community/contributing-templates.md +3 -0
- package/.docs/docs/community/discord.md +9 -0
- package/.docs/docs/community/licensing.md +66 -0
- package/.docs/docs/deployment/cloud-providers.md +15 -0
- package/.docs/docs/deployment/mastra-server.md +122 -0
- package/.docs/docs/deployment/monorepo.md +142 -0
- package/.docs/docs/deployment/overview.md +62 -0
- package/.docs/docs/deployment/studio.md +239 -0
- package/.docs/docs/deployment/web-framework.md +52 -0
- package/.docs/docs/deployment/workflow-runners.md +9 -0
- package/.docs/docs/evals/built-in-scorers.md +47 -0
- package/.docs/docs/evals/custom-scorers.md +519 -0
- package/.docs/docs/evals/overview.md +141 -0
- package/.docs/docs/evals/running-in-ci.md +124 -0
- package/.docs/docs/getting-started/build-with-ai.md +68 -0
- package/.docs/docs/getting-started/manual-install.md +226 -0
- package/.docs/docs/getting-started/project-structure.md +60 -0
- package/.docs/docs/getting-started/start.md +28 -0
- package/.docs/docs/getting-started/studio.md +101 -0
- package/.docs/docs/index.md +43 -0
- package/.docs/docs/mastra-cloud/deployment.md +77 -0
- package/.docs/docs/mastra-cloud/observability.md +38 -0
- package/.docs/docs/mastra-cloud/overview.md +23 -0
- package/.docs/docs/mastra-cloud/setup.md +42 -0
- package/.docs/docs/mastra-cloud/studio.md +24 -0
- package/.docs/docs/mastra-code/configuration.md +299 -0
- package/.docs/docs/mastra-code/customization.md +228 -0
- package/.docs/docs/mastra-code/modes.md +104 -0
- package/.docs/docs/mastra-code/overview.md +135 -0
- package/.docs/docs/mastra-code/tools.md +229 -0
- package/.docs/docs/mcp/overview.md +373 -0
- package/.docs/docs/mcp/publishing-mcp-server.md +95 -0
- package/.docs/docs/memory/memory-processors.md +314 -0
- package/.docs/docs/memory/message-history.md +260 -0
- package/.docs/docs/memory/observational-memory.md +248 -0
- package/.docs/docs/memory/overview.md +45 -0
- package/.docs/docs/memory/semantic-recall.md +272 -0
- package/.docs/docs/memory/storage.md +261 -0
- package/.docs/docs/memory/working-memory.md +400 -0
- package/.docs/docs/observability/datasets/overview.md +198 -0
- package/.docs/docs/observability/datasets/running-experiments.md +274 -0
- package/.docs/docs/observability/logging.md +99 -0
- package/.docs/docs/observability/overview.md +70 -0
- package/.docs/docs/observability/tracing/bridges/otel.md +209 -0
- package/.docs/docs/observability/tracing/exporters/arize.md +272 -0
- package/.docs/docs/observability/tracing/exporters/braintrust.md +111 -0
- package/.docs/docs/observability/tracing/exporters/cloud.md +127 -0
- package/.docs/docs/observability/tracing/exporters/datadog.md +187 -0
- package/.docs/docs/observability/tracing/exporters/default.md +209 -0
- package/.docs/docs/observability/tracing/exporters/laminar.md +100 -0
- package/.docs/docs/observability/tracing/exporters/langfuse.md +213 -0
- package/.docs/docs/observability/tracing/exporters/langsmith.md +198 -0
- package/.docs/docs/observability/tracing/exporters/otel.md +476 -0
- package/.docs/docs/observability/tracing/exporters/posthog.md +148 -0
- package/.docs/docs/observability/tracing/exporters/sentry.md +208 -0
- package/.docs/docs/observability/tracing/overview.md +1112 -0
- package/.docs/docs/observability/tracing/processors/sensitive-data-filter.md +300 -0
- package/.docs/docs/rag/chunking-and-embedding.md +183 -0
- package/.docs/docs/rag/graph-rag.md +215 -0
- package/.docs/docs/rag/overview.md +72 -0
- package/.docs/docs/rag/retrieval.md +515 -0
- package/.docs/docs/rag/vector-databases.md +645 -0
- package/.docs/docs/server/auth/auth0.md +220 -0
- package/.docs/docs/server/auth/better-auth.md +203 -0
- package/.docs/docs/server/auth/clerk.md +132 -0
- package/.docs/docs/server/auth/composite-auth.md +234 -0
- package/.docs/docs/server/auth/custom-auth-provider.md +513 -0
- package/.docs/docs/server/auth/firebase.md +272 -0
- package/.docs/docs/server/auth/jwt.md +110 -0
- package/.docs/docs/server/auth/simple-auth.md +180 -0
- package/.docs/docs/server/auth/supabase.md +117 -0
- package/.docs/docs/server/auth/workos.md +186 -0
- package/.docs/docs/server/auth.md +38 -0
- package/.docs/docs/server/custom-adapters.md +378 -0
- package/.docs/docs/server/custom-api-routes.md +267 -0
- package/.docs/docs/server/mastra-client.md +243 -0
- package/.docs/docs/server/mastra-server.md +71 -0
- package/.docs/docs/server/middleware.md +225 -0
- package/.docs/docs/server/request-context.md +471 -0
- package/.docs/docs/server/server-adapters.md +547 -0
- package/.docs/docs/streaming/events.md +237 -0
- package/.docs/docs/streaming/overview.md +175 -0
- package/.docs/docs/streaming/tool-streaming.md +175 -0
- package/.docs/docs/streaming/workflow-streaming.md +109 -0
- package/.docs/docs/voice/overview.md +959 -0
- package/.docs/docs/voice/speech-to-speech.md +102 -0
- package/.docs/docs/voice/speech-to-text.md +79 -0
- package/.docs/docs/voice/text-to-speech.md +83 -0
- package/.docs/docs/workflows/agents-and-tools.md +166 -0
- package/.docs/docs/workflows/control-flow.md +822 -0
- package/.docs/docs/workflows/error-handling.md +360 -0
- package/.docs/docs/workflows/human-in-the-loop.md +215 -0
- package/.docs/docs/workflows/overview.md +370 -0
- package/.docs/docs/workflows/snapshots.md +238 -0
- package/.docs/docs/workflows/suspend-and-resume.md +205 -0
- package/.docs/docs/workflows/time-travel.md +309 -0
- package/.docs/docs/workflows/workflow-state.md +181 -0
- package/.docs/docs/workspace/filesystem.md +164 -0
- package/.docs/docs/workspace/overview.md +239 -0
- package/.docs/docs/workspace/sandbox.md +63 -0
- package/.docs/docs/workspace/search.md +243 -0
- package/.docs/docs/workspace/skills.md +169 -0
- package/.docs/guides/agent-frameworks/ai-sdk.md +140 -0
- package/.docs/guides/build-your-ui/ai-sdk-ui.md +1499 -0
- package/.docs/guides/build-your-ui/assistant-ui.md +156 -0
- package/.docs/guides/build-your-ui/copilotkit.md +289 -0
- package/.docs/guides/deployment/amazon-ec2.md +130 -0
- package/.docs/guides/deployment/aws-lambda.md +248 -0
- package/.docs/guides/deployment/azure-app-services.md +114 -0
- package/.docs/guides/deployment/cloudflare.md +99 -0
- package/.docs/guides/deployment/digital-ocean.md +168 -0
- package/.docs/guides/deployment/inngest.md +682 -0
- package/.docs/guides/deployment/netlify.md +77 -0
- package/.docs/guides/deployment/vercel.md +101 -0
- package/.docs/guides/getting-started/astro.md +398 -0
- package/.docs/guides/getting-started/electron.md +504 -0
- package/.docs/guides/getting-started/express.md +251 -0
- package/.docs/guides/getting-started/hono.md +190 -0
- package/.docs/guides/getting-started/next-js.md +347 -0
- package/.docs/guides/getting-started/nuxt.md +497 -0
- package/.docs/guides/getting-started/quickstart.md +67 -0
- package/.docs/guides/getting-started/sveltekit.md +296 -0
- package/.docs/guides/getting-started/vite-react.md +425 -0
- package/.docs/guides/guide/ai-recruiter.md +226 -0
- package/.docs/guides/guide/chef-michel.md +211 -0
- package/.docs/guides/guide/code-review-bot.md +226 -0
- package/.docs/guides/guide/dev-assistant.md +307 -0
- package/.docs/guides/guide/docs-manager.md +238 -0
- package/.docs/guides/guide/github-actions-pr-description.md +236 -0
- package/.docs/guides/guide/notes-mcp-server.md +416 -0
- package/.docs/guides/guide/research-assistant.md +348 -0
- package/.docs/guides/guide/research-coordinator.md +416 -0
- package/.docs/guides/guide/stock-agent.md +132 -0
- package/.docs/guides/guide/web-search.md +320 -0
- package/.docs/guides/guide/whatsapp-chat-bot.md +405 -0
- package/.docs/guides/index.md +3 -0
- package/.docs/guides/migrations/agentnetwork.md +97 -0
- package/.docs/guides/migrations/ai-sdk-v4-to-v5.md +112 -0
- package/.docs/guides/migrations/network-to-supervisor.md +261 -0
- package/.docs/guides/migrations/upgrade-to-v1/agent.md +404 -0
- package/.docs/guides/migrations/upgrade-to-v1/cli.md +57 -0
- package/.docs/guides/migrations/upgrade-to-v1/client.md +337 -0
- package/.docs/guides/migrations/upgrade-to-v1/deployment.md +37 -0
- package/.docs/guides/migrations/upgrade-to-v1/evals.md +239 -0
- package/.docs/guides/migrations/upgrade-to-v1/mastra.md +143 -0
- package/.docs/guides/migrations/upgrade-to-v1/mcp.md +97 -0
- package/.docs/guides/migrations/upgrade-to-v1/memory.md +285 -0
- package/.docs/guides/migrations/upgrade-to-v1/overview.md +119 -0
- package/.docs/guides/migrations/upgrade-to-v1/processors.md +68 -0
- package/.docs/guides/migrations/upgrade-to-v1/rag.md +42 -0
- package/.docs/guides/migrations/upgrade-to-v1/storage.md +553 -0
- package/.docs/guides/migrations/upgrade-to-v1/tools.md +180 -0
- package/.docs/guides/migrations/upgrade-to-v1/tracing.md +412 -0
- package/.docs/guides/migrations/upgrade-to-v1/vectors.md +87 -0
- package/.docs/guides/migrations/upgrade-to-v1/voice.md +30 -0
- package/.docs/guides/migrations/upgrade-to-v1/workflows.md +341 -0
- package/.docs/guides/migrations/vnext-to-standard-apis.md +362 -0
- package/.docs/models/embeddings.md +161 -0
- package/.docs/models/gateways/azure-openai.md +128 -0
- package/.docs/models/gateways/custom-gateways.md +545 -0
- package/.docs/models/gateways/netlify.md +88 -0
- package/.docs/models/gateways/openrouter.md +219 -0
- package/.docs/models/gateways/vercel.md +225 -0
- package/.docs/models/gateways.md +14 -0
- package/.docs/models/index.md +286 -0
- package/.docs/models/providers/302ai.md +134 -0
- package/.docs/models/providers/abacus.md +125 -0
- package/.docs/models/providers/agentrouter.md +90 -0
- package/.docs/models/providers/aihubmix.md +107 -0
- package/.docs/models/providers/alibaba-cn.md +135 -0
- package/.docs/models/providers/alibaba.md +111 -0
- package/.docs/models/providers/amazon-bedrock.md +33 -0
- package/.docs/models/providers/anthropic.md +153 -0
- package/.docs/models/providers/azure.md +33 -0
- package/.docs/models/providers/bailing.md +72 -0
- package/.docs/models/providers/baseten.md +77 -0
- package/.docs/models/providers/berget.md +78 -0
- package/.docs/models/providers/cerebras.md +101 -0
- package/.docs/models/providers/chutes.md +136 -0
- package/.docs/models/providers/cloudflare-ai-gateway.md +33 -0
- package/.docs/models/providers/cloudflare-workers-ai.md +109 -0
- package/.docs/models/providers/cohere.md +33 -0
- package/.docs/models/providers/cortecs.md +91 -0
- package/.docs/models/providers/deepinfra.md +112 -0
- package/.docs/models/providers/deepseek.md +88 -0
- package/.docs/models/providers/fastrouter.md +84 -0
- package/.docs/models/providers/fireworks-ai.md +89 -0
- package/.docs/models/providers/firmware.md +85 -0
- package/.docs/models/providers/friendli.md +78 -0
- package/.docs/models/providers/github-models.md +125 -0
- package/.docs/models/providers/google-vertex.md +33 -0
- package/.docs/models/providers/google.md +159 -0
- package/.docs/models/providers/groq.md +107 -0
- package/.docs/models/providers/helicone.md +161 -0
- package/.docs/models/providers/huggingface.md +90 -0
- package/.docs/models/providers/iflowcn.md +84 -0
- package/.docs/models/providers/inception.md +72 -0
- package/.docs/models/providers/inference.md +79 -0
- package/.docs/models/providers/io-intelligence.md +87 -0
- package/.docs/models/providers/io-net.md +87 -0
- package/.docs/models/providers/jiekou.md +131 -0
- package/.docs/models/providers/kilo.md +333 -0
- package/.docs/models/providers/kimi-for-coding.md +100 -0
- package/.docs/models/providers/kuae-cloud-coding-plan.md +71 -0
- package/.docs/models/providers/llama.md +77 -0
- package/.docs/models/providers/lmstudio.md +73 -0
- package/.docs/models/providers/lucidquery.md +72 -0
- package/.docs/models/providers/minimax-cn-coding-plan.md +102 -0
- package/.docs/models/providers/minimax-cn.md +102 -0
- package/.docs/models/providers/minimax-coding-plan.md +102 -0
- package/.docs/models/providers/minimax.md +104 -0
- package/.docs/models/providers/mistral.md +124 -0
- package/.docs/models/providers/moark.md +72 -0
- package/.docs/models/providers/modelscope.md +77 -0
- package/.docs/models/providers/moonshotai-cn.md +76 -0
- package/.docs/models/providers/moonshotai.md +76 -0
- package/.docs/models/providers/morph.md +73 -0
- package/.docs/models/providers/nano-gpt.md +103 -0
- package/.docs/models/providers/nebius.md +116 -0
- package/.docs/models/providers/nova.md +72 -0
- package/.docs/models/providers/novita-ai.md +154 -0
- package/.docs/models/providers/nvidia.md +141 -0
- package/.docs/models/providers/ollama-cloud.md +103 -0
- package/.docs/models/providers/ollama.md +33 -0
- package/.docs/models/providers/openai.md +193 -0
- package/.docs/models/providers/opencode.md +100 -0
- package/.docs/models/providers/ovhcloud.md +83 -0
- package/.docs/models/providers/perplexity.md +100 -0
- package/.docs/models/providers/poe.md +183 -0
- package/.docs/models/providers/privatemode-ai.md +75 -0
- package/.docs/models/providers/requesty.md +90 -0
- package/.docs/models/providers/scaleway.md +84 -0
- package/.docs/models/providers/siliconflow-cn.md +138 -0
- package/.docs/models/providers/siliconflow.md +140 -0
- package/.docs/models/providers/stackit.md +78 -0
- package/.docs/models/providers/stepfun.md +73 -0
- package/.docs/models/providers/submodel.md +79 -0
- package/.docs/models/providers/synthetic.md +96 -0
- package/.docs/models/providers/togetherai.md +115 -0
- package/.docs/models/providers/upstage.md +73 -0
- package/.docs/models/providers/venice.md +95 -0
- package/.docs/models/providers/vivgrid.md +106 -0
- package/.docs/models/providers/vultr.md +75 -0
- package/.docs/models/providers/wandb.md +80 -0
- package/.docs/models/providers/xai.md +141 -0
- package/.docs/models/providers/xiaomi.md +71 -0
- package/.docs/models/providers/zai-coding-plan.md +80 -0
- package/.docs/models/providers/zai.md +79 -0
- package/.docs/models/providers/zenmux.md +161 -0
- package/.docs/models/providers/zhipuai-coding-plan.md +79 -0
- package/.docs/models/providers/zhipuai.md +79 -0
- package/.docs/models/providers.md +81 -0
- package/.docs/reference/agents/agent.md +141 -0
- package/.docs/reference/agents/generate.md +186 -0
- package/.docs/reference/agents/generateLegacy.md +173 -0
- package/.docs/reference/agents/getDefaultGenerateOptions.md +36 -0
- package/.docs/reference/agents/getDefaultOptions.md +34 -0
- package/.docs/reference/agents/getDefaultStreamOptions.md +36 -0
- package/.docs/reference/agents/getDescription.md +21 -0
- package/.docs/reference/agents/getInstructions.md +34 -0
- package/.docs/reference/agents/getLLM.md +37 -0
- package/.docs/reference/agents/getMemory.md +34 -0
- package/.docs/reference/agents/getModel.md +34 -0
- package/.docs/reference/agents/getTools.md +29 -0
- package/.docs/reference/agents/getVoice.md +34 -0
- package/.docs/reference/agents/listAgents.md +35 -0
- package/.docs/reference/agents/listScorers.md +34 -0
- package/.docs/reference/agents/listTools.md +34 -0
- package/.docs/reference/agents/listWorkflows.md +34 -0
- package/.docs/reference/agents/network.md +133 -0
- package/.docs/reference/ai-sdk/chat-route.md +82 -0
- package/.docs/reference/ai-sdk/handle-chat-stream.md +53 -0
- package/.docs/reference/ai-sdk/handle-network-stream.md +37 -0
- package/.docs/reference/ai-sdk/handle-workflow-stream.md +55 -0
- package/.docs/reference/ai-sdk/network-route.md +74 -0
- package/.docs/reference/ai-sdk/to-ai-sdk-stream.md +231 -0
- package/.docs/reference/ai-sdk/to-ai-sdk-v4-messages.md +79 -0
- package/.docs/reference/ai-sdk/to-ai-sdk-v5-messages.md +76 -0
- package/.docs/reference/ai-sdk/with-mastra.md +59 -0
- package/.docs/reference/ai-sdk/workflow-route.md +79 -0
- package/.docs/reference/auth/auth0.md +73 -0
- package/.docs/reference/auth/better-auth.md +71 -0
- package/.docs/reference/auth/clerk.md +36 -0
- package/.docs/reference/auth/firebase.md +80 -0
- package/.docs/reference/auth/jwt.md +26 -0
- package/.docs/reference/auth/supabase.md +33 -0
- package/.docs/reference/auth/workos.md +84 -0
- package/.docs/reference/cli/create-mastra.md +137 -0
- package/.docs/reference/cli/mastra.md +336 -0
- package/.docs/reference/client-js/agents.md +437 -0
- package/.docs/reference/client-js/error-handling.md +16 -0
- package/.docs/reference/client-js/logs.md +24 -0
- package/.docs/reference/client-js/mastra-client.md +63 -0
- package/.docs/reference/client-js/memory.md +221 -0
- package/.docs/reference/client-js/observability.md +72 -0
- package/.docs/reference/client-js/telemetry.md +20 -0
- package/.docs/reference/client-js/tools.md +44 -0
- package/.docs/reference/client-js/vectors.md +79 -0
- package/.docs/reference/client-js/workflows.md +199 -0
- package/.docs/reference/configuration.md +752 -0
- package/.docs/reference/core/addGateway.md +42 -0
- package/.docs/reference/core/getAgent.md +21 -0
- package/.docs/reference/core/getAgentById.md +21 -0
- package/.docs/reference/core/getDeployer.md +22 -0
- package/.docs/reference/core/getGateway.md +38 -0
- package/.docs/reference/core/getGatewayById.md +41 -0
- package/.docs/reference/core/getLogger.md +22 -0
- package/.docs/reference/core/getMCPServer.md +47 -0
- package/.docs/reference/core/getMCPServerById.md +55 -0
- package/.docs/reference/core/getMemory.md +50 -0
- package/.docs/reference/core/getScorer.md +54 -0
- package/.docs/reference/core/getScorerById.md +54 -0
- package/.docs/reference/core/getServer.md +22 -0
- package/.docs/reference/core/getStorage.md +22 -0
- package/.docs/reference/core/getStoredAgentById.md +89 -0
- package/.docs/reference/core/getTelemetry.md +22 -0
- package/.docs/reference/core/getVector.md +22 -0
- package/.docs/reference/core/getWorkflow.md +42 -0
- package/.docs/reference/core/listAgents.md +21 -0
- package/.docs/reference/core/listGateways.md +40 -0
- package/.docs/reference/core/listLogs.md +38 -0
- package/.docs/reference/core/listLogsByRunId.md +36 -0
- package/.docs/reference/core/listMCPServers.md +55 -0
- package/.docs/reference/core/listMemory.md +56 -0
- package/.docs/reference/core/listScorers.md +29 -0
- package/.docs/reference/core/listStoredAgents.md +93 -0
- package/.docs/reference/core/listVectors.md +22 -0
- package/.docs/reference/core/listWorkflows.md +21 -0
- package/.docs/reference/core/mastra-class.md +66 -0
- package/.docs/reference/core/mastra-model-gateway.md +153 -0
- package/.docs/reference/core/setLogger.md +26 -0
- package/.docs/reference/core/setStorage.md +27 -0
- package/.docs/reference/datasets/addItem.md +37 -0
- package/.docs/reference/datasets/addItems.md +35 -0
- package/.docs/reference/datasets/compareExperiments.md +52 -0
- package/.docs/reference/datasets/create.md +51 -0
- package/.docs/reference/datasets/dataset.md +82 -0
- package/.docs/reference/datasets/datasets-manager.md +94 -0
- package/.docs/reference/datasets/delete.md +25 -0
- package/.docs/reference/datasets/deleteExperiment.md +27 -0
- package/.docs/reference/datasets/deleteItem.md +27 -0
- package/.docs/reference/datasets/deleteItems.md +29 -0
- package/.docs/reference/datasets/get.md +31 -0
- package/.docs/reference/datasets/getDetails.md +47 -0
- package/.docs/reference/datasets/getExperiment.md +30 -0
- package/.docs/reference/datasets/getItem.md +33 -0
- package/.docs/reference/datasets/getItemHistory.md +31 -0
- package/.docs/reference/datasets/list.md +31 -0
- package/.docs/reference/datasets/listExperimentResults.md +39 -0
- package/.docs/reference/datasets/listExperiments.md +33 -0
- package/.docs/reference/datasets/listItems.md +46 -0
- package/.docs/reference/datasets/listVersions.md +33 -0
- package/.docs/reference/datasets/startExperiment.md +62 -0
- package/.docs/reference/datasets/startExperimentAsync.md +43 -0
- package/.docs/reference/datasets/update.md +48 -0
- package/.docs/reference/datasets/updateItem.md +38 -0
- package/.docs/reference/deployer/cloudflare.md +79 -0
- package/.docs/reference/deployer/netlify.md +80 -0
- package/.docs/reference/deployer/vercel.md +91 -0
- package/.docs/reference/deployer.md +100 -0
- package/.docs/reference/evals/answer-relevancy.md +105 -0
- package/.docs/reference/evals/answer-similarity.md +99 -0
- package/.docs/reference/evals/bias.md +120 -0
- package/.docs/reference/evals/completeness.md +136 -0
- package/.docs/reference/evals/content-similarity.md +101 -0
- package/.docs/reference/evals/context-precision.md +196 -0
- package/.docs/reference/evals/context-relevance.md +531 -0
- package/.docs/reference/evals/create-scorer.md +270 -0
- package/.docs/reference/evals/faithfulness.md +114 -0
- package/.docs/reference/evals/hallucination.md +213 -0
- package/.docs/reference/evals/keyword-coverage.md +128 -0
- package/.docs/reference/evals/mastra-scorer.md +123 -0
- package/.docs/reference/evals/noise-sensitivity.md +675 -0
- package/.docs/reference/evals/prompt-alignment.md +614 -0
- package/.docs/reference/evals/run-evals.md +179 -0
- package/.docs/reference/evals/scorer-utils.md +326 -0
- package/.docs/reference/evals/textual-difference.md +113 -0
- package/.docs/reference/evals/tone-consistency.md +119 -0
- package/.docs/reference/evals/tool-call-accuracy.md +533 -0
- package/.docs/reference/evals/toxicity.md +123 -0
- package/.docs/reference/harness/harness-class.md +708 -0
- package/.docs/reference/index.md +277 -0
- package/.docs/reference/logging/pino-logger.md +117 -0
- package/.docs/reference/mastra-code/createMastraCode.md +108 -0
- package/.docs/reference/memory/clone-utilities.md +199 -0
- package/.docs/reference/memory/cloneThread.md +130 -0
- package/.docs/reference/memory/createThread.md +68 -0
- package/.docs/reference/memory/deleteMessages.md +38 -0
- package/.docs/reference/memory/getThreadById.md +24 -0
- package/.docs/reference/memory/listThreads.md +145 -0
- package/.docs/reference/memory/memory-class.md +147 -0
- package/.docs/reference/memory/observational-memory.md +565 -0
- package/.docs/reference/memory/recall.md +91 -0
- package/.docs/reference/observability/tracing/bridges/otel.md +131 -0
- package/.docs/reference/observability/tracing/configuration.md +178 -0
- package/.docs/reference/observability/tracing/exporters/arize.md +141 -0
- package/.docs/reference/observability/tracing/exporters/braintrust.md +93 -0
- package/.docs/reference/observability/tracing/exporters/cloud-exporter.md +163 -0
- package/.docs/reference/observability/tracing/exporters/console-exporter.md +138 -0
- package/.docs/reference/observability/tracing/exporters/datadog.md +116 -0
- package/.docs/reference/observability/tracing/exporters/default-exporter.md +174 -0
- package/.docs/reference/observability/tracing/exporters/laminar.md +78 -0
- package/.docs/reference/observability/tracing/exporters/langfuse.md +134 -0
- package/.docs/reference/observability/tracing/exporters/langsmith.md +108 -0
- package/.docs/reference/observability/tracing/exporters/otel.md +199 -0
- package/.docs/reference/observability/tracing/exporters/posthog.md +92 -0
- package/.docs/reference/observability/tracing/exporters/sentry.md +184 -0
- package/.docs/reference/observability/tracing/instances.md +107 -0
- package/.docs/reference/observability/tracing/interfaces.md +743 -0
- package/.docs/reference/observability/tracing/processors/sensitive-data-filter.md +144 -0
- package/.docs/reference/observability/tracing/spans.md +224 -0
- package/.docs/reference/processors/batch-parts-processor.md +61 -0
- package/.docs/reference/processors/language-detector.md +82 -0
- package/.docs/reference/processors/message-history-processor.md +85 -0
- package/.docs/reference/processors/moderation-processor.md +104 -0
- package/.docs/reference/processors/pii-detector.md +108 -0
- package/.docs/reference/processors/processor-interface.md +521 -0
- package/.docs/reference/processors/prompt-injection-detector.md +72 -0
- package/.docs/reference/processors/semantic-recall-processor.md +117 -0
- package/.docs/reference/processors/system-prompt-scrubber.md +80 -0
- package/.docs/reference/processors/token-limiter-processor.md +115 -0
- package/.docs/reference/processors/tool-call-filter.md +85 -0
- package/.docs/reference/processors/tool-search-processor.md +111 -0
- package/.docs/reference/processors/unicode-normalizer.md +62 -0
- package/.docs/reference/processors/working-memory-processor.md +152 -0
- package/.docs/reference/rag/chunk.md +221 -0
- package/.docs/reference/rag/database-config.md +261 -0
- package/.docs/reference/rag/document.md +114 -0
- package/.docs/reference/rag/embeddings.md +92 -0
- package/.docs/reference/rag/extract-params.md +168 -0
- package/.docs/reference/rag/graph-rag.md +111 -0
- package/.docs/reference/rag/metadata-filters.md +216 -0
- package/.docs/reference/rag/rerank.md +75 -0
- package/.docs/reference/rag/rerankWithScorer.md +80 -0
- package/.docs/reference/server/create-route.md +262 -0
- package/.docs/reference/server/express-adapter.md +176 -0
- package/.docs/reference/server/fastify-adapter.md +90 -0
- package/.docs/reference/server/hono-adapter.md +162 -0
- package/.docs/reference/server/koa-adapter.md +127 -0
- package/.docs/reference/server/mastra-server.md +298 -0
- package/.docs/reference/server/register-api-route.md +249 -0
- package/.docs/reference/server/routes.md +306 -0
- package/.docs/reference/storage/cloudflare-d1.md +218 -0
- package/.docs/reference/storage/cloudflare.md +88 -0
- package/.docs/reference/storage/composite.md +235 -0
- package/.docs/reference/storage/convex.md +161 -0
- package/.docs/reference/storage/dynamodb.md +282 -0
- package/.docs/reference/storage/lance.md +131 -0
- package/.docs/reference/storage/libsql.md +135 -0
- package/.docs/reference/storage/mongodb.md +262 -0
- package/.docs/reference/storage/mssql.md +157 -0
- package/.docs/reference/storage/overview.md +121 -0
- package/.docs/reference/storage/postgresql.md +526 -0
- package/.docs/reference/storage/upstash.md +160 -0
- package/.docs/reference/streaming/ChunkType.md +292 -0
- package/.docs/reference/streaming/agents/MastraModelOutput.md +182 -0
- package/.docs/reference/streaming/agents/stream.md +221 -0
- package/.docs/reference/streaming/agents/streamLegacy.md +142 -0
- package/.docs/reference/streaming/workflows/observeStream.md +42 -0
- package/.docs/reference/streaming/workflows/resumeStream.md +61 -0
- package/.docs/reference/streaming/workflows/stream.md +88 -0
- package/.docs/reference/streaming/workflows/timeTravelStream.md +142 -0
- package/.docs/reference/templates/overview.md +194 -0
- package/.docs/reference/tools/create-tool.md +237 -0
- package/.docs/reference/tools/document-chunker-tool.md +89 -0
- package/.docs/reference/tools/graph-rag-tool.md +182 -0
- package/.docs/reference/tools/mcp-client.md +954 -0
- package/.docs/reference/tools/mcp-server.md +1271 -0
- package/.docs/reference/tools/vector-query-tool.md +459 -0
- package/.docs/reference/vectors/astra.md +121 -0
- package/.docs/reference/vectors/chroma.md +264 -0
- package/.docs/reference/vectors/convex.md +300 -0
- package/.docs/reference/vectors/couchbase.md +226 -0
- package/.docs/reference/vectors/duckdb.md +318 -0
- package/.docs/reference/vectors/elasticsearch.md +189 -0
- package/.docs/reference/vectors/lance.md +220 -0
- package/.docs/reference/vectors/libsql.md +305 -0
- package/.docs/reference/vectors/mongodb.md +295 -0
- package/.docs/reference/vectors/opensearch.md +99 -0
- package/.docs/reference/vectors/pg.md +408 -0
- package/.docs/reference/vectors/pinecone.md +168 -0
- package/.docs/reference/vectors/qdrant.md +222 -0
- package/.docs/reference/vectors/s3vectors.md +277 -0
- package/.docs/reference/vectors/turbopuffer.md +157 -0
- package/.docs/reference/vectors/upstash.md +294 -0
- package/.docs/reference/vectors/vectorize.md +147 -0
- package/.docs/reference/voice/azure.md +148 -0
- package/.docs/reference/voice/cloudflare.md +83 -0
- package/.docs/reference/voice/composite-voice.md +121 -0
- package/.docs/reference/voice/deepgram.md +79 -0
- package/.docs/reference/voice/elevenlabs.md +98 -0
- package/.docs/reference/voice/google-gemini-live.md +378 -0
- package/.docs/reference/voice/google.md +228 -0
- package/.docs/reference/voice/mastra-voice.md +311 -0
- package/.docs/reference/voice/murf.md +122 -0
- package/.docs/reference/voice/openai-realtime.md +203 -0
- package/.docs/reference/voice/openai.md +88 -0
- package/.docs/reference/voice/playai.md +80 -0
- package/.docs/reference/voice/sarvam.md +126 -0
- package/.docs/reference/voice/speechify.md +75 -0
- package/.docs/reference/voice/voice.addInstructions.md +55 -0
- package/.docs/reference/voice/voice.addTools.md +67 -0
- package/.docs/reference/voice/voice.answer.md +54 -0
- package/.docs/reference/voice/voice.close.md +51 -0
- package/.docs/reference/voice/voice.connect.md +94 -0
- package/.docs/reference/voice/voice.events.md +37 -0
- package/.docs/reference/voice/voice.getSpeakers.md +129 -0
- package/.docs/reference/voice/voice.listen.md +164 -0
- package/.docs/reference/voice/voice.off.md +54 -0
- package/.docs/reference/voice/voice.on.md +111 -0
- package/.docs/reference/voice/voice.send.md +65 -0
- package/.docs/reference/voice/voice.speak.md +157 -0
- package/.docs/reference/voice/voice.updateConfig.md +60 -0
- package/.docs/reference/workflows/run-methods/cancel.md +86 -0
- package/.docs/reference/workflows/run-methods/restart.md +33 -0
- package/.docs/reference/workflows/run-methods/resume.md +59 -0
- package/.docs/reference/workflows/run-methods/start.md +58 -0
- package/.docs/reference/workflows/run-methods/startAsync.md +67 -0
- package/.docs/reference/workflows/run-methods/timeTravel.md +142 -0
- package/.docs/reference/workflows/run.md +59 -0
- package/.docs/reference/workflows/step.md +119 -0
- package/.docs/reference/workflows/workflow-methods/branch.md +25 -0
- package/.docs/reference/workflows/workflow-methods/commit.md +17 -0
- package/.docs/reference/workflows/workflow-methods/create-run.md +63 -0
- package/.docs/reference/workflows/workflow-methods/dountil.md +25 -0
- package/.docs/reference/workflows/workflow-methods/dowhile.md +25 -0
- package/.docs/reference/workflows/workflow-methods/foreach.md +118 -0
- package/.docs/reference/workflows/workflow-methods/map.md +93 -0
- package/.docs/reference/workflows/workflow-methods/parallel.md +21 -0
- package/.docs/reference/workflows/workflow-methods/sleep.md +35 -0
- package/.docs/reference/workflows/workflow-methods/sleepUntil.md +35 -0
- package/.docs/reference/workflows/workflow-methods/then.md +21 -0
- package/.docs/reference/workflows/workflow.md +157 -0
- package/.docs/reference/workspace/e2b-sandbox.md +289 -0
- package/.docs/reference/workspace/filesystem.md +255 -0
- package/.docs/reference/workspace/gcs-filesystem.md +174 -0
- package/.docs/reference/workspace/local-filesystem.md +343 -0
- package/.docs/reference/workspace/local-sandbox.md +301 -0
- package/.docs/reference/workspace/s3-filesystem.md +175 -0
- package/.docs/reference/workspace/sandbox.md +87 -0
- package/.docs/reference/workspace/workspace-class.md +244 -0
- package/CHANGELOG.md +8 -0
- package/package.json +5 -5
|
@@ -0,0 +1,221 @@
|
|
|
1
|
+
# Agent.stream()
|
|
2
|
+
|
|
3
|
+
The `.stream()` method enables real-time streaming of responses from an agent with enhanced capabilities and format flexibility. This method accepts messages and optional streaming options, providing a next-generation streaming experience with support for both Mastra's native format and AI SDK v5+ compatibility.
|
|
4
|
+
|
|
5
|
+
## Usage example
|
|
6
|
+
|
|
7
|
+
```ts
|
|
8
|
+
const stream = await agent.stream('message for agent')
|
|
9
|
+
```
|
|
10
|
+
|
|
11
|
+
> **Info:** **Model Compatibility**: This method is designed for V2 models. V1 models should use the [`.streamLegacy()`](https://mastra.ai/reference/streaming/agents/streamLegacy) method. The framework automatically detects your model version and will throw an error if there's a mismatch.
|
|
12
|
+
|
|
13
|
+
## Parameters
|
|
14
|
+
|
|
15
|
+
**messages:** (`string | string[] | CoreMessage[] | AiMessageType[] | UIMessageWithMetadata[]`): The messages to send to the agent. Can be a single string, array of strings, or structured message objects.
|
|
16
|
+
|
|
17
|
+
**options?:** (`AgentExecutionOptions<Output, Format>`): Optional configuration for the streaming process.
|
|
18
|
+
|
|
19
|
+
### Options
|
|
20
|
+
|
|
21
|
+
**maxSteps?:** (`number`): Maximum number of steps to run during execution.
|
|
22
|
+
|
|
23
|
+
**scorers?:** (`MastraScorers | Record<string, { scorer: MastraScorer['name']; sampling?: ScoringSamplingConfig }>`): scorer:stringName of the scorer to use.sampling?:ScoringSamplingConfigSampling configuration for the scorer.type:'none' | 'ratio'Type of sampling strategy. Use 'none' to disable sampling or 'ratio' for percentage-based sampling.rate?:numberSampling rate (0-1). Required when type is 'ratio'.
|
|
24
|
+
|
|
25
|
+
**onIterationComplete?:** (`(context: IterationCompleteContext) => { continue?: boolean; feedback?: string } | void | Promise<{ continue?: boolean; feedback?: string } | void>`): context.iteration:numberCurrent iteration number (1-based).context.maxIterations:number | undefinedMaximum iterations allowed (if set).context.text:stringThe text response from this iteration.context.isFinal:booleanWhether this is the final iteration.context.finishReason:stringReason why this iteration finished (e.g., 'stop', 'length', 'tool-calls').context.toolCalls:ToolCall\[]Tool calls made in this iteration.context.messages:MastraDBMessage\[]All messages accumulated so far.return.continue?:booleanSet to false to stop execution early.return.feedback?:stringFeedback message to guide the agent's next iteration.
|
|
26
|
+
|
|
27
|
+
**isTaskComplete?:** (`IsTaskCompleteConfig`): scorers:MastraScorer\[]Array of scorers that evaluate task completion. Each scorer returns 0 (failed) or 1 (passed).strategy?:'all' | 'any'Strategy for combining scorer results. 'all' requires all scorers to pass, 'any' requires at least one.onComplete?:(result: IsTaskCompleteRunResult) => void | Promise\<void>Callback called when the task completion check finishes. Receives the result with individual scorer scores.parallel?:booleanWhether to run scorers in parallel.timeout?:numberMaximum time in milliseconds to wait for all scorers to complete.
|
|
28
|
+
|
|
29
|
+
**delegation?:** (`DelegationConfig`): onDelegationStart?:(context: DelegationStartContext) => DelegationStartResult | void | Promise\<DelegationStartResult | void>Called before delegating to a subagent. Use this to modify the delegation parameters or reject the delegation entirely.onDelegationComplete?:(context: DelegationCompleteContext) => { feedback?: string } | void | Promise<{ feedback?: string } | void>Called after a subagent delegation completes. The context includes a \`bail()\` method to stop further execution, and you can return \`{ feedback }\` to guide the supervisor's next action. Feedback is saved to supervisor memory as an assistant message.messageFilter?:(context: MessageFilterContext) => MastraDBMessage\[] | Promise\<MastraDBMessage\[]>Callback function called before delegating to a subagent. Use this to filter the messages that are passed to the subagent.
|
|
30
|
+
|
|
31
|
+
**tracingContext?:** (`TracingContext`): Tracing context for span hierarchy and metadata.
|
|
32
|
+
|
|
33
|
+
**returnScorerData?:** (`boolean`): Whether to return detailed scoring data in the response.
|
|
34
|
+
|
|
35
|
+
**onChunk?:** (`(chunk: ChunkType) => Promise<void> | void`): Callback function called for each chunk during streaming.
|
|
36
|
+
|
|
37
|
+
**onError?:** (`({ error }: { error: Error | string }) => Promise<void> | void`): Callback function called when an error occurs during streaming.
|
|
38
|
+
|
|
39
|
+
**onAbort?:** (`(event: any) => Promise<void> | void`): Callback function called when the stream is aborted.
|
|
40
|
+
|
|
41
|
+
**abortSignal?:** (`AbortSignal`): Signal object that allows you to abort the agent's execution. When the signal is aborted, all ongoing operations will be terminated.
|
|
42
|
+
|
|
43
|
+
**activeTools?:** (`Array<keyof ToolSet> | undefined`): Array of active tool names that can be used during execution.
|
|
44
|
+
|
|
45
|
+
**prepareStep?:** (`PrepareStepFunction<any>`): Callback function called before each step of multi-step execution.
|
|
46
|
+
|
|
47
|
+
**context?:** (`ModelMessage[]`): Additional context messages to provide to the agent.
|
|
48
|
+
|
|
49
|
+
**structuredOutput?:** (`StructuredOutputOptions<S extends ZodTypeAny = ZodTypeAny>`): schema:z.ZodSchema\<S>Zod schema defining the expected output structure.model?:MastraLanguageModelLanguage model to use for structured output generation. If provided, enables the agent to respond in multi step with tool calls, text, and structured outputerrorStrategy?:'strict' | 'warn' | 'fallback'Strategy for handling schema validation errors. 'strict' throws errors, 'warn' logs warnings, 'fallback' uses fallback values.fallbackValue?:\<S extends ZodTypeAny>Fallback value to use when schema validation fails and errorStrategy is 'fallback'.instructions?:stringAdditional instructions for the structured output model.jsonPromptInjection?:booleanInjects system prompt into the main agent instructing it to return structured output, useful for when a model does not natively support structured outputs.providerOptions?:ProviderOptionsProvider-specific options passed to the internal structuring agent. Use this to control model behavior like reasoning effort for thinking models (e.g., \`{ openai: { reasoningEffort: 'low' } }\`).
|
|
50
|
+
|
|
51
|
+
**outputProcessors?:** (`Processor[]`): Overrides the output processors set on the agent. Output processors that can modify or validate messages from the agent before they are returned to the user. Must implement either (or both) of the \`processOutputResult\` and \`processOutputStream\` functions.
|
|
52
|
+
|
|
53
|
+
**includeRawChunks?:** (`boolean`): Whether to include raw chunks in the stream output (not available on all model providers).
|
|
54
|
+
|
|
55
|
+
**inputProcessors?:** (`Processor[]`): Overrides the input processors set on the agent. Input processors that can modify or validate messages before they are processed by the agent. Must implement the \`processInput\` function.
|
|
56
|
+
|
|
57
|
+
**instructions?:** (`string`): Custom instructions that override the agent's default instructions for this specific generation. Useful for dynamically modifying agent behavior without creating a new agent instance.
|
|
58
|
+
|
|
59
|
+
**system?:** (`string | string[] | CoreSystemMessage | SystemModelMessage | CoreSystemMessage[] | SystemModelMessage[]`): Custom system message(s) to include in the prompt. Can be a single string, message object, or array of either. System messages provide additional context or behavior instructions that supplement the agent's main instructions.
|
|
60
|
+
|
|
61
|
+
**output?:** (`Zod schema | JsonSchema7`): \*\*Deprecated.\*\* Use structuredOutput without a model to achieve the same thing. Defines the expected structure of the output. Can be a JSON Schema object or a Zod schema.
|
|
62
|
+
|
|
63
|
+
**memory?:** (`object`): Memory configuration for the conversation.
- `thread` (`string | { id: string; metadata?: Record<string, any>, title?: string }`): The conversation thread, as a string ID or an object with an `id` and optional `metadata`.
- `resource` (`string`): Identifier for the user or resource associated with the thread.
- `options?` (`MemoryConfig`): Configuration for memory behavior including lastMessages, readOnly, semanticRecall, and workingMemory.
|
|
64
|
+
|
|
65
|
+
**onFinish?:** (`StreamTextOnFinishCallback<any> | StreamObjectOnFinishCallback<OUTPUT>`): Callback function called when streaming completes. Receives the final result.
|
|
66
|
+
|
|
67
|
+
**onStepFinish?:** (`StreamTextOnStepFinishCallback<any> | never`): Callback function called after each execution step. Receives step details as a JSON string. Unavailable for structured output.
|
|
68
|
+
|
|
69
|
+
**telemetry?:** (`TelemetrySettings`): isEnabled?:booleanEnable or disable telemetry. Disabled by default while experimental.recordInputs?:booleanEnable or disable input recording. Enabled by default. You might want to disable input recording to avoid recording sensitive information.recordOutputs?:booleanEnable or disable output recording. Enabled by default. You might want to disable output recording to avoid recording sensitive information.functionId?:stringIdentifier for this function. Used to group telemetry data by function.
|
|
70
|
+
|
|
71
|
+
**modelSettings?:** (`CallSettings`): temperature?:numberControls randomness in generation (0-2). Higher values make output more random.maxOutputTokens?:numberMaximum number of tokens to generate in the response. Note: Use maxOutputTokens (not maxTokens) as per AI SDK v5 convention.maxRetries?:numberMaximum number of retry attempts for failed requests.topP?:numberNucleus sampling parameter (0-1). Controls diversity of generated text.topK?:numberTop-k sampling parameter. Limits vocabulary to k most likely tokens.presencePenalty?:numberPenalty for token presence (-2 to 2). Reduces repetition.frequencyPenalty?:numberPenalty for token frequency (-2 to 2). Reduces repetition of frequent tokens.stopSequences?:string\[]Stop sequences. If set, the model will stop generating text when one of the stop sequences is generated.
|
|
72
|
+
|
|
73
|
+
**toolChoice?:** (`'auto' | 'none' | 'required' | { type: 'tool'; toolName: string }`): Controls how the model selects tools. (Default: `'auto'`)
- `'auto'`: Let the model decide whether to use tools (default).
- `'none'`: Do not use any tools.
- `'required'`: Require the model to use at least one tool.
- `{ type: 'tool'; toolName: string }`: Require the model to use a specific tool by name.
|
|
74
|
+
|
|
75
|
+
**toolsets?:** (`ToolsetsInput`): Additional toolsets to make available to the agent during streaming.
|
|
76
|
+
|
|
77
|
+
**clientTools?:** (`ToolsInput`): Tools that are executed on the 'client' side of the request. These tools do not have execute functions in the definition.
|
|
78
|
+
|
|
79
|
+
**savePerStep?:** (`boolean`): Save messages incrementally after each stream step completes (default: false).
|
|
80
|
+
|
|
81
|
+
**requireToolApproval?:** (`boolean`): When true, all tool calls require explicit approval before execution. The stream will emit \`tool-call-approval\` chunks and pause until \`approveToolCall()\` or \`declineToolCall()\` is called.
|
|
82
|
+
|
|
83
|
+
**autoResumeSuspendedTools?:** (`boolean`): When true, automatically resumes suspended tools when the user sends a new message on the same thread. The agent extracts \`resumeData\` from the user's message based on the tool's \`resumeSchema\`. Requires memory to be configured.
|
|
84
|
+
|
|
85
|
+
**toolCallConcurrency?:** (`number`): Maximum number of tool calls to execute concurrently. Defaults to 1 when approval may be required, otherwise 10.
|
|
86
|
+
|
|
87
|
+
**providerOptions?:** (`Record<string, Record<string, JSONValue>>`): openai?:Record\<string, JSONValue>OpenAI-specific options. Example: \`{ reasoningEffort: 'high' }\`anthropic?:Record\<string, JSONValue>Anthropic-specific options. Example: \`{ maxTokens: 1000 }\`google?:Record\<string, JSONValue>Google-specific options. Example: \`{ safetySettings: \[...] }\`\[providerName]?:Record\<string, JSONValue>Other provider-specific options. The key is the provider name and the value is a record of provider-specific options.
|
|
88
|
+
|
|
89
|
+
**runId?:** (`string`): Unique ID for this generation run. Useful for tracking and debugging purposes.
|
|
90
|
+
|
|
91
|
+
**requestContext?:** (`RequestContext`): Request Context for dependency injection and contextual information.
|
|
92
|
+
|
|
93
|
+
**tracingContext?:** (`TracingContext`): currentSpan?:SpanCurrent span for creating child spans and adding metadata. Use this to create custom child spans or update span attributes during execution.
|
|
94
|
+
|
|
95
|
+
**tracingOptions?:** (`TracingOptions`): metadata?:Record\<string, any>Metadata to add to the root trace span. Useful for adding custom attributes like user IDs, session IDs, or feature flags.requestContextKeys?:string\[]Additional RequestContext keys to extract as metadata for this trace. Supports dot notation for nested values (e.g., 'user.id').traceId?:stringTrace ID to use for this execution (1-32 hexadecimal characters). If provided, this trace will be part of the specified trace.parentSpanId?:stringParent span ID to use for this execution (1-16 hexadecimal characters). If provided, the root span will be created as a child of this span.tags?:string\[]Tags to apply to this trace. String labels for categorizing and filtering traces.
|
|
96
|
+
|
|
97
|
+
## Returns
|
|
98
|
+
|
|
99
|
+
**stream:** (`MastraModelOutput<Output>`): Returns a MastraModelOutput instance that provides access to the streaming output.
|
|
100
|
+
|
|
101
|
+
**traceId?:** (`string`): The trace ID associated with this execution when Tracing is enabled. Use this to correlate logs and debug execution flow.
|
|
102
|
+
|
|
103
|
+
## Extended usage example
|
|
104
|
+
|
|
105
|
+
### Mastra Format (Default)
|
|
106
|
+
|
|
107
|
+
```ts
|
|
108
|
+
import { stepCountIs } from 'ai'
|
|
109
|
+
|
|
110
|
+
const stream = await agent.stream('Tell me a story', {
|
|
111
|
+
stopWhen: stepCountIs(3), // Stop after 3 steps
|
|
112
|
+
modelSettings: {
|
|
113
|
+
temperature: 0.7,
|
|
114
|
+
},
|
|
115
|
+
})
|
|
116
|
+
|
|
117
|
+
// Access text stream
|
|
118
|
+
for await (const chunk of stream.textStream) {
|
|
119
|
+
console.log(chunk)
|
|
120
|
+
}
|
|
121
|
+
|
|
122
|
+
// or access full stream
|
|
123
|
+
for await (const chunk of stream.fullStream) {
|
|
124
|
+
console.log(chunk)
|
|
125
|
+
}
|
|
126
|
+
|
|
127
|
+
// Get full text after streaming
|
|
128
|
+
const fullText = await stream.text
|
|
129
|
+
```
|
|
130
|
+
|
|
131
|
+
### AI SDK v5+ Format
|
|
132
|
+
|
|
133
|
+
To use the stream with AI SDK v5 (and later), you can convert it using our utility function `toAISdkStream`.
|
|
134
|
+
|
|
135
|
+
```ts
|
|
136
|
+
import { stepCountIs, createUIMessageStreamResponse } from 'ai'
|
|
137
|
+
import { toAISdkStream } from '@mastra/ai-sdk'
|
|
138
|
+
|
|
139
|
+
const stream = await agent.stream('Tell me a story', {
|
|
140
|
+
stopWhen: stepCountIs(3), // Stop after 3 steps
|
|
141
|
+
modelSettings: {
|
|
142
|
+
temperature: 0.7,
|
|
143
|
+
},
|
|
144
|
+
})
|
|
145
|
+
|
|
146
|
+
// In an API route for frontend integration
|
|
147
|
+
return createUIMessageStreamResponse({
|
|
148
|
+
stream: toAISdkStream(stream, { from: 'agent' }),
|
|
149
|
+
})
|
|
150
|
+
```
|
|
151
|
+
|
|
152
|
+
### Using Callbacks
|
|
153
|
+
|
|
154
|
+
All callback functions are now available as top-level properties for a cleaner API experience.
|
|
155
|
+
|
|
156
|
+
```ts
|
|
157
|
+
const stream = await agent.stream('Tell me a story', {
|
|
158
|
+
onFinish: result => {
|
|
159
|
+
console.log('Streaming finished:', result)
|
|
160
|
+
},
|
|
161
|
+
onStepFinish: step => {
|
|
162
|
+
console.log('Step completed:', step)
|
|
163
|
+
},
|
|
164
|
+
onChunk: chunk => {
|
|
165
|
+
console.log('Received chunk:', chunk)
|
|
166
|
+
},
|
|
167
|
+
onError: ({ error }) => {
|
|
168
|
+
console.error('Streaming error:', error)
|
|
169
|
+
},
|
|
170
|
+
onAbort: event => {
|
|
171
|
+
console.log('Stream aborted:', event)
|
|
172
|
+
},
|
|
173
|
+
})
|
|
174
|
+
|
|
175
|
+
// Process the stream
|
|
176
|
+
for await (const chunk of stream.textStream) {
|
|
177
|
+
console.log(chunk)
|
|
178
|
+
}
|
|
179
|
+
```
|
|
180
|
+
|
|
181
|
+
### Advanced Example with Options
|
|
182
|
+
|
|
183
|
+
```ts
|
|
184
|
+
import { z } from 'zod'
|
|
185
|
+
import { stepCountIs } from 'ai'
|
|
186
|
+
|
|
187
|
+
await agent.stream('message for agent', {
|
|
188
|
+
stopWhen: stepCountIs(3), // Stop after 3 steps
|
|
189
|
+
modelSettings: {
|
|
190
|
+
temperature: 0.7,
|
|
191
|
+
},
|
|
192
|
+
memory: {
|
|
193
|
+
thread: 'user-123',
|
|
194
|
+
resource: 'test-app',
|
|
195
|
+
},
|
|
196
|
+
toolChoice: 'auto',
|
|
197
|
+
// Structured output with better DX
|
|
198
|
+
structuredOutput: {
|
|
199
|
+
schema: z.object({
|
|
200
|
+
sentiment: z.enum(['positive', 'negative', 'neutral']),
|
|
201
|
+
confidence: z.number(),
|
|
202
|
+
}),
|
|
203
|
+
model: 'openai/gpt-5.1',
|
|
204
|
+
errorStrategy: 'warn',
|
|
205
|
+
},
|
|
206
|
+
// Output processors for streaming response validation
|
|
207
|
+
outputProcessors: [
|
|
208
|
+
new ModerationProcessor({ model: 'openrouter/openai/gpt-oss-safeguard-20b' }),
|
|
209
|
+
new BatchPartsProcessor({ maxBatchSize: 3, maxWaitTime: 100 }),
|
|
210
|
+
],
|
|
211
|
+
})
|
|
212
|
+
```
|
|
213
|
+
|
|
214
|
+
## Related
|
|
215
|
+
|
|
216
|
+
- [Generating responses](https://mastra.ai/docs/agents/overview)
|
|
217
|
+
- [Streaming responses](https://mastra.ai/docs/agents/overview)
|
|
218
|
+
- [Agent Approval](https://mastra.ai/docs/agents/agent-approval)
|
|
219
|
+
- [Agent Networks](https://mastra.ai/docs/agents/networks) - Using the supervisor pattern for multi-agent coordination
|
|
220
|
+
- [Migration: .network() to Supervisor Pattern](https://mastra.ai/guides/migrations/network-to-supervisor)
|
|
221
|
+
- [Guide: Research Coordinator](https://mastra.ai/guides/guide/research-coordinator)
|
|
@@ -0,0 +1,142 @@
|
|
|
1
|
+
# Agent.streamLegacy() (Legacy)
|
|
2
|
+
|
|
3
|
+
> **Warning:** **Deprecated**: This method is deprecated and only works with V1 models. For V2 models, use the new [`.stream()`](https://mastra.ai/reference/streaming/agents/stream) method instead. See the [migration guide](https://mastra.ai/guides/migrations/vnext-to-standard-apis) for details on upgrading.
|
|
4
|
+
|
|
5
|
+
The `.streamLegacy()` method is the legacy version of the agent streaming API, used for real-time streaming of responses from V1 model agents. This method accepts messages and optional streaming options.
|
|
6
|
+
|
|
7
|
+
## Usage example
|
|
8
|
+
|
|
9
|
+
```typescript
|
|
10
|
+
await agent.streamLegacy('message for agent')
|
|
11
|
+
```
|
|
12
|
+
|
|
13
|
+
## Parameters
|
|
14
|
+
|
|
15
|
+
**messages:** (`string | string[] | CoreMessage[] | AiMessageType[] | UIMessageWithMetadata[]`): The messages to send to the agent. Can be a single string, array of strings, or structured message objects.
|
|
16
|
+
|
|
17
|
+
**options?:** (`AgentStreamOptions<OUTPUT, EXPERIMENTAL_OUTPUT>`): Optional configuration for the streaming process.
|
|
18
|
+
|
|
19
|
+
### Options parameters
|
|
20
|
+
|
|
21
|
+
**abortSignal?:** (`AbortSignal`): Signal object that allows you to abort the agent's execution. When the signal is aborted, all ongoing operations will be terminated.
|
|
22
|
+
|
|
23
|
+
**context?:** (`CoreMessage[]`): Additional context messages to provide to the agent.
|
|
24
|
+
|
|
25
|
+
**experimental\_output?:** (`Zod schema | JsonSchema7`): Enables structured output generation alongside text generation and tool calls. The model will generate responses that conform to the provided schema.
|
|
26
|
+
|
|
27
|
+
**instructions?:** (`string`): Custom instructions that override the agent's default instructions for this specific generation. Useful for dynamically modifying agent behavior without creating a new agent instance.
|
|
28
|
+
|
|
29
|
+
**output?:** (`Zod schema | JsonSchema7`): Defines the expected structure of the output. Can be a JSON Schema object or a Zod schema.
|
|
30
|
+
|
|
31
|
+
**memory?:** (`object`): Memory configuration for the conversation.
- `thread` (`string | { id: string; metadata?: Record<string, any>, title?: string }`): The conversation thread, as a string ID or an object with an `id` and optional `metadata`.
- `resource` (`string`): Identifier for the user or resource associated with the thread.
- `options?` (`MemoryConfig`): Configuration for memory behavior, like message history and semantic recall.
|
|
32
|
+
|
|
33
|
+
**maxSteps?:** (`number`): Maximum number of execution steps allowed. (Default: `5`)
|
|
34
|
+
|
|
35
|
+
**maxRetries?:** (`number`): Maximum number of retries. Set to 0 to disable retries. (Default: `2`)
|
|
36
|
+
|
|
37
|
+
**memoryOptions?:** (`MemoryConfig`): lastMessages?:number | falseNumber of recent messages to include in context, or false to disable.semanticRecall?:boolean | { topK: number; messageRange: number | { before: number; after: number }; scope?: 'thread' | 'resource' }Enable semantic recall to find relevant past messages. Can be a boolean or detailed configuration.workingMemory?:WorkingMemoryConfiguration for working memory functionality.threads?:{ generateTitle?: boolean | { model: DynamicArgument\<MastraLanguageModel>; instructions?: DynamicArgument\<string> } }Thread-specific configuration, including automatic title generation.
|
|
38
|
+
|
|
39
|
+
**onFinish?:** (`StreamTextOnFinishCallback<any> | StreamObjectOnFinishCallback<OUTPUT>`): Callback function called when streaming completes. Receives the final result.
|
|
40
|
+
|
|
41
|
+
**onStepFinish?:** (`StreamTextOnStepFinishCallback<any> | never`): Callback function called after each execution step. Receives step details as a JSON string. Unavailable for structured output.
|
|
42
|
+
|
|
43
|
+
**resourceId?:** (`string`): \*\*Deprecated.\*\* Use \`memory.resource\` instead. Identifier for the user or resource interacting with the agent. Must be provided if threadId is provided.
|
|
44
|
+
|
|
45
|
+
**telemetry?:** (`TelemetrySettings`): isEnabled?:booleanEnable or disable telemetry. Disabled by default while experimental.recordInputs?:booleanEnable or disable input recording. Enabled by default. You might want to disable input recording to avoid recording sensitive information.recordOutputs?:booleanEnable or disable output recording. Enabled by default. You might want to disable output recording to avoid recording sensitive information.functionId?:stringIdentifier for this function. Used to group telemetry data by function.
|
|
46
|
+
|
|
47
|
+
**temperature?:** (`number`): Controls randomness in the model's output. Higher values (e.g., 0.8) make the output more random, lower values (e.g., 0.2) make it more focused and deterministic.
|
|
48
|
+
|
|
49
|
+
**threadId?:** (`string`): \*\*Deprecated.\*\* Use \`memory.thread\` instead. Identifier for the conversation thread. Allows for maintaining context across multiple interactions. Must be provided if resourceId is provided.
|
|
50
|
+
|
|
51
|
+
**toolChoice?:** (`'auto' | 'none' | 'required' | { type: 'tool'; toolName: string }`): Controls how the model selects tools. (Default: `'auto'`)
- `'auto'`: Let the model decide whether to use tools (default).
- `'none'`: Do not use any tools.
- `'required'`: Require the model to use at least one tool.
- `{ type: 'tool'; toolName: string }`: Require the model to use a specific tool by name.
|
|
52
|
+
|
|
53
|
+
**toolsets?:** (`ToolsetsInput`): Additional toolsets to make available to the agent during streaming.
|
|
54
|
+
|
|
55
|
+
**clientTools?:** (`ToolsInput`): Tools that are executed on the 'client' side of the request. These tools do not have execute functions in the definition.
|
|
56
|
+
|
|
57
|
+
**savePerStep?:** (`boolean`): Save messages incrementally after each stream step completes (default: false).
|
|
58
|
+
|
|
59
|
+
**providerOptions?:** (`Record<string, Record<string, JSONValue>>`): openai?:Record\<string, JSONValue>OpenAI-specific options. Example: \`{ reasoningEffort: 'high' }\`anthropic?:Record\<string, JSONValue>Anthropic-specific options. Example: \`{ maxTokens: 1000 }\`google?:Record\<string, JSONValue>Google-specific options. Example: \`{ safetySettings: \[...] }\`\[providerName]?:Record\<string, JSONValue>Other provider-specific options. The key is the provider name and the value is a record of provider-specific options.
|
|
60
|
+
|
|
61
|
+
**runId?:** (`string`): Unique ID for this generation run. Useful for tracking and debugging purposes.
|
|
62
|
+
|
|
63
|
+
**requestContext?:** (`RequestContext`): Request Context for dependency injection and contextual information.
|
|
64
|
+
|
|
65
|
+
**maxTokens?:** (`number`): Maximum number of tokens to generate.
|
|
66
|
+
|
|
67
|
+
**topP?:** (`number`): Nucleus sampling. This is a number between 0 and 1. It is recommended to set either \`temperature\` or \`topP\`, but not both.
|
|
68
|
+
|
|
69
|
+
**topK?:** (`number`): Only sample from the top K options for each subsequent token. Used to remove 'long tail' low probability responses.
|
|
70
|
+
|
|
71
|
+
**presencePenalty?:** (`number`): Presence penalty setting. It affects the likelihood of the model to repeat information that is already in the prompt. A number between -1 (increase repetition) and 1 (maximum penalty, decrease repetition).
|
|
72
|
+
|
|
73
|
+
**frequencyPenalty?:** (`number`): Frequency penalty setting. It affects the likelihood of the model to repeatedly use the same words or phrases. A number between -1 (increase repetition) and 1 (maximum penalty, decrease repetition).
|
|
74
|
+
|
|
75
|
+
**stopSequences?:** (`string[]`): Stop sequences. If set, the model will stop generating text when one of the stop sequences is generated.
|
|
76
|
+
|
|
77
|
+
**seed?:** (`number`): The seed (integer) to use for random sampling. If set and supported by the model, calls will generate deterministic results.
|
|
78
|
+
|
|
79
|
+
**headers?:** (`Record<string, string | undefined>`): Additional HTTP headers to be sent with the request. Only applicable for HTTP-based providers.
|
|
80
|
+
|
|
81
|
+
## Returns
|
|
82
|
+
|
|
83
|
+
**textStream?:** (`AsyncGenerator<string>`): Async generator that yields text chunks as they become available.
|
|
84
|
+
|
|
85
|
+
**fullStream?:** (`Promise<ReadableStream>`): Promise that resolves to a ReadableStream for the complete response.
|
|
86
|
+
|
|
87
|
+
**text?:** (`Promise<string>`): Promise that resolves to the complete text response.
|
|
88
|
+
|
|
89
|
+
**usage?:** (`Promise<{ totalTokens: number; promptTokens: number; completionTokens: number }>`): Promise that resolves to token usage information.
|
|
90
|
+
|
|
91
|
+
**finishReason?:** (`Promise<string>`): Promise that resolves to the reason why the stream finished.
|
|
92
|
+
|
|
93
|
+
**toolCalls?:** (`Promise<Array<ToolCall>>`): toolName:stringThe name of the tool invoked.args:anyThe arguments passed to the tool.
|
|
94
|
+
|
|
95
|
+
## Extended usage example
|
|
96
|
+
|
|
97
|
+
```typescript
|
|
98
|
+
await agent.streamLegacy('message for agent', {
|
|
99
|
+
temperature: 0.7,
|
|
100
|
+
maxSteps: 3,
|
|
101
|
+
memory: {
|
|
102
|
+
thread: 'user-123',
|
|
103
|
+
resource: 'test-app',
|
|
104
|
+
},
|
|
105
|
+
toolChoice: 'auto',
|
|
106
|
+
})
|
|
107
|
+
```
|
|
108
|
+
|
|
109
|
+
## Migration to New API
|
|
110
|
+
|
|
111
|
+
> **Info:** The new `.stream()` method offers enhanced capabilities including AI SDK v5+ compatibility, better structured output handling, and improved callback system. See the [migration guide](https://mastra.ai/guides/migrations/vnext-to-standard-apis) for detailed migration instructions.
|
|
112
|
+
|
|
113
|
+
### Quick Migration Example
|
|
114
|
+
|
|
115
|
+
#### Before (Legacy)
|
|
116
|
+
|
|
117
|
+
```typescript
|
|
118
|
+
const result = await agent.streamLegacy('message', {
|
|
119
|
+
temperature: 0.7,
|
|
120
|
+
maxSteps: 3,
|
|
121
|
+
onFinish: result => console.log(result),
|
|
122
|
+
})
|
|
123
|
+
```
|
|
124
|
+
|
|
125
|
+
#### After (New API)
|
|
126
|
+
|
|
127
|
+
```typescript
|
|
128
|
+
const result = await agent.stream('message', {
|
|
129
|
+
modelSettings: {
|
|
130
|
+
temperature: 0.7,
|
|
131
|
+
},
|
|
132
|
+
maxSteps: 3,
|
|
133
|
+
onFinish: result => console.log(result),
|
|
134
|
+
})
|
|
135
|
+
```
|
|
136
|
+
|
|
137
|
+
## Related
|
|
138
|
+
|
|
139
|
+
- [Migration Guide](https://mastra.ai/guides/migrations/vnext-to-standard-apis)
|
|
140
|
+
- [New .stream() method](https://mastra.ai/reference/streaming/agents/stream)
|
|
141
|
+
- [Generating responses](https://mastra.ai/docs/agents/overview)
|
|
142
|
+
- [Streaming responses](https://mastra.ai/docs/agents/overview)
|
|
@@ -0,0 +1,42 @@
|
|
|
1
|
+
# Run.observeStream()
|
|
2
|
+
|
|
3
|
+
The `.observeStream()` method opens a new `ReadableStream` to a workflow run that is currently running, allowing you to observe the stream of events if the original stream is no longer available.
|
|
4
|
+
|
|
5
|
+
## Usage example
|
|
6
|
+
|
|
7
|
+
```typescript
|
|
8
|
+
const run = await workflow.createRun()
|
|
9
|
+
|
|
10
|
+
run.stream({
|
|
11
|
+
inputData: {
|
|
12
|
+
value: 'initial data',
|
|
13
|
+
},
|
|
14
|
+
})
|
|
15
|
+
|
|
16
|
+
const stream = await run.observeStream()
|
|
17
|
+
|
|
18
|
+
for await (const chunk of stream) {
|
|
19
|
+
console.log(chunk)
|
|
20
|
+
}
|
|
21
|
+
```
|
|
22
|
+
|
|
23
|
+
## Returns
|
|
24
|
+
|
|
25
|
+
`ReadableStream<ChunkType>`
|
|
26
|
+
|
|
27
|
+
## Stream Events
|
|
28
|
+
|
|
29
|
+
The stream emits various event types during workflow execution. Each event has a `type` field and a `payload` containing relevant data:
|
|
30
|
+
|
|
31
|
+
- **`workflow-start`**: Workflow execution begins
|
|
32
|
+
- **`workflow-step-start`**: A step begins execution
|
|
33
|
+
- **`workflow-step-output`**: Custom output from a step
|
|
34
|
+
- **`workflow-step-result`**: A step completes with results
|
|
35
|
+
- **`workflow-finish`**: Workflow execution completes with usage statistics
|
|
36
|
+
|
|
37
|
+
## Related
|
|
38
|
+
|
|
39
|
+
- [Workflows overview](https://mastra.ai/docs/workflows/overview)
|
|
40
|
+
- [Workflow.createRun()](https://mastra.ai/reference/workflows/workflow-methods/create-run)
|
|
41
|
+
- [Run.stream()](https://mastra.ai/reference/streaming/workflows/stream)
|
|
42
|
+
- [Run.resumeStream()](https://mastra.ai/reference/streaming/workflows/resumeStream)
|
|
@@ -0,0 +1,61 @@
|
|
|
1
|
+
# Run.resumeStream()
|
|
2
|
+
|
|
3
|
+
The `.resumeStream()` method resumes a suspended workflow run with new data, allowing you to continue execution from a specific step and to observe the stream of events.
|
|
4
|
+
|
|
5
|
+
## Usage example
|
|
6
|
+
|
|
7
|
+
```typescript
|
|
8
|
+
const run = await workflow.createRun()
|
|
9
|
+
|
|
10
|
+
const stream = run.stream({
|
|
11
|
+
inputData: {
|
|
12
|
+
value: 'initial data',
|
|
13
|
+
},
|
|
14
|
+
})
|
|
15
|
+
|
|
16
|
+
const result = await stream.result
|
|
17
|
+
|
|
18
|
+
if (result!.status === 'suspended') {
|
|
19
|
+
const resumedStream = await run.resumeStream({
|
|
20
|
+
resumeData: {
|
|
21
|
+
value: 'resume data',
|
|
22
|
+
},
|
|
23
|
+
})
|
|
24
|
+
}
|
|
25
|
+
```
|
|
26
|
+
|
|
27
|
+
## Parameters
|
|
28
|
+
|
|
29
|
+
**resumeData?:** (`z.infer<TInput>`): Resume data used to continue the suspended workflow run, matching the resume schema of the suspended step
|
|
30
|
+
|
|
31
|
+
**requestContext?:** (`RequestContext`): Request Context data to use during workflow execution
|
|
32
|
+
|
|
33
|
+
**step?:** (`Step<string, any, any, any, any, TEngineType>`): The step to resume execution from
|
|
34
|
+
|
|
35
|
+
**tracingOptions?:** (`TracingOptions`): Options for tracing this execution:

- `metadata?` (`Record<string, any>`): Metadata to add to the root trace span. Useful for adding custom attributes like user IDs, session IDs, or feature flags.
- `requestContextKeys?` (`string[]`): Additional RequestContext keys to extract as metadata for this trace. Supports dot notation for nested values (e.g., 'user.id').
- `traceId?` (`string`): Trace ID to use for this execution (1-32 hexadecimal characters). If provided, this trace will be part of the specified trace.
- `parentSpanId?` (`string`): Parent span ID to use for this execution (1-16 hexadecimal characters). If provided, the root span will be created as a child of this span.
- `tags?` (`string[]`): Tags to apply to this trace. String labels for categorizing and filtering traces.
|
|
36
|
+
|
|
37
|
+
## Returns
|
|
38
|
+
|
|
39
|
+
**stream:** (`MastraWorkflowStream<ChunkType>`): A custom stream that extends ReadableStream\<ChunkType> with additional workflow-specific properties
|
|
40
|
+
|
|
41
|
+
**stream.status:** (`Promise<RunStatus>`): A promise that resolves to the current workflow run status
|
|
42
|
+
|
|
43
|
+
**stream.result:** (`Promise<WorkflowResult<TState, TOutput, TSteps>>`): A promise that resolves to the final workflow result
|
|
44
|
+
|
|
45
|
+
**stream.usage:** (`Promise<{ inputTokens: number; outputTokens: number; totalTokens: number, reasoningTokens?: number, cachedInputTokens?: number }>`): A promise that resolves to token usage statistics
|
|
46
|
+
|
|
47
|
+
## Stream Events
|
|
48
|
+
|
|
49
|
+
The stream emits various event types during workflow execution. Each event has a `type` field and a `payload` containing relevant data:
|
|
50
|
+
|
|
51
|
+
- **`workflow-start`**: Workflow execution begins
|
|
52
|
+
- **`workflow-step-start`**: A step begins execution
|
|
53
|
+
- **`workflow-step-output`**: Custom output from a step
|
|
54
|
+
- **`workflow-step-result`**: A step completes with results
|
|
55
|
+
- **`workflow-finish`**: Workflow execution completes with usage statistics
|
|
56
|
+
|
|
57
|
+
## Related
|
|
58
|
+
|
|
59
|
+
- [Workflows overview](https://mastra.ai/docs/workflows/overview)
|
|
60
|
+
- [Workflow.createRun()](https://mastra.ai/reference/workflows/workflow-methods/create-run)
|
|
61
|
+
- [Run.stream()](https://mastra.ai/reference/streaming/workflows/stream)
|
|
@@ -0,0 +1,88 @@
|
|
|
1
|
+
# Run.stream()
|
|
2
|
+
|
|
3
|
+
The `.stream()` method enables real-time streaming of responses from a workflow. It returns a `WorkflowRunOutput` object that can be iterated over directly as a stream of events.
|
|
4
|
+
|
|
5
|
+
## Usage example
|
|
6
|
+
|
|
7
|
+
```typescript
|
|
8
|
+
const run = await workflow.createRun()
|
|
9
|
+
|
|
10
|
+
const stream = await run.stream({
|
|
11
|
+
inputData: {
|
|
12
|
+
value: 'initial data',
|
|
13
|
+
},
|
|
14
|
+
})
|
|
15
|
+
|
|
16
|
+
for await (const chunk of stream) {
|
|
17
|
+
console.log(chunk)
|
|
18
|
+
}
|
|
19
|
+
```
|
|
20
|
+
|
|
21
|
+
## Parameters
|
|
22
|
+
|
|
23
|
+
**inputData?:** (`z.infer<TInput>`): Input data that matches the workflow's input schema
|
|
24
|
+
|
|
25
|
+
**requestContext?:** (`RequestContext`): Request Context data to use during workflow execution
|
|
26
|
+
|
|
27
|
+
**tracingContext?:** (`TracingContext`): Tracing context for this execution:

- `currentSpan?` (`Span`): Current span for creating child spans and adding metadata.
|
|
28
|
+
|
|
29
|
+
**tracingOptions?:** (`TracingOptions`): Options for tracing this execution:

- `metadata?` (`Record<string, any>`): Metadata to add to the root trace span.
- `requestContextKeys?` (`string[]`): Additional RequestContext keys to extract as metadata for this trace. Supports dot notation for nested values (e.g., 'user.id').
- `traceId?` (`string`): Trace ID to use for this execution (1-32 hexadecimal characters). If provided, this trace will be part of the specified trace.
- `parentSpanId?` (`string`): Parent span ID to use for this execution (1-16 hexadecimal characters). If provided, the root span will be created as a child of this span.
- `tags?` (`string[]`): Tags to apply to this trace. String labels for categorizing and filtering traces.
|
|
30
|
+
|
|
31
|
+
**closeOnSuspend?:** (`boolean`): Whether to close the stream when the workflow is suspended, or to keep the stream open until the workflow is finished (by success or error). Default value is true.
|
|
32
|
+
|
|
33
|
+
## Returns
|
|
34
|
+
|
|
35
|
+
Returns a `WorkflowRunOutput` object that implements the async iterable interface (can be used directly in `for await...of` loops) and provides access to the stream and workflow execution results.
|
|
36
|
+
|
|
37
|
+
**fullStream:** (`ReadableStream<WorkflowStreamEvent>`): A ReadableStream of workflow events that you can iterate over to track progress in real-time. You can also iterate over the WorkflowRunOutput object directly.
|
|
38
|
+
|
|
39
|
+
**result:** (`Promise<WorkflowResult<TState, TInput, TOutput, TSteps>>`): A promise that resolves to the final workflow result
|
|
40
|
+
|
|
41
|
+
**status:** (`WorkflowRunStatus`): The current workflow run status ('running', 'suspended', 'success', 'failed', 'canceled', or 'tripwire')
|
|
42
|
+
|
|
43
|
+
**usage:** (`Promise<{ inputTokens: number; outputTokens: number; totalTokens: number, reasoningTokens?: number, cachedInputTokens?: number }>`): A promise that resolves to token usage statistics
|
|
44
|
+
|
|
45
|
+
## Extended usage example
|
|
46
|
+
|
|
47
|
+
```typescript
|
|
48
|
+
const run = await workflow.createRun()
|
|
49
|
+
|
|
50
|
+
const stream = run.stream({
|
|
51
|
+
inputData: {
|
|
52
|
+
value: 'initial data',
|
|
53
|
+
},
|
|
54
|
+
})
|
|
55
|
+
|
|
56
|
+
// Iterate over stream events (you can iterate over stream directly or use stream.fullStream)
|
|
57
|
+
for await (const chunk of stream) {
|
|
58
|
+
console.log(chunk)
|
|
59
|
+
}
|
|
60
|
+
|
|
61
|
+
// Access the final result
|
|
62
|
+
const result = await stream.result
|
|
63
|
+
console.log('Final result:', result)
|
|
64
|
+
|
|
65
|
+
// Access token usage
|
|
66
|
+
const usage = await stream.usage
|
|
67
|
+
console.log('Token usage:', usage)
|
|
68
|
+
|
|
69
|
+
// Check current status
|
|
70
|
+
console.log('Status:', stream.status)
|
|
71
|
+
```
|
|
72
|
+
|
|
73
|
+
## Stream Events
|
|
74
|
+
|
|
75
|
+
The stream emits various event types during workflow execution. Each event has a `type` field and a `payload` containing relevant data:
|
|
76
|
+
|
|
77
|
+
- **`workflow-start`**: Workflow execution begins
|
|
78
|
+
- **`workflow-step-start`**: A step begins execution
|
|
79
|
+
- **`workflow-step-output`**: Custom output from a step
|
|
80
|
+
- **`workflow-step-progress`**: A foreach step reports per-iteration progress (includes `completedCount`, `totalCount`, `currentIndex`, `iterationStatus`, and optional `iterationOutput`)
|
|
81
|
+
- **`workflow-step-result`**: A step completes with results
|
|
82
|
+
- **`workflow-finish`**: Workflow execution completes with usage statistics
|
|
83
|
+
|
|
84
|
+
## Related
|
|
85
|
+
|
|
86
|
+
- [Workflows overview](https://mastra.ai/docs/workflows/overview)
|
|
87
|
+
- [Workflow.createRun()](https://mastra.ai/reference/workflows/workflow-methods/create-run)
|
|
88
|
+
- [Run.resumeStream()](https://mastra.ai/reference/streaming/workflows/resumeStream)
|