@mastra/mcp-docs-server 1.0.0-beta.3 → 1.0.0-beta.5
This diff represents the content of publicly available package versions released to one of the supported registries. It is provided for informational purposes only and reflects the changes between package versions as they appear in their respective public registries.
- package/.docs/organized/changelogs/%40internal%2Fstorage-test-utils.md +201 -1
- package/.docs/organized/changelogs/%40mastra%2Fagent-builder.md +201 -1
- package/.docs/organized/changelogs/%40mastra%2Fai-sdk.md +201 -1
- package/.docs/organized/changelogs/%40mastra%2Fastra.md +201 -1
- package/.docs/organized/changelogs/%40mastra%2Fauth.md +6 -0
- package/.docs/organized/changelogs/%40mastra%2Fchroma.md +201 -1
- package/.docs/organized/changelogs/%40mastra%2Fclickhouse.md +201 -1
- package/.docs/organized/changelogs/%40mastra%2Fclient-js.md +201 -1
- package/.docs/organized/changelogs/%40mastra%2Fcloudflare-d1.md +201 -1
- package/.docs/organized/changelogs/%40mastra%2Fcloudflare.md +201 -1
- package/.docs/organized/changelogs/%40mastra%2Fcore.md +370 -170
- package/.docs/organized/changelogs/%40mastra%2Fcouchbase.md +201 -1
- package/.docs/organized/changelogs/%40mastra%2Fdeployer-cloud.md +201 -1
- package/.docs/organized/changelogs/%40mastra%2Fdeployer-cloudflare.md +201 -1
- package/.docs/organized/changelogs/%40mastra%2Fdeployer-netlify.md +201 -1
- package/.docs/organized/changelogs/%40mastra%2Fdeployer-vercel.md +201 -1
- package/.docs/organized/changelogs/%40mastra%2Fdeployer.md +201 -1
- package/.docs/organized/changelogs/%40mastra%2Fdynamodb.md +201 -1
- package/.docs/organized/changelogs/%40mastra%2Fevals.md +201 -1
- package/.docs/organized/changelogs/%40mastra%2Flance.md +201 -1
- package/.docs/organized/changelogs/%40mastra%2Flibsql.md +201 -1
- package/.docs/organized/changelogs/%40mastra%2Floggers.md +201 -1
- package/.docs/organized/changelogs/%40mastra%2Fmcp-docs-server.md +201 -1
- package/.docs/organized/changelogs/%40mastra%2Fmcp-registry-registry.md +201 -1
- package/.docs/organized/changelogs/%40mastra%2Fmcp.md +201 -1
- package/.docs/organized/changelogs/%40mastra%2Fmemory.md +201 -1
- package/.docs/organized/changelogs/%40mastra%2Fmongodb.md +201 -1
- package/.docs/organized/changelogs/%40mastra%2Fmssql.md +201 -1
- package/.docs/organized/changelogs/%40mastra%2Fopensearch.md +201 -1
- package/.docs/organized/changelogs/%40mastra%2Fpg.md +201 -1
- package/.docs/organized/changelogs/%40mastra%2Fpinecone.md +201 -1
- package/.docs/organized/changelogs/%40mastra%2Fplayground-ui.md +201 -1
- package/.docs/organized/changelogs/%40mastra%2Fqdrant.md +201 -1
- package/.docs/organized/changelogs/%40mastra%2Frag.md +201 -1
- package/.docs/organized/changelogs/%40mastra%2Freact.md +80 -1
- package/.docs/organized/changelogs/%40mastra%2Fs3vectors.md +9 -0
- package/.docs/organized/changelogs/%40mastra%2Fschema-compat.md +36 -0
- package/.docs/organized/changelogs/%40mastra%2Fserver.md +201 -1
- package/.docs/organized/changelogs/%40mastra%2Fturbopuffer.md +201 -1
- package/.docs/organized/changelogs/%40mastra%2Fupstash.md +201 -1
- package/.docs/organized/changelogs/%40mastra%2Fvectorize.md +201 -1
- package/.docs/organized/changelogs/%40mastra%2Fvoice-azure.md +201 -1
- package/.docs/organized/changelogs/%40mastra%2Fvoice-cloudflare.md +201 -1
- package/.docs/organized/changelogs/%40mastra%2Fvoice-deepgram.md +201 -1
- package/.docs/organized/changelogs/%40mastra%2Fvoice-elevenlabs.md +201 -1
- package/.docs/organized/changelogs/%40mastra%2Fvoice-gladia.md +92 -1
- package/.docs/organized/changelogs/%40mastra%2Fvoice-google-gemini-live.md +67 -1
- package/.docs/organized/changelogs/%40mastra%2Fvoice-google.md +201 -1
- package/.docs/organized/changelogs/%40mastra%2Fvoice-murf.md +201 -1
- package/.docs/organized/changelogs/%40mastra%2Fvoice-openai-realtime.md +201 -1
- package/.docs/organized/changelogs/%40mastra%2Fvoice-openai.md +201 -1
- package/.docs/organized/changelogs/%40mastra%2Fvoice-playai.md +201 -1
- package/.docs/organized/changelogs/%40mastra%2Fvoice-sarvam.md +201 -1
- package/.docs/organized/changelogs/%40mastra%2Fvoice-speechify.md +201 -1
- package/.docs/organized/changelogs/create-mastra.md +201 -1
- package/.docs/organized/changelogs/mastra.md +201 -1
- package/.docs/organized/code-examples/memory-with-processors.md +1 -1
- package/.docs/organized/code-examples/quick-start.md +1 -1
- package/.docs/raw/agents/adding-voice.mdx +55 -9
- package/.docs/raw/agents/guardrails.mdx +19 -20
- package/.docs/raw/agents/human-in-the-loop-with-tools.mdx +6 -5
- package/.docs/raw/agents/networks.mdx +1 -2
- package/.docs/raw/agents/overview.mdx +5 -5
- package/.docs/raw/agents/using-tools.mdx +4 -5
- package/.docs/raw/course/01-first-agent/05-running-playground.md +5 -5
- package/.docs/raw/course/01-first-agent/09-testing-your-agent.md +3 -3
- package/.docs/raw/course/01-first-agent/13-testing-your-tool.md +3 -3
- package/.docs/raw/course/01-first-agent/17-testing-memory.md +2 -2
- package/.docs/raw/course/04-workflows/07-using-playground.md +1 -1
- package/.docs/raw/deployment/building-mastra.mdx +1 -1
- package/.docs/raw/deployment/cloud-providers/amazon-ec2.mdx +1 -1
- package/.docs/raw/deployment/cloud-providers/aws-lambda.mdx +1 -1
- package/.docs/raw/deployment/cloud-providers/azure-app-services.mdx +1 -1
- package/.docs/raw/deployment/cloud-providers/digital-ocean.mdx +1 -1
- package/.docs/raw/deployment/cloud-providers/index.mdx +1 -1
- package/.docs/raw/deployment/mastra-cloud/observability.mdx +19 -17
- package/.docs/raw/deployment/mastra-cloud/setting-up.mdx +1 -1
- package/.docs/raw/deployment/overview.mdx +2 -2
- package/.docs/raw/deployment/web-framework.mdx +5 -5
- package/.docs/raw/evals/custom-scorers.mdx +3 -5
- package/.docs/raw/evals/overview.mdx +2 -3
- package/.docs/raw/getting-started/project-structure.mdx +1 -1
- package/.docs/raw/getting-started/start.mdx +72 -0
- package/.docs/raw/getting-started/studio.mdx +1 -1
- package/.docs/raw/{frameworks/agentic-uis/ai-sdk.mdx → guides/build-your-ui/ai-sdk-ui.mdx} +105 -11
- package/.docs/raw/{frameworks/web-frameworks → guides/getting-started}/astro.mdx +23 -25
- package/.docs/raw/{frameworks/servers → guides/getting-started}/express.mdx +3 -4
- package/.docs/raw/guides/{guide → getting-started}/manual-install.mdx +1 -1
- package/.docs/raw/guides/{quickstarts/nextjs.mdx → getting-started/next-js.mdx} +11 -11
- package/.docs/raw/guides/{quickstarts/standalone-server.mdx → getting-started/quickstart.mdx} +7 -7
- package/.docs/raw/{frameworks/web-frameworks → guides/getting-started}/sveltekit.mdx +23 -25
- package/.docs/raw/{frameworks/web-frameworks → guides/getting-started}/vite-react.mdx +7 -7
- package/.docs/raw/guides/guide/ai-recruiter.mdx +2 -3
- package/.docs/raw/guides/guide/chef-michel.mdx +2 -3
- package/.docs/raw/guides/guide/notes-mcp-server.mdx +2 -2
- package/.docs/raw/guides/guide/research-assistant.mdx +7 -8
- package/.docs/raw/guides/guide/stock-agent.mdx +4 -6
- package/.docs/raw/guides/guide/web-search.mdx +12 -10
- package/.docs/raw/guides/migrations/agentnetwork.mdx +4 -4
- package/.docs/raw/guides/migrations/ai-sdk-v4-to-v5.mdx +1 -1
- package/.docs/raw/guides/migrations/upgrade-to-v1/agent.mdx +29 -0
- package/.docs/raw/guides/migrations/upgrade-to-v1/tools.mdx +5 -0
- package/.docs/raw/guides/migrations/upgrade-to-v1/workflows.mdx +22 -0
- package/.docs/raw/guides/migrations/vnext-to-standard-apis.mdx +2 -2
- package/.docs/raw/index.mdx +2 -2
- package/.docs/raw/mcp/overview.mdx +3 -5
- package/.docs/raw/memory/memory-processors.mdx +1 -2
- package/.docs/raw/memory/semantic-recall.mdx +7 -7
- package/.docs/raw/memory/storage/memory-with-libsql.mdx +2 -4
- package/.docs/raw/memory/storage/memory-with-mongodb.mdx +2 -4
- package/.docs/raw/memory/storage/memory-with-pg.mdx +2 -4
- package/.docs/raw/memory/storage/memory-with-upstash.mdx +2 -4
- package/.docs/raw/memory/threads-and-resources.mdx +3 -3
- package/.docs/raw/memory/working-memory.mdx +4 -5
- package/.docs/raw/{logging.mdx → observability/logging.mdx} +1 -1
- package/.docs/raw/observability/overview.mdx +2 -2
- package/.docs/raw/observability/tracing/exporters/otel.mdx +21 -2
- package/.docs/raw/observability/tracing/exporters/posthog.mdx +107 -0
- package/.docs/raw/observability/tracing/overview.mdx +3 -2
- package/.docs/raw/rag/chunking-and-embedding.mdx +16 -17
- package/.docs/raw/rag/overview.mdx +3 -2
- package/.docs/raw/rag/retrieval.mdx +20 -32
- package/.docs/raw/reference/agents/agent.mdx +7 -10
- package/.docs/raw/reference/agents/generate.mdx +11 -92
- package/.docs/raw/reference/agents/generateLegacy.mdx +2 -2
- package/.docs/raw/reference/agents/getLLM.mdx +1 -1
- package/.docs/raw/reference/agents/network.mdx +5 -88
- package/.docs/raw/reference/cli/mastra.mdx +2 -1
- package/.docs/raw/reference/client-js/agents.mdx +3 -3
- package/.docs/raw/reference/core/getLogger.mdx +1 -1
- package/.docs/raw/reference/core/listLogs.mdx +1 -1
- package/.docs/raw/reference/core/listLogsByRunId.mdx +1 -1
- package/.docs/raw/reference/core/mastra-model-gateway.mdx +5 -19
- package/.docs/raw/reference/core/setLogger.mdx +1 -1
- package/.docs/raw/reference/core/setTelemetry.mdx +1 -1
- package/.docs/raw/reference/evals/answer-relevancy.mdx +28 -98
- package/.docs/raw/reference/evals/answer-similarity.mdx +12 -258
- package/.docs/raw/reference/evals/bias.mdx +29 -87
- package/.docs/raw/reference/evals/completeness.mdx +31 -90
- package/.docs/raw/reference/evals/content-similarity.mdx +28 -88
- package/.docs/raw/reference/evals/context-precision.mdx +28 -130
- package/.docs/raw/reference/evals/context-relevance.mdx +11 -11
- package/.docs/raw/reference/evals/faithfulness.mdx +28 -101
- package/.docs/raw/reference/evals/hallucination.mdx +28 -103
- package/.docs/raw/reference/evals/keyword-coverage.mdx +28 -107
- package/.docs/raw/reference/evals/noise-sensitivity.mdx +11 -11
- package/.docs/raw/reference/evals/prompt-alignment.mdx +15 -15
- package/.docs/raw/reference/evals/textual-difference.mdx +27 -100
- package/.docs/raw/reference/evals/tone-consistency.mdx +25 -98
- package/.docs/raw/reference/evals/tool-call-accuracy.mdx +7 -7
- package/.docs/raw/reference/evals/toxicity.mdx +29 -92
- package/.docs/raw/reference/memory/memory-class.mdx +5 -7
- package/.docs/raw/reference/observability/tracing/exporters/posthog.mdx +132 -0
- package/.docs/raw/reference/processors/batch-parts-processor.mdx +1 -1
- package/.docs/raw/reference/processors/language-detector.mdx +1 -1
- package/.docs/raw/reference/processors/moderation-processor.mdx +2 -2
- package/.docs/raw/reference/processors/pii-detector.mdx +2 -2
- package/.docs/raw/reference/processors/prompt-injection-detector.mdx +1 -1
- package/.docs/raw/reference/processors/system-prompt-scrubber.mdx +2 -3
- package/.docs/raw/reference/processors/token-limiter-processor.mdx +2 -2
- package/.docs/raw/reference/processors/unicode-normalizer.mdx +1 -1
- package/.docs/raw/reference/rag/embeddings.mdx +5 -5
- package/.docs/raw/reference/rag/rerank.mdx +1 -2
- package/.docs/raw/reference/rag/rerankWithScorer.mdx +0 -1
- package/.docs/raw/reference/streaming/agents/stream.mdx +11 -93
- package/.docs/raw/reference/templates/overview.mdx +1 -4
- package/.docs/raw/reference/tools/client.mdx +1 -2
- package/.docs/raw/reference/tools/create-tool.mdx +132 -0
- package/.docs/raw/reference/tools/graph-rag-tool.mdx +5 -5
- package/.docs/raw/reference/tools/mcp-client.mdx +2 -4
- package/.docs/raw/reference/tools/mcp-server.mdx +1 -2
- package/.docs/raw/reference/tools/vector-query-tool.mdx +14 -15
- package/.docs/raw/reference/vectors/chroma.mdx +81 -1
- package/.docs/raw/reference/vectors/couchbase.mdx +24 -17
- package/.docs/raw/reference/vectors/lance.mdx +38 -22
- package/.docs/raw/reference/vectors/libsql.mdx +35 -2
- package/.docs/raw/reference/vectors/mongodb.mdx +35 -2
- package/.docs/raw/reference/vectors/opensearch.mdx +37 -16
- package/.docs/raw/reference/vectors/pg.mdx +43 -36
- package/.docs/raw/reference/vectors/pinecone.mdx +48 -1
- package/.docs/raw/reference/vectors/qdrant.mdx +36 -1
- package/.docs/raw/reference/vectors/turbopuffer.mdx +74 -0
- package/.docs/raw/reference/voice/composite-voice.mdx +71 -28
- package/.docs/raw/reference/voice/openai-realtime.mdx +2 -2
- package/.docs/raw/reference/voice/voice.addInstructions.mdx +2 -3
- package/.docs/raw/reference/voice/voice.addTools.mdx +1 -1
- package/.docs/raw/reference/voice/voice.answer.mdx +1 -1
- package/.docs/raw/reference/voice/voice.close.mdx +1 -1
- package/.docs/raw/reference/voice/voice.connect.mdx +1 -1
- package/.docs/raw/reference/voice/voice.listen.mdx +86 -52
- package/.docs/raw/reference/voice/voice.off.mdx +1 -1
- package/.docs/raw/reference/voice/voice.on.mdx +1 -1
- package/.docs/raw/reference/voice/voice.send.mdx +1 -1
- package/.docs/raw/reference/voice/voice.speak.mdx +75 -40
- package/.docs/raw/reference/voice/voice.updateConfig.mdx +1 -1
- package/.docs/raw/server-db/mastra-client.mdx +1 -2
- package/.docs/raw/streaming/overview.mdx +20 -9
- package/.docs/raw/streaming/tool-streaming.mdx +47 -4
- package/.docs/raw/tools-mcp/advanced-usage.mdx +1 -2
- package/.docs/raw/tools-mcp/mcp-overview.mdx +3 -5
- package/.docs/raw/voice/overview.mdx +87 -40
- package/.docs/raw/voice/speech-to-speech.mdx +4 -4
- package/.docs/raw/voice/speech-to-text.mdx +1 -2
- package/.docs/raw/voice/text-to-speech.mdx +1 -2
- package/.docs/raw/workflows/control-flow.mdx +180 -0
- package/.docs/raw/workflows/overview.mdx +1 -1
- package/CHANGELOG.md +17 -0
- package/dist/{chunk-5NJC7NRO.js → chunk-4CM2BQNP.js} +24 -4
- package/dist/prepare-docs/package-changes.d.ts.map +1 -1
- package/dist/prepare-docs/prepare.js +1 -1
- package/dist/stdio.js +1 -1
- package/package.json +7 -7
- package/.docs/raw/frameworks/agentic-uis/cedar-os.mdx +0 -102
- package/.docs/raw/frameworks/agentic-uis/openrouter.mdx +0 -179
- package/.docs/raw/frameworks/web-frameworks/next-js.mdx +0 -379
- package/.docs/raw/getting-started/quickstart.mdx +0 -27
- package/.docs/raw/getting-started/templates.mdx +0 -73
- /package/.docs/raw/{frameworks/agentic-uis → guides/build-your-ui}/assistant-ui.mdx +0 -0
- /package/.docs/raw/{frameworks/agentic-uis → guides/build-your-ui}/copilotkit.mdx +0 -0
package/.docs/raw/voice/overview.mdx

@@ -17,7 +17,6 @@ To learn how to integrate voice capabilities into your agents, check out the [Ad
 
 ```typescript
 import { Agent } from "@mastra/core/agent";
-import { openai } from "@ai-sdk/openai";
 import { OpenAIVoice } from "@mastra/voice-openai";
 
 // Initialize OpenAI voice for TTS
@@ -27,7 +26,7 @@ const voiceAgent = new Agent({
   name: "Voice Agent",
   instructions:
     "You are a voice assistant that can help users with their tasks.",
-  model: openai
+  model: "openai/gpt-5.1",
   voice: new OpenAIVoice(),
 });
 ```
@@ -46,7 +45,6 @@ For detailed configuration options and advanced features, check out our [Text-to
 
 ```typescript
 import { Agent } from "@mastra/core/agent";
-import { openai } from "@ai-sdk/openai";
 import { OpenAIVoice } from "@mastra/voice-openai";
 import { playAudio } from "@mastra/node-audio";
 
@@ -55,7 +53,7 @@ const voiceAgent = new Agent({
   name: "Voice Agent",
   instructions:
     "You are a voice assistant that can help users with their tasks.",
-  model: openai
+  model: "openai/gpt-5.1",
   voice: new OpenAIVoice(),
 });
 
@@ -77,7 +75,6 @@ Visit the [OpenAI Voice Reference](/reference/v1/voice/openai) for more informat
 
 ```typescript
 import { Agent } from "@mastra/core/agent";
-import { openai } from "@ai-sdk/openai";
 import { AzureVoice } from "@mastra/voice-azure";
 import { playAudio } from "@mastra/node-audio";
 
@@ -86,7 +83,7 @@ const voiceAgent = new Agent({
   name: "Voice Agent",
   instructions:
     "You are a voice assistant that can help users with their tasks.",
-  model: openai
+  model: "openai/gpt-5.1",
   voice: new AzureVoice(),
 });
 
@@ -107,7 +104,6 @@ Visit the [Azure Voice Reference](/reference/v1/voice/azure) for more informatio
 
 ```typescript
 import { Agent } from "@mastra/core/agent";
-import { openai } from "@ai-sdk/openai";
 import { ElevenLabsVoice } from "@mastra/voice-elevenlabs";
 import { playAudio } from "@mastra/node-audio";
 
@@ -116,7 +112,7 @@ const voiceAgent = new Agent({
   name: "Voice Agent",
   instructions:
     "You are a voice assistant that can help users with their tasks.",
-  model: openai
+  model: "openai/gpt-5.1",
   voice: new ElevenLabsVoice(),
 });
 
@@ -137,7 +133,6 @@ Visit the [ElevenLabs Voice Reference](/reference/v1/voice/elevenlabs) for more
 
 ```typescript
 import { Agent } from "@mastra/core/agent";
-import { openai } from "@ai-sdk/openai";
 import { PlayAIVoice } from "@mastra/voice-playai";
 import { playAudio } from "@mastra/node-audio";
 
@@ -146,7 +141,7 @@ const voiceAgent = new Agent({
   name: "Voice Agent",
   instructions:
     "You are a voice assistant that can help users with their tasks.",
-  model: openai
+  model: "openai/gpt-5.1",
   voice: new PlayAIVoice(),
 });
 
@@ -167,7 +162,6 @@ Visit the [PlayAI Voice Reference](/reference/v1/voice/playai) for more informat
 
 ```typescript
 import { Agent } from "@mastra/core/agent";
-import { openai } from "@ai-sdk/openai";
 import { GoogleVoice } from "@mastra/voice-google";
 import { playAudio } from "@mastra/node-audio";
 
@@ -176,7 +170,7 @@ const voiceAgent = new Agent({
   name: "Voice Agent",
   instructions:
     "You are a voice assistant that can help users with their tasks.",
-  model: openai
+  model: "openai/gpt-5.1",
   voice: new GoogleVoice(),
 });
 
@@ -197,7 +191,6 @@ Visit the [Google Voice Reference](/reference/v1/voice/google) for more informat
 
 ```typescript
 import { Agent } from "@mastra/core/agent";
-import { openai } from "@ai-sdk/openai";
 import { CloudflareVoice } from "@mastra/voice-cloudflare";
 import { playAudio } from "@mastra/node-audio";
 
@@ -206,7 +199,7 @@ const voiceAgent = new Agent({
   name: "Voice Agent",
   instructions:
     "You are a voice assistant that can help users with their tasks.",
-  model: openai
+  model: "openai/gpt-5.1",
   voice: new CloudflareVoice(),
 });
 
@@ -227,7 +220,6 @@ Visit the [Cloudflare Voice Reference](/reference/v1/voice/cloudflare) for more
 
 ```typescript
 import { Agent } from "@mastra/core/agent";
-import { openai } from "@ai-sdk/openai";
 import { DeepgramVoice } from "@mastra/voice-deepgram";
 import { playAudio } from "@mastra/node-audio";
 
@@ -236,7 +228,7 @@ const voiceAgent = new Agent({
   name: "Voice Agent",
   instructions:
     "You are a voice assistant that can help users with their tasks.",
-  model: openai
+  model: "openai/gpt-5.1",
   voice: new DeepgramVoice(),
 });
 
@@ -257,7 +249,6 @@ Visit the [Deepgram Voice Reference](/reference/v1/voice/deepgram) for more info
 
 ```typescript
 import { Agent } from "@mastra/core/agent";
-import { openai } from "@ai-sdk/openai";
 import { SpeechifyVoice } from "@mastra/voice-speechify";
 import { playAudio } from "@mastra/node-audio";
 
@@ -266,7 +257,7 @@ const voiceAgent = new Agent({
   name: "Voice Agent",
   instructions:
     "You are a voice assistant that can help users with their tasks.",
-  model: openai
+  model: "openai/gpt-5.1",
   voice: new SpeechifyVoice(),
 });
 
@@ -287,7 +278,6 @@ Visit the [Speechify Voice Reference](/reference/v1/voice/speechify) for more in
 
 ```typescript
 import { Agent } from "@mastra/core/agent";
-import { openai } from "@ai-sdk/openai";
 import { SarvamVoice } from "@mastra/voice-sarvam";
 import { playAudio } from "@mastra/node-audio";
 
@@ -296,7 +286,7 @@ const voiceAgent = new Agent({
   name: "Voice Agent",
   instructions:
     "You are a voice assistant that can help users with their tasks.",
-  model: openai
+  model: "openai/gpt-5.1",
   voice: new SarvamVoice(),
 });
 
@@ -317,7 +307,6 @@ Visit the [Sarvam Voice Reference](/reference/v1/voice/sarvam) for more informat
 
 ```typescript
 import { Agent } from "@mastra/core/agent";
-import { openai } from "@ai-sdk/openai";
 import { MurfVoice } from "@mastra/voice-murf";
 import { playAudio } from "@mastra/node-audio";
 
@@ -326,7 +315,7 @@ const voiceAgent = new Agent({
   name: "Voice Agent",
   instructions:
     "You are a voice assistant that can help users with their tasks.",
-  model: openai
+  model: "openai/gpt-5.1",
   voice: new MurfVoice(),
 });
 
@@ -359,7 +348,6 @@ You can download a sample audio file from [here](https://github.com/mastra-ai/re
 
 ```typescript
 import { Agent } from "@mastra/core/agent";
-import { openai } from "@ai-sdk/openai";
 import { OpenAIVoice } from "@mastra/voice-openai";
 import { createReadStream } from "fs";
 
@@ -368,7 +356,7 @@ const voiceAgent = new Agent({
   name: "Voice Agent",
   instructions:
     "You are a voice assistant that can help users with their tasks.",
-  model: openai
+  model: "openai/gpt-5.1",
   voice: new OpenAIVoice(),
 });
 
@@ -391,7 +379,6 @@ Visit the [OpenAI Voice Reference](/reference/v1/voice/openai) for more informat
 ```typescript
 import { createReadStream } from "fs";
 import { Agent } from "@mastra/core/agent";
-import { openai } from "@ai-sdk/openai";
 import { AzureVoice } from "@mastra/voice-azure";
 import { createReadStream } from "fs";
 
@@ -400,7 +387,7 @@ const voiceAgent = new Agent({
   name: "Voice Agent",
   instructions:
     "You are a voice assistant that can help users with their tasks.",
-  model: openai
+  model: "openai/gpt-5.1",
   voice: new AzureVoice(),
 });
 
@@ -422,7 +409,6 @@ Visit the [Azure Voice Reference](/reference/v1/voice/azure) for more informatio
 
 ```typescript
 import { Agent } from "@mastra/core/agent";
-import { openai } from "@ai-sdk/openai";
 import { ElevenLabsVoice } from "@mastra/voice-elevenlabs";
 import { createReadStream } from "fs";
 
@@ -431,7 +417,7 @@ const voiceAgent = new Agent({
   name: "Voice Agent",
   instructions:
     "You are a voice assistant that can help users with their tasks.",
-  model: openai
+  model: "openai/gpt-5.1",
   voice: new ElevenLabsVoice(),
 });
 
@@ -453,7 +439,6 @@ Visit the [ElevenLabs Voice Reference](/reference/v1/voice/elevenlabs) for more
 
 ```typescript
 import { Agent } from "@mastra/core/agent";
-import { openai } from "@ai-sdk/openai";
 import { GoogleVoice } from "@mastra/voice-google";
 import { createReadStream } from "fs";
 
@@ -462,7 +447,7 @@ const voiceAgent = new Agent({
   name: "Voice Agent",
   instructions:
     "You are a voice assistant that can help users with their tasks.",
-  model: openai
+  model: "openai/gpt-5.1",
   voice: new GoogleVoice(),
 });
 
@@ -484,7 +469,6 @@ Visit the [Google Voice Reference](/reference/v1/voice/google) for more informat
 
 ```typescript
 import { Agent } from "@mastra/core/agent";
-import { openai } from "@ai-sdk/openai";
 import { CloudflareVoice } from "@mastra/voice-cloudflare";
 import { createReadStream } from "fs";
 
@@ -493,7 +477,7 @@ const voiceAgent = new Agent({
   name: "Voice Agent",
   instructions:
     "You are a voice assistant that can help users with their tasks.",
-  model: openai
+  model: "openai/gpt-5.1",
   voice: new CloudflareVoice(),
 });
 
@@ -515,7 +499,6 @@ Visit the [Cloudflare Voice Reference](/reference/v1/voice/cloudflare) for more
 
 ```typescript
 import { Agent } from "@mastra/core/agent";
-import { openai } from "@ai-sdk/openai";
 import { DeepgramVoice } from "@mastra/voice-deepgram";
 import { createReadStream } from "fs";
 
@@ -524,7 +507,7 @@ const voiceAgent = new Agent({
   name: "Voice Agent",
   instructions:
     "You are a voice assistant that can help users with their tasks.",
-  model: openai
+  model: "openai/gpt-5.1",
   voice: new DeepgramVoice(),
 });
 
@@ -546,7 +529,6 @@ Visit the [Deepgram Voice Reference](/reference/v1/voice/deepgram) for more info
 
 ```typescript
 import { Agent } from "@mastra/core/agent";
-import { openai } from "@ai-sdk/openai";
 import { SarvamVoice } from "@mastra/voice-sarvam";
 import { createReadStream } from "fs";
 
@@ -555,7 +537,7 @@ const voiceAgent = new Agent({
   name: "Voice Agent",
   instructions:
     "You are a voice assistant that can help users with their tasks.",
-  model: openai
+  model: "openai/gpt-5.1",
   voice: new SarvamVoice(),
 });
 
@@ -585,7 +567,6 @@ For detailed configuration options and advanced features, check out [Speech to S
 
 ```typescript
 import { Agent } from "@mastra/core/agent";
-import { openai } from "@ai-sdk/openai";
 import { playAudio, getMicrophoneStream } from "@mastra/node-audio";
 import { OpenAIRealtimeVoice } from "@mastra/voice-openai-realtime";
 
@@ -594,7 +575,7 @@ const voiceAgent = new Agent({
   name: "Voice Agent",
   instructions:
     "You are a voice assistant that can help users with their tasks.",
-  model: openai
+  model: "openai/gpt-5.1",
   voice: new OpenAIRealtimeVoice(),
 });
 
@@ -618,7 +599,6 @@ Visit the [OpenAI Voice Reference](/reference/v1/voice/openai-realtime) for more
 
 ```typescript
 import { Agent } from "@mastra/core/agent";
-import { openai } from "@ai-sdk/openai";
 import { playAudio, getMicrophoneStream } from "@mastra/node-audio";
 import { GeminiLiveVoice } from "@mastra/voice-google-gemini-live";
 
@@ -627,7 +607,7 @@ const voiceAgent = new Agent({
   name: "Voice Agent",
   instructions:
     "You are a voice assistant that can help users with their tasks.",
-  model: openai
+  model: "openai/gpt-5.1",
   voice: new GeminiLiveVoice({
     // Live API mode
     apiKey: process.env.GOOGLE_API_KEY,
@@ -918,6 +898,30 @@ const voice = new GeminiLiveVoice({
 Visit the [Google Gemini Live Reference](/reference/v1/voice/google-gemini-live) for more information on the Google Gemini Live voice provider.
 
 </TabItem>
+<TabItem value="aisdk" label="AI SDK">
+
+```typescript
+// AI SDK Voice Configuration
+import { CompositeVoice } from "@mastra/core/voice";
+import { openai } from "@ai-sdk/openai";
+import { elevenlabs } from "@ai-sdk/elevenlabs";
+
+// Use AI SDK models directly - no need to install separate packages
+const voice = new CompositeVoice({
+  input: openai.transcription('whisper-1'), // AI SDK transcription
+  output: elevenlabs.speech('eleven_turbo_v2'), // AI SDK speech
+});
+
+// Works seamlessly with your agent
+const voiceAgent = new Agent({
+  id: "aisdk-voice-agent",
+  name: "AI SDK Voice Agent",
+  instructions: "You are a helpful assistant with voice capabilities.",
+  model: openai("gpt-5.1"),
+  voice,
+});
+```
+</TabItem>
 </Tabs>
 
 ### Using Multiple Voice Providers
@@ -971,6 +975,49 @@ const responseAudio = await voice.speak(`You said: ${transcript}`, {
 playAudio(responseAudio);
 ```
 
+### Using AI SDK Model Providers
+
+You can also use AI SDK models directly with `CompositeVoice`:
+
+```typescript
+import { CompositeVoice } from "@mastra/core/voice";
+import { openai } from "@ai-sdk/openai";
+import { elevenlabs } from "@ai-sdk/elevenlabs";
+import { playAudio, getMicrophoneStream } from "@mastra/node-audio";
+
+// Use AI SDK models directly - no provider setup needed
+const voice = new CompositeVoice({
+  input: openai.transcription('whisper-1'), // AI SDK transcription
+  output: elevenlabs.speech('eleven_turbo_v2'), // AI SDK speech
+});
+
+// Works the same way as Mastra providers
+const audioStream = getMicrophoneStream();
+const transcript = await voice.listen(audioStream);
+
+console.log("Transcribed text:", transcript);
+
+// Convert text to speech
+const responseAudio = await voice.speak(`You said: ${transcript}`, {
+  speaker: "Rachel", // ElevenLabs voice
+});
+
+playAudio(responseAudio);
+```
+
+You can also mix AI SDK models with Mastra providers:
+
+```typescript
+import { CompositeVoice } from "@mastra/core/voice";
+import { PlayAIVoice } from "@mastra/voice-playai";
+import { groq } from "@ai-sdk/groq";
+
+const voice = new CompositeVoice({
+  input: groq.transcription('whisper-large-v3'), // AI SDK for STT
+  output: new PlayAIVoice(), // Mastra provider for TTS
+});
+```
+
 For more information on the CompositeVoice, refer to the [CompositeVoice Reference](/reference/v1/voice/composite-voice).
 
 ## More Resources

package/.docs/raw/voice/speech-to-speech.mdx

@@ -13,13 +13,13 @@ STS enables continuous bidirectional audio communication through listening to ev
 ## Configuration
 
 - **`apiKey`**: Your OpenAI API key. Falls back to the `OPENAI_API_KEY` environment variable.
-- **`model`**: The model ID to use for real-time voice interactions (e.g., `gpt-
+- **`model`**: The model ID to use for real-time voice interactions (e.g., `gpt-5.1-realtime`).
 - **`speaker`**: The default voice ID for speech synthesis. This allows you to specify which voice to use for the speech output.
 
 ```typescript
 const voice = new OpenAIRealtimeVoice({
   apiKey: "your-openai-api-key",
-  model: "gpt-
+  model: "gpt-5.1-realtime",
   speaker: "alloy", // Default voice
 });
 
@@ -38,7 +38,7 @@ const agent = new Agent({
   id: "agent",
   name: "OpenAI Realtime Agent",
   instructions: `You are a helpful assistant with real-time voice capabilities.`,
-  model: openai
+  model: "openai/gpt-5.1",
   voice: new OpenAIRealtimeVoice(),
 });
 
@@ -73,7 +73,7 @@ const agent = new Agent({
   instructions:
     "You are a helpful assistant with real-time voice capabilities.",
   // Model used for text generation; voice provider handles realtime audio
-  model: openai
+  model: "openai/gpt-5.1",
   voice: new GeminiLiveVoice({
     apiKey: process.env.GOOGLE_API_KEY,
     model: "gemini-2.0-flash-exp",

package/.docs/raw/voice/speech-to-text.mdx

@@ -54,7 +54,6 @@ The primary method for STT is the `listen()` method, which converts spoken audio
 
 ```typescript
 import { Agent } from "@mastra/core/agent";
-import { openai } from "@ai-sdk/openai";
 import { OpenAIVoice } from "@mastra/voice-openai";
 import { getMicrophoneStream } from "@mastra/node-audio";
 
@@ -65,7 +64,7 @@ const agent = new Agent({
   name: "Voice Agent",
   instructions:
     "You are a voice assistant that provides recommendations based on user input.",
-  model: openai
+  model: "openai/gpt-5.1",
   voice,
 });
 

package/.docs/raw/voice/text-to-speech.mdx

@@ -62,7 +62,6 @@ The primary method for TTS is the `speak()` method, which converts text to speec
 
 ```typescript
 import { Agent } from "@mastra/core/agent";
-import { openai } from "@ai-sdk/openai";
 import { OpenAIVoice } from "@mastra/voice-openai";
 
 const voice = new OpenAIVoice();
@@ -72,7 +71,7 @@ const agent = new Agent({
   name: "Voice Agent",
   instructions:
     "You are a voice assistant that can help users with their tasks.",
-  model: openai
+  model: "openai/gpt-5.1",
   voice,
 });
 

package/.docs/raw/workflows/control-flow.mdx

@@ -112,6 +112,70 @@ export const testWorkflow = createWorkflow({
 
 > 📹 Watch: How to run steps in parallel and optimize your Mastra workflow → [YouTube (3 minutes)](https://youtu.be/GQJxve5Hki4)
 
+### Output structure
+
+When steps run in parallel, the output is an object where each key is the step's `id` and the value is that step's output. This allows you to access each parallel step's result independently.
+
+```typescript title="src/mastra/workflows/test-workflow.ts" showLineNumbers copy
+const step1 = createStep({
+  id: "format-step",
+  inputSchema: z.object({ message: z.string() }),
+  outputSchema: z.object({ formatted: z.string() }),
+  execute: async ({ inputData }) => ({
+    formatted: inputData.message.toUpperCase()
+  })
+});
+
+const step2 = createStep({
+  id: "count-step",
+  inputSchema: z.object({ message: z.string() }),
+  outputSchema: z.object({ count: z.number() }),
+  execute: async ({ inputData }) => ({
+    count: inputData.message.length
+  })
+});
+
+const step3 = createStep({
+  id: "combine-step",
+  // The inputSchema must match the structure of parallel outputs
+  inputSchema: z.object({
+    "format-step": z.object({ formatted: z.string() }),
+    "count-step": z.object({ count: z.number() })
+  }),
+  outputSchema: z.object({ result: z.string() }),
+  execute: async ({ inputData }) => {
+    // Access each parallel step's output by its id
+    const formatted = inputData["format-step"].formatted;
+    const count = inputData["count-step"].count;
+    return {
+      result: `${formatted} (${count} characters)`
+    };
+  }
+});
+
+export const testWorkflow = createWorkflow({
+  id: "parallel-output-example",
+  inputSchema: z.object({ message: z.string() }),
+  outputSchema: z.object({ result: z.string() })
+})
+  .parallel([step1, step2])
+  .then(step3)
+  .commit();
+
+// When executed with { message: "hello" }
+// The parallel output structure will be:
+// {
+//   "format-step": { formatted: "HELLO" },
+//   "count-step": { count: 5 }
+// }
+```
+
+**Key points:**
+- Each parallel step's output is keyed by its `id`
+- All parallel steps execute simultaneously
+- The next step receives an object containing all parallel step outputs
+- You must define the `inputSchema` of the following step to match this structure
+
 ## Conditional logic with `.branch()`
 
 Use `.branch()` to choose which step to run based on a condition. All steps in a branch need the same `inputSchema` and `outputSchema` because branching requires consistent schemas so workflows can follow different paths.
@@ -158,6 +222,85 @@ export const testWorkflow = createWorkflow({
   .commit();
 ```
 
+### Output structure
+
+When using conditional branching, only one branch executes based on which condition evaluates to `true` first. The output structure is similar to `.parallel()`, where the result is keyed by the executed step's `id`.
+
+```typescript title="src/mastra/workflows/test-workflow.ts" showLineNumbers copy
+const step1 = createStep({
+  id: "initial-step",
+  inputSchema: z.object({ value: z.number() }),
+  outputSchema: z.object({ value: z.number() }),
+  execute: async ({ inputData }) => inputData
+});
+
+const highValueStep = createStep({
+  id: "high-value-step",
+  inputSchema: z.object({ value: z.number() }),
+  outputSchema: z.object({ result: z.string() }),
+  execute: async ({ inputData }) => ({
+    result: `High value: ${inputData.value}`
+  })
+});
+
+const lowValueStep = createStep({
+  id: "low-value-step",
+  inputSchema: z.object({ value: z.number() }),
+  outputSchema: z.object({ result: z.string() }),
+  execute: async ({ inputData }) => ({
+    result: `Low value: ${inputData.value}`
+  })
+});
+
+const finalStep = createStep({
+  id: "final-step",
+  // The inputSchema must account for either branch's output
+  inputSchema: z.object({
+    "high-value-step": z.object({ result: z.string() }).optional(),
+    "low-value-step": z.object({ result: z.string() }).optional()
+  }),
+  outputSchema: z.object({ message: z.string() }),
+  execute: async ({ inputData }) => {
+    // Only one branch will have executed
+    const result = inputData["high-value-step"]?.result ||
+      inputData["low-value-step"]?.result;
+    return { message: result };
+  }
+});
+
+export const testWorkflow = createWorkflow({
+  id: "branch-output-example",
+  inputSchema: z.object({ value: z.number() }),
+  outputSchema: z.object({ message: z.string() })
+})
+  .then(step1)
+  .branch([
+    [async ({ inputData }) => inputData.value > 10, highValueStep],
+    [async ({ inputData }) => inputData.value <= 10, lowValueStep]
+  ])
+  .then(finalStep)
+  .commit();
+
+// When executed with { value: 15 }
+// Only the high-value-step executes, output structure:
+// {
+//   "high-value-step": { result: "High value: 15" }
+// }
+
+// When executed with { value: 5 }
+// Only the low-value-step executes, output structure:
+// {
+//   "low-value-step": { result: "Low value: 5" }
+// }
+```
+
+**Key points:**
+- Only one branch executes based on condition evaluation order
+- The output is keyed by the executed step's `id`
+- Subsequent steps should handle all possible branch outputs
+- Use optional fields in the `inputSchema` when the next step needs to handle multiple possible branches
+- Conditions are evaluated in the order they're defined
+
 ## Input data mapping
 
 When using `.then()`, `.parallel()`, or `.branch()`, it is sometimes necessary to transform the output of a previous step to match the input of the next. In these cases you can use `.map()` to access the `inputData` and transform it to create a suitable data shape for the next step.
@@ -188,6 +331,43 @@ The `.map()` method provides additional helper functions for more complex mappin
 - [`getInitData()`](/reference/v1/workflows/workflow-methods/map#using-getinitdata): Access the workflow's initial input data
 - [`mapVariable()`](/reference/v1/workflows/workflow-methods/map#using-mapvariable): Use declarative object syntax to extract and rename fields
 
+### Parallel and Branch outputs
+
+When working with `.parallel()` or `.branch()` outputs, you can use `.map()` to transform the data structure before passing it to the next step. This is especially useful when you need to flatten or restructure the output.
+
+```typescript title="src/mastra/workflows/test-workflow.ts" showLineNumbers copy
+export const testWorkflow = createWorkflow({...})
+  .parallel([step1, step2])
+  .map(async ({ inputData }) => {
+    // Transform the parallel output structure
+    return {
+      combined: `${inputData["step1"].value} - ${inputData["step2"].value}`
+    };
+  })
+  .then(nextStep)
+  .commit();
+```
+
+You can also use the helper functions provided by `.map()`:
+
+```typescript title="src/mastra/workflows/test-workflow.ts" showLineNumbers copy
+export const testWorkflow = createWorkflow({...})
+  .branch([
+    [condition1, stepA],
+    [condition2, stepB]
+  ])
+  .map(async ({ inputData, getStepResult }) => {
+    // Access specific step results
+    const stepAResult = getStepResult("stepA");
+    const stepBResult = getStepResult("stepB");
+
+    // Return the result from whichever branch executed
+    return stepAResult || stepBResult;
+  })
+  .then(nextStep)
+  .commit();
+```
+
 ## Looping steps
 
 Workflows support different looping methods that let you repeat steps until or while a condition is met, or iterate over arrays. Loops can be combined with other control methods like `.then()`.