@mastra/mcp-docs-server 0.0.3 → 0.0.4-alpha.1
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/.docs/organized/changelogs/%40mastra%2Fastra.md +27 -27
- package/.docs/organized/changelogs/%40mastra%2Fchroma.md +27 -27
- package/.docs/organized/changelogs/%40mastra%2Fclient-js.md +29 -29
- package/.docs/organized/changelogs/%40mastra%2Fcomposio.md +26 -26
- package/.docs/organized/changelogs/%40mastra%2Fcore.md +23 -23
- package/.docs/organized/changelogs/%40mastra%2Fdeployer-cloudflare.md +36 -36
- package/.docs/organized/changelogs/%40mastra%2Fdeployer-netlify.md +35 -35
- package/.docs/organized/changelogs/%40mastra%2Fdeployer-vercel.md +35 -35
- package/.docs/organized/changelogs/%40mastra%2Fdeployer.md +32 -32
- package/.docs/organized/changelogs/%40mastra%2Fevals.md +27 -27
- package/.docs/organized/changelogs/%40mastra%2Ffirecrawl.md +29 -29
- package/.docs/organized/changelogs/%40mastra%2Fgithub.md +26 -26
- package/.docs/organized/changelogs/%40mastra%2Floggers.md +26 -26
- package/.docs/organized/changelogs/%40mastra%2Fmcp-docs-server.md +26 -0
- package/.docs/organized/changelogs/%40mastra%2Fmcp.md +27 -27
- package/.docs/organized/changelogs/%40mastra%2Fmemory.md +26 -26
- package/.docs/organized/changelogs/%40mastra%2Fpg.md +26 -26
- package/.docs/organized/changelogs/%40mastra%2Fpinecone.md +26 -26
- package/.docs/organized/changelogs/%40mastra%2Fplayground-ui.md +35 -35
- package/.docs/organized/changelogs/%40mastra%2Fqdrant.md +27 -27
- package/.docs/organized/changelogs/%40mastra%2Frag.md +26 -26
- package/.docs/organized/changelogs/%40mastra%2Fragie.md +26 -26
- package/.docs/organized/changelogs/%40mastra%2Fspeech-azure.md +26 -26
- package/.docs/organized/changelogs/%40mastra%2Fspeech-deepgram.md +26 -26
- package/.docs/organized/changelogs/%40mastra%2Fspeech-elevenlabs.md +26 -26
- package/.docs/organized/changelogs/%40mastra%2Fspeech-google.md +26 -26
- package/.docs/organized/changelogs/%40mastra%2Fspeech-ibm.md +26 -26
- package/.docs/organized/changelogs/%40mastra%2Fspeech-murf.md +26 -26
- package/.docs/organized/changelogs/%40mastra%2Fspeech-openai.md +26 -26
- package/.docs/organized/changelogs/%40mastra%2Fspeech-playai.md +26 -26
- package/.docs/organized/changelogs/%40mastra%2Fspeech-replicate.md +26 -26
- package/.docs/organized/changelogs/%40mastra%2Fspeech-speechify.md +26 -26
- package/.docs/organized/changelogs/%40mastra%2Fstabilityai.md +26 -26
- package/.docs/organized/changelogs/%40mastra%2Fturbopuffer.md +25 -0
- package/.docs/organized/changelogs/%40mastra%2Fupstash.md +31 -31
- package/.docs/organized/changelogs/%40mastra%2Fvectorize.md +30 -30
- package/.docs/organized/changelogs/%40mastra%2Fvoice-azure.md +9 -0
- package/.docs/organized/changelogs/%40mastra%2Fvoice-cloudflare.md +9 -0
- package/.docs/organized/changelogs/%40mastra%2Fvoice-deepgram.md +26 -26
- package/.docs/organized/changelogs/%40mastra%2Fvoice-elevenlabs.md +26 -26
- package/.docs/organized/changelogs/%40mastra%2Fvoice-google.md +26 -26
- package/.docs/organized/changelogs/%40mastra%2Fvoice-murf.md +26 -26
- package/.docs/organized/changelogs/%40mastra%2Fvoice-openai-realtime.md +25 -0
- package/.docs/organized/changelogs/%40mastra%2Fvoice-openai.md +26 -26
- package/.docs/organized/changelogs/%40mastra%2Fvoice-playai.md +26 -26
- package/.docs/organized/changelogs/%40mastra%2Fvoice-sarvam.md +27 -0
- package/.docs/organized/changelogs/%40mastra%2Fvoice-speechify.md +26 -26
- package/.docs/organized/changelogs/create-mastra.md +22 -22
- package/.docs/organized/changelogs/mastra.md +47 -47
- package/.docs/organized/code-examples/ai-sdk-useChat.md +2 -1
- package/.docs/raw/agents/02-adding-tools.mdx +6 -0
- package/.docs/raw/agents/02a-mcp-guide.mdx +192 -0
- package/.docs/raw/agents/03-adding-voice.mdx +8 -8
- package/.docs/raw/deployment/deployment.mdx +5 -42
- package/.docs/raw/deployment/server.mdx +45 -3
- package/.docs/raw/evals/00-overview.mdx +2 -2
- package/.docs/raw/evals/03-running-in-ci.mdx +7 -4
- package/.docs/raw/getting-started/mcp-docs-server.mdx +5 -2
- package/.docs/raw/guides/04-research-assistant.mdx +273 -0
- package/.docs/raw/local-dev/mastra-dev.mdx +2 -2
- package/.docs/raw/observability/logging.mdx +38 -0
- package/.docs/raw/observability/nextjs-tracing.mdx +102 -0
- package/.docs/raw/observability/tracing.mdx +110 -0
- package/.docs/raw/rag/overview.mdx +3 -3
- package/.docs/raw/rag/retrieval.mdx +7 -4
- package/.docs/raw/rag/vector-databases.mdx +107 -40
- package/.docs/raw/reference/client-js/memory.mdx +6 -3
- package/.docs/raw/reference/client-js/workflows.mdx +1 -0
- package/.docs/raw/reference/observability/providers/langsmith.mdx +2 -0
- package/.docs/raw/reference/rag/libsql.mdx +3 -3
- package/.docs/raw/reference/rag/upstash.mdx +50 -1
- package/.docs/raw/reference/rag/vectorize.mdx +48 -3
- package/.docs/raw/reference/tools/client.mdx +10 -2
- package/.docs/raw/reference/tools/vector-query-tool.mdx +1 -1
- package/.docs/raw/reference/voice/sarvam.mdx +260 -0
- package/.docs/raw/reference/workflows/afterEvent.mdx +76 -0
- package/.docs/raw/reference/workflows/events.mdx +305 -0
- package/.docs/raw/reference/workflows/resumeWithEvent.mdx +134 -0
- package/.docs/raw/reference/workflows/snapshots.mdx +204 -0
- package/.docs/raw/reference/workflows/step-retries.mdx +203 -0
- package/.docs/raw/voice/overview.mdx +135 -0
- package/.docs/raw/voice/speech-to-text.mdx +45 -0
- package/.docs/raw/voice/text-to-speech.mdx +52 -0
- package/.docs/raw/voice/voice-to-voice.mdx +310 -0
- package/.docs/raw/workflows/dynamic-workflows.mdx +4 -0
- package/.docs/raw/workflows/error-handling.mdx +183 -0
- package/.docs/raw/workflows/steps.mdx +12 -2
- package/.docs/raw/workflows/suspend-and-resume.mdx +207 -2
- package/.docs/raw/workflows/variables.mdx +23 -3
- package/dist/_tsup-dts-rollup.d.ts +83 -0
- package/dist/chunk-YEOOTUPA.js +191 -0
- package/dist/prepare-docs/prepare.d.ts +1 -1
- package/dist/prepare-docs/prepare.js +1 -13
- package/dist/stdio.d.ts +0 -1
- package/dist/stdio.js +352 -5
- package/package.json +9 -15
- package/.docs/raw/deployment/logging-and-tracing.mdx +0 -242
- package/dist/index.d.ts +0 -3
- package/dist/index.js +0 -19
- package/dist/prepare-docs/code-examples.d.ts +0 -4
- package/dist/prepare-docs/code-examples.js +0 -91
- package/dist/prepare-docs/copy-raw.d.ts +0 -1
- package/dist/prepare-docs/copy-raw.js +0 -41
- package/dist/prepare-docs/index.d.ts +0 -1
- package/dist/prepare-docs/index.js +0 -8
- package/dist/prepare-docs/package-changes.d.ts +0 -4
- package/dist/prepare-docs/package-changes.js +0 -92
- package/dist/sse.d.ts +0 -1
- package/dist/sse.js +0 -9
- package/dist/tools/__tests__/blog.test.d.ts +0 -1
- package/dist/tools/__tests__/blog.test.js +0 -48
- package/dist/tools/__tests__/changes.test.d.ts +0 -1
- package/dist/tools/__tests__/changes.test.js +0 -37
- package/dist/tools/__tests__/docs.test.d.ts +0 -1
- package/dist/tools/__tests__/docs.test.js +0 -46
- package/dist/tools/__tests__/examples.test.d.ts +0 -1
- package/dist/tools/__tests__/examples.test.js +0 -53
- package/dist/tools/blog.d.ts +0 -15
- package/dist/tools/blog.js +0 -73
- package/dist/tools/changes.d.ts +0 -11
- package/dist/tools/changes.js +0 -69
- package/dist/tools/docs.d.ts +0 -11
- package/dist/tools/docs.js +0 -176
- package/dist/tools/examples.d.ts +0 -11
- package/dist/tools/examples.js +0 -61
- package/dist/utils.d.ts +0 -6
- package/dist/utils.js +0 -9
|
@@ -0,0 +1,45 @@
|
|
|
1
|
+
---
|
|
2
|
+
title: Speech-to-Text (STT) in Mastra | Mastra Docs
|
|
3
|
+
description: Overview of Speech-to-Text capabilities in Mastra, including configuration, usage, and integration with voice providers.
|
|
4
|
+
---
|
|
5
|
+
|
|
6
|
+
# Speech-to-Text (STT)
|
|
7
|
+
|
|
8
|
+
Speech-to-Text (STT) in Mastra provides a standardized interface for converting audio input into text across multiple service providers. This section covers STT configuration and usage. Check out the [Adding Voice to Agents](../agents/03-adding-voice.mdx) documentation to learn how to use STT in an agent.
|
|
9
|
+
|
|
10
|
+
## Speech Configuration
|
|
11
|
+
|
|
12
|
+
To use STT in Mastra, you need to provide a `listeningModel` configuration when initializing the voice provider. This configuration includes parameters such as:
|
|
13
|
+
|
|
14
|
+
- **`name`**: The specific STT model to use.
|
|
15
|
+
- **`apiKey`**: Your API key for authentication.
|
|
16
|
+
- **Provider-specific options**: Additional options that may be required or supported by the specific voice provider.
|
|
17
|
+
|
|
18
|
+
**Note**: All of these parameters are optional. You can use the default settings provided by the voice provider, which will depend on the specific provider you are using.
|
|
19
|
+
|
|
20
|
+
### Example Configuration
|
|
21
|
+
|
|
22
|
+
```typescript
|
|
23
|
+
const voice = new OpenAIVoice({
|
|
24
|
+
listeningModel: {
|
|
25
|
+
name: "whisper-1",
|
|
26
|
+
apiKey: process.env.OPENAI_API_KEY,
|
|
27
|
+
},
|
|
28
|
+
});
|
|
29
|
+
|
|
30
|
+
// If using default settings the configuration can be simplified to:
|
|
31
|
+
const voice = new OpenAIVoice();
|
|
32
|
+
```
|
|
33
|
+
|
|
34
|
+
## Using the Listen Method
|
|
35
|
+
|
|
36
|
+
The primary method for STT is the `listen()` method, which converts spoken audio into text. Here's how to use it:
|
|
37
|
+
|
|
38
|
+
```typescript
|
|
39
|
+
const audioStream = getMicrophoneStream(); // Assume this function gets audio input
|
|
40
|
+
const transcript = await voice.listen(audioStream, {
|
|
41
|
+
filetype: "m4a", // Optional: specify the audio file type
|
|
42
|
+
});
|
|
43
|
+
```
|
|
44
|
+
|
|
45
|
+
**Note**: If you are using a voice-to-voice provider, such as `OpenAIRealtimeVoice`, the `listen()` method will emit a "writing" event instead of returning a transcript directly.
|
|
@@ -0,0 +1,52 @@
|
|
|
1
|
+
---
|
|
2
|
+
title: Text-to-Speech (TTS) in Mastra | Mastra Docs
|
|
3
|
+
description: Overview of Text-to-Speech capabilities in Mastra, including configuration, usage, and integration with voice providers.
|
|
4
|
+
---
|
|
5
|
+
|
|
6
|
+
# Text-to-Speech (TTS)
|
|
7
|
+
|
|
8
|
+
Text-to-Speech (TTS) in Mastra offers a unified API for synthesizing spoken audio from text using various provider services. This section explains TTS configuration options and implementation methods. For integrating TTS capabilities with agents, refer to the [Adding Voice to Agents](../agents/03-adding-voice.mdx) documentation.
|
|
9
|
+
|
|
10
|
+
## Speech Configuration
|
|
11
|
+
|
|
12
|
+
To use TTS in Mastra, you need to provide a `speechModel` configuration when initializing the voice provider. This configuration includes parameters such as:
|
|
13
|
+
|
|
14
|
+
- **`name`**: The specific TTS model to use.
|
|
15
|
+
- **`apiKey`**: Your API key for authentication.
|
|
16
|
+
- **Provider-specific options**: Additional options that may be required or supported by the specific voice provider.
|
|
17
|
+
|
|
18
|
+
The **`speaker`** option is specified separately and allows you to select different voices for speech synthesis.
|
|
19
|
+
|
|
20
|
+
**Note**: All of these parameters are optional. You can use the default settings provided by the voice provider, which will depend on the specific provider you are using.
|
|
21
|
+
|
|
22
|
+
### Example Configuration
|
|
23
|
+
|
|
24
|
+
```typescript
|
|
25
|
+
const voice = new OpenAIVoice({
|
|
26
|
+
speechModel: {
|
|
27
|
+
name: "tts-1-hd",
|
|
28
|
+
apiKey: process.env.OPENAI_API_KEY
|
|
29
|
+
},
|
|
30
|
+
speaker: "alloy",
|
|
31
|
+
});
|
|
32
|
+
|
|
33
|
+
// If using default settings the configuration can be simplified to:
|
|
34
|
+
const voice = new OpenAIVoice();
|
|
35
|
+
```
|
|
36
|
+
|
|
37
|
+
## Using the Speak Method
|
|
38
|
+
|
|
39
|
+
The primary method for TTS is the `speak()` method, which converts text to speech. This method can accept options that allow you to specify the speaker and other provider-specific options. Here's how to use it:
|
|
40
|
+
|
|
41
|
+
```typescript
|
|
42
|
+
const readableStream = await voice.speak("Hello, world!", {
|
|
43
|
+
speaker: "default", // Optional: specify a speaker
|
|
44
|
+
properties: {
|
|
45
|
+
speed: 1.0, // Optional: adjust speech speed
|
|
46
|
+
pitch: "default", // Optional: specify pitch if supported
|
|
47
|
+
},
|
|
48
|
+
});
|
|
49
|
+
```
|
|
50
|
+
|
|
51
|
+
**Note**: If you are using a voice-to-voice provider, such as `OpenAIRealtimeVoice`, the `speak()` method will emit a "speaking" event instead of returning a Readable Stream.
|
|
52
|
+
|
|
@@ -0,0 +1,310 @@
|
|
|
1
|
+
---
|
|
2
|
+
title: Voice-to-Voice Capabilities in Mastra | Mastra Docs
|
|
3
|
+
description: Overview of voice-to-voice capabilities in Mastra, including real-time interactions and event-driven architecture.
|
|
4
|
+
---
|
|
5
|
+
|
|
6
|
+
# Voice-to-Voice Capabilities in Mastra
|
|
7
|
+
|
|
8
|
+
## Introduction
|
|
9
|
+
|
|
10
|
+
Voice-to-Voice in Mastra provides a standardized interface for real-time speech-to-speech interactions across multiple service providers. This section covers configuration, event-driven architecture, and implementation methods for creating conversational voice experiences. For integrating Voice-to-Voice capabilities with agents, refer to the [Adding Voice to Agents](../agents/03-adding-voice.mdx) documentation.
|
|
11
|
+
|
|
12
|
+
## Real-time Voice Interactions
|
|
13
|
+
|
|
14
|
+
Mastra's real-time voice system enables continuous bidirectional audio communication through an event-driven architecture. Unlike separate TTS and STT operations, real-time voice maintains an open connection that processes speech continuously in both directions.
|
|
15
|
+
|
|
16
|
+
### Example Implementation
|
|
17
|
+
|
|
18
|
+
```typescript
|
|
19
|
+
import { Agent } from "@mastra/core/agent";
|
|
20
|
+
import { OpenAIRealtimeVoice } from "@mastra/voice-openai-realtime";
|
|
21
|
+
|
|
22
|
+
const agent = new Agent({
|
|
23
|
+
name: 'Agent',
|
|
24
|
+
instructions: `You are a helpful assistant with real-time voice capabilities.`,
|
|
25
|
+
model: openai('gpt-4o'),
|
|
26
|
+
voice: new OpenAIRealtimeVoice(),
|
|
27
|
+
});
|
|
28
|
+
|
|
29
|
+
// Connect to the voice service
|
|
30
|
+
await agent.voice.connect();
|
|
31
|
+
|
|
32
|
+
// Listen for agent audio responses
|
|
33
|
+
agent.voice.on('speaking', ({ audio }) => {
|
|
34
|
+
playAudio(audio);
|
|
35
|
+
});
|
|
36
|
+
|
|
37
|
+
// Initiate the conversation
|
|
38
|
+
await agent.voice.speak('How can I help you today?');
|
|
39
|
+
|
|
40
|
+
// Send continuous audio from the microphone
|
|
41
|
+
const micStream = getMicrophoneStream();
|
|
42
|
+
await agent.voice.send(micStream);
|
|
43
|
+
```
|
|
44
|
+
|
|
45
|
+
## Event-Driven Architecture
|
|
46
|
+
|
|
47
|
+
Mastra's voice-to-voice implementation is built on an event-driven architecture. Developers register event listeners to handle incoming audio progressively, allowing for more responsive interactions than waiting for complete audio responses.
|
|
48
|
+
|
|
49
|
+
|
|
50
|
+
## Configuration
|
|
51
|
+
|
|
52
|
+
When initializing a voice-to-voice provider, you can provide configuration options to customize its behavior:
|
|
53
|
+
|
|
54
|
+
### Constructor Options
|
|
55
|
+
|
|
56
|
+
- **`chatModel`**: Configuration for the OpenAI realtime model.
|
|
57
|
+
- **`apiKey`**: Your OpenAI API key. Falls back to the `OPENAI_API_KEY` environment variable.
|
|
58
|
+
- **`model`**: The model ID to use for real-time voice interactions (e.g., `gpt-4o-mini-realtime`).
|
|
59
|
+
- **`options`**: Additional options for the realtime client, such as session configuration.
|
|
60
|
+
|
|
61
|
+
- **`speaker`**: The default voice ID for speech synthesis. This allows you to specify which voice to use for the speech output.
|
|
62
|
+
|
|
63
|
+
### Example Configuration
|
|
64
|
+
|
|
65
|
+
```typescript
|
|
66
|
+
const voice = new OpenAIRealtimeVoice({
|
|
67
|
+
chatModel: {
|
|
68
|
+
apiKey: 'your-openai-api-key',
|
|
69
|
+
model: 'gpt-4o-mini-realtime',
|
|
70
|
+
options: {
|
|
71
|
+
sessionConfig: {
|
|
72
|
+
turn_detection: {
|
|
73
|
+
type: 'server_vad',
|
|
74
|
+
threshold: 0.6,
|
|
75
|
+
silence_duration_ms: 1200,
|
|
76
|
+
},
|
|
77
|
+
},
|
|
78
|
+
},
|
|
79
|
+
},
|
|
80
|
+
speaker: 'alloy', // Default voice
|
|
81
|
+
});
|
|
82
|
+
|
|
83
|
+
// If using default settings the configuration can be simplified to:
|
|
84
|
+
const voice = new OpenAIRealtimeVoice();
|
|
85
|
+
```
|
|
86
|
+
|
|
87
|
+
## Core Methods
|
|
88
|
+
|
|
89
|
+
The `OpenAIRealtimeVoice` class provides the following core methods for voice interactions:
|
|
90
|
+
|
|
91
|
+
### connect()
|
|
92
|
+
|
|
93
|
+
Establishes a connection to the OpenAI realtime service.
|
|
94
|
+
|
|
95
|
+
**Usage:**
|
|
96
|
+
```typescript
|
|
97
|
+
await voice.connect();
|
|
98
|
+
```
|
|
99
|
+
|
|
100
|
+
**Notes:**
|
|
101
|
+
- Must be called before using any other interaction methods
|
|
102
|
+
- Returns a Promise that resolves when the connection is established
|
|
103
|
+
|
|
104
|
+
### speak(text, options?)
|
|
105
|
+
|
|
106
|
+
Emits a speaking event using the configured voice model.
|
|
107
|
+
|
|
108
|
+
**Parameters:**
|
|
109
|
+
- `text`: String content to be spoken
|
|
110
|
+
- `options`: Optional configuration object
|
|
111
|
+
- `speaker`: Voice ID to use (overrides default)
|
|
112
|
+
- `properties`: Additional provider-specific properties
|
|
113
|
+
|
|
114
|
+
**Usage:**
|
|
115
|
+
```typescript
|
|
116
|
+
voice.speak('Hello, how can I help you today?', {
|
|
117
|
+
speaker: 'alloy'
|
|
118
|
+
});
|
|
119
|
+
```
|
|
120
|
+
|
|
121
|
+
**Notes:**
|
|
122
|
+
- Emits 'speaking' events rather than returning an audio stream
|
|
123
|
+
|
|
124
|
+
### listen(audioInput, options?)
|
|
125
|
+
|
|
126
|
+
Processes audio input for speech recognition.
|
|
127
|
+
|
|
128
|
+
**Parameters:**
|
|
129
|
+
- `audioInput`: Readable stream of audio data
|
|
130
|
+
- `options`: Optional configuration object
|
|
131
|
+
- `filetype`: Audio format (default: 'mp3')
|
|
132
|
+
- Additional provider-specific options
|
|
133
|
+
|
|
134
|
+
**Usage:**
|
|
135
|
+
```typescript
|
|
136
|
+
const audioData = getMicrophoneStream();
|
|
137
|
+
voice.listen(audioData, {
|
|
138
|
+
filetype: 'wav'
|
|
139
|
+
});
|
|
140
|
+
```
|
|
141
|
+
|
|
142
|
+
**Notes:**
|
|
143
|
+
- Emits 'writing' events with transcribed text
|
|
144
|
+
|
|
145
|
+
### send(audioStream)
|
|
146
|
+
|
|
147
|
+
Streams audio data in real-time for continuous processing.
|
|
148
|
+
|
|
149
|
+
**Parameters:**
|
|
150
|
+
- `audioStream`: Readable stream of audio data
|
|
151
|
+
|
|
152
|
+
**Usage:**
|
|
153
|
+
```typescript
|
|
154
|
+
const micStream = getMicrophoneStream();
|
|
155
|
+
await voice.send(micStream);
|
|
156
|
+
```
|
|
157
|
+
|
|
158
|
+
**Notes:**
|
|
159
|
+
- Used for continuous audio streaming scenarios like live microphone input
|
|
160
|
+
- Returns a Promise that resolves when the stream is accepted
|
|
161
|
+
|
|
162
|
+
### answer(params)
|
|
163
|
+
|
|
164
|
+
Sends a response to the OpenAI Realtime API.
|
|
165
|
+
|
|
166
|
+
**Parameters:**
|
|
167
|
+
- `params`: The parameters object
|
|
168
|
+
- `options`: Configuration options for the response
|
|
169
|
+
- `content`: Text content of the response
|
|
170
|
+
- `voice`: Voice ID to use for the response
|
|
171
|
+
|
|
172
|
+
**Usage:**
|
|
173
|
+
```typescript
|
|
174
|
+
await voice.answer({
|
|
175
|
+
options: {
|
|
176
|
+
content: "Hello, how can I help you today?",
|
|
177
|
+
voice: "alloy"
|
|
178
|
+
}
|
|
179
|
+
});
|
|
180
|
+
```
|
|
181
|
+
|
|
182
|
+
**Notes:**
|
|
183
|
+
- Triggers a response to the real-time session
|
|
184
|
+
- Returns a Promise that resolves when the response has been sent
|
|
185
|
+
|
|
186
|
+
## Utility Methods
|
|
187
|
+
|
|
188
|
+
### updateConfig(config)
|
|
189
|
+
|
|
190
|
+
Updates the session configuration for the voice instance.
|
|
191
|
+
|
|
192
|
+
**Parameters:**
|
|
193
|
+
- `config`: New session configuration object
|
|
194
|
+
|
|
195
|
+
**Usage:**
|
|
196
|
+
```typescript
|
|
197
|
+
voice.updateConfig({
|
|
198
|
+
turn_detection: {
|
|
199
|
+
type: 'server_vad',
|
|
200
|
+
threshold: 0.6,
|
|
201
|
+
silence_duration_ms: 1200,
|
|
202
|
+
}
|
|
203
|
+
});
|
|
204
|
+
```
|
|
205
|
+
|
|
206
|
+
### addTools(tools)
|
|
207
|
+
|
|
208
|
+
Adds a set of tools to the voice instance.
|
|
209
|
+
|
|
210
|
+
**Parameters:**
|
|
211
|
+
- `tools`: Array of tool objects that the model can call
|
|
212
|
+
|
|
213
|
+
**Usage:**
|
|
214
|
+
```typescript
|
|
215
|
+
voice.addTools([
|
|
216
|
+
createTool({
|
|
217
|
+
id: "Get Weather Information",
|
|
218
|
+
inputSchema: z.object({
|
|
219
|
+
city: z.string(),
|
|
220
|
+
}),
|
|
221
|
+
description: `Fetches the current weather information for a given city`,
|
|
222
|
+
execute: async ({ city }) => {...},
|
|
223
|
+
})
|
|
224
|
+
]);
|
|
225
|
+
```
|
|
226
|
+
|
|
227
|
+
### close()
|
|
228
|
+
|
|
229
|
+
Disconnects from the OpenAI realtime session and cleans up resources.
|
|
230
|
+
|
|
231
|
+
**Usage:**
|
|
232
|
+
```typescript
|
|
233
|
+
voice.close();
|
|
234
|
+
```
|
|
235
|
+
|
|
236
|
+
**Notes:**
|
|
237
|
+
- Should be called when you're done with the voice instance to free resources
|
|
238
|
+
|
|
239
|
+
### on(event, callback)
|
|
240
|
+
|
|
241
|
+
Registers an event listener for voice events.
|
|
242
|
+
|
|
243
|
+
**Parameters:**
|
|
244
|
+
- `event`: Event name ('speaking', 'writing', or 'error')
|
|
245
|
+
- `callback`: Function to call when the event occurs
|
|
246
|
+
|
|
247
|
+
**Usage:**
|
|
248
|
+
```typescript
|
|
249
|
+
voice.on('speaking', ({ audio }) => {
|
|
250
|
+
playAudio(audio);
|
|
251
|
+
});
|
|
252
|
+
```
|
|
253
|
+
|
|
254
|
+
### off(event, callback)
|
|
255
|
+
|
|
256
|
+
Removes a previously registered event listener.
|
|
257
|
+
|
|
258
|
+
**Parameters:**
|
|
259
|
+
- `event`: Event name
|
|
260
|
+
- `callback`: The callback function to remove
|
|
261
|
+
|
|
262
|
+
**Usage:**
|
|
263
|
+
```typescript
|
|
264
|
+
voice.off('speaking', callbackFunction);
|
|
265
|
+
```
|
|
266
|
+
|
|
267
|
+
## Events
|
|
268
|
+
|
|
269
|
+
The `OpenAIRealtimeVoice` class emits the following events:
|
|
270
|
+
|
|
271
|
+
### speaking
|
|
272
|
+
|
|
273
|
+
Emitted when audio data is received from the model.
|
|
274
|
+
|
|
275
|
+
**Event payload:**
|
|
276
|
+
- `audio`: A chunk of audio data as a readable stream
|
|
277
|
+
|
|
278
|
+
```typescript
|
|
279
|
+
agent.voice.on('speaking', ({ audio }) => {
|
|
280
|
+
playAudio(audio); // Handle audio chunks as they're generated
|
|
281
|
+
});
|
|
282
|
+
```
|
|
283
|
+
|
|
284
|
+
### writing
|
|
285
|
+
|
|
286
|
+
Emitted when transcribed text is available.
|
|
287
|
+
|
|
288
|
+
**Event payload:**
|
|
289
|
+
- `text`: The transcribed text
|
|
290
|
+
- `role`: The role of the speaker (user or assistant)
|
|
291
|
+
- `done`: Boolean indicating if this is the final transcription
|
|
292
|
+
|
|
293
|
+
```typescript
|
|
294
|
+
agent.voice.on('writing', ({ text, role }) => {
|
|
295
|
+
console.log(`${role}: ${text}`); // Log who said what
|
|
296
|
+
});
|
|
297
|
+
```
|
|
298
|
+
|
|
299
|
+
### error
|
|
300
|
+
|
|
301
|
+
Emitted when an error occurs.
|
|
302
|
+
|
|
303
|
+
**Event payload:**
|
|
304
|
+
- Error object with details about what went wrong
|
|
305
|
+
|
|
306
|
+
```typescript
|
|
307
|
+
agent.voice.on('error', (error) => {
|
|
308
|
+
console.error('Voice error:', error);
|
|
309
|
+
});
|
|
310
|
+
```
|
|
@@ -99,6 +99,10 @@ const mainWorkflow = new Workflow({
|
|
|
99
99
|
|
|
100
100
|
mainWorkflow.step(createDynamicWorkflow).commit();
|
|
101
101
|
|
|
102
|
+
// Register the workflow with Mastra
|
|
103
|
+
export const mastra = new Mastra({
|
|
104
|
+
workflows: { mainWorkflow },
|
|
105
|
+
});
|
|
102
106
|
|
|
103
107
|
const run = mainWorkflow.createRun();
|
|
104
108
|
const result = await run.start({
|
|
@@ -0,0 +1,183 @@
|
|
|
1
|
+
---
|
|
2
|
+
title: "Error Handling in Workflows | Mastra Docs"
|
|
3
|
+
description: "Learn how to handle errors in Mastra workflows using step retries, conditional branching, and monitoring."
|
|
4
|
+
---
|
|
5
|
+
|
|
6
|
+
# Error Handling in Workflows
|
|
7
|
+
|
|
8
|
+
Robust error handling is essential for production workflows. Mastra provides several mechanisms to handle errors gracefully, allowing your workflows to recover from failures or gracefully degrade when necessary.
|
|
9
|
+
|
|
10
|
+
## Overview
|
|
11
|
+
|
|
12
|
+
Error handling in Mastra workflows can be implemented using:
|
|
13
|
+
|
|
14
|
+
1. **Step Retries** - Automatically retry failed steps
|
|
15
|
+
2. **Conditional Branching** - Create alternative paths based on step success or failure
|
|
16
|
+
3. **Error Monitoring** - Watch workflows for errors and handle them programmatically
|
|
17
|
+
4. **Result Status Checks** - Check the status of previous steps in subsequent steps
|
|
18
|
+
|
|
19
|
+
## Step Retries
|
|
20
|
+
|
|
21
|
+
Mastra provides a built-in retry mechanism for steps that fail due to transient errors. This is particularly useful for steps that interact with external services or resources that might experience temporary unavailability.
|
|
22
|
+
|
|
23
|
+
### Basic Retry Configuration
|
|
24
|
+
|
|
25
|
+
You can configure retries at the workflow level or for individual steps:
|
|
26
|
+
|
|
27
|
+
```typescript
|
|
28
|
+
// Workflow-level retry configuration
|
|
29
|
+
const workflow = new Workflow({
|
|
30
|
+
name: 'my-workflow',
|
|
31
|
+
retryConfig: {
|
|
32
|
+
attempts: 3, // Number of retry attempts
|
|
33
|
+
delay: 1000, // Delay between retries in milliseconds
|
|
34
|
+
},
|
|
35
|
+
});
|
|
36
|
+
|
|
37
|
+
// Step-level retry configuration (overrides workflow-level)
|
|
38
|
+
const apiStep = new Step({
|
|
39
|
+
id: 'callApi',
|
|
40
|
+
execute: async () => {
|
|
41
|
+
// API call that might fail
|
|
42
|
+
},
|
|
43
|
+
retryConfig: {
|
|
44
|
+
attempts: 5, // This step will retry up to 5 times
|
|
45
|
+
delay: 2000, // With a 2-second delay between retries
|
|
46
|
+
},
|
|
47
|
+
});
|
|
48
|
+
```
|
|
49
|
+
|
|
50
|
+
For more details about step retries, see the [Step Retries](../reference/workflows/step-retries.mdx) reference.
|
|
51
|
+
|
|
52
|
+
## Conditional Branching
|
|
53
|
+
|
|
54
|
+
You can create alternative workflow paths based on the success or failure of previous steps using conditional logic:
|
|
55
|
+
|
|
56
|
+
```typescript
|
|
57
|
+
// Create a workflow with conditional branching
|
|
58
|
+
const workflow = new Workflow({
|
|
59
|
+
name: 'error-handling-workflow',
|
|
60
|
+
});
|
|
61
|
+
|
|
62
|
+
workflow
|
|
63
|
+
.step(fetchDataStep)
|
|
64
|
+
.then(processDataStep, {
|
|
65
|
+
// Only execute processDataStep if fetchDataStep was successful
|
|
66
|
+
when: ({ context }) => {
|
|
67
|
+
return context.steps.fetchDataStep?.status === 'success';
|
|
68
|
+
},
|
|
69
|
+
})
|
|
70
|
+
.then(fallbackStep, {
|
|
71
|
+
// Execute fallbackStep if fetchDataStep failed
|
|
72
|
+
when: ({ context }) => {
|
|
73
|
+
return context.steps.fetchDataStep?.status === 'failed';
|
|
74
|
+
},
|
|
75
|
+
})
|
|
76
|
+
.commit();
|
|
77
|
+
```
|
|
78
|
+
|
|
79
|
+
## Error Monitoring
|
|
80
|
+
|
|
81
|
+
You can monitor workflows for errors using the `watch` method:
|
|
82
|
+
|
|
83
|
+
```typescript
|
|
84
|
+
const { start, watch } = workflow.createRun();
|
|
85
|
+
|
|
86
|
+
watch(async ({ context, activePaths }) => {
|
|
87
|
+
// Check for any failed steps
|
|
88
|
+
const failedSteps = Object.entries(context.steps)
|
|
89
|
+
.filter(([_, step]) => step.status === 'failed')
|
|
90
|
+
.map(([stepId]) => stepId);
|
|
91
|
+
|
|
92
|
+
if (failedSteps.length > 0) {
|
|
93
|
+
console.error(`Workflow has failed steps: ${failedSteps.join(', ')}`);
|
|
94
|
+
// Take remedial action, such as alerting or logging
|
|
95
|
+
}
|
|
96
|
+
});
|
|
97
|
+
|
|
98
|
+
await start();
|
|
99
|
+
```
|
|
100
|
+
|
|
101
|
+
## Handling Errors in Steps
|
|
102
|
+
|
|
103
|
+
Within a step's execution function, you can handle errors programmatically:
|
|
104
|
+
|
|
105
|
+
```typescript
|
|
106
|
+
const robustStep = new Step({
|
|
107
|
+
id: 'robustStep',
|
|
108
|
+
execute: async ({ context }) => {
|
|
109
|
+
try {
|
|
110
|
+
// Attempt the primary operation
|
|
111
|
+
const result = await someRiskyOperation();
|
|
112
|
+
return { success: true, data: result };
|
|
113
|
+
} catch (error) {
|
|
114
|
+
// Log the error
|
|
115
|
+
console.error('Operation failed:', error);
|
|
116
|
+
|
|
117
|
+
// Return a graceful fallback result instead of throwing
|
|
118
|
+
return {
|
|
119
|
+
success: false,
|
|
120
|
+
error: error.message,
|
|
121
|
+
fallbackData: 'Default value'
|
|
122
|
+
};
|
|
123
|
+
}
|
|
124
|
+
},
|
|
125
|
+
});
|
|
126
|
+
```
|
|
127
|
+
|
|
128
|
+
## Checking Previous Step Results
|
|
129
|
+
|
|
130
|
+
You can make decisions based on the results of previous steps:
|
|
131
|
+
|
|
132
|
+
```typescript
|
|
133
|
+
const finalStep = new Step({
|
|
134
|
+
id: 'finalStep',
|
|
135
|
+
execute: async ({ context }) => {
|
|
136
|
+
// Check results of previous steps
|
|
137
|
+
const step1Success = context.steps.step1?.status === 'success';
|
|
138
|
+
const step2Success = context.steps.step2?.status === 'success';
|
|
139
|
+
|
|
140
|
+
if (step1Success && step2Success) {
|
|
141
|
+
// All steps succeeded
|
|
142
|
+
return { status: 'complete', result: 'All operations succeeded' };
|
|
143
|
+
} else if (step1Success) {
|
|
144
|
+
// Only step1 succeeded
|
|
145
|
+
return { status: 'partial', result: 'Partial completion' };
|
|
146
|
+
} else {
|
|
147
|
+
// Critical failure
|
|
148
|
+
return { status: 'failed', result: 'Critical steps failed' };
|
|
149
|
+
}
|
|
150
|
+
},
|
|
151
|
+
});
|
|
152
|
+
```
|
|
153
|
+
|
|
154
|
+
## Best Practices for Error Handling
|
|
155
|
+
|
|
156
|
+
1. **Use retries for transient failures**: Configure retry policies for steps that might experience temporary issues.
|
|
157
|
+
|
|
158
|
+
2. **Provide fallback paths**: Design workflows with alternative paths for when critical steps fail.
|
|
159
|
+
|
|
160
|
+
3. **Be specific about error scenarios**: Use different handling strategies for different types of errors.
|
|
161
|
+
|
|
162
|
+
4. **Log errors comprehensively**: Include context information when logging errors to aid in debugging.
|
|
163
|
+
|
|
164
|
+
5. **Return meaningful data on failure**: When a step fails, return structured data about the failure to help downstream steps make decisions.
|
|
165
|
+
|
|
166
|
+
6. **Consider idempotency**: Ensure steps can be safely retried without causing duplicate side effects.
|
|
167
|
+
|
|
168
|
+
7. **Monitor workflow execution**: Use the `watch` method to actively monitor workflow execution and detect errors early.
|
|
169
|
+
|
|
170
|
+
## Advanced Error Handling
|
|
171
|
+
|
|
172
|
+
For more complex error handling scenarios, consider:
|
|
173
|
+
|
|
174
|
+
- **Implementing circuit breakers**: If a step fails repeatedly, stop retrying and use a fallback strategy
|
|
175
|
+
- **Adding timeout handling**: Set time limits for steps to prevent workflows from hanging indefinitely
|
|
176
|
+
- **Creating dedicated error recovery workflows**: For critical workflows, create separate recovery workflows that can be triggered when the main workflow fails
|
|
177
|
+
|
|
178
|
+
## Related
|
|
179
|
+
|
|
180
|
+
- [Step Retries Reference](../reference/workflows/step-retries.mdx)
|
|
181
|
+
- [Watch Method Reference](../reference/workflows/watch.mdx)
|
|
182
|
+
- [Step Conditions](../reference/workflows/step-condition.mdx)
|
|
183
|
+
- [Control Flow](./control-flow.mdx)
|
|
@@ -14,7 +14,7 @@ The code below shows how to define these steps inline or separately.
|
|
|
14
14
|
You can create steps directly within your workflow using `.step()` and `.then()`. This code shows how to define, link, and execute two steps in sequence.
|
|
15
15
|
|
|
16
16
|
```typescript showLineNumbers filename="src/mastra/workflows/index.ts" copy
|
|
17
|
-
import { Step, Workflow } from "@mastra/core
|
|
17
|
+
import { Step, Workflow, Mastra } from "@mastra/core";
|
|
18
18
|
import { z } from "zod";
|
|
19
19
|
|
|
20
20
|
export const myWorkflow = new Workflow({
|
|
@@ -51,6 +51,11 @@ myWorkflow
|
|
|
51
51
|
},
|
|
52
52
|
}),
|
|
53
53
|
).commit();
|
|
54
|
+
|
|
55
|
+
// Register the workflow with Mastra
|
|
56
|
+
export const mastra = new Mastra({
|
|
57
|
+
workflows: { myWorkflow },
|
|
58
|
+
});
|
|
54
59
|
```
|
|
55
60
|
|
|
56
61
|
## Creating Steps Separately
|
|
@@ -58,7 +63,7 @@ myWorkflow
|
|
|
58
63
|
If you prefer to manage your step logic in separate entities, you can define steps outside and then add them to your workflow. This code shows how to define steps independently and link them afterward.
|
|
59
64
|
|
|
60
65
|
```typescript showLineNumbers filename="src/mastra/workflows/index.ts" copy
|
|
61
|
-
import { Step, Workflow } from "@mastra/core
|
|
66
|
+
import { Step, Workflow, Mastra } from "@mastra/core";
|
|
62
67
|
import { z } from "zod";
|
|
63
68
|
|
|
64
69
|
// Define steps separately
|
|
@@ -95,4 +100,9 @@ const myWorkflow = new Workflow({
|
|
|
95
100
|
|
|
96
101
|
myWorkflow.step(stepOne).then(stepTwo);
|
|
97
102
|
myWorkflow.commit();
|
|
103
|
+
|
|
104
|
+
// Register the workflow with Mastra
|
|
105
|
+
export const mastra = new Mastra({
|
|
106
|
+
workflows: { myWorkflow },
|
|
107
|
+
});
|
|
98
108
|
```
|