@mastra/mcp-docs-server 1.0.0-beta.3 → 1.0.0-beta.4
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/.docs/organized/changelogs/%40mastra%2Fai-sdk.md +30 -30
- package/.docs/organized/changelogs/%40mastra%2Fauth.md +6 -0
- package/.docs/organized/changelogs/%40mastra%2Fclient-js.md +12 -12
- package/.docs/organized/changelogs/%40mastra%2Fcore.md +45 -45
- package/.docs/organized/changelogs/%40mastra%2Fdeployer-cloud.md +9 -9
- package/.docs/organized/changelogs/%40mastra%2Fdeployer.md +9 -9
- package/.docs/organized/changelogs/%40mastra%2Fmcp-docs-server.md +8 -8
- package/.docs/organized/changelogs/%40mastra%2Fplayground-ui.md +12 -12
- package/.docs/organized/changelogs/%40mastra%2Freact.md +8 -8
- package/.docs/organized/changelogs/%40mastra%2Fserver.md +10 -10
- package/.docs/organized/changelogs/%40mastra%2Fvoice-google-gemini-live.md +10 -10
- package/.docs/organized/changelogs/create-mastra.md +7 -7
- package/.docs/organized/changelogs/mastra.md +11 -11
- package/.docs/raw/agents/adding-voice.mdx +49 -0
- package/.docs/raw/reference/agents/generate.mdx +11 -92
- package/.docs/raw/reference/agents/network.mdx +3 -85
- package/.docs/raw/reference/streaming/agents/stream.mdx +3 -92
- package/.docs/raw/reference/voice/composite-voice.mdx +71 -28
- package/.docs/raw/reference/voice/voice.listen.mdx +86 -52
- package/.docs/raw/reference/voice/voice.speak.mdx +75 -40
- package/.docs/raw/voice/overview.mdx +67 -0
- package/.docs/raw/workflows/overview.mdx +1 -1
- package/CHANGELOG.md +7 -0
- package/package.json +3 -3
package/.docs/raw/reference/voice/voice.speak.mdx
CHANGED

@@ -7,31 +7,6 @@ description: "Documentation for the speak() method available in all Mastra voice

 The `speak()` method is a core function available in all Mastra voice providers that converts text to speech. It takes text input and returns an audio stream that can be played or saved.

-## Usage Example
-
-```typescript
-import { OpenAIVoice } from "@mastra/voice-openai";
-// Initialize a voice provider
-const voice = new OpenAIVoice({
-  speaker: "alloy", // Default voice
-});
-// Basic usage with default settings
-const audioStream = await voice.speak("Hello, world!");
-// Using a different voice for this specific request
-const audioStreamWithDifferentVoice = await voice.speak("Hello again!", {
-  speaker: "nova",
-});
-// Using provider-specific options
-const audioStreamWithOptions = await voice.speak("Hello with options!", {
-  speaker: "echo",
-  speed: 1.2, // OpenAI-specific option
-});
-// Using a text stream as input
-import { Readable } from "stream";
-const textStream = Readable.from(["Hello", " from", " a", " stream!"]);
-const audioStreamFromTextStream = await voice.speak(textStream);
-```
-
 ## Parameters

 <PropertiesTable
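The context line above notes that `speak()` returns an audio stream that can be played or saved. As an illustration of the "saved" half, here is a minimal sketch assuming a Node.js runtime; the file name is illustrative, and MP3 is an assumption about the provider's default output format, neither is specified in this diff:

```typescript
import { createWriteStream } from "node:fs";
import { pipeline } from "node:stream/promises";
import { OpenAIVoice } from "@mastra/voice-openai";

const voice = new OpenAIVoice({ speaker: "alloy" });

// speak() resolves to a readable audio stream.
const audioStream = await voice.speak("Hello, world!");

// Pipe the stream to disk; "speech.mp3" is an illustrative path and MP3
// is an assumption about the default output format.
await pipeline(audioStream, createWriteStream("speech.mp3"));
```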
@@ -153,6 +128,81 @@ Each voice provider may support additional options specific to their implementat
 ]}
 />

+## Usage Example
+
+```typescript
+import { OpenAIVoice } from "@mastra/voice-openai";
+// Initialize a voice provider
+const voice = new OpenAIVoice({
+  speaker: "alloy", // Default voice
+});
+// Basic usage with default settings
+const audioStream = await voice.speak("Hello, world!");
+// Using a different voice for this specific request
+const audioStreamWithDifferentVoice = await voice.speak("Hello again!", {
+  speaker: "nova",
+});
+// Using provider-specific options
+const audioStreamWithOptions = await voice.speak("Hello with options!", {
+  speaker: "echo",
+  speed: 1.2, // OpenAI-specific option
+});
+// Using a text stream as input
+import { Readable } from "stream";
+const textStream = Readable.from(["Hello", " from", " a", " stream!"]);
+const audioStreamFromTextStream = await voice.speak(textStream);
+```
+
+## Using with CompositeVoice
+
+When using `CompositeVoice`, the `speak()` method delegates to the configured speaking provider:
+
+```typescript
+import { CompositeVoice } from "@mastra/core/voice";
+import { OpenAIVoice } from "@mastra/voice-openai";
+import { PlayAIVoice } from "@mastra/voice-playai";
+
+const voice = new CompositeVoice({
+  output: new PlayAIVoice(),
+  input: new OpenAIVoice(),
+});
+
+// This will use the PlayAIVoice provider
+const audioStream = await voice.speak("Hello, world!");
+```
+
+### Using AI SDK Model Providers
+
+You can also use AI SDK speech models directly with `CompositeVoice`:
+
+```typescript
+import { CompositeVoice } from "@mastra/core/voice";
+import { openai } from "@ai-sdk/openai";
+import { elevenlabs } from "@ai-sdk/elevenlabs";
+
+// Use AI SDK speech models
+const voice = new CompositeVoice({
+  output: elevenlabs.speech('eleven_turbo_v2'), // AI SDK model
+  input: openai.transcription('whisper-1'), // AI SDK model
+});
+
+// Works the same way
+const audioStream = await voice.speak("Hello from AI SDK!");
+
+// Provider-specific options can be passed through
+const audioWithOptions = await voice.speak("Hello with options!", {
+  speaker: 'Rachel', // ElevenLabs voice
+  providerOptions: {
+    elevenlabs: {
+      stability: 0.5,
+      similarity_boost: 0.75,
+    }
+  }
+});
+```
+
+See the [CompositeVoice reference](/reference/v1/voice/composite-voice) for more details on AI SDK integration.
+
 ## Realtime Voice Providers

 When using realtime voice providers like `OpenAIRealtimeVoice`, the `speak()` method behaves differently:
@@ -181,21 +231,6 @@ voice.on("speaker", (stream) => {
 await voice.speak("Hello, this is realtime speech!");
 ```

-## Using with CompositeVoice
-
-When using `CompositeVoice`, the `speak()` method delegates to the configured speaking provider:
-
-```typescript
-import { CompositeVoice } from "@mastra/core/voice";
-import { OpenAIVoice } from "@mastra/voice-openai";
-import { PlayAIVoice } from "@mastra/voice-playai";
-const voice = new CompositeVoice({
-  speakProvider: new PlayAIVoice(),
-  listenProvider: new OpenAIVoice(),
-});
-// This will use the PlayAIVoice provider
-const audioStream = await voice.speak("Hello, world!");
-```

 ## Notes

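The hunk above keeps only the tail of the realtime example as context (`voice.on("speaker", ...)`, `await voice.speak(...)`). For reference, a minimal end-to-end sketch of that event-driven pattern, assuming the `@mastra/voice-openai-realtime` package and its `connect()` setup step, neither of which is defined in this diff:

```typescript
import { OpenAIRealtimeVoice } from "@mastra/voice-openai-realtime";
import { playAudio } from "@mastra/node-audio";

const voice = new OpenAIRealtimeVoice();

// Realtime providers hold an open connection; establish it first.
await voice.connect();

// Audio arrives through the "speaker" event rather than as a return value.
voice.on("speaker", (stream) => {
  playAudio(stream);
});

// Resolves once the text is sent; audio plays via the listener above.
await voice.speak("Hello, this is realtime speech!");
```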
package/.docs/raw/voice/overview.mdx
CHANGED

@@ -918,6 +918,30 @@ const voice = new GeminiLiveVoice({
 Visit the [Google Gemini Live Reference](/reference/v1/voice/google-gemini-live) for more information on the Google Gemini Live voice provider.

 </TabItem>
+<TabItem value="aisdk" label="AI SDK">
+
+```typescript
+// AI SDK Voice Configuration
+import { CompositeVoice } from "@mastra/core/voice";
+import { openai } from "@ai-sdk/openai";
+import { elevenlabs } from "@ai-sdk/elevenlabs";
+
+// Use AI SDK models directly - no need to install separate packages
+const voice = new CompositeVoice({
+  input: openai.transcription('whisper-1'), // AI SDK transcription
+  output: elevenlabs.speech('eleven_turbo_v2'), // AI SDK speech
+});
+
+// Works seamlessly with your agent
+const voiceAgent = new Agent({
+  id: "aisdk-voice-agent",
+  name: "AI SDK Voice Agent",
+  instructions: "You are a helpful assistant with voice capabilities.",
+  model: openai("gpt-4o"),
+  voice,
+});
+```
+</TabItem>
 </Tabs>

 ### Using Multiple Voice Providers
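The new AI SDK tab above wires a `CompositeVoice` into an `Agent`. A short sketch of how that agent's voice could then be called, assuming the agent exposes its configured voice as `agent.voice` (as Mastra's agent docs describe, though this diff does not show it); the prompt text is illustrative:

```typescript
import { Agent } from "@mastra/core/agent";
import { CompositeVoice } from "@mastra/core/voice";
import { openai } from "@ai-sdk/openai";
import { elevenlabs } from "@ai-sdk/elevenlabs";

const voiceAgent = new Agent({
  id: "aisdk-voice-agent",
  name: "AI SDK Voice Agent",
  instructions: "You are a helpful assistant with voice capabilities.",
  model: openai("gpt-4o"),
  voice: new CompositeVoice({
    input: openai.transcription("whisper-1"),
    output: elevenlabs.speech("eleven_turbo_v2"),
  }),
});

// TTS is delegated to the configured output model (ElevenLabs here).
const audio = await voiceAgent.voice.speak("Hello! How can I help you today?");
```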
@@ -971,6 +995,49 @@ const responseAudio = await voice.speak(`You said: ${transcript}`, {
 playAudio(responseAudio);
 ```

+### Using AI SDK Model Providers
+
+You can also use AI SDK models directly with `CompositeVoice`:
+
+```typescript
+import { CompositeVoice } from "@mastra/core/voice";
+import { openai } from "@ai-sdk/openai";
+import { elevenlabs } from "@ai-sdk/elevenlabs";
+import { playAudio, getMicrophoneStream } from "@mastra/node-audio";
+
+// Use AI SDK models directly - no provider setup needed
+const voice = new CompositeVoice({
+  input: openai.transcription('whisper-1'), // AI SDK transcription
+  output: elevenlabs.speech('eleven_turbo_v2'), // AI SDK speech
+});
+
+// Works the same way as Mastra providers
+const audioStream = getMicrophoneStream();
+const transcript = await voice.listen(audioStream);
+
+console.log("Transcribed text:", transcript);
+
+// Convert text to speech
+const responseAudio = await voice.speak(`You said: ${transcript}`, {
+  speaker: "Rachel", // ElevenLabs voice
+});
+
+playAudio(responseAudio);
+```
+
+You can also mix AI SDK models with Mastra providers:
+
+```typescript
+import { CompositeVoice } from "@mastra/core/voice";
+import { PlayAIVoice } from "@mastra/voice-playai";
+import { groq } from "@ai-sdk/groq";
+
+const voice = new CompositeVoice({
+  input: groq.transcription('whisper-large-v3'), // AI SDK for STT
+  output: new PlayAIVoice(), // Mastra provider for TTS
+});
+```
+
 For more information on the CompositeVoice, refer to the [CompositeVoice Reference](/reference/v1/voice/composite-voice).

 ## More Resources
package/CHANGELOG.md
CHANGED
@@ -1,5 +1,12 @@
 # @mastra/mcp-docs-server

+## 1.0.0-beta.4
+
+### Patch Changes
+
+- Updated dependencies [[`352a5d6`](https://github.com/mastra-ai/mastra/commit/352a5d625cfe09849b21e8f52a24c9f0366759d5), [`a0a5b4b`](https://github.com/mastra-ai/mastra/commit/a0a5b4bbebe6c701ebbadf744873aa0d5ca01371), [`69ea758`](https://github.com/mastra-ai/mastra/commit/69ea758358edd7117f191c2e69c8bb5fc79e7a1a), [`993ad98`](https://github.com/mastra-ai/mastra/commit/993ad98d7ad3bebda9ecef5fec5c94349a0d04bc), [`3ff2c17`](https://github.com/mastra-ai/mastra/commit/3ff2c17a58e312fad5ea37377262c12d92ca0908)]:
+  - @mastra/core@1.0.0-beta.4
+
 ## 1.0.0-beta.3

 ### Patch Changes
package/package.json
CHANGED
@@ -1,6 +1,6 @@
 {
   "name": "@mastra/mcp-docs-server",
-  "version": "1.0.0-beta.3",
+  "version": "1.0.0-beta.4",
   "description": "MCP server for accessing Mastra.ai documentation, changelogs, and news.",
   "type": "module",
   "main": "dist/index.js",
@@ -29,7 +29,7 @@
     "jsdom": "^26.1.0",
     "zod": "^3.25.76",
     "@mastra/mcp": "^1.0.0-beta.2",
-    "@mastra/core": "1.0.0-beta.3"
+    "@mastra/core": "1.0.0-beta.4"
   },
   "devDependencies": {
     "@hono/node-server": "^1.19.6",
@@ -46,7 +46,7 @@
     "typescript": "^5.8.3",
     "vitest": "^4.0.8",
     "@internal/lint": "0.0.53",
-    "@mastra/core": "1.0.0-beta.3"
+    "@mastra/core": "1.0.0-beta.4"
   },
   "homepage": "https://mastra.ai",
   "repository": {