@mastra/mcp-docs-server 1.0.0-beta.3 → 1.0.0-beta.5

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (219)
  1. package/.docs/organized/changelogs/%40internal%2Fstorage-test-utils.md +201 -1
  2. package/.docs/organized/changelogs/%40mastra%2Fagent-builder.md +201 -1
  3. package/.docs/organized/changelogs/%40mastra%2Fai-sdk.md +201 -1
  4. package/.docs/organized/changelogs/%40mastra%2Fastra.md +201 -1
  5. package/.docs/organized/changelogs/%40mastra%2Fauth.md +6 -0
  6. package/.docs/organized/changelogs/%40mastra%2Fchroma.md +201 -1
  7. package/.docs/organized/changelogs/%40mastra%2Fclickhouse.md +201 -1
  8. package/.docs/organized/changelogs/%40mastra%2Fclient-js.md +201 -1
  9. package/.docs/organized/changelogs/%40mastra%2Fcloudflare-d1.md +201 -1
  10. package/.docs/organized/changelogs/%40mastra%2Fcloudflare.md +201 -1
  11. package/.docs/organized/changelogs/%40mastra%2Fcore.md +370 -170
  12. package/.docs/organized/changelogs/%40mastra%2Fcouchbase.md +201 -1
  13. package/.docs/organized/changelogs/%40mastra%2Fdeployer-cloud.md +201 -1
  14. package/.docs/organized/changelogs/%40mastra%2Fdeployer-cloudflare.md +201 -1
  15. package/.docs/organized/changelogs/%40mastra%2Fdeployer-netlify.md +201 -1
  16. package/.docs/organized/changelogs/%40mastra%2Fdeployer-vercel.md +201 -1
  17. package/.docs/organized/changelogs/%40mastra%2Fdeployer.md +201 -1
  18. package/.docs/organized/changelogs/%40mastra%2Fdynamodb.md +201 -1
  19. package/.docs/organized/changelogs/%40mastra%2Fevals.md +201 -1
  20. package/.docs/organized/changelogs/%40mastra%2Flance.md +201 -1
  21. package/.docs/organized/changelogs/%40mastra%2Flibsql.md +201 -1
  22. package/.docs/organized/changelogs/%40mastra%2Floggers.md +201 -1
  23. package/.docs/organized/changelogs/%40mastra%2Fmcp-docs-server.md +201 -1
  24. package/.docs/organized/changelogs/%40mastra%2Fmcp-registry-registry.md +201 -1
  25. package/.docs/organized/changelogs/%40mastra%2Fmcp.md +201 -1
  26. package/.docs/organized/changelogs/%40mastra%2Fmemory.md +201 -1
  27. package/.docs/organized/changelogs/%40mastra%2Fmongodb.md +201 -1
  28. package/.docs/organized/changelogs/%40mastra%2Fmssql.md +201 -1
  29. package/.docs/organized/changelogs/%40mastra%2Fopensearch.md +201 -1
  30. package/.docs/organized/changelogs/%40mastra%2Fpg.md +201 -1
  31. package/.docs/organized/changelogs/%40mastra%2Fpinecone.md +201 -1
  32. package/.docs/organized/changelogs/%40mastra%2Fplayground-ui.md +201 -1
  33. package/.docs/organized/changelogs/%40mastra%2Fqdrant.md +201 -1
  34. package/.docs/organized/changelogs/%40mastra%2Frag.md +201 -1
  35. package/.docs/organized/changelogs/%40mastra%2Freact.md +80 -1
  36. package/.docs/organized/changelogs/%40mastra%2Fs3vectors.md +9 -0
  37. package/.docs/organized/changelogs/%40mastra%2Fschema-compat.md +36 -0
  38. package/.docs/organized/changelogs/%40mastra%2Fserver.md +201 -1
  39. package/.docs/organized/changelogs/%40mastra%2Fturbopuffer.md +201 -1
  40. package/.docs/organized/changelogs/%40mastra%2Fupstash.md +201 -1
  41. package/.docs/organized/changelogs/%40mastra%2Fvectorize.md +201 -1
  42. package/.docs/organized/changelogs/%40mastra%2Fvoice-azure.md +201 -1
  43. package/.docs/organized/changelogs/%40mastra%2Fvoice-cloudflare.md +201 -1
  44. package/.docs/organized/changelogs/%40mastra%2Fvoice-deepgram.md +201 -1
  45. package/.docs/organized/changelogs/%40mastra%2Fvoice-elevenlabs.md +201 -1
  46. package/.docs/organized/changelogs/%40mastra%2Fvoice-gladia.md +92 -1
  47. package/.docs/organized/changelogs/%40mastra%2Fvoice-google-gemini-live.md +67 -1
  48. package/.docs/organized/changelogs/%40mastra%2Fvoice-google.md +201 -1
  49. package/.docs/organized/changelogs/%40mastra%2Fvoice-murf.md +201 -1
  50. package/.docs/organized/changelogs/%40mastra%2Fvoice-openai-realtime.md +201 -1
  51. package/.docs/organized/changelogs/%40mastra%2Fvoice-openai.md +201 -1
  52. package/.docs/organized/changelogs/%40mastra%2Fvoice-playai.md +201 -1
  53. package/.docs/organized/changelogs/%40mastra%2Fvoice-sarvam.md +201 -1
  54. package/.docs/organized/changelogs/%40mastra%2Fvoice-speechify.md +201 -1
  55. package/.docs/organized/changelogs/create-mastra.md +201 -1
  56. package/.docs/organized/changelogs/mastra.md +201 -1
  57. package/.docs/organized/code-examples/memory-with-processors.md +1 -1
  58. package/.docs/organized/code-examples/quick-start.md +1 -1
  59. package/.docs/raw/agents/adding-voice.mdx +55 -9
  60. package/.docs/raw/agents/guardrails.mdx +19 -20
  61. package/.docs/raw/agents/human-in-the-loop-with-tools.mdx +6 -5
  62. package/.docs/raw/agents/networks.mdx +1 -2
  63. package/.docs/raw/agents/overview.mdx +5 -5
  64. package/.docs/raw/agents/using-tools.mdx +4 -5
  65. package/.docs/raw/course/01-first-agent/05-running-playground.md +5 -5
  66. package/.docs/raw/course/01-first-agent/09-testing-your-agent.md +3 -3
  67. package/.docs/raw/course/01-first-agent/13-testing-your-tool.md +3 -3
  68. package/.docs/raw/course/01-first-agent/17-testing-memory.md +2 -2
  69. package/.docs/raw/course/04-workflows/07-using-playground.md +1 -1
  70. package/.docs/raw/deployment/building-mastra.mdx +1 -1
  71. package/.docs/raw/deployment/cloud-providers/amazon-ec2.mdx +1 -1
  72. package/.docs/raw/deployment/cloud-providers/aws-lambda.mdx +1 -1
  73. package/.docs/raw/deployment/cloud-providers/azure-app-services.mdx +1 -1
  74. package/.docs/raw/deployment/cloud-providers/digital-ocean.mdx +1 -1
  75. package/.docs/raw/deployment/cloud-providers/index.mdx +1 -1
  76. package/.docs/raw/deployment/mastra-cloud/observability.mdx +19 -17
  77. package/.docs/raw/deployment/mastra-cloud/setting-up.mdx +1 -1
  78. package/.docs/raw/deployment/overview.mdx +2 -2
  79. package/.docs/raw/deployment/web-framework.mdx +5 -5
  80. package/.docs/raw/evals/custom-scorers.mdx +3 -5
  81. package/.docs/raw/evals/overview.mdx +2 -3
  82. package/.docs/raw/getting-started/project-structure.mdx +1 -1
  83. package/.docs/raw/getting-started/start.mdx +72 -0
  84. package/.docs/raw/getting-started/studio.mdx +1 -1
  85. package/.docs/raw/{frameworks/agentic-uis/ai-sdk.mdx → guides/build-your-ui/ai-sdk-ui.mdx} +105 -11
  86. package/.docs/raw/{frameworks/web-frameworks → guides/getting-started}/astro.mdx +23 -25
  87. package/.docs/raw/{frameworks/servers → guides/getting-started}/express.mdx +3 -4
  88. package/.docs/raw/guides/{guide → getting-started}/manual-install.mdx +1 -1
  89. package/.docs/raw/guides/{quickstarts/nextjs.mdx → getting-started/next-js.mdx} +11 -11
  90. package/.docs/raw/guides/{quickstarts/standalone-server.mdx → getting-started/quickstart.mdx} +7 -7
  91. package/.docs/raw/{frameworks/web-frameworks → guides/getting-started}/sveltekit.mdx +23 -25
  92. package/.docs/raw/{frameworks/web-frameworks → guides/getting-started}/vite-react.mdx +7 -7
  93. package/.docs/raw/guides/guide/ai-recruiter.mdx +2 -3
  94. package/.docs/raw/guides/guide/chef-michel.mdx +2 -3
  95. package/.docs/raw/guides/guide/notes-mcp-server.mdx +2 -2
  96. package/.docs/raw/guides/guide/research-assistant.mdx +7 -8
  97. package/.docs/raw/guides/guide/stock-agent.mdx +4 -6
  98. package/.docs/raw/guides/guide/web-search.mdx +12 -10
  99. package/.docs/raw/guides/migrations/agentnetwork.mdx +4 -4
  100. package/.docs/raw/guides/migrations/ai-sdk-v4-to-v5.mdx +1 -1
  101. package/.docs/raw/guides/migrations/upgrade-to-v1/agent.mdx +29 -0
  102. package/.docs/raw/guides/migrations/upgrade-to-v1/tools.mdx +5 -0
  103. package/.docs/raw/guides/migrations/upgrade-to-v1/workflows.mdx +22 -0
  104. package/.docs/raw/guides/migrations/vnext-to-standard-apis.mdx +2 -2
  105. package/.docs/raw/index.mdx +2 -2
  106. package/.docs/raw/mcp/overview.mdx +3 -5
  107. package/.docs/raw/memory/memory-processors.mdx +1 -2
  108. package/.docs/raw/memory/semantic-recall.mdx +7 -7
  109. package/.docs/raw/memory/storage/memory-with-libsql.mdx +2 -4
  110. package/.docs/raw/memory/storage/memory-with-mongodb.mdx +2 -4
  111. package/.docs/raw/memory/storage/memory-with-pg.mdx +2 -4
  112. package/.docs/raw/memory/storage/memory-with-upstash.mdx +2 -4
  113. package/.docs/raw/memory/threads-and-resources.mdx +3 -3
  114. package/.docs/raw/memory/working-memory.mdx +4 -5
  115. package/.docs/raw/{logging.mdx → observability/logging.mdx} +1 -1
  116. package/.docs/raw/observability/overview.mdx +2 -2
  117. package/.docs/raw/observability/tracing/exporters/otel.mdx +21 -2
  118. package/.docs/raw/observability/tracing/exporters/posthog.mdx +107 -0
  119. package/.docs/raw/observability/tracing/overview.mdx +3 -2
  120. package/.docs/raw/rag/chunking-and-embedding.mdx +16 -17
  121. package/.docs/raw/rag/overview.mdx +3 -2
  122. package/.docs/raw/rag/retrieval.mdx +20 -32
  123. package/.docs/raw/reference/agents/agent.mdx +7 -10
  124. package/.docs/raw/reference/agents/generate.mdx +11 -92
  125. package/.docs/raw/reference/agents/generateLegacy.mdx +2 -2
  126. package/.docs/raw/reference/agents/getLLM.mdx +1 -1
  127. package/.docs/raw/reference/agents/network.mdx +5 -88
  128. package/.docs/raw/reference/cli/mastra.mdx +2 -1
  129. package/.docs/raw/reference/client-js/agents.mdx +3 -3
  130. package/.docs/raw/reference/core/getLogger.mdx +1 -1
  131. package/.docs/raw/reference/core/listLogs.mdx +1 -1
  132. package/.docs/raw/reference/core/listLogsByRunId.mdx +1 -1
  133. package/.docs/raw/reference/core/mastra-model-gateway.mdx +5 -19
  134. package/.docs/raw/reference/core/setLogger.mdx +1 -1
  135. package/.docs/raw/reference/core/setTelemetry.mdx +1 -1
  136. package/.docs/raw/reference/evals/answer-relevancy.mdx +28 -98
  137. package/.docs/raw/reference/evals/answer-similarity.mdx +12 -258
  138. package/.docs/raw/reference/evals/bias.mdx +29 -87
  139. package/.docs/raw/reference/evals/completeness.mdx +31 -90
  140. package/.docs/raw/reference/evals/content-similarity.mdx +28 -88
  141. package/.docs/raw/reference/evals/context-precision.mdx +28 -130
  142. package/.docs/raw/reference/evals/context-relevance.mdx +11 -11
  143. package/.docs/raw/reference/evals/faithfulness.mdx +28 -101
  144. package/.docs/raw/reference/evals/hallucination.mdx +28 -103
  145. package/.docs/raw/reference/evals/keyword-coverage.mdx +28 -107
  146. package/.docs/raw/reference/evals/noise-sensitivity.mdx +11 -11
  147. package/.docs/raw/reference/evals/prompt-alignment.mdx +15 -15
  148. package/.docs/raw/reference/evals/textual-difference.mdx +27 -100
  149. package/.docs/raw/reference/evals/tone-consistency.mdx +25 -98
  150. package/.docs/raw/reference/evals/tool-call-accuracy.mdx +7 -7
  151. package/.docs/raw/reference/evals/toxicity.mdx +29 -92
  152. package/.docs/raw/reference/memory/memory-class.mdx +5 -7
  153. package/.docs/raw/reference/observability/tracing/exporters/posthog.mdx +132 -0
  154. package/.docs/raw/reference/processors/batch-parts-processor.mdx +1 -1
  155. package/.docs/raw/reference/processors/language-detector.mdx +1 -1
  156. package/.docs/raw/reference/processors/moderation-processor.mdx +2 -2
  157. package/.docs/raw/reference/processors/pii-detector.mdx +2 -2
  158. package/.docs/raw/reference/processors/prompt-injection-detector.mdx +1 -1
  159. package/.docs/raw/reference/processors/system-prompt-scrubber.mdx +2 -3
  160. package/.docs/raw/reference/processors/token-limiter-processor.mdx +2 -2
  161. package/.docs/raw/reference/processors/unicode-normalizer.mdx +1 -1
  162. package/.docs/raw/reference/rag/embeddings.mdx +5 -5
  163. package/.docs/raw/reference/rag/rerank.mdx +1 -2
  164. package/.docs/raw/reference/rag/rerankWithScorer.mdx +0 -1
  165. package/.docs/raw/reference/streaming/agents/stream.mdx +11 -93
  166. package/.docs/raw/reference/templates/overview.mdx +1 -4
  167. package/.docs/raw/reference/tools/client.mdx +1 -2
  168. package/.docs/raw/reference/tools/create-tool.mdx +132 -0
  169. package/.docs/raw/reference/tools/graph-rag-tool.mdx +5 -5
  170. package/.docs/raw/reference/tools/mcp-client.mdx +2 -4
  171. package/.docs/raw/reference/tools/mcp-server.mdx +1 -2
  172. package/.docs/raw/reference/tools/vector-query-tool.mdx +14 -15
  173. package/.docs/raw/reference/vectors/chroma.mdx +81 -1
  174. package/.docs/raw/reference/vectors/couchbase.mdx +24 -17
  175. package/.docs/raw/reference/vectors/lance.mdx +38 -22
  176. package/.docs/raw/reference/vectors/libsql.mdx +35 -2
  177. package/.docs/raw/reference/vectors/mongodb.mdx +35 -2
  178. package/.docs/raw/reference/vectors/opensearch.mdx +37 -16
  179. package/.docs/raw/reference/vectors/pg.mdx +43 -36
  180. package/.docs/raw/reference/vectors/pinecone.mdx +48 -1
  181. package/.docs/raw/reference/vectors/qdrant.mdx +36 -1
  182. package/.docs/raw/reference/vectors/turbopuffer.mdx +74 -0
  183. package/.docs/raw/reference/voice/composite-voice.mdx +71 -28
  184. package/.docs/raw/reference/voice/openai-realtime.mdx +2 -2
  185. package/.docs/raw/reference/voice/voice.addInstructions.mdx +2 -3
  186. package/.docs/raw/reference/voice/voice.addTools.mdx +1 -1
  187. package/.docs/raw/reference/voice/voice.answer.mdx +1 -1
  188. package/.docs/raw/reference/voice/voice.close.mdx +1 -1
  189. package/.docs/raw/reference/voice/voice.connect.mdx +1 -1
  190. package/.docs/raw/reference/voice/voice.listen.mdx +86 -52
  191. package/.docs/raw/reference/voice/voice.off.mdx +1 -1
  192. package/.docs/raw/reference/voice/voice.on.mdx +1 -1
  193. package/.docs/raw/reference/voice/voice.send.mdx +1 -1
  194. package/.docs/raw/reference/voice/voice.speak.mdx +75 -40
  195. package/.docs/raw/reference/voice/voice.updateConfig.mdx +1 -1
  196. package/.docs/raw/server-db/mastra-client.mdx +1 -2
  197. package/.docs/raw/streaming/overview.mdx +20 -9
  198. package/.docs/raw/streaming/tool-streaming.mdx +47 -4
  199. package/.docs/raw/tools-mcp/advanced-usage.mdx +1 -2
  200. package/.docs/raw/tools-mcp/mcp-overview.mdx +3 -5
  201. package/.docs/raw/voice/overview.mdx +87 -40
  202. package/.docs/raw/voice/speech-to-speech.mdx +4 -4
  203. package/.docs/raw/voice/speech-to-text.mdx +1 -2
  204. package/.docs/raw/voice/text-to-speech.mdx +1 -2
  205. package/.docs/raw/workflows/control-flow.mdx +180 -0
  206. package/.docs/raw/workflows/overview.mdx +1 -1
  207. package/CHANGELOG.md +17 -0
  208. package/dist/{chunk-5NJC7NRO.js → chunk-4CM2BQNP.js} +24 -4
  209. package/dist/prepare-docs/package-changes.d.ts.map +1 -1
  210. package/dist/prepare-docs/prepare.js +1 -1
  211. package/dist/stdio.js +1 -1
  212. package/package.json +7 -7
  213. package/.docs/raw/frameworks/agentic-uis/cedar-os.mdx +0 -102
  214. package/.docs/raw/frameworks/agentic-uis/openrouter.mdx +0 -179
  215. package/.docs/raw/frameworks/web-frameworks/next-js.mdx +0 -379
  216. package/.docs/raw/getting-started/quickstart.mdx +0 -27
  217. package/.docs/raw/getting-started/templates.mdx +0 -73
  218. /package/.docs/raw/{frameworks/agentic-uis → guides/build-your-ui}/assistant-ui.mdx +0 -0
  219. /package/.docs/raw/{frameworks/agentic-uis → guides/build-your-ui}/copilotkit.mdx +0 -0
@@ -7,41 +7,6 @@ description: "Documentation for the listen() method available in all Mastra voic

  The `listen()` method is a core function available in all Mastra voice providers that converts speech to text. It takes an audio stream as input and returns the transcribed text.

- ## Usage Example
-
- ```typescript
- import { OpenAIVoice } from "@mastra/voice-openai";
- import { getMicrophoneStream } from "@mastra/node-audio";
- import { createReadStream } from "fs";
- import path from "path";
-
- // Initialize a voice provider
- const voice = new OpenAIVoice({
- listeningModel: {
- name: "whisper-1",
- apiKey: process.env.OPENAI_API_KEY,
- },
- });
-
- // Basic usage with a file stream
- const audioFilePath = path.join(process.cwd(), "audio.mp3");
- const audioStream = createReadStream(audioFilePath);
- const transcript = await voice.listen(audioStream, {
- filetype: "mp3",
- });
- console.log("Transcribed text:", transcript);
-
- // Using a microphone stream
- const microphoneStream = getMicrophoneStream(); // Assume this function gets audio input
- const transcription = await voice.listen(microphoneStream);
-
- // With provider-specific options
- const transcriptWithOptions = await voice.listen(audioStream, {
- language: "en",
- prompt: "This is a conversation about artificial intelligence.",
- });
- ```
-
  ## Parameters

  <PropertiesTable
@@ -143,30 +108,42 @@ Each voice provider may support additional options specific to their implementat
  ]}
  />

- ## Realtime Voice Providers
-
- When using realtime voice providers like `OpenAIRealtimeVoice`, the `listen()` method behaves differently:
-
- - Instead of returning transcribed text, it emits 'writing' events with the transcribed text
- - You need to register an event listener to receive the transcription
+ ## Usage Example

  ```typescript
- import { OpenAIRealtimeVoice } from "@mastra/voice-openai-realtime";
+ import { OpenAIVoice } from "@mastra/voice-openai";
  import { getMicrophoneStream } from "@mastra/node-audio";
+ import { createReadStream } from "fs";
+ import path from "path";

- const voice = new OpenAIRealtimeVoice();
- await voice.connect();
+ // Initialize a voice provider
+ const voice = new OpenAIVoice({
+ listeningModel: {
+ name: "whisper-1",
+ apiKey: process.env.OPENAI_API_KEY,
+ },
+ });

- // Register event listener for transcription
- voice.on("writing", ({ text, role }) => {
- console.log(`${role}: ${text}`);
+ // Basic usage with a file stream
+ const audioFilePath = path.join(process.cwd(), "audio.mp3");
+ const audioStream = createReadStream(audioFilePath);
+ const transcript = await voice.listen(audioStream, {
+ filetype: "mp3",
  });
+ console.log("Transcribed text:", transcript);

- // This will emit 'writing' events instead of returning text
- const microphoneStream = getMicrophoneStream();
- await voice.listen(microphoneStream);
+ // Using a microphone stream
+ const microphoneStream = getMicrophoneStream(); // Assume this function gets audio input
+ const transcription = await voice.listen(microphoneStream);
+
+ // With provider-specific options
+ const transcriptWithOptions = await voice.listen(audioStream, {
+ language: "en",
+ prompt: "This is a conversation about artificial intelligence.",
+ });
  ```

+
  ## Using with CompositeVoice

  When using `CompositeVoice`, the `listen()` method delegates to the configured listening provider:
@@ -177,14 +154,70 @@ import { OpenAIVoice } from "@mastra/voice-openai";
  import { PlayAIVoice } from "@mastra/voice-playai";

  const voice = new CompositeVoice({
- listenProvider: new OpenAIVoice(),
- speakProvider: new PlayAIVoice(),
+ input: new OpenAIVoice(),
+ output: new PlayAIVoice(),
  });

  // This will use the OpenAIVoice provider
  const transcript = await voice.listen(audioStream);
  ```

+ ### Using AI SDK Model Providers
+
+ You can also use AI SDK transcription models directly with `CompositeVoice`:
+
+ ```typescript
+ import { CompositeVoice } from "@mastra/core/voice";
+ import { openai } from "@ai-sdk/openai";
+ import { groq } from "@ai-sdk/groq";
+
+ // Use AI SDK transcription models
+ const voice = new CompositeVoice({
+ input: openai.transcription('whisper-1'), // AI SDK model
+ output: new PlayAIVoice(), // Mastra provider
+ });
+
+ // Works the same way
+ const transcript = await voice.listen(audioStream);
+
+ // Provider-specific options can be passed through
+ const transcriptWithOptions = await voice.listen(audioStream, {
+ providerOptions: {
+ openai: {
+ language: 'en',
+ prompt: 'This is about AI',
+ }
+ }
+ });
+ ```
+
+ See the [CompositeVoice reference](/reference/v1/voice/composite-voice) for more details on AI SDK integration.
+
+
+ ## Realtime Voice Providers
+
+ When using realtime voice providers like `OpenAIRealtimeVoice`, the `listen()` method behaves differently:
+
+ - Instead of returning transcribed text, it emits 'writing' events with the transcribed text
+ - You need to register an event listener to receive the transcription
+
+ ```typescript
+ import { OpenAIRealtimeVoice } from "@mastra/voice-openai-realtime";
+ import { getMicrophoneStream } from "@mastra/node-audio";
+
+ const voice = new OpenAIRealtimeVoice();
+ await voice.connect();
+
+ // Register event listener for transcription
+ voice.on("writing", ({ text, role }) => {
+ console.log(`${role}: ${text}`);
+ });
+
+ // This will emit 'writing' events instead of returning text
+ const microphoneStream = getMicrophoneStream();
+ await voice.listen(microphoneStream);
+ ```
+
  ## Notes

  - Not all voice providers support speech-to-text functionality (e.g., PlayAI, Speechify)
@@ -194,6 +227,7 @@ const transcript = await voice.listen(audioStream);
  - Some providers support streaming transcription, where text is returned as it's transcribed
  - For best performance, consider closing or ending the audio stream when you're done with it

+
  ## Related Methods

  - [voice.speak()](./voice.speak) - Converts text to speech
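
The hunks above rename the `CompositeVoice` constructor options from `listenProvider`/`speakProvider` to `input`/`output`. As a quick orientation, here is a minimal sketch that pairs both renamed options in one place; it assumes the same OpenAI and PlayAI providers used in the documented examples and a local `audio.mp3` file, and is illustrative rather than a verbatim excerpt from the docs.

```typescript
import { createReadStream } from "fs";
import { CompositeVoice } from "@mastra/core/voice";
import { OpenAIVoice } from "@mastra/voice-openai";
import { PlayAIVoice } from "@mastra/voice-playai";

// `input` replaces `listenProvider`, `output` replaces `speakProvider`
const voice = new CompositeVoice({
  input: new OpenAIVoice(),  // used by listen() for speech-to-text
  output: new PlayAIVoice(), // used by speak() for text-to-speech
});

const audioStream = createReadStream("audio.mp3"); // assumed local file
const transcript = await voice.listen(audioStream);
const reply = await voice.speak(`You said: ${transcript}`);
```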
@@ -16,7 +16,7 @@ import chalk from "chalk";
  // Initialize a real-time voice provider
  const voice = new OpenAIRealtimeVoice({
  realtimeConfig: {
- model: "gpt-4o-mini-realtime",
+ model: "gpt-5.1-realtime",
  apiKey: process.env.OPENAI_API_KEY,
  },
  });
@@ -17,7 +17,7 @@ import chalk from "chalk";
  // Initialize a real-time voice provider
  const voice = new OpenAIRealtimeVoice({
  realtimeConfig: {
- model: "gpt-4o-mini-realtime",
+ model: "gpt-5.1-realtime",
  apiKey: process.env.OPENAI_API_KEY,
  },
  });
@@ -23,7 +23,7 @@ const speaker = new Speaker({
  // Initialize a real-time voice provider
  const voice = new OpenAIRealtimeVoice({
  realtimeConfig: {
- model: "gpt-4o-mini-realtime",
+ model: "gpt-5.1-realtime",
  apiKey: process.env.OPENAI_API_KEY,
  },
  });
@@ -7,31 +7,6 @@ description: "Documentation for the speak() method available in all Mastra voice

  The `speak()` method is a core function available in all Mastra voice providers that converts text to speech. It takes text input and returns an audio stream that can be played or saved.

- ## Usage Example
-
- ```typescript
- import { OpenAIVoice } from "@mastra/voice-openai";
- // Initialize a voice provider
- const voice = new OpenAIVoice({
- speaker: "alloy", // Default voice
- });
- // Basic usage with default settings
- const audioStream = await voice.speak("Hello, world!");
- // Using a different voice for this specific request
- const audioStreamWithDifferentVoice = await voice.speak("Hello again!", {
- speaker: "nova",
- });
- // Using provider-specific options
- const audioStreamWithOptions = await voice.speak("Hello with options!", {
- speaker: "echo",
- speed: 1.2, // OpenAI-specific option
- });
- // Using a text stream as input
- import { Readable } from "stream";
- const textStream = Readable.from(["Hello", " from", " a", " stream!"]);
- const audioStreamFromTextStream = await voice.speak(textStream);
- ```
-
  ## Parameters

  <PropertiesTable
@@ -153,6 +128,81 @@ Each voice provider may support additional options specific to their implementat
  ]}
  />

+ ## Usage Example
+
+ ```typescript
+ import { OpenAIVoice } from "@mastra/voice-openai";
+ // Initialize a voice provider
+ const voice = new OpenAIVoice({
+ speaker: "alloy", // Default voice
+ });
+ // Basic usage with default settings
+ const audioStream = await voice.speak("Hello, world!");
+ // Using a different voice for this specific request
+ const audioStreamWithDifferentVoice = await voice.speak("Hello again!", {
+ speaker: "nova",
+ });
+ // Using provider-specific options
+ const audioStreamWithOptions = await voice.speak("Hello with options!", {
+ speaker: "echo",
+ speed: 1.2, // OpenAI-specific option
+ });
+ // Using a text stream as input
+ import { Readable } from "stream";
+ const textStream = Readable.from(["Hello", " from", " a", " stream!"]);
+ const audioStreamFromTextStream = await voice.speak(textStream);
+ ```
+
+ ## Using with CompositeVoice
+
+ When using `CompositeVoice`, the `speak()` method delegates to the configured speaking provider:
+
+ ```typescript
+ import { CompositeVoice } from "@mastra/core/voice";
+ import { OpenAIVoice } from "@mastra/voice-openai";
+ import { PlayAIVoice } from "@mastra/voice-playai";
+
+ const voice = new CompositeVoice({
+ output: new PlayAIVoice(),
+ input: new OpenAIVoice(),
+ });
+
+ // This will use the PlayAIVoice provider
+ const audioStream = await voice.speak("Hello, world!");
+ ```
+
+ ### Using AI SDK Model Providers
+
+ You can also use AI SDK speech models directly with `CompositeVoice`:
+
+ ```typescript
+ import { CompositeVoice } from "@mastra/core/voice";
+ import { openai } from "@ai-sdk/openai";
+ import { elevenlabs } from "@ai-sdk/elevenlabs";
+
+ // Use AI SDK speech models
+ const voice = new CompositeVoice({
+ output: elevenlabs.speech('eleven_turbo_v2'), // AI SDK model
+ input: openai.transcription('whisper-1'), // AI SDK model
+ });
+
+ // Works the same way
+ const audioStream = await voice.speak("Hello from AI SDK!");
+
+ // Provider-specific options can be passed through
+ const audioWithOptions = await voice.speak("Hello with options!", {
+ speaker: 'Rachel', // ElevenLabs voice
+ providerOptions: {
+ elevenlabs: {
+ stability: 0.5,
+ similarity_boost: 0.75,
+ }
+ }
+ });
+ ```
+
+ See the [CompositeVoice reference](/reference/v1/voice/composite-voice) for more details on AI SDK integration.
+
  ## Realtime Voice Providers

  When using realtime voice providers like `OpenAIRealtimeVoice`, the `speak()` method behaves differently:
@@ -181,21 +231,6 @@ voice.on("speaker", (stream) => {
  await voice.speak("Hello, this is realtime speech!");
  ```

- ## Using with CompositeVoice
-
- When using `CompositeVoice`, the `speak()` method delegates to the configured speaking provider:
-
- ```typescript
- import { CompositeVoice } from "@mastra/core/voice";
- import { OpenAIVoice } from "@mastra/voice-openai";
- import { PlayAIVoice } from "@mastra/voice-playai";
- const voice = new CompositeVoice({
- speakProvider: new PlayAIVoice(),
- listenProvider: new OpenAIVoice(),
- });
- // This will use the PlayAIVoice provider
- const audioStream = await voice.speak("Hello, world!");
- ```

  ## Notes

@@ -15,7 +15,7 @@ import { OpenAIRealtimeVoice } from "@mastra/voice-openai-realtime";
  // Initialize a real-time voice provider
  const voice = new OpenAIRealtimeVoice({
  realtimeConfig: {
- model: "gpt-4o-mini-realtime",
+ model: "gpt-5.1-realtime",
  apiKey: process.env.OPENAI_API_KEY,
  },
  speaker: "alloy",
@@ -240,7 +240,6 @@ const handleClientTool = async () => {
  This is a standard Mastra [agent](../agents/overview#setting-up-agents) configured to return hex color codes, intended to work with the browser-based client tool defined above.

  ```typescript title="src/mastra/agents/color-agent" showLineNumbers copy
- import { openai } from "@ai-sdk/openai";
  import { Agent } from "@mastra/core/agent";

  export const colorAgent = new Agent({
@@ -249,7 +248,7 @@ export const colorAgent = new Agent({
  instructions: `You are a helpful CSS assistant.
  You can change the background color of web pages.
  Respond with a hex reference for the color requested by the user`,
- model: openai("gpt-4o-mini"),
+ model: "openai/gpt-5.1",
  });
  ```
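The two hunks above, and the similar agent changes later in this diff, drop the imported `openai()` model helper in favor of a plain `provider/model` string. A minimal sketch of the resulting agent definition follows; the `id`, `name`, and shortened instructions are assumptions for illustration, since the hunks show only part of the file.

```typescript
import { Agent } from "@mastra/core/agent";

// No "@ai-sdk/openai" import is needed once the model is given as a
// "provider/model" string, matching the updated docs above.
export const colorAgent = new Agent({
  id: "color-agent", // assumed id for illustration
  name: "Color Agent", // assumed name for illustration
  instructions: "Respond with a hex reference for the color requested by the user.",
  model: "openai/gpt-5.1",
});
```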
 
@@ -60,19 +60,30 @@ An agent stream provides access to various response properties:

  AI SDK v5 uses `LanguageModelV2` for the model providers. If you are getting an error that you are using an AI SDK v4 model you will need to upgrade your model package to the next major version.

- For integration with AI SDK v5, use `format` 'aisdk' to get an `AISDKV5OutputStream`:
+ For integration with AI SDK v5, use the `toAISdkV5Stream()` utility from `@mastra/ai-sdk` to convert Mastra streams to AI SDK-compatible format:
+
+ ```typescript {2,9-12} showLineNumbers copy
+ import { toAISdkV5Stream } from "@mastra/ai-sdk";

- ```typescript {5} showLineNumbers copy
  const testAgent = mastra.getAgent("testAgent");

- const stream = await testAgent.stream(
- [{ role: "user", content: "Help me organize my day" }],
- { format: "aisdk" },
- );
+ const stream = await testAgent.stream([
+ { role: "user", content: "Help me organize my day" },
+ ]);

- for await (const chunk of stream.textStream) {
- process.stdout.write(chunk);
- }
+ // Convert to AI SDK v5 compatible stream
+ const aiSDKStream = toAISdkV5Stream(stream, { from: "agent" });
+
+ // Use with AI SDK v5 methods
+ ```
+
+ For converting messages to AI SDK v5 format, use the `toAISdkV5Messages()` utility from `@mastra/ai-sdk/ui`:
+
+ ```typescript {1,4} showLineNumbers copy
+ import { toAISdkV5Messages } from "@mastra/ai-sdk/ui";
+
+ const messages = [{ role: "user", content: "Hello" }];
+ const aiSDKMessages = toAISdkV5Messages(messages);
  ```

  ### Using `Agent.network()`
@@ -18,8 +18,7 @@ By combining writable tool streams with agent streaming, you gain fine grained c

  Agent streaming can be combined with tool calls, allowing tool outputs to be written directly into the agent’s streaming response. This makes it possible to surface tool activity as part of the overall interaction.

- ```typescript {4,10} showLineNumbers copy
- import { openai } from "@ai-sdk/openai";
+ ```typescript {3,9} showLineNumbers copy
  import { Agent } from "@mastra/core/agent";

  import { testTool } from "../tools/test-tool";
@@ -28,7 +27,7 @@ export const testAgent = new Agent({
  id: "test-agent",
  name: "Test Agent",
  instructions: "You are a weather agent.",
- model: openai("gpt-4o-mini"),
+ model: "openai/gpt-5.1",
  tools: { testTool },
  });
  ```
@@ -117,9 +116,53 @@ for await (const chunk of stream) {
  }
  ```

+ ## Tool Lifecycle Hooks
+
+ Tools support lifecycle hooks that allow you to monitor different stages of tool execution during streaming. These hooks are particularly useful for logging or analytics.
+
+ ### Example: Using onInputAvailable and onOutput
+
+ ```typescript showLineNumbers copy
+ import { createTool } from "@mastra/core/tools";
+ import { z } from "zod";
+
+ export const weatherTool = createTool({
+ id: "weather-tool",
+ description: "Get weather information",
+ inputSchema: z.object({
+ city: z.string(),
+ }),
+ outputSchema: z.object({
+ temperature: z.number(),
+ conditions: z.string(),
+ }),
+ // Called when the complete input is available
+ onInputAvailable: ({ input, toolCallId }) => {
+ console.log(`Weather requested for: ${input.city}`);
+ },
+ execute: async (input) => {
+ const weather = await fetchWeather(input.city);
+ return weather;
+ },
+ // Called after successful execution
+ onOutput: ({ output, toolName }) => {
+ console.log(`${toolName} result: ${output.temperature}°F, ${output.conditions}`);
+ },
+ });
+ ```
+
+ ### Available Hooks
+
+ - **onInputStart**: Called when tool call input streaming begins
+ - **onInputDelta**: Called for each chunk of input as it streams in
+ - **onInputAvailable**: Called when complete input is parsed and validated
+ - **onOutput**: Called after the tool successfully executes with the output
+
+ For detailed documentation on all lifecycle hooks, see the [createTool() reference](/reference/v1/tools/create-tool#tool-lifecycle-hooks).
+
  ## Tool using an agent

- Pipe an agents `textStream` to the tools `writer`. This streams partial output, and Mastra automatically aggregates the agents usage into the tool run.
+ Pipe an agent's `textStream` to the tool's `writer`. This streams partial output, and Mastra automatically aggregates the agent's usage into the tool run.

  ```typescript showLineNumbers copy
  import { createTool } from "@mastra/core/tools";
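
The hunk above lists `onInputStart` and `onInputDelta` under "Available Hooks" but only demonstrates `onInputAvailable` and `onOutput`. Here is a minimal sketch of how the first two might be wired up; the payload fields beyond `toolCallId` (such as `inputTextDelta`) are assumptions for illustration, and the createTool() reference linked in the hunk is the authoritative source for the exact signatures.

```typescript
import { createTool } from "@mastra/core/tools";
import { z } from "zod";

export const loggedTool = createTool({
  id: "logged-tool",
  description: "Logs each stage of tool-call input streaming",
  inputSchema: z.object({ query: z.string() }),
  // Fires when the model starts streaming input for this tool call
  onInputStart: ({ toolCallId }) => {
    console.log(`input streaming started for ${toolCallId}`);
  },
  // Fires for each streamed chunk of input; the delta field name used here
  // (`inputTextDelta`) is an assumption, not a documented signature
  onInputDelta: ({ toolCallId, inputTextDelta }) => {
    console.log(`chunk for ${toolCallId}: ${inputTextDelta}`);
  },
  execute: async (input) => {
    return { echoed: input.query };
  },
});
```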
@@ -110,7 +110,6 @@ You can then add this tool to your Mastra agent just like any other tool:

  ```typescript title="src/mastra/agents/mixedToolsAgent.ts"
  import { Agent } from "@mastra/core/agent";
- import { openai } from "@ai-sdk/openai";
  import { vercelWeatherTool } from "../tools/vercelWeatherTool"; // Vercel AI SDK tool
  import { mastraTool } from "../tools/mastraTool"; // Mastra createTool tool

@@ -118,7 +117,7 @@ export const mixedToolsAgent = new Agent({
  id: "mixed-tools-agent",
  name: "Mixed Tools Agent",
  instructions: "You can use tools defined in different formats.",
- model: openai("gpt-4o-mini"),
+ model: "openai/gpt-5.1",
  tools: {
  weatherVercel: vercelWeatherTool,
  someMastraTool: mastraTool,
@@ -54,8 +54,7 @@ export const testMcpClient = new MCPClient({

  To use tools from an MCP server in an agent, import your `MCPClient` and call `.listTools()` in the `tools` parameter. This loads from the defined MCP servers, making them available to the agent.

- ```typescript {4,16} title="src/mastra/agents/test-agent.ts" showLineNumbers copy
- import { openai } from "@ai-sdk/openai";
+ ```typescript {3,15} title="src/mastra/agents/test-agent.ts" showLineNumbers copy
  import { Agent } from "@mastra/core/agent";

  import { testMcpClient } from "../mcp/test-mcp-client";
@@ -70,7 +69,7 @@ export const testAgent = new Agent({
  - US National Weather Service

  Answer questions using the information you find using the MCP Servers.`,
- model: openai("gpt-4o-mini"),
+ model: "openai/gpt-5.1",
  tools: await testMcpClient.listTools(),
  });
  ```
@@ -144,8 +143,7 @@ Use the `.listTools()` method to fetch tools from all configured MCP servers. Th

  > See [listTools()](/reference/v1/tools/mcp-client#listtools) for more information.

- ```typescript {8} title="src/mastra/agents/test-agent.ts" showLineNumbers copy
- import { openai } from "@ai-sdk/openai";
+ ```typescript {7} title="src/mastra/agents/test-agent.ts" showLineNumbers copy
  import { Agent } from "@mastra/core/agent";

  import { testMcpClient } from "../mcp/test-mcp-client";