@mastra/mcp-docs-server 0.13.44 → 0.13.45
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/.docs/organized/changelogs/%40internal%2Fchangeset-cli.md +2 -0
- package/.docs/organized/changelogs/%40internal%2Fexternal-types.md +2 -0
- package/.docs/organized/changelogs/%40internal%2Fstorage-test-utils.md +201 -1
- package/.docs/organized/changelogs/%40internal%2Ftypes-builder.md +2 -0
- package/.docs/organized/changelogs/%40mastra%2Fagent-builder.md +201 -1
- package/.docs/organized/changelogs/%40mastra%2Fai-sdk.md +251 -51
- package/.docs/organized/changelogs/%40mastra%2Fastra.md +201 -1
- package/.docs/organized/changelogs/%40mastra%2Fchroma.md +201 -1
- package/.docs/organized/changelogs/%40mastra%2Fclickhouse.md +201 -1
- package/.docs/organized/changelogs/%40mastra%2Fclient-js.md +201 -1
- package/.docs/organized/changelogs/%40mastra%2Fcloud.md +201 -1
- package/.docs/organized/changelogs/%40mastra%2Fcloudflare-d1.md +201 -1
- package/.docs/organized/changelogs/%40mastra%2Fcloudflare.md +201 -1
- package/.docs/organized/changelogs/%40mastra%2Fcore.md +422 -222
- package/.docs/organized/changelogs/%40mastra%2Fcouchbase.md +201 -1
- package/.docs/organized/changelogs/%40mastra%2Fdeployer-cloud.md +201 -1
- package/.docs/organized/changelogs/%40mastra%2Fdeployer-cloudflare.md +201 -1
- package/.docs/organized/changelogs/%40mastra%2Fdeployer-netlify.md +201 -1
- package/.docs/organized/changelogs/%40mastra%2Fdeployer-vercel.md +201 -1
- package/.docs/organized/changelogs/%40mastra%2Fdeployer.md +201 -1
- package/.docs/organized/changelogs/%40mastra%2Fdynamodb.md +201 -1
- package/.docs/organized/changelogs/%40mastra%2Fevals.md +201 -1
- package/.docs/organized/changelogs/%40mastra%2Flance.md +201 -1
- package/.docs/organized/changelogs/%40mastra%2Flibsql.md +201 -1
- package/.docs/organized/changelogs/%40mastra%2Floggers.md +201 -1
- package/.docs/organized/changelogs/%40mastra%2Fmcp-docs-server.md +201 -1
- package/.docs/organized/changelogs/%40mastra%2Fmcp-registry-registry.md +201 -1
- package/.docs/organized/changelogs/%40mastra%2Fmcp.md +201 -1
- package/.docs/organized/changelogs/%40mastra%2Fmemory.md +201 -1
- package/.docs/organized/changelogs/%40mastra%2Fmongodb.md +201 -1
- package/.docs/organized/changelogs/%40mastra%2Fmssql.md +201 -1
- package/.docs/organized/changelogs/%40mastra%2Fopensearch.md +201 -1
- package/.docs/organized/changelogs/%40mastra%2Fpg.md +201 -1
- package/.docs/organized/changelogs/%40mastra%2Fpinecone.md +201 -1
- package/.docs/organized/changelogs/%40mastra%2Fplayground-ui.md +201 -1
- package/.docs/organized/changelogs/%40mastra%2Fqdrant.md +201 -1
- package/.docs/organized/changelogs/%40mastra%2Frag.md +201 -1
- package/.docs/organized/changelogs/%40mastra%2Freact.md +201 -1
- package/.docs/organized/changelogs/%40mastra%2Fschema-compat.md +72 -0
- package/.docs/organized/changelogs/%40mastra%2Fserver.md +201 -1
- package/.docs/organized/changelogs/%40mastra%2Fturbopuffer.md +201 -1
- package/.docs/organized/changelogs/%40mastra%2Fupstash.md +201 -1
- package/.docs/organized/changelogs/%40mastra%2Fvectorize.md +201 -1
- package/.docs/organized/changelogs/%40mastra%2Fvoice-azure.md +201 -1
- package/.docs/organized/changelogs/%40mastra%2Fvoice-cloudflare.md +201 -1
- package/.docs/organized/changelogs/%40mastra%2Fvoice-deepgram.md +201 -1
- package/.docs/organized/changelogs/%40mastra%2Fvoice-elevenlabs.md +201 -1
- package/.docs/organized/changelogs/%40mastra%2Fvoice-gladia.md +104 -1
- package/.docs/organized/changelogs/%40mastra%2Fvoice-google-gemini-live.md +49 -1
- package/.docs/organized/changelogs/%40mastra%2Fvoice-google.md +201 -1
- package/.docs/organized/changelogs/%40mastra%2Fvoice-murf.md +201 -1
- package/.docs/organized/changelogs/%40mastra%2Fvoice-openai-realtime.md +201 -1
- package/.docs/organized/changelogs/%40mastra%2Fvoice-openai.md +201 -1
- package/.docs/organized/changelogs/%40mastra%2Fvoice-playai.md +201 -1
- package/.docs/organized/changelogs/%40mastra%2Fvoice-sarvam.md +201 -1
- package/.docs/organized/changelogs/%40mastra%2Fvoice-speechify.md +201 -1
- package/.docs/organized/changelogs/create-mastra.md +201 -1
- package/.docs/organized/changelogs/mastra.md +201 -1
- package/.docs/raw/agents/adding-voice.mdx +49 -0
- package/.docs/raw/course/01-first-agent/05-running-playground.md +5 -5
- package/.docs/raw/course/01-first-agent/09-testing-your-agent.md +3 -3
- package/.docs/raw/course/01-first-agent/13-testing-your-tool.md +3 -3
- package/.docs/raw/course/01-first-agent/17-testing-memory.md +2 -2
- package/.docs/raw/course/04-workflows/07-using-playground.md +1 -1
- package/.docs/raw/frameworks/agentic-uis/ai-sdk.mdx +23 -1
- package/.docs/raw/reference/client-js/memory.mdx +43 -0
- package/.docs/raw/reference/core/mastra-class.mdx +8 -0
- package/.docs/raw/reference/core/mastra-model-gateway.mdx +223 -0
- package/.docs/raw/reference/scorers/answer-relevancy.mdx +28 -98
- package/.docs/raw/reference/scorers/answer-similarity.mdx +12 -258
- package/.docs/raw/reference/scorers/bias.mdx +29 -87
- package/.docs/raw/reference/scorers/completeness.mdx +32 -91
- package/.docs/raw/reference/scorers/content-similarity.mdx +29 -99
- package/.docs/raw/reference/scorers/context-precision.mdx +28 -130
- package/.docs/raw/reference/scorers/faithfulness.mdx +28 -101
- package/.docs/raw/reference/scorers/hallucination.mdx +28 -103
- package/.docs/raw/reference/scorers/keyword-coverage.mdx +28 -107
- package/.docs/raw/reference/scorers/textual-difference.mdx +27 -100
- package/.docs/raw/reference/scorers/tone-consistency.mdx +25 -98
- package/.docs/raw/reference/scorers/toxicity.mdx +29 -92
- package/.docs/raw/reference/storage/cloudflare-d1.mdx +37 -0
- package/.docs/raw/reference/storage/lance.mdx +33 -0
- package/.docs/raw/reference/storage/libsql.mdx +37 -0
- package/.docs/raw/reference/storage/mongodb.mdx +39 -0
- package/.docs/raw/reference/storage/mssql.mdx +37 -0
- package/.docs/raw/reference/storage/postgresql.mdx +37 -0
- package/.docs/raw/reference/streaming/agents/stream.mdx +7 -0
- package/.docs/raw/reference/voice/composite-voice.mdx +71 -28
- package/.docs/raw/reference/voice/voice.listen.mdx +86 -52
- package/.docs/raw/reference/voice/voice.speak.mdx +75 -40
- package/.docs/raw/voice/overview.mdx +67 -0
- package/.docs/raw/workflows/control-flow.mdx +180 -0
- package/CHANGELOG.md +20 -0
- package/dist/{chunk-TUAHUTTB.js → chunk-VE65X75W.js} +24 -4
- package/dist/prepare-docs/package-changes.d.ts.map +1 -1
- package/dist/prepare-docs/prepare.js +1 -1
- package/dist/stdio.js +1 -1
- package/package.json +5 -5
package/.docs/raw/reference/voice/voice.listen.mdx
CHANGED
@@ -7,41 +7,6 @@ description: "Documentation for the listen() method available in all Mastra voic
 
 The `listen()` method is a core function available in all Mastra voice providers that converts speech to text. It takes an audio stream as input and returns the transcribed text.
 
-## Usage Example
-
-```typescript
-import { OpenAIVoice } from "@mastra/voice-openai";
-import { getMicrophoneStream } from "@mastra/node-audio";
-import { createReadStream } from "fs";
-import path from "path";
-
-// Initialize a voice provider
-const voice = new OpenAIVoice({
-  listeningModel: {
-    name: "whisper-1",
-    apiKey: process.env.OPENAI_API_KEY,
-  },
-});
-
-// Basic usage with a file stream
-const audioFilePath = path.join(process.cwd(), "audio.mp3");
-const audioStream = createReadStream(audioFilePath);
-const transcript = await voice.listen(audioStream, {
-  filetype: "mp3",
-});
-console.log("Transcribed text:", transcript);
-
-// Using a microphone stream
-const microphoneStream = getMicrophoneStream(); // Assume this function gets audio input
-const transcription = await voice.listen(microphoneStream);
-
-// With provider-specific options
-const transcriptWithOptions = await voice.listen(audioStream, {
-  language: "en",
-  prompt: "This is a conversation about artificial intelligence.",
-});
-```
-
 ## Parameters
 
 <PropertiesTable
@@ -143,30 +108,42 @@ Each voice provider may support additional options specific to their implementat
   ]}
 />
 
-## Realtime Voice Providers
-
-When using realtime voice providers like `OpenAIRealtimeVoice`, the `listen()` method behaves differently:
-
-- Instead of returning transcribed text, it emits 'writing' events with the transcribed text
-- You need to register an event listener to receive the transcription
+## Usage Example
 
 ```typescript
-import { OpenAIRealtimeVoice } from "@mastra/voice-openai-realtime";
+import { OpenAIVoice } from "@mastra/voice-openai";
 import { getMicrophoneStream } from "@mastra/node-audio";
+import { createReadStream } from "fs";
+import path from "path";
 
-const voice = new OpenAIRealtimeVoice();
-await voice.connect();
+// Initialize a voice provider
+const voice = new OpenAIVoice({
+  listeningModel: {
+    name: "whisper-1",
+    apiKey: process.env.OPENAI_API_KEY,
+  },
+});
 
-// Register event listener for transcription
-voice.on("writing", ({ text, role }) => {
-  console.log(`${role}: ${text}`);
+// Basic usage with a file stream
+const audioFilePath = path.join(process.cwd(), "audio.mp3");
+const audioStream = createReadStream(audioFilePath);
+const transcript = await voice.listen(audioStream, {
+  filetype: "mp3",
 });
+console.log("Transcribed text:", transcript);
 
-// This will emit 'writing' events instead of returning text
-const microphoneStream = getMicrophoneStream();
-await voice.listen(microphoneStream);
+// Using a microphone stream
+const microphoneStream = getMicrophoneStream(); // Assume this function gets audio input
+const transcription = await voice.listen(microphoneStream);
+
+// With provider-specific options
+const transcriptWithOptions = await voice.listen(audioStream, {
+  language: "en",
+  prompt: "This is a conversation about artificial intelligence.",
+});
 ```
 
+
 ## Using with CompositeVoice
 
 When using `CompositeVoice`, the `listen()` method delegates to the configured listening provider:
@@ -177,14 +154,70 @@ import { OpenAIVoice } from "@mastra/voice-openai";
 import { PlayAIVoice } from "@mastra/voice-playai";
 
 const voice = new CompositeVoice({
-  listenProvider: new OpenAIVoice(),
-  speakProvider: new PlayAIVoice(),
+  input: new OpenAIVoice(),
+  output: new PlayAIVoice(),
 });
 
 // This will use the OpenAIVoice provider
 const transcript = await voice.listen(audioStream);
 ```
 
+### Using AI SDK Model Providers
+
+You can also use AI SDK transcription models directly with `CompositeVoice`:
+
+```typescript
+import { CompositeVoice } from "@mastra/core/voice";
+import { openai } from "@ai-sdk/openai";
+import { groq } from "@ai-sdk/groq";
+
+// Use AI SDK transcription models
+const voice = new CompositeVoice({
+  input: openai.transcription('whisper-1'), // AI SDK model
+  output: new PlayAIVoice(), // Mastra provider
+});
+
+// Works the same way
+const transcript = await voice.listen(audioStream);
+
+// Provider-specific options can be passed through
+const transcriptWithOptions = await voice.listen(audioStream, {
+  providerOptions: {
+    openai: {
+      language: 'en',
+      prompt: 'This is about AI',
+    }
+  }
+});
+```
+
+See the [CompositeVoice reference](/reference/voice/composite-voice) for more details on AI SDK integration.
+
+
+## Realtime Voice Providers
+
+When using realtime voice providers like `OpenAIRealtimeVoice`, the `listen()` method behaves differently:
+
+- Instead of returning transcribed text, it emits 'writing' events with the transcribed text
+- You need to register an event listener to receive the transcription
+
+```typescript
+import { OpenAIRealtimeVoice } from "@mastra/voice-openai-realtime";
+import { getMicrophoneStream } from "@mastra/node-audio";
+
+const voice = new OpenAIRealtimeVoice();
+await voice.connect();
+
+// Register event listener for transcription
+voice.on("writing", ({ text, role }) => {
+  console.log(`${role}: ${text}`);
+});
+
+// This will emit 'writing' events instead of returning text
+const microphoneStream = getMicrophoneStream();
+await voice.listen(microphoneStream);
+```
+
 ## Notes
 
 - Not all voice providers support speech-to-text functionality (e.g., PlayAI, Speechify)
@@ -194,6 +227,7 @@ const transcript = await voice.listen(audioStream);
 - Some providers support streaming transcription, where text is returned as it's transcribed
 - For best performance, consider closing or ending the audio stream when you're done with it
 
+
 ## Related Methods
 
 - [voice.speak()](./voice.speak) - Converts text to speech
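A note on the realtime behavior documented in the hunks above: because `listen()` on a realtime provider resolves without returning text, callers typically accumulate the `'writing'` events themselves. A minimal sketch of that pattern, using the `OpenAIRealtimeVoice` and `getMicrophoneStream` APIs shown in the diff; the `role === "user"` filter is an assumption about how roles are labeled:

```typescript
import { OpenAIRealtimeVoice } from "@mastra/voice-openai-realtime";
import { getMicrophoneStream } from "@mastra/node-audio";

// Gather 'writing' events into a single transcript string.
async function transcribeRealtime(): Promise<string> {
  const voice = new OpenAIRealtimeVoice();
  await voice.connect();

  const chunks: string[] = [];
  voice.on("writing", ({ text, role }) => {
    if (role === "user") chunks.push(text); // assumption: user speech is labeled "user"
  });

  // Resolves without a return value; the text arrives via the events above.
  await voice.listen(getMicrophoneStream());
  return chunks.join("");
}
```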
package/.docs/raw/reference/voice/voice.speak.mdx
CHANGED
@@ -7,31 +7,6 @@ description: "Documentation for the speak() method available in all Mastra voice
 
 The `speak()` method is a core function available in all Mastra voice providers that converts text to speech. It takes text input and returns an audio stream that can be played or saved.
 
-## Usage Example
-
-```typescript
-import { OpenAIVoice } from "@mastra/voice-openai";
-// Initialize a voice provider
-const voice = new OpenAIVoice({
-  speaker: "alloy", // Default voice
-});
-// Basic usage with default settings
-const audioStream = await voice.speak("Hello, world!");
-// Using a different voice for this specific request
-const audioStreamWithDifferentVoice = await voice.speak("Hello again!", {
-  speaker: "nova",
-});
-// Using provider-specific options
-const audioStreamWithOptions = await voice.speak("Hello with options!", {
-  speaker: "echo",
-  speed: 1.2, // OpenAI-specific option
-});
-// Using a text stream as input
-import { Readable } from "stream";
-const textStream = Readable.from(["Hello", " from", " a", " stream!"]);
-const audioStreamFromTextStream = await voice.speak(textStream);
-```
-
 ## Parameters
 
 <PropertiesTable
@@ -153,6 +128,81 @@ Each voice provider may support additional options specific to their implementat
   ]}
 />
 
+## Usage Example
+
+```typescript
+import { OpenAIVoice } from "@mastra/voice-openai";
+// Initialize a voice provider
+const voice = new OpenAIVoice({
+  speaker: "alloy", // Default voice
+});
+// Basic usage with default settings
+const audioStream = await voice.speak("Hello, world!");
+// Using a different voice for this specific request
+const audioStreamWithDifferentVoice = await voice.speak("Hello again!", {
+  speaker: "nova",
+});
+// Using provider-specific options
+const audioStreamWithOptions = await voice.speak("Hello with options!", {
+  speaker: "echo",
+  speed: 1.2, // OpenAI-specific option
+});
+// Using a text stream as input
+import { Readable } from "stream";
+const textStream = Readable.from(["Hello", " from", " a", " stream!"]);
+const audioStreamFromTextStream = await voice.speak(textStream);
+```
+
+## Using with CompositeVoice
+
+When using `CompositeVoice`, the `speak()` method delegates to the configured speaking provider:
+
+```typescript
+import { CompositeVoice } from "@mastra/core/voice";
+import { OpenAIVoice } from "@mastra/voice-openai";
+import { PlayAIVoice } from "@mastra/voice-playai";
+
+const voice = new CompositeVoice({
+  output: new PlayAIVoice(),
+  input: new OpenAIVoice(),
+});
+
+// This will use the PlayAIVoice provider
+const audioStream = await voice.speak("Hello, world!");
+```
+
+### Using AI SDK Model Providers
+
+You can also use AI SDK speech models directly with `CompositeVoice`:
+
+```typescript
+import { CompositeVoice } from "@mastra/core/voice";
+import { openai } from "@ai-sdk/openai";
+import { elevenlabs } from "@ai-sdk/elevenlabs";
+
+// Use AI SDK speech models
+const voice = new CompositeVoice({
+  output: elevenlabs.speech('eleven_turbo_v2'), // AI SDK model
+  input: openai.transcription('whisper-1'), // AI SDK model
+});
+
+// Works the same way
+const audioStream = await voice.speak("Hello from AI SDK!");
+
+// Provider-specific options can be passed through
+const audioWithOptions = await voice.speak("Hello with options!", {
+  speaker: 'Rachel', // ElevenLabs voice
+  providerOptions: {
+    elevenlabs: {
+      stability: 0.5,
+      similarity_boost: 0.75,
+    }
+  }
+});
+```
+
+See the [CompositeVoice reference](/reference/voice/composite-voice) for more details on AI SDK integration.
+
 ## Realtime Voice Providers
 
 When using realtime voice providers like `OpenAIRealtimeVoice`, the `speak()` method behaves differently:
@@ -181,21 +231,6 @@ voice.on("speaker", (stream) => {
 await voice.speak("Hello, this is realtime speech!");
 ```
 
-## Using with CompositeVoice
-
-When using `CompositeVoice`, the `speak()` method delegates to the configured speaking provider:
-
-```typescript
-import { CompositeVoice } from "@mastra/core/voice";
-import { OpenAIVoice } from "@mastra/voice-openai";
-import { PlayAIVoice } from "@mastra/voice-playai";
-const voice = new CompositeVoice({
-  speakProvider: new PlayAIVoice(),
-  listenProvider: new OpenAIVoice(),
-});
-// This will use the PlayAIVoice provider
-const audioStream = await voice.speak("Hello, world!");
-```
 
 ## Notes
 
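Since `speak()` returns an audio stream that "can be played or saved" (per the doc text above), a natural companion to these examples is writing the stream to disk. A minimal sketch using Node's standard stream utilities; the `.mp3` extension assumes the provider's default output format:

```typescript
import { createWriteStream } from "fs";
import { pipeline } from "stream/promises";
import { OpenAIVoice } from "@mastra/voice-openai";

const voice = new OpenAIVoice({ speaker: "alloy" });

// speak() resolves to a readable audio stream; pipe it straight to a file.
const audioStream = await voice.speak("Hello, world!");
await pipeline(audioStream, createWriteStream("hello.mp3"));
```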
package/.docs/raw/voice/overview.mdx
CHANGED
@@ -898,6 +898,30 @@ const voice = new GeminiLiveVoice({
 Visit the [Google Gemini Live Reference](/reference/voice/google-gemini-live) for more information on the Google Gemini Live voice provider.
 
 </TabItem>
+<TabItem value="aisdk" label="AI SDK">
+
+```typescript
+// AI SDK Voice Configuration
+import { CompositeVoice } from "@mastra/core/voice";
+import { openai } from "@ai-sdk/openai";
+import { elevenlabs } from "@ai-sdk/elevenlabs";
+
+// Use AI SDK models directly - no need to install separate packages
+const voice = new CompositeVoice({
+  input: openai.transcription('whisper-1'), // AI SDK transcription
+  output: elevenlabs.speech('eleven_turbo_v2'), // AI SDK speech
+});
+
+// Works seamlessly with your agent
+const voiceAgent = new Agent({
+  id: "aisdk-voice-agent",
+  name: "AI SDK Voice Agent",
+  instructions: "You are a helpful assistant with voice capabilities.",
+  model: openai("gpt-4o"),
+  voice,
+});
+```
+</TabItem>
 </Tabs>
 
 ### Using Multiple Voice Providers
@@ -951,6 +975,49 @@ const responseAudio = await voice.speak(`You said: ${transcript}`, {
 playAudio(responseAudio);
 ```
 
+### Using AI SDK Model Providers
+
+You can also use AI SDK models directly with `CompositeVoice`:
+
+```typescript
+import { CompositeVoice } from "@mastra/core/voice";
+import { openai } from "@ai-sdk/openai";
+import { elevenlabs } from "@ai-sdk/elevenlabs";
+import { playAudio, getMicrophoneStream } from "@mastra/node-audio";
+
+// Use AI SDK models directly - no provider setup needed
+const voice = new CompositeVoice({
+  input: openai.transcription('whisper-1'), // AI SDK transcription
+  output: elevenlabs.speech('eleven_turbo_v2'), // AI SDK speech
+});
+
+// Works the same way as Mastra providers
+const audioStream = getMicrophoneStream();
+const transcript = await voice.listen(audioStream);
+
+console.log("Transcribed text:", transcript);
+
+// Convert text to speech
+const responseAudio = await voice.speak(`You said: ${transcript}`, {
+  speaker: "Rachel", // ElevenLabs voice
+});
+
+playAudio(responseAudio);
+```
+
+You can also mix AI SDK models with Mastra providers:
+
+```typescript
+import { CompositeVoice } from "@mastra/core/voice";
+import { PlayAIVoice } from "@mastra/voice-playai";
+import { groq } from "@ai-sdk/groq";
+
+const voice = new CompositeVoice({
+  input: groq.transcription('whisper-large-v3'), // AI SDK for STT
+  output: new PlayAIVoice(), // Mastra provider for TTS
+});
+```
+
 For more information on the CompositeVoice, refer to the [CompositeVoice Reference](/reference/voice/composite-voice).
 
 ## More Resources
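To round out the `voiceAgent` example added in the overview above: once a voice is attached, the agent exposes it via `agent.voice`, so a reply can be generated and then spoken with the same `CompositeVoice`. A short sketch; the `response.text` shape assumes the agent `generate()` pattern used elsewhere in these docs:

```typescript
import { playAudio } from "@mastra/node-audio";

// Generate a text reply, then speak it with the voice configured on the agent.
const response = await voiceAgent.generate("What can you help me with?");
const audio = await voiceAgent.voice.speak(response.text);
playAudio(audio);
```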
package/.docs/raw/workflows/control-flow.mdx
CHANGED
@@ -112,6 +112,70 @@ export const testWorkflow = createWorkflow({
 
 > 📹 Watch: How to run steps in parallel and optimize your Mastra workflow → [YouTube (3 minutes)](https://youtu.be/GQJxve5Hki4)
 
+### Output structure
+
+When steps run in parallel, the output is an object where each key is the step's `id` and the value is that step's output. This allows you to access each parallel step's result independently.
+
+```typescript title="src/mastra/workflows/test-workflow.ts" showLineNumbers copy
+const step1 = createStep({
+  id: "format-step",
+  inputSchema: z.object({ message: z.string() }),
+  outputSchema: z.object({ formatted: z.string() }),
+  execute: async ({ inputData }) => ({
+    formatted: inputData.message.toUpperCase()
+  })
+});
+
+const step2 = createStep({
+  id: "count-step",
+  inputSchema: z.object({ message: z.string() }),
+  outputSchema: z.object({ count: z.number() }),
+  execute: async ({ inputData }) => ({
+    count: inputData.message.length
+  })
+});
+
+const step3 = createStep({
+  id: "combine-step",
+  // The inputSchema must match the structure of parallel outputs
+  inputSchema: z.object({
+    "format-step": z.object({ formatted: z.string() }),
+    "count-step": z.object({ count: z.number() })
+  }),
+  outputSchema: z.object({ result: z.string() }),
+  execute: async ({ inputData }) => {
+    // Access each parallel step's output by its id
+    const formatted = inputData["format-step"].formatted;
+    const count = inputData["count-step"].count;
+    return {
+      result: `${formatted} (${count} characters)`
+    };
+  }
+});
+
+export const testWorkflow = createWorkflow({
+  id: "parallel-output-example",
+  inputSchema: z.object({ message: z.string() }),
+  outputSchema: z.object({ result: z.string() })
+})
+  .parallel([step1, step2])
+  .then(step3)
+  .commit();
+
+// When executed with { message: "hello" }
+// The parallel output structure will be:
+// {
+//   "format-step": { formatted: "HELLO" },
+//   "count-step": { count: 5 }
+// }
+```
+
+**Key points:**
+- Each parallel step's output is keyed by its `id`
+- All parallel steps execute simultaneously
+- The next step receives an object containing all parallel step outputs
+- You must define the `inputSchema` of the following step to match this structure
+
 ## Conditional logic with `.branch()`
 
 Use `.branch()` to choose which step to run based on a condition. All steps in a branch need the same `inputSchema` and `outputSchema` because branching requires consistent schemas so workflows can follow different paths.
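To see the parallel output shape above at runtime, here is a hypothetical run of the `parallel-output-example` workflow, assuming Mastra's usual `createRunAsync()`/`start()` run API and that the workflow is registered on a `mastra` instance:

```typescript
const run = await mastra.getWorkflow("testWorkflow").createRunAsync();
const result = await run.start({ inputData: { message: "hello" } });

if (result.status === "success") {
  // step3 has already combined the keyed parallel outputs:
  console.log(result.result); // { result: "HELLO (5 characters)" }
}
```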
@@ -158,6 +222,85 @@ export const testWorkflow = createWorkflow({
   .commit();
 ```
 
+### Output structure
+
+When using conditional branching, only one branch executes based on which condition evaluates to `true` first. The output structure is similar to `.parallel()`, where the result is keyed by the executed step's `id`.
+
+```typescript title="src/mastra/workflows/test-workflow.ts" showLineNumbers copy
+const step1 = createStep({
+  id: "initial-step",
+  inputSchema: z.object({ value: z.number() }),
+  outputSchema: z.object({ value: z.number() }),
+  execute: async ({ inputData }) => inputData
+});
+
+const highValueStep = createStep({
+  id: "high-value-step",
+  inputSchema: z.object({ value: z.number() }),
+  outputSchema: z.object({ result: z.string() }),
+  execute: async ({ inputData }) => ({
+    result: `High value: ${inputData.value}`
+  })
+});
+
+const lowValueStep = createStep({
+  id: "low-value-step",
+  inputSchema: z.object({ value: z.number() }),
+  outputSchema: z.object({ result: z.string() }),
+  execute: async ({ inputData }) => ({
+    result: `Low value: ${inputData.value}`
+  })
+});
+
+const finalStep = createStep({
+  id: "final-step",
+  // The inputSchema must account for either branch's output
+  inputSchema: z.object({
+    "high-value-step": z.object({ result: z.string() }).optional(),
+    "low-value-step": z.object({ result: z.string() }).optional()
+  }),
+  outputSchema: z.object({ message: z.string() }),
+  execute: async ({ inputData }) => {
+    // Only one branch will have executed
+    const result = inputData["high-value-step"]?.result ||
+      inputData["low-value-step"]?.result;
+    return { message: result };
+  }
+});
+
+export const testWorkflow = createWorkflow({
+  id: "branch-output-example",
+  inputSchema: z.object({ value: z.number() }),
+  outputSchema: z.object({ message: z.string() })
+})
+  .then(step1)
+  .branch([
+    [async ({ inputData }) => inputData.value > 10, highValueStep],
+    [async ({ inputData }) => inputData.value <= 10, lowValueStep]
+  ])
+  .then(finalStep)
+  .commit();
+
+// When executed with { value: 15 }
+// Only the high-value-step executes, output structure:
+// {
+//   "high-value-step": { result: "High value: 15" }
+// }
+
+// When executed with { value: 5 }
+// Only the low-value-step executes, output structure:
+// {
+//   "low-value-step": { result: "Low value: 5" }
+// }
+```
+
+**Key points:**
+- Only one branch executes based on condition evaluation order
+- The output is keyed by the executed step's `id`
+- Subsequent steps should handle all possible branch outputs
+- Use optional fields in the `inputSchema` when the next step needs to handle multiple possible branches
+- Conditions are evaluated in the order they're defined
+
 ## Input data mapping
 
 When using `.then()`, `.parallel()`, or `.branch()`, it is sometimes necessary to transform the output of a previous step to match the input of the next. In these cases you can use `.map()` to access the `inputData` and transform it to create a suitable data shape for the next step.
@@ -188,6 +331,43 @@ The `.map()` method provides additional helper functions for more complex mappin
 - [`getInitData()`](/reference/workflows/workflow-methods/map#using-getinitdata): Access the workflow's initial input data
 - [`mapVariable()`](/reference/workflows/workflow-methods/map#using-mapvariable): Use declarative object syntax to extract and rename fields
 
+### Parallel and Branch outputs
+
+When working with `.parallel()` or `.branch()` outputs, you can use `.map()` to transform the data structure before passing it to the next step. This is especially useful when you need to flatten or restructure the output.
+
+```typescript title="src/mastra/workflows/test-workflow.ts" showLineNumbers copy
+export const testWorkflow = createWorkflow({...})
+  .parallel([step1, step2])
+  .map(async ({ inputData }) => {
+    // Transform the parallel output structure
+    return {
+      combined: `${inputData["step1"].value} - ${inputData["step2"].value}`
+    };
+  })
+  .then(nextStep)
+  .commit();
+```
+
+You can also use the helper functions provided by `.map()`:
+
+```typescript title="src/mastra/workflows/test-workflow.ts" showLineNumbers copy
+export const testWorkflow = createWorkflow({...})
+  .branch([
+    [condition1, stepA],
+    [condition2, stepB]
+  ])
+  .map(async ({ inputData, getStepResult }) => {
+    // Access specific step results
+    const stepAResult = getStepResult("stepA");
+    const stepBResult = getStepResult("stepB");
+
+    // Return the result from whichever branch executed
+    return stepAResult || stepBResult;
+  })
+  .then(nextStep)
+  .commit();
+```
+
 ## Looping steps
 
 Workflows support different looping methods that let you repeat steps until or while a condition is met, or iterate over arrays. Loops can be combined with other control methods like `.then()`.
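One helper from the hunk above worth illustrating is `getInitData()`: it lets a `.map()` callback recover the workflow's original input after intermediate transformations. A minimal sketch reusing the illustrative `step1`/`step2`/`nextStep` names from the diff; destructuring `getInitData` follows the same callback pattern the docs use for `getStepResult`:

```typescript
export const testWorkflow = createWorkflow({
  id: "init-data-example",
  inputSchema: z.object({ message: z.string() }),
  outputSchema: z.object({ result: z.string() }),
})
  .parallel([step1, step2])
  .map(async ({ inputData, getInitData }) => {
    // Recover the workflow's original input alongside the parallel outputs.
    const init = getInitData();
    return { result: `${init.message}: ${JSON.stringify(inputData)}` };
  })
  .then(nextStep)
  .commit();
```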
package/CHANGELOG.md
CHANGED
@@ -1,5 +1,25 @@
 # @mastra/mcp-docs-server
 
+## 0.13.45
+
+### Patch Changes
+
+- Ensure changelog truncation includes at least 2 versions before cutting off ([#10576](https://github.com/mastra-ai/mastra/pull/10576))
+
+- Updated dependencies [[`5657314`](https://github.com/mastra-ai/mastra/commit/5657314a1f9d49019bb53f357fa48f75a69247ca), [`e5aca78`](https://github.com/mastra-ai/mastra/commit/e5aca78bb7f263bb8b470bedae81efe9805d7544), [`33a607a`](https://github.com/mastra-ai/mastra/commit/33a607a1f716c2029d4a1ff1603dd756129a33b3), [`cc10fc1`](https://github.com/mastra-ai/mastra/commit/cc10fc192d9f527c71a23cc9def10d8718935ee1), [`1f7ee84`](https://github.com/mastra-ai/mastra/commit/1f7ee841a643ef12d90392125881f06fdf877293), [`e7d5149`](https://github.com/mastra-ai/mastra/commit/e7d514995260b63b2108308e85c64de37dcd0f71), [`f195082`](https://github.com/mastra-ai/mastra/commit/f1950822a2425d5ccae78c5d010e02ddb027a869), [`d9986dd`](https://github.com/mastra-ai/mastra/commit/d9986dd3513f7ca3244a8e599a440ccf4d8bc28b), [`a45b0f0`](https://github.com/mastra-ai/mastra/commit/a45b0f0cd19eab1fe4deceae3abf029442c22f74), [`f6e8eb3`](https://github.com/mastra-ai/mastra/commit/f6e8eb3dac53b70b06e906b2818b1d2a5b0486d7), [`ce57a2b`](https://github.com/mastra-ai/mastra/commit/ce57a2b62fd0d5f6532e4ecd1ba9ba93ac9b95fc), [`3236f35`](https://github.com/mastra-ai/mastra/commit/3236f352ae13cc8552c2965164e97bd125dae48d), [`ce57a2b`](https://github.com/mastra-ai/mastra/commit/ce57a2b62fd0d5f6532e4ecd1ba9ba93ac9b95fc), [`0230321`](https://github.com/mastra-ai/mastra/commit/02303217870bedea0ef009bea9a952f24ed38aaf), [`7b541f4`](https://github.com/mastra-ai/mastra/commit/7b541f49eda6f5a87b738198edbd136927599475), [`0eea842`](https://github.com/mastra-ai/mastra/commit/0eea8423cbdd37f2111593c6f7d2efcde4b7e4ce), [`8812fb8`](https://github.com/mastra-ai/mastra/commit/8812fb86ec16fa7096d92dbf82b4fd187b56fa61), [`63ae8a2`](https://github.com/mastra-ai/mastra/commit/63ae8a22c0c09bbb8b9779f5f38934cd75f616af), [`bf810c5`](https://github.com/mastra-ai/mastra/commit/bf810c5c561bd8ef221c0f6bd84e69770b9a38cc), [`ac7ef07`](https://github.com/mastra-ai/mastra/commit/ac7ef07633caee89707142171d2873c888ffef85), [`522f0b4`](https://github.com/mastra-ai/mastra/commit/522f0b45330719858794eabffffde4f343f55549), [`bf810c5`](https://github.com/mastra-ai/mastra/commit/bf810c5c561bd8ef221c0f6bd84e69770b9a38cc), [`8b51d55`](https://github.com/mastra-ai/mastra/commit/8b51d55bae531edf7e383958d7ecee04df31f5d5), [`2131ac5`](https://github.com/mastra-ai/mastra/commit/2131ac571d5065f0a656c57494bca98691bb7609)]:
+  - @mastra/core@0.24.6
+  - @mastra/mcp@0.14.4
+
+## 0.13.45-alpha.0
+
+### Patch Changes
+
+- Ensure changelog truncation includes at least 2 versions before cutting off ([#10576](https://github.com/mastra-ai/mastra/pull/10576))
+
+- Updated dependencies [[`5657314`](https://github.com/mastra-ai/mastra/commit/5657314a1f9d49019bb53f357fa48f75a69247ca), [`e5aca78`](https://github.com/mastra-ai/mastra/commit/e5aca78bb7f263bb8b470bedae81efe9805d7544), [`33a607a`](https://github.com/mastra-ai/mastra/commit/33a607a1f716c2029d4a1ff1603dd756129a33b3), [`cc10fc1`](https://github.com/mastra-ai/mastra/commit/cc10fc192d9f527c71a23cc9def10d8718935ee1), [`1f7ee84`](https://github.com/mastra-ai/mastra/commit/1f7ee841a643ef12d90392125881f06fdf877293), [`e7d5149`](https://github.com/mastra-ai/mastra/commit/e7d514995260b63b2108308e85c64de37dcd0f71), [`f195082`](https://github.com/mastra-ai/mastra/commit/f1950822a2425d5ccae78c5d010e02ddb027a869), [`d9986dd`](https://github.com/mastra-ai/mastra/commit/d9986dd3513f7ca3244a8e599a440ccf4d8bc28b), [`a45b0f0`](https://github.com/mastra-ai/mastra/commit/a45b0f0cd19eab1fe4deceae3abf029442c22f74), [`f6e8eb3`](https://github.com/mastra-ai/mastra/commit/f6e8eb3dac53b70b06e906b2818b1d2a5b0486d7), [`ce57a2b`](https://github.com/mastra-ai/mastra/commit/ce57a2b62fd0d5f6532e4ecd1ba9ba93ac9b95fc), [`3236f35`](https://github.com/mastra-ai/mastra/commit/3236f352ae13cc8552c2965164e97bd125dae48d), [`ce57a2b`](https://github.com/mastra-ai/mastra/commit/ce57a2b62fd0d5f6532e4ecd1ba9ba93ac9b95fc), [`0230321`](https://github.com/mastra-ai/mastra/commit/02303217870bedea0ef009bea9a952f24ed38aaf), [`7b541f4`](https://github.com/mastra-ai/mastra/commit/7b541f49eda6f5a87b738198edbd136927599475), [`0eea842`](https://github.com/mastra-ai/mastra/commit/0eea8423cbdd37f2111593c6f7d2efcde4b7e4ce), [`8812fb8`](https://github.com/mastra-ai/mastra/commit/8812fb86ec16fa7096d92dbf82b4fd187b56fa61), [`63ae8a2`](https://github.com/mastra-ai/mastra/commit/63ae8a22c0c09bbb8b9779f5f38934cd75f616af), [`bf810c5`](https://github.com/mastra-ai/mastra/commit/bf810c5c561bd8ef221c0f6bd84e69770b9a38cc), [`ac7ef07`](https://github.com/mastra-ai/mastra/commit/ac7ef07633caee89707142171d2873c888ffef85), [`522f0b4`](https://github.com/mastra-ai/mastra/commit/522f0b45330719858794eabffffde4f343f55549), [`bf810c5`](https://github.com/mastra-ai/mastra/commit/bf810c5c561bd8ef221c0f6bd84e69770b9a38cc), [`8b51d55`](https://github.com/mastra-ai/mastra/commit/8b51d55bae531edf7e383958d7ecee04df31f5d5), [`2131ac5`](https://github.com/mastra-ai/mastra/commit/2131ac571d5065f0a656c57494bca98691bb7609)]:
+  - @mastra/core@0.24.6-alpha.0
+  - @mastra/mcp@0.14.4-alpha.0
+
 ## 0.13.44
 
 ### Patch Changes
package/dist/{chunk-TUAHUTTB.js → chunk-VE65X75W.js}
CHANGED
@@ -271,15 +271,35 @@ var SOURCE_DIRS = ["packages", "speech", "stores", "voice", "integrations", "dep
   fromRepoRoot
 );
 var CHANGELOGS_DEST = fromPackageRoot(".docs/organized/changelogs");
-var MAX_LINES =
+var MAX_LINES = 500;
+var MIN_VERSIONS = 2;
+var VERSION_HEADER_REGEX = /^## \d+\.\d+\.\d+/;
 function truncateContent(content, maxLines) {
   const lines = content.split("\n");
   if (lines.length <= maxLines) return content;
-  const
-
-
+  const versionIndices = [];
+  for (let i = 0; i < lines.length; i++) {
+    if (VERSION_HEADER_REGEX.test(lines[i])) {
+      versionIndices.push(i);
+    }
+  }
+  const versionsInMaxLines = versionIndices.filter((idx) => idx < maxLines).length;
+  let cutoffLine;
+  if (versionsInMaxLines >= MIN_VERSIONS) {
+    cutoffLine = maxLines;
+  } else if (versionIndices.length > MIN_VERSIONS) {
+    cutoffLine = versionIndices[MIN_VERSIONS];
+  } else {
+    cutoffLine = lines.length;
+  }
+  const visibleLines = lines.slice(0, cutoffLine);
+  const hiddenCount = lines.length - cutoffLine;
+  if (hiddenCount > 0) {
+    return visibleLines.join("\n") + `
 
 ... ${hiddenCount} more lines hidden. See full changelog in package directory.`;
+  }
+  return visibleLines.join("\n");
 }
 async function processPackageDir(packagePath, outputDir) {
   let packageName;
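A quick illustration of the behavior this hunk implements (the patch note above: "Ensure changelog truncation includes at least 2 versions before cutting off"): when fewer than two `## x.y.z` headers fall within the first 500 lines, the cutoff moves to the start of the third version header so the first two versions survive intact. A hypothetical call:

```typescript
// Hypothetical: a changelog whose first version entry alone spans 600 lines.
const changelog = [
  "# @mastra/example",
  "## 1.2.0",
  ...Array.from({ length: 600 }, (_, i) => `- change ${i}`),
  "## 1.1.0",
  "- small fix",
  "## 1.0.0",
  "- initial release",
].join("\n");

// Only one "## x.y.z" header sits inside the first 500 lines, so the
// cutoff becomes versionIndices[2] (the "## 1.0.0" header), keeping
// 1.2.0 and 1.1.0 complete before the "... more lines hidden" notice.
const truncated = truncateContent(changelog, 500);
```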
package/dist/prepare-docs/package-changes.d.ts.map
CHANGED
@@ -1 +1 @@
-{"version":3,"file":"package-changes.d.ts","sourceRoot":"","sources":["../../src/prepare-docs/package-changes.ts"],"names":[],"mappings":"
+{"version":3,"file":"package-changes.d.ts","sourceRoot":"","sources":["../../src/prepare-docs/package-changes.ts"],"names":[],"mappings":"AAiGA;;GAEG;AACH,wBAAsB,qBAAqB,kBAmC1C"}
package/dist/prepare-docs/prepare.js
CHANGED
@@ -1 +1 @@
-export { prepare } from '../chunk-TUAHUTTB.js';
+export { prepare } from '../chunk-VE65X75W.js';