@mastra/mcp-docs-server 0.13.7-alpha.4 → 0.13.7

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (77)
  1. package/.docs/organized/changelogs/%40mastra%2Fastra.md +8 -8
  2. package/.docs/organized/changelogs/%40mastra%2Fchroma.md +8 -8
  3. package/.docs/organized/changelogs/%40mastra%2Fclickhouse.md +8 -8
  4. package/.docs/organized/changelogs/%40mastra%2Fclient-js.md +7 -7
  5. package/.docs/organized/changelogs/%40mastra%2Fcloud.md +8 -8
  6. package/.docs/organized/changelogs/%40mastra%2Fcloudflare-d1.md +8 -8
  7. package/.docs/organized/changelogs/%40mastra%2Fcloudflare.md +9 -9
  8. package/.docs/organized/changelogs/%40mastra%2Fcore.md +3 -3
  9. package/.docs/organized/changelogs/%40mastra%2Fcouchbase.md +8 -8
  10. package/.docs/organized/changelogs/%40mastra%2Fdeployer-cloudflare.md +10 -10
  11. package/.docs/organized/changelogs/%40mastra%2Fdeployer-netlify.md +10 -10
  12. package/.docs/organized/changelogs/%40mastra%2Fdeployer-vercel.md +10 -10
  13. package/.docs/organized/changelogs/%40mastra%2Fdeployer.md +13 -13
  14. package/.docs/organized/changelogs/%40mastra%2Fdynamodb.md +8 -8
  15. package/.docs/organized/changelogs/%40mastra%2Fevals.md +11 -11
  16. package/.docs/organized/changelogs/%40mastra%2Ffirecrawl.md +10 -10
  17. package/.docs/organized/changelogs/%40mastra%2Fgithub.md +8 -8
  18. package/.docs/organized/changelogs/%40mastra%2Flance.md +7 -0
  19. package/.docs/organized/changelogs/%40mastra%2Flibsql.md +11 -11
  20. package/.docs/organized/changelogs/%40mastra%2Floggers.md +8 -8
  21. package/.docs/organized/changelogs/%40mastra%2Fmcp-docs-server.md +9 -9
  22. package/.docs/organized/changelogs/%40mastra%2Fmcp-registry-registry.md +8 -8
  23. package/.docs/organized/changelogs/%40mastra%2Fmcp.md +9 -9
  24. package/.docs/organized/changelogs/%40mastra%2Fmem0.md +8 -8
  25. package/.docs/organized/changelogs/%40mastra%2Fmemory.md +11 -11
  26. package/.docs/organized/changelogs/%40mastra%2Fmongodb.md +8 -8
  27. package/.docs/organized/changelogs/%40mastra%2Fmssql.md +7 -0
  28. package/.docs/organized/changelogs/%40mastra%2Fopensearch.md +8 -8
  29. package/.docs/organized/changelogs/%40mastra%2Fpg.md +11 -11
  30. package/.docs/organized/changelogs/%40mastra%2Fpinecone.md +9 -9
  31. package/.docs/organized/changelogs/%40mastra%2Fplayground-ui.md +10 -10
  32. package/.docs/organized/changelogs/%40mastra%2Fqdrant.md +9 -9
  33. package/.docs/organized/changelogs/%40mastra%2Frag.md +8 -8
  34. package/.docs/organized/changelogs/%40mastra%2Fragie.md +8 -8
  35. package/.docs/organized/changelogs/%40mastra%2Fserver.md +11 -11
  36. package/.docs/organized/changelogs/%40mastra%2Fturbopuffer.md +9 -9
  37. package/.docs/organized/changelogs/%40mastra%2Fupstash.md +11 -11
  38. package/.docs/organized/changelogs/%40mastra%2Fvectorize.md +8 -8
  39. package/.docs/organized/changelogs/%40mastra%2Fvoice-azure.md +8 -8
  40. package/.docs/organized/changelogs/%40mastra%2Fvoice-cloudflare.md +8 -8
  41. package/.docs/organized/changelogs/%40mastra%2Fvoice-deepgram.md +8 -8
  42. package/.docs/organized/changelogs/%40mastra%2Fvoice-elevenlabs.md +8 -8
  43. package/.docs/organized/changelogs/%40mastra%2Fvoice-google.md +8 -8
  44. package/.docs/organized/changelogs/%40mastra%2Fvoice-murf.md +8 -8
  45. package/.docs/organized/changelogs/%40mastra%2Fvoice-openai-realtime.md +8 -8
  46. package/.docs/organized/changelogs/%40mastra%2Fvoice-openai.md +8 -8
  47. package/.docs/organized/changelogs/%40mastra%2Fvoice-playai.md +8 -8
  48. package/.docs/organized/changelogs/%40mastra%2Fvoice-sarvam.md +8 -8
  49. package/.docs/organized/changelogs/%40mastra%2Fvoice-speechify.md +8 -8
  50. package/.docs/organized/changelogs/mastra.md +12 -12
  51. package/.docs/raw/agents/streaming.mdx +118 -0
  52. package/.docs/raw/evals/custom-eval.mdx +4 -0
  53. package/.docs/raw/evals/overview.mdx +4 -0
  54. package/.docs/raw/evals/running-in-ci.mdx +4 -0
  55. package/.docs/raw/evals/textual-evals.mdx +4 -0
  56. package/.docs/raw/reference/agents/streamVNext.mdx +598 -0
  57. package/.docs/raw/reference/evals/answer-relevancy.mdx +4 -0
  58. package/.docs/raw/reference/evals/bias.mdx +4 -0
  59. package/.docs/raw/reference/evals/completeness.mdx +4 -0
  60. package/.docs/raw/reference/evals/content-similarity.mdx +4 -0
  61. package/.docs/raw/reference/evals/context-position.mdx +4 -0
  62. package/.docs/raw/reference/evals/context-precision.mdx +4 -0
  63. package/.docs/raw/reference/evals/context-relevancy.mdx +4 -0
  64. package/.docs/raw/reference/evals/contextual-recall.mdx +4 -0
  65. package/.docs/raw/reference/evals/faithfulness.mdx +4 -0
  66. package/.docs/raw/reference/evals/hallucination.mdx +4 -0
  67. package/.docs/raw/reference/evals/keyword-coverage.mdx +4 -0
  68. package/.docs/raw/reference/evals/prompt-alignment.mdx +4 -0
  69. package/.docs/raw/reference/evals/summarization.mdx +4 -1
  70. package/.docs/raw/reference/evals/textual-difference.mdx +4 -0
  71. package/.docs/raw/reference/evals/tone-consistency.mdx +4 -0
  72. package/.docs/raw/reference/evals/toxicity.mdx +4 -0
  73. package/.docs/raw/reference/workflows/streamVNext.mdx +215 -0
  74. package/.docs/raw/server-db/mastra-client.mdx +10 -6
  75. package/.docs/raw/workflows/control-flow.mdx +3 -3
  76. package/.docs/raw/workflows/streaming.mdx +115 -0
  77. package/package.json +5 -5
@@ -3,8 +3,12 @@ title: "Reference: Contextual Recall | Metrics | Evals | Mastra Docs"
3
3
  description: Documentation for the Contextual Recall Metric, which evaluates the completeness of LLM responses in incorporating relevant context.
4
4
  ---
5
5
 
6
+ import { ScorerCallout } from '@/components/scorer-callout'
7
+
6
8
  # ContextualRecallMetric
7
9
 
10
+ <ScorerCallout />
11
+
8
12
  The `ContextualRecallMetric` class evaluates how effectively an LLM's response incorporates all relevant information from the provided context. It measures whether important information from the reference documents was successfully included in the response, focusing on completeness rather than precision.
9
13
 
10
14
  ## Basic Usage
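The trailing context stops at the "Basic Usage" heading; for orientation, a minimal usage sketch might look like the following (the `@mastra/evals/llm` import path, the `openai()` judge model, and the `context` option are assumptions based on the Mastra evals API, not content from this diff):

```typescript
import { openai } from "@ai-sdk/openai";
import { ContextualRecallMetric } from "@mastra/evals/llm";

// Judge model plus the reference context the response should draw from.
const metric = new ContextualRecallMetric(openai("gpt-4o-mini"), {
  context: [
    "The product offers cloud sync across devices.",
    "Offline mode is available on every plan.",
  ],
});

// Scores how completely the response incorporated the relevant context.
const result = await metric.measure(
  "What features does the product offer?",
  "The product supports cloud sync and offline mode.",
);
console.log(result.score); // 0-1, higher = more of the context recalled
```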
@@ -3,8 +3,12 @@ title: "Reference: Faithfulness | Metrics | Evals | Mastra Docs"
3
3
  description: Documentation for the Faithfulness Metric in Mastra, which evaluates the factual accuracy of LLM outputs compared to the provided context.
4
4
  ---
5
5
 
6
+ import { ScorerCallout } from '@/components/scorer-callout'
7
+
6
8
  # FaithfulnessMetric Reference
7
9
 
10
+ <ScorerCallout />
11
+
8
12
  The `FaithfulnessMetric` in Mastra evaluates how factually accurate an LLM's output is compared to the provided context. It extracts claims from the output and verifies them against the context, making it essential to measure RAG pipeline responses' reliability.
9
13
 
10
14
  ## Basic Usage
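As with the metric above, the usage section itself is outside the hunk; a hedged sketch, assuming the same `@mastra/evals/llm` import path and a judge model from `@ai-sdk/openai`:

```typescript
import { openai } from "@ai-sdk/openai";
import { FaithfulnessMetric } from "@mastra/evals/llm";

// The context that claims in the output are verified against.
const metric = new FaithfulnessMetric(openai("gpt-4o-mini"), {
  context: ["The Tesla Model 3 was launched in 2017."],
});

const result = await metric.measure(
  "When was the Model 3 launched?",
  "The Model 3 went on sale in 2017.",
);
console.log(result.score); // share of output claims supported by the context
```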
@@ -3,8 +3,12 @@ title: "Reference: Hallucination | Metrics | Evals | Mastra Docs"
3
3
  description: Documentation for the Hallucination Metric in Mastra, which evaluates the factual correctness of LLM outputs by identifying contradictions with provided context.
4
4
  ---
5
5
 
6
+ import { ScorerCallout } from '@/components/scorer-callout'
7
+
6
8
  # HallucinationMetric
7
9
 
10
+ <ScorerCallout />
11
+
8
12
  The `HallucinationMetric` evaluates whether an LLM generates factually correct information by comparing its output against the provided context. This metric measures hallucination by identifying direct contradictions between the context and the output.
9
13
 
10
14
  ## Basic Usage
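A comparable sketch for this metric, again assuming the `@mastra/evals/llm` entry point and a `context` constructor option:

```typescript
import { openai } from "@ai-sdk/openai";
import { HallucinationMetric } from "@mastra/evals/llm";

const metric = new HallucinationMetric(openai("gpt-4o-mini"), {
  context: ["The first iPhone was released in 2007."],
});

// A higher score means more of the output contradicts the context.
const result = await metric.measure(
  "When was the first iPhone released?",
  "The first iPhone came out in 2008.",
);
console.log(result.score, result.info?.reason);
```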
@@ -3,8 +3,12 @@ title: "Reference: Keyword Coverage | Metrics | Evals | Mastra Docs"
3
3
  description: Documentation for the Keyword Coverage Metric in Mastra, which evaluates how well LLM outputs cover important keywords from the input.
4
4
  ---
5
5
 
6
+ import { ScorerCallout } from '@/components/scorer-callout'
7
+
6
8
  # KeywordCoverageMetric
7
9
 
10
+ <ScorerCallout />
11
+
8
12
  The `KeywordCoverageMetric` class evaluates how well an LLM's output covers the important keywords from the input. It analyzes keyword presence and matches while ignoring common words and stop words.
9
13
 
10
14
  ## Basic Usage
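This metric is keyword-based rather than LLM-judged, so a sketch needs no model; the `@mastra/evals/nlp` import path is an assumption, not part of the diff:

```typescript
import { KeywordCoverageMetric } from "@mastra/evals/nlp";

// No judge model: scoring is based on keyword overlap alone.
const metric = new KeywordCoverageMetric();

const result = await metric.measure(
  "Explain JavaScript closures and lexical scope",
  "A closure captures variables from its surrounding lexical scope.",
);
console.log(result.score); // fraction of input keywords found in the output
```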
@@ -3,8 +3,12 @@ title: "Reference: Prompt Alignment | Metrics | Evals | Mastra Docs"
3
3
  description: Documentation for the Prompt Alignment Metric in Mastra, which evaluates how well LLM outputs adhere to given prompt instructions.
4
4
  ---
5
5
 
6
+ import { ScorerCallout } from '@/components/scorer-callout'
7
+
6
8
  # PromptAlignmentMetric
7
9
 
10
+ <ScorerCallout />
11
+
8
12
  The `PromptAlignmentMetric` class evaluates how strictly an LLM's output follows a set of given prompt instructions. It uses a judge-based system to verify each instruction is followed exactly and provides detailed reasoning for any deviations.
9
13
 
10
14
  ## Basic Usage
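A usage sketch, assuming the metric is constructed with a judge model and an `instructions` array (neither shown in this hunk):

```typescript
import { openai } from "@ai-sdk/openai";
import { PromptAlignmentMetric } from "@mastra/evals/llm";

// The instructions the judge checks the output against.
const metric = new PromptAlignmentMetric(openai("gpt-4o-mini"), {
  instructions: ["Reply in complete sentences", "State the temperature in Celsius"],
});

const result = await metric.measure(
  "What is the weather like?",
  "It is 22 degrees Celsius and sunny today.",
);
console.log(result.score, result.info?.reason);
```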
@@ -3,9 +3,12 @@ title: "Reference: Summarization | Metrics | Evals | Mastra Docs"
3
3
  description: Documentation for the Summarization Metric in Mastra, which evaluates the quality of LLM-generated summaries for content and factual accuracy.
4
4
  ---
5
5
 
6
+ import { ScorerCallout } from '@/components/scorer-callout'
7
+
6
8
  # SummarizationMetric
7
9
 
8
- ,
10
+ <ScorerCallout />
11
+
9
12
  The `SummarizationMetric` evaluates how well an LLM's summary captures the original text's content while maintaining factual accuracy. It combines two aspects: alignment (factual correctness) and coverage (inclusion of key information), using the minimum scores to ensure both qualities are necessary for a good summary.
10
13
 
11
14
  ## Basic Usage
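A sketch of how the alignment/coverage minimum described above might be exercised, with the import path and judge model assumed rather than taken from the diff:

```typescript
import { openai } from "@ai-sdk/openai";
import { SummarizationMetric } from "@mastra/evals/llm";

const metric = new SummarizationMetric(openai("gpt-4o-mini"));

// measure(originalText, summary): the score is the minimum of the
// alignment (factual accuracy) and coverage (key information) sub-scores.
const result = await metric.measure(
  "The company was founded in 1995 in Berlin and now employs 2,000 people.",
  "Founded in Berlin in 1995, the company has grown to 2,000 employees.",
);
console.log(result.score);
```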
@@ -3,8 +3,12 @@ title: "Reference: Textual Difference | Evals | Mastra Docs"
3
3
  description: Documentation for the Textual Difference Metric in Mastra, which measures textual differences between strings using sequence matching.
4
4
  ---
5
5
 
6
+ import { ScorerCallout } from '@/components/scorer-callout'
7
+
6
8
  # TextualDifferenceMetric
7
9
 
10
+ <ScorerCallout />
11
+
8
12
  The `TextualDifferenceMetric` class uses sequence matching to measure the textual differences between two strings. It provides detailed information about changes, including the number of operations needed to transform one text into another.
9
13
 
10
14
  ## Basic Usage
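Since this metric is pure sequence matching, a sketch needs no judge model; the `@mastra/evals/nlp` path is assumed:

```typescript
import { TextualDifferenceMetric } from "@mastra/evals/nlp";

// Pure sequence matching; no LLM involved.
const metric = new TextualDifferenceMetric();

const result = await metric.measure(
  "The quick brown fox jumps over the lazy dog",
  "The quick brown fox leaps over the lazy dog",
);
console.log(result.score); // similarity ratio; 1 means the strings are identical
```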
@@ -3,8 +3,12 @@ title: "Reference: Tone Consistency | Metrics | Evals | Mastra Docs"
3
3
  description: Documentation for the Tone Consistency Metric in Mastra, which evaluates emotional tone and sentiment consistency in text.
4
4
  ---
5
5
 
6
+ import { ScorerCallout } from '@/components/scorer-callout'
7
+
6
8
  # ToneConsistencyMetric
7
9
 
10
+ <ScorerCallout />
11
+
8
12
  The `ToneConsistencyMetric` class evaluates the text's emotional tone and sentiment consistency. It can operate in two modes: comparing tone between input/output pairs or analyzing tone stability within a single text.
9
13
 
10
14
  ## Basic Usage
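A sketch of the comparison mode described above (input tone vs. output tone); the no-argument constructor and import path are assumptions:

```typescript
import { ToneConsistencyMetric } from "@mastra/evals/nlp";

const metric = new ToneConsistencyMetric();

// Comparison mode: sentiment of the output measured against the input.
const result = await metric.measure(
  "This product is amazing and I love it!",
  "The product is excellent and works wonderfully.",
);
console.log(result.score); // closer to 1 = more consistent tone
```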
@@ -3,8 +3,12 @@ title: "Reference: Toxicity | Metrics | Evals | Mastra Docs"
3
3
  description: Documentation for the Toxicity Metric in Mastra, which evaluates LLM outputs for racist, biased, or toxic elements.
4
4
  ---
5
5
 
6
+ import { ScorerCallout } from '@/components/scorer-callout'
7
+
6
8
  # ToxicityMetric
7
9
 
10
+ <ScorerCallout />
11
+
8
12
  The `ToxicityMetric` class evaluates whether an LLM's output contains racist, biased, or toxic elements. It uses a judge-based system to analyze responses for various forms of toxicity including personal attacks, mockery, hate speech, dismissive statements, and threats.
9
13
 
10
14
  ## Basic Usage
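A usage sketch, assuming a judge model is passed to the constructor as with the other LLM-based metrics:

```typescript
import { openai } from "@ai-sdk/openai";
import { ToxicityMetric } from "@mastra/evals/llm";

const metric = new ToxicityMetric(openai("gpt-4o-mini"));

const result = await metric.measure(
  "What do you think of the new colleague?",
  "They bring solid experience and communicate clearly.",
);
console.log(result.score); // 0 = no toxicity detected, 1 = highly toxic
```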
@@ -0,0 +1,215 @@
+ ---
+ title: "Reference: Workflow.streamVNext() | Streaming | Workflows | Mastra Docs"
+ description: Documentation for the `.streamVNext()` method in Mastra workflows, which enables real-time streaming of responses.
+ ---
+
+ # `streamVNext()`
+
+ The `streamVNext()` method enables real-time streaming of responses from a workflow.
+
+ ## Usage
+
+ ```typescript
+ const run = await myWorkflow.createRunAsync();
+
+ // Add a stream to monitor execution
+ const stream = run.streamVNext({ inputData: {...} });
+
+
+ for await (const chunk of stream) {
+ // do something with the chunk
+ }
+
+ ```
+
+ ## Protocol
+
+ <PropertiesTable
+ content={[
+ {
+ name: "start",
+ type: "object",
+ description: "The workflow starts",
+ isOptional: false,
+ properties: [
+ {
+ type: "object",
+ parameters: [
+ {
+ name: "example",
+ type: "{ type: 'start', runId: '1', from: 'WORKFLOW', payload: { runId: '1' } }",
+ description: "Example message structure",
+ isOptional: false,
+ },
+ ],
+ },
+ ],
+ },
+ {
+ name: "step-start",
+ type: "object",
+ description: "The start of a step",
+ isOptional: false,
+ properties: [
+ {
+ type: "object",
+ parameters: [
+ {
+ name: "example",
+ type: "{ type: 'step-start', runId: '1', from: 'WORKFLOW', payload: { id: 'fetch-weather' } }",
+ description: "Example message structure",
+ isOptional: false,
+ },
+ ],
+ },
+ ],
+ },
+ {
+ name: "step-output",
+ type: "object",
+ description: "Custom output from a step",
+ isOptional: false,
+ properties: [
+ {
+ type: "object",
+ parameters: [
+ {
+ name: "example",
+ type: "{ type: 'step-output', runId: '1', from: 'WORKFLOW', payload: { stepName: 'my step', args: { ... }, stepCallId: 'uuid', startedAt: 1717000000000, status: 'running' } }",
+ description: "Example message structure",
+ isOptional: false,
+ },
+ ],
+ },
+ ],
+ },
+ {
+ name: "step-result",
+ type: "object",
+ description: "The result of a step",
+ isOptional: false,
+ properties: [
+ {
+ type: "object",
+ parameters: [
+ {
+ name: "example",
+ type: "{ type: 'step-result', runId: '1', from: 'WORKFLOW', payload: { stepName: 'my step', result: { ... }, stepCallId: 'uuid', endedAt: 1717000000000, status: 'success', output: [Object] } }",
+ description: "Example message structure",
+ isOptional: false,
+ },
+ ],
+ },
+ ],
+ },
+ {
+ name: "finish",
+ type: "object",
+ description: "The end of the workflow",
+ isOptional: false,
+ properties: [
+ {
+ type: "object",
+ parameters: [
+ {
+ name: "example",
+ type: "{ type: 'finish', runId: '1', from: 'WORKFLOW', payload: { totalUsage: { promptTokens: 100, completionTokens: 100, totalTokens: 200 } } }",
+ description: "Example message structure",
+ isOptional: false,
+ },
+ ],
+ },
+ ],
+ },
+ ]}
+ />
+
+ ## Returns
+
+ ### PropertiesTable for Return Values
+
+ <PropertiesTable
+ content={[
+ {
+ name: "usage",
+ type: "Promise<object>",
+ isOptional: true,
+ description:
+ "Total usage of the workflow, including sub agents/workflows as a step.",
+ properties: [
+ {
+ type: "number",
+ parameters: [
+ {
+ name: "promptTokens",
+ type: "number",
+ isOptional: true,
+ description: "The number of prompt tokens used by the agent.",
+ },
+ ],
+ },
+ {
+ type: "number",
+ parameters: [
+ {
+ name: "completionTokens",
+ type: "number",
+ isOptional: true,
+ description: "The number of completion tokens used by the agent.",
+ },
+ ],
+ },
+ {
+ type: "number",
+ parameters: [
+ {
+ name: "totalTokens",
+ type: "number",
+ isOptional: true,
+ description: "The total number of tokens used by the agent.",
+ },
+ ],
+ },
+ ],
+ },
+ {
+ name: "status",
+ type: "Promise<string>",
+ isOptional: true,
+ description:
+ "The status of the workflow run.",
+ },
+ {
+ name: "result",
+ type: "Promise<object>",
+ isOptional: true,
+ description:
+ "The result of the workflow run.",
+ },
+ ]}
+ />
+
+ ## Examples
+
+ ### Basic Streaming
+
+ ```typescript
+ const run = await myWorkflow.createRunAsync();
+ const stream = run.streamVNext({ inputData: {...} });
+
+ for await (const chunk of stream) {
+ process.stdout.write(chunk);
+ }
+ ```
+
+ ### Structured Output Streaming
+
+ ```typescript
+ const run = await myWorkflow.createRunAsync();
+ const stream = run.streamVNext({ inputData: {...} });
+
+
+ const result = await stream.result;
+ console.log("Final structured result:", result);
+ ```
+
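Since the protocol table and the return values are documented separately, here is a small consumer sketch that ties them together; it reuses the `myWorkflow` placeholder from the usage section, and the `inputData` shape is whatever that workflow's input schema expects (the `city` field is illustrative):

```typescript
const run = await myWorkflow.createRunAsync();
const stream = run.streamVNext({ inputData: { city: "New York" } });

// Route the chunk types documented in the Protocol table above.
for await (const chunk of stream) {
  switch (chunk.type) {
    case "start":
      console.log("workflow started:", chunk.payload.runId);
      break;
    case "step-start":
      console.log("step started:", chunk.payload.id);
      break;
    case "step-result":
      console.log("step finished with status:", chunk.payload.status);
      break;
    case "finish":
      console.log("total tokens:", chunk.payload.totalUsage?.totalTokens);
      break;
  }
}

// The aggregate promises from the Returns table resolve once the run settles.
console.log("run status:", await stream.status);
```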
@@ -50,10 +50,10 @@ All commands install the same @mastra/client-js package but use different packag

  To get started you'll need to initialize your MastraClient with necessary parameters:

- ```typescript
+ ```typescript filename="lib/mastra-client.ts" showLineNumbers copy
  import { MastraClient } from "@mastra/client-js";

- const client = new MastraClient({
+ export const mastraClient = new MastraClient({
  baseUrl: "http://localhost:4111", // Default Mastra development server port
  });
  ```
@@ -62,8 +62,10 @@ const client = new MastraClient({

  You can customize the client with various options:

- ```typescript
- const client = new MastraClient({
+ ```typescript filename="lib/mastra-client.ts" showLineNumbers copy
+ import { MastraClient } from "@mastra/client-js";
+
+ export const mastraClient = new MastraClient({
  // Required
  baseUrl: "http://localhost:4111",

@@ -82,10 +84,12 @@ const client = new MastraClient({

  The Mastra Client SDK supports request cancellation using the standard Web API `AbortSignal`. Pass an `AbortSignal` to the client constructor to enable cancellation for all requests:

- ```typescript
+ ```typescript filename="lib/mastra-client.ts" showLineNumbers copy
+ import { MastraClient } from "@mastra/client-js";
+
  const controller = new AbortController();

- const client = new MastraClient({
+ export const mastraClient = new MastraClient({
  baseUrl: "http://localhost:4111",
  abortSignal: controller.signal,
  });
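With the snippets now exporting a shared `mastraClient`, a consuming file might look like the sketch below; the `getAgents`/`getAgent`/`generate` calls and the `weatherAgent` id are assumptions about the client-js agents API rather than content shown in this diff:

```typescript
import { mastraClient } from "./lib/mastra-client";

// List the agents the Mastra server exposes.
const agents = await mastraClient.getAgents();
console.log(Object.keys(agents));

// Talk to one agent by id (the id is hypothetical).
const agent = mastraClient.getAgent("weatherAgent");
const response = await agent.generate({
  messages: [{ role: "user", content: "What is the weather in London?" }],
});
console.log(response);
```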
@@ -37,7 +37,7 @@ Execute steps simultaneously using `.parallel()`:

  ![Concurrent steps with .parallel()](/image/workflows/workflows-control-flow-parallel.jpg)

- ```typescript {8,4-5} filename="src/mastra/workflows/test-workflow.ts" showLineNumbers copy
+ ```typescript {9,4-5} filename="src/mastra/workflows/test-workflow.ts" showLineNumbers copy
  import { createWorkflow, createStep } from "@mastra/core/workflows";
  import { z } from "zod";

@@ -70,8 +70,8 @@ const greaterThanStep = createStep({...});

  export const testWorkflow = createWorkflow({...})
  .branch([
- [async ({ inputData: { value } }) => (value < 9), lessThanStep],
- [async ({ inputData: { value } }) => (value >= 9), greaterThanStep]
+ [async ({ inputData: { value } }) => value <= 10, lessThanStep],
+ [async ({ inputData: { value } }) => value > 10, greaterThanStep]
  ])
  .commit();
  ```
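The branch hunk references `lessThanStep` and `greaterThanStep` only as elided `createStep({...})` calls; a hypothetical shape for one of them, consistent with the updated `value <= 10` condition, might be:

```typescript
import { createStep } from "@mastra/core/workflows";
import { z } from "zod";

// Illustrative only: the real schemas and logic are not shown in the diff.
const lessThanStep = createStep({
  id: "less-than-step",
  inputSchema: z.object({ value: z.number() }),
  outputSchema: z.object({ result: z.string() }),
  execute: async ({ inputData: { value } }) => {
    return { result: `${value} is 10 or less` };
  },
});
```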
@@ -0,0 +1,115 @@
+ ---
+ title: "Using Workflow Streaming | Workflows | Mastra Docs"
+ description: Documentation on how to stream workflows
+ ---
+
+ # Workflow Streaming
+
+ Workflows in Mastra have access to a powerful streaming protocol! With seamless integration into tools or agents as a step, you can stream responses directly back to your clients, creating a more interactive and engaging experience.
+
+ ## Usage
+
+ To use the new protocol, you can use the `streamVNext` method on a workflow. This method will return a custom MastraWorkflowStream. This stream extends a ReadableStream, so all basic stream methods are available.
+
+ ```typescript
+ const run = await myWorkflow.createRunAsync();
+ const stream = await run.streamVNext({ inputData: { city: 'New York' } });
+
+ for await (const chunk of stream) {
+ console.log(chunk);
+ }
+ ```
+
+ Each chunk is a JSON object with the following properties:
+
+ ```json
+ {
+ type: string;
+ runId: string;
+ from: string;
+ payload: Record<string, any>;
+ }
+ ```
+
+ We have a couple of utility functions on the stream to help you with the streaming process.
+
+ - `stream.status` - The status of the workflow run.
+ - `stream.result` - The result of the workflow run.
+ - `stream.usage` - The total token usage of the workflow run.
+
+ ### How to use the stream in a tool
+
+ Each tool gets a `writer` argument, which is a writable stream with a custom write function. This write function is used to write the tool's response to the stream.
+
+ ```typescript filename="src/mastra/workflows/weather.ts" showLineNumbers copy
+ import { createStep } from "@mastra/core/workflows";
+ import { z } from "zod";
+
+ export const weatherInfo = createStep({
+ id: "weather-info",
+ inputSchema: z.object({
+ city: z.string(),
+ }),
+ outputSchema: z.object({
+ conditions: z.string(),
+ temperature: z.number(),
+ }),
+ description: `Fetches the current weather information for a given city`,
+ execute: async ({ inputData: { city }, writer }) => {
+ writer.write({
+ type: "weather-data",
+ args: {
+ city
+ },
+ status: "pending"
+ })
+ // Tool logic here (e.g., API call)
+ console.log("Using tool to fetch weather information for", city);
+
+ writer.write({
+ type: "weather-data",
+ args: {
+ city
+ },
+ status: "success",
+ result: {
+ temperature: 20,
+ conditions: "Sunny"
+ }
+ })
+
+ return { temperature: 20, conditions: "Sunny" }; // Example return
+ },
+ });
+ ```
+
+ If you want to use an agent within a step, you can call the `streamVNext` method on the agent and pipe the agent's stream to the step's `writer`.
+
+ ```typescript filename="src/mastra/workflows/weather.ts" showLineNumbers copy
+ import { createStep } from "@mastra/core/workflows";
+ import { z } from "zod";
+
+ export const weatherInfo = createStep({
+ id: "weather-info",
+ inputSchema: z.object({
+ city: z.string(),
+ }),
+ outputSchema: z.object({
+ text: z.string(),
+ }),
+ description: `Fetches the current weather information for a given city`,
+ execute: async ({ inputData: { city }, writer, mastra }) => {
+ const agent = mastra.getAgent('weatherAgent')
+ const stream = await agent.streamVNext(`What is the weather in ${city}?`);
+
+ await stream.pipeTo(writer);
+
+ return {
+ text: await stream.text,
+ }
+ },
+ });
+ ```
+
+ Piping the agent's stream into the workflow's stream allows Mastra to automatically sum up the agent's usage, so the workflow run's total usage count can be calculated.
+
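Pulling the page together, a consumer that drains the stream and then reads the aggregate promises might look like this sketch (it reuses `myWorkflow` and the `city` input from the page's own examples):

```typescript
const run = await myWorkflow.createRunAsync();
const stream = await run.streamVNext({ inputData: { city: "New York" } });

for await (const chunk of stream) {
  if (chunk.type === "step-output") {
    // Custom writer.write() payloads from steps, e.g. the weather-data events above.
    console.log("step output:", chunk.payload);
  }
}

console.log("status:", await stream.status);
console.log("result:", await stream.result);
console.log("usage:", await stream.usage); // includes usage piped in from the agent
```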
package/package.json CHANGED
@@ -1,6 +1,6 @@
  {
  "name": "@mastra/mcp-docs-server",
- "version": "0.13.7-alpha.4",
+ "version": "0.13.7",
  "description": "MCP server for accessing Mastra.ai documentation, changelogs, and news.",
  "type": "module",
  "main": "dist/index.js",
@@ -32,8 +32,8 @@
  "uuid": "^11.1.0",
  "zod": "^3.25.67",
  "zod-to-json-schema": "^3.24.5",
- "@mastra/core": "0.12.0-alpha.4",
- "@mastra/mcp": "^0.10.7"
+ "@mastra/core": "0.12.0",
+ "@mastra/mcp": "^0.10.8"
  },
  "devDependencies": {
  "@hono/node-server": "^1.17.1",
@@ -48,8 +48,8 @@
  "tsx": "^4.19.4",
  "typescript": "^5.8.3",
  "vitest": "^3.2.4",
- "@internal/lint": "0.0.23",
- "@mastra/core": "0.12.0-alpha.4"
+ "@internal/lint": "0.0.24",
+ "@mastra/core": "0.12.0"
  },
  "scripts": {
  "prepare-docs": "cross-env PREPARE=true node dist/prepare-docs/prepare.js",