@mastra/mcp-docs-server 0.13.7-alpha.4 → 0.13.7-alpha.5

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (73)
  1. package/.docs/organized/changelogs/%40mastra%2Fastra.md +8 -8
  2. package/.docs/organized/changelogs/%40mastra%2Fchroma.md +8 -8
  3. package/.docs/organized/changelogs/%40mastra%2Fclickhouse.md +8 -8
  4. package/.docs/organized/changelogs/%40mastra%2Fclient-js.md +7 -7
  5. package/.docs/organized/changelogs/%40mastra%2Fcloud.md +8 -8
  6. package/.docs/organized/changelogs/%40mastra%2Fcloudflare-d1.md +8 -8
  7. package/.docs/organized/changelogs/%40mastra%2Fcloudflare.md +9 -9
  8. package/.docs/organized/changelogs/%40mastra%2Fcore.md +3 -3
  9. package/.docs/organized/changelogs/%40mastra%2Fcouchbase.md +8 -8
  10. package/.docs/organized/changelogs/%40mastra%2Fdeployer-cloudflare.md +10 -10
  11. package/.docs/organized/changelogs/%40mastra%2Fdeployer-netlify.md +10 -10
  12. package/.docs/organized/changelogs/%40mastra%2Fdeployer-vercel.md +10 -10
  13. package/.docs/organized/changelogs/%40mastra%2Fdeployer.md +13 -13
  14. package/.docs/organized/changelogs/%40mastra%2Fdynamodb.md +8 -8
  15. package/.docs/organized/changelogs/%40mastra%2Fevals.md +11 -11
  16. package/.docs/organized/changelogs/%40mastra%2Ffirecrawl.md +10 -10
  17. package/.docs/organized/changelogs/%40mastra%2Fgithub.md +8 -8
  18. package/.docs/organized/changelogs/%40mastra%2Flance.md +7 -0
  19. package/.docs/organized/changelogs/%40mastra%2Flibsql.md +11 -11
  20. package/.docs/organized/changelogs/%40mastra%2Floggers.md +8 -8
  21. package/.docs/organized/changelogs/%40mastra%2Fmcp-docs-server.md +9 -9
  22. package/.docs/organized/changelogs/%40mastra%2Fmcp-registry-registry.md +8 -8
  23. package/.docs/organized/changelogs/%40mastra%2Fmcp.md +9 -9
  24. package/.docs/organized/changelogs/%40mastra%2Fmem0.md +8 -8
  25. package/.docs/organized/changelogs/%40mastra%2Fmemory.md +11 -11
  26. package/.docs/organized/changelogs/%40mastra%2Fmongodb.md +8 -8
  27. package/.docs/organized/changelogs/%40mastra%2Fmssql.md +7 -0
  28. package/.docs/organized/changelogs/%40mastra%2Fopensearch.md +8 -8
  29. package/.docs/organized/changelogs/%40mastra%2Fpg.md +11 -11
  30. package/.docs/organized/changelogs/%40mastra%2Fpinecone.md +9 -9
  31. package/.docs/organized/changelogs/%40mastra%2Fplayground-ui.md +10 -10
  32. package/.docs/organized/changelogs/%40mastra%2Fqdrant.md +9 -9
  33. package/.docs/organized/changelogs/%40mastra%2Frag.md +8 -8
  34. package/.docs/organized/changelogs/%40mastra%2Fragie.md +8 -8
  35. package/.docs/organized/changelogs/%40mastra%2Fserver.md +11 -11
  36. package/.docs/organized/changelogs/%40mastra%2Fturbopuffer.md +9 -9
  37. package/.docs/organized/changelogs/%40mastra%2Fupstash.md +11 -11
  38. package/.docs/organized/changelogs/%40mastra%2Fvectorize.md +8 -8
  39. package/.docs/organized/changelogs/%40mastra%2Fvoice-azure.md +8 -8
  40. package/.docs/organized/changelogs/%40mastra%2Fvoice-cloudflare.md +8 -8
  41. package/.docs/organized/changelogs/%40mastra%2Fvoice-deepgram.md +8 -8
  42. package/.docs/organized/changelogs/%40mastra%2Fvoice-elevenlabs.md +8 -8
  43. package/.docs/organized/changelogs/%40mastra%2Fvoice-google.md +8 -8
  44. package/.docs/organized/changelogs/%40mastra%2Fvoice-murf.md +8 -8
  45. package/.docs/organized/changelogs/%40mastra%2Fvoice-openai-realtime.md +8 -8
  46. package/.docs/organized/changelogs/%40mastra%2Fvoice-openai.md +8 -8
  47. package/.docs/organized/changelogs/%40mastra%2Fvoice-playai.md +8 -8
  48. package/.docs/organized/changelogs/%40mastra%2Fvoice-sarvam.md +8 -8
  49. package/.docs/organized/changelogs/%40mastra%2Fvoice-speechify.md +8 -8
  50. package/.docs/organized/changelogs/mastra.md +12 -12
  51. package/.docs/raw/evals/custom-eval.mdx +4 -0
  52. package/.docs/raw/evals/overview.mdx +4 -0
  53. package/.docs/raw/evals/running-in-ci.mdx +4 -0
  54. package/.docs/raw/evals/textual-evals.mdx +4 -0
  55. package/.docs/raw/reference/evals/answer-relevancy.mdx +4 -0
  56. package/.docs/raw/reference/evals/bias.mdx +4 -0
  57. package/.docs/raw/reference/evals/completeness.mdx +4 -0
  58. package/.docs/raw/reference/evals/content-similarity.mdx +4 -0
  59. package/.docs/raw/reference/evals/context-position.mdx +4 -0
  60. package/.docs/raw/reference/evals/context-precision.mdx +4 -0
  61. package/.docs/raw/reference/evals/context-relevancy.mdx +4 -0
  62. package/.docs/raw/reference/evals/contextual-recall.mdx +4 -0
  63. package/.docs/raw/reference/evals/faithfulness.mdx +4 -0
  64. package/.docs/raw/reference/evals/hallucination.mdx +4 -0
  65. package/.docs/raw/reference/evals/keyword-coverage.mdx +4 -0
  66. package/.docs/raw/reference/evals/prompt-alignment.mdx +4 -0
  67. package/.docs/raw/reference/evals/summarization.mdx +4 -1
  68. package/.docs/raw/reference/evals/textual-difference.mdx +4 -0
  69. package/.docs/raw/reference/evals/tone-consistency.mdx +4 -0
  70. package/.docs/raw/reference/evals/toxicity.mdx +4 -0
  71. package/.docs/raw/server-db/mastra-client.mdx +10 -6
  72. package/.docs/raw/workflows/control-flow.mdx +3 -3
  73. package/package.json +4 -4
@@ -1,5 +1,16 @@
1
1
  # mastra
2
2
 
3
+ ## 0.10.16-alpha.3
4
+
5
+ ### Patch Changes
6
+
7
+ - f42c4c2: update peer deps for packages to latest core range
8
+ - Updated dependencies [f42c4c2]
9
+ - @mastra/deployer@0.12.0-alpha.5
10
+ - @mastra/loggers@0.10.5-alpha.0
11
+ - @mastra/mcp@0.10.8-alpha.0
12
+ - @mastra/core@0.12.0-alpha.5
13
+
3
14
  ## 0.10.16-alpha.2
4
15
 
5
16
  ### Patch Changes
@@ -287,16 +298,5 @@
287
298
  - Updated dependencies [626b0f4]
288
299
  - Updated dependencies [c22a91f]
289
300
  - Updated dependencies [f7403ab]
290
- - Updated dependencies [6c89d7f]
291
- - @mastra/deployer@0.10.15-alpha.0
292
- - @mastra/core@0.10.15-alpha.0
293
-
294
- ## 0.10.12
295
-
296
- ### Patch Changes
297
-
298
- - 640f47e: move agent model settings into agent settings
299
- - 5d0c163: Scaffold create-mastra projects with zod@^3 to prevent package version conflicts during install
300
- - 53e3f58: Add support for custom instrumentation files
301
301
 
302
- ... 4594 more lines hidden. See full changelog in package directory.
302
+ ... 4605 more lines hidden. See full changelog in package directory.
@@ -3,8 +3,12 @@ title: "Create a custom eval"
3
3
  description: "Mastra allows you to create your own evals, here is how."
4
4
  ---
5
5
 
6
+ import { ScorerCallout } from '@/components/scorer-callout'
7
+
6
8
  # Create a Custom Eval
7
9
 
10
+ <ScorerCallout />
11
+
8
12
  Create a custom eval by extending the `Metric` class and implementing the `measure` method. This gives you full control over how scores are calculated and what information is returned. For LLM-based evaluations, extend the `MastraAgentJudge` class to define how the model reasons and scores output.
9
13
 
10
14
  ## Native JavaScript evaluation
@@ -3,8 +3,12 @@ title: "Overview"
3
3
  description: "Understanding how to evaluate and measure AI agent quality using Mastra evals."
4
4
  ---
5
5
 
6
+ import { ScorerCallout } from '@/components/scorer-callout'
7
+
6
8
  # Testing your agents with evals
7
9
 
10
+ <ScorerCallout />
11
+
8
12
  While traditional software tests have clear pass/fail conditions, AI outputs are non-deterministic — they can vary with the same input. Evals help bridge this gap by providing quantifiable metrics for measuring agent quality.
9
13
 
10
14
  Evals are automated tests that evaluate Agents outputs using model-graded, rule-based, and statistical methods. Each eval returns a normalized score between 0-1 that can be logged and compared. Evals can be customized with your own prompts and scoring functions.
@@ -3,8 +3,12 @@ title: "Running in CI"
3
3
  description: "Learn how to run Mastra evals in your CI/CD pipeline to monitor agent quality over time."
4
4
  ---
5
5
 
6
+ import { ScorerCallout } from '@/components/scorer-callout'
7
+
6
8
  # Running Evals in CI
7
9
 
10
+ <ScorerCallout />
11
+
8
12
  Running evals in your CI pipeline helps bridge this gap by providing quantifiable metrics for measuring agent quality over time.
9
13
 
10
14
  ## Setting Up CI Integration
@@ -3,8 +3,12 @@ title: "Textual Evals"
3
3
  description: "Understand how Mastra uses LLM-as-judge methodology to evaluate text quality."
4
4
  ---
5
5
 
6
+ import { ScorerCallout } from '@/components/scorer-callout'
7
+
6
8
  # Textual Evals
7
9
 
10
+ <ScorerCallout />
11
+
8
12
  Textual evals use an LLM-as-judge methodology to evaluate agent outputs. This approach leverages language models to assess various aspects of text quality, similar to how a teaching assistant might grade assignments using a rubric.
9
13
 
10
14
  Each eval focuses on specific quality aspects and returns a score between 0 and 1, providing quantifiable metrics for non-deterministic AI outputs.
@@ -3,8 +3,12 @@ title: "Reference: Answer Relevancy | Metrics | Evals | Mastra Docs"
3
3
  description: Documentation for the Answer Relevancy Metric in Mastra, which evaluates how well LLM outputs address the input query.
4
4
  ---
5
5
 
6
+ import { ScorerCallout } from '@/components/scorer-callout'
7
+
6
8
  # AnswerRelevancyMetric
7
9
 
10
+ <ScorerCallout />
11
+
8
12
  The `AnswerRelevancyMetric` class evaluates how well an LLM's output answers or addresses the input query. It uses a judge-based system to determine relevancy and provides detailed scoring and reasoning.
9
13
 
10
14
  ## Basic Usage
@@ -3,8 +3,12 @@ title: "Reference: Bias | Output Metrics | Evals | Mastra Docs"
3
3
  description: Documentation for the Bias Metric in Mastra, which evaluates LLM outputs for various forms of bias, including gender, political, racial/ethnic, or geographical bias.
4
4
  ---
5
5
 
6
+ import { ScorerCallout } from '@/components/scorer-callout'
7
+
6
8
  # BiasMetric
7
9
 
10
+ <ScorerCallout />
11
+
8
12
  The `BiasMetric` class evaluates whether an LLM's output contains various forms of bias, including gender, political, racial/ethnic, or geographical bias. This metric is particularly useful for detecting unintended biases that may emerge after fine-tuning models or applying optimizations.
9
13
 
10
14
  ## Basic Usage
@@ -3,8 +3,12 @@ title: "Reference: Completeness | Metrics | Evals | Mastra Docs"
3
3
  description: Documentation for the Completeness Metric in Mastra, which evaluates how thoroughly LLM outputs cover key elements present in the input.
4
4
  ---
5
5
 
6
+ import { ScorerCallout } from '@/components/scorer-callout'
7
+
6
8
  # CompletenessMetric
7
9
 
10
+ <ScorerCallout />
11
+
8
12
  The `CompletenessMetric` class evaluates how thoroughly an LLM's output covers the key elements present in the input. It analyzes nouns, verbs, topics, and terms to determine coverage and provides a detailed completeness score.
9
13
 
10
14
  ## Basic Usage
@@ -3,8 +3,12 @@ title: "Reference: Content Similarity | Evals | Mastra Docs"
3
3
  description: Documentation for the Content Similarity Metric in Mastra, which measures textual similarity between strings and provides a matching score.
4
4
  ---
5
5
 
6
+ import { ScorerCallout } from '@/components/scorer-callout'
7
+
6
8
  # ContentSimilarityMetric
7
9
 
10
+ <ScorerCallout />
11
+
8
12
  The `ContentSimilarityMetric` class measures the textual similarity between two strings, providing a score that indicates how closely they match. It supports configurable options for case sensitivity and whitespace handling.
9
13
 
10
14
  ## Basic Usage
@@ -3,8 +3,12 @@ title: "Reference: Context Position | Metrics | Evals | Mastra Docs"
3
3
  description: Documentation for the Context Position Metric in Mastra, which evaluates the ordering of context nodes based on their relevance to the query and output.
4
4
  ---
5
5
 
6
+ import { ScorerCallout } from '@/components/scorer-callout'
7
+
6
8
  # ContextPositionMetric
7
9
 
10
+ <ScorerCallout />
11
+
8
12
  The `ContextPositionMetric` class evaluates how well context nodes are ordered based on their relevance to the query and output. It uses position-weighted scoring to emphasize the importance of having the most relevant context pieces appear earlier in the sequence.
9
13
 
10
14
  ## Basic Usage
@@ -3,8 +3,12 @@ title: "Reference: Context Precision | Metrics | Evals | Mastra Docs"
3
3
  description: Documentation for the Context Precision Metric in Mastra, which evaluates the relevance and precision of retrieved context nodes for generating expected outputs.
4
4
  ---
5
5
 
6
+ import { ScorerCallout } from '@/components/scorer-callout'
7
+
6
8
  # ContextPrecisionMetric
7
9
 
10
+ <ScorerCallout />
11
+
8
12
  The `ContextPrecisionMetric` class evaluates how relevant and precise the retrieved context nodes are for generating the expected output. It uses a judge-based system to analyze each context piece's contribution and provides weighted scoring based on position.
9
13
 
10
14
  ## Basic Usage
@@ -3,8 +3,12 @@ title: "Reference: Context Relevancy | Evals | Mastra Docs"
3
3
  description: Documentation for the Context Relevancy Metric, which evaluates the relevance of retrieved context in RAG pipelines.
4
4
  ---
5
5
 
6
+ import { ScorerCallout } from '@/components/scorer-callout'
7
+
6
8
  # ContextRelevancyMetric
7
9
 
10
+ <ScorerCallout />
11
+
8
12
  The `ContextRelevancyMetric` class evaluates the quality of your RAG (Retrieval-Augmented Generation) pipeline's retriever by measuring how relevant the retrieved context is to the input query. It uses an LLM-based evaluation system that first extracts statements from the context and then assesses their relevance to the input.
9
13
 
10
14
  ## Basic Usage
@@ -3,8 +3,12 @@ title: "Reference: Contextual Recall | Metrics | Evals | Mastra Docs"
3
3
  description: Documentation for the Contextual Recall Metric, which evaluates the completeness of LLM responses in incorporating relevant context.
4
4
  ---
5
5
 
6
+ import { ScorerCallout } from '@/components/scorer-callout'
7
+
6
8
  # ContextualRecallMetric
7
9
 
10
+ <ScorerCallout />
11
+
8
12
  The `ContextualRecallMetric` class evaluates how effectively an LLM's response incorporates all relevant information from the provided context. It measures whether important information from the reference documents was successfully included in the response, focusing on completeness rather than precision.
9
13
 
10
14
  ## Basic Usage
@@ -3,8 +3,12 @@ title: "Reference: Faithfulness | Metrics | Evals | Mastra Docs"
3
3
  description: Documentation for the Faithfulness Metric in Mastra, which evaluates the factual accuracy of LLM outputs compared to the provided context.
4
4
  ---
5
5
 
6
+ import { ScorerCallout } from '@/components/scorer-callout'
7
+
6
8
  # FaithfulnessMetric Reference
7
9
 
10
+ <ScorerCallout />
11
+
8
12
  The `FaithfulnessMetric` in Mastra evaluates how factually accurate an LLM's output is compared to the provided context. It extracts claims from the output and verifies them against the context, making it essential to measure RAG pipeline responses' reliability.
9
13
 
10
14
  ## Basic Usage
@@ -3,8 +3,12 @@ title: "Reference: Hallucination | Metrics | Evals | Mastra Docs"
3
3
  description: Documentation for the Hallucination Metric in Mastra, which evaluates the factual correctness of LLM outputs by identifying contradictions with provided context.
4
4
  ---
5
5
 
6
+ import { ScorerCallout } from '@/components/scorer-callout'
7
+
6
8
  # HallucinationMetric
7
9
 
10
+ <ScorerCallout />
11
+
8
12
  The `HallucinationMetric` evaluates whether an LLM generates factually correct information by comparing its output against the provided context. This metric measures hallucination by identifying direct contradictions between the context and the output.
9
13
 
10
14
  ## Basic Usage
@@ -3,8 +3,12 @@ title: "Reference: Keyword Coverage | Metrics | Evals | Mastra Docs"
3
3
  description: Documentation for the Keyword Coverage Metric in Mastra, which evaluates how well LLM outputs cover important keywords from the input.
4
4
  ---
5
5
 
6
+ import { ScorerCallout } from '@/components/scorer-callout'
7
+
6
8
  # KeywordCoverageMetric
7
9
 
10
+ <ScorerCallout />
11
+
8
12
  The `KeywordCoverageMetric` class evaluates how well an LLM's output covers the important keywords from the input. It analyzes keyword presence and matches while ignoring common words and stop words.
9
13
 
10
14
  ## Basic Usage
@@ -3,8 +3,12 @@ title: "Reference: Prompt Alignment | Metrics | Evals | Mastra Docs"
3
3
  description: Documentation for the Prompt Alignment Metric in Mastra, which evaluates how well LLM outputs adhere to given prompt instructions.
4
4
  ---
5
5
 
6
+ import { ScorerCallout } from '@/components/scorer-callout'
7
+
6
8
  # PromptAlignmentMetric
7
9
 
10
+ <ScorerCallout />
11
+
8
12
  The `PromptAlignmentMetric` class evaluates how strictly an LLM's output follows a set of given prompt instructions. It uses a judge-based system to verify each instruction is followed exactly and provides detailed reasoning for any deviations.
9
13
 
10
14
  ## Basic Usage
@@ -3,9 +3,12 @@ title: "Reference: Summarization | Metrics | Evals | Mastra Docs"
3
3
  description: Documentation for the Summarization Metric in Mastra, which evaluates the quality of LLM-generated summaries for content and factual accuracy.
4
4
  ---
5
5
 
6
+ import { ScorerCallout } from '@/components/scorer-callout'
7
+
6
8
  # SummarizationMetric
7
9
 
8
- ,
10
+ <ScorerCallout />
11
+
9
12
  The `SummarizationMetric` evaluates how well an LLM's summary captures the original text's content while maintaining factual accuracy. It combines two aspects: alignment (factual correctness) and coverage (inclusion of key information), using the minimum scores to ensure both qualities are necessary for a good summary.
10
13
 
11
14
  ## Basic Usage
@@ -3,8 +3,12 @@ title: "Reference: Textual Difference | Evals | Mastra Docs"
3
3
  description: Documentation for the Textual Difference Metric in Mastra, which measures textual differences between strings using sequence matching.
4
4
  ---
5
5
 
6
+ import { ScorerCallout } from '@/components/scorer-callout'
7
+
6
8
  # TextualDifferenceMetric
7
9
 
10
+ <ScorerCallout />
11
+
8
12
  The `TextualDifferenceMetric` class uses sequence matching to measure the textual differences between two strings. It provides detailed information about changes, including the number of operations needed to transform one text into another.
9
13
 
10
14
  ## Basic Usage
@@ -3,8 +3,12 @@ title: "Reference: Tone Consistency | Metrics | Evals | Mastra Docs"
3
3
  description: Documentation for the Tone Consistency Metric in Mastra, which evaluates emotional tone and sentiment consistency in text.
4
4
  ---
5
5
 
6
+ import { ScorerCallout } from '@/components/scorer-callout'
7
+
6
8
  # ToneConsistencyMetric
7
9
 
10
+ <ScorerCallout />
11
+
8
12
  The `ToneConsistencyMetric` class evaluates the text's emotional tone and sentiment consistency. It can operate in two modes: comparing tone between input/output pairs or analyzing tone stability within a single text.
9
13
 
10
14
  ## Basic Usage
@@ -3,8 +3,12 @@ title: "Reference: Toxicity | Metrics | Evals | Mastra Docs"
3
3
  description: Documentation for the Toxicity Metric in Mastra, which evaluates LLM outputs for racist, biased, or toxic elements.
4
4
  ---
5
5
 
6
+ import { ScorerCallout } from '@/components/scorer-callout'
7
+
6
8
  # ToxicityMetric
7
9
 
10
+ <ScorerCallout />
11
+
8
12
  The `ToxicityMetric` class evaluates whether an LLM's output contains racist, biased, or toxic elements. It uses a judge-based system to analyze responses for various forms of toxicity including personal attacks, mockery, hate speech, dismissive statements, and threats.
9
13
 
10
14
  ## Basic Usage
@@ -50,10 +50,10 @@ All commands install the same @mastra/client-js package but use different packag
50
50
 
51
51
  To get started you'll need to initialize your MastraClient with necessary parameters:
52
52
 
53
- ```typescript
53
+ ```typescript filename="lib/mastra-client.ts" showLineNumbers copy
54
54
  import { MastraClient } from "@mastra/client-js";
55
55
 
56
- const client = new MastraClient({
56
+ export const mastraClient = new MastraClient({
57
57
  baseUrl: "http://localhost:4111", // Default Mastra development server port
58
58
  });
59
59
  ```
@@ -62,8 +62,10 @@ const client = new MastraClient({
62
62
 
63
63
  You can customize the client with various options:
64
64
 
65
- ```typescript
66
- const client = new MastraClient({
65
+ ```typescript filename="lib/mastra-client.ts" showLineNumbers copy
66
+ import { MastraClient } from "@mastra/client-js";
67
+
68
+ export const mastraClient = new MastraClient({
67
69
  // Required
68
70
  baseUrl: "http://localhost:4111",
69
71
 
@@ -82,10 +84,12 @@ const client = new MastraClient({
82
84
 
83
85
  The Mastra Client SDK supports request cancellation using the standard Web API `AbortSignal`. Pass an `AbortSignal` to the client constructor to enable cancellation for all requests:
84
86
 
85
- ```typescript
87
+ ```typescript filename="lib/mastra-client.ts" showLineNumbers copy
88
+ import { MastraClient } from "@mastra/client-js";
89
+
86
90
  const controller = new AbortController();
87
91
 
88
- const client = new MastraClient({
92
+ export const mastraClient = new MastraClient({
89
93
  baseUrl: "http://localhost:4111",
90
94
  abortSignal: controller.signal,
91
95
  });
@@ -37,7 +37,7 @@ Execute steps simultaneously using `.parallel()`:
37
37
 
38
38
  ![Concurrent steps with .parallel()](/image/workflows/workflows-control-flow-parallel.jpg)
39
39
 
40
- ```typescript {8,4-5} filename="src/mastra/workflows/test-workflow.ts" showLineNumbers copy
40
+ ```typescript {9,4-5} filename="src/mastra/workflows/test-workflow.ts" showLineNumbers copy
41
41
  import { createWorkflow, createStep } from "@mastra/core/workflows";
42
42
  import { z } from "zod";
43
43
 
@@ -70,8 +70,8 @@ const greaterThanStep = createStep({...});
70
70
 
71
71
  export const testWorkflow = createWorkflow({...})
72
72
  .branch([
73
- [async ({ inputData: { value } }) => (value < 9), lessThanStep],
74
- [async ({ inputData: { value } }) => (value >= 9), greaterThanStep]
73
+ [async ({ inputData: { value } }) => value <= 10, lessThanStep],
74
+ [async ({ inputData: { value } }) => value > 10, greaterThanStep]
75
75
  ])
76
76
  .commit();
77
77
  ```
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "@mastra/mcp-docs-server",
3
- "version": "0.13.7-alpha.4",
3
+ "version": "0.13.7-alpha.5",
4
4
  "description": "MCP server for accessing Mastra.ai documentation, changelogs, and news.",
5
5
  "type": "module",
6
6
  "main": "dist/index.js",
@@ -32,8 +32,8 @@
32
32
  "uuid": "^11.1.0",
33
33
  "zod": "^3.25.67",
34
34
  "zod-to-json-schema": "^3.24.5",
35
- "@mastra/core": "0.12.0-alpha.4",
36
- "@mastra/mcp": "^0.10.7"
35
+ "@mastra/core": "0.12.0-alpha.5",
36
+ "@mastra/mcp": "^0.10.8-alpha.0"
37
37
  },
38
38
  "devDependencies": {
39
39
  "@hono/node-server": "^1.17.1",
@@ -49,7 +49,7 @@
49
49
  "typescript": "^5.8.3",
50
50
  "vitest": "^3.2.4",
51
51
  "@internal/lint": "0.0.23",
52
- "@mastra/core": "0.12.0-alpha.4"
52
+ "@mastra/core": "0.12.0-alpha.5"
53
53
  },
54
54
  "scripts": {
55
55
  "prepare-docs": "cross-env PREPARE=true node dist/prepare-docs/prepare.js",