@mastra/mcp-docs-server 0.13.44 → 0.13.45

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (98)
  1. package/.docs/organized/changelogs/%40internal%2Fchangeset-cli.md +2 -0
  2. package/.docs/organized/changelogs/%40internal%2Fexternal-types.md +2 -0
  3. package/.docs/organized/changelogs/%40internal%2Fstorage-test-utils.md +201 -1
  4. package/.docs/organized/changelogs/%40internal%2Ftypes-builder.md +2 -0
  5. package/.docs/organized/changelogs/%40mastra%2Fagent-builder.md +201 -1
  6. package/.docs/organized/changelogs/%40mastra%2Fai-sdk.md +251 -51
  7. package/.docs/organized/changelogs/%40mastra%2Fastra.md +201 -1
  8. package/.docs/organized/changelogs/%40mastra%2Fchroma.md +201 -1
  9. package/.docs/organized/changelogs/%40mastra%2Fclickhouse.md +201 -1
  10. package/.docs/organized/changelogs/%40mastra%2Fclient-js.md +201 -1
  11. package/.docs/organized/changelogs/%40mastra%2Fcloud.md +201 -1
  12. package/.docs/organized/changelogs/%40mastra%2Fcloudflare-d1.md +201 -1
  13. package/.docs/organized/changelogs/%40mastra%2Fcloudflare.md +201 -1
  14. package/.docs/organized/changelogs/%40mastra%2Fcore.md +422 -222
  15. package/.docs/organized/changelogs/%40mastra%2Fcouchbase.md +201 -1
  16. package/.docs/organized/changelogs/%40mastra%2Fdeployer-cloud.md +201 -1
  17. package/.docs/organized/changelogs/%40mastra%2Fdeployer-cloudflare.md +201 -1
  18. package/.docs/organized/changelogs/%40mastra%2Fdeployer-netlify.md +201 -1
  19. package/.docs/organized/changelogs/%40mastra%2Fdeployer-vercel.md +201 -1
  20. package/.docs/organized/changelogs/%40mastra%2Fdeployer.md +201 -1
  21. package/.docs/organized/changelogs/%40mastra%2Fdynamodb.md +201 -1
  22. package/.docs/organized/changelogs/%40mastra%2Fevals.md +201 -1
  23. package/.docs/organized/changelogs/%40mastra%2Flance.md +201 -1
  24. package/.docs/organized/changelogs/%40mastra%2Flibsql.md +201 -1
  25. package/.docs/organized/changelogs/%40mastra%2Floggers.md +201 -1
  26. package/.docs/organized/changelogs/%40mastra%2Fmcp-docs-server.md +201 -1
  27. package/.docs/organized/changelogs/%40mastra%2Fmcp-registry-registry.md +201 -1
  28. package/.docs/organized/changelogs/%40mastra%2Fmcp.md +201 -1
  29. package/.docs/organized/changelogs/%40mastra%2Fmemory.md +201 -1
  30. package/.docs/organized/changelogs/%40mastra%2Fmongodb.md +201 -1
  31. package/.docs/organized/changelogs/%40mastra%2Fmssql.md +201 -1
  32. package/.docs/organized/changelogs/%40mastra%2Fopensearch.md +201 -1
  33. package/.docs/organized/changelogs/%40mastra%2Fpg.md +201 -1
  34. package/.docs/organized/changelogs/%40mastra%2Fpinecone.md +201 -1
  35. package/.docs/organized/changelogs/%40mastra%2Fplayground-ui.md +201 -1
  36. package/.docs/organized/changelogs/%40mastra%2Fqdrant.md +201 -1
  37. package/.docs/organized/changelogs/%40mastra%2Frag.md +201 -1
  38. package/.docs/organized/changelogs/%40mastra%2Freact.md +201 -1
  39. package/.docs/organized/changelogs/%40mastra%2Fschema-compat.md +72 -0
  40. package/.docs/organized/changelogs/%40mastra%2Fserver.md +201 -1
  41. package/.docs/organized/changelogs/%40mastra%2Fturbopuffer.md +201 -1
  42. package/.docs/organized/changelogs/%40mastra%2Fupstash.md +201 -1
  43. package/.docs/organized/changelogs/%40mastra%2Fvectorize.md +201 -1
  44. package/.docs/organized/changelogs/%40mastra%2Fvoice-azure.md +201 -1
  45. package/.docs/organized/changelogs/%40mastra%2Fvoice-cloudflare.md +201 -1
  46. package/.docs/organized/changelogs/%40mastra%2Fvoice-deepgram.md +201 -1
  47. package/.docs/organized/changelogs/%40mastra%2Fvoice-elevenlabs.md +201 -1
  48. package/.docs/organized/changelogs/%40mastra%2Fvoice-gladia.md +104 -1
  49. package/.docs/organized/changelogs/%40mastra%2Fvoice-google-gemini-live.md +49 -1
  50. package/.docs/organized/changelogs/%40mastra%2Fvoice-google.md +201 -1
  51. package/.docs/organized/changelogs/%40mastra%2Fvoice-murf.md +201 -1
  52. package/.docs/organized/changelogs/%40mastra%2Fvoice-openai-realtime.md +201 -1
  53. package/.docs/organized/changelogs/%40mastra%2Fvoice-openai.md +201 -1
  54. package/.docs/organized/changelogs/%40mastra%2Fvoice-playai.md +201 -1
  55. package/.docs/organized/changelogs/%40mastra%2Fvoice-sarvam.md +201 -1
  56. package/.docs/organized/changelogs/%40mastra%2Fvoice-speechify.md +201 -1
  57. package/.docs/organized/changelogs/create-mastra.md +201 -1
  58. package/.docs/organized/changelogs/mastra.md +201 -1
  59. package/.docs/raw/agents/adding-voice.mdx +49 -0
  60. package/.docs/raw/course/01-first-agent/05-running-playground.md +5 -5
  61. package/.docs/raw/course/01-first-agent/09-testing-your-agent.md +3 -3
  62. package/.docs/raw/course/01-first-agent/13-testing-your-tool.md +3 -3
  63. package/.docs/raw/course/01-first-agent/17-testing-memory.md +2 -2
  64. package/.docs/raw/course/04-workflows/07-using-playground.md +1 -1
  65. package/.docs/raw/frameworks/agentic-uis/ai-sdk.mdx +23 -1
  66. package/.docs/raw/reference/client-js/memory.mdx +43 -0
  67. package/.docs/raw/reference/core/mastra-class.mdx +8 -0
  68. package/.docs/raw/reference/core/mastra-model-gateway.mdx +223 -0
  69. package/.docs/raw/reference/scorers/answer-relevancy.mdx +28 -98
  70. package/.docs/raw/reference/scorers/answer-similarity.mdx +12 -258
  71. package/.docs/raw/reference/scorers/bias.mdx +29 -87
  72. package/.docs/raw/reference/scorers/completeness.mdx +32 -91
  73. package/.docs/raw/reference/scorers/content-similarity.mdx +29 -99
  74. package/.docs/raw/reference/scorers/context-precision.mdx +28 -130
  75. package/.docs/raw/reference/scorers/faithfulness.mdx +28 -101
  76. package/.docs/raw/reference/scorers/hallucination.mdx +28 -103
  77. package/.docs/raw/reference/scorers/keyword-coverage.mdx +28 -107
  78. package/.docs/raw/reference/scorers/textual-difference.mdx +27 -100
  79. package/.docs/raw/reference/scorers/tone-consistency.mdx +25 -98
  80. package/.docs/raw/reference/scorers/toxicity.mdx +29 -92
  81. package/.docs/raw/reference/storage/cloudflare-d1.mdx +37 -0
  82. package/.docs/raw/reference/storage/lance.mdx +33 -0
  83. package/.docs/raw/reference/storage/libsql.mdx +37 -0
  84. package/.docs/raw/reference/storage/mongodb.mdx +39 -0
  85. package/.docs/raw/reference/storage/mssql.mdx +37 -0
  86. package/.docs/raw/reference/storage/postgresql.mdx +37 -0
  87. package/.docs/raw/reference/streaming/agents/stream.mdx +7 -0
  88. package/.docs/raw/reference/voice/composite-voice.mdx +71 -28
  89. package/.docs/raw/reference/voice/voice.listen.mdx +86 -52
  90. package/.docs/raw/reference/voice/voice.speak.mdx +75 -40
  91. package/.docs/raw/voice/overview.mdx +67 -0
  92. package/.docs/raw/workflows/control-flow.mdx +180 -0
  93. package/CHANGELOG.md +20 -0
  94. package/dist/{chunk-TUAHUTTB.js → chunk-VE65X75W.js} +24 -4
  95. package/dist/prepare-docs/package-changes.d.ts.map +1 -1
  96. package/dist/prepare-docs/prepare.js +1 -1
  97. package/dist/stdio.js +1 -1
  98. package/package.json +5 -5
package/.docs/raw/reference/core/mastra-model-gateway.mdx
@@ -0,0 +1,223 @@
+ ---
+ title: "Reference: MastraModelGateway | Core"
+ description: "Base class for creating custom model gateways"
+ ---
+
+ # MastraModelGateway
+
+ Abstract base class for implementing custom model gateways. Gateways handle provider-specific logic for accessing language models, including provider configuration, authentication, URL construction, and model instantiation.
+
+ ## Class Overview
+
+ ```typescript
+ import { MastraModelGateway, type ProviderConfig } from '@mastra/core/llm';
+ import { createOpenAICompatible } from '@ai-sdk/openai-compatible-v5';
+ import type { LanguageModelV2 } from '@ai-sdk/provider-v5';
+
+ class MyCustomGateway extends MastraModelGateway {
+   readonly id = 'custom';
+   readonly name = 'My Custom Gateway';
+
+   async fetchProviders(): Promise<Record<string, ProviderConfig>> {
+     return {
+       'my-provider': {
+         name: 'My Provider',
+         models: ['model-1', 'model-2'],
+         apiKeyEnvVar: 'MY_API_KEY',
+         gateway: this.id,
+       },
+     };
+   }
+
+   buildUrl(modelId: string, envVars?: Record<string, string>): string {
+     return 'https://api.my-provider.com/v1';
+   }
+
+   async getApiKey(modelId: string): Promise<string> {
+     const apiKey = process.env.MY_API_KEY;
+     if (!apiKey) throw new Error('MY_API_KEY not set');
+     return apiKey;
+   }
+
+   async resolveLanguageModel({
+     modelId,
+     providerId,
+     apiKey,
+   }: {
+     modelId: string;
+     providerId: string;
+     apiKey: string;
+   }): Promise<LanguageModelV2> {
+     const baseURL = this.buildUrl(`${providerId}/${modelId}`);
+     return createOpenAICompatible({
+       name: providerId,
+       apiKey,
+       baseURL,
+     }).chatModel(modelId);
+   }
+ }
+ ```
+
+ ## Required Properties
+
+ <PropertiesTable
+   content={[
+     {
+       name: 'id',
+       type: 'string',
+       description: 'Unique identifier for the gateway. This ID is used as the prefix for all providers from this gateway (e.g., "netlify/anthropic"). Exception: models.dev is a provider registry and doesn\'t use a prefix.',
+     },
+     {
+       name: 'name',
+       type: 'string',
+       description: 'Human-readable name for the gateway.',
+     },
+   ]}
+ />
+
+ ## Required Methods
+
+ ### fetchProviders()
+
+ Fetches provider configurations from the gateway.
+
+ **Returns:** `Promise<Record<string, ProviderConfig>>`
+
+ **ProviderConfig Structure:**
+ <PropertiesTable
+   content={[
+     {
+       name: 'name',
+       type: 'string',
+       description: 'Display name of the provider',
+     },
+     {
+       name: 'models',
+       type: 'string[]',
+       description: 'Array of available model IDs',
+     },
+     {
+       name: 'apiKeyEnvVar',
+       type: 'string | string[]',
+       description: 'Environment variable(s) for API key',
+     },
+     {
+       name: 'gateway',
+       type: 'string',
+       description: 'Gateway identifier',
+     },
+     {
+       name: 'url',
+       type: 'string',
+       isOptional: true,
+       description: 'Optional base API URL',
+     },
+     {
+       name: 'apiKeyHeader',
+       type: 'string',
+       isOptional: true,
+       description: 'Optional custom auth header name',
+     },
+     {
+       name: 'docUrl',
+       type: 'string',
+       isOptional: true,
+       description: 'Optional documentation URL',
+     },
+   ]}
+ />
+
+ ### buildUrl()
+
+ Builds the API URL for a specific model/provider combination.
+
+ **Parameters:**
+ <PropertiesTable
+   content={[
+     {
+       name: 'modelId',
+       type: 'string',
+       description: 'Full model ID (e.g., "custom/my-provider/model-1")',
+     },
+     {
+       name: 'envVars',
+       type: 'Record<string, string>',
+       isOptional: true,
+       description: 'Optional environment variables',
+     },
+   ]}
+ />
+
+ **Returns:** `string | undefined | Promise<string | undefined>`
+
+ ### getApiKey()
+
+ Retrieves the API key for authentication.
+
+ **Parameters:**
+ <PropertiesTable
+   content={[
+     {
+       name: 'modelId',
+       type: 'string',
+       description: 'Full model ID',
+     },
+   ]}
+ />
+
+ **Returns:** `Promise<string>`
+
+ ### resolveLanguageModel()
+
+ Creates a language model instance.
+
+ **Parameters:**
+ <PropertiesTable
+   content={[
+     {
+       name: 'modelId',
+       type: 'string',
+       description: 'The model ID',
+     },
+     {
+       name: 'providerId',
+       type: 'string',
+       description: 'The provider ID',
+     },
+     {
+       name: 'apiKey',
+       type: 'string',
+       description: 'The API key for authentication',
+     },
+   ]}
+ />
+
+ **Returns:** `Promise<LanguageModelV2> | LanguageModelV2`
+
+ ## Instance Methods
+
+ ### getId()
+
+ Returns the gateway's unique identifier.
+
+ **Returns:** `string` - The gateway's `id` property
+
+ ## Model ID Format
+
+ For true gateways, the gateway ID is used as a prefix and models are accessed using this format:
+
+ ```
+ [gateway-id]/[provider]/[model]
+ ```
+
+ Examples:
+ - Gateway with `id = 'custom'`: `'custom/my-provider/model-1'`
+
+ ## Built-in Implementations
+
+ - **NetlifyGateway** - Netlify AI Gateway integration
+ - **ModelsDevGateway** - Registry of OpenAI-compatible providers
+
+ ## Related
+
+ - [Custom Gateways Guide](/models/gateways/custom-gateways) - Complete guide to creating custom gateways
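
The new reference page stops at the class definition. For orientation, here is a minimal sketch of driving that example class directly. `MyCustomGateway`, `my-provider`, and `MY_API_KEY` are the illustrative names defined in the page above, not real providers or additional Mastra APIs.

```typescript
// Minimal sketch: exercising the MyCustomGateway example from the reference above.
// Assumes MY_API_KEY is set in the environment; all names are illustrative.
const gateway = new MyCustomGateway();

// Discover the providers this gateway exposes.
const providers = await gateway.fetchProviders();
console.log(Object.keys(providers)); // ['my-provider']

// Full model IDs follow the documented [gateway-id]/[provider]/[model] format.
const apiKey = await gateway.getApiKey('custom/my-provider/model-1');

// Resolve a LanguageModelV2 instance for use with the AI SDK.
const model = await gateway.resolveLanguageModel({
  modelId: 'model-1',
  providerId: 'my-provider',
  apiKey,
});
```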
package/.docs/raw/reference/scorers/answer-relevancy.mdx
@@ -112,115 +112,45 @@ A relevancy score between 0 and 1:
  - **0.1–0.3**: The response includes minimal relevant content and largely misses the intent of the query.
  - **0.0**: The response is entirely unrelated and does not answer the query.

- ## Examples
+ ## Example

- ### High relevancy example
+ Evaluate agent responses for relevancy across different scenarios:

- In this example, the response accurately addresses the input query with specific and relevant information.
-
- ```typescript title="src/example-high-answer-relevancy.ts" showLineNumbers copy
+ ```typescript title="src/example-answer-relevancy.ts" showLineNumbers copy
+ import { runExperiment } from "@mastra/core/scores";
  import { createAnswerRelevancyScorer } from "@mastra/evals/scorers/llm";
+ import { myAgent } from "./agent";

- const scorer = createAnswerRelevancyScorer({ model: "openai/gpt-4o-mini" });
+ const scorer = createAnswerRelevancyScorer({ model: "openai/gpt-4o" });

- const inputMessages = [
-   {
-     role: "user",
-     content: "What are the health benefits of regular exercise?",
+ const result = await runExperiment({
+   data: [
+     {
+       input: "What are the health benefits of regular exercise?",
+     },
+     {
+       input: "What should a healthy breakfast include?",
+     },
+     {
+       input: "What are the benefits of meditation?",
+     },
+   ],
+   scorers: [scorer],
+   target: myAgent,
+   onItemComplete: ({ scorerResults }) => {
+     console.log({
+       score: scorerResults[scorer.name].score,
+       reason: scorerResults[scorer.name].reason,
+     });
    },
- ];
- const outputMessage = {
-   text: "Regular exercise improves cardiovascular health, strengthens muscles, boosts metabolism, and enhances mental well-being through the release of endorphins.",
- };
-
- const result = await scorer.run({
-   input: inputMessages,
-   output: outputMessage,
  });

- console.log(result);
+ console.log(result.scores);
  ```

- #### High relevancy output
-
- The output receives a high score because it accurately answers the query without including unrelated information.
-
- ```typescript
- {
-   score: 1,
-   reason: 'The score is 1 because the output directly addresses the question by providing multiple explicit health benefits of regular exercise, including improvements in cardiovascular health, muscle strength, metabolism, and mental well-being. Each point is relevant and contributes to a comprehensive understanding of the health benefits.'
- }
- ```
+ For more details on `runExperiment`, see the [runExperiment reference](/reference/scorers/run-experiment).

- ### Partial relevancy example
-
- In this example, the response addresses the query in part but includes additional information that isn’t directly relevant.
-
- ```typescript title="src/example-partial-answer-relevancy.ts" showLineNumbers copy
- import { createAnswerRelevancyScorer } from "@mastra/evals/scorers/llm";
-
- const scorer = createAnswerRelevancyScorer({ model: "openai/gpt-4o-mini" });
-
- const inputMessages = [
-   { role: "user", content: "What should a healthy breakfast include?" },
- ];
- const outputMessage = {
-   text: "A nutritious breakfast should include whole grains and protein. However, the timing of your breakfast is just as important - studies show eating within 2 hours of waking optimizes metabolism and energy levels throughout the day.",
- };
-
- const result = await scorer.run({
-   input: inputMessages,
-   output: outputMessage,
- });
-
- console.log(result);
- ```
-
- #### Partial relevancy output
-
- The output receives a lower score because it partially answers the query. While some relevant information is included, unrelated details reduce the overall relevance.
-
- ```typescript
- {
-   score: 0.25,
-   reason: 'The score is 0.25 because the output provides a direct answer by mentioning whole grains and protein as components of a healthy breakfast, which is relevant. However, the additional information about the timing of breakfast and its effects on metabolism and energy levels is not directly related to the question, leading to a lower overall relevance score.'
- }
- ```
-
- ## Low relevancy example
-
- In this example, the response does not address the query and contains information that is entirely unrelated.
-
- ```typescript title="src/example-low-answer-relevancy.ts" showLineNumbers copy
- import { createAnswerRelevancyScorer } from "@mastra/evals/scorers/llm";
-
- const scorer = createAnswerRelevancyScorer({ model: "openai/gpt-4o-mini" });
-
- const inputMessages = [
-   { role: "user", content: "What are the benefits of meditation?" },
- ];
- const outputMessage = {
-   text: "The Great Wall of China is over 13,000 miles long and was built during the Ming Dynasty to protect against invasions.",
- };
-
- const result = await scorer.run({
-   input: inputMessages,
-   output: outputMessage,
- });
-
- console.log(result);
- ```
-
- #### Low relevancy output
-
- The output receives a score of 0 because it fails to answer the query or provide any relevant information.
-
- ```typescript
- {
-   score: 0,
-   reason: 'The score is 0 because the output about the Great Wall of China is completely unrelated to the benefits of meditation, providing no relevant information or context that addresses the input question.'
- }
- ```
+ To add this scorer to an agent, see the [Scorers overview](/docs/scorers/overview) guide.

  ## Related

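The rewritten example above logs each score via `onItemComplete`; in CI, the same hook can gate on a threshold instead. A sketch using only the `runExperiment` shape shown in the diff (the 0.7 cutoff is an arbitrary illustration, not a documented default):

```typescript
// Sketch: failing CI when relevancy drops below a chosen threshold.
import { runExperiment } from "@mastra/core/scores";
import { createAnswerRelevancyScorer } from "@mastra/evals/scorers/llm";
import { myAgent } from "./agent";

const scorer = createAnswerRelevancyScorer({ model: "openai/gpt-4o" });

await runExperiment({
  data: [{ input: "What are the health benefits of regular exercise?" }],
  scorers: [scorer],
  target: myAgent,
  onItemComplete: ({ scorerResults }) => {
    // scorerResults is keyed by scorer.name, as in the example above.
    const { score, reason } = scorerResults[scorer.name];
    if (score < 0.7) {
      throw new Error(`Relevancy ${score} below threshold: ${reason}`);
    }
  },
});
```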
package/.docs/raw/reference/scorers/answer-similarity.mdx
@@ -149,46 +149,16 @@ The scorer uses a multi-step process:

  Score calculation: `max(0, base_score - contradiction_penalty - missing_penalty - extra_info_penalty) × scale`

- ## Examples
+ ## Example

- ### Usage with runExperiment
+ Evaluate agent responses for similarity to ground truth across different scenarios:

- This scorer is designed for use with `runExperiment` for CI/CD testing:
-
- ```typescript
- import { runExperiment } from "@mastra/core/scores";
- import { createAnswerSimilarityScorer } from "@mastra/evals/scorers/llm";
-
- const scorer = createAnswerSimilarityScorer({ model });
-
- await runExperiment({
-   data: [
-     {
-       input: "What is the capital of France?",
-       groundTruth: "Paris is the capital of France",
-     },
-   ],
-   scorers: [scorer],
-   target: myAgent,
-   onItemComplete: ({ scorerResults }) => {
-     // Assert similarity score meets threshold
-     expect(scorerResults["Answer Similarity Scorer"].score).toBeGreaterThan(
-       0.8,
-     );
-   },
- });
- ```
-
- ### Perfect similarity example
-
- In this example, the agent's output semantically matches the ground truth perfectly.
-
- ```typescript title="src/example-perfect-similarity.ts" showLineNumbers copy
+ ```typescript title="src/example-answer-similarity.ts" showLineNumbers copy
  import { runExperiment } from "@mastra/core/scores";
  import { createAnswerSimilarityScorer } from "@mastra/evals/scorers/llm";
  import { myAgent } from "./agent";

- const scorer = createAnswerSimilarityScorer({ model: "openai/gpt-4o-mini" });
+ const scorer = createAnswerSimilarityScorer({ model: "openai/gpt-4o" });

  const result = await runExperiment({
    data: [
@@ -196,78 +196,10 @@ const result = await runExperiment({
      input: "What is 2+2?",
      groundTruth: "4",
    },
-   ],
-   scorers: [scorer],
-   target: myAgent,
- });
-
- console.log(result.scores);
- ```
-
- #### Perfect similarity output
-
- The output receives a perfect score because both the agent's answer and ground truth are identical.
-
- ```typescript
- {
-   "Answer Similarity Scorer": {
-     score: 1.0,
-     reason: "The score is 1.0/1 because the output matches the ground truth exactly. The agent correctly provided the numerical answer. No improvements needed as the response is fully accurate."
-   }
- }
- ```
-
- ### High semantic similarity example
-
- In this example, the agent provides the same information as the ground truth but with different phrasing.
-
- ```typescript title="src/example-semantic-similarity.ts" showLineNumbers copy
- import { runExperiment } from "@mastra/core/scores";
- import { createAnswerSimilarityScorer } from "@mastra/evals/scorers/llm";
- import { myAgent } from "./agent";
-
- const scorer = createAnswerSimilarityScorer({ model: "openai/gpt-4o-mini" });
-
- const result = await runExperiment({
-   data: [
    {
      input: "What is the capital of France?",
      groundTruth: "The capital of France is Paris",
    },
-   ],
-   scorers: [scorer],
-   target: myAgent,
- });
-
- console.log(result.scores);
- ```
-
- #### High semantic similarity output
-
- The output receives a high score because it conveys the same information with equivalent meaning.
-
- ```typescript
- {
-   "Answer Similarity Scorer": {
-     score: 0.9,
-     reason: "The score is 0.9/1 because both answers convey the same information about Paris being the capital of France. The agent correctly identified the main fact with slightly different phrasing. Minor variation in structure but semantically equivalent."
-   }
- }
- ```
-
- ### Partial similarity example
-
- In this example, the agent's response is partially correct but missing key information.
-
- ```typescript title="src/example-partial-similarity.ts" showLineNumbers copy
- import { runExperiment } from "@mastra/core/scores";
- import { createAnswerSimilarityScorer } from "@mastra/evals/scorers/llm";
- import { myAgent } from "./agent";
-
- const scorer = createAnswerSimilarityScorer({ model: "openai/gpt-4o-mini" });
-
- const result = await runExperiment({
-   data: [
    {
      input: "What are the primary colors?",
      groundTruth: "The primary colors are red, blue, and yellow",
@@ -275,165 +275,17 @@ const result = await runExperiment({
    ],
    scorers: [scorer],
    target: myAgent,
- });
-
- console.log(result.scores);
- ```
-
- #### Partial similarity output
-
- The output receives a moderate score because it includes some correct information but is incomplete.
-
- ```typescript
- {
-   "Answer Similarity Scorer": {
-     score: 0.6,
-     reason: "The score is 0.6/1 because the answer captures some key elements but is incomplete. The agent correctly identified red and blue as primary colors. However, it missed the critical color yellow, which is essential for a complete answer."
-   }
- }
- ```
-
- ### Contradiction example
-
- In this example, the agent provides factually incorrect information that contradicts the ground truth.
-
- ```typescript title="src/example-contradiction.ts" showLineNumbers copy
- import { runExperiment } from "@mastra/core/scores";
- import { createAnswerSimilarityScorer } from "@mastra/evals/scorers/llm";
- import { myAgent } from "./agent";
-
- const scorer = createAnswerSimilarityScorer({ model: "openai/gpt-4o-mini" });
-
- const result = await runExperiment({
-   data: [
-     {
-       input: "Who wrote Romeo and Juliet?",
-       groundTruth: "William Shakespeare wrote Romeo and Juliet",
-     },
-   ],
-   scorers: [scorer],
-   target: myAgent,
- });
-
- console.log(result.scores);
- ```
-
- #### Contradiction output
-
- The output receives a very low score because it contains factually incorrect information.
-
- ```typescript
- {
-   "Answer Similarity Scorer": {
-     score: 0.0,
-     reason: "The score is 0.0/1 because the output contains a critical error regarding authorship. The agent correctly identified the play title but incorrectly attributed it to Christopher Marlowe instead of William Shakespeare, which is a fundamental contradiction."
-   }
- }
- ```
-
- ### CI/CD Integration example
-
- Use the scorer in your test suites to ensure agent consistency over time:
-
- ```typescript title="src/ci-integration.test.ts" showLineNumbers copy
- import { describe, it, expect } from "vitest";
- import { runExperiment } from "@mastra/core/scores";
- import { createAnswerSimilarityScorer } from "@mastra/evals/scorers/llm";
- import { myAgent } from "./agent";
-
- describe("Agent Consistency Tests", () => {
-   const scorer = createAnswerSimilarityScorer({ model: "openai/gpt-4o-mini" });
-
-   it("should provide accurate factual answers", async () => {
-     const result = await runExperiment({
-       data: [
-         {
-           input: "What is the speed of light?",
-           groundTruth:
-             "The speed of light in vacuum is 299,792,458 meters per second",
-         },
-         {
-           input: "What is the capital of Japan?",
-           groundTruth: "Tokyo is the capital of Japan",
-         },
-       ],
-       scorers: [scorer],
-       target: myAgent,
+   onItemComplete: ({ scorerResults }) => {
+     console.log({
+       score: scorerResults[scorer.name].score,
+       reason: scorerResults[scorer.name].reason,
      });
-
-     // Assert all answers meet similarity threshold
-     expect(result.scores["Answer Similarity Scorer"].score).toBeGreaterThan(
-       0.8,
-     );
-   });
-
-   it("should maintain consistency across runs", async () => {
-     const testData = {
-       input: "Define machine learning",
-       groundTruth:
-         "Machine learning is a subset of AI that enables systems to learn and improve from experience",
-     };
-
-     // Run multiple times to check consistency
-     const results = await Promise.all([
-       runExperiment({ data: [testData], scorers: [scorer], target: myAgent }),
-       runExperiment({ data: [testData], scorers: [scorer], target: myAgent }),
-       runExperiment({ data: [testData], scorers: [scorer], target: myAgent }),
-     ]);
-
-     // Check that all runs produce similar scores (within 0.1 tolerance)
-     const scores = results.map(
-       (r) => r.scores["Answer Similarity Scorer"].score,
-     );
-     const maxDiff = Math.max(...scores) - Math.min(...scores);
-     expect(maxDiff).toBeLessThan(0.1);
-   });
- });
- ```
-
- ### Custom configuration example
-
- Customize the scorer behavior for specific use cases:
-
- ```typescript title="src/custom-config.ts" showLineNumbers copy
- import { runExperiment } from "@mastra/core/scores";
- import { createAnswerSimilarityScorer } from "@mastra/evals/scorers/llm";
- import { myAgent } from "./agent";
-
- // Configure for strict exact matching with high scale
- const strictScorer = createAnswerSimilarityScorer({
-   model: "openai/gpt-4o-mini",
-   options: {
-     exactMatchBonus: 0.5, // Higher bonus for exact matches
-     contradictionPenalty: 2.0, // Very strict on contradictions
-     missingPenalty: 0.3, // Higher penalty for missing info
-     scale: 10, // Score out of 10 instead of 1
    },
  });

- // Configure for lenient semantic matching
- const lenientScorer = createAnswerSimilarityScorer({
-   model: "openai/gpt-4o-mini",
-   options: {
-     semanticThreshold: 0.6, // Lower threshold for semantic matches
-     contradictionPenalty: 0.5, // More forgiving on minor contradictions
-     extraInfoPenalty: 0, // No penalty for extra information
-     requireGroundTruth: false, // Allow missing ground truth
-   },
- });
+ console.log(result.scores);
+ ```

- const result = await runExperiment({
-   data: [
-     {
-       input: "Explain photosynthesis",
-       groundTruth:
-         "Photosynthesis is the process by which plants convert light energy into chemical energy",
-     },
-   ],
-   scorers: [strictScorer, lenientScorer],
-   target: myAgent,
- });
+ For more details on `runExperiment`, see the [runExperiment reference](/reference/scorers/run-experiment).

- console.log("Strict scorer:", result.scores["Answer Similarity Scorer"].score); // Out of 10
- console.log("Lenient scorer:", result.scores["Answer Similarity Scorer"].score); // Out of 1
- ```
+ To add this scorer to an agent, see the [Scorers overview](/docs/scorers/overview#adding-scorers-to-agents) guide.
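
The `Score calculation` line kept as context in the first answer-similarity hunk pins down the arithmetic. As a sanity check, a direct transcription of that formula; the penalty values below are illustrative inputs, not the scorer's actual weights:

```typescript
// max(0, base_score - contradiction_penalty - missing_penalty - extra_info_penalty) × scale
function similarityScore(
  base: number,
  contradictionPenalty: number,
  missingPenalty: number,
  extraInfoPenalty: number,
  scale = 1,
): number {
  return Math.max(0, base - contradictionPenalty - missingPenalty - extraInfoPenalty) * scale;
}

// e.g. a 0.9 base score docked 0.3 for missing information:
similarityScore(0.9, 0, 0.3, 0); // 0.6
```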