@mastra/mcp-docs-server 0.13.44 → 0.13.45-alpha.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. It is provided for informational purposes only and reflects the changes between package versions as they appear in their respective public registries.
- package/.docs/organized/changelogs/%40internal%2Fstorage-test-utils.md +201 -1
- package/.docs/organized/changelogs/%40mastra%2Fagent-builder.md +201 -1
- package/.docs/organized/changelogs/%40mastra%2Fai-sdk.md +253 -53
- package/.docs/organized/changelogs/%40mastra%2Fastra.md +201 -1
- package/.docs/organized/changelogs/%40mastra%2Fchroma.md +201 -1
- package/.docs/organized/changelogs/%40mastra%2Fclickhouse.md +201 -1
- package/.docs/organized/changelogs/%40mastra%2Fclient-js.md +201 -1
- package/.docs/organized/changelogs/%40mastra%2Fcloud.md +201 -1
- package/.docs/organized/changelogs/%40mastra%2Fcloudflare-d1.md +201 -1
- package/.docs/organized/changelogs/%40mastra%2Fcloudflare.md +201 -1
- package/.docs/organized/changelogs/%40mastra%2Fcore.md +307 -107
- package/.docs/organized/changelogs/%40mastra%2Fcouchbase.md +201 -1
- package/.docs/organized/changelogs/%40mastra%2Fdeployer-cloud.md +201 -1
- package/.docs/organized/changelogs/%40mastra%2Fdeployer-cloudflare.md +201 -1
- package/.docs/organized/changelogs/%40mastra%2Fdeployer-netlify.md +201 -1
- package/.docs/organized/changelogs/%40mastra%2Fdeployer-vercel.md +201 -1
- package/.docs/organized/changelogs/%40mastra%2Fdeployer.md +201 -1
- package/.docs/organized/changelogs/%40mastra%2Fdynamodb.md +201 -1
- package/.docs/organized/changelogs/%40mastra%2Fevals.md +201 -1
- package/.docs/organized/changelogs/%40mastra%2Flance.md +201 -1
- package/.docs/organized/changelogs/%40mastra%2Flibsql.md +201 -1
- package/.docs/organized/changelogs/%40mastra%2Floggers.md +201 -1
- package/.docs/organized/changelogs/%40mastra%2Fmcp-docs-server.md +201 -1
- package/.docs/organized/changelogs/%40mastra%2Fmcp-registry-registry.md +201 -1
- package/.docs/organized/changelogs/%40mastra%2Fmcp.md +201 -1
- package/.docs/organized/changelogs/%40mastra%2Fmemory.md +201 -1
- package/.docs/organized/changelogs/%40mastra%2Fmongodb.md +201 -1
- package/.docs/organized/changelogs/%40mastra%2Fmssql.md +201 -1
- package/.docs/organized/changelogs/%40mastra%2Fopensearch.md +201 -1
- package/.docs/organized/changelogs/%40mastra%2Fpg.md +201 -1
- package/.docs/organized/changelogs/%40mastra%2Fpinecone.md +201 -1
- package/.docs/organized/changelogs/%40mastra%2Fplayground-ui.md +201 -1
- package/.docs/organized/changelogs/%40mastra%2Fqdrant.md +201 -1
- package/.docs/organized/changelogs/%40mastra%2Frag.md +201 -1
- package/.docs/organized/changelogs/%40mastra%2Freact.md +182 -1
- package/.docs/organized/changelogs/%40mastra%2Fschema-compat.md +36 -0
- package/.docs/organized/changelogs/%40mastra%2Fserver.md +201 -1
- package/.docs/organized/changelogs/%40mastra%2Fturbopuffer.md +201 -1
- package/.docs/organized/changelogs/%40mastra%2Fupstash.md +201 -1
- package/.docs/organized/changelogs/%40mastra%2Fvectorize.md +201 -1
- package/.docs/organized/changelogs/%40mastra%2Fvoice-azure.md +201 -1
- package/.docs/organized/changelogs/%40mastra%2Fvoice-cloudflare.md +201 -1
- package/.docs/organized/changelogs/%40mastra%2Fvoice-deepgram.md +201 -1
- package/.docs/organized/changelogs/%40mastra%2Fvoice-elevenlabs.md +201 -1
- package/.docs/organized/changelogs/%40mastra%2Fvoice-gladia.md +104 -1
- package/.docs/organized/changelogs/%40mastra%2Fvoice-google-gemini-live.md +49 -1
- package/.docs/organized/changelogs/%40mastra%2Fvoice-google.md +201 -1
- package/.docs/organized/changelogs/%40mastra%2Fvoice-murf.md +201 -1
- package/.docs/organized/changelogs/%40mastra%2Fvoice-openai-realtime.md +201 -1
- package/.docs/organized/changelogs/%40mastra%2Fvoice-openai.md +201 -1
- package/.docs/organized/changelogs/%40mastra%2Fvoice-playai.md +201 -1
- package/.docs/organized/changelogs/%40mastra%2Fvoice-sarvam.md +201 -1
- package/.docs/organized/changelogs/%40mastra%2Fvoice-speechify.md +201 -1
- package/.docs/organized/changelogs/create-mastra.md +201 -1
- package/.docs/organized/changelogs/mastra.md +201 -1
- package/.docs/raw/agents/adding-voice.mdx +49 -0
- package/.docs/raw/course/01-first-agent/05-running-playground.md +5 -5
- package/.docs/raw/course/01-first-agent/09-testing-your-agent.md +3 -3
- package/.docs/raw/course/01-first-agent/13-testing-your-tool.md +3 -3
- package/.docs/raw/course/01-first-agent/17-testing-memory.md +2 -2
- package/.docs/raw/course/04-workflows/07-using-playground.md +1 -1
- package/.docs/raw/frameworks/agentic-uis/ai-sdk.mdx +23 -1
- package/.docs/raw/reference/client-js/memory.mdx +43 -0
- package/.docs/raw/reference/core/mastra-class.mdx +8 -0
- package/.docs/raw/reference/core/mastra-model-gateway.mdx +223 -0
- package/.docs/raw/reference/scorers/answer-relevancy.mdx +28 -98
- package/.docs/raw/reference/scorers/answer-similarity.mdx +12 -258
- package/.docs/raw/reference/scorers/bias.mdx +29 -87
- package/.docs/raw/reference/scorers/completeness.mdx +32 -91
- package/.docs/raw/reference/scorers/content-similarity.mdx +29 -99
- package/.docs/raw/reference/scorers/context-precision.mdx +28 -130
- package/.docs/raw/reference/scorers/faithfulness.mdx +28 -101
- package/.docs/raw/reference/scorers/hallucination.mdx +28 -103
- package/.docs/raw/reference/scorers/keyword-coverage.mdx +28 -107
- package/.docs/raw/reference/scorers/textual-difference.mdx +27 -100
- package/.docs/raw/reference/scorers/tone-consistency.mdx +25 -98
- package/.docs/raw/reference/scorers/toxicity.mdx +29 -92
- package/.docs/raw/reference/storage/cloudflare-d1.mdx +37 -0
- package/.docs/raw/reference/storage/lance.mdx +33 -0
- package/.docs/raw/reference/storage/libsql.mdx +37 -0
- package/.docs/raw/reference/storage/mongodb.mdx +39 -0
- package/.docs/raw/reference/storage/mssql.mdx +37 -0
- package/.docs/raw/reference/storage/postgresql.mdx +37 -0
- package/.docs/raw/reference/streaming/agents/stream.mdx +7 -0
- package/.docs/raw/reference/voice/composite-voice.mdx +71 -28
- package/.docs/raw/reference/voice/voice.listen.mdx +86 -52
- package/.docs/raw/reference/voice/voice.speak.mdx +75 -40
- package/.docs/raw/voice/overview.mdx +67 -0
- package/.docs/raw/workflows/control-flow.mdx +180 -0
- package/CHANGELOG.md +10 -0
- package/dist/{chunk-TUAHUTTB.js → chunk-VE65X75W.js} +24 -4
- package/dist/prepare-docs/package-changes.d.ts.map +1 -1
- package/dist/prepare-docs/prepare.js +1 -1
- package/dist/stdio.js +1 -1
- package/package.json +5 -5
@@ -78,116 +78,46 @@ The scorer evaluates textual similarity through character-level matching and con

 Final score: `similarity_value * scale`

-##
+## Example

-
+Evaluate textual similarity between expected and actual agent outputs:

-
-
-
-import {
+```typescript title="src/example-content-similarity.ts" showLineNumbers copy
+import { runExperiment } from "@mastra/core/scores";
+import { createContentSimilarityScorer } from "@mastra/evals/scorers/code";
+import { myAgent } from "./agent";

 const scorer = createContentSimilarityScorer();

-const
-
-
-
-
-}
-
-
-
-
-
-
-
-
-{
-
-
-
+const result = await runExperiment({
+  data: [
+    {
+      input: "Summarize the benefits of TypeScript",
+      groundTruth:
+        "TypeScript provides static typing, better tooling support, and improved code maintainability.",
+    },
+    {
+      input: "What is machine learning?",
+      groundTruth:
+        "Machine learning is a subset of AI that enables systems to learn from data without explicit programming.",
+    },
+  ],
+  scorers: [scorer],
+  target: myAgent,
+  onItemComplete: ({ scorerResults }) => {
+    console.log({
+      score: scorerResults[scorer.name].score,
+      groundTruth: scorerResults[scorer.name].groundTruth,
+    });
   },
-}
-```
-
-### Moderate similarity example
-
-In this example, the response shares some conceptual overlap with the query but diverges in structure and wording. Key elements remain present, but the phrasing introduces moderate variation.
-
-```typescript title="src/example-moderate-similarity.ts" showLineNumbers copy
-import { createContentSimilarityScorer } from "@mastra/evals/scorers/llm";
-
-const scorer = createContentSimilarityScorer();
-
-const query = "A brown fox quickly leaps across a sleeping dog.";
-const response = "The quick brown fox jumps over the lazy dog.";
-
-const result = await scorer.run({
-  input: [{ role: "user", content: query }],
-  output: { text: response },
 });

-console.log(result);
+console.log(result.scores);
 ```

-
-
-The output receives a mid-range score because the response captures the general idea of the query, though it differs enough in wording to reduce overall similarity.
-
-```typescript
-{
-  score: 0.40540540540540543,
-  analyzeStepResult: {
-    similarity: 0.40540540540540543
-  }
-}
-```
-
-### Low similarity example
-
-In this example, the response and query are unrelated in meaning, despite having a similar grammatical structure. There is little to no shared content overlap.
-
-```typescript title="src/example-low-similarity.ts" showLineNumbers copy
-import { createContentSimilarityScorer } from "@mastra/evals/scorers/llm";
-
-const scorer = createContentSimilarityScorer();
-
-const query = "The cat sleeps on the windowsill.";
-const response = "The quick brown fox jumps over the lazy dog.";
-
-const result = await scorer.run({
-  input: [{ role: "user", content: query }],
-  output: { text: response },
-});
-
-console.log(result);
-```
-
-#### Low similarity output
-
-The output receives a low score because the response does not align with the content or intent of the query.
-
-```typescript
-{
-  score: 0.25806451612903225,
-  analyzeStepResult: {
-    similarity: 0.25806451612903225
-  },
-}
-```
-
-### Score interpretation
-
-A similarity score between 0 and 1:
+For more details on `runExperiment`, see the [runExperiment reference](/reference/scorers/run-experiment).

-
-- **0.7–0.9**: High similarity – minor differences in word choice or structure.
-- **0.4–0.6**: Moderate similarity – general overlap with noticeable variation.
-- **0.1–0.3**: Low similarity – few common elements or shared meaning.
-- **0.0**: No similarity – completely different content.
+To add this scorer to an agent, see the [Scorers overview](/docs/scorers/overview#adding-scorers-to-agents) guide.

 ## Related

@@ -152,7 +152,7 @@ MAP = (1.0 + 0.67) / 2 = 0.835 ≈ **0.83**

 ```typescript
 const scorer = createContextPrecisionScorer({
-  model: "openai/gpt-
+  model: "openai/gpt-5.1",
   options: {
     contextExtractor: (input, output) => {
       // Extract context dynamically based on the query
@@ -171,7 +171,7 @@ const scorer = createContextPrecisionScorer({

 ```typescript
 const scorer = createContextPrecisionScorer({
-  model: "openai/gpt-
+  model: "openai/gpt-5.1",
   options: {
     context: [
       // Simulate retrieved documents from vector database
@@ -186,152 +186,50 @@ const scorer = createContextPrecisionScorer({
 });
 ```

-##
+## Example

-
+Evaluate RAG system context retrieval precision for different queries:

-
-
-
-import {
+```typescript title="src/example-context-precision.ts" showLineNumbers copy
+import { runExperiment } from "@mastra/core/scores";
+import { createContextPrecisionScorer } from "@mastra/evals/scorers/llm";
+import { myAgent } from "./agent";

 const scorer = createContextPrecisionScorer({
-  model: "openai/gpt-4o
+  model: "openai/gpt-4o",
   options: {
-
-
-
-
-    ],
-    scale: 1,
+    contextExtractor: (input, output) => {
+      // Extract context from agent's retrieved documents
+      return output.metadata?.retrievedContext || [];
+    },
   },
 });

-const result = await
-
-    inputMessages: [
-      {
-        id: "1",
-        role: "user",
-        content: "How does photosynthesis work in plants?",
-      },
-    ],
-  },
-  output: [
+const result = await runExperiment({
+  data: [
     {
-
-      role: "assistant",
-      content:
-        "Photosynthesis is the process where plants convert sunlight, CO2, and water into glucose and oxygen using chloroplasts.",
+      input: "How does photosynthesis work in plants?",
     },
-  ],
-});
-
-console.log(result);
-// Output:
-// {
-// score: 1.0,
-// reason: "The score is 1.0 because all context pieces are highly relevant to explaining photosynthesis and are optimally ordered to support the expected output."
-// }
-```
-
-### Mixed precision example
-
-This example shows moderate precision with both relevant and irrelevant context:
-
-```typescript
-import { createContextPrecisionScorer } from "@mastra/evals";
-
-const scorer = createContextPrecisionScorer({
-  model: "openai/gpt-4o-mini",
-  options: {
-    context: [
-      "Regular exercise improves cardiovascular health by strengthening the heart muscle.",
-      "A balanced diet should include fruits, vegetables, and whole grains.",
-      "Physical activity releases endorphins which improve mood and reduce stress.",
-      "The average person should drink 8 glasses of water per day.",
-      "Exercise also helps maintain healthy body weight and muscle mass.",
-    ],
-    scale: 1,
-  },
-});
-
-const result = await scorer.run({
-  input: {
-    inputMessages: [
-      {
-        id: "1",
-        role: "user",
-        content: "What are the mental and physical benefits of exercise?",
-      },
-    ],
-  },
-  output: [
     {
-
-      role: "assistant",
-      content:
-        "Exercise provides cardiovascular benefits, improves mood through endorphin release, and helps maintain healthy body composition.",
+      input: "What are the mental and physical benefits of exercise?",
     },
   ],
+  scorers: [scorer],
+  target: myAgent,
+  onItemComplete: ({ scorerResults }) => {
+    console.log({
+      score: scorerResults[scorer.name].score,
+      reason: scorerResults[scorer.name].reason,
+    });
+  },
 });

-console.log(result);
-// Output:
-// {
-// score: 0.72,
-// reason: "The score is 0.72 because contexts 1, 3, and 5 are relevant to exercise benefits, but irrelevant contexts about diet and hydration reduce the precision score."
-// }
+console.log(result.scores);
 ```

-
-
-This example shows poor context precision with mostly irrelevant context:
-
-```typescript
-import { createContextPrecisionScorer } from "@mastra/evals";
-
-const scorer = createContextPrecisionScorer({
-  model: "openai/gpt-4o-mini",
-  options: {
-    context: [
-      "The weather forecast shows sunny skies this weekend.",
-      "Coffee is one of the world's most popular beverages.",
-      "Machine learning requires large amounts of training data.",
-      "Cats typically sleep 12-16 hours per day.",
-      "The capital of France is Paris.",
-    ],
-    scale: 1,
-  },
-});
-
-const result = await scorer.run({
-  input: {
-    inputMessages: [
-      {
-        id: "1",
-        role: "user",
-        content: "How does photosynthesis work?",
-      },
-    ],
-  },
-  output: [
-    {
-      id: "2",
-      role: "assistant",
-      content:
-        "Photosynthesis is the process by which plants convert sunlight into energy using chlorophyll.",
-    },
-  ],
-});
+For more details on `runExperiment`, see the [runExperiment reference](/reference/scorers/run-experiment).

-
-// Output:
-// {
-// score: 0.0,
-// reason: "The score is 0.0 because none of the retrieved context pieces are relevant to explaining photosynthesis."
-// }
-```
+To add this scorer to an agent, see the [Scorers overview](/docs/scorers/overview#adding-scorers-to-agents) guide.

 ## Comparison with Context Relevance

@@ -122,118 +122,45 @@ A faithfulness score between 0 and 1:
 - **0.1–0.3**: Most of the content is inaccurate or unsupported.
 - **0.0**: All claims are false or contradict the context.

-##
+## Example

-
+Evaluate agent responses for faithfulness to provided context:

-
-
-```typescript title="src/example-high-faithfulness.ts" showLineNumbers copy
-import { createFaithfulnessScorer } from "@mastra/evals/scorers/llm";
-
-const scorer = createFaithfulnessScorer({ model: 'openai/gpt-4o-mini', options: {
-  context: [
-    "The Tesla Model 3 was launched in 2017.",
-    "It has a range of up to 358 miles.",
-    "The base model accelerates 0-60 mph in 5.8 seconds."
-  ]
-});
-
-const query = "Tell me about the Tesla Model 3.";
-const response = "The Tesla Model 3 was introduced in 2017. It can travel up to 358 miles on a single charge and the base version goes from 0 to 60 mph in 5.8 seconds.";
-
-const result = await scorer.run({
-  input: [{ role: 'user', content: query }],
-  output: { text: response },
-});
-
-console.log(result);
-```
-
-#### High faithfulness output
-
-The output receives a score of 1 because all the information it provides can be directly traced to the context. There are no missing or contradictory facts.
-
-```typescript
-{
-  score: 1,
-  reason: 'The score is 1 because all claims made in the output are supported by the provided context.'
-}
-```
-
-### Mixed faithfulness example
-
-In this example, there are a mix of supported and unsupported claims. Some parts of the response are backed by the context, while others introduce new information not found in the source material.
-
-```typescript title="src/example-mixed-faithfulness.ts" showLineNumbers copy
-import { createFaithfulnessScorer } from "@mastra/evals/scorers/llm";
-
-const scorer = createFaithfulnessScorer({ model: 'openai/gpt-4o-mini', options: {
-  context: [
-    "Python was created by Guido van Rossum.",
-    "The first version was released in 1991.",
-    "Python emphasizes code readability."
-  ]
-});
-
-const query = "What can you tell me about Python?";
-const response = "Python was created by Guido van Rossum and released in 1991. It is the most popular programming language today and is used by millions of developers worldwide.";
-
-const result = await scorer.run({
-  input: [{ role: 'user', content: query }],
-  output: { text: response },
-});
-
-console.log(result);
-```
-
-#### Mixed faithfulness output
-
-The score is lower because only a portion of the response is verifiable. While some claims match the context, others are unconfirmed or out of scope, reducing the overall faithfulness.
-
-```typescript
-{
-  score: 0.5,
-  reason: "The score is 0.5 because while two claims are supported by the context (Python was created by Guido van Rossum and Python was released in 1991), the other two claims regarding Python's popularity and usage cannot be verified as they are not mentioned in the context."
-}
-```
-
-### Low faithfulness example
-
-In this example, the response directly contradicts the context. None of the claims are supported, and several conflict with the facts provided.
-
-```typescript title="src/example-low-faithfulness.ts" showLineNumbers copy
+```typescript title="src/example-faithfulness.ts" showLineNumbers copy
+import { runExperiment } from "@mastra/core/scores";
 import { createFaithfulnessScorer } from "@mastra/evals/scorers/llm";
+import { myAgent } from "./agent";

-
-
-
-    "It has a thin atmosphere of mostly carbon dioxide.",
-    "Two small moons orbit Mars: Phobos and Deimos."
-  ]
+// Context is typically populated from agent tool calls or RAG retrieval
+const scorer = createFaithfulnessScorer({
+  model: "openai/gpt-4o",
 });

-const
-
-
-
-
-
+const result = await runExperiment({
+  data: [
+    {
+      input: "Tell me about the Tesla Model 3.",
+    },
+    {
+      input: "What are the key features of this electric vehicle?",
+    },
+  ],
+  scorers: [scorer],
+  target: myAgent,
+  onItemComplete: ({ scorerResults }) => {
+    console.log({
+      score: scorerResults[scorer.name].score,
+      reason: scorerResults[scorer.name].reason,
+    });
+  },
 });

-console.log(result);
+console.log(result.scores);
 ```

-
+For more details on `runExperiment`, see the [runExperiment reference](/reference/scorers/run-experiment).

-
-
-```typescript
-{
-  score: 0,
-  reason: "The score is 0 because all claims made in the output contradict the provided context. The output states that Mars is the third planet from the Sun, while the context clearly states it is the fourth. Additionally, it claims that Mars has a thick atmosphere rich in oxygen and nitrogen, contradicting the context's description of a thin atmosphere mostly composed of carbon dioxide. Finally, the output mentions that Mars is orbited by three large moons, while the context specifies that it has only two small moons, Phobos and Deimos. Therefore, there are no supported claims, leading to a score of 0."
-}
-```
+To add this scorer to an agent, see the [Scorers overview](/docs/scorers/overview#adding-scorers-to-agents) guide.

 ## Related

@@ -131,120 +131,45 @@ A hallucination score between 0 and 1:

 **Note:** The score represents the degree of hallucination - lower scores indicate better factual alignment with the provided context

-##
+## Example

-
+Evaluate agent responses for hallucinations against provided context:

-
-
-```typescript title="src/example-no-hallucination.ts" showLineNumbers copy
-import { createHallucinationScorer } from "@mastra/evals/scorers/llm";
-
-const scorer = createHallucinationScorer({ model: 'openai/gpt-4o-mini', options: {
-  context: [
-    "The iPhone was first released in 2007.",
-    "Steve Jobs unveiled it at Macworld.",
-    "The original model had a 3.5-inch screen."
-  ]
-});
-
-const query = "When was the first iPhone released?";
-const response = "The iPhone was first released in 2007, when Steve Jobs unveiled it at Macworld. The original iPhone featured a 3.5-inch screen.";
-
-const result = await scorer.run({
-  input: [{ role: 'user', content: query }],
-  output: { text: response },
-});
-
-console.log(result);
-```
-
-#### No hallucination output
-
-The response receives a score of 0 because there are no contradictions. Every statement is consistent with the context, and no new or fabricated information has been introduced.
-
-```typescript
-{
-  score: 0,
-  reason: 'The score is 0 because none of the statements from the context were contradicted by the output.'
-}
-```
-
-### Mixed hallucination example
-
-In this example, the response includes both accurate and inaccurate claims. Some details align with the context, while others directly contradict it—such as inflated numbers or incorrect locations. These contradictions increase the hallucination score.
-
-```typescript title="src/example-mixed-hallucination.ts" showLineNumbers copy
+```typescript title="src/example-hallucination.ts" showLineNumbers copy
+import { runExperiment } from "@mastra/core/scores";
 import { createHallucinationScorer } from "@mastra/evals/scorers/llm";
+import { myAgent } from "./agent";

-
-
-
-    "It was directed by George Lucas.",
-    "The film earned $775 million worldwide.",
-    "The movie was filmed in Tunisia and England."
-  ]
+// Context is typically populated from agent tool calls or RAG retrieval
+const scorer = createHallucinationScorer({
+  model: "openai/gpt-4o",
 });

-const
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-  score: 0.5,
-  reason: 'The score is 0.5 because two out of four statements from the output were contradicted by claims in the context, indicating a balance of accurate and inaccurate information.'
-}
-```
-
-### Complete hallucination example
-
-In this example, the response contradicts every key fact in the context. None of the claims can be verified, and all presented details are factually incorrect.
-
-```typescript title="src/example-complete-hallucination.ts" showLineNumbers copy
-import { createHallucinationScorer } from "@mastra/evals/scorers/llm";
-
-const scorer = createHallucinationScorer({ model: 'openai/gpt-4o-mini', options: {
-  context: [
-    "The Wright brothers made their first flight in 1903.",
-    "The flight lasted 12 seconds.",
-    "It covered a distance of 120 feet."
-  ]
-});
-
-const query = "When did the Wright brothers first fly?";
-const response = "The Wright brothers achieved their historic first flight in 1908. The flight lasted about 2 minutes and covered nearly a mile.";
-
-const result = await scorer.run({
-  input: [{ role: 'user', content: query }],
-  output: { text: response },
+const result = await runExperiment({
+  data: [
+    {
+      input: "When was the first iPhone released?",
+    },
+    {
+      input: "Tell me about the original iPhone announcement.",
+    },
+  ],
+  scorers: [scorer],
+  target: myAgent,
+  onItemComplete: ({ scorerResults }) => {
+    console.log({
+      score: scorerResults[scorer.name].score,
+      reason: scorerResults[scorer.name].reason,
+    });
+  },
 });

-console.log(result);
-
+console.log(result.scores);
 ```

-
+For more details on `runExperiment`, see the [runExperiment reference](/reference/scorers/run-experiment).

-
-
-```typescript
-{
-  score: 1,
-  reason: 'The score is 1.0 because all three statements from the output directly contradict the context: the first flight was in 1903, not 1908; it lasted 12 seconds, not about 2 minutes; and it covered 120 feet, not nearly a mile.'
-}
-```
+To add this scorer to an agent, see the [Scorers overview](/docs/scorers/overview#adding-scorers-to-agents) guide.

 ## Related
