@mastra/mcp-docs-server 1.0.0-beta.4 → 1.0.0-beta.5
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/.docs/organized/changelogs/%40internal%2Fstorage-test-utils.md +201 -1
- package/.docs/organized/changelogs/%40mastra%2Fagent-builder.md +201 -1
- package/.docs/organized/changelogs/%40mastra%2Fai-sdk.md +201 -1
- package/.docs/organized/changelogs/%40mastra%2Fastra.md +201 -1
- package/.docs/organized/changelogs/%40mastra%2Fchroma.md +201 -1
- package/.docs/organized/changelogs/%40mastra%2Fclickhouse.md +201 -1
- package/.docs/organized/changelogs/%40mastra%2Fclient-js.md +201 -1
- package/.docs/organized/changelogs/%40mastra%2Fcloudflare-d1.md +201 -1
- package/.docs/organized/changelogs/%40mastra%2Fcloudflare.md +201 -1
- package/.docs/organized/changelogs/%40mastra%2Fcore.md +326 -126
- package/.docs/organized/changelogs/%40mastra%2Fcouchbase.md +201 -1
- package/.docs/organized/changelogs/%40mastra%2Fdeployer-cloud.md +201 -1
- package/.docs/organized/changelogs/%40mastra%2Fdeployer-cloudflare.md +201 -1
- package/.docs/organized/changelogs/%40mastra%2Fdeployer-netlify.md +201 -1
- package/.docs/organized/changelogs/%40mastra%2Fdeployer-vercel.md +201 -1
- package/.docs/organized/changelogs/%40mastra%2Fdeployer.md +201 -1
- package/.docs/organized/changelogs/%40mastra%2Fdynamodb.md +201 -1
- package/.docs/organized/changelogs/%40mastra%2Fevals.md +201 -1
- package/.docs/organized/changelogs/%40mastra%2Flance.md +201 -1
- package/.docs/organized/changelogs/%40mastra%2Flibsql.md +201 -1
- package/.docs/organized/changelogs/%40mastra%2Floggers.md +201 -1
- package/.docs/organized/changelogs/%40mastra%2Fmcp-docs-server.md +201 -1
- package/.docs/organized/changelogs/%40mastra%2Fmcp-registry-registry.md +201 -1
- package/.docs/organized/changelogs/%40mastra%2Fmcp.md +201 -1
- package/.docs/organized/changelogs/%40mastra%2Fmemory.md +201 -1
- package/.docs/organized/changelogs/%40mastra%2Fmongodb.md +201 -1
- package/.docs/organized/changelogs/%40mastra%2Fmssql.md +201 -1
- package/.docs/organized/changelogs/%40mastra%2Fopensearch.md +201 -1
- package/.docs/organized/changelogs/%40mastra%2Fpg.md +201 -1
- package/.docs/organized/changelogs/%40mastra%2Fpinecone.md +201 -1
- package/.docs/organized/changelogs/%40mastra%2Fplayground-ui.md +201 -1
- package/.docs/organized/changelogs/%40mastra%2Fqdrant.md +201 -1
- package/.docs/organized/changelogs/%40mastra%2Frag.md +201 -1
- package/.docs/organized/changelogs/%40mastra%2Freact.md +80 -1
- package/.docs/organized/changelogs/%40mastra%2Fs3vectors.md +9 -0
- package/.docs/organized/changelogs/%40mastra%2Fschema-compat.md +36 -0
- package/.docs/organized/changelogs/%40mastra%2Fserver.md +201 -1
- package/.docs/organized/changelogs/%40mastra%2Fturbopuffer.md +201 -1
- package/.docs/organized/changelogs/%40mastra%2Fupstash.md +201 -1
- package/.docs/organized/changelogs/%40mastra%2Fvectorize.md +201 -1
- package/.docs/organized/changelogs/%40mastra%2Fvoice-azure.md +201 -1
- package/.docs/organized/changelogs/%40mastra%2Fvoice-cloudflare.md +201 -1
- package/.docs/organized/changelogs/%40mastra%2Fvoice-deepgram.md +201 -1
- package/.docs/organized/changelogs/%40mastra%2Fvoice-elevenlabs.md +201 -1
- package/.docs/organized/changelogs/%40mastra%2Fvoice-gladia.md +92 -1
- package/.docs/organized/changelogs/%40mastra%2Fvoice-google-gemini-live.md +67 -1
- package/.docs/organized/changelogs/%40mastra%2Fvoice-google.md +201 -1
- package/.docs/organized/changelogs/%40mastra%2Fvoice-murf.md +201 -1
- package/.docs/organized/changelogs/%40mastra%2Fvoice-openai-realtime.md +201 -1
- package/.docs/organized/changelogs/%40mastra%2Fvoice-openai.md +201 -1
- package/.docs/organized/changelogs/%40mastra%2Fvoice-playai.md +201 -1
- package/.docs/organized/changelogs/%40mastra%2Fvoice-sarvam.md +201 -1
- package/.docs/organized/changelogs/%40mastra%2Fvoice-speechify.md +201 -1
- package/.docs/organized/changelogs/create-mastra.md +201 -1
- package/.docs/organized/changelogs/mastra.md +201 -1
- package/.docs/organized/code-examples/memory-with-processors.md +1 -1
- package/.docs/organized/code-examples/quick-start.md +1 -1
- package/.docs/raw/agents/adding-voice.mdx +7 -10
- package/.docs/raw/agents/guardrails.mdx +19 -20
- package/.docs/raw/agents/human-in-the-loop-with-tools.mdx +6 -5
- package/.docs/raw/agents/networks.mdx +1 -2
- package/.docs/raw/agents/overview.mdx +5 -5
- package/.docs/raw/agents/using-tools.mdx +4 -5
- package/.docs/raw/course/01-first-agent/05-running-playground.md +5 -5
- package/.docs/raw/course/01-first-agent/09-testing-your-agent.md +3 -3
- package/.docs/raw/course/01-first-agent/13-testing-your-tool.md +3 -3
- package/.docs/raw/course/01-first-agent/17-testing-memory.md +2 -2
- package/.docs/raw/course/04-workflows/07-using-playground.md +1 -1
- package/.docs/raw/deployment/building-mastra.mdx +1 -1
- package/.docs/raw/deployment/cloud-providers/amazon-ec2.mdx +1 -1
- package/.docs/raw/deployment/cloud-providers/aws-lambda.mdx +1 -1
- package/.docs/raw/deployment/cloud-providers/azure-app-services.mdx +1 -1
- package/.docs/raw/deployment/cloud-providers/digital-ocean.mdx +1 -1
- package/.docs/raw/deployment/cloud-providers/index.mdx +1 -1
- package/.docs/raw/deployment/mastra-cloud/observability.mdx +19 -17
- package/.docs/raw/deployment/mastra-cloud/setting-up.mdx +1 -1
- package/.docs/raw/deployment/overview.mdx +2 -2
- package/.docs/raw/deployment/web-framework.mdx +5 -5
- package/.docs/raw/evals/custom-scorers.mdx +3 -5
- package/.docs/raw/evals/overview.mdx +2 -3
- package/.docs/raw/getting-started/project-structure.mdx +1 -1
- package/.docs/raw/getting-started/start.mdx +72 -0
- package/.docs/raw/getting-started/studio.mdx +1 -1
- package/.docs/raw/{frameworks/agentic-uis/ai-sdk.mdx → guides/build-your-ui/ai-sdk-ui.mdx} +105 -11
- package/.docs/raw/{frameworks/web-frameworks → guides/getting-started}/astro.mdx +23 -25
- package/.docs/raw/{frameworks/servers → guides/getting-started}/express.mdx +3 -4
- package/.docs/raw/guides/{guide → getting-started}/manual-install.mdx +1 -1
- package/.docs/raw/guides/{quickstarts/nextjs.mdx → getting-started/next-js.mdx} +11 -11
- package/.docs/raw/guides/{quickstarts/standalone-server.mdx → getting-started/quickstart.mdx} +7 -7
- package/.docs/raw/{frameworks/web-frameworks → guides/getting-started}/sveltekit.mdx +23 -25
- package/.docs/raw/{frameworks/web-frameworks → guides/getting-started}/vite-react.mdx +7 -7
- package/.docs/raw/guides/guide/ai-recruiter.mdx +2 -3
- package/.docs/raw/guides/guide/chef-michel.mdx +2 -3
- package/.docs/raw/guides/guide/notes-mcp-server.mdx +2 -2
- package/.docs/raw/guides/guide/research-assistant.mdx +7 -8
- package/.docs/raw/guides/guide/stock-agent.mdx +4 -6
- package/.docs/raw/guides/guide/web-search.mdx +12 -10
- package/.docs/raw/guides/migrations/agentnetwork.mdx +4 -4
- package/.docs/raw/guides/migrations/ai-sdk-v4-to-v5.mdx +1 -1
- package/.docs/raw/guides/migrations/upgrade-to-v1/agent.mdx +29 -0
- package/.docs/raw/guides/migrations/upgrade-to-v1/tools.mdx +5 -0
- package/.docs/raw/guides/migrations/upgrade-to-v1/workflows.mdx +22 -0
- package/.docs/raw/guides/migrations/vnext-to-standard-apis.mdx +2 -2
- package/.docs/raw/index.mdx +2 -2
- package/.docs/raw/mcp/overview.mdx +3 -5
- package/.docs/raw/memory/memory-processors.mdx +1 -2
- package/.docs/raw/memory/semantic-recall.mdx +7 -7
- package/.docs/raw/memory/storage/memory-with-libsql.mdx +2 -4
- package/.docs/raw/memory/storage/memory-with-mongodb.mdx +2 -4
- package/.docs/raw/memory/storage/memory-with-pg.mdx +2 -4
- package/.docs/raw/memory/storage/memory-with-upstash.mdx +2 -4
- package/.docs/raw/memory/threads-and-resources.mdx +3 -3
- package/.docs/raw/memory/working-memory.mdx +4 -5
- package/.docs/raw/{logging.mdx → observability/logging.mdx} +1 -1
- package/.docs/raw/observability/overview.mdx +2 -2
- package/.docs/raw/observability/tracing/exporters/otel.mdx +21 -2
- package/.docs/raw/observability/tracing/exporters/posthog.mdx +107 -0
- package/.docs/raw/observability/tracing/overview.mdx +3 -2
- package/.docs/raw/rag/chunking-and-embedding.mdx +16 -17
- package/.docs/raw/rag/overview.mdx +3 -2
- package/.docs/raw/rag/retrieval.mdx +20 -32
- package/.docs/raw/reference/agents/agent.mdx +7 -10
- package/.docs/raw/reference/agents/generateLegacy.mdx +2 -2
- package/.docs/raw/reference/agents/getLLM.mdx +1 -1
- package/.docs/raw/reference/agents/network.mdx +2 -3
- package/.docs/raw/reference/cli/mastra.mdx +2 -1
- package/.docs/raw/reference/client-js/agents.mdx +3 -3
- package/.docs/raw/reference/core/getLogger.mdx +1 -1
- package/.docs/raw/reference/core/listLogs.mdx +1 -1
- package/.docs/raw/reference/core/listLogsByRunId.mdx +1 -1
- package/.docs/raw/reference/core/mastra-model-gateway.mdx +5 -19
- package/.docs/raw/reference/core/setLogger.mdx +1 -1
- package/.docs/raw/reference/core/setTelemetry.mdx +1 -1
- package/.docs/raw/reference/evals/answer-relevancy.mdx +28 -98
- package/.docs/raw/reference/evals/answer-similarity.mdx +12 -258
- package/.docs/raw/reference/evals/bias.mdx +29 -87
- package/.docs/raw/reference/evals/completeness.mdx +31 -90
- package/.docs/raw/reference/evals/content-similarity.mdx +28 -88
- package/.docs/raw/reference/evals/context-precision.mdx +28 -130
- package/.docs/raw/reference/evals/context-relevance.mdx +11 -11
- package/.docs/raw/reference/evals/faithfulness.mdx +28 -101
- package/.docs/raw/reference/evals/hallucination.mdx +28 -103
- package/.docs/raw/reference/evals/keyword-coverage.mdx +28 -107
- package/.docs/raw/reference/evals/noise-sensitivity.mdx +11 -11
- package/.docs/raw/reference/evals/prompt-alignment.mdx +15 -15
- package/.docs/raw/reference/evals/textual-difference.mdx +27 -100
- package/.docs/raw/reference/evals/tone-consistency.mdx +25 -98
- package/.docs/raw/reference/evals/tool-call-accuracy.mdx +7 -7
- package/.docs/raw/reference/evals/toxicity.mdx +29 -92
- package/.docs/raw/reference/memory/memory-class.mdx +5 -7
- package/.docs/raw/reference/observability/tracing/exporters/posthog.mdx +132 -0
- package/.docs/raw/reference/processors/batch-parts-processor.mdx +1 -1
- package/.docs/raw/reference/processors/language-detector.mdx +1 -1
- package/.docs/raw/reference/processors/moderation-processor.mdx +2 -2
- package/.docs/raw/reference/processors/pii-detector.mdx +2 -2
- package/.docs/raw/reference/processors/prompt-injection-detector.mdx +1 -1
- package/.docs/raw/reference/processors/system-prompt-scrubber.mdx +2 -3
- package/.docs/raw/reference/processors/token-limiter-processor.mdx +2 -2
- package/.docs/raw/reference/processors/unicode-normalizer.mdx +1 -1
- package/.docs/raw/reference/rag/embeddings.mdx +5 -5
- package/.docs/raw/reference/rag/rerank.mdx +1 -2
- package/.docs/raw/reference/rag/rerankWithScorer.mdx +0 -1
- package/.docs/raw/reference/streaming/agents/stream.mdx +8 -1
- package/.docs/raw/reference/templates/overview.mdx +1 -4
- package/.docs/raw/reference/tools/client.mdx +1 -2
- package/.docs/raw/reference/tools/create-tool.mdx +132 -0
- package/.docs/raw/reference/tools/graph-rag-tool.mdx +5 -5
- package/.docs/raw/reference/tools/mcp-client.mdx +2 -4
- package/.docs/raw/reference/tools/mcp-server.mdx +1 -2
- package/.docs/raw/reference/tools/vector-query-tool.mdx +14 -15
- package/.docs/raw/reference/vectors/chroma.mdx +81 -1
- package/.docs/raw/reference/vectors/couchbase.mdx +24 -17
- package/.docs/raw/reference/vectors/lance.mdx +38 -22
- package/.docs/raw/reference/vectors/libsql.mdx +35 -2
- package/.docs/raw/reference/vectors/mongodb.mdx +35 -2
- package/.docs/raw/reference/vectors/opensearch.mdx +37 -16
- package/.docs/raw/reference/vectors/pg.mdx +43 -36
- package/.docs/raw/reference/vectors/pinecone.mdx +48 -1
- package/.docs/raw/reference/vectors/qdrant.mdx +36 -1
- package/.docs/raw/reference/vectors/turbopuffer.mdx +74 -0
- package/.docs/raw/reference/voice/openai-realtime.mdx +2 -2
- package/.docs/raw/reference/voice/voice.addInstructions.mdx +2 -3
- package/.docs/raw/reference/voice/voice.addTools.mdx +1 -1
- package/.docs/raw/reference/voice/voice.answer.mdx +1 -1
- package/.docs/raw/reference/voice/voice.close.mdx +1 -1
- package/.docs/raw/reference/voice/voice.connect.mdx +1 -1
- package/.docs/raw/reference/voice/voice.off.mdx +1 -1
- package/.docs/raw/reference/voice/voice.on.mdx +1 -1
- package/.docs/raw/reference/voice/voice.send.mdx +1 -1
- package/.docs/raw/reference/voice/voice.updateConfig.mdx +1 -1
- package/.docs/raw/server-db/mastra-client.mdx +1 -2
- package/.docs/raw/streaming/overview.mdx +20 -9
- package/.docs/raw/streaming/tool-streaming.mdx +47 -4
- package/.docs/raw/tools-mcp/advanced-usage.mdx +1 -2
- package/.docs/raw/tools-mcp/mcp-overview.mdx +3 -5
- package/.docs/raw/voice/overview.mdx +21 -41
- package/.docs/raw/voice/speech-to-speech.mdx +4 -4
- package/.docs/raw/voice/speech-to-text.mdx +1 -2
- package/.docs/raw/voice/text-to-speech.mdx +1 -2
- package/.docs/raw/workflows/control-flow.mdx +180 -0
- package/CHANGELOG.md +10 -0
- package/dist/{chunk-5NJC7NRO.js → chunk-4CM2BQNP.js} +24 -4
- package/dist/prepare-docs/package-changes.d.ts.map +1 -1
- package/dist/prepare-docs/prepare.js +1 -1
- package/dist/stdio.js +1 -1
- package/package.json +7 -7
- package/.docs/raw/frameworks/agentic-uis/cedar-os.mdx +0 -102
- package/.docs/raw/frameworks/agentic-uis/openrouter.mdx +0 -179
- package/.docs/raw/frameworks/web-frameworks/next-js.mdx +0 -379
- package/.docs/raw/getting-started/quickstart.mdx +0 -27
- package/.docs/raw/getting-started/templates.mdx +0 -73
- /package/.docs/raw/{frameworks/agentic-uis → guides/build-your-ui}/assistant-ui.mdx +0 -0
- /package/.docs/raw/{frameworks/agentic-uis → guides/build-your-ui}/copilotkit.mdx +0 -0
package/.docs/raw/reference/evals/hallucination.mdx

@@ -131,120 +131,45 @@ A hallucination score between 0 and 1:
 
 **Note:** The score represents the degree of hallucination - lower scores indicate better factual alignment with the provided context
 
-##
+## Example
 
-
+Evaluate agent responses for hallucinations against provided context:
 
-
-
-```typescript title="src/example-no-hallucination.ts" showLineNumbers copy
-import { createHallucinationScorer } from "@mastra/evals/scorers/prebuilt";
-
-const scorer = createHallucinationScorer({ model: 'openai/gpt-4o-mini', options: {
-  context: [
-    "The iPhone was first released in 2007.",
-    "Steve Jobs unveiled it at Macworld.",
-    "The original model had a 3.5-inch screen."
-  ]
-});
-
-const query = "When was the first iPhone released?";
-const response = "The iPhone was first released in 2007, when Steve Jobs unveiled it at Macworld. The original iPhone featured a 3.5-inch screen.";
-
-const result = await scorer.run({
-  input: [{ role: 'user', content: query }],
-  output: { text: response },
-});
-
-console.log(result);
-```
-
-#### No hallucination output
-
-The response receives a score of 0 because there are no contradictions. Every statement is consistent with the context, and no new or fabricated information has been introduced.
-
-```typescript
-{
-  score: 0,
-  reason: 'The score is 0 because none of the statements from the context were contradicted by the output.'
-}
-```
-
-### Mixed hallucination example
-
-In this example, the response includes both accurate and inaccurate claims. Some details align with the context, while others directly contradict it—such as inflated numbers or incorrect locations. These contradictions increase the hallucination score.
-
-```typescript title="src/example-mixed-hallucination.ts" showLineNumbers copy
+```typescript title="src/example-hallucination.ts" showLineNumbers copy
+import { runEvals } from "@mastra/core/evals";
 import { createHallucinationScorer } from "@mastra/evals/scorers/prebuilt";
+import { myAgent } from "./agent";
 
-
-
-
-    "It was directed by George Lucas.",
-    "The film earned $775 million worldwide.",
-    "The movie was filmed in Tunisia and England."
-  ]
+// Context is typically populated from agent tool calls or RAG retrieval
+const scorer = createHallucinationScorer({
+  model: "openai/gpt-4o",
 });
 
-const
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-  score: 0.5,
-  reason: 'The score is 0.5 because two out of four statements from the output were contradicted by claims in the context, indicating a balance of accurate and inaccurate information.'
-}
-```
-
-### Complete hallucination example
-
-In this example, the response contradicts every key fact in the context. None of the claims can be verified, and all presented details are factually incorrect.
-
-```typescript title="src/example-complete-hallucination.ts" showLineNumbers copy
-import { createHallucinationScorer } from "@mastra/evals/scorers/prebuilt";
-
-const scorer = createHallucinationScorer({ model: 'openai/gpt-4o-mini', options: {
-  context: [
-    "The Wright brothers made their first flight in 1903.",
-    "The flight lasted 12 seconds.",
-    "It covered a distance of 120 feet."
-  ]
-});
-
-const query = "When did the Wright brothers first fly?";
-const response = "The Wright brothers achieved their historic first flight in 1908. The flight lasted about 2 minutes and covered nearly a mile.";
-
-const result = await scorer.run({
-  input: [{ role: 'user', content: query }],
-  output: { text: response },
+const result = await runEvals({
+  data: [
+    {
+      input: "When was the first iPhone released?",
+    },
+    {
+      input: "Tell me about the original iPhone announcement.",
+    },
+  ],
+  scorers: [scorer],
+  target: myAgent,
+  onItemComplete: ({ scorerResults }) => {
+    console.log({
+      score: scorerResults[scorer.id].score,
+      reason: scorerResults[scorer.id].reason,
+    });
+  },
 });
 
-console.log(result);
-
+console.log(result.scores);
 ```
 
-
+For more details on `runEvals`, see the [runEvals reference](/reference/v1/evals/run-evals).
 
-
-
-```typescript
-{
-  score: 1,
-  reason: 'The score is 1.0 because all three statements from the output directly contradict the context: the first flight was in 1903, not 1908; it lasted 12 seconds, not about 2 minutes; and it covered 120 feet, not nearly a mile.'
-}
-```
+To add this scorer to an agent, see the [Scorers overview](/docs/v1/evals/overview#adding-scorers-to-agents) guide.
 
 ## Related
 
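Read as a whole, this hunk replaces three standalone `scorer.run()` walkthroughs with a single `runEvals` run against an agent. As a sketch, here are the added (`+`) lines assembled into one runnable file; `myAgent` and `./agent` are the placeholder names used by the docs example, not exports of this package:

```typescript
import { runEvals } from "@mastra/core/evals";
import { createHallucinationScorer } from "@mastra/evals/scorers/prebuilt";
import { myAgent } from "./agent"; // placeholder agent module from the docs example

// Context is typically populated from agent tool calls or RAG retrieval
const scorer = createHallucinationScorer({
  model: "openai/gpt-4o",
});

const result = await runEvals({
  data: [
    { input: "When was the first iPhone released?" },
    { input: "Tell me about the original iPhone announcement." },
  ],
  scorers: [scorer],
  target: myAgent,
  onItemComplete: ({ scorerResults }) => {
    // Each scorer's per-item result is keyed by the scorer's id
    console.log({
      score: scorerResults[scorer.id].score,
      reason: scorerResults[scorer.id].reason,
    });
  },
});

console.log(result.scores);
```

The same `scorer.run()`-to-`runEvals` rewrite repeats in the keyword-coverage and textual-difference hunks below.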
package/.docs/raw/reference/evals/keyword-coverage.mdx

@@ -102,124 +102,45 @@ The scorer handles several special cases:
 - Case differences: "JavaScript" matches "javascript"
 - Common words: Ignored in scoring to focus on meaningful keywords
 
-##
+## Example
 
-
+Evaluate keyword coverage between input queries and agent responses:
 
-
-
-```typescript title="src/example-full-keyword-coverage.ts" showLineNumbers copy
-import { createKeywordCoverageScorer } from "@mastra/evals/scorers/prebuilt";
-
-const scorer = createKeywordCoverageScorer();
-
-const input = "JavaScript frameworks like React and Vue";
-const output =
-  "Popular JavaScript frameworks include React and Vue for web development";
-
-const result = await scorer.run({
-  input: [{ role: "user", content: input }],
-  output: { role: "assistant", text: output },
-});
-
-console.log("Score:", result.score);
-console.log("AnalyzeStepResult:", result.analyzeStepResult);
-```
-
-#### Full coverage output
-
-A score of 1 indicates that all expected keywords were found in the response. The `analyzeStepResult` field confirms that the number of matched keywords equals the total number extracted from the input.
-
-```typescript
-{
-  score: 1,
-  analyzeStepResult: {
-    totalKeywords: 4,
-    matchedKeywords: 4
-  }
-}
-```
-
-### Partial coverage example
-
-In this example, the response includes some, but not all, of the important keywords from the input. The score reflects partial coverage, with key terms either missing or only partially matched.
-
-```typescript title="src/example-partial-keyword-coverage.ts" showLineNumbers copy
+```typescript title="src/example-keyword-coverage.ts" showLineNumbers copy
+import { runEvals } from "@mastra/core/evals";
 import { createKeywordCoverageScorer } from "@mastra/evals/scorers/prebuilt";
+import { myAgent } from "./agent";
 
 const scorer = createKeywordCoverageScorer();
 
-const
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-    totalKeywords: 6,
-    matchedKeywords: 3
-  }
-}
-```
-
-### Minimal coverage example
-
-In this example, the response includes very few of the important keywords from the input. The score reflects minimal coverage, with most key terms missing or unaccounted for.
-
-```typescript title="src/example-minimal-keyword-coverage.ts" showLineNumbers copy
-import { createKeywordCoverageScorer } from "@mastra/evals/scorers/prebuilt";
-
-const scorer = createKeywordCoverageScorer();
-
-const input =
-  "Machine learning models require data preprocessing, feature engineering, and hyperparameter tuning";
-const output = "Data preparation is important for models";
-
-const result = await scorer.run({
-  input: [{ role: "user", content: input }],
-  output: { role: "assistant", text: output },
+const result = await runEvals({
+  data: [
+    {
+      input: "JavaScript frameworks like React and Vue",
+    },
+    {
+      input: "TypeScript offers interfaces, generics, and type inference",
+    },
+    {
+      input:
+        "Machine learning models require data preprocessing, feature engineering, and hyperparameter tuning",
+    },
+  ],
+  scorers: [scorer],
+  target: myAgent,
+  onItemComplete: ({ scorerResults }) => {
+    console.log({
+      score: scorerResults[scorer.id].score,
+    });
+  },
 });
 
-console.log(
-console.log("AnalyzeStepResult:", result.analyzeStepResult);
-```
-
-#### Minimal coverage output
-
-A low score indicates that only a small number of the expected keywords were present in the response. The `analyzeStepResult` field highlights the gap between total and matched keywords, signaling insufficient coverage.
-
-```typescript
-{
-  score: 0.2,
-  analyzeStepResult: {
-    totalKeywords: 10,
-    matchedKeywords: 2
-  }
-}
+console.log(result.scores);
 ```
 
-
-
-You can create a `KeywordCoverageMetric` instance with default settings. No additional configuration is required.
-
-```typescript
-const metric = new KeywordCoverageMetric();
-```
+For more details on `runEvals`, see the [runEvals reference](/reference/v1/evals/run-evals).
 
-
+To add this scorer to an agent, see the [Scorers overview](/docs/v1/evals/overview#adding-scorers-to-agents) guide.
 
 ## Related
 
@@ -180,7 +180,7 @@ describe("Agent Noise Resistance Tests", () => {
|
|
|
180
180
|
|
|
181
181
|
// Step 4: Evaluate using noise sensitivity scorer
|
|
182
182
|
const scorer = createNoiseSensitivityScorerLLM({
|
|
183
|
-
model: "openai/gpt-
|
|
183
|
+
model: "openai/gpt-5.1",
|
|
184
184
|
options: {
|
|
185
185
|
baselineResponse,
|
|
186
186
|
noisyQuery,
|
|
@@ -389,7 +389,7 @@ describe("Agent Noise Resistance CI Tests", () => {
|
|
|
389
389
|
|
|
390
390
|
// Evaluate using noise sensitivity scorer
|
|
391
391
|
const scorer = createNoiseSensitivityScorerLLM({
|
|
392
|
-
model: "openai/gpt-
|
|
392
|
+
model: "openai/gpt-5.1",
|
|
393
393
|
options: {
|
|
394
394
|
baselineResponse: testCase.baselineResponse,
|
|
395
395
|
noisyQuery: testCase.noisyQuery,
|
|
@@ -424,7 +424,7 @@ This example shows an agent that completely resists misinformation in a test sce
|
|
|
424
424
|
import { createNoiseSensitivityScorerLLM } from "@mastra/evals";
|
|
425
425
|
|
|
426
426
|
const scorer = createNoiseSensitivityScorerLLM({
|
|
427
|
-
model: "openai/gpt-
|
|
427
|
+
model: "openai/gpt-5.1",
|
|
428
428
|
options: {
|
|
429
429
|
baselineResponse:
|
|
430
430
|
"Regular exercise improves cardiovascular health, strengthens muscles, and enhances mental wellbeing.",
|
|
@@ -470,7 +470,7 @@ This example shows an agent partially distracted by irrelevant requests:
|
|
|
470
470
|
import { createNoiseSensitivityScorerLLM } from "@mastra/evals/scorers/prebuilt";
|
|
471
471
|
|
|
472
472
|
const scorer = createNoiseSensitivityScorerLLM({
|
|
473
|
-
model: "openai/gpt-
|
|
473
|
+
model: "openai/gpt-5.1",
|
|
474
474
|
options: {
|
|
475
475
|
baselineResponse:
|
|
476
476
|
"To bake a cake: Mix flour, sugar, eggs, and butter. Bake at 350°F for 30 minutes.",
|
|
@@ -516,7 +516,7 @@ This example shows an agent that incorporates misinformation:
|
|
|
516
516
|
import { createNoiseSensitivityScorerLLM } from "@mastra/evals";
|
|
517
517
|
|
|
518
518
|
const scorer = createNoiseSensitivityScorerLLM({
|
|
519
|
-
model: "openai/gpt-
|
|
519
|
+
model: "openai/gpt-5.1",
|
|
520
520
|
options: {
|
|
521
521
|
baselineResponse:
|
|
522
522
|
"Climate change is caused by greenhouse gas emissions from human activities.",
|
|
@@ -563,7 +563,7 @@ import { createNoiseSensitivityScorerLLM } from "@mastra/evals";
|
|
|
563
563
|
|
|
564
564
|
// Lenient scoring - more forgiving of minor issues
|
|
565
565
|
const lenientScorer = createNoiseSensitivityScorerLLM({
|
|
566
|
-
model: "openai/gpt-
|
|
566
|
+
model: "openai/gpt-5.1",
|
|
567
567
|
options: {
|
|
568
568
|
baselineResponse: "Python is a high-level programming language.",
|
|
569
569
|
noisyQuery: "What is Python? Also, snakes are dangerous!",
|
|
@@ -583,7 +583,7 @@ const lenientScorer = createNoiseSensitivityScorerLLM({
|
|
|
583
583
|
|
|
584
584
|
// Strict scoring - harsh on any deviation
|
|
585
585
|
const strictScorer = createNoiseSensitivityScorerLLM({
|
|
586
|
-
model: "openai/gpt-
|
|
586
|
+
model: "openai/gpt-5.1",
|
|
587
587
|
options: {
|
|
588
588
|
baselineResponse: "Python is a high-level programming language.",
|
|
589
589
|
noisyQuery: "What is Python? Also, snakes are dangerous!",
|
|
@@ -639,7 +639,7 @@ async function evaluateNoiseResistance(testCases) {
|
|
|
639
639
|
|
|
640
640
|
for (const testCase of testCases) {
|
|
641
641
|
const scorer = createNoiseSensitivityScorerLLM({
|
|
642
|
-
model: "openai/gpt-
|
|
642
|
+
model: "openai/gpt-5.1",
|
|
643
643
|
options: {
|
|
644
644
|
baselineResponse: testCase.baseline,
|
|
645
645
|
noisyQuery: testCase.noisyQuery,
|
|
@@ -686,8 +686,8 @@ import { createNoiseSensitivityScorerLLM } from "@mastra/evals";
|
|
|
686
686
|
|
|
687
687
|
async function compareModelRobustness() {
|
|
688
688
|
const models = [
|
|
689
|
-
{ name: "GPT-
|
|
690
|
-
{ name: "GPT-
|
|
689
|
+
{ name: "GPT-5.1", model: "openai/gpt-5.1" },
|
|
690
|
+
{ name: "GPT-4.1", model: "openai/gpt-4.1" },
|
|
691
691
|
{ name: "Claude", model: "anthropic/claude-3-opus" },
|
|
692
692
|
];
|
|
693
693
|
|
|
@@ -738,7 +738,7 @@ Include noise sensitivity tests in your security test suite to validate prompt i
|
|
|
738
738
|
import { createNoiseSensitivityScorerLLM } from "@mastra/evals";
|
|
739
739
|
|
|
740
740
|
const scorer = createNoiseSensitivityScorerLLM({
|
|
741
|
-
model: "openai/gpt-
|
|
741
|
+
model: "openai/gpt-5.1",
|
|
742
742
|
options: {
|
|
743
743
|
baselineResponse: "I can help you with programming questions.",
|
|
744
744
|
noisyQuery:
|
|
@@ -109,7 +109,7 @@ You can customize the Prompt Alignment Scorer by adjusting the scale parameter a
|
|
|
109
109
|
|
|
110
110
|
```typescript showLineNumbers copy
|
|
111
111
|
const scorer = createPromptAlignmentScorerLLM({
|
|
112
|
-
model: "openai/gpt-
|
|
112
|
+
model: "openai/gpt-5.1",
|
|
113
113
|
options: {
|
|
114
114
|
scale: 10, // Score from 0-10 instead of 0-1
|
|
115
115
|
evaluationMode: "both", // 'user', 'system', or 'both' (default)
|
|
@@ -272,24 +272,24 @@ const agent = new Agent({
|
|
|
272
272
|
name: "CodingAssistant",
|
|
273
273
|
instructions:
|
|
274
274
|
"You are a helpful coding assistant. Always provide working code examples.",
|
|
275
|
-
model: "openai/gpt-
|
|
275
|
+
model: "openai/gpt-5.1",
|
|
276
276
|
});
|
|
277
277
|
|
|
278
278
|
// Evaluate comprehensive alignment (default)
|
|
279
279
|
const scorer = createPromptAlignmentScorerLLM({
|
|
280
|
-
model: "openai/gpt-
|
|
280
|
+
model: "openai/gpt-5.1",
|
|
281
281
|
options: { evaluationMode: "both" }, // Evaluates both user intent and system guidelines
|
|
282
282
|
});
|
|
283
283
|
|
|
284
284
|
// Evaluate just user satisfaction
|
|
285
285
|
const userScorer = createPromptAlignmentScorerLLM({
|
|
286
|
-
model: "openai/gpt-
|
|
286
|
+
model: "openai/gpt-5.1",
|
|
287
287
|
options: { evaluationMode: "user" }, // Focus only on user request fulfillment
|
|
288
288
|
});
|
|
289
289
|
|
|
290
290
|
// Evaluate system compliance
|
|
291
291
|
const systemScorer = createPromptAlignmentScorerLLM({
|
|
292
|
-
model: "openai/gpt-
|
|
292
|
+
model: "openai/gpt-5.1",
|
|
293
293
|
options: { evaluationMode: "system" }, // Check adherence to system instructions
|
|
294
294
|
});
|
|
295
295
|
|
|
@@ -341,7 +341,7 @@ for (const agent of agents) {
|
|
|
341
341
|
import { createPromptAlignmentScorerLLM } from "@mastra/evals";
|
|
342
342
|
|
|
343
343
|
const scorer = createPromptAlignmentScorerLLM({
|
|
344
|
-
model: "openai/gpt-
|
|
344
|
+
model: "openai/gpt-5.1",
|
|
345
345
|
});
|
|
346
346
|
|
|
347
347
|
// Evaluate a code generation task
|
|
@@ -371,7 +371,7 @@ const result = await scorer.run({
|
|
|
371
371
|
```typescript
|
|
372
372
|
// Configure scale and evaluation mode
|
|
373
373
|
const scorer = createPromptAlignmentScorerLLM({
|
|
374
|
-
model: "openai/gpt-
|
|
374
|
+
model: "openai/gpt-5.1",
|
|
375
375
|
options: {
|
|
376
376
|
scale: 10, // Score from 0-10 instead of 0-1
|
|
377
377
|
evaluationMode: "both", // 'user', 'system', or 'both' (default)
|
|
@@ -380,13 +380,13 @@ const scorer = createPromptAlignmentScorerLLM({
|
|
|
380
380
|
|
|
381
381
|
// User-only evaluation - focus on user satisfaction
|
|
382
382
|
const userScorer = createPromptAlignmentScorerLLM({
|
|
383
|
-
model: "openai/gpt-
|
|
383
|
+
model: "openai/gpt-5.1",
|
|
384
384
|
options: { evaluationMode: "user" },
|
|
385
385
|
});
|
|
386
386
|
|
|
387
387
|
// System-only evaluation - focus on compliance
|
|
388
388
|
const systemScorer = createPromptAlignmentScorerLLM({
|
|
389
|
-
model: "openai/gpt-
|
|
389
|
+
model: "openai/gpt-5.1",
|
|
390
390
|
options: { evaluationMode: "system" },
|
|
391
391
|
});
|
|
392
392
|
|
|
@@ -421,7 +421,7 @@ In this example, the response fully addresses the user's prompt with all require
|
|
|
421
421
|
import { createPromptAlignmentScorerLLM } from "@mastra/evals/scorers/prebuilt";
|
|
422
422
|
|
|
423
423
|
const scorer = createPromptAlignmentScorerLLM({
|
|
424
|
-
model: "openai/gpt-
|
|
424
|
+
model: "openai/gpt-5.1",
|
|
425
425
|
});
|
|
426
426
|
|
|
427
427
|
const inputMessages = [
|
|
@@ -469,7 +469,7 @@ In this example, the response addresses the core intent but misses some requirem
|
|
|
469
469
|
import { createPromptAlignmentScorerLLM } from "@mastra/evals/scorers/prebuilt";
|
|
470
470
|
|
|
471
471
|
const scorer = createPromptAlignmentScorerLLM({
|
|
472
|
-
model: "openai/gpt-
|
|
472
|
+
model: "openai/gpt-5.1",
|
|
473
473
|
});
|
|
474
474
|
|
|
475
475
|
const inputMessages = [
|
|
@@ -510,7 +510,7 @@ In this example, the response fails to address the user's specific requirements.
|
|
|
510
510
|
import { createPromptAlignmentScorerLLM } from "@mastra/evals/scorers/prebuilt";
|
|
511
511
|
|
|
512
512
|
const scorer = createPromptAlignmentScorerLLM({
|
|
513
|
-
model: "openai/gpt-
|
|
513
|
+
model: "openai/gpt-5.1",
|
|
514
514
|
});
|
|
515
515
|
|
|
516
516
|
const inputMessages = [
|
|
@@ -554,7 +554,7 @@ Evaluates how well the response addresses the user's request, ignoring system in
|
|
|
554
554
|
|
|
555
555
|
```typescript title="src/example-user-mode.ts" showLineNumbers copy
|
|
556
556
|
const scorer = createPromptAlignmentScorerLLM({
|
|
557
|
-
model: "openai/gpt-
|
|
557
|
+
model: "openai/gpt-5.1",
|
|
558
558
|
options: { evaluationMode: "user" },
|
|
559
559
|
});
|
|
560
560
|
|
|
@@ -586,7 +586,7 @@ Evaluates compliance with system behavioral guidelines and constraints:
|
|
|
586
586
|
|
|
587
587
|
```typescript title="src/example-system-mode.ts" showLineNumbers copy
|
|
588
588
|
const scorer = createPromptAlignmentScorerLLM({
|
|
589
|
-
model: "openai/gpt-
|
|
589
|
+
model: "openai/gpt-5.1",
|
|
590
590
|
options: { evaluationMode: "system" },
|
|
591
591
|
});
|
|
592
592
|
|
|
@@ -619,7 +619,7 @@ Evaluates both user intent fulfillment and system compliance with weighted scori
|
|
|
619
619
|
|
|
620
620
|
```typescript title="src/example-both-mode.ts" showLineNumbers copy
|
|
621
621
|
const scorer = createPromptAlignmentScorerLLM({
|
|
622
|
-
model: "openai/gpt-
|
|
622
|
+
model: "openai/gpt-5.1",
|
|
623
623
|
options: { evaluationMode: "both" }, // This is the default
|
|
624
624
|
});
|
|
625
625
|
|
|
@@ -83,118 +83,45 @@ A textual difference score between 0 and 1:
|
|
|
83
83
|
- **0.1–0.3**: Major differences – extensive changes needed.
|
|
84
84
|
- **0.0**: Completely different texts.
|
|
85
85
|
|
|
86
|
-
##
|
|
86
|
+
## Example
|
|
87
87
|
|
|
88
|
-
|
|
88
|
+
Measure textual differences between expected and actual agent outputs:
|
|
89
89
|
|
|
90
|
-
|
|
91
|
-
|
|
92
|
-
```typescript title="src/example-no-differences.ts" showLineNumbers copy
|
|
90
|
+
```typescript title="src/example-textual-difference.ts" showLineNumbers copy
|
|
91
|
+
import { runEvals } from "@mastra/core/evals";
|
|
93
92
|
import { createTextualDifferenceScorer } from "@mastra/evals/scorers/prebuilt";
|
|
93
|
+
import { myAgent } from "./agent";
|
|
94
94
|
|
|
95
95
|
const scorer = createTextualDifferenceScorer();
|
|
96
96
|
|
|
97
|
-
const
|
|
98
|
-
|
|
99
|
-
|
|
100
|
-
|
|
101
|
-
|
|
102
|
-
|
|
103
|
-
}
|
|
104
|
-
|
|
105
|
-
|
|
106
|
-
|
|
107
|
-
|
|
108
|
-
|
|
109
|
-
|
|
110
|
-
|
|
111
|
-
|
|
112
|
-
|
|
113
|
-
|
|
114
|
-
|
|
115
|
-
|
|
116
|
-
analyzeStepResult: {
|
|
117
|
-
confidence: 1,
|
|
118
|
-
ratio: 1,
|
|
119
|
-
changes: 0,
|
|
120
|
-
lengthDiff: 0,
|
|
97
|
+
const result = await runEvals({
|
|
98
|
+
data: [
|
|
99
|
+
{
|
|
100
|
+
input: "Summarize the concept of recursion",
|
|
101
|
+
groundTruth:
|
|
102
|
+
"Recursion is when a function calls itself to solve a problem by breaking it into smaller subproblems.",
|
|
103
|
+
},
|
|
104
|
+
{
|
|
105
|
+
input: "What is the capital of France?",
|
|
106
|
+
groundTruth: "The capital of France is Paris.",
|
|
107
|
+
},
|
|
108
|
+
],
|
|
109
|
+
scorers: [scorer],
|
|
110
|
+
target: myAgent,
|
|
111
|
+
onItemComplete: ({ scorerResults }) => {
|
|
112
|
+
console.log({
|
|
113
|
+
score: scorerResults[scorer.id].score,
|
|
114
|
+
groundTruth: scorerResults[scorer.id].groundTruth,
|
|
115
|
+
});
|
|
121
116
|
},
|
|
122
|
-
}
|
|
123
|
-
```
|
|
124
|
-
|
|
125
|
-
### Minor differences example
|
|
126
|
-
|
|
127
|
-
In this example, the texts have small variations. The scorer detects these minor differences and returns a moderate similarity score.
|
|
128
|
-
|
|
129
|
-
```typescript title="src/example-minor-differences.ts" showLineNumbers copy
|
|
130
|
-
import { createTextualDifferenceScorer } from "@mastra/evals/scorers/prebuilt";
|
|
131
|
-
|
|
132
|
-
const scorer = createTextualDifferenceScorer();
|
|
133
|
-
|
|
134
|
-
const input = "Hello world! How are you?";
|
|
135
|
-
const output = "Hello there! How is it going?";
|
|
136
|
-
|
|
137
|
-
const result = await scorer.run({
|
|
138
|
-
input: [{ role: "user", content: input }],
|
|
139
|
-
output: { role: "assistant", text: output },
|
|
140
117
|
});
|
|
141
118
|
|
|
142
|
-
console.log(
|
|
143
|
-
console.log("AnalyzeStepResult:", result.analyzeStepResult);
|
|
119
|
+
console.log(result.scores);
|
|
144
120
|
```
|
|
145
121
|
|
|
146
|
-
|
|
122
|
+
For more details on `runEvals`, see the [runEvals reference](/reference/v1/evals/run-evals).
|
|
147
123
|
|
|
148
|
-
|
|
149
|
-
|
|
150
|
-
```typescript
|
|
151
|
-
{
|
|
152
|
-
score: 0.5925925925925926,
|
|
153
|
-
analyzeStepResult: {
|
|
154
|
-
confidence: 0.8620689655172413,
|
|
155
|
-
ratio: 0.5925925925925926,
|
|
156
|
-
changes: 5,
|
|
157
|
-
lengthDiff: 0.13793103448275862
|
|
158
|
-
}
|
|
159
|
-
}
|
|
160
|
-
```
|
|
161
|
-
|
|
162
|
-
### Major differences example
|
|
163
|
-
|
|
164
|
-
In this example, the texts differ significantly. The scorer detects extensive changes and returns a low similarity score.
|
|
165
|
-
|
|
166
|
-
```typescript title="src/example-major-differences.ts" showLineNumbers copy
|
|
167
|
-
import { createTextualDifferenceScorer } from "@mastra/evals/scorers/prebuilt";
|
|
168
|
-
|
|
169
|
-
const scorer = createTextualDifferenceScorer();
|
|
170
|
-
|
|
171
|
-
const input = "Python is a high-level programming language";
|
|
172
|
-
const output = "JavaScript is used for web development";
|
|
173
|
-
|
|
174
|
-
const result = await scorer.run({
|
|
175
|
-
input: [{ role: "user", content: input }],
|
|
176
|
-
output: { role: "assistant", text: output },
|
|
177
|
-
});
|
|
178
|
-
|
|
179
|
-
console.log("Score:", result.score);
|
|
180
|
-
console.log("AnalyzeStepResult:", result.analyzeStepResult);
|
|
181
|
-
```
|
|
182
|
-
|
|
183
|
-
#### Major differences output
|
|
184
|
-
|
|
185
|
-
The scorer returns a low score due to significant differences between the texts. The detailed `analyzeStepResult` shows numerous changes and a notable length difference.
|
|
186
|
-
|
|
187
|
-
```typescript
|
|
188
|
-
{
|
|
189
|
-
score: 0.3170731707317073,
|
|
190
|
-
analyzeStepResult: {
|
|
191
|
-
confidence: 0.8636363636363636,
|
|
192
|
-
ratio: 0.3170731707317073,
|
|
193
|
-
changes: 8,
|
|
194
|
-
lengthDiff: 0.13636363636363635
|
|
195
|
-
}
|
|
196
|
-
}
|
|
197
|
-
```
|
|
124
|
+
To add this scorer to an agent, see the [Scorers overview](/docs/v1/evals/overview#adding-scorers-to-agents) guide.
|
|
198
125
|
|
|
199
126
|
## Related
|
|
200
127
|
|
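One detail unique to the textual-difference hunk: the new `runEvals` data items carry a `groundTruth` field, and the added lines read it back from the per-scorer result (`scorerResults[scorer.id].groundTruth`). A minimal sketch of that shape, again assembled from the `+` lines above, with `myAgent` as the docs' placeholder agent:

```typescript
import { runEvals } from "@mastra/core/evals";
import { createTextualDifferenceScorer } from "@mastra/evals/scorers/prebuilt";
import { myAgent } from "./agent"; // placeholder agent module from the docs example

const scorer = createTextualDifferenceScorer();

const result = await runEvals({
  data: [
    {
      input: "What is the capital of France?",
      // Expected output that the scorer diffs the agent's answer against
      groundTruth: "The capital of France is Paris.",
    },
  ],
  scorers: [scorer],
  target: myAgent,
  onItemComplete: ({ scorerResults }) => {
    // groundTruth is echoed back alongside the score for each item
    console.log({
      score: scorerResults[scorer.id].score,
      groundTruth: scorerResults[scorer.id].groundTruth,
    });
  },
});

console.log(result.scores);
```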