@mastra/mcp-docs-server 1.0.0-beta.4 → 1.0.0-beta.5
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/.docs/organized/changelogs/%40internal%2Fstorage-test-utils.md +201 -1
- package/.docs/organized/changelogs/%40mastra%2Fagent-builder.md +201 -1
- package/.docs/organized/changelogs/%40mastra%2Fai-sdk.md +201 -1
- package/.docs/organized/changelogs/%40mastra%2Fastra.md +201 -1
- package/.docs/organized/changelogs/%40mastra%2Fchroma.md +201 -1
- package/.docs/organized/changelogs/%40mastra%2Fclickhouse.md +201 -1
- package/.docs/organized/changelogs/%40mastra%2Fclient-js.md +201 -1
- package/.docs/organized/changelogs/%40mastra%2Fcloudflare-d1.md +201 -1
- package/.docs/organized/changelogs/%40mastra%2Fcloudflare.md +201 -1
- package/.docs/organized/changelogs/%40mastra%2Fcore.md +326 -126
- package/.docs/organized/changelogs/%40mastra%2Fcouchbase.md +201 -1
- package/.docs/organized/changelogs/%40mastra%2Fdeployer-cloud.md +201 -1
- package/.docs/organized/changelogs/%40mastra%2Fdeployer-cloudflare.md +201 -1
- package/.docs/organized/changelogs/%40mastra%2Fdeployer-netlify.md +201 -1
- package/.docs/organized/changelogs/%40mastra%2Fdeployer-vercel.md +201 -1
- package/.docs/organized/changelogs/%40mastra%2Fdeployer.md +201 -1
- package/.docs/organized/changelogs/%40mastra%2Fdynamodb.md +201 -1
- package/.docs/organized/changelogs/%40mastra%2Fevals.md +201 -1
- package/.docs/organized/changelogs/%40mastra%2Flance.md +201 -1
- package/.docs/organized/changelogs/%40mastra%2Flibsql.md +201 -1
- package/.docs/organized/changelogs/%40mastra%2Floggers.md +201 -1
- package/.docs/organized/changelogs/%40mastra%2Fmcp-docs-server.md +201 -1
- package/.docs/organized/changelogs/%40mastra%2Fmcp-registry-registry.md +201 -1
- package/.docs/organized/changelogs/%40mastra%2Fmcp.md +201 -1
- package/.docs/organized/changelogs/%40mastra%2Fmemory.md +201 -1
- package/.docs/organized/changelogs/%40mastra%2Fmongodb.md +201 -1
- package/.docs/organized/changelogs/%40mastra%2Fmssql.md +201 -1
- package/.docs/organized/changelogs/%40mastra%2Fopensearch.md +201 -1
- package/.docs/organized/changelogs/%40mastra%2Fpg.md +201 -1
- package/.docs/organized/changelogs/%40mastra%2Fpinecone.md +201 -1
- package/.docs/organized/changelogs/%40mastra%2Fplayground-ui.md +201 -1
- package/.docs/organized/changelogs/%40mastra%2Fqdrant.md +201 -1
- package/.docs/organized/changelogs/%40mastra%2Frag.md +201 -1
- package/.docs/organized/changelogs/%40mastra%2Freact.md +80 -1
- package/.docs/organized/changelogs/%40mastra%2Fs3vectors.md +9 -0
- package/.docs/organized/changelogs/%40mastra%2Fschema-compat.md +36 -0
- package/.docs/organized/changelogs/%40mastra%2Fserver.md +201 -1
- package/.docs/organized/changelogs/%40mastra%2Fturbopuffer.md +201 -1
- package/.docs/organized/changelogs/%40mastra%2Fupstash.md +201 -1
- package/.docs/organized/changelogs/%40mastra%2Fvectorize.md +201 -1
- package/.docs/organized/changelogs/%40mastra%2Fvoice-azure.md +201 -1
- package/.docs/organized/changelogs/%40mastra%2Fvoice-cloudflare.md +201 -1
- package/.docs/organized/changelogs/%40mastra%2Fvoice-deepgram.md +201 -1
- package/.docs/organized/changelogs/%40mastra%2Fvoice-elevenlabs.md +201 -1
- package/.docs/organized/changelogs/%40mastra%2Fvoice-gladia.md +92 -1
- package/.docs/organized/changelogs/%40mastra%2Fvoice-google-gemini-live.md +67 -1
- package/.docs/organized/changelogs/%40mastra%2Fvoice-google.md +201 -1
- package/.docs/organized/changelogs/%40mastra%2Fvoice-murf.md +201 -1
- package/.docs/organized/changelogs/%40mastra%2Fvoice-openai-realtime.md +201 -1
- package/.docs/organized/changelogs/%40mastra%2Fvoice-openai.md +201 -1
- package/.docs/organized/changelogs/%40mastra%2Fvoice-playai.md +201 -1
- package/.docs/organized/changelogs/%40mastra%2Fvoice-sarvam.md +201 -1
- package/.docs/organized/changelogs/%40mastra%2Fvoice-speechify.md +201 -1
- package/.docs/organized/changelogs/create-mastra.md +201 -1
- package/.docs/organized/changelogs/mastra.md +201 -1
- package/.docs/organized/code-examples/memory-with-processors.md +1 -1
- package/.docs/organized/code-examples/quick-start.md +1 -1
- package/.docs/raw/agents/adding-voice.mdx +7 -10
- package/.docs/raw/agents/guardrails.mdx +19 -20
- package/.docs/raw/agents/human-in-the-loop-with-tools.mdx +6 -5
- package/.docs/raw/agents/networks.mdx +1 -2
- package/.docs/raw/agents/overview.mdx +5 -5
- package/.docs/raw/agents/using-tools.mdx +4 -5
- package/.docs/raw/course/01-first-agent/05-running-playground.md +5 -5
- package/.docs/raw/course/01-first-agent/09-testing-your-agent.md +3 -3
- package/.docs/raw/course/01-first-agent/13-testing-your-tool.md +3 -3
- package/.docs/raw/course/01-first-agent/17-testing-memory.md +2 -2
- package/.docs/raw/course/04-workflows/07-using-playground.md +1 -1
- package/.docs/raw/deployment/building-mastra.mdx +1 -1
- package/.docs/raw/deployment/cloud-providers/amazon-ec2.mdx +1 -1
- package/.docs/raw/deployment/cloud-providers/aws-lambda.mdx +1 -1
- package/.docs/raw/deployment/cloud-providers/azure-app-services.mdx +1 -1
- package/.docs/raw/deployment/cloud-providers/digital-ocean.mdx +1 -1
- package/.docs/raw/deployment/cloud-providers/index.mdx +1 -1
- package/.docs/raw/deployment/mastra-cloud/observability.mdx +19 -17
- package/.docs/raw/deployment/mastra-cloud/setting-up.mdx +1 -1
- package/.docs/raw/deployment/overview.mdx +2 -2
- package/.docs/raw/deployment/web-framework.mdx +5 -5
- package/.docs/raw/evals/custom-scorers.mdx +3 -5
- package/.docs/raw/evals/overview.mdx +2 -3
- package/.docs/raw/getting-started/project-structure.mdx +1 -1
- package/.docs/raw/getting-started/start.mdx +72 -0
- package/.docs/raw/getting-started/studio.mdx +1 -1
- package/.docs/raw/{frameworks/agentic-uis/ai-sdk.mdx → guides/build-your-ui/ai-sdk-ui.mdx} +105 -11
- package/.docs/raw/{frameworks/web-frameworks → guides/getting-started}/astro.mdx +23 -25
- package/.docs/raw/{frameworks/servers → guides/getting-started}/express.mdx +3 -4
- package/.docs/raw/guides/{guide → getting-started}/manual-install.mdx +1 -1
- package/.docs/raw/guides/{quickstarts/nextjs.mdx → getting-started/next-js.mdx} +11 -11
- package/.docs/raw/guides/{quickstarts/standalone-server.mdx → getting-started/quickstart.mdx} +7 -7
- package/.docs/raw/{frameworks/web-frameworks → guides/getting-started}/sveltekit.mdx +23 -25
- package/.docs/raw/{frameworks/web-frameworks → guides/getting-started}/vite-react.mdx +7 -7
- package/.docs/raw/guides/guide/ai-recruiter.mdx +2 -3
- package/.docs/raw/guides/guide/chef-michel.mdx +2 -3
- package/.docs/raw/guides/guide/notes-mcp-server.mdx +2 -2
- package/.docs/raw/guides/guide/research-assistant.mdx +7 -8
- package/.docs/raw/guides/guide/stock-agent.mdx +4 -6
- package/.docs/raw/guides/guide/web-search.mdx +12 -10
- package/.docs/raw/guides/migrations/agentnetwork.mdx +4 -4
- package/.docs/raw/guides/migrations/ai-sdk-v4-to-v5.mdx +1 -1
- package/.docs/raw/guides/migrations/upgrade-to-v1/agent.mdx +29 -0
- package/.docs/raw/guides/migrations/upgrade-to-v1/tools.mdx +5 -0
- package/.docs/raw/guides/migrations/upgrade-to-v1/workflows.mdx +22 -0
- package/.docs/raw/guides/migrations/vnext-to-standard-apis.mdx +2 -2
- package/.docs/raw/index.mdx +2 -2
- package/.docs/raw/mcp/overview.mdx +3 -5
- package/.docs/raw/memory/memory-processors.mdx +1 -2
- package/.docs/raw/memory/semantic-recall.mdx +7 -7
- package/.docs/raw/memory/storage/memory-with-libsql.mdx +2 -4
- package/.docs/raw/memory/storage/memory-with-mongodb.mdx +2 -4
- package/.docs/raw/memory/storage/memory-with-pg.mdx +2 -4
- package/.docs/raw/memory/storage/memory-with-upstash.mdx +2 -4
- package/.docs/raw/memory/threads-and-resources.mdx +3 -3
- package/.docs/raw/memory/working-memory.mdx +4 -5
- package/.docs/raw/{logging.mdx → observability/logging.mdx} +1 -1
- package/.docs/raw/observability/overview.mdx +2 -2
- package/.docs/raw/observability/tracing/exporters/otel.mdx +21 -2
- package/.docs/raw/observability/tracing/exporters/posthog.mdx +107 -0
- package/.docs/raw/observability/tracing/overview.mdx +3 -2
- package/.docs/raw/rag/chunking-and-embedding.mdx +16 -17
- package/.docs/raw/rag/overview.mdx +3 -2
- package/.docs/raw/rag/retrieval.mdx +20 -32
- package/.docs/raw/reference/agents/agent.mdx +7 -10
- package/.docs/raw/reference/agents/generateLegacy.mdx +2 -2
- package/.docs/raw/reference/agents/getLLM.mdx +1 -1
- package/.docs/raw/reference/agents/network.mdx +2 -3
- package/.docs/raw/reference/cli/mastra.mdx +2 -1
- package/.docs/raw/reference/client-js/agents.mdx +3 -3
- package/.docs/raw/reference/core/getLogger.mdx +1 -1
- package/.docs/raw/reference/core/listLogs.mdx +1 -1
- package/.docs/raw/reference/core/listLogsByRunId.mdx +1 -1
- package/.docs/raw/reference/core/mastra-model-gateway.mdx +5 -19
- package/.docs/raw/reference/core/setLogger.mdx +1 -1
- package/.docs/raw/reference/core/setTelemetry.mdx +1 -1
- package/.docs/raw/reference/evals/answer-relevancy.mdx +28 -98
- package/.docs/raw/reference/evals/answer-similarity.mdx +12 -258
- package/.docs/raw/reference/evals/bias.mdx +29 -87
- package/.docs/raw/reference/evals/completeness.mdx +31 -90
- package/.docs/raw/reference/evals/content-similarity.mdx +28 -88
- package/.docs/raw/reference/evals/context-precision.mdx +28 -130
- package/.docs/raw/reference/evals/context-relevance.mdx +11 -11
- package/.docs/raw/reference/evals/faithfulness.mdx +28 -101
- package/.docs/raw/reference/evals/hallucination.mdx +28 -103
- package/.docs/raw/reference/evals/keyword-coverage.mdx +28 -107
- package/.docs/raw/reference/evals/noise-sensitivity.mdx +11 -11
- package/.docs/raw/reference/evals/prompt-alignment.mdx +15 -15
- package/.docs/raw/reference/evals/textual-difference.mdx +27 -100
- package/.docs/raw/reference/evals/tone-consistency.mdx +25 -98
- package/.docs/raw/reference/evals/tool-call-accuracy.mdx +7 -7
- package/.docs/raw/reference/evals/toxicity.mdx +29 -92
- package/.docs/raw/reference/memory/memory-class.mdx +5 -7
- package/.docs/raw/reference/observability/tracing/exporters/posthog.mdx +132 -0
- package/.docs/raw/reference/processors/batch-parts-processor.mdx +1 -1
- package/.docs/raw/reference/processors/language-detector.mdx +1 -1
- package/.docs/raw/reference/processors/moderation-processor.mdx +2 -2
- package/.docs/raw/reference/processors/pii-detector.mdx +2 -2
- package/.docs/raw/reference/processors/prompt-injection-detector.mdx +1 -1
- package/.docs/raw/reference/processors/system-prompt-scrubber.mdx +2 -3
- package/.docs/raw/reference/processors/token-limiter-processor.mdx +2 -2
- package/.docs/raw/reference/processors/unicode-normalizer.mdx +1 -1
- package/.docs/raw/reference/rag/embeddings.mdx +5 -5
- package/.docs/raw/reference/rag/rerank.mdx +1 -2
- package/.docs/raw/reference/rag/rerankWithScorer.mdx +0 -1
- package/.docs/raw/reference/streaming/agents/stream.mdx +8 -1
- package/.docs/raw/reference/templates/overview.mdx +1 -4
- package/.docs/raw/reference/tools/client.mdx +1 -2
- package/.docs/raw/reference/tools/create-tool.mdx +132 -0
- package/.docs/raw/reference/tools/graph-rag-tool.mdx +5 -5
- package/.docs/raw/reference/tools/mcp-client.mdx +2 -4
- package/.docs/raw/reference/tools/mcp-server.mdx +1 -2
- package/.docs/raw/reference/tools/vector-query-tool.mdx +14 -15
- package/.docs/raw/reference/vectors/chroma.mdx +81 -1
- package/.docs/raw/reference/vectors/couchbase.mdx +24 -17
- package/.docs/raw/reference/vectors/lance.mdx +38 -22
- package/.docs/raw/reference/vectors/libsql.mdx +35 -2
- package/.docs/raw/reference/vectors/mongodb.mdx +35 -2
- package/.docs/raw/reference/vectors/opensearch.mdx +37 -16
- package/.docs/raw/reference/vectors/pg.mdx +43 -36
- package/.docs/raw/reference/vectors/pinecone.mdx +48 -1
- package/.docs/raw/reference/vectors/qdrant.mdx +36 -1
- package/.docs/raw/reference/vectors/turbopuffer.mdx +74 -0
- package/.docs/raw/reference/voice/openai-realtime.mdx +2 -2
- package/.docs/raw/reference/voice/voice.addInstructions.mdx +2 -3
- package/.docs/raw/reference/voice/voice.addTools.mdx +1 -1
- package/.docs/raw/reference/voice/voice.answer.mdx +1 -1
- package/.docs/raw/reference/voice/voice.close.mdx +1 -1
- package/.docs/raw/reference/voice/voice.connect.mdx +1 -1
- package/.docs/raw/reference/voice/voice.off.mdx +1 -1
- package/.docs/raw/reference/voice/voice.on.mdx +1 -1
- package/.docs/raw/reference/voice/voice.send.mdx +1 -1
- package/.docs/raw/reference/voice/voice.updateConfig.mdx +1 -1
- package/.docs/raw/server-db/mastra-client.mdx +1 -2
- package/.docs/raw/streaming/overview.mdx +20 -9
- package/.docs/raw/streaming/tool-streaming.mdx +47 -4
- package/.docs/raw/tools-mcp/advanced-usage.mdx +1 -2
- package/.docs/raw/tools-mcp/mcp-overview.mdx +3 -5
- package/.docs/raw/voice/overview.mdx +21 -41
- package/.docs/raw/voice/speech-to-speech.mdx +4 -4
- package/.docs/raw/voice/speech-to-text.mdx +1 -2
- package/.docs/raw/voice/text-to-speech.mdx +1 -2
- package/.docs/raw/workflows/control-flow.mdx +180 -0
- package/CHANGELOG.md +10 -0
- package/dist/{chunk-5NJC7NRO.js → chunk-4CM2BQNP.js} +24 -4
- package/dist/prepare-docs/package-changes.d.ts.map +1 -1
- package/dist/prepare-docs/prepare.js +1 -1
- package/dist/stdio.js +1 -1
- package/package.json +7 -7
- package/.docs/raw/frameworks/agentic-uis/cedar-os.mdx +0 -102
- package/.docs/raw/frameworks/agentic-uis/openrouter.mdx +0 -179
- package/.docs/raw/frameworks/web-frameworks/next-js.mdx +0 -379
- package/.docs/raw/getting-started/quickstart.mdx +0 -27
- package/.docs/raw/getting-started/templates.mdx +0 -73
- /package/.docs/raw/{frameworks/agentic-uis → guides/build-your-ui}/assistant-ui.mdx +0 -0
- /package/.docs/raw/{frameworks/agentic-uis → guides/build-your-ui}/copilotkit.mdx +0 -0
@@ -27,14 +27,14 @@ Mastra provides flexible retrieval options with support for semantic search, fil
 The simplest approach is direct semantic search. This method uses vector similarity to find chunks that are semantically similar to the query:
 
 ```ts showLineNumbers copy
-import { openai } from "@ai-sdk/openai";
 import { embed } from "ai";
 import { PgVector } from "@mastra/pg";
+import { ModelRouterEmbeddingModel } from "@mastra/core/llm";
 
 // Convert query to embedding
 const { embedding } = await embed({
   value: "What are the main points in the article?",
-  model:
+  model: new ModelRouterEmbeddingModel("openai/text-embedding-3-small"),
 });
 
 // Query vector store
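For orientation, this is roughly what the updated snippet looks like end to end. It is a minimal sketch rather than the exact documentation source: the `PgVector` constructor options, the `indexName`, and the `query({ indexName, queryVector, topK })` call shape are assumptions inferred from the surrounding hunks.

```ts
import { embed } from "ai";
import { PgVector } from "@mastra/pg";
import { ModelRouterEmbeddingModel } from "@mastra/core/llm";

// Convert the query into an embedding via the model router
const { embedding } = await embed({
  value: "What are the main points in the article?",
  model: new ModelRouterEmbeddingModel("openai/text-embedding-3-small"),
});

// Query the vector store for the closest chunks
// (connection string and index name are illustrative placeholders)
const pgVector = new PgVector({
  connectionString: process.env.POSTGRES_CONNECTION_STRING!,
});
const results = await pgVector.query({
  indexName: "embeddings",
  queryVector: embedding,
  topK: 10,
});

console.log(results.map((r) => r.metadata));
```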
@@ -156,7 +156,7 @@ Sometimes you want to give your agent the ability to query a vector database dir
 const vectorQueryTool = createVectorQueryTool({
   vectorStoreName: "pgVector",
   indexName: "embeddings",
-  model:
+  model: new ModelRouterEmbeddingModel("openai/text-embedding-3-small"),
 });
 ```
 
@@ -177,7 +177,7 @@ The Vector Query Tool supports database-specific configurations that enable you
 const pineconeQueryTool = createVectorQueryTool({
   vectorStoreName: "pinecone",
   indexName: "docs",
-  model:
+  model: new ModelRouterEmbeddingModel("openai/text-embedding-3-small"),
   databaseConfig: {
     pinecone: {
       namespace: "production", // Isolate data by environment
@@ -189,7 +189,7 @@ const pineconeQueryTool = createVectorQueryTool({
 const pgVectorQueryTool = createVectorQueryTool({
   vectorStoreName: "postgres",
   indexName: "embeddings",
-  model:
+  model: new ModelRouterEmbeddingModel("openai/text-embedding-3-small"),
   databaseConfig: {
     pgvector: {
       minScore: 0.7, // Filter low-quality results
@@ -203,7 +203,7 @@ const pgVectorQueryTool = createVectorQueryTool({
 const chromaQueryTool = createVectorQueryTool({
   vectorStoreName: "chroma",
   indexName: "documents",
-  model:
+  model: new ModelRouterEmbeddingModel("openai/text-embedding-3-small"),
   databaseConfig: {
     chroma: {
       where: { category: "technical" },
@@ -216,7 +216,7 @@ const chromaQueryTool = createVectorQueryTool({
 const lanceQueryTool = createVectorQueryTool({
   vectorStoreName: "lance",
   indexName: "documents",
-  model:
+  model: new ModelRouterEmbeddingModel("openai/text-embedding-3-small"),
   databaseConfig: {
     lance: {
       tableName: "myVectors", // Specify which table to query
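The retrieval guide pairs these tools with an agent so it can query the store on demand. A minimal sketch of that wiring, assuming Mastra's standard `tools` map on `Agent`; the ids and instructions here are placeholders:

```ts
import { Agent } from "@mastra/core/agent";
import { createVectorQueryTool } from "@mastra/rag";
import { ModelRouterEmbeddingModel } from "@mastra/core/llm";

const vectorQueryTool = createVectorQueryTool({
  vectorStoreName: "pgVector",
  indexName: "embeddings",
  model: new ModelRouterEmbeddingModel("openai/text-embedding-3-small"),
});

// The agent can now call the tool to retrieve relevant chunks on demand
export const ragAgent = new Agent({
  id: "rag-agent",
  name: "RAG Agent",
  instructions:
    "Answer questions using the context returned by the vector query tool.",
  model: "openai/gpt-5.1",
  tools: { vectorQueryTool },
});
```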
@@ -272,13 +272,12 @@ When implementing filtering, these prompts are required in the agent's instructi
 <TabItem value="pgvector" label="pgVector">
 
 ```ts showLineNumbers copy
-import { openai } from "@ai-sdk/openai";
 import { PGVECTOR_PROMPT } from "@mastra/pg";
 
 export const ragAgent = new Agent({
   id: "rag-agent",
   name: "RAG Agent",
-  model: openai
+  model: "openai/gpt-5.1",
   instructions: `
   Process queries using the provided context. Structure responses to be concise and relevant.
   ${PGVECTOR_PROMPT}
@@ -292,13 +291,12 @@ export const ragAgent = new Agent({
 <TabItem value="pinecone" label="Pinecone">
 
 ```ts title="vector-store.ts" showLineNumbers copy
-import { openai } from "@ai-sdk/openai";
 import { PINECONE_PROMPT } from "@mastra/pinecone";
 
 export const ragAgent = new Agent({
   id: "rag-agent",
   name: "RAG Agent",
-  model: openai
+  model: "openai/gpt-5.1",
   instructions: `
   Process queries using the provided context. Structure responses to be concise and relevant.
   ${PINECONE_PROMPT}
@@ -312,13 +310,12 @@ export const ragAgent = new Agent({
 <TabItem value="qdrant" label="Qdrant">
 
 ```ts title="vector-store.ts" showLineNumbers copy
-import { openai } from "@ai-sdk/openai";
 import { QDRANT_PROMPT } from "@mastra/qdrant";
 
 export const ragAgent = new Agent({
   id: "rag-agent",
   name: "RAG Agent",
-  model: openai
+  model: "openai/gpt-5.1",
   instructions: `
   Process queries using the provided context. Structure responses to be concise and relevant.
   ${QDRANT_PROMPT}
@@ -332,13 +329,12 @@ export const ragAgent = new Agent({
 <TabItem value="chroma" label="Chroma">
 
 ```ts title="vector-store.ts" showLineNumbers copy
-import { openai } from "@ai-sdk/openai";
 import { CHROMA_PROMPT } from "@mastra/chroma";
 
 export const ragAgent = new Agent({
   id: "rag-agent",
   name: "RAG Agent",
-  model: openai
+  model: "openai/gpt-5.1",
   instructions: `
   Process queries using the provided context. Structure responses to be concise and relevant.
   ${CHROMA_PROMPT}
@@ -352,13 +348,12 @@ export const ragAgent = new Agent({
 <TabItem value="astra" label="Astra">
 
 ```ts title="vector-store.ts" showLineNumbers copy
-import { openai } from "@ai-sdk/openai";
 import { ASTRA_PROMPT } from "@mastra/astra";
 
 export const ragAgent = new Agent({
   id: "rag-agent",
   name: "RAG Agent",
-  model: openai
+  model: "openai/gpt-5.1",
   instructions: `
   Process queries using the provided context. Structure responses to be concise and relevant.
   ${ASTRA_PROMPT}
@@ -372,13 +367,12 @@ export const ragAgent = new Agent({
 <TabItem value="libsql" label="LibSQL">
 
 ```ts title="vector-store.ts" showLineNumbers copy
-import { openai } from "@ai-sdk/openai";
 import { LIBSQL_PROMPT } from "@mastra/libsql";
 
 export const ragAgent = new Agent({
   id: "rag-agent",
   name: "RAG Agent",
-  model: openai
+  model: "openai/gpt-5.1",
   instructions: `
   Process queries using the provided context. Structure responses to be concise and relevant.
   ${LIBSQL_PROMPT}
@@ -392,13 +386,12 @@ export const ragAgent = new Agent({
 <TabItem value="upstash" label="Upstash">
 
 ```ts title="vector-store.ts" showLineNumbers copy
-import { openai } from "@ai-sdk/openai";
 import { UPSTASH_PROMPT } from "@mastra/upstash";
 
 export const ragAgent = new Agent({
   id: "rag-agent",
   name: "RAG Agent",
-  model: openai
+  model: "openai/gpt-5.1",
   instructions: `
   Process queries using the provided context. Structure responses to be concise and relevant.
   ${UPSTASH_PROMPT}
@@ -412,13 +405,12 @@ export const ragAgent = new Agent({
 <TabItem value="vectorize" label="Vectorize">
 
 ```ts title="vector-store.ts" showLineNumbers copy
-import { openai } from "@ai-sdk/openai";
 import { VECTORIZE_PROMPT } from "@mastra/vectorize";
 
 export const ragAgent = new Agent({
   id: "rag-agent",
   name: "RAG Agent",
-  model: openai
+  model: "openai/gpt-5.1",
   instructions: `
   Process queries using the provided context. Structure responses to be concise and relevant.
   ${VECTORIZE_PROMPT}
@@ -432,13 +424,12 @@ export const ragAgent = new Agent({
 <TabItem value="mongodb" label="MongoDB">
 
 ```ts title="vector-store.ts" showLineNumbers copy
-import { openai } from "@ai-sdk/openai";
 import { MONGODB_PROMPT } from "@mastra/mongodb";
 
 export const ragAgent = new Agent({
   id: "rag-agent",
   name: "RAG Agent",
-  model: openai
+  model: "openai/gpt-5.1",
   instructions: `
   Process queries using the provided context. Structure responses to be concise and relevant.
   ${MONGODB_PROMPT}
@@ -452,13 +443,12 @@ export const ragAgent = new Agent({
 <TabItem value="opensearch" label="OpenSearch">
 
 ```ts title="vector-store.ts" showLineNumbers copy
-import { openai } from "@ai-sdk/openai";
 import { OPENSEARCH_PROMPT } from "@mastra/opensearch";
 
 export const ragAgent = new Agent({
   id: "rag-agent",
   name: "RAG Agent",
-  model: openai
+  model: "openai/gpt-5.1",
   instructions: `
   Process queries using the provided context. Structure responses to be concise and relevant.
   ${OPENSEARCH_PROMPT}
@@ -472,13 +462,12 @@ export const ragAgent = new Agent({
 <TabItem value="s3vectors" label="S3Vectors">
 
 ```ts title="vector-store.ts" showLineNumbers copy
-import { openai } from "@ai-sdk/openai";
 import { S3VECTORS_PROMPT } from "@mastra/s3vectors";
 
 export const ragAgent = new Agent({
   id: "rag-agent",
   name: "RAG Agent",
-  model: openai
+  model: "openai/gpt-5.1",
   instructions: `
   Process queries using the provided context. Structure responses to be concise and relevant.
   ${S3VECTORS_PROMPT}
@@ -502,7 +491,6 @@ Initial vector similarity search can sometimes miss nuanced relevance. Re-rankin
 Here's how to use re-ranking:
 
 ```ts showLineNumbers copy
-import { openai } from "@ai-sdk/openai";
 import {
   rerankWithScorer as rerank,
   MastraAgentRelevanceScorer
@@ -516,7 +504,7 @@ const initialResults = await pgVector.query({
 });
 
 // Create a relevance scorer
-const relevanceProvider = new MastraAgentRelevanceScorer('relevance-scorer', openai
+const relevanceProvider = new MastraAgentRelevanceScorer('relevance-scorer', "openai/gpt-5.1");
 
 // Re-rank the results
 const rerankedResults = await rerank({
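The hunk cuts off at the start of the `rerank` call. A rough sketch of the complete re-ranking step under the new string-based model API; the `@mastra/rag` import path and the `results`/`query`/`scorer`/`options` option names are assumptions, not copied from this diff:

```ts
import {
  rerankWithScorer as rerank,
  MastraAgentRelevanceScorer,
} from "@mastra/rag";

// LLM-backed scorer that grades each retrieved chunk against the query
const relevanceProvider = new MastraAgentRelevanceScorer(
  "relevance-scorer",
  "openai/gpt-5.1",
);

// Re-rank the initial vector results and keep the best matches
const rerankedResults = await rerank({
  results: initialResults, // results returned by pgVector.query(...)
  query: "What are the main points in the article?",
  scorer: relevanceProvider,
  options: { topK: 5 },
});
```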
@@ -561,7 +549,7 @@ Example setup:
 const graphQueryTool = createGraphQueryTool({
   vectorStoreName: "pgVector",
   indexName: "embeddings",
-  model:
+  model: new ModelRouterEmbeddingModel("openai/text-embedding-3-small"),
   graphOptions: {
     threshold: 0.7,
   },
@@ -12,7 +12,6 @@ The `Agent` class is the foundation for creating AI agents in Mastra. It provide
 ### Basic string instructions
 
 ```typescript title="src/mastra/agents/string-agent.ts" showLineNumbers copy
-import { openai } from "@ai-sdk/openai";
 import { Agent } from "@mastra/core/agent";
 
 // String instructions
@@ -20,7 +19,7 @@ export const agent = new Agent({
   id: "test-agent",
   name: "Test Agent",
   instructions: "You are a helpful assistant that provides concise answers.",
-  model: openai
+  model: "openai/gpt-5.1",
 });
 
 // System message object
@@ -31,7 +30,7 @@ export const agent2 = new Agent({
     role: "system",
     content: "You are an expert programmer",
   },
-  model: openai
+  model: "openai/gpt-5.1",
 });
 
 // Array of system messages
@@ -42,7 +41,7 @@ export const agent3 = new Agent({
     { role: "system", content: "You are a helpful assistant" },
     { role: "system", content: "You have expertise in TypeScript" },
   ],
-  model: openai
+  model: "openai/gpt-5.1",
 });
 ```
 
@@ -51,7 +50,6 @@ export const agent3 = new Agent({
 Use CoreSystemMessage format to access additional properties like `providerOptions` for provider-specific configurations:
 
 ```typescript title="src/mastra/agents/core-message-agent.ts" showLineNumbers copy
-import { openai } from "@ai-sdk/openai";
 import { Agent } from "@mastra/core/agent";
 
 export const agent = new Agent({
@@ -67,14 +65,13 @@ export const agent = new Agent({
       },
     },
   },
-  model: openai
+  model: "openai/gpt-5.1",
 });
 ```
 
 ### Multiple CoreSystemMessages
 
 ```typescript title="src/mastra/agents/multi-message-agent.ts" showLineNumbers copy
-import { anthropic } from "@ai-sdk/anthropic";
 import { Agent } from "@mastra/core/agent";
 
 // This could be customizable based on the user
@@ -97,7 +94,7 @@ export const agent = new Agent({
       },
     },
   ],
-  model: anthropic
+  model: "anthropic/claude-sonnet-4-20250514",
 });
 ```
 
@@ -129,8 +126,8 @@ export const agent = new Agent({
     name: "instructions",
     type: "SystemMessage | ({ requestContext: RequestContext }) => SystemMessage | Promise<SystemMessage>",
     isOptional: false,
-    description: `Instructions that guide the agent's behavior. Can be a string, array of strings, system message object,
-    array of system messages, or a function that returns any of these types dynamically.
+    description: `Instructions that guide the agent's behavior. Can be a string, array of strings, system message object,
+    array of system messages, or a function that returns any of these types dynamically.
     SystemMessage types: string | string[] | CoreSystemMessage | CoreSystemMessage[] | SystemModelMessage | SystemModelMessage[]`,
   },
   {
@@ -590,12 +590,12 @@ await agent.generateLegacy(
       sentiment: z.enum(["positive", "negative", "neutral"]),
       confidence: z.number(),
     }),
-    model: openai
+    model: "openai/gpt-5.1",
     errorStrategy: "warn",
   },
   // Output processors for response validation
   outputProcessors: [
-    new ModerationProcessor({ model: openai
+    new ModerationProcessor({ model: "openai/gpt-4.1-nano" }),
     new TokenLimiterProcessor({ maxTokens: 1000 }),
   ],
 },
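Pulling the fragments of that last hunk together, the structured-output call now takes plain model strings. A hedged sketch of what such a call could look like; the processor import path, the message passed to `generateLegacy`, and the surrounding agent definition are assumptions rather than content from this diff:

```typescript
import { z } from "zod";
import { Agent } from "@mastra/core/agent";
import {
  ModerationProcessor,
  TokenLimiterProcessor,
} from "@mastra/core/processors";

const agent = new Agent({
  id: "sentiment-agent",
  name: "Sentiment Agent",
  instructions: "Classify the sentiment of the user's message.",
  model: "openai/gpt-5.1",
});

const result = await agent.generateLegacy("I love this product!", {
  structuredOutput: {
    schema: z.object({
      sentiment: z.enum(["positive", "negative", "neutral"]),
      confidence: z.number(),
    }),
    model: "openai/gpt-5.1",
    errorStrategy: "warn",
  },
  // Output processors for response validation
  outputProcessors: [
    new ModerationProcessor({ model: "openai/gpt-4.1-nano" }),
    new TokenLimiterProcessor({ maxTokens: 1000 }),
  ],
});

console.log(result);
```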
@@ -17,7 +17,6 @@ The `.network()` method enables multi-agent collaboration and routing. This meth
 
 ```typescript copy
 import { Agent } from "@mastra/core/agent";
-import { openai } from "@ai-sdk/openai";
 import { agent1, agent2 } from "./agents";
 import { workflow1 } from "./workflows";
 import { tool1, tool2 } from "./tools";
@@ -27,7 +26,7 @@ const agent = new Agent({
   name: "Network Agent",
   instructions:
     "You are a network agent that can help users with a variety of tasks.",
-  model: openai
+  model: "openai/gpt-5.1",
   agents: {
     agent1,
     agent2,
@@ -42,7 +41,7 @@ const agent = new Agent({
 });
 
 await agent.network(`
-  Find me the weather in Tokyo.
+  Find me the weather in Tokyo.
   Based on the weather, plan an activity for me.
 `);
 ```
@@ -75,7 +75,8 @@ ANTHROPIC_BASE_URL=https://anthropic.internal \
 mastra dev
 ```
 
-These are forwarded
+These are forwarded to the Mastra model router and will work with any `"openai/..."`
+or `"anthropic/..."` model selections.
 
 ## `mastra build`
 
@@ -174,11 +174,11 @@ response.processDataStream({
 
 #### AI SDK compatible format
 
-To stream AI SDK-formatted parts on the client from an `agent.stream(...)` response, wrap `response.processDataStream` into a `ReadableStream<ChunkType>` and use `
+To stream AI SDK-formatted parts on the client from an `agent.stream(...)` response, wrap `response.processDataStream` into a `ReadableStream<ChunkType>` and use `toAISdkStream`:
 
 ```typescript title="client-ai-sdk-transform.ts" copy
 import { createUIMessageStream } from "ai";
-import {
+import { toAISdkStream } from "@mastra/ai-sdk";
 import type { ChunkType, MastraModelOutput } from "@mastra/core/stream";
 
 const response = await agent.stream({ messages: "Tell me a story" });
@@ -195,7 +195,7 @@ const chunkStream: ReadableStream<ChunkType> = new ReadableStream<ChunkType>({
 
 const uiMessageStream = createUIMessageStream({
   execute: async ({ writer }) => {
-    for await (const part of
+    for await (const part of toAISdkStream(
       chunkStream as unknown as MastraModelOutput,
       { from: "agent" },
     )) {
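Since the two hunks above only show fragments of the client-side transform, here is a rough end-to-end sketch of the new flow. The `onChunk` callback name on `processDataStream`, the `writer.write(part)` call, and the pre-existing `agent` handle are assumptions; treat this as an outline rather than the documented example:

```typescript
import { createUIMessageStream } from "ai";
import { toAISdkStream } from "@mastra/ai-sdk";
import type { ChunkType, MastraModelOutput } from "@mastra/core/stream";

const response = await agent.stream({ messages: "Tell me a story" });

// Bridge processDataStream into a ReadableStream of Mastra chunks.
// `onChunk` is an assumed callback name; use whatever your client SDK
// version exposes for raw chunks.
const chunkStream: ReadableStream<ChunkType> = new ReadableStream<ChunkType>({
  start(controller) {
    response
      .processDataStream({
        onChunk: async (chunk: ChunkType) => controller.enqueue(chunk),
      })
      .then(() => controller.close());
  },
});

// Convert Mastra chunks into AI SDK UI message parts
const uiMessageStream = createUIMessageStream({
  execute: async ({ writer }) => {
    for await (const part of toAISdkStream(
      chunkStream as unknown as MastraModelOutput,
      { from: "agent" },
    )) {
      if (part) writer.write(part);
    }
  },
});
```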
@@ -15,9 +15,8 @@ import { createOpenAICompatible } from '@ai-sdk/openai-compatible-v5';
 import type { LanguageModelV2 } from '@ai-sdk/provider-v5';
 
 class MyCustomGateway extends MastraModelGateway {
-  readonly id = '
+  readonly id = 'custom';
   readonly name = 'My Custom Gateway';
-  readonly prefix = 'custom';
 
   async fetchProviders(): Promise<Record<string, ProviderConfig>> {
     return {
@@ -66,7 +65,7 @@ class MyCustomGateway extends MastraModelGateway {
   {
     name: 'id',
     type: 'string',
-    description: 'Unique identifier for the gateway.
+    description: 'Unique identifier for the gateway. This ID is used as the prefix for all providers from this gateway (e.g., "netlify/anthropic"). Exception: models.dev is a provider registry and doesn\'t use a prefix.',
   },
   {
     name: 'name',
@@ -76,18 +75,6 @@ class MyCustomGateway extends MastraModelGateway {
   ]}
 />
 
-## Optional Properties
-
-<PropertiesTable
-  content={[
-    {
-      name: 'prefix',
-      type: 'string | undefined',
-      description: 'Optional prefix for provider IDs. If set, all providers from this gateway will be prefixed (e.g., "netlify/openai"). Registry gateways typically don\'t have a prefix.',
-    },
-  ]}
-/>
-
 ## Required Methods
 
 ### fetchProviders()
@@ -217,15 +204,14 @@ Returns the gateway's unique identifier.
 
 ## Model ID Format
 
-
+For true gateways, the gateway ID is used as a prefix and models are accessed using this format:
 
 ```
-[
+[gateway-id]/[provider]/[model]
 ```
 
 Examples:
--
-- Without prefix: `'my-provider/model-1'`
+- Gateway with `id = 'custom'`: `'custom/my-provider/model-1'`
 
 ## Built-in Implementations
 
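In other words, once a gateway like the `MyCustomGateway` example above (with `id = 'custom'`) is registered with your Mastra setup, agents select its models through the prefixed string. A minimal, hedged sketch; the agent fields are placeholders, and the gateway registration step is assumed to happen elsewhere:

```typescript
import { Agent } from "@mastra/core/agent";

// "custom" is the gateway id from the example above;
// "my-provider/model-1" stands in for whatever fetchProviders() exposes.
export const gatewayAgent = new Agent({
  id: "gateway-agent",
  name: "Gateway Agent",
  instructions: "You are a helpful assistant.",
  model: "custom/my-provider/model-1",
});
```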
@@ -112,115 +112,45 @@ A relevancy score between 0 and 1:
 - **0.1–0.3**: The response includes minimal relevant content and largely misses the intent of the query.
 - **0.0**: The response is entirely unrelated and does not answer the query.
 
-##
+## Example
 
-
+Evaluate agent responses for relevancy across different scenarios:
 
-
-
-```typescript title="src/example-high-answer-relevancy.ts" showLineNumbers copy
+```typescript title="src/example-answer-relevancy.ts" showLineNumbers copy
+import { runEvals } from "@mastra/core/evals";
 import { createAnswerRelevancyScorer } from "@mastra/evals/scorers/prebuilt";
+import { myAgent } from "./agent";
 
-const scorer = createAnswerRelevancyScorer({ model: "openai/gpt-4o
+const scorer = createAnswerRelevancyScorer({ model: "openai/gpt-4o" });
 
-const
-
-
-
+const result = await runEvals({
+  data: [
+    {
+      input: "What are the health benefits of regular exercise?",
+    },
+    {
+      input: "What should a healthy breakfast include?",
+    },
+    {
+      input: "What are the benefits of meditation?",
+    },
+  ],
+  scorers: [scorer],
+  target: myAgent,
+  onItemComplete: ({ scorerResults }) => {
+    console.log({
+      score: scorerResults[scorer.id].score,
+      reason: scorerResults[scorer.id].reason,
+    });
   },
-];
-const outputMessage = {
-  text: "Regular exercise improves cardiovascular health, strengthens muscles, boosts metabolism, and enhances mental well-being through the release of endorphins.",
-};
-
-const result = await scorer.run({
-  input: inputMessages,
-  output: outputMessage,
 });
 
-console.log(result);
+console.log(result.scores);
 ```
 
-
-
-The output receives a high score because it accurately answers the query without including unrelated information.
-
-```typescript
-{
-  score: 1,
-  reason: 'The score is 1 because the output directly addresses the question by providing multiple explicit health benefits of regular exercise, including improvements in cardiovascular health, muscle strength, metabolism, and mental well-being. Each point is relevant and contributes to a comprehensive understanding of the health benefits.'
-}
-```
+For more details on `runEvals`, see the [runEvals reference](/reference/v1/evals/run-evals).
 
-
-
-In this example, the response addresses the query in part but includes additional information that isn't directly relevant.
-
-```typescript title="src/example-partial-answer-relevancy.ts" showLineNumbers copy
-import { createAnswerRelevancyScorer } from "@mastra/evals/scorers/prebuilt";
-
-const scorer = createAnswerRelevancyScorer({ model: "openai/gpt-4o-mini" });
-
-const inputMessages = [
-  { role: "user", content: "What should a healthy breakfast include?" },
-];
-const outputMessage = {
-  text: "A nutritious breakfast should include whole grains and protein. However, the timing of your breakfast is just as important - studies show eating within 2 hours of waking optimizes metabolism and energy levels throughout the day.",
-};
-
-const result = await scorer.run({
-  input: inputMessages,
-  output: outputMessage,
-});
-
-console.log(result);
-```
-
-#### Partial relevancy output
-
-The output receives a lower score because it partially answers the query. While some relevant information is included, unrelated details reduce the overall relevance.
-
-```typescript
-{
-  score: 0.25,
-  reason: 'The score is 0.25 because the output provides a direct answer by mentioning whole grains and protein as components of a healthy breakfast, which is relevant. However, the additional information about the timing of breakfast and its effects on metabolism and energy levels is not directly related to the question, leading to a lower overall relevance score.'
-}
-```
-
-## Low relevancy example
-
-In this example, the response does not address the query and contains information that is entirely unrelated.
-
-```typescript title="src/example-low-answer-relevancy.ts" showLineNumbers copy
-import { createAnswerRelevancyScorer } from "@mastra/evals/scorers/prebuilt";
-
-const scorer = createAnswerRelevancyScorer({ model: "openai/gpt-4o-mini" });
-
-const inputMessages = [
-  { role: "user", content: "What are the benefits of meditation?" },
-];
-const outputMessage = {
-  text: "The Great Wall of China is over 13,000 miles long and was built during the Ming Dynasty to protect against invasions.",
-};
-
-const result = await scorer.run({
-  input: inputMessages,
-  output: outputMessage,
-});
-
-console.log(result);
-```
-
-#### Low relevancy output
-
-The output receives a score of 0 because it fails to answer the query or provide any relevant information.
-
-```typescript
-{
-  score: 0,
-  reason: 'The score is 0 because the output about the Great Wall of China is completely unrelated to the benefits of meditation, providing no relevant information or context that addresses the input question.'
-}
-```
+To add this scorer to an agent, see the [Scorers overview](/docs/v1/evals/overview) guide.
 
 ## Related
 