@mastra/mcp-docs-server 0.13.44 → 0.13.45-alpha.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/.docs/organized/changelogs/%40internal%2Fstorage-test-utils.md +201 -1
- package/.docs/organized/changelogs/%40mastra%2Fagent-builder.md +201 -1
- package/.docs/organized/changelogs/%40mastra%2Fai-sdk.md +253 -53
- package/.docs/organized/changelogs/%40mastra%2Fastra.md +201 -1
- package/.docs/organized/changelogs/%40mastra%2Fchroma.md +201 -1
- package/.docs/organized/changelogs/%40mastra%2Fclickhouse.md +201 -1
- package/.docs/organized/changelogs/%40mastra%2Fclient-js.md +201 -1
- package/.docs/organized/changelogs/%40mastra%2Fcloud.md +201 -1
- package/.docs/organized/changelogs/%40mastra%2Fcloudflare-d1.md +201 -1
- package/.docs/organized/changelogs/%40mastra%2Fcloudflare.md +201 -1
- package/.docs/organized/changelogs/%40mastra%2Fcore.md +307 -107
- package/.docs/organized/changelogs/%40mastra%2Fcouchbase.md +201 -1
- package/.docs/organized/changelogs/%40mastra%2Fdeployer-cloud.md +201 -1
- package/.docs/organized/changelogs/%40mastra%2Fdeployer-cloudflare.md +201 -1
- package/.docs/organized/changelogs/%40mastra%2Fdeployer-netlify.md +201 -1
- package/.docs/organized/changelogs/%40mastra%2Fdeployer-vercel.md +201 -1
- package/.docs/organized/changelogs/%40mastra%2Fdeployer.md +201 -1
- package/.docs/organized/changelogs/%40mastra%2Fdynamodb.md +201 -1
- package/.docs/organized/changelogs/%40mastra%2Fevals.md +201 -1
- package/.docs/organized/changelogs/%40mastra%2Flance.md +201 -1
- package/.docs/organized/changelogs/%40mastra%2Flibsql.md +201 -1
- package/.docs/organized/changelogs/%40mastra%2Floggers.md +201 -1
- package/.docs/organized/changelogs/%40mastra%2Fmcp-docs-server.md +201 -1
- package/.docs/organized/changelogs/%40mastra%2Fmcp-registry-registry.md +201 -1
- package/.docs/organized/changelogs/%40mastra%2Fmcp.md +201 -1
- package/.docs/organized/changelogs/%40mastra%2Fmemory.md +201 -1
- package/.docs/organized/changelogs/%40mastra%2Fmongodb.md +201 -1
- package/.docs/organized/changelogs/%40mastra%2Fmssql.md +201 -1
- package/.docs/organized/changelogs/%40mastra%2Fopensearch.md +201 -1
- package/.docs/organized/changelogs/%40mastra%2Fpg.md +201 -1
- package/.docs/organized/changelogs/%40mastra%2Fpinecone.md +201 -1
- package/.docs/organized/changelogs/%40mastra%2Fplayground-ui.md +201 -1
- package/.docs/organized/changelogs/%40mastra%2Fqdrant.md +201 -1
- package/.docs/organized/changelogs/%40mastra%2Frag.md +201 -1
- package/.docs/organized/changelogs/%40mastra%2Freact.md +182 -1
- package/.docs/organized/changelogs/%40mastra%2Fschema-compat.md +36 -0
- package/.docs/organized/changelogs/%40mastra%2Fserver.md +201 -1
- package/.docs/organized/changelogs/%40mastra%2Fturbopuffer.md +201 -1
- package/.docs/organized/changelogs/%40mastra%2Fupstash.md +201 -1
- package/.docs/organized/changelogs/%40mastra%2Fvectorize.md +201 -1
- package/.docs/organized/changelogs/%40mastra%2Fvoice-azure.md +201 -1
- package/.docs/organized/changelogs/%40mastra%2Fvoice-cloudflare.md +201 -1
- package/.docs/organized/changelogs/%40mastra%2Fvoice-deepgram.md +201 -1
- package/.docs/organized/changelogs/%40mastra%2Fvoice-elevenlabs.md +201 -1
- package/.docs/organized/changelogs/%40mastra%2Fvoice-gladia.md +104 -1
- package/.docs/organized/changelogs/%40mastra%2Fvoice-google-gemini-live.md +49 -1
- package/.docs/organized/changelogs/%40mastra%2Fvoice-google.md +201 -1
- package/.docs/organized/changelogs/%40mastra%2Fvoice-murf.md +201 -1
- package/.docs/organized/changelogs/%40mastra%2Fvoice-openai-realtime.md +201 -1
- package/.docs/organized/changelogs/%40mastra%2Fvoice-openai.md +201 -1
- package/.docs/organized/changelogs/%40mastra%2Fvoice-playai.md +201 -1
- package/.docs/organized/changelogs/%40mastra%2Fvoice-sarvam.md +201 -1
- package/.docs/organized/changelogs/%40mastra%2Fvoice-speechify.md +201 -1
- package/.docs/organized/changelogs/create-mastra.md +201 -1
- package/.docs/organized/changelogs/mastra.md +201 -1
- package/.docs/raw/agents/adding-voice.mdx +49 -0
- package/.docs/raw/course/01-first-agent/05-running-playground.md +5 -5
- package/.docs/raw/course/01-first-agent/09-testing-your-agent.md +3 -3
- package/.docs/raw/course/01-first-agent/13-testing-your-tool.md +3 -3
- package/.docs/raw/course/01-first-agent/17-testing-memory.md +2 -2
- package/.docs/raw/course/04-workflows/07-using-playground.md +1 -1
- package/.docs/raw/frameworks/agentic-uis/ai-sdk.mdx +23 -1
- package/.docs/raw/reference/client-js/memory.mdx +43 -0
- package/.docs/raw/reference/core/mastra-class.mdx +8 -0
- package/.docs/raw/reference/core/mastra-model-gateway.mdx +223 -0
- package/.docs/raw/reference/scorers/answer-relevancy.mdx +28 -98
- package/.docs/raw/reference/scorers/answer-similarity.mdx +12 -258
- package/.docs/raw/reference/scorers/bias.mdx +29 -87
- package/.docs/raw/reference/scorers/completeness.mdx +32 -91
- package/.docs/raw/reference/scorers/content-similarity.mdx +29 -99
- package/.docs/raw/reference/scorers/context-precision.mdx +28 -130
- package/.docs/raw/reference/scorers/faithfulness.mdx +28 -101
- package/.docs/raw/reference/scorers/hallucination.mdx +28 -103
- package/.docs/raw/reference/scorers/keyword-coverage.mdx +28 -107
- package/.docs/raw/reference/scorers/textual-difference.mdx +27 -100
- package/.docs/raw/reference/scorers/tone-consistency.mdx +25 -98
- package/.docs/raw/reference/scorers/toxicity.mdx +29 -92
- package/.docs/raw/reference/storage/cloudflare-d1.mdx +37 -0
- package/.docs/raw/reference/storage/lance.mdx +33 -0
- package/.docs/raw/reference/storage/libsql.mdx +37 -0
- package/.docs/raw/reference/storage/mongodb.mdx +39 -0
- package/.docs/raw/reference/storage/mssql.mdx +37 -0
- package/.docs/raw/reference/storage/postgresql.mdx +37 -0
- package/.docs/raw/reference/streaming/agents/stream.mdx +7 -0
- package/.docs/raw/reference/voice/composite-voice.mdx +71 -28
- package/.docs/raw/reference/voice/voice.listen.mdx +86 -52
- package/.docs/raw/reference/voice/voice.speak.mdx +75 -40
- package/.docs/raw/voice/overview.mdx +67 -0
- package/.docs/raw/workflows/control-flow.mdx +180 -0
- package/CHANGELOG.md +10 -0
- package/dist/{chunk-TUAHUTTB.js → chunk-VE65X75W.js} +24 -4
- package/dist/prepare-docs/package-changes.d.ts.map +1 -1
- package/dist/prepare-docs/prepare.js +1 -1
- package/dist/stdio.js +1 -1
- package/package.json +5 -5
package/.docs/raw/agents/adding-voice.mdx

@@ -280,6 +280,55 @@ export const agent = new Agent({
 });
 ```

+### Using AI SDK
+
+Mastra supports using AI SDK's transcription and speech models directly in `CompositeVoice`, giving you access to a wide range of providers through the AI SDK ecosystem:
+
+```typescript
+import { Agent } from "@mastra/core/agent";
+import { CompositeVoice } from "@mastra/core/voice";
+import { openai } from "@ai-sdk/openai";
+import { elevenlabs } from "@ai-sdk/elevenlabs";
+import { groq } from "@ai-sdk/groq";
+
+export const agent = new Agent({
+  id: "aisdk-voice-agent",
+  name: "AI SDK Voice Agent",
+  instructions: `You are a helpful assistant with voice capabilities.`,
+  model: openai("gpt-4o"),
+
+  // Pass AI SDK models directly to CompositeVoice
+  voice: new CompositeVoice({
+    input: openai.transcription('whisper-1'), // AI SDK transcription model
+    output: elevenlabs.speech('eleven_turbo_v2'), // AI SDK speech model
+  }),
+});
+
+// Use voice capabilities as usual
+const audioStream = await agent.voice.speak("Hello!");
+const transcribedText = await agent.voice.listen(audioStream);
+```
+
+#### Mix and Match Providers
+
+You can mix AI SDK models with Mastra voice providers:
+
+```typescript
+import { CompositeVoice } from "@mastra/core/voice";
+import { PlayAIVoice } from "@mastra/voice-playai";
+import { openai } from "@ai-sdk/openai";
+
+// Use AI SDK for transcription and Mastra provider for speech
+const voice = new CompositeVoice({
+  input: openai.transcription('whisper-1'), // AI SDK
+  output: new PlayAIVoice(), // Mastra provider
+});
+```
+
+For the complete list of supported AI SDK providers and their capabilities:
+* [Transcription](https://ai-sdk.dev/docs/providers/openai/transcription)
+* [Speech](https://ai-sdk.dev/docs/providers/elevenlabs/speech)
+
 ## Supported Voice Providers

 Mastra supports multiple voice providers for text-to-speech (TTS) and speech-to-text (STT) capabilities:

package/.docs/raw/course/01-first-agent/05-running-playground.md

@@ -1,14 +1,14 @@
-# Running
+# Running Mastra Studio

-To test your agent as you build it, you'll need to run the Mastra
+To test your agent as you build it, you'll need to run the Mastra Studio:

 ```bash
 npm run dev
 ```

-This will start the
+This will start the studio at `http://localhost:4111`, where you can interact with your agent and test its capabilities.

-The
+The studio provides a user-friendly interface for testing your agent, allowing you to:

 - Send messages to your agent
 - See the agent's responses
@@ -16,4 +16,4 @@ The playground provides a user-friendly interface for testing your agent, allowi
 - Test tools directly
 - Debug any issues that arise

-In the next step, we'll create our first agent with a simple system prompt and test it in the
+In the next step, we'll create our first agent with a simple system prompt and test it in the studio.

package/.docs/raw/course/01-first-agent/09-testing-your-agent.md

@@ -1,12 +1,12 @@
 # Testing Your Agent

-Now let's test our agent in
+Now let's test our agent in Mastra Studio:

 1. Make sure your development server is running with `npm run dev`
-2. Open the
+2. Open the studio at http://localhost:4111/
 3. You should see your "Financial Assistant Agent" in the list of agents
 4. Try sending a message like "Hello, can you help me analyze my spending?"

 At this point, your agent can respond to basic questions but doesn't have access to any transaction data. In the next step, we'll create a custom tool to fetch transaction data from a Google Sheet.

-Testing your agent in the
+Testing your agent in the studio is an important step in the development process. It allows you to see how your agent responds to different inputs and identify any issues that need to be addressed before deploying it to production.

package/.docs/raw/course/01-first-agent/13-testing-your-tool.md

@@ -1,9 +1,9 @@
 # Testing Your Tool

-Let's test our tool and agent in
+Let's test our tool and agent in Mastra Studio:

 1. Make sure your development server is running with `npm run dev`
-2. Open the
+2. Open the studio at http://localhost:4111/
 3. You can test the tool directly in the Tools tab to make sure it's working
 4. Then, try asking your agent questions like:
    - "Can you show me my recent transactions?"
@@ -12,4 +12,4 @@ Let's test our tool and agent in the playground:

 Your agent should now be able to fetch the transaction data and answer questions about it. However, it doesn't yet have memory, so it won't remember previous conversations. We'll add that in the next step.

-Testing your tool directly in the
+Testing your tool directly in the studio is a great way to verify that it's working correctly before integrating it with your agent. This helps you identify and fix any issues with the tool itself before troubleshooting potential issues with the agent's use of the tool.

package/.docs/raw/course/01-first-agent/17-testing-memory.md

@@ -1,9 +1,9 @@
 # Testing Your Agent with Memory

-Let's test our agent's memory capabilities in
+Let's test our agent's memory capabilities in Mastra Studio:

 1. Make sure your development server is running with `npm run dev`
-2. Open the
+2. Open the studio at http://localhost:4111/
 3. Start a conversation with your agent by asking about transactions
 4. Then, ask a follow-up question that references the previous conversation, like:
    - "What was that largest transaction again?"

package/.docs/raw/frameworks/agentic-uis/ai-sdk.mdx

@@ -118,7 +118,7 @@ export const mastra = new Mastra({
     apiRoutes: [
       workflowRoute({
         path: "/workflow",
-
+        workflow: "weatherWorkflow",
       }),
     ],
   },

@@ -148,6 +148,28 @@ const { error, status, sendMessage, messages, regenerate, stop } = useChat({
 });
 ```

+To enable agent text chunks and tool-call streaming when a workflow step pipes an agent's stream to the workflow writer (see [Workflow Streaming](/docs/streaming/workflow-streaming#workflow-using-an-agent)):
+
+Set the `includeTextStreamParts` `workflowRoute` option to `true`:
+```typescript title="src/mastra/index.ts" copy
+import { Mastra } from "@mastra/core/mastra";
+import { workflowRoute } from "@mastra/ai-sdk";
+
+export const mastra = new Mastra({
+  server: {
+    apiRoutes: [
+      workflowRoute({
+        path: "/workflow",
+        workflow: "weatherWorkflow",
+        // This provides a seamless streaming experience even when agents are running inside workflow steps.
+        includeTextStreamParts: true
+      }),
+    ],
+  },
+});
+```
+
+
 ### `networkRoute()`

 Use the `networkRoute()` utility to create a route handler that automatically formats the agent network stream into an AI SDK-compatible format.

package/.docs/raw/reference/client-js/memory.mdx

@@ -125,6 +125,49 @@ const result = await thread.deleteMessages([
 // Returns: { success: true, message: "3 messages deleted successfully" }
 ```

+## Working Memory
+
+Working memory allows agents to maintain persistent information about users across interactions. It can be scoped to either a specific thread or across all threads for a resource (user).
+
+### Get Working Memory
+
+Retrieve the current working memory for a thread:
+
+```typescript
+const workingMemory = await mastraClient.getWorkingMemory({
+  agentId: "agent-1",
+  threadId: "thread-1",
+  resourceId: "user-123", // Optional, required for resource-scoped memory
+});
+```
+
+The response includes:
+- `workingMemory`: The current working memory content (string or null)
+- `source`: Whether the memory is from `"thread"` or `"resource"` scope
+- `workingMemoryTemplate`: The template used for working memory (if configured)
+- `threadExists`: Whether the thread exists
+
+### Update Working Memory
+
+Update the working memory content for a thread:
+
+```typescript
+await mastraClient.updateWorkingMemory({
+  agentId: "agent-1",
+  threadId: "thread-1",
+  workingMemory: `# User Profile
+- Name: John Doe
+- Location: New York
+- Preferences: Prefers formal communication
+`,
+  resourceId: "user-123", // Optional, required for resource-scoped memory
+});
+
+// Returns: { success: true }
+```
+
+**Note:** For resource-scoped working memory, you must provide the `resourceId` parameter. This allows the memory to persist across all conversation threads for that user.
+
 ### Get Memory Status

 Check the status of the memory system:

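To illustrate the working-memory endpoints added above, here is a minimal client-side sketch (not itself part of the published diff), assuming a `MastraClient` pointed at a local Mastra server and an agent whose memory is configured for resource-scoped working memory; the agent, thread, and resource ids are placeholders.

```typescript
import { MastraClient } from "@mastra/client-js";

const mastraClient = new MastraClient({ baseUrl: "http://localhost:4111" });

// Write working memory against one thread, scoped to the resource (user).
await mastraClient.updateWorkingMemory({
  agentId: "agent-1",
  threadId: "thread-1",
  resourceId: "user-123",
  workingMemory: "# User Profile\n- Name: John Doe",
});

// Read it back from a different thread for the same user; with resource
// scope the same content should come back, per the note above.
const { workingMemory, source } = await mastraClient.getWorkingMemory({
  agentId: "agent-1",
  threadId: "thread-2",
  resourceId: "user-123",
});

console.log(source, workingMemory); // e.g. "resource", "# User Profile..."
```
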
package/.docs/raw/reference/core/mastra-class.mdx

@@ -139,5 +139,13 @@ export const mastra = new Mastra({
       isOptional: true,
       defaultValue: "{}",
     },
+    {
+      name: "gateways",
+      type: "Record<string, MastraModelGateway>",
+      description:
+        "Custom model gateways to register for accessing AI models through alternative providers or private deployments. Structured as a key-value pair, with keys being the registry key (used for getGateway()) and values being gateway instances.",
+      isOptional: true,
+      defaultValue: "{}",
+    },
   ]}
 />

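To illustrate the new `gateways` option documented above, here is a minimal sketch (not itself part of the diff), assuming a `MastraModelGateway` subclass such as the `MyCustomGateway` example from the new reference page below; the `"custom"` registry key and the import path are placeholders.

```typescript
import { Mastra } from "@mastra/core/mastra";
// Placeholder import: any MastraModelGateway subclass, e.g. the
// MyCustomGateway sketch from the mastra-model-gateway.mdx reference below.
import { MyCustomGateway } from "./my-custom-gateway";

export const mastra = new Mastra({
  // Keys are registry keys (used by getGateway()); values are gateway instances.
  gateways: {
    custom: new MyCustomGateway(),
  },
});
```
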
package/.docs/raw/reference/core/mastra-model-gateway.mdx (new file)

@@ -0,0 +1,223 @@
+---
+title: "Reference: MastraModelGateway | Core"
+description: "Base class for creating custom model gateways"
+---
+
+# MastraModelGateway
+
+Abstract base class for implementing custom model gateways. Gateways handle provider-specific logic for accessing language models, including provider configuration, authentication, URL construction, and model instantiation.
+
+## Class Overview
+
+```typescript
+import { MastraModelGateway, type ProviderConfig } from '@mastra/core/llm';
+import { createOpenAICompatible } from '@ai-sdk/openai-compatible-v5';
+import type { LanguageModelV2 } from '@ai-sdk/provider-v5';
+
+class MyCustomGateway extends MastraModelGateway {
+  readonly id = 'custom';
+  readonly name = 'My Custom Gateway';
+
+  async fetchProviders(): Promise<Record<string, ProviderConfig>> {
+    return {
+      'my-provider': {
+        name: 'My Provider',
+        models: ['model-1', 'model-2'],
+        apiKeyEnvVar: 'MY_API_KEY',
+        gateway: this.id,
+      },
+    };
+  }
+
+  buildUrl(modelId: string, envVars?: Record<string, string>): string {
+    return 'https://api.my-provider.com/v1';
+  }
+
+  async getApiKey(modelId: string): Promise<string> {
+    const apiKey = process.env.MY_API_KEY;
+    if (!apiKey) throw new Error('MY_API_KEY not set');
+    return apiKey;
+  }
+
+  async resolveLanguageModel({
+    modelId,
+    providerId,
+    apiKey,
+  }: {
+    modelId: string;
+    providerId: string;
+    apiKey: string;
+  }): Promise<LanguageModelV2> {
+    const baseURL = this.buildUrl(`${providerId}/${modelId}`);
+    return createOpenAICompatible({
+      name: providerId,
+      apiKey,
+      baseURL,
+    }).chatModel(modelId);
+  }
+}
+```
+
+## Required Properties
+
+<PropertiesTable
+  content={[
+    {
+      name: 'id',
+      type: 'string',
+      description: 'Unique identifier for the gateway. This ID is used as the prefix for all providers from this gateway (e.g., "netlify/anthropic"). Exception: models.dev is a provider registry and doesn\'t use a prefix.',
+    },
+    {
+      name: 'name',
+      type: 'string',
+      description: 'Human-readable name for the gateway.',
+    },
+  ]}
+/>
+
+## Required Methods
+
+### fetchProviders()
+
+Fetches provider configurations from the gateway.
+
+**Returns:** `Promise<Record<string, ProviderConfig>>`
+
+**ProviderConfig Structure:**
+<PropertiesTable
+  content={[
+    {
+      name: 'name',
+      type: 'string',
+      description: 'Display name of the provider',
+    },
+    {
+      name: 'models',
+      type: 'string[]',
+      description: 'Array of available model IDs',
+    },
+    {
+      name: 'apiKeyEnvVar',
+      type: 'string | string[]',
+      description: 'Environment variable(s) for API key',
+    },
+    {
+      name: 'gateway',
+      type: 'string',
+      description: 'Gateway identifier',
+    },
+    {
+      name: 'url',
+      type: 'string',
+      isOptional: true,
+      description: 'Optional base API URL',
+    },
+    {
+      name: 'apiKeyHeader',
+      type: 'string',
+      isOptional: true,
+      description: 'Optional custom auth header name',
+    },
+    {
+      name: 'docUrl',
+      type: 'string',
+      isOptional: true,
+      description: 'Optional documentation URL',
+    },
+  ]}
+/>
+
+### buildUrl()
+
+Builds the API URL for a specific model/provider combination.
+
+**Parameters:**
+<PropertiesTable
+  content={[
+    {
+      name: 'modelId',
+      type: 'string',
+      description: 'Full model ID (e.g., "custom/my-provider/model-1")',
+    },
+    {
+      name: 'envVars',
+      type: 'Record<string, string>',
+      isOptional: true,
+      description: 'Optional environment variables',
+    },
+  ]}
+/>
+
+**Returns:** `string | undefined | Promise<string | undefined>`
+
+### getApiKey()
+
+Retrieves the API key for authentication.
+
+**Parameters:**
+<PropertiesTable
+  content={[
+    {
+      name: 'modelId',
+      type: 'string',
+      description: 'Full model ID',
+    },
+  ]}
+/>
+
+**Returns:** `Promise<string>`
+
+### resolveLanguageModel()
+
+Creates a language model instance.
+
+**Parameters:**
+<PropertiesTable
+  content={[
+    {
+      name: 'modelId',
+      type: 'string',
+      description: 'The model ID',
+    },
+    {
+      name: 'providerId',
+      type: 'string',
+      description: 'The provider ID',
+    },
+    {
+      name: 'apiKey',
+      type: 'string',
+      description: 'The API key for authentication',
+    },
+  ]}
+/>
+
+**Returns:** `Promise<LanguageModelV2> | LanguageModelV2`
+
+## Instance Methods
+
+### getId()
+
+Returns the gateway's unique identifier.
+
+**Returns:** `string` - The gateway's `id` property
+
+## Model ID Format
+
+For true gateways, the gateway ID is used as a prefix and models are accessed using this format:
+
+```
+[gateway-id]/[provider]/[model]
+```
+
+Examples:
+- Gateway with `id = 'custom'`: `'custom/my-provider/model-1'`
+
+## Built-in Implementations
+
+- **NetlifyGateway** - Netlify AI Gateway integration
+- **ModelsDevGateway** - Registry of OpenAI-compatible providers
+
+## Related
+
+- [Custom Gateways Guide](/models/gateways/custom-gateways) - Complete guide to creating custom gateways

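As a rough usage sketch for the model ID format above (not part of the diff): once a gateway with `id = 'custom'` is registered via the `gateways` option, an agent could reference a model through it by its prefixed id, assuming string model ids are routed through registered gateways; the agent name, provider, and model ids are placeholders.

```typescript
import { Agent } from "@mastra/core/agent";

// The "custom/" prefix should route model resolution through the registered
// gateway, which builds the URL, resolves the API key, and instantiates the model.
export const agent = new Agent({
  name: "gateway-agent",
  instructions: "You are a helpful assistant.",
  model: "custom/my-provider/model-1",
});
```
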
package/.docs/raw/reference/scorers/answer-relevancy.mdx

@@ -112,115 +112,45 @@ A relevancy score between 0 and 1:
 - **0.1–0.3**: The response includes minimal relevant content and largely misses the intent of the query.
 - **0.0**: The response is entirely unrelated and does not answer the query.

-##
+## Example

-
+Evaluate agent responses for relevancy across different scenarios:

-
-
-```typescript title="src/example-high-answer-relevancy.ts" showLineNumbers copy
+```typescript title="src/example-answer-relevancy.ts" showLineNumbers copy
+import { runExperiment } from "@mastra/core/scores";
 import { createAnswerRelevancyScorer } from "@mastra/evals/scorers/llm";
+import { myAgent } from "./agent";

-const scorer = createAnswerRelevancyScorer({ model: "openai/gpt-4o-mini" });
+const scorer = createAnswerRelevancyScorer({ model: "openai/gpt-4o" });

-const
-
-
-
+const result = await runExperiment({
+  data: [
+    {
+      input: "What are the health benefits of regular exercise?",
+    },
+    {
+      input: "What should a healthy breakfast include?",
+    },
+    {
+      input: "What are the benefits of meditation?",
+    },
+  ],
+  scorers: [scorer],
+  target: myAgent,
+  onItemComplete: ({ scorerResults }) => {
+    console.log({
+      score: scorerResults[scorer.name].score,
+      reason: scorerResults[scorer.name].reason,
+    });
   },
-];
-const outputMessage = {
-  text: "Regular exercise improves cardiovascular health, strengthens muscles, boosts metabolism, and enhances mental well-being through the release of endorphins.",
-};
-
-const result = await scorer.run({
-  input: inputMessages,
-  output: outputMessage,
 });

-console.log(result);
+console.log(result.scores);
 ```

-
-
-The output receives a high score because it accurately answers the query without including unrelated information.
-
-```typescript
-{
-  score: 1,
-  reason: 'The score is 1 because the output directly addresses the question by providing multiple explicit health benefits of regular exercise, including improvements in cardiovascular health, muscle strength, metabolism, and mental well-being. Each point is relevant and contributes to a comprehensive understanding of the health benefits.'
-}
-```
+For more details on `runExperiment`, see the [runExperiment reference](/reference/scorers/run-experiment).

-
-
-In this example, the response addresses the query in part but includes additional information that isn’t directly relevant.
-
-```typescript title="src/example-partial-answer-relevancy.ts" showLineNumbers copy
-import { createAnswerRelevancyScorer } from "@mastra/evals/scorers/llm";
-
-const scorer = createAnswerRelevancyScorer({ model: "openai/gpt-4o-mini" });
-
-const inputMessages = [
-  { role: "user", content: "What should a healthy breakfast include?" },
-];
-const outputMessage = {
-  text: "A nutritious breakfast should include whole grains and protein. However, the timing of your breakfast is just as important - studies show eating within 2 hours of waking optimizes metabolism and energy levels throughout the day.",
-};
-
-const result = await scorer.run({
-  input: inputMessages,
-  output: outputMessage,
-});
-
-console.log(result);
-```
-
-#### Partial relevancy output
-
-The output receives a lower score because it partially answers the query. While some relevant information is included, unrelated details reduce the overall relevance.
-
-```typescript
-{
-  score: 0.25,
-  reason: 'The score is 0.25 because the output provides a direct answer by mentioning whole grains and protein as components of a healthy breakfast, which is relevant. However, the additional information about the timing of breakfast and its effects on metabolism and energy levels is not directly related to the question, leading to a lower overall relevance score.'
-}
-```
-
-## Low relevancy example
-
-In this example, the response does not address the query and contains information that is entirely unrelated.
-
-```typescript title="src/example-low-answer-relevancy.ts" showLineNumbers copy
-import { createAnswerRelevancyScorer } from "@mastra/evals/scorers/llm";
-
-const scorer = createAnswerRelevancyScorer({ model: "openai/gpt-4o-mini" });
-
-const inputMessages = [
-  { role: "user", content: "What are the benefits of meditation?" },
-];
-const outputMessage = {
-  text: "The Great Wall of China is over 13,000 miles long and was built during the Ming Dynasty to protect against invasions.",
-};
-
-const result = await scorer.run({
-  input: inputMessages,
-  output: outputMessage,
-});
-
-console.log(result);
-```
-
-#### Low relevancy output
-
-The output receives a score of 0 because it fails to answer the query or provide any relevant information.
-
-```typescript
-{
-  score: 0,
-  reason: 'The score is 0 because the output about the Great Wall of China is completely unrelated to the benefits of meditation, providing no relevant information or context that addresses the input question.'
-}
-```
+To add this scorer to an agent, see the [Scorers overview](/docs/scorers/overview) guide.

 ## Related
