@mastra/mcp-docs-server 0.13.31 → 0.13.32-alpha.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (65)
  1. package/.docs/organized/changelogs/%40mastra%2Fai-sdk.md +14 -14
  2. package/.docs/organized/changelogs/%40mastra%2Fclient-js.md +8 -8
  3. package/.docs/organized/changelogs/%40mastra%2Fcloudflare.md +14 -14
  4. package/.docs/organized/changelogs/%40mastra%2Fcore.md +45 -45
  5. package/.docs/organized/changelogs/%40mastra%2Fdeployer-cloud.md +9 -9
  6. package/.docs/organized/changelogs/%40mastra%2Fdeployer-cloudflare.md +9 -9
  7. package/.docs/organized/changelogs/%40mastra%2Fdeployer-netlify.md +9 -9
  8. package/.docs/organized/changelogs/%40mastra%2Fdeployer-vercel.md +9 -9
  9. package/.docs/organized/changelogs/%40mastra%2Fdeployer.md +11 -11
  10. package/.docs/organized/changelogs/%40mastra%2Fevals.md +10 -10
  11. package/.docs/organized/changelogs/%40mastra%2Flibsql.md +14 -14
  12. package/.docs/organized/changelogs/%40mastra%2Fmcp-docs-server.md +8 -8
  13. package/.docs/organized/changelogs/%40mastra%2Fpg.md +12 -12
  14. package/.docs/organized/changelogs/%40mastra%2Fplayground-ui.md +14 -14
  15. package/.docs/organized/changelogs/%40mastra%2Freact.md +7 -0
  16. package/.docs/organized/changelogs/%40mastra%2Fserver.md +10 -10
  17. package/.docs/organized/changelogs/%40mastra%2Fvoice-google.md +11 -11
  18. package/.docs/organized/changelogs/create-mastra.md +3 -3
  19. package/.docs/organized/changelogs/mastra.md +11 -11
  20. package/.docs/organized/code-examples/agui.md +2 -2
  21. package/.docs/organized/code-examples/ai-elements.md +2 -2
  22. package/.docs/organized/code-examples/ai-sdk-useChat.md +2 -2
  23. package/.docs/organized/code-examples/ai-sdk-v5.md +2 -2
  24. package/.docs/organized/code-examples/assistant-ui.md +2 -2
  25. package/.docs/organized/code-examples/bird-checker-with-nextjs-and-eval.md +2 -2
  26. package/.docs/organized/code-examples/bird-checker-with-nextjs.md +2 -2
  27. package/.docs/organized/code-examples/client-side-tools.md +2 -2
  28. package/.docs/organized/code-examples/crypto-chatbot.md +2 -2
  29. package/.docs/organized/code-examples/heads-up-game.md +2 -2
  30. package/.docs/organized/code-examples/openapi-spec-writer.md +2 -2
  31. package/.docs/raw/agents/agent-memory.mdx +48 -31
  32. package/.docs/raw/agents/guardrails.mdx +8 -1
  33. package/.docs/raw/agents/networks.mdx +197 -128
  34. package/.docs/raw/agents/overview.mdx +10 -9
  35. package/.docs/raw/frameworks/agentic-uis/ai-sdk.mdx +92 -1
  36. package/.docs/raw/getting-started/installation.mdx +61 -68
  37. package/.docs/raw/memory/conversation-history.mdx +2 -2
  38. package/.docs/raw/memory/semantic-recall.mdx +36 -10
  39. package/.docs/raw/rag/chunking-and-embedding.mdx +19 -7
  40. package/.docs/raw/reference/client-js/agents.mdx +44 -25
  41. package/.docs/raw/reference/scorers/answer-relevancy.mdx +3 -6
  42. package/.docs/raw/reference/scorers/answer-similarity.mdx +7 -13
  43. package/.docs/raw/reference/scorers/bias.mdx +3 -6
  44. package/.docs/raw/reference/scorers/completeness.mdx +3 -6
  45. package/.docs/raw/reference/scorers/context-precision.mdx +6 -9
  46. package/.docs/raw/reference/scorers/context-relevance.mdx +12 -18
  47. package/.docs/raw/reference/scorers/faithfulness.mdx +3 -6
  48. package/.docs/raw/reference/scorers/hallucination.mdx +3 -6
  49. package/.docs/raw/reference/scorers/noise-sensitivity.mdx +13 -23
  50. package/.docs/raw/reference/scorers/prompt-alignment.mdx +16 -20
  51. package/.docs/raw/reference/scorers/tool-call-accuracy.mdx +4 -5
  52. package/.docs/raw/reference/scorers/toxicity.mdx +3 -6
  53. package/.docs/raw/reference/workflows/step.mdx +1 -1
  54. package/.docs/raw/reference/workflows/workflow-methods/sendEvent.mdx +23 -2
  55. package/.docs/raw/reference/workflows/workflow-methods/sleep.mdx +22 -4
  56. package/.docs/raw/reference/workflows/workflow-methods/sleepUntil.mdx +14 -4
  57. package/.docs/raw/reference/workflows/workflow-methods/waitForEvent.mdx +18 -1
  58. package/.docs/raw/server-db/runtime-context.mdx +13 -3
  59. package/.docs/raw/streaming/tool-streaming.mdx +30 -0
  60. package/.docs/raw/tools-mcp/overview.mdx +1 -1
  61. package/.docs/raw/workflows/overview.mdx +1 -1
  62. package/.docs/raw/workflows/suspend-and-resume.mdx +34 -23
  63. package/CHANGELOG.md +7 -0
  64. package/package.json +4 -4
  65. package/.docs/raw/workflows/pausing-execution.mdx +0 -142
package/.docs/raw/getting-started/installation.mdx
@@ -15,8 +15,8 @@ For more control over setup, or to add Mastra to an existing project, see the [m

 ## Before you start

- - You'll need an API key from a [model provider](/models) to complete setup. We recommend starting with [Gemini](https://aistudio.google.com/app/api-keys), as you likely already have a Google account and they don't require a card.
- - Node.js 20 or later.
+ - You'll need an API key from a [model provider](/models) to complete setup. We suggest starting with [OpenAI](https://platform.openai.com/api-keys), but if you need a provider that doesn't require a credit card, Google's [Gemini](https://aistudio.google.com/app/api-keys) is also an option.
+ - [Install](https://nodejs.org/en/download) Node.js 20 or later.

 ## Install with `create mastra`

@@ -89,15 +89,15 @@ If you prefer not to use our automatic `create mastra` CLI tool, you can set up

 <Steps>

- ### Create a new project
+ ### Create project

 Create a new project and change directory:

 ```bash copy
- mkdir hello-mastra && cd hello-mastra
+ mkdir my-first-agent && cd my-first-agent
 ```

- Initialize a TypeScript project including the `@mastra/core` package:
+ Initialize a TypeScript project and install the following dependencies:

 {/*
 LLM CONTEXT: This Tabs component shows manual installation commands for different package managers.
@@ -107,55 +107,42 @@ This helps users manually set up a Mastra project with their preferred package m
 */}

 <Tabs items={["npm", "pnpm", "yarn", "bun"]}>
-
 <Tab>
 ```bash copy
 npm init -y
-
- npm install typescript tsx @types/node mastra@latest --save-dev
-
- npm install @mastra/core@latest zod@^3 @ai-sdk/openai@^1
+ npm install -D typescript @types/node mastra@latest
+ npm install @mastra/core@latest zod@^4
 ```
-
 </Tab>
 <Tab>
 ```bash copy
- pnpm init
-
- pnpm add typescript tsx @types/node mastra@latest --save-dev
-
- pnpm add @mastra/core@latest zod@^3 @ai-sdk/openai@^1
+ pnpm init -y
+ pnpm add -D typescript @types/node mastra@latest
+ pnpm add @mastra/core@latest zod@^4
 ```
-
 </Tab>
 <Tab>
 ```bash copy
 yarn init -y
-
- yarn add typescript tsx @types/node mastra@latest --dev
-
- yarn add @mastra/core@latest zod@^3 @ai-sdk/openai@^1
+ yarn add -D typescript @types/node mastra@latest
+ yarn add @mastra/core@latest zod@^4
 ```
-
 </Tab>
 <Tab>
 ```bash copy
 bun init -y
-
- bun add typescript tsx @types/node mastra@latest --dev
-
- bun add @mastra/core@latest zod@^3 @ai-sdk/openai@^1
+ bun add -d typescript @types/node mastra@latest
+ bun add @mastra/core@latest zod@^4
 ```
-
 </Tab>
 </Tabs>

- Add the `dev` and `build` scripts to `package.json`:
+ Add `dev` and `build` scripts to your `package.json` file:

- ```json filename="package.json" copy
+ ```json filename="package.json" copy /,/ /"dev": "mastra dev",/ /"build": "mastra build"/
 {
 "scripts": {
- // ...
+ "test": "echo \"Error: no test specified\" && exit 1",
 "dev": "mastra dev",
 "build": "mastra build"
 }
@@ -172,9 +159,8 @@ touch tsconfig.json

 Add the following configuration:

- Mastra requires `module` and `moduleResolution` values that support modern Node.js versions. Older settings like `CommonJS` or `node` are incompatible with Mastra’s packages and will cause resolution errors.

- ```json {4-5} filename="tsconfig.json" copy
+ ```json filename="tsconfig.json" copy
 {
 "compilerOptions": {
 "target": "ES2022",
@@ -192,12 +178,13 @@ Mastra requires `module` and `moduleResolution` values that support modern Node.
 ]
 }
 ```
+ <Callout type="info">
+ Mastra requires modern `module` and `moduleResolution` settings. Using `CommonJS` or `node` will cause resolution errors.
+ </Callout>
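Editor's aside, not part of the published diff: the hunk shows only the top of the config, so for orientation here is a minimal sketch of a `tsconfig.json` that satisfies the callout. The `module`/`moduleResolution` pair is the part Mastra cares about; the remaining fields are assumptions drawn from typical Mastra templates, so check the published page for the exact values.

```json
{
  "compilerOptions": {
    "target": "ES2022",
    "module": "ES2022",
    "moduleResolution": "bundler",
    "esModuleInterop": true,
    "strict": true,
    "skipLibCheck": true,
    "outDir": "dist"
  },
  "include": ["src/"]
}
```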

- > This TypeScript configuration is optimized for Mastra projects, using modern module resolution and strict type checking.
-
- ### Set up your API key
+ ### Set API key

- Create `.env` file:
+ Create an `.env` file:

 ```bash copy
 touch .env
@@ -206,12 +193,14 @@ touch .env
 Add your API key:

 ```bash filename=".env" copy
- OPENAI_API_KEY=<your-api-key>
+ GOOGLE_GENERATIVE_AI_API_KEY=<your-api-key>
 ```

- > This example uses OpenAI. Each LLM provider uses a unique name. See [Model Capabilities](/docs/getting-started/model-capability) for more information.
+ <Callout type="default">
+ This guide uses Google Gemini, but you can use any supported [model provider](/models), including OpenAI, Anthropic, and more.
+ </Callout>

- ### Create a Tool
+ ### Add tool

 Create a `weather-tool.ts` file:

@@ -242,9 +231,11 @@ export const weatherTool = createTool({
 });
 ```

- > See the full weatherTool example in [Giving an Agent a Tool](/examples/agents/using-a-tool).
+ <Callout type="info">
+ We've shortened and simplified the `weatherTool` example here. You can see the complete weather tool under [Giving an Agent a Tool](/examples/agents/using-a-tool).
+ </Callout>
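Editor's aside, not part of the published diff: the shortened `weatherTool` the callout mentions sits outside this hunk, but a minimal `createTool` definition along those lines would look roughly like the sketch below. The id, schemas, and canned response are illustrative, not the published code.

```ts
import { createTool } from "@mastra/core/tools";
import { z } from "zod";

export const weatherTool = createTool({
  id: "get-weather",
  description: "Get the current weather for a location",
  inputSchema: z.object({
    location: z.string().describe("City name"),
  }),
  outputSchema: z.object({
    output: z.string(),
  }),
  // Illustrative stub: the full example linked above calls a real weather API
  execute: async () => {
    return { output: "The weather is sunny" };
  },
});
```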

- ### Create an Agent
+ ### Add agent

 Create a `weather-agent.ts` file:

@@ -255,7 +246,6 @@ mkdir -p src/mastra/agents && touch src/mastra/agents/weather-agent.ts
 Add the following code:

 ```ts filename="src/mastra/agents/weather-agent.ts" showLineNumbers copy
- import { openai } from "@ai-sdk/openai";
 import { Agent } from "@mastra/core/agent";
 import { weatherTool } from "../tools/weather-tool";

@@ -266,21 +256,21 @@ export const weatherAgent = new Agent({

 Your primary function is to help users get weather details for specific locations. When responding:
 - Always ask for a location if none is provided
- - If the location name isnt in English, please translate it
+ - If the location name isn't in English, please translate it
 - If giving a location with multiple parts (e.g. "New York, NY"), use the most relevant part (e.g. "New York")
 - Include relevant details like humidity, wind conditions, and precipitation
 - Keep responses concise but informative

 Use the weatherTool to fetch current weather data.
 `,
- model: openai('gpt-4o-mini'),
+ model: "google/gemini-2.5-pro",
 tools: { weatherTool }
 });
 ```

- ### Register the Agent
+ ### Register agent

- Create the Mastra entry point and register agent:
+ Create the Mastra entry point and register your agent:

 ```bash copy
 touch src/mastra/index.ts
@@ -296,28 +286,31 @@ export const mastra = new Mastra({
 agents: { weatherAgent }
 });
 ```
+ ### Test your agent
+ You can now launch the [Playground](/docs/server-db/local-dev-playground) and test your agent.

- You can now launch the [Mastra Development Server](/docs/server-db/local-dev-playground) and test your agent using the Mastra Playground.
-
- </Steps>
-
- ## Add to an existing project
-
- Mastra can be installed and integrated into a wide range of projects. Below are links to integration guides to help you get started:
-
- - [Next.js](/docs/frameworks/web-frameworks/next-js)
- - [Vite + React](/docs/frameworks/web-frameworks/vite-react)
- - [Astro](/docs/frameworks/web-frameworks/astro)
- - [Express](/docs/frameworks/servers/express)
-
-
- ### `mastra init`
-
- To install Mastra in an existing project, use the `mastra init` command.
-
- > See [mastra init](/reference/cli/init) for more information.
+ <Tabs items={["npm", "pnpm", "yarn", "bun"]}>
+ <Tab>
+ ```bash copy
+ npm run dev
+ ```
+ </Tab>
+ <Tab>
+ ```bash copy
+ pnpm run dev
+ ```
+ </Tab>
+ <Tab>
+ ```bash copy
+ yarn run dev
+ ```
+ </Tab>
+ <Tab>
+ ```bash copy
+ bun run dev
+ ```
+ </Tab>
+ </Tabs>

- ### Next steps

- - [Local Development](/docs/server-db/local-dev-playground)
- - [Deploy to Mastra Cloud](/docs/deployment/overview)
+ </Steps>

package/.docs/raw/memory/conversation-history.mdx
@@ -16,8 +16,8 @@ export const testAgent = new Agent({
 // ...
 memory: new Memory({
 options: {
- lastMessages: 100
+ lastMessages: 20
 },
 })
 });
- ```
+ ```

package/.docs/raw/memory/semantic-recall.mdx
@@ -98,40 +98,66 @@ const agent = new Agent({

 ### Embedder configuration

- Semantic recall relies on an [embedding model](/reference/memory/Memory#embedder) to convert messages into embeddings. You can specify any [embedding model](https://sdk.vercel.ai/docs/ai-sdk-core/embeddings) compatible with the AI SDK.
+ Semantic recall relies on an [embedding model](/reference/memory/Memory#embedder) to convert messages into embeddings. Mastra supports embedding models through the model router using `provider/model` strings, or you can use any [embedding model](https://sdk.vercel.ai/docs/ai-sdk-core/embeddings) compatible with the AI SDK.

- To use FastEmbed (a local embedding model), install `@mastra/fastembed`:
+ #### Using the Model Router (Recommended)

- ```bash npm2yarn copy
- npm install @mastra/fastembed
+ The simplest way is to use a `provider/model` string with autocomplete support:
+
+ ```ts {7}
+ import { Memory } from "@mastra/memory";
+ import { Agent } from "@mastra/core/agent";
+
+ const agent = new Agent({
+ memory: new Memory({
+ // ... other memory options
+ embedder: "openai/text-embedding-3-small", // TypeScript autocomplete supported
+ }),
+ });
 ```

- Then configure it in your memory:
+ Supported embedding models:
+ - **OpenAI**: `text-embedding-3-small`, `text-embedding-3-large`, `text-embedding-ada-002`
+ - **Google**: `gemini-embedding-001`, `text-embedding-004`
+
+ The model router automatically handles API key detection from environment variables (`OPENAI_API_KEY`, `GOOGLE_GENERATIVE_AI_API_KEY`).
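Editor's aside, not part of the published diff: given the variable names listed above, pointing the model router at a provider is just a matter of exporting the matching key, e.g.:

```bash
# Only the variable matching your chosen provider/model string is required
export OPENAI_API_KEY=<your-api-key>
export GOOGLE_GENERATIVE_AI_API_KEY=<your-api-key>
```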
+
+ #### Using AI SDK Packages
+
+ You can also use AI SDK embedding models directly:

 ```ts {3,8}
 import { Memory } from "@mastra/memory";
 import { Agent } from "@mastra/core/agent";
- import { fastembed } from "@mastra/fastembed";
+ import { openai } from "@ai-sdk/openai";

 const agent = new Agent({
 memory: new Memory({
 // ... other memory options
- embedder: fastembed,
+ embedder: openai.embedding("text-embedding-3-small"),
 }),
 });
 ```

- Alternatively, use a different provider like OpenAI:
+ #### Using FastEmbed (Local)
+
+ To use FastEmbed (a local embedding model), install `@mastra/fastembed`:
+
+ ```bash npm2yarn copy
+ npm install @mastra/fastembed
+ ```
+
+ Then configure it in your memory:

 ```ts {3,8}
 import { Memory } from "@mastra/memory";
 import { Agent } from "@mastra/core/agent";
- import { openai } from "@ai-sdk/openai";
+ import { fastembed } from "@mastra/fastembed";

 const agent = new Agent({
 memory: new Memory({
 // ... other memory options
- embedder: openai.embedding("text-embedding-3-small"),
+ embedder: fastembed,
 }),
 });
 ```

package/.docs/raw/rag/chunking-and-embedding.mdx
@@ -73,28 +73,40 @@ We go deeper into chunking strategies in our [chunk documentation](/reference/ra

 ## Step 2: Embedding Generation

- Transform chunks into embeddings using your preferred provider. Mastra supports many embedding providers, including OpenAI and Cohere:
+ Transform chunks into embeddings using your preferred provider. Mastra supports embedding models through the model router or AI SDK packages.

- ### Using OpenAI
+ ### Using the Model Router (Recommended)
+
+ The simplest way is to use Mastra's model router with `provider/model` strings:

 ```ts showLineNumbers copy
- import { openai } from "@ai-sdk/openai";
+ import { ModelRouterEmbeddingModel } from "@mastra/core";
 import { embedMany } from "ai";

+ const embeddingModel = new ModelRouterEmbeddingModel("openai/text-embedding-3-small");
+
 const { embeddings } = await embedMany({
- model: openai.embedding("text-embedding-3-small"),
+ model: embeddingModel,
 values: chunks.map((chunk) => chunk.text),
 });
 ```

- ### Using Cohere
+ Supported embedding models:
+ - **OpenAI**: `text-embedding-3-small`, `text-embedding-3-large`, `text-embedding-ada-002`
+ - **Google**: `gemini-embedding-001`, `text-embedding-004`
+
+ The model router automatically handles API key detection from environment variables.
+
+ ### Using AI SDK Packages
+
+ You can also use AI SDK embedding models directly:

 ```ts showLineNumbers copy
- import { cohere } from "@ai-sdk/cohere";
+ import { openai } from "@ai-sdk/openai";
 import { embedMany } from "ai";

 const { embeddings } = await embedMany({
- model: cohere.embedding("embed-english-v3.0"),
+ model: openai.embedding("text-embedding-3-small"),
 values: chunks.map((chunk) => chunk.text),
 });
 ```

package/.docs/raw/reference/client-js/agents.mdx
@@ -67,27 +67,11 @@ const response = await agent.stream({

 // Process data stream with the processDataStream util
 response.processDataStream({
- onTextPart: (text) => {
- process.stdout.write(text);
- },
- onFilePart: (file) => {
- console.log(file);
- },
- onDataPart: (data) => {
- console.log(data);
- },
- onErrorPart: (error) => {
- console.error(error);
+ onChunk: async(chunk) => {
+ console.log(chunk);
 },
 });

- // Process text stream with the processTextStream util
- // (used with structured output)
- response.processTextStream({
- onTextPart: text => {
- process.stdout.write(text);
- },
- });

 // You can also read from response body directly
 const reader = response.body.getReader();
@@ -134,8 +118,13 @@ const response = await agent.stream({
 });

 response.processDataStream({
- onTextPart: (text) => console.log(text),
- onToolCallPart: (toolCall) => console.log('Tool called:', toolCall.toolName),
+ onChunk: async (chunk) => {
+ if (chunk.type === 'text-delta') {
+ console.log(chunk.payload.text);
+ } else if (chunk.type === 'tool-call') {
+ console.log(`calling tool ${chunk.payload.toolName} with args ${JSON.stringify(chunk.payload.args, null, 2)}`);
+ }
+ },
 });
 ```

@@ -176,15 +165,45 @@ const response = await agent.stream(

 // Process the stream
 response.processDataStream({
- onChunk: (chunk) => {
- console.log(chunk);
+ onChunk: async (chunk) => {
+ if (chunk.type === 'text-delta') {
+ console.log(chunk.payload.text);
+ }
 },
 });
 ```

- Currently, AI SDK V5 format is not supported in the client SDK.
- For AI SDK v5 compatible format, leverage the `@mastra/ai-sdk` package
- [AI SDK v5 Stream Compatibility](/docs/frameworks/agentic-uis/ai-sdk#enabling-stream-compatibility)
+ #### AI SDK compatible format
+
+ To stream AI SDK-formatted parts on the client from an `agent.stream(...)` response, wrap `response.processDataStream` into a `ReadableStream<ChunkType>` and use `toAISdkFormat`:
+
+ ```typescript filename="client-ai-sdk-transform.ts" copy
+ import { createUIMessageStream } from 'ai';
+ import { toAISdkFormat } from '@mastra/ai-sdk';
+ import type { ChunkType, MastraModelOutput } from '@mastra/core/stream';
+
+ const response = await agent.stream({ messages: 'Tell me a story' });
+
+ const chunkStream: ReadableStream<ChunkType> = new ReadableStream<ChunkType>({
+ start(controller) {
+ response.processDataStream({
+ onChunk: async (chunk) => controller.enqueue(chunk as ChunkType),
+ }).finally(() => controller.close());
+ },
+ });
+
+ const uiMessageStream = createUIMessageStream({
+ execute: async ({ writer }) => {
+ for await (const part of toAISdkFormat(chunkStream as unknown as MastraModelOutput, { from: 'agent' })) {
+ writer.write(part);
+ }
+ },
+ });
+
+ for await (const part of uiMessageStream) {
+ console.log(part);
+ }
+ ```

 ### Generate

package/.docs/raw/reference/scorers/answer-relevancy.mdx
@@ -116,10 +116,9 @@ A relevancy score between 0 and 1:
 In this example, the response accurately addresses the input query with specific and relevant information.

 ```typescript filename="src/example-high-answer-relevancy.ts" showLineNumbers copy
- import { openai } from "@ai-sdk/openai";
 import { createAnswerRelevancyScorer } from "@mastra/evals/scorers/llm";

- const scorer = createAnswerRelevancyScorer({ model: openai("gpt-4o-mini") });
+ const scorer = createAnswerRelevancyScorer({ model: 'openai/gpt-4o-mini' });

 const inputMessages = [{ role: 'user', content: "What are the health benefits of regular exercise?" }];
 const outputMessage = { text: "Regular exercise improves cardiovascular health, strengthens muscles, boosts metabolism, and enhances mental well-being through the release of endorphins." };
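Editor's aside, not part of the published diff: the hunk ends before the unchanged lines that actually execute the scorer. Judging from the surrounding scorer docs it presumably continues roughly like this; the run call shape and result fields are an assumption:

```typescript
// Hypothetical continuation (unchanged context not shown in the hunk)
const result = await scorer.run({
  input: inputMessages,
  output: outputMessage,
});

console.log(result.score);  // numeric relevancy score between 0 and 1
console.log(result.reason); // model-generated explanation of the score
```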
@@ -148,10 +147,9 @@ The output receives a high score because it accurately answers the query without
 In this example, the response addresses the query in part but includes additional information that isn’t directly relevant.

 ```typescript filename="src/example-partial-answer-relevancy.ts" showLineNumbers copy
- import { openai } from "@ai-sdk/openai";
 import { createAnswerRelevancyScorer } from "@mastra/evals/scorers/llm";

- const scorer = createAnswerRelevancyScorer({ model: openai("gpt-4o-mini") });
+ const scorer = createAnswerRelevancyScorer({ model: 'openai/gpt-4o-mini' });

 const inputMessages = [{ role: 'user', content: "What should a healthy breakfast include?" }];
 const outputMessage = { text: "A nutritious breakfast should include whole grains and protein. However, the timing of your breakfast is just as important - studies show eating within 2 hours of waking optimizes metabolism and energy levels throughout the day." };
@@ -180,10 +178,9 @@ The output receives a lower score because it partially answers the query. While
 In this example, the response does not address the query and contains information that is entirely unrelated.

 ```typescript filename="src/example-low-answer-relevancy.ts" showLineNumbers copy
- import { openai } from "@ai-sdk/openai";
 import { createAnswerRelevancyScorer } from "@mastra/evals/scorers/llm";

- const scorer = createAnswerRelevancyScorer({ model: openai("gpt-4o-mini") });
+ const scorer = createAnswerRelevancyScorer({ model: 'openai/gpt-4o-mini' });

 const inputMessages = [{ role: 'user', content: "What are the benefits of meditation?" }];
 const outputMessage = { text: "The Great Wall of China is over 13,000 miles long and was built during the Ming Dynasty to protect against invasions." };

package/.docs/raw/reference/scorers/answer-similarity.mdx
@@ -175,12 +175,11 @@ await runExperiment({
 In this example, the agent's output semantically matches the ground truth perfectly.

 ```typescript filename="src/example-perfect-similarity.ts" showLineNumbers copy
- import { openai } from "@ai-sdk/openai";
 import { runExperiment } from "@mastra/core/scores";
 import { createAnswerSimilarityScorer } from "@mastra/evals/scorers/llm";
 import { myAgent } from "./agent";

- const scorer = createAnswerSimilarityScorer({ model: openai("gpt-4o-mini") });
+ const scorer = createAnswerSimilarityScorer({ model: 'openai/gpt-4o-mini' });

 const result = await runExperiment({
 data: [
@@ -214,12 +213,11 @@ The output receives a perfect score because both the agent's answer and ground t
 In this example, the agent provides the same information as the ground truth but with different phrasing.

 ```typescript filename="src/example-semantic-similarity.ts" showLineNumbers copy
- import { openai } from "@ai-sdk/openai";
 import { runExperiment } from "@mastra/core/scores";
 import { createAnswerSimilarityScorer } from "@mastra/evals/scorers/llm";
 import { myAgent } from "./agent";

- const scorer = createAnswerSimilarityScorer({ model: openai("gpt-4o-mini") });
+ const scorer = createAnswerSimilarityScorer({ model: 'openai/gpt-4o-mini' });

 const result = await runExperiment({
 data: [
@@ -253,12 +251,11 @@ The output receives a high score because it conveys the same information with eq
 In this example, the agent's response is partially correct but missing key information.

 ```typescript filename="src/example-partial-similarity.ts" showLineNumbers copy
- import { openai } from "@ai-sdk/openai";
 import { runExperiment } from "@mastra/core/scores";
 import { createAnswerSimilarityScorer } from "@mastra/evals/scorers/llm";
 import { myAgent } from "./agent";

- const scorer = createAnswerSimilarityScorer({ model: openai("gpt-4o-mini") });
+ const scorer = createAnswerSimilarityScorer({ model: 'openai/gpt-4o-mini' });

 const result = await runExperiment({
 data: [
@@ -292,12 +289,11 @@ The output receives a moderate score because it includes some correct informatio
 In this example, the agent provides factually incorrect information that contradicts the ground truth.

 ```typescript filename="src/example-contradiction.ts" showLineNumbers copy
- import { openai } from "@ai-sdk/openai";
 import { runExperiment } from "@mastra/core/scores";
 import { createAnswerSimilarityScorer } from "@mastra/evals/scorers/llm";
 import { myAgent } from "./agent";

- const scorer = createAnswerSimilarityScorer({ model: openai("gpt-4o-mini") });
+ const scorer = createAnswerSimilarityScorer({ model: 'openai/gpt-4o-mini' });

 const result = await runExperiment({
 data: [
@@ -332,13 +328,12 @@ Use the scorer in your test suites to ensure agent consistency over time:

 ```typescript filename="src/ci-integration.test.ts" showLineNumbers copy
 import { describe, it, expect } from 'vitest';
- import { openai } from "@ai-sdk/openai";
 import { runExperiment } from "@mastra/core/scores";
 import { createAnswerSimilarityScorer } from "@mastra/evals/scorers/llm";
 import { myAgent } from "./agent";

 describe('Agent Consistency Tests', () => {
- const scorer = createAnswerSimilarityScorer({ model: openai("gpt-4o-mini") });
+ const scorer = createAnswerSimilarityScorer({ model: 'openai/gpt-4o-mini' });

 it('should provide accurate factual answers', async () => {
 const result = await runExperiment({
@@ -386,14 +381,13 @@ describe('Agent Consistency Tests', () => {
 Customize the scorer behavior for specific use cases:

 ```typescript filename="src/custom-config.ts" showLineNumbers copy
- import { openai } from "@ai-sdk/openai";
 import { runExperiment } from "@mastra/core/scores";
 import { createAnswerSimilarityScorer } from "@mastra/evals/scorers/llm";
 import { myAgent } from "./agent";

 // Configure for strict exact matching with high scale
 const strictScorer = createAnswerSimilarityScorer({
- model: openai("gpt-4o-mini"),
+ model: 'openai/gpt-4o-mini',
 options: {
 exactMatchBonus: 0.5, // Higher bonus for exact matches
 contradictionPenalty: 2.0, // Very strict on contradictions
@@ -404,7 +398,7 @@ const strictScorer = createAnswerSimilarityScorer({

 // Configure for lenient semantic matching
 const lenientScorer = createAnswerSimilarityScorer({
- model: openai("gpt-4o-mini"),
+ model: 'openai/gpt-4o-mini',
 options: {
 semanticThreshold: 0.6, // Lower threshold for semantic matches
 contradictionPenalty: 0.5, // More forgiving on minor contradictions

package/.docs/raw/reference/scorers/bias.mdx
@@ -127,10 +127,9 @@ A bias score between 0 and 1:
 In this example, the response contains overtly biased language and generalizations. The assumptions are explicit and strongly reinforce stereotypes or discriminatory views.

 ```typescript filename="src/example-high-bias.ts" showLineNumbers copy
- import { openai } from "@ai-sdk/openai";
 import { createBiasScorer } from "@mastra/evals/scorers/llm";

- const scorer = createBiasScorer({ model: openai("gpt-4o-mini") });
+ const scorer = createBiasScorer({ model: 'openai/gpt-4o-mini' });

 const query = "What makes someone a good leader?";
 const response =
@@ -160,10 +159,9 @@ The output receives a high score because it's a clear example of discriminatory
 In this example, the response contains a mix of reasonable points with subtle bias. While parts of the response are fair, underlying bias is still present.

 ```typescript filename="src/example-mixed-bias.ts" showLineNumbers copy
- import { openai } from "@ai-sdk/openai";
 import { createBiasScorer } from "@mastra/evals/scorers/llm";

- const scorer = createBiasScorer({ model: openai("gpt-4o-mini") });
+ const scorer = createBiasScorer({ model: 'openai/gpt-4o-mini' });

 const query = "How do different age groups perform at work?";
 const response =
@@ -193,10 +191,9 @@ The output receives a lower score because the response introduces bias in a more
 In this example, the response focuses on objective and neutral criteria without introducing biased assumptions.

 ```typescript filename="src/example-low-bias.ts" showLineNumbers copy
- import { openai } from "@ai-sdk/openai";
 import { createBiasScorer } from "@mastra/evals/scorers/llm";

- const scorer = createBiasScorer({ model: openai("gpt-4o-mini") });
+ const scorer = createBiasScorer({ model: 'openai/gpt-4o-mini' });

 const query = "What is the best hiring practice?";
 const response =