@mastra/mcp-docs-server 0.13.10 → 0.13.11-alpha.1
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/.docs/organized/changelogs/%40internal%2Fstorage-test-utils.md +9 -9
- package/.docs/organized/changelogs/%40internal%2Ftypes-builder.md +2 -0
- package/.docs/organized/changelogs/%40mastra%2Fclient-js.md +26 -26
- package/.docs/organized/changelogs/%40mastra%2Fcore.md +30 -30
- package/.docs/organized/changelogs/%40mastra%2Fdeployer-cloudflare.md +20 -20
- package/.docs/organized/changelogs/%40mastra%2Fdeployer-netlify.md +20 -20
- package/.docs/organized/changelogs/%40mastra%2Fdeployer-vercel.md +20 -20
- package/.docs/organized/changelogs/%40mastra%2Fdeployer.md +30 -30
- package/.docs/organized/changelogs/%40mastra%2Ffirecrawl.md +13 -13
- package/.docs/organized/changelogs/%40mastra%2Flibsql.md +9 -9
- package/.docs/organized/changelogs/%40mastra%2Fmcp-docs-server.md +26 -26
- package/.docs/organized/changelogs/%40mastra%2Fmemory.md +21 -21
- package/.docs/organized/changelogs/%40mastra%2Fpg.md +9 -9
- package/.docs/organized/changelogs/%40mastra%2Fplayground-ui.md +21 -21
- package/.docs/organized/changelogs/%40mastra%2Frag.md +12 -12
- package/.docs/organized/changelogs/%40mastra%2Fschema-compat.md +7 -0
- package/.docs/organized/changelogs/%40mastra%2Fserver.md +26 -26
- package/.docs/organized/changelogs/create-mastra.md +11 -11
- package/.docs/organized/changelogs/mastra.md +31 -31
- package/.docs/organized/code-examples/agent-network.md +4 -3
- package/.docs/organized/code-examples/agent.md +33 -2
- package/.docs/raw/agents/overview.mdx +21 -1
- package/.docs/raw/getting-started/mcp-docs-server.mdx +2 -2
- package/.docs/raw/rag/chunking-and-embedding.mdx +11 -0
- package/.docs/raw/reference/agents/agent.mdx +64 -38
- package/.docs/raw/reference/agents/generate.mdx +206 -202
- package/.docs/raw/reference/agents/getAgent.mdx +23 -38
- package/.docs/raw/reference/agents/getDefaultGenerateOptions.mdx +62 -0
- package/.docs/raw/reference/agents/getDefaultStreamOptions.mdx +62 -0
- package/.docs/raw/reference/agents/getDefaultVNextStreamOptions.mdx +62 -0
- package/.docs/raw/reference/agents/getDescription.mdx +30 -0
- package/.docs/raw/reference/agents/getInstructions.mdx +36 -73
- package/.docs/raw/reference/agents/getLLM.mdx +69 -0
- package/.docs/raw/reference/agents/getMemory.mdx +42 -119
- package/.docs/raw/reference/agents/getModel.mdx +36 -75
- package/.docs/raw/reference/agents/getScorers.mdx +62 -0
- package/.docs/raw/reference/agents/getTools.mdx +36 -128
- package/.docs/raw/reference/agents/getVoice.mdx +36 -83
- package/.docs/raw/reference/agents/getWorkflows.mdx +37 -74
- package/.docs/raw/reference/agents/stream.mdx +263 -226
- package/.docs/raw/reference/agents/streamVNext.mdx +208 -402
- package/.docs/raw/reference/cli/build.mdx +1 -0
- package/.docs/raw/reference/rag/chunk.mdx +51 -2
- package/.docs/raw/reference/scorers/answer-relevancy.mdx +6 -6
- package/.docs/raw/reference/scorers/bias.mdx +6 -6
- package/.docs/raw/reference/scorers/completeness.mdx +2 -2
- package/.docs/raw/reference/scorers/content-similarity.mdx +1 -1
- package/.docs/raw/reference/scorers/create-scorer.mdx +445 -0
- package/.docs/raw/reference/scorers/faithfulness.mdx +6 -6
- package/.docs/raw/reference/scorers/hallucination.mdx +6 -6
- package/.docs/raw/reference/scorers/keyword-coverage.mdx +2 -2
- package/.docs/raw/reference/scorers/mastra-scorer.mdx +116 -158
- package/.docs/raw/reference/scorers/toxicity.mdx +2 -2
- package/.docs/raw/scorers/custom-scorers.mdx +166 -268
- package/.docs/raw/scorers/overview.mdx +21 -13
- package/.docs/raw/server-db/local-dev-playground.mdx +3 -3
- package/package.json +5 -5
- package/.docs/raw/reference/agents/createTool.mdx +0 -241
- package/.docs/raw/reference/scorers/custom-code-scorer.mdx +0 -155
- package/.docs/raw/reference/scorers/llm-scorer.mdx +0 -210
package/.docs/organized/changelogs/%40mastra%2Fschema-compat.md

@@ -1,5 +1,12 @@
 # @mastra/schema-compat
 
+## 0.10.7-alpha.0
+
+### Patch Changes
+
+- dd94a26: Dont rely on the full language model for schema compat
+- 2fff911: Fix vnext working memory tool schema when model is incompatible with schema
+
 ## 0.10.6
 
 ### Patch Changes

package/.docs/organized/changelogs/%40mastra%2Fdeployer.md

@@ -1,5 +1,30 @@
 # @mastra/deployer
 
+## 0.13.2-alpha.1
+
+### Patch Changes
+
+- Updated dependencies [2e74797]
+- Updated dependencies [63449d0]
+- @mastra/core@0.13.2-alpha.1
+
+## 0.13.2-alpha.0
+
+### Patch Changes
+
+- Updated dependencies [8388649]
+- Updated dependencies [dd94a26]
+- Updated dependencies [3ba6772]
+- Updated dependencies [2fff911]
+- @mastra/core@0.13.2-alpha.0
+
+## 0.13.1
+
+### Patch Changes
+
+- Updated dependencies [cd0042e]
+- @mastra/core@0.13.1
+
 ## 0.13.1-alpha.0
 
 ### Patch Changes

@@ -273,30 +298,5 @@
 
 ### Patch Changes
 
-- 62007b3: Fix upserting memory messages via hono endpoints
-- @mastra/core@0.11.0-alpha.3
-
-## 0.11.0-alpha.2
-
-### Patch Changes
-
-- f248d53: Adding `getMessagesPaginated` to the serve, deployer, and client-js
-- 35b1155: Added "Semantic recall search" to playground UI chat sidebar, to search for messages and find them in the chat list
-- 65e3395: Add Scores playground-ui and add scorer hooks
-- bea9dd1: Refactor Agent class to consolidate LLM generate and stream methods and improve type safety. This includes
-extracting common logic into prepareLLMOptions(), enhancing type definitions, and fixing test annotations.
-
-This changeset entry follows the established format in your project:
-- Targets the @mastra/core package with a patch version bump
-- Provides a concise description of the refactoring and type safety improvements
-- Mentions the key changes without being too verbose
-
-- dcd4802: scores mastra server
-- Updated dependencies [f248d53]
-- Updated dependencies [2affc57]
-- Updated dependencies [66e13e3]
-- Updated dependencies [edd9482]
-- Updated dependencies [18344d7]
-- Updated dependencies [9d372c2]
 
-...
+... 2742 more lines hidden. See full changelog in package directory.

package/.docs/organized/changelogs/create-mastra.md

@@ -1,5 +1,15 @@
 # create-mastra
 
+## 0.10.21-alpha.1
+
+## 0.10.21-alpha.0
+
+### Patch Changes
+
+- 7aad750: Fix tool ui showing after message when chat is refreshed
+
+## 0.10.20
+
 ## 0.10.20-alpha.0
 
 ## 0.10.19

@@ -288,15 +298,5 @@
 - Updated dependency [`posthog-node@^4.18.0` ↗︎](https://www.npmjs.com/package/posthog-node/v/4.18.0) (from `^4.10.1`, in `dependencies`)
 - 5f2aa3e: Move workflow hooks to playground
 - 44ba52d: Add proper error message when installation of mastra fails
-- 311132e: move useWorkflow to playground instead of playground-ui
-- 3270d9d: Fix runtime context being undefined
-- 53d3c37: Get workflows from an agent if not found from Mastra instance #5083
-- fc677d7: For final result for a workflow
-
-## 0.10.5-alpha.2
-
-### Patch Changes
-
-- 5f2aa3e: Move workflow hooks to playground
 
-...
+... 1171 more lines hidden. See full changelog in package directory.

package/.docs/organized/changelogs/mastra.md

@@ -1,5 +1,35 @@
 # mastra
 
+## 0.10.21-alpha.1
+
+### Patch Changes
+
+- 63449d0: Change the globbing of tools to exclude test files. Files inside `__tests__` directory and files with `.test.` or `.spec.` in their file name are now excluded from bundling.
+- Updated dependencies [2e74797]
+- Updated dependencies [63449d0]
+- @mastra/core@0.13.2-alpha.1
+- @mastra/deployer@0.13.2-alpha.1
+
+## 0.10.21-alpha.0
+
+### Patch Changes
+
+- 7aad750: Fix tool ui showing after message when chat is refreshed
+- Updated dependencies [8388649]
+- Updated dependencies [dd94a26]
+- Updated dependencies [3ba6772]
+- Updated dependencies [2fff911]
+- @mastra/core@0.13.2-alpha.0
+- @mastra/deployer@0.13.2-alpha.0
+
+## 0.10.20
+
+### Patch Changes
+
+- Updated dependencies [cd0042e]
+- @mastra/core@0.13.1
+- @mastra/deployer@0.13.1
+
 ## 0.10.20-alpha.0
 
 ### Patch Changes

@@ -269,34 +299,4 @@
 
 ### Patch Changes
 
-
-- af1f902: share thread list between agent, network and cloud
-- 8f89bcd: fix traces pagination + sharing trace view with cloud
-- 0bf0bc8: fix link in shared components + add e2e tests
-- 2affc57: Fix output type of network loop
-- 51192f8: Spread the loaded env into the main process
-- f6c4d75: fix date picker on change
-- 59f0dcd: Add light background color for step statuses
-- 698518b: Allow external templates from github
-- cf8d497: factorize tabs component between cloud and core
-- 7827943: Handle streaming large data
-- 808b493: wrap runtime context with tooltip provider for usage in cloud
-- 8364fac: Fix displaying scorer input
-- 09464dd: Share AgentMetadata component with cloud
-- 80692d5: refactor: sharing only the UI and not data fetching for traces
-- 80c2b06: Fix agent chat stop button to cancel stream/generate reqs in the playground
-- Updated dependencies [f248d53]
-- Updated dependencies [82c6860]
-- Updated dependencies [2affc57]
-- Updated dependencies [66e13e3]
-- Updated dependencies [edd9482]
-- Updated dependencies [0938991]
-- Updated dependencies [18344d7]
-- Updated dependencies [7ba91fa]
-- Updated dependencies [a512ede]
-- Updated dependencies [35b1155]
-- Updated dependencies [9d372c2]
-- Updated dependencies [45469c5]
-- Updated dependencies [40c2525]
-
-... 4797 more lines hidden. See full changelog in package directory.
+... 4827 more lines hidden. See full changelog in package directory.

package/.docs/organized/code-examples/agent-network.md

@@ -10,6 +10,7 @@
     "@mastra/libsql": "latest",
     "@mastra/loggers": "latest",
     "@mastra/memory": "latest",
+    "mastra": "latest",
     "zod": "^3.25.67"
   }
 }

@@ -205,7 +206,7 @@ const agent1 = new Agent({
     'This agent is used to do research, but not create full responses. Answer in bullet points only and be concise.',
   description:
     'This agent is used to do research, but not create full responses. Answer in bullet points only and be concise.',
-  model:
+  model: openai('gpt-4o'),
 });
 
 const agent2 = new Agent({

@@ -213,7 +214,7 @@ const agent2 = new Agent({
   description: 'This agent is used to do text synthesis on researched material. It writes articles in full paragraphs.',
   instructions:
     'This agent is used to do text synthesis on researched material. Write a full report based on the researched material. Do not use bullet points. Write full paragraphs. There should not be a single bullet point in the final report. You write articles.',
-  model:
+  model: openai('gpt-4o'),
 });
 
 const agentStep1 = createStep({

@@ -275,7 +276,7 @@ export const v_nextNetwork = new NewAgentNetwork({
   name: 'Test Network',
   instructions:
     'You can research cities. You can also synthesize research material. You can also write a full report based on the researched material. You can also get weather information. workflow1 is the best primitive for researching an *individual* city and it should always be used when running multiple primitives to accomplish a task.',
-  model:
+  model: openai('gpt-4o'),
   agents: {
     agent1,
     agent2,
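
The `model: openai('gpt-4o')` values added above assume the AI SDK OpenAI provider. As a reminder (an assumption about this example file, not a line from the diff), the corresponding import is:

```typescript
// Provider import assumed by the `model: openai('gpt-4o')` lines above.
import { openai } from "@ai-sdk/openai";
```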

package/.docs/organized/code-examples/agent.md

@@ -12,6 +12,7 @@
     "@mastra/memory": "latest",
     "@mastra/voice-openai": "latest",
     "@mastra/libsql": "latest",
+    "@mastra/evals": "latest",
     "ai": "^4.3.16",
     "fetch-to-node": "^2.1.0",
     "mastra": "latest",

@@ -469,6 +470,7 @@ import {
   ModerationInputProcessor,
 } from '@mastra/core/agent/input-processor/processors';
 import { MCPClient } from '@mastra/mcp';
+import { createAnswerRelevancyScorer } from '@mastra/evals/scorers/llm';
 
 const memory = new Memory();
 

@@ -645,6 +647,35 @@ export const chefAgentResponses = new Agent({
   ],
 });
 
+const answerRelevance = createAnswerRelevancyScorer({
+  model: openai('gpt-4o'),
+});
+
+console.log(`answerRelevance`, answerRelevance);
+
+export const evalAgent = new Agent({
+  name: 'Eval Agent',
+  instructions: `
+    You are a helpful assistant with a weather tool.
+  `,
+  model: openai('gpt-4o'),
+  tools: {
+    weatherInfo,
+  },
+  memory: new Memory({
+    options: {
+      workingMemory: {
+        enabled: true,
+      },
+    },
+  }),
+  scorers: {
+    answerRelevance: {
+      scorer: answerRelevance,
+    },
+  },
+});
+
 ```
 
 ### mastra/index.ts

@@ -653,7 +684,7 @@ import { Mastra } from '@mastra/core';
 import { PinoLogger } from '@mastra/loggers';
 import { LibSQLStore } from '@mastra/libsql';
 
-import { chefAgent, chefAgentResponses, dynamicAgent } from './agents/index';
+import { chefAgent, chefAgentResponses, dynamicAgent, evalAgent } from './agents/index';
 import { myMcpServer, myMcpServerTwo } from './mcp/server';
 import { myWorkflow } from './workflows';
 

@@ -662,7 +693,7 @@ const storage = new LibSQLStore({
 });
 
 export const mastra = new Mastra({
-  agents: { chefAgent, chefAgentResponses, dynamicAgent },
+  agents: { chefAgent, chefAgentResponses, dynamicAgent, evalAgent },
   logger: new PinoLogger({ name: 'Chef', level: 'debug' }),
   storage,
   mcpServers: {

package/.docs/raw/agents/overview.mdx

@@ -238,7 +238,7 @@ const response = await testAgent.generate(
 );
 ```
 
-###
+### Using `onStepFinish`
 
 You can monitor the progress of multi-step operations using the `onStepFinish` callback. This is useful for debugging or providing progress updates to users.
 

@@ -256,6 +256,26 @@ const response = await testAgent.generate(
 );
 ```
 
+### Streaming steps with `onChunk`
+
+You can monitor the progress of multi-step operations using the `onChunk` callback. This is useful for debugging or providing progress updates to users.
+
+```typescript showLineNumbers copy
+const stream = await testAgent.stream(
+  [{ role: "user", content: "Calculate the taxi driver's daily earnings." }],
+  {
+    maxSteps: 5,
+    onChunk: ({ chunk }) => {
+      console.log("Chunk", chunk);
+    },
+  },
+);
+
+for await (const chunk of stream.textStream) {
+  console.log(chunk);
+}
+```
+
 ### Detecting completion with `onFinish`
 
 The `onFinish` callback is available when streaming responses and provides detailed information about the completed interaction. It is called after the LLM has finished generating its response and all tool executions have completed.
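
Alongside the `onChunk` example added above, the existing `onFinish` section describes a completion callback. A hedged sketch (not part of this diff) of wiring `onFinish` into the same `testAgent` stream follows; the exact fields available on the result depend on the installed Mastra/AI SDK versions:

```typescript
const stream = await testAgent.stream(
  [{ role: "user", content: "Calculate the taxi driver's daily earnings." }],
  {
    maxSteps: 5,
    // Called after the LLM response and all tool executions have completed.
    onFinish: (result) => {
      console.log("Finish reason:", result.finishReason);
      console.log("Token usage:", result.usage);
    },
  },
);

for await (const chunk of stream.textStream) {
  console.log(chunk);
}
```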

package/.docs/raw/getting-started/mcp-docs-server.mdx

@@ -51,7 +51,7 @@ To add the MCP Docs Server to an existing project, install it manually.
 
 - **Cursor**: Edit `.cursor/mcp.json` in your project root, or `~/.cursor/mcp.json` for global configuration
 - **Windsurf**: Edit `~/.codeium/windsurf/mcp_config.json` (only supports global configuration)
-- **VSCode**: Edit `~/.vscode/mcp.json` in your project root
+- **VSCode**: Either move the created `.vscode` folder into the top-level of your workspace or open the created folder as your new workspace root. Edit `~/.vscode/mcp.json` in your project root.
 Add the following configuration:
 
 ### MacOS/Linux

@@ -194,7 +194,7 @@ In both IDEs it may take a minute for the MCP server to start the first time as
   className="rounded-lg"
 />
 
-MCP only works in Agent mode in VSCode. Once you are in agent mode, open the `mcp.json` file and click the "start" button.
+MCP only works in Agent mode in VSCode. Once you are in agent mode, open the `mcp.json` file and click the "start" button. Note that the "start" button will only appear if the `.vscode` folder containing `mcp.json` is in your workspace root, or the highest level of the in-editor file explorer.
 
 <br />
 <img

package/.docs/raw/rag/chunking-and-embedding.mdx

@@ -22,6 +22,7 @@ Use `chunk` to split documents into manageable pieces. Mastra supports multiple
 - `character`: Simple character-based splits
 - `token`: Token-aware splitting
 - `markdown`: Markdown-aware splitting
+- `semantic-markdown`: Markdown splitting based on related header families
 - `html`: HTML structure-aware splitting
 - `json`: JSON structure-aware splitting
 - `latex`: LaTeX structure-aware splitting

@@ -56,6 +57,16 @@ const chunks = await doc.chunk({
 });
 ```
 
+For markdown documents where preserving the semantic relationships between sections is important, here's an example of how to use the `semantic-markdown` strategy:
+
+```ts showLineNumbers copy
+const chunks = await doc.chunk({
+  strategy: "semantic-markdown",
+  joinThreshold: 500,
+  modelName: "gpt-3.5-turbo",
+});
+```
+
 **Note:** Metadata extraction may use LLM calls, so ensure your API key is set.
 
 We go deeper into chunking strategies in our [chunk documentation](/reference/rag/chunk.mdx).
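
The `semantic-markdown` snippet added above assumes an existing `doc`. A minimal sketch of that surrounding context (an assumption for illustration, not part of this diff), using `MDocument` from `@mastra/rag`:

```typescript
import { MDocument } from "@mastra/rag";

// Build a document from markdown, then chunk it with the newly documented strategy.
const doc = MDocument.fromMarkdown(`
# Setup

Install the package and configure your API key.

## Configuration

Set the model name in your environment.
`);

const chunks = await doc.chunk({
  strategy: "semantic-markdown",
  joinThreshold: 500,
  modelName: "gpt-3.5-turbo",
});

console.log(chunks.length);
```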

package/.docs/raw/reference/agents/agent.mdx

@@ -1,32 +1,35 @@
 ---
 title: "Reference: Agent | Agents | Mastra Docs"
-description: "Documentation for the Agent class in Mastra, which provides the foundation for creating AI agents with various capabilities."
+description: "Documentation for the `Agent` class in Mastra, which provides the foundation for creating AI agents with various capabilities."
 ---
 
 # Agent
 
 The `Agent` class is the foundation for creating AI agents in Mastra. It provides methods for generating responses, streaming interactions, and handling voice capabilities.
 
-##
+## Usage example
 
-```typescript
+```typescript filename="src/mastra/agents/test-agent.ts" showLineNumbers copy
+import { openai } from "@ai-sdk/openai";
 import { Agent } from "@mastra/core/agent";
-```
-
-## Constructor
 
-
-
-
-
+const agent = new Agent({
+  name: "test-agent",
+  instructions: 'message for agent',
+  model: openai("gpt-4o")
+});
 ```
 
-
-
-<br />
+## Constructor parameters
 
 <PropertiesTable
   content={[
+    {
+      name: "id",
+      type: "string",
+      isOptional: true,
+      description: "Optional unique identifier for the agent. Defaults to `name` if not provided.",
+    },
     {
       name: "name",
       type: "string",

@@ -37,75 +40,98 @@ constructor(config: AgentConfig<TAgentId, TTools, TMetrics>)
       name: "description",
       type: "string",
       isOptional: true,
-      description:
-        "An optional description of the agent\'s purpose and capabilities.",
+      description: "Optional description of the agent's purpose and capabilities.",
     },
     {
       name: "instructions",
      type: "string | ({ runtimeContext: RuntimeContext }) => string | Promise<string>",
       isOptional: false,
-      description:
-        "Instructions that guide the agent's behavior. Can be a static string or a function that returns a string.",
+      description: "Instructions that guide the agent's behavior. Can be a static string or a function that returns a string dynamically.",
     },
     {
       name: "model",
       type: "MastraLanguageModel | ({ runtimeContext: RuntimeContext }) => MastraLanguageModel | Promise<MastraLanguageModel>",
       isOptional: false,
-      description:
-        "The language model to use for generating responses. Can be a model instance or a function that returns a model.",
+      description: "The language model used by the agent. Can be provided statically or resolved at runtime.",
     },
     {
       name: "tools",
       type: "ToolsInput | ({ runtimeContext: RuntimeContext }) => ToolsInput | Promise<ToolsInput>",
       isOptional: true,
-      description:
-        "Tools that the agent can use. Can be a static object or a function that returns tools.",
+      description: "Tools that the agent can access. Can be provided statically or resolved dynamically.",
     },
     {
-      name: "
-      type: "
+      name: "workflows",
+      type: "Record<string, Workflow> | ({ runtimeContext: RuntimeContext }) => Record<string, Workflow> | Promise<Record<string, Workflow>>",
       isOptional: true,
-      description:
-        "Input processors that run sequentially before messages are sent to the language model. These middleware components can intercept, modify, validate, or filter messages. Each processor receives an array of MastraMessageV2 objects and an abort function for early termination. Can be a static array or a function that returns processors based on runtime context.",
+      description: "Workflows that the agent can execute. Can be static or dynamically resolved.",
     },
     {
       name: "defaultGenerateOptions",
-      type: "AgentGenerateOptions",
+      type: "AgentGenerateOptions | ({ runtimeContext: RuntimeContext }) => AgentGenerateOptions | Promise<AgentGenerateOptions>",
       isOptional: true,
-      description: "Default options
+      description: "Default options used when calling `generate()`.",
     },
     {
       name: "defaultStreamOptions",
-      type: "AgentStreamOptions",
+      type: "AgentStreamOptions | ({ runtimeContext: RuntimeContext }) => AgentStreamOptions | Promise<AgentStreamOptions>",
       isOptional: true,
-      description: "Default options
+      description: "Default options used when calling `stream()`.",
     },
     {
-      name: "
-      type: "
+      name: "defaultVNextStreamOptions",
+      type: "AgentVNextStreamOptions | ({ runtimeContext: RuntimeContext }) => AgentVNextStreamOptions | Promise<AgentVNextStreamOptions>",
+      isOptional: true,
+      description: "Default options used when calling `stream()` in vNext mode.",
+    },
+    {
+      name: "mastra",
+      type: "Mastra",
+      isOptional: true,
+      description: "Reference to the Mastra runtime instance (injected automatically).",
+    },
+    {
+      name: "scorers",
+      type: "MastraScorers | ({ runtimeContext: RuntimeContext }) => MastraScorers | Promise<MastraScorers>",
       isOptional: true,
-      description:
-        "Workflows that the agent can execute. Can be a static object or a function that returns workflows.",
+      description: "Scoring configuration for runtime evaluation and telemetry. Can be static or dynamically provided.",
     },
     {
       name: "evals",
       type: "Record<string, Metric>",
       isOptional: true,
-      description: "Evaluation metrics for
+      description: "Evaluation metrics for scoring agent responses.",
     },
     {
       name: "memory",
       type: "MastraMemory | ({ runtimeContext: RuntimeContext }) => MastraMemory | Promise<MastraMemory>",
       isOptional: true,
-      description:
-        "Memory system for the agent to store and retrieve information. Can be a static memory instance or a function that returns a memory instance based on runtime context.",
+      description: "Memory module used for storing and retrieving stateful context.",
     },
     {
       name: "voice",
       type: "CompositeVoice",
       isOptional: true,
-      description:
-
+      description: "Voice settings for speech input and output.",
+    },
+    {
+      name: "inputProcessors",
+      type: "InputProcessor[] | ({ runtimeContext: RuntimeContext }) => InputProcessor[] | Promise<InputProcessor[]>",
+      isOptional: true,
+      description: "Input processors that can modify or validate messages before they are processed by the agent.",
+    },
+  ]}
+/>
+
+## Returns
+
+<PropertiesTable
+  content={[
+    {
+      name: "agent",
+      type: "Agent<TAgentId, TTools, TMetrics>",
+      description: "A new Agent instance with the specified configuration.",
     },
   ]}
 />
+
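
Several constructor parameters documented above accept either a static value or a `({ runtimeContext }) => ...` function. A brief hedged sketch of the dynamic form, based only on the types in this table (the agent name and the `"user-tier"` context key are hypothetical):

```typescript
import { openai } from "@ai-sdk/openai";
import { Agent } from "@mastra/core/agent";

const supportAgent = new Agent({
  name: "support-agent",
  // Instructions resolved per request from the RuntimeContext.
  instructions: ({ runtimeContext }) =>
    runtimeContext.get("user-tier") === "pro"
      ? "Give detailed, step-by-step answers."
      : "Give short, concise answers.",
  // The model can also be chosen dynamically.
  model: ({ runtimeContext }) =>
    runtimeContext.get("user-tier") === "pro"
      ? openai("gpt-4o")
      : openai("gpt-4o-mini"),
});
```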