@mastra/mcp-docs-server 0.13.31 → 0.13.32-alpha.1
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/.docs/organized/changelogs/%40internal%2Fexternal-types.md +1 -0
- package/.docs/organized/changelogs/%40mastra%2Fagent-builder.md +11 -11
- package/.docs/organized/changelogs/%40mastra%2Fai-sdk.md +25 -25
- package/.docs/organized/changelogs/%40mastra%2Fastra.md +11 -11
- package/.docs/organized/changelogs/%40mastra%2Fchroma.md +10 -10
- package/.docs/organized/changelogs/%40mastra%2Fclickhouse.md +10 -10
- package/.docs/organized/changelogs/%40mastra%2Fclient-js.md +15 -15
- package/.docs/organized/changelogs/%40mastra%2Fcloud.md +11 -11
- package/.docs/organized/changelogs/%40mastra%2Fcloudflare-d1.md +10 -10
- package/.docs/organized/changelogs/%40mastra%2Fcloudflare.md +23 -23
- package/.docs/organized/changelogs/%40mastra%2Fcore.md +122 -122
- package/.docs/organized/changelogs/%40mastra%2Fcouchbase.md +10 -10
- package/.docs/organized/changelogs/%40mastra%2Fdeployer-cloud.md +20 -20
- package/.docs/organized/changelogs/%40mastra%2Fdeployer-cloudflare.md +19 -19
- package/.docs/organized/changelogs/%40mastra%2Fdeployer-netlify.md +19 -19
- package/.docs/organized/changelogs/%40mastra%2Fdeployer-vercel.md +19 -19
- package/.docs/organized/changelogs/%40mastra%2Fdeployer.md +31 -31
- package/.docs/organized/changelogs/%40mastra%2Fdynamodb.md +10 -10
- package/.docs/organized/changelogs/%40mastra%2Fevals.md +19 -19
- package/.docs/organized/changelogs/%40mastra%2Flance.md +10 -10
- package/.docs/organized/changelogs/%40mastra%2Flibsql.md +23 -23
- package/.docs/organized/changelogs/%40mastra%2Floggers.md +10 -10
- package/.docs/organized/changelogs/%40mastra%2Fmcp-docs-server.md +16 -16
- package/.docs/organized/changelogs/%40mastra%2Fmcp-registry-registry.md +10 -10
- package/.docs/organized/changelogs/%40mastra%2Fmcp.md +14 -14
- package/.docs/organized/changelogs/%40mastra%2Fmemory.md +10 -10
- package/.docs/organized/changelogs/%40mastra%2Fmongodb.md +10 -10
- package/.docs/organized/changelogs/%40mastra%2Fmssql.md +11 -11
- package/.docs/organized/changelogs/%40mastra%2Fopensearch.md +10 -10
- package/.docs/organized/changelogs/%40mastra%2Fpg.md +21 -21
- package/.docs/organized/changelogs/%40mastra%2Fpinecone.md +11 -11
- package/.docs/organized/changelogs/%40mastra%2Fplayground-ui.md +35 -35
- package/.docs/organized/changelogs/%40mastra%2Fqdrant.md +11 -11
- package/.docs/organized/changelogs/%40mastra%2Frag.md +10 -10
- package/.docs/organized/changelogs/%40mastra%2Freact.md +20 -0
- package/.docs/organized/changelogs/%40mastra%2Fs3vectors.md +9 -0
- package/.docs/organized/changelogs/%40mastra%2Fserver.md +37 -37
- package/.docs/organized/changelogs/%40mastra%2Fturbopuffer.md +11 -11
- package/.docs/organized/changelogs/%40mastra%2Fupstash.md +10 -10
- package/.docs/organized/changelogs/%40mastra%2Fvectorize.md +10 -10
- package/.docs/organized/changelogs/%40mastra%2Fvoice-azure.md +13 -13
- package/.docs/organized/changelogs/%40mastra%2Fvoice-cloudflare.md +10 -10
- package/.docs/organized/changelogs/%40mastra%2Fvoice-deepgram.md +10 -10
- package/.docs/organized/changelogs/%40mastra%2Fvoice-elevenlabs.md +10 -10
- package/.docs/organized/changelogs/%40mastra%2Fvoice-gladia.md +10 -10
- package/.docs/organized/changelogs/%40mastra%2Fvoice-google-gemini-live.md +9 -0
- package/.docs/organized/changelogs/%40mastra%2Fvoice-google.md +19 -19
- package/.docs/organized/changelogs/%40mastra%2Fvoice-murf.md +10 -10
- package/.docs/organized/changelogs/%40mastra%2Fvoice-openai-realtime.md +11 -11
- package/.docs/organized/changelogs/%40mastra%2Fvoice-openai.md +10 -10
- package/.docs/organized/changelogs/%40mastra%2Fvoice-playai.md +10 -10
- package/.docs/organized/changelogs/%40mastra%2Fvoice-sarvam.md +10 -10
- package/.docs/organized/changelogs/%40mastra%2Fvoice-speechify.md +10 -10
- package/.docs/organized/changelogs/create-mastra.md +11 -11
- package/.docs/organized/changelogs/mastra.md +26 -26
- package/.docs/organized/code-examples/agent.md +55 -1
- package/.docs/organized/code-examples/agui.md +2 -2
- package/.docs/organized/code-examples/ai-elements.md +2 -2
- package/.docs/organized/code-examples/ai-sdk-useChat.md +2 -2
- package/.docs/organized/code-examples/ai-sdk-v5.md +2 -2
- package/.docs/organized/code-examples/assistant-ui.md +2 -2
- package/.docs/organized/code-examples/bird-checker-with-nextjs-and-eval.md +2 -2
- package/.docs/organized/code-examples/bird-checker-with-nextjs.md +2 -2
- package/.docs/organized/code-examples/client-side-tools.md +2 -2
- package/.docs/organized/code-examples/crypto-chatbot.md +2 -2
- package/.docs/organized/code-examples/heads-up-game.md +2 -2
- package/.docs/organized/code-examples/openapi-spec-writer.md +2 -2
- package/.docs/raw/agents/agent-memory.mdx +48 -31
- package/.docs/raw/agents/guardrails.mdx +8 -1
- package/.docs/raw/agents/networks.mdx +197 -128
- package/.docs/raw/agents/overview.mdx +10 -9
- package/.docs/raw/frameworks/agentic-uis/ai-sdk.mdx +92 -1
- package/.docs/raw/getting-started/installation.mdx +61 -68
- package/.docs/raw/memory/conversation-history.mdx +2 -2
- package/.docs/raw/memory/semantic-recall.mdx +36 -10
- package/.docs/raw/observability/ai-tracing/overview.mdx +220 -0
- package/.docs/raw/rag/chunking-and-embedding.mdx +19 -7
- package/.docs/raw/reference/cli/create-mastra.mdx +1 -1
- package/.docs/raw/reference/cli/mastra.mdx +1 -1
- package/.docs/raw/reference/client-js/agents.mdx +44 -25
- package/.docs/raw/reference/scorers/answer-relevancy.mdx +3 -6
- package/.docs/raw/reference/scorers/answer-similarity.mdx +7 -13
- package/.docs/raw/reference/scorers/bias.mdx +3 -6
- package/.docs/raw/reference/scorers/completeness.mdx +3 -6
- package/.docs/raw/reference/scorers/context-precision.mdx +6 -9
- package/.docs/raw/reference/scorers/context-relevance.mdx +12 -18
- package/.docs/raw/reference/scorers/faithfulness.mdx +3 -6
- package/.docs/raw/reference/scorers/hallucination.mdx +3 -6
- package/.docs/raw/reference/scorers/noise-sensitivity.mdx +13 -23
- package/.docs/raw/reference/scorers/prompt-alignment.mdx +16 -20
- package/.docs/raw/reference/scorers/tool-call-accuracy.mdx +4 -5
- package/.docs/raw/reference/scorers/toxicity.mdx +3 -6
- package/.docs/raw/reference/workflows/step.mdx +1 -1
- package/.docs/raw/reference/workflows/workflow-methods/sendEvent.mdx +23 -2
- package/.docs/raw/reference/workflows/workflow-methods/sleep.mdx +22 -4
- package/.docs/raw/reference/workflows/workflow-methods/sleepUntil.mdx +14 -4
- package/.docs/raw/reference/workflows/workflow-methods/waitForEvent.mdx +18 -1
- package/.docs/raw/server-db/runtime-context.mdx +13 -3
- package/.docs/raw/streaming/tool-streaming.mdx +30 -0
- package/.docs/raw/tools-mcp/overview.mdx +1 -1
- package/.docs/raw/workflows/overview.mdx +1 -1
- package/.docs/raw/workflows/suspend-and-resume.mdx +34 -23
- package/CHANGELOG.md +15 -0
- package/package.json +5 -5
- package/.docs/raw/workflows/pausing-execution.mdx +0 -142
@@ -3,46 +3,57 @@ title: "Agent Memory | Agents | Mastra Docs"
 description: Learn how to add memory to agents to store conversation history and maintain context across interactions.
 ---
 
-import {
+import { Steps } from "nextra/components";
 
-
+## Agent memory
 
-Agents
+Agents use memory to maintain context across interactions. LLMs are stateless and don't retain information between calls, so agents need memory to track conversation history and recall relevant information.
 
-
-
-Use memory when an agent needs to retain information across multiple user interactions. This includes recalling user-specific details, facts, or tool calls and their results. Without memory, the agent handles each request in isolation, with no awareness of previous messages or responses.
-
-For more information about the different ways memory works in Mastra see the following pages.
+Mastra agents can be configured to store conversation history, with optional [working memory](../memory/working-memory) to maintain recent context or [semantic recall](../memory/semantic-recall) to retrieve past messages based on meaning.
 
-
-- [Semantic Recall](../memory/semantic-recall.mdx)
+## When to use memory
 
-
+Use memory when your agent needs to maintain multi-turn conversations that reference prior exchanges, recall user preferences or facts from earlier in a session, or build context over time within a conversation thread. Skip memory for single-turn requests where each interaction is independent.
 
-
+## Setting up memory
 
-
+To enable memory in Mastra, install the `@mastra/memory` package along with a storage provider.
 
 ```bash npm2yarn copy
 npm install @mastra/memory@latest @mastra/libsql@latest
 ```
 
-
+## Storage providers
+
+Memory requires a storage provider to persist conversation history, including user messages and agent responses. For more details on available providers and how storage works in Mastra, see the [Storage](../server-db/storage.mdx) documentation.
+
+## Configuring memory
+
+<Steps>
+
+### Agent memory
+
+Enable memory by creating a `Memory` instance and passing it to the agent’s `memory` option.
 
-
+```typescript {6-9} filename="src/mastra/agents/memory-agent.ts" showLineNumbers copy
+import { Agent } from "@mastra/core/agent";
+import { Memory } from "@mastra/memory";
 
-
-
-
+export const memoryAgent = new Agent({
+  // ...
+  memory: new Memory({
+    options: {
+      lastMessages: 20
+    }
+  })
+});
+```
 
-
-`LibSQLStore` works well for local development and when deploying to [Mastra Cloud](../mastra-cloud/overview.mdx), but may not be supported by some [serverless platforms](../deployment/serverless-platforms) or [cloud providers](../deployment/cloud-providers).
-</Callout>
+> See the [Memory Class](../../reference/memory/Memory.mdx) for a full list of configuration options.
 
-###
+### Mastra storage
 
-
+Add a storage provider to your main Mastra instance to enable memory across all configured agents.
 
 ```typescript {6-8} filename="src/mastra/index.ts" showLineNumbers copy
 import { Mastra } from "@mastra/core/mastra";
@@ -56,9 +67,11 @@ export const mastra = new Mastra({
 });
 ```
 
-
+> See the [LibSQL Storage](../../reference/storage/libsql.mdx) for a full list of configuration options.
+
+</Steps>
 
-
+Alternatively, add storage directly to an agent’s memory to keep data separate or use different providers per agent.
 
 ```typescript {7-10} filename="src/mastra/agents/memory-agent.ts" showLineNumbers copy
 import { Agent } from "@mastra/core/agent";
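For orientation, the `src/mastra/index.ts` block this hunk tails into is only partially visible above. A minimal sketch of a Mastra instance with shared LibSQL storage (the agent name and database URL here are assumptions, not part of the diff):

```typescript
import { Mastra } from "@mastra/core/mastra";
import { LibSQLStore } from "@mastra/libsql";
// Assumed agent module; any agent registered here can use the shared storage.
import { memoryAgent } from "./agents/memory-agent";

export const mastra = new Mastra({
  agents: { memoryAgent },
  // Shared storage provider: persists conversation history for all configured agents.
  storage: new LibSQLStore({
    url: "file:../mastra.db", // example database URL
  }),
});
```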
@@ -69,20 +82,20 @@ export const memoryAgent = new Agent({
   // ...
   memory: new Memory({
     storage: new LibSQLStore({
-      url: "
+      url: ":memory:"
     })
   })
 });
 ```
 
-##
+## Conversation history
 
-
+Include a `memory` object with both `resource` and `thread` to track conversation history during agent calls.
 
 - `resource`: A stable identifier for the user or entity.
 - `thread`: An ID that isolates a specific conversation or session.
 
-These fields tell the agent where to store and retrieve context, enabling persistent, thread-aware memory across
+These fields tell the agent where to store and retrieve context, enabling persistent, thread-aware memory across a conversation.
 
 ```typescript {3-4}
 const response = await memoryAgent.generate("Remember my favorite color is blue.", {
@@ -93,7 +106,7 @@ const response = await memoryAgent.generate("Remember my favorite color is blue.
 });
 ```
 
-To recall information stored in memory, call the agent with the same `resource` and `thread` values used in the original
+To recall information stored in memory, call the agent with the same `resource` and `thread` values used in the original conversation.
 
 ```typescript {3-4}
 const response = await memoryAgent.generate("What's my favorite color?", {
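The `generate` calls in these hunks are shown only up to their opening line. Based on the `resource` and `thread` fields described above, a minimal sketch of the full recall call (identifier values are examples) looks like:

```typescript
const response = await memoryAgent.generate("What's my favorite color?", {
  memory: {
    resource: "user-123",      // stable identifier for the user or entity
    thread: "favorite-colors", // isolates this specific conversation
  },
});

console.log(response.text);
```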
@@ -104,9 +117,11 @@ const response = await memoryAgent.generate("What's my favorite color?", {
 });
 ```
 
+To learn more about memory see the [Memory](../memory/overview.mdx) documentation.
+
 ## Using `RuntimeContext`
 
-Use
+Use [RuntimeContext](../server-db/runtime-context.mdx) to access request-specific values. This lets you conditionally select different memory or storage configurations based on the context of the request.
 
 ```typescript filename="src/mastra/agents/memory-agent.ts" showLineNumbers
 export type UserTier = {
@@ -133,6 +148,8 @@ export const memoryAgent = new Agent({
 });
 ```
 
+> See [Runtime Context](../server-db/runtime-context.mdx) for more information.
+
 ## Related
 
 - [Working Memory](../memory/working-memory.mdx)
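The `RuntimeContext` example in the hunks above is truncated after `export type UserTier = {`. Purely as an illustration, a sketch of selecting memory options from a runtime-context value could look like the following; the `userTier` key, the type shape, and the message limits are hypothetical, and the dynamic `memory` function should be checked against the Runtime Context docs:

```typescript
import { openai } from "@ai-sdk/openai";
import { Agent } from "@mastra/core/agent";
import { Memory } from "@mastra/memory";
import { LibSQLStore } from "@mastra/libsql";

// Hypothetical shape for this sketch; the real docs define their own UserTier type.
export type UserTier = { tier: "free" | "pro" };

export const memoryAgent = new Agent({
  name: "memory-agent",
  instructions: "You are a helpful assistant that remembers user preferences.",
  model: openai("gpt-4o-mini"),
  // Dynamic config: pick memory options per request based on runtime context.
  memory: ({ runtimeContext }) => {
    const { tier } = runtimeContext.get("userTier") as UserTier; // assumed context key
    return new Memory({
      storage: new LibSQLStore({ url: "file:../mastra.db" }),
      options: { lastMessages: tier === "pro" ? 50 : 10 },
    });
  },
});
```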
@@ -325,11 +325,18 @@ for await (const chunk of stream.fullStream) {
   }
 }
 ```
-
 In this case, the `tripwireReason` indicates that a credit card number was detected:
 
 ```text
 PII detected. Types: credit-card
 ```
 
+## Custom processors
+
+If the built-in processors don’t cover your needs, you can create your own by extending the `Processor` class.
+
+Available examples:
 
+- [Message Length Limiter](../../examples/processors/message-length-limiter)
+- [Response Length Limiter](../../examples/processors/response-length-limiter)
+- [Response Validator](../../examples/processors/response-validator)
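The new "Custom processors" section above links to examples without showing code. As a sketch of the idea only (the `Processor` import, the `processInput` hook name, and the `abort` signature are assumptions; check the linked examples and the Processors reference for the actual API), a message-length limiter could look roughly like:

```typescript
import type { Processor } from "@mastra/core/processors";

// Assumed shape: an input processor that rejects overly long prompts before the LLM call.
export class MessageLengthLimiter implements Processor {
  readonly name = "message-length-limiter";

  constructor(private maxLength = 4000) {}

  // Assumed hook name and signature; verify against the Processors reference.
  async processInput({ messages, abort }: { messages: unknown[]; abort: (reason: string) => never }) {
    const totalLength = JSON.stringify(messages).length;
    if (totalLength > this.maxLength) {
      abort(`Message too long: ${totalLength} > ${this.maxLength}`); // sets tripwireReason
    }
    return messages;
  }
}
```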
@@ -1,166 +1,235 @@
 ---
-title: "
-description:
+title: "Agent Networks | Agents | Mastra Docs"
+description: Learn how to coordinate multiple agents, workflows, and tools using agent networks for complex, non-deterministic task execution.
 ---
 
-# Agent
+# Agent Networks
 
-
+Agent networks in Mastra coordinate multiple agents, workflows, and tools to handle tasks that aren't clearly defined upfront but can be inferred from the user's message or context. A top-level **routing agent** (a Mastra agent with other agents, workflows, and tools configured) uses an LLM to interpret the request and decide which primitives (sub-agents, workflows, or tools) to call, in what order, and with what data.
 
-
+## When to use networks
 
-
-- Scenarios where the task is not fully defined and is initiated with unstructured input. A network allows your Agent to figure out which primitive to call and turn unstructured input into a structured task.
+Use networks for complex tasks that require coordination across multiple primitives. Unlike workflows, which follow a predefined sequence, networks rely on LLM reasoning to interpret the request and decide what to run.
 
-##
+## Core principles
 
-
-- `Agent.network()` adds a layer of non-deterministic LLM-based orchestration, allowing dynamic, multi-agent collaboration and routing. This creates a non-deterministic flow of execution.
+Mastra agent networks operate using these principles:
 
-
+- Memory is required when using `.network()` and is used to store task history and determine when a task is complete.
+- Primitives are selected based on their descriptions. Clear, specific descriptions improve routing. For workflows and tools, the input schema helps determine the right inputs at runtime.
+- If multiple primitives have overlapping functionality, the agent favors the more specific one, using a combination of schema and descriptions to decide which to run.
 
-
-- Any available primitives (agents, workflows) are used based on their descriptions. The better the description, the better the routing agent will be able to select the right primitive. For workflows, the input schema is also used to determine which inputs to use when calling the workflow. More descriptive naming yields better results.
-- When primitives with overlapping capabilities are available, the agent will use the most specific primitive. For example, if both an agent and a workflow can do research, it will use the input schema of the workflow to determine which primitive to select.
+## Creating an agent network
 
-
+An agent network is built around a top-level routing agent that delegates tasks to agents, workflows, and tools defined in its configuration. Memory is configured on the routing agent using the `memory` option, and `instructions` define the agent's routing behavior.
 
-
+```typescript {22-23,26,29} filename="src/mastra/agents/routing-agent.ts" showLineNumbers copy
+import { openai } from "@ai-sdk/openai";
+import { Agent } from "@mastra/core/agent";
+import { Memory } from "@mastra/memory";
+import { LibSQLStore } from "@mastra/libsql";
 
-
-
-- `workflow1`: A workflow that can research a given city and write a full report based on the researched material (using both agent1 and agent2).
+import { researchAgent } from "./research-agent";
+import { writingAgent } from "./writing-agent";
 
-
+import { cityWorkflow } from "../workflows/city-workflow";
+import { weatherTool } from "../tools/weather-tool";
 
+export const routingAgent = new Agent({
+  name: "routing-agent",
+  instructions: `
+    You are a network of writers and researchers.
+    The user will ask you to research a topic.
+    Always respond with a complete report—no bullet points.
+    Write in full paragraphs, like a blog post.
+    Do not answer with incomplete or uncertain information.`,
+  model: openai("gpt-4o-mini"),
+  agents: {
+    researchAgent,
+    writingAgent
+  },
+  workflows: {
+    cityWorkflow
+  },
+  tools: {
+    weatherTool
+  },
+  memory: new Memory({
+    storage: new LibSQLStore({
+      url: "file:../mastra.db"
+    })
+  })
+});
+```
 
-
-import { Agent } from '@mastra/core/agent';
-import { createStep, createWorkflow } from '@mastra/core/workflows';
-import { RuntimeContext } from '@mastra/core/runtime-context';
-import { Memory } from '@mastra/memory';
-import { openai } from '@ai-sdk/openai';
-import { LibSQLStore } from '@mastra/libsql';
-import { z } from 'zod';
+### Writing descriptions for network primitives
 
-
-storage: new LibSQLStore({
-url: 'file:../mastra.db', // Or your database URL
-}),
-});
+When configuring a Mastra agent network, each primitive (agent, workflow, or tool) needs a clear description to help the routing agent decide which to use. The routing agent uses each primitive's description and schema to determine what it does and how to use it. Clear descriptions and well-defined input and output schemas improve routing accuracy.
 
-
-
-
-
-
-
-
-
-
-
-
-structuredOutput: {
-schema: z.object({
-text: z.string(),
-})
-},
-});
-
-return { text: resp.object.text };
-},
+#### Agent descriptions
+
+Each agent in a network should include a clear `description` that explains what the agent does.
+
+```typescript filename="src/mastra/agents/research-agent.ts" showLineNumbers
+export const researchAgent = new Agent({
+  name: "research-agent",
+  description: `This agent gathers concise research insights in bullet-point form.
+    It's designed to extract key facts without generating full
+    responses or narrative content.`,
+  // ...
 });
+```
+```typescript filename="src/mastra/agents/writing-agent.ts" showLineNumbers
+export const writingAgent = new Agent({
+  name: "writing-agent",
+  description: `This agent turns researched material into well-structured
+    written content. It produces full-paragraph reports with no bullet points,
+    suitable for use in articles, summaries, or blog posts.`,
+  // ...
+});
+```
 
-
-
-
+#### Workflow descriptions
+
+Workflows in a network should include a `description` to explain their purpose, along with `inputSchema` and `outputSchema` to describe the expected data.
+
+```typescript filename="src/mastra/workflows/city-workflow.ts" showLineNumbers
+export const cityWorkflow = createWorkflow({
+  id: "city-workflow",
+  description: `This workflow handles city-specific research tasks.
+    It first gathers factual information about the city, then synthesizes
+    that research into a full written report. Use it when the user input
+    includes a city to be researched.`,
   inputSchema: z.object({
-
+    city: z.string()
   }),
   outputSchema: z.object({
-text: z.string()
-})
-
-
-
-schema: z.object({
-text: z.string(),
-})
-},
-});
-
-return { text: resp.object.text };
-},
-});
+    text: z.string()
+  })
+  //...
+})
+```
 
-
-
-
-
-
+#### Tool descriptions
+
+Tools in a network should include a `description` to explain their purpose, along with `inputSchema` and `outputSchema` to describe the expected data.
+
+```typescript filename="src/mastra/tools/weather-tool.ts" showLineNumbers
+export const weatherTool = createTool({
+  id: "weather-tool",
+  description: ` Retrieves current weather information using the wttr.in API.
+    Accepts a city or location name as input and returns a short weather summary.
+    Use this tool whenever up-to-date weather data is requested.
+  `,
   inputSchema: z.object({
-
+    location: z.string()
  }),
   outputSchema: z.object({
-
+    weather: z.string()
   }),
-
-.then(agentStep1)
-.then(agentStep2)
-.commit();
-
-const agent1 = new Agent({
-name: 'agent1',
-instructions:
-'This agent is used to do research, but not create full responses. Answer in bullet points only and be concise.',
-description:
-'This agent is used to do research, but not create full responses. Answer in bullet points only and be concise.',
-model: openai('gpt-4o'),
+  // ...
 });
+```
 
-
-name: 'agent2',
-description:
-'This agent is used to do text synthesis on researched material. Write a full report based on the researched material. Writes reports in full paragraphs. Should be used to synthesize text from different sources together as a final report.',
-instructions:
-'This agent is used to do text synthesis on researched material. Write a full report based on the researched material. Do not use bullet points. Write full paragraphs. There should not be a single bullet point in the final report.',
-model: openai('gpt-4o'),
-});
+## Calling agent networks
 
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-}
+Call a Mastra agent network using `.network()` with a user message. The method returns a stream of events that you can iterate over to track execution progress and retrieve the final result.
+
+### Agent example
+
+In this example, the network interprets the message and would route the request to both the `researchAgent` and `writingAgent` to generate a complete response.
+
+```typescript showLineNumbers copy
+const result = await routingAgent.network("Tell me three cool ways to use Mastra");
+
+for await (const chunk of result) {
+  console.log(chunk.type);
+  if (chunk.type === "network-execution-event-step-finish") {
+    console.log(chunk.payload.result);
+  }
+}
+```
+
+#### Agent output
+
+The following `chunk.type` events are emitted during this request:
+
+```text
+routing-agent-start
+routing-agent-end
+agent-execution-start
+agent-execution-event-start
+agent-execution-event-step-start
+agent-execution-event-text-start
+agent-execution-event-text-delta
+agent-execution-event-text-end
+agent-execution-event-step-finish
+agent-execution-event-finish
+agent-execution-end
+network-execution-event-step-finish
+```
+
+## Workflow example
+
+In this example, the routing agent recognizes the city name in the message and runs the `cityWorkflow`. The workflow defines steps that call the `researchAgent` to gather facts, then the `writingAgent` to generate the final text.
+
+```typescript showLineNumbers copy
+const result = await routingAgent.network("Tell me some historical facts about London");
 
-const
+for await (const chunk of result) {
+  console.log(chunk.type);
+  if (chunk.type === "network-execution-event-step-finish") {
+    console.log(chunk.payload.result);
+  }
+}
+```
+
+#### Workflow output
+
+The following `chunk.type` events are emitted during this request:
 
-
-
-
-
-
-
-
+```text
+routing-agent-end
+workflow-execution-start
+workflow-execution-event-workflow-start
+workflow-execution-event-workflow-step-start
+workflow-execution-event-workflow-step-result
+workflow-execution-event-workflow-finish
+workflow-execution-end
+routing-agent-start
+network-execution-event-step-finish
 ```
 
-
+### Tool example
+
+In this example, the routing agent skips the `researchAgent`, `writingAgent`, and `cityWorkflow`, and calls the `weatherTool` directly to complete the task.
+
+```typescript showLineNumbers copy
+const result = await routingAgent.network("What's the weather in London?");
+
+for await (const chunk of result) {
+  console.log(chunk.type);
+  if (chunk.type === "network-execution-event-step-finish") {
+    console.log(chunk.payload.result);
+  }
+}
+```
+
+#### Tool output
+
+The following `chunk.type` events are emitted during this request:
+
+```text
+routing-agent-start
+routing-agent-end
+tool-execution-start
+tool-execution-end
+network-execution-event-step-finish
+```
 
-
-2. `workflow1` to research each city one by one. The workflow uses `memory` to figure out which cities have already been researched and makes sure it has researched all of them before proceeding.
-3. `agent2` to synthesize the final report.
+## Related
 
-
+- [Agent Memory](./agent-memory.mdx)
+- [Workflows Overview](../workflows/overview.mdx)
+- [Runtime Context](../server-db/runtime-context.mdx)
 
-- The underlying engine is a Mastra workflow that wraps the single call `generate` workflow.
-- The workflow will repeatedly call the network execution workflow with a `dountil` structure, until the routing model determines the task is complete. This check is used as the `dountil` condition.
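The `cityWorkflow` added above elides its steps (`//...`). As a rough sketch only, loosely mirroring the removed `.then(...).commit()` code and the new prose about chaining the research and writing agents, such a workflow could look like the following; the step ids, prompts, `mastra.getAgent` lookups, and schema wiring are assumptions:

```typescript
import { createStep, createWorkflow } from "@mastra/core/workflows";
import { z } from "zod";

// Assumed step: gather facts about the city with the research agent.
const researchStep = createStep({
  id: "research-city",
  inputSchema: z.object({ city: z.string() }),
  outputSchema: z.object({ research: z.string() }),
  execute: async ({ inputData, mastra }) => {
    const agent = mastra.getAgent("researchAgent");
    const result = await agent.generate(`Research key facts about ${inputData.city}.`);
    return { research: result.text };
  },
});

// Assumed step: turn the research into a full written report.
const writeStep = createStep({
  id: "write-report",
  inputSchema: z.object({ research: z.string() }),
  outputSchema: z.object({ text: z.string() }),
  execute: async ({ inputData, mastra }) => {
    const agent = mastra.getAgent("writingAgent");
    const result = await agent.generate(`Write a full-paragraph report based on: ${inputData.research}`);
    return { text: result.text };
  },
});

export const cityWorkflow = createWorkflow({
  id: "city-workflow",
  description: "Researches a city and writes a full report.",
  inputSchema: z.object({ city: z.string() }),
  outputSchema: z.object({ text: z.string() }),
})
  .then(researchStep)
  .then(writeStep)
  .commit();
```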
@@ -13,12 +13,12 @@ Agents use LLMs and tools to solve open-ended tasks. They reason about goals, de
 
 > **📹 Watch**: → An introduction to agents, and how they compare to workflows [YouTube (7 minutes)](https://youtu.be/0jg2g3sNvgw)
 
-##
+## Setting up agents
 
 <Tabs items={["Mastra model router", "Vercel AI SDK"]}>
 <Tabs.Tab>
 <Steps>
-### Install dependencies
+### Install dependencies [#install-dependencies-mastra-router]
 
 Add the Mastra core package to your project:
 
@@ -26,7 +26,7 @@ Add the Mastra core package to your project:
 npm install @mastra/core
 ```
 
-### Set your API key
+### Set your API key [#set-api-key-mastra-router]
 
 Mastra's model router auto-detects environment variables for your chosen provider. For OpenAI, set `OPENAI_API_KEY`:
 
@@ -36,7 +36,7 @@ OPENAI_API_KEY=<your-api-key>
 
 > Mastra supports more than 600 models. Choose from the full list [here](/models).
 
-###
+### Creating an agent [#creating-an-agent-mastra-router]
 
 Create an agent by instantiating the `Agent` class with system `instructions` and a `model`:
 
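The model-router version of the agent definition is not shown in these hunks. A minimal sketch (the agent name and model string are examples; `testAgent` is taken from a later hunk header), assuming the "provider/model" string form described above:

```typescript
import { Agent } from "@mastra/core/agent";

export const testAgent = new Agent({
  name: "test-agent",
  instructions: "You are a helpful assistant.",
  // Model router string in "<provider>/<model>" form, resolved via OPENAI_API_KEY.
  model: "openai/gpt-4o-mini",
});
```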
@@ -53,7 +53,8 @@ export const testAgent = new Agent({
 </Tabs.Tab>
 <Tabs.Tab>
 <Steps>
-
+
+### Install dependencies [#install-dependencies-ai-sdk]
 
 Include the Mastra core package alongside the Vercel AI SDK provider you want to use:
 
@@ -61,7 +62,7 @@ Include the Mastra core package alongside the Vercel AI SDK provider you want to
 npm install @mastra/core @ai-sdk/openai
 ```
 
-### Set your API key
+### Set your API key [#set-api-key-ai-sdk]
 
 Set the corresponding environment variable for your provider. For OpenAI via the AI SDK:
 
@@ -71,7 +72,7 @@ OPENAI_API_KEY=<your-api-key>
 
 > See the [AI SDK Providers](https://ai-sdk.dev/providers/ai-sdk-providers) in the Vercel AI SDK docs for additional configuration options.
 
-###
+### Creating an agent [#creating-an-agent-ai-sdk]
 
 To create an agent in Mastra, use the `Agent` class. Every agent must include `instructions` to define its behavior, and a `model` parameter to specify the LLM provider and model. When using the Vercel AI SDK, provide the client to your agent's `model` field:
 
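For the Vercel AI SDK tab, the corresponding agent definition is also not shown in these hunks. A minimal sketch passing the AI SDK client to the `model` field (names are examples):

```typescript
import { openai } from "@ai-sdk/openai";
import { Agent } from "@mastra/core/agent";

export const testAgent = new Agent({
  name: "test-agent",
  instructions: "You are a helpful assistant.",
  // AI SDK provider client passed directly to the model field.
  model: openai("gpt-4o-mini"),
});
```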
@@ -125,8 +126,8 @@ instructions: {
   content:
     "You are an expert code reviewer. Analyze code for bugs, performance issues, and best practices.",
   providerOptions: {
-    openai: {
-    anthropic: {
+    openai: { reasoningEffort: "high" }, // OpenAI's reasoning models
+    anthropic: { cacheControl: { type: "ephemeral" } } // Anthropic's prompt caching
   }
 }
 ```