@mastra/libsql 1.6.0 → 1.6.1-alpha.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (32) hide show
  1. package/CHANGELOG.md +11 -0
  2. package/dist/index.cjs +17 -8
  3. package/dist/index.cjs.map +1 -1
  4. package/dist/index.js +17 -8
  5. package/dist/index.js.map +1 -1
  6. package/dist/storage/domains/prompt-blocks/index.d.ts.map +1 -1
  7. package/package.json +4 -4
  8. package/dist/docs/SKILL.md +0 -50
  9. package/dist/docs/assets/SOURCE_MAP.json +0 -6
  10. package/dist/docs/references/docs-agents-agent-approval.md +0 -377
  11. package/dist/docs/references/docs-agents-agent-memory.md +0 -212
  12. package/dist/docs/references/docs-agents-network-approval.md +0 -275
  13. package/dist/docs/references/docs-agents-networks.md +0 -290
  14. package/dist/docs/references/docs-memory-memory-processors.md +0 -316
  15. package/dist/docs/references/docs-memory-message-history.md +0 -260
  16. package/dist/docs/references/docs-memory-overview.md +0 -45
  17. package/dist/docs/references/docs-memory-semantic-recall.md +0 -272
  18. package/dist/docs/references/docs-memory-storage.md +0 -261
  19. package/dist/docs/references/docs-memory-working-memory.md +0 -400
  20. package/dist/docs/references/docs-observability-overview.md +0 -70
  21. package/dist/docs/references/docs-observability-tracing-exporters-default.md +0 -211
  22. package/dist/docs/references/docs-rag-retrieval.md +0 -521
  23. package/dist/docs/references/docs-workflows-snapshots.md +0 -238
  24. package/dist/docs/references/guides-agent-frameworks-ai-sdk.md +0 -140
  25. package/dist/docs/references/reference-core-getMemory.md +0 -50
  26. package/dist/docs/references/reference-core-listMemory.md +0 -56
  27. package/dist/docs/references/reference-core-mastra-class.md +0 -66
  28. package/dist/docs/references/reference-memory-memory-class.md +0 -147
  29. package/dist/docs/references/reference-storage-composite.md +0 -235
  30. package/dist/docs/references/reference-storage-dynamodb.md +0 -282
  31. package/dist/docs/references/reference-storage-libsql.md +0 -135
  32. package/dist/docs/references/reference-vectors-libsql.md +0 -305
@@ -1 +1 @@
1
- {"version":3,"file":"index.d.ts","sourceRoot":"","sources":["../../../../src/storage/domains/prompt-blocks/index.ts"],"names":[],"mappings":"AAEA,OAAO,EACL,mBAAmB,EAQpB,MAAM,sBAAsB,CAAC;AAC9B,OAAO,KAAK,EACV,sBAAsB,EACtB,6BAA6B,EAC7B,6BAA6B,EAC7B,4BAA4B,EAC5B,6BAA6B,EAC7B,kBAAkB,EAClB,6BAA6B,EAC7B,4BAA4B,EAC5B,6BAA6B,EAC9B,MAAM,sBAAsB,CAAC;AAE9B,OAAO,KAAK,EAAE,kBAAkB,EAAE,MAAM,UAAU,CAAC;AAGnD,qBAAa,kBAAmB,SAAQ,mBAAmB;;gBAI7C,MAAM,EAAE,kBAAkB;IAOhC,IAAI,IAAI,OAAO,CAAC,IAAI,CAAC;IAUrB,mBAAmB,IAAI,OAAO,CAAC,IAAI,CAAC;IASpC,OAAO,CAAC,EAAE,EAAE,MAAM,GAAG,OAAO,CAAC,sBAAsB,GAAG,IAAI,CAAC;IAqB3D,MAAM,CAAC,KAAK,EAAE;QAAE,WAAW,EAAE,6BAA6B,CAAA;KAAE,GAAG,OAAO,CAAC,sBAAsB,CAAC;IAqD9F,MAAM,CAAC,KAAK,EAAE,6BAA6B,GAAG,OAAO,CAAC,sBAAsB,CAAC;IAqD7E,MAAM,CAAC,EAAE,EAAE,MAAM,GAAG,OAAO,CAAC,IAAI,CAAC;IAoBjC,IAAI,CAAC,IAAI,CAAC,EAAE,4BAA4B,GAAG,OAAO,CAAC,6BAA6B,CAAC;IAwFjF,aAAa,CAAC,KAAK,EAAE,6BAA6B,GAAG,OAAO,CAAC,kBAAkB,CAAC;IAoChF,UAAU,CAAC,EAAE,EAAE,MAAM,GAAG,OAAO,CAAC,kBAAkB,GAAG,IAAI,CAAC;IAqB1D,kBAAkB,CAAC,OAAO,EAAE,MAAM,EAAE,aAAa,EAAE,MAAM,GAAG,OAAO,CAAC,kBAAkB,GAAG,IAAI,CAAC;IAqB9F,gBAAgB,CAAC,OAAO,EAAE,MAAM,GAAG,OAAO,CAAC,kBAAkB,GAAG,IAAI,CAAC;IAqBrE,YAAY,CAAC,KAAK,EAAE,4BAA4B,GAAG,OAAO,CAAC,6BAA6B,CAAC;IAsDzF,aAAa,CAAC,EAAE,EAAE,MAAM,GAAG,OAAO,CAAC,IAAI,CAAC;IAmBxC,wBAAwB,CAAC,QAAQ,EAAE,MAAM,GAAG,OAAO,CAAC,IAAI,CAAC;IAmBzD,aAAa,CAAC,OAAO,EAAE,MAAM,GAAG,OAAO,CAAC,MAAM,CAAC;CA0EtD"}
1
+ {"version":3,"file":"index.d.ts","sourceRoot":"","sources":["../../../../src/storage/domains/prompt-blocks/index.ts"],"names":[],"mappings":"AAEA,OAAO,EACL,mBAAmB,EAQpB,MAAM,sBAAsB,CAAC;AAC9B,OAAO,KAAK,EACV,sBAAsB,EACtB,6BAA6B,EAC7B,6BAA6B,EAC7B,4BAA4B,EAC5B,6BAA6B,EAC7B,kBAAkB,EAClB,6BAA6B,EAC7B,4BAA4B,EAC5B,6BAA6B,EAC9B,MAAM,sBAAsB,CAAC;AAE9B,OAAO,KAAK,EAAE,kBAAkB,EAAE,MAAM,UAAU,CAAC;AAGnD,qBAAa,kBAAmB,SAAQ,mBAAmB;;gBAI7C,MAAM,EAAE,kBAAkB;IAOhC,IAAI,IAAI,OAAO,CAAC,IAAI,CAAC;IAiBrB,mBAAmB,IAAI,OAAO,CAAC,IAAI,CAAC;IASpC,OAAO,CAAC,EAAE,EAAE,MAAM,GAAG,OAAO,CAAC,sBAAsB,GAAG,IAAI,CAAC;IAqB3D,MAAM,CAAC,KAAK,EAAE;QAAE,WAAW,EAAE,6BAA6B,CAAA;KAAE,GAAG,OAAO,CAAC,sBAAsB,CAAC;IAqD9F,MAAM,CAAC,KAAK,EAAE,6BAA6B,GAAG,OAAO,CAAC,sBAAsB,CAAC;IAqD7E,MAAM,CAAC,EAAE,EAAE,MAAM,GAAG,OAAO,CAAC,IAAI,CAAC;IAoBjC,IAAI,CAAC,IAAI,CAAC,EAAE,4BAA4B,GAAG,OAAO,CAAC,6BAA6B,CAAC;IA0FjF,aAAa,CAAC,KAAK,EAAE,6BAA6B,GAAG,OAAO,CAAC,kBAAkB,CAAC;IAqChF,UAAU,CAAC,EAAE,EAAE,MAAM,GAAG,OAAO,CAAC,kBAAkB,GAAG,IAAI,CAAC;IAqB1D,kBAAkB,CAAC,OAAO,EAAE,MAAM,EAAE,aAAa,EAAE,MAAM,GAAG,OAAO,CAAC,kBAAkB,GAAG,IAAI,CAAC;IAqB9F,gBAAgB,CAAC,OAAO,EAAE,MAAM,GAAG,OAAO,CAAC,kBAAkB,GAAG,IAAI,CAAC;IAqBrE,YAAY,CAAC,KAAK,EAAE,4BAA4B,GAAG,OAAO,CAAC,6BAA6B,CAAC;IAsDzF,aAAa,CAAC,EAAE,EAAE,MAAM,GAAG,OAAO,CAAC,IAAI,CAAC;IAmBxC,wBAAwB,CAAC,QAAQ,EAAE,MAAM,GAAG,OAAO,CAAC,IAAI,CAAC;IAmBzD,aAAa,CAAC,OAAO,EAAE,MAAM,GAAG,OAAO,CAAC,MAAM,CAAC;CA2EtD"}
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "@mastra/libsql",
3
- "version": "1.6.0",
3
+ "version": "1.6.1-alpha.0",
4
4
  "description": "Libsql provider for Mastra - includes both vector and db storage capabilities",
5
5
  "type": "module",
6
6
  "main": "dist/index.js",
@@ -30,10 +30,10 @@
30
30
  "tsup": "^8.5.1",
31
31
  "typescript": "^5.9.3",
32
32
  "vitest": "4.0.18",
33
- "@internal/lint": "0.0.61",
34
- "@internal/types-builder": "0.0.36",
35
33
  "@internal/storage-test-utils": "0.0.57",
36
- "@mastra/core": "1.6.0"
34
+ "@mastra/core": "1.7.0-alpha.0",
35
+ "@internal/lint": "0.0.61",
36
+ "@internal/types-builder": "0.0.36"
37
37
  },
38
38
  "peerDependencies": {
39
39
  "@mastra/core": ">=1.4.0-0 <2.0.0-0"
@@ -1,50 +0,0 @@
1
- ---
2
- name: mastra-libsql
3
- description: Documentation for @mastra/libsql. Use when working with @mastra/libsql APIs, configuration, or implementation.
4
- metadata:
5
- package: "@mastra/libsql"
6
- version: "1.6.0"
7
- ---
8
-
9
- ## When to use
10
-
11
- Use this skill whenever you are working with @mastra/libsql to obtain the domain-specific knowledge.
12
-
13
- ## How to use
14
-
15
- Read the individual reference documents for detailed explanations and code examples.
16
-
17
- ### Docs
18
-
19
- - [Agent Approval](references/docs-agents-agent-approval.md) - Learn how to require approvals, suspend tool execution, and automatically resume suspended tools while keeping humans in control of agent workflows.
20
- - [Agent Memory](references/docs-agents-agent-memory.md) - Learn how to add memory to agents to store message history and maintain context across interactions.
21
- - [Network Approval](references/docs-agents-network-approval.md) - Learn how to require approvals, suspend execution, and resume suspended networks while keeping humans in control of agent network workflows.
22
- - [Agent Networks](references/docs-agents-networks.md) - Learn how to coordinate multiple agents, workflows, and tools using agent networks for complex, non-deterministic task execution.
23
- - [Memory Processors](references/docs-memory-memory-processors.md) - Learn how to use memory processors in Mastra to filter, trim, and transform messages before they're sent to the language model to manage context window limits.
24
- - [Message History](references/docs-memory-message-history.md) - Learn how to configure message history in Mastra to store recent messages from the current conversation.
25
- - [Memory overview](references/docs-memory-overview.md) - Learn how Mastra's memory system works with working memory, message history, semantic recall, and observational memory.
26
- - [Semantic Recall](references/docs-memory-semantic-recall.md) - Learn how to use semantic recall in Mastra to retrieve relevant messages from past conversations using vector search and embeddings.
27
- - [Storage](references/docs-memory-storage.md) - Configure storage for Mastra's memory system to persist conversations, workflows, and traces.
28
- - [Working Memory](references/docs-memory-working-memory.md) - Learn how to configure working memory in Mastra to store persistent user data, preferences.
29
- - [Observability Overview](references/docs-observability-overview.md) - Monitor and debug applications with Mastra's Observability features.
30
- - [Default Exporter](references/docs-observability-tracing-exporters-default.md) - Store traces locally for development and debugging
31
- - [Retrieval, Semantic Search, Reranking](references/docs-rag-retrieval.md) - Guide on retrieval processes in Mastra's RAG systems, including semantic search, filtering, and re-ranking.
32
- - [Snapshots](references/docs-workflows-snapshots.md) - Learn how to save and resume workflow execution state with snapshots in Mastra
33
-
34
- ### Guides
35
-
36
- - [AI SDK](references/guides-agent-frameworks-ai-sdk.md) - Use Mastra processors and memory with the Vercel AI SDK
37
-
38
- ### Reference
39
-
40
- - [Reference: Mastra.getMemory()](references/reference-core-getMemory.md) - Documentation for the `Mastra.getMemory()` method in Mastra, which retrieves a registered memory instance by its registry key.
41
- - [Reference: Mastra.listMemory()](references/reference-core-listMemory.md) - Documentation for the `Mastra.listMemory()` method in Mastra, which returns all registered memory instances.
42
- - [Reference: Mastra Class](references/reference-core-mastra-class.md) - Documentation for the `Mastra` class in Mastra, the core entry point for managing agents, workflows, MCP servers, and server endpoints.
43
- - [Reference: Memory Class](references/reference-memory-memory-class.md) - Documentation for the `Memory` class in Mastra, which provides a robust system for managing conversation history and thread-based message storage.
44
- - [Reference: Composite Storage](references/reference-storage-composite.md) - Documentation for combining multiple storage backends in Mastra.
45
- - [Reference: DynamoDB Storage](references/reference-storage-dynamodb.md) - Documentation for the DynamoDB storage implementation in Mastra, using a single-table design with ElectroDB.
46
- - [Reference: libSQL Storage](references/reference-storage-libsql.md) - Documentation for the libSQL storage implementation in Mastra.
47
- - [Reference: libSQL Vector Store](references/reference-vectors-libsql.md) - Documentation for the LibSQLVector class in Mastra, which provides vector search using libSQL with vector extensions.
48
-
49
-
50
- Read [assets/SOURCE_MAP.json](assets/SOURCE_MAP.json) for source code references.
@@ -1,6 +0,0 @@
1
- {
2
- "version": "1.6.0",
3
- "package": "@mastra/libsql",
4
- "exports": {},
5
- "modules": {}
6
- }
@@ -1,377 +0,0 @@
1
- # Agent Approval
2
-
3
- Agents sometimes require the same [human-in-the-loop](https://mastra.ai/docs/workflows/human-in-the-loop) oversight used in workflows when calling tools that handle sensitive operations, like deleting resources or performing long-running processes. With agent approval you can suspend a tool call and provide feedback to the user, or approve or decline a tool call based on targeted application conditions.
4
-
5
- ## Tool call approval
6
-
7
- Tool call approval can be enabled at the agent level and apply to every tool the agent uses, or at the tool level providing more granular control over individual tool calls.
8
-
9
- ### Storage
10
-
11
- Agent approval uses a snapshot to capture the state of the request. Ensure you've enabled a storage provider in your main Mastra instance. If storage isn't enabled you'll see an error relating to snapshot not found.
12
-
13
- ```typescript
14
- import { Mastra } from "@mastra/core/mastra";
15
- import { LibSQLStore } from "@mastra/libsql";
16
-
17
- export const mastra = new Mastra({
18
- storage: new LibSQLStore({
19
- id: "mastra-storage",
20
- url: ":memory:"
21
- })
22
- });
23
- ```
24
-
25
- ## Agent-level approval
26
-
27
- When calling an agent using `.stream()` set `requireToolApproval` to `true` which will prevent the agent from calling any of the tools defined in its configuration.
28
-
29
- ```typescript
30
- const stream = await agent.stream("What's the weather in London?", {
31
- requireToolApproval: true
32
- });
33
- ```
34
-
35
- ### Approving tool calls
36
-
37
- To approve a tool call, access `approveToolCall` from the `agent`, passing in the `runId` of the stream. This will let the agent know it's now OK to call its tools.
38
-
39
- ```typescript
40
- const handleApproval = async () => {
41
- const approvedStream = await agent.approveToolCall({ runId: stream.runId });
42
-
43
- for await (const chunk of approvedStream.textStream) {
44
- process.stdout.write(chunk);
45
- }
46
- process.stdout.write("\n");
47
- };
48
- ```
49
-
50
- ### Declining tool calls
51
-
52
- To decline a tool call, access the `declineToolCall` from the `agent`. You will see the streamed response from the agent, but it won't call its tools.
53
-
54
- ```typescript
55
- const handleDecline = async () => {
56
- const declinedStream = await agent.declineToolCall({ runId: stream.runId });
57
-
58
- for await (const chunk of declinedStream.textStream) {
59
- process.stdout.write(chunk);
60
- }
61
- process.stdout.write("\n");
62
- };
63
- ```
64
-
65
- ## Tool approval with generate()
66
-
67
- Tool approval also works with the `generate()` method for non-streaming use cases. When using `generate()` with `requireToolApproval: true`, the method returns immediately when a tool requires approval instead of executing it.
68
-
69
- ### How it works
70
-
71
- When a tool requires approval during a `generate()` call, the response includes:
72
-
73
- - `finishReason: 'suspended'` - indicates the agent is waiting for approval
74
- - `suspendPayload` - contains tool call details (`toolCallId`, `toolName`, `args`)
75
- - `runId` - needed to approve or decline the tool call
76
-
77
- ### Approving tool calls
78
-
79
- To approve a tool call with `generate()`, use the `approveToolCallGenerate` method:
80
-
81
- ```typescript
82
- const output = await agent.generate("Find user John", {
83
- requireToolApproval: true,
84
- });
85
-
86
- if (output.finishReason === "suspended") {
87
- console.log("Tool requires approval:", output.suspendPayload.toolName);
88
- console.log("Arguments:", output.suspendPayload.args);
89
-
90
- // Approve the tool call and get the final result
91
- const result = await agent.approveToolCallGenerate({
92
- runId: output.runId,
93
- toolCallId: output.suspendPayload.toolCallId,
94
- });
95
-
96
- console.log("Final result:", result.text);
97
- }
98
- ```
99
-
100
- ### Declining tool calls
101
-
102
- To decline a tool call, use the `declineToolCallGenerate` method:
103
-
104
- ```typescript
105
- if (output.finishReason === "suspended") {
106
- const result = await agent.declineToolCallGenerate({
107
- runId: output.runId,
108
- toolCallId: output.suspendPayload.toolCallId,
109
- });
110
-
111
- // Agent will respond acknowledging the declined tool
112
- console.log(result.text);
113
- }
114
- ```
115
-
116
- ### Stream vs Generate comparison
117
-
118
- | Aspect | `stream()` | `generate()` |
119
- | ------------------ | ---------------------------- | ------------------------------------------------ |
120
- | Response type | Streaming chunks | Complete response |
121
- | Approval detection | `tool-call-approval` chunk | `finishReason: 'suspended'` |
122
- | Approve method | `approveToolCall({ runId })` | `approveToolCallGenerate({ runId, toolCallId })` |
123
- | Decline method | `declineToolCall({ runId })` | `declineToolCallGenerate({ runId, toolCallId })` |
124
- | Result | Stream to iterate | Full output object |
125
-
126
- ## Tool-level approval
127
-
128
- There are two types of tool call approval. The first uses `requireApproval`, which is a property on the tool definition, while `requireToolApproval` is a parameter passed to `agent.stream()`. The second uses `suspend` and lets the agent provide context or confirmation prompts so the user can decide whether the tool call should continue.
129
-
130
- ### Tool approval using `requireApproval`
131
-
132
- In this approach, `requireApproval` is configured on the tool definition (shown below) rather than on the agent.
133
-
134
- ```typescript
135
- export const testTool = createTool({
136
- id: "test-tool",
137
- description: "Fetches weather for a location",
138
- inputSchema: z.object({
139
- location: z.string()
140
- }),
141
- outputSchema: z.object({
142
- weather: z.string()
143
- }),
144
- resumeSchema: z.object({
145
- approved: z.boolean()
146
- }),
147
- execute: async (inputData) => {
148
- const response = await fetch(`https://wttr.in/${inputData.location}?format=3`);
149
- const weather = await response.text();
150
-
151
- return { weather };
152
- },
153
- requireApproval: true
154
- });
155
- ```
156
-
157
- When `requireApproval` is true for a tool, the stream will include chunks of type `tool-call-approval` to indicate that the call is paused. To continue the call, invoke `resumeStream` with the required `resumeSchema` and the `runId`.
158
-
159
- ```typescript
160
- const stream = await agent.stream("What's the weather in London?");
161
-
162
- for await (const chunk of stream.fullStream) {
163
- if (chunk.type === "tool-call-approval") {
164
- console.log("Approval required.");
165
- }
166
- }
167
-
168
- const handleResume = async () => {
169
- const resumedStream = await agent.resumeStream({ approved: true }, { runId: stream.runId });
170
-
171
- for await (const chunk of resumedStream.textStream) {
172
- process.stdout.write(chunk);
173
- }
174
- process.stdout.write("\n");
175
- };
176
- ```
177
-
178
- ### Tool approval using `suspend`
179
-
180
- With this approach, neither the agent nor the tool uses `requireApproval`. Instead, the tool implementation calls `suspend` to pause execution and return context or confirmation prompts to the user.
181
-
182
- ```typescript
183
- export const testToolB = createTool({
184
- id: "test-tool-b",
185
- description: "Fetches weather for a location",
186
- inputSchema: z.object({
187
- location: z.string()
188
- }),
189
- outputSchema: z.object({
190
- weather: z.string()
191
- }),
192
- resumeSchema: z.object({
193
- approved: z.boolean()
194
- }),
195
- suspendSchema: z.object({
196
- reason: z.string()
197
- }),
198
- execute: async (inputData, context) => {
199
- const { resumeData: { approved } = {}, suspend } = context?.agent ?? {};
200
-
201
- if (!approved) {
202
- return suspend?.({ reason: "Approval required." });
203
- }
204
-
205
- const response = await fetch(`https://wttr.in/${inputData.location}?format=3`);
206
- const weather = await response.text();
207
-
208
- return { weather };
209
- }
210
- });
211
- ```
212
-
213
- With this approach the stream will include a `tool-call-suspended` chunk, and the `suspendPayload` will contain the `reason` defined by the tool's `suspendSchema`. To continue the call, invoke `resumeStream` with the required `resumeSchema` and the `runId`.
214
-
215
- ```typescript
216
- const stream = await agent.stream("What's the weather in London?");
217
-
218
- for await (const chunk of stream.fullStream) {
219
- if (chunk.type === "tool-call-suspended") {
220
- console.log(chunk.payload.suspendPayload);
221
- }
222
- }
223
-
224
- const handleResume = async () => {
225
- const resumedStream = await agent.resumeStream({ approved: true }, { runId: stream.runId });
226
-
227
- for await (const chunk of resumedStream.textStream) {
228
- process.stdout.write(chunk);
229
- }
230
- process.stdout.write("\n");
231
- };
232
- ```
233
-
234
- ## Automatic tool resumption
235
-
236
- When using tools that call `suspend()`, you can enable automatic resumption so the agent resumes suspended tools based on the user's next message. This creates a conversational flow where users provide the required information naturally, without your application needing to call `resumeStream()` explicitly.
237
-
238
- ### Enabling auto-resume
239
-
240
- Set `autoResumeSuspendedTools` to `true` in the agent's default options or when calling `stream()`:
241
-
242
- ```typescript
243
- import { Agent } from "@mastra/core/agent";
244
- import { Memory } from "@mastra/memory";
245
-
246
- // Option 1: In agent configuration
247
- const agent = new Agent({
248
- id: "my-agent",
249
- name: "My Agent",
250
- instructions: "You are a helpful assistant",
251
- model: "openai/gpt-4o-mini",
252
- tools: { weatherTool },
253
- memory: new Memory(),
254
- defaultOptions: {
255
- autoResumeSuspendedTools: true,
256
- },
257
- });
258
-
259
- // Option 2: Per-request
260
- const stream = await agent.stream("What's the weather?", {
261
- autoResumeSuspendedTools: true,
262
- });
263
- ```
264
-
265
- ### How it works
266
-
267
- When `autoResumeSuspendedTools` is enabled:
268
-
269
- 1. A tool suspends execution by calling `suspend()` with a payload (e.g., requesting more information)
270
-
271
- 2. The suspension is persisted to memory along with the conversation
272
-
273
- 3. When the user sends their next message on the same thread, the agent:
274
-
275
- - Detects the suspended tool from message history
276
- - Extracts `resumeData` from the user's message based on the tool's `resumeSchema`
277
- - Automatically resumes the tool with the extracted data
278
-
279
- ### Example
280
-
281
- ```typescript
282
- import { createTool } from "@mastra/core/tools";
283
- import { z } from "zod";
284
-
285
- export const weatherTool = createTool({
286
- id: "weather-info",
287
- description: "Fetches weather information for a city",
288
- suspendSchema: z.object({
289
- message: z.string(),
290
- }),
291
- resumeSchema: z.object({
292
- city: z.string(),
293
- }),
294
- execute: async (_inputData, context) => {
295
- // Check if this is a resume with data
296
- if (!context?.agent?.resumeData) {
297
- // First call - suspend and ask for the city
298
- return context?.agent?.suspend({
299
- message: "What city do you want to know the weather for?",
300
- });
301
- }
302
-
303
- // Resume call - city was extracted from user's message
304
- const { city } = context.agent.resumeData;
305
- const response = await fetch(`https://wttr.in/${city}?format=3`);
306
- const weather = await response.text();
307
-
308
- return { city, weather };
309
- },
310
- });
311
-
312
- const agent = new Agent({
313
- id: "my-agent",
314
- name: "My Agent",
315
- instructions: "You are a helpful assistant",
316
- model: "openai/gpt-4o-mini",
317
- tools: { weatherTool },
318
- memory: new Memory(),
319
- defaultOptions: {
320
- autoResumeSuspendedTools: true,
321
- },
322
- });
323
-
324
- const stream = await agent.stream("What's the weather like?");
325
-
326
- for await (const chunk of stream.fullStream) {
327
- if (chunk.type === "tool-call-suspended") {
328
- console.log(chunk.payload.suspendPayload);
329
- }
330
- }
331
-
332
- const handleResume = async () => {
333
- const resumedStream = await agent.stream("San Francisco");
334
-
335
- for await (const chunk of resumedStream.textStream) {
336
- process.stdout.write(chunk);
337
- }
338
- process.stdout.write("\n");
339
- };
340
- ```
341
-
342
- **Conversation flow:**
343
-
344
- ```text
345
- User: "What's the weather like?"
346
- Agent: "What city do you want to know the weather for?"
347
-
348
- User: "San Francisco"
349
- Agent: "The weather in San Francisco is: San Francisco: ☀️ +72°F"
350
- ```
351
-
352
- The second message automatically resumes the suspended tool - the agent extracts `{ city: "San Francisco" }` from the user's message and passes it as `resumeData`.
353
-
354
- ### Requirements
355
-
356
- For automatic tool resumption to work:
357
-
358
- - **Memory configured**: The agent needs memory to track suspended tools across messages
359
- - **Same thread**: The follow-up message must use the same memory thread and resource identifiers
360
- - **`resumeSchema` defined**: The tool must define a `resumeSchema` so the agent knows what data structure to extract from the user's message
361
-
362
- ### Manual vs automatic resumption
363
-
364
- | Approach | Use case |
365
- | -------------------------------------- | ------------------------------------------------------------------------ |
366
- | Manual (`resumeStream()`) | Programmatic control, webhooks, button clicks, external triggers |
367
- | Automatic (`autoResumeSuspendedTools`) | Conversational flows where users provide resume data in natural language |
368
-
369
- Both approaches work with the same tool definitions. Automatic resumption triggers only when suspended tools exist in the message history and the user sends a new message on the same thread.
370
-
371
- ## Related
372
-
373
- - [Using Tools](https://mastra.ai/docs/agents/using-tools)
374
- - [Agent Overview](https://mastra.ai/docs/agents/overview)
375
- - [Tools Overview](https://mastra.ai/docs/mcp/overview)
376
- - [Agent Memory](https://mastra.ai/docs/agents/agent-memory)
377
- - [Request Context](https://mastra.ai/docs/server/request-context)
@@ -1,212 +0,0 @@
1
- # Agent memory
2
-
3
- Agents use memory to maintain context across interactions. LLMs are stateless and don't retain information between calls, so agents need memory to track message history and recall relevant information.
4
-
5
- Mastra agents can be configured to store message history, with optional [working memory](https://mastra.ai/docs/memory/working-memory) to maintain recent context, [semantic recall](https://mastra.ai/docs/memory/semantic-recall) to retrieve past messages based on meaning, or [observational memory](https://mastra.ai/docs/memory/observational-memory) for automatic long-term memory that compresses conversations as they grow.
6
-
7
- ## When to use memory
8
-
9
- Use memory when your agent needs to maintain multi-turn conversations that reference prior exchanges, recall user preferences or facts from earlier in a session, or build context over time within a conversation thread. Skip memory for single-turn requests where each interaction is independent.
10
-
11
- ## Setting up memory
12
-
13
- To enable memory in Mastra, install the `@mastra/memory` package along with a storage provider.
14
-
15
- **npm**:
16
-
17
- ```bash
18
- npm install @mastra/memory@latest @mastra/libsql@latest
19
- ```
20
-
21
- **pnpm**:
22
-
23
- ```bash
24
- pnpm add @mastra/memory@latest @mastra/libsql@latest
25
- ```
26
-
27
- **Yarn**:
28
-
29
- ```bash
30
- yarn add @mastra/memory@latest @mastra/libsql@latest
31
- ```
32
-
33
- **Bun**:
34
-
35
- ```bash
36
- bun add @mastra/memory@latest @mastra/libsql@latest
37
- ```
38
-
39
- ## Storage providers
40
-
41
- Memory requires a storage provider to persist message history, including user messages and agent responses. For more details on available providers and how storage works in Mastra, see the [Storage](https://mastra.ai/docs/memory/storage) documentation.
42
-
43
- ## Configuring memory
44
-
45
- 1. Enable memory by creating a `Memory` instance and passing it to the agent’s `memory` option.
46
-
47
- ```typescript
48
- import { Agent } from "@mastra/core/agent";
49
- import { Memory } from "@mastra/memory";
50
-
51
- export const memoryAgent = new Agent({
52
- id: 'memory-agent',
53
- name: 'Memory Agent',
54
- memory: new Memory({
55
- options: {
56
- lastMessages: 20,
57
- },
58
- }),
59
- });
60
- ```
61
-
62
- > **Info:** Visit [Memory Class](https://mastra.ai/reference/memory/memory-class) for a full list of configuration options.
63
-
64
- 2. Add a storage provider to your main Mastra instance to enable memory across all configured agents.
65
-
66
- ```typescript
67
- import { Mastra } from "@mastra/core";
68
- import { LibSQLStore } from "@mastra/libsql";
69
-
70
- export const mastra = new Mastra({
71
- storage: new LibSQLStore({
72
- id: 'mastra-storage',
73
- url: ":memory:",
74
- }),
75
- });
76
- ```
77
-
78
- > **Info:** Visit [libSQL Storage](https://mastra.ai/reference/storage/libsql) for a full list of configuration options.
79
-
80
- Alternatively, add storage directly to an agent’s memory to keep data separate or use different providers per agent.
81
-
82
- ```typescript
83
- import { Agent } from "@mastra/core/agent";
84
- import { Memory } from "@mastra/memory";
85
- import { LibSQLStore } from "@mastra/libsql";
86
-
87
- export const memoryAgent = new Agent({
88
- id: 'memory-agent',
89
- name: 'Memory Agent',
90
- memory: new Memory({
91
- storage: new LibSQLStore({
92
- id: 'mastra-storage',
93
- url: ":memory:",
94
- }),
95
- }),
96
- });
97
- ```
98
-
99
- > **Mastra Cloud Store limitation:** Agent-level storage is not supported when using [Mastra Cloud Store](https://mastra.ai/docs/mastra-cloud/deployment). If you use Mastra Cloud Store, configure storage on the Mastra instance instead. This limitation does not apply if you bring your own database.
100
-
101
- ## Message history
102
-
103
- Include a `memory` object with both `resource` and `thread` to track message history during agent calls.
104
-
105
- - `resource`: A stable identifier for the user or entity.
106
- - `thread`: An ID that isolates a specific conversation or session.
107
-
108
- These fields tell the agent where to store and retrieve context, enabling persistent, thread-aware memory across a conversation.
109
-
110
- ```typescript
111
- const response = await memoryAgent.generate(
112
- "Remember my favorite color is blue.",
113
- {
114
- memory: {
115
- resource: "user-123",
116
- thread: "conversation-123",
117
- },
118
- },
119
- );
120
- ```
121
-
122
- To recall information stored in memory, call the agent with the same `resource` and `thread` values used in the original conversation.
123
-
124
- ```typescript
125
- const response = await memoryAgent.generate("What's my favorite color?", {
126
- memory: {
127
- resource: "user-123",
128
- thread: "conversation-123",
129
- },
130
- });
131
- ```
132
-
133
- > **Warning:** Each thread has an owner (`resourceId`) that cannot be changed after creation. Avoid reusing the same thread ID for threads with different owners, as this will cause errors when querying.
134
-
135
- To learn more about memory see the [Memory](https://mastra.ai/docs/memory/overview) documentation.
136
-
137
- ## Observational Memory
138
-
139
- For long-running conversations, raw message history grows until it fills the context window, degrading agent performance. [Observational Memory](https://mastra.ai/docs/memory/observational-memory) solves this by running background agents that compress old messages into dense observations, keeping the context window small while preserving long-term memory.
140
-
141
- ```typescript
142
- import { Agent } from "@mastra/core/agent";
143
- import { Memory } from "@mastra/memory";
144
-
145
- export const memoryAgent = new Agent({
146
- id: 'memory-agent',
147
- name: 'Memory Agent',
148
- memory: new Memory({
149
- options: {
150
- observationalMemory: true,
151
- },
152
- }),
153
- });
154
- ```
155
-
156
- Setting `observationalMemory: true` uses `google/gemini-2.5-flash` as the default model for the Observer and Reflector. To use a different model or customize thresholds, pass a config object:
157
-
158
- ```typescript
159
- import { Agent } from "@mastra/core/agent";
160
- import { Memory } from "@mastra/memory";
161
-
162
- export const memoryAgent = new Agent({
163
- id: 'memory-agent',
164
- name: 'Memory Agent',
165
- memory: new Memory({
166
- options: {
167
- observationalMemory: {
168
- model: "deepseek/deepseek-reasoner",
169
- observation: {
170
- messageTokens: 20_000,
171
- },
172
- },
173
- },
174
- }),
175
- });
176
- ```
177
-
178
- > **Info:** See [Observational Memory](https://mastra.ai/docs/memory/observational-memory) for details on how observations and reflections work, and [the reference](https://mastra.ai/reference/memory/observational-memory) for all configuration options.
179
-
180
- ## Using `RequestContext`
181
-
182
- Use [RequestContext](https://mastra.ai/docs/server/request-context) to access request-specific values. This lets you conditionally select different memory or storage configurations based on the context of the request.
183
-
184
- ```typescript
185
- export type UserTier = {
186
- "user-tier": "enterprise" | "pro";
187
- };
188
-
189
- const premiumMemory = new Memory();
190
-
191
- const standardMemory = new Memory();
192
-
193
- export const memoryAgent = new Agent({
194
- id: 'memory-agent',
195
- name: 'Memory Agent',
196
- memory: ({ requestContext }) => {
197
- const userTier = requestContext.get("user-tier") as UserTier["user-tier"];
198
-
199
- return userTier === "enterprise" ? premiumMemory : standardMemory;
200
- },
201
- });
202
- ```
203
-
204
- > **Info:** Visit [Request Context](https://mastra.ai/docs/server/request-context) for more information.
205
-
206
- ## Related
207
-
208
- - [Observational Memory](https://mastra.ai/docs/memory/observational-memory)
209
- - [Working Memory](https://mastra.ai/docs/memory/working-memory)
210
- - [Semantic Recall](https://mastra.ai/docs/memory/semantic-recall)
211
- - [Storage](https://mastra.ai/docs/memory/storage)
212
- - [Request Context](https://mastra.ai/docs/server/request-context)