@mastra/memory 1.0.0-beta.1 → 1.0.0-beta.11
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/CHANGELOG.md +355 -0
- package/dist/_types/@internal_ai-sdk-v4/dist/index.d.ts +7549 -0
- package/dist/chunk-DGUM43GV.js +10 -0
- package/dist/chunk-DGUM43GV.js.map +1 -0
- package/dist/chunk-JEQ2X3Z6.cjs +12 -0
- package/dist/chunk-JEQ2X3Z6.cjs.map +1 -0
- package/dist/chunk-KMQS2YEC.js +79 -0
- package/dist/chunk-KMQS2YEC.js.map +1 -0
- package/dist/chunk-MMUHFOCG.js +79 -0
- package/dist/chunk-MMUHFOCG.js.map +1 -0
- package/dist/chunk-QY6BZOPJ.js +250 -0
- package/dist/chunk-QY6BZOPJ.js.map +1 -0
- package/dist/chunk-SG3GRV3O.cjs +84 -0
- package/dist/chunk-SG3GRV3O.cjs.map +1 -0
- package/dist/chunk-W72AYUIF.cjs +252 -0
- package/dist/chunk-W72AYUIF.cjs.map +1 -0
- package/dist/chunk-WC4XBMZT.js +250 -0
- package/dist/chunk-WC4XBMZT.js.map +1 -0
- package/dist/chunk-YMNW6DEN.cjs +252 -0
- package/dist/chunk-YMNW6DEN.cjs.map +1 -0
- package/dist/chunk-ZUQPUTTO.cjs +84 -0
- package/dist/chunk-ZUQPUTTO.cjs.map +1 -0
- package/dist/docs/README.md +36 -0
- package/dist/docs/SKILL.md +42 -0
- package/dist/docs/SOURCE_MAP.json +31 -0
- package/dist/docs/agents/01-agent-memory.md +160 -0
- package/dist/docs/agents/02-networks.md +236 -0
- package/dist/docs/agents/03-agent-approval.md +317 -0
- package/dist/docs/core/01-reference.md +114 -0
- package/dist/docs/memory/01-overview.md +76 -0
- package/dist/docs/memory/02-storage.md +181 -0
- package/dist/docs/memory/03-working-memory.md +386 -0
- package/dist/docs/memory/04-semantic-recall.md +235 -0
- package/dist/docs/memory/05-memory-processors.md +319 -0
- package/dist/docs/memory/06-reference.md +617 -0
- package/dist/docs/processors/01-reference.md +81 -0
- package/dist/docs/storage/01-reference.md +972 -0
- package/dist/docs/vectors/01-reference.md +929 -0
- package/dist/index.cjs +14845 -115
- package/dist/index.cjs.map +1 -1
- package/dist/index.d.ts +145 -5
- package/dist/index.d.ts.map +1 -1
- package/dist/index.js +14807 -119
- package/dist/index.js.map +1 -1
- package/dist/token-6GSAFR2W-JV3TZR4M.cjs +63 -0
- package/dist/token-6GSAFR2W-JV3TZR4M.cjs.map +1 -0
- package/dist/token-6GSAFR2W-K2BTU23I.js +61 -0
- package/dist/token-6GSAFR2W-K2BTU23I.js.map +1 -0
- package/dist/token-6GSAFR2W-VLY2XUPA.js +61 -0
- package/dist/token-6GSAFR2W-VLY2XUPA.js.map +1 -0
- package/dist/token-6GSAFR2W-YCB5SK2Z.cjs +63 -0
- package/dist/token-6GSAFR2W-YCB5SK2Z.cjs.map +1 -0
- package/dist/token-util-NEHG7TUY-7IL6JUVY.cjs +10 -0
- package/dist/token-util-NEHG7TUY-7IL6JUVY.cjs.map +1 -0
- package/dist/token-util-NEHG7TUY-HF7KBP2H.cjs +10 -0
- package/dist/token-util-NEHG7TUY-HF7KBP2H.cjs.map +1 -0
- package/dist/token-util-NEHG7TUY-KSXDO2NO.js +8 -0
- package/dist/token-util-NEHG7TUY-KSXDO2NO.js.map +1 -0
- package/dist/token-util-NEHG7TUY-TIJ3LMSH.js +8 -0
- package/dist/token-util-NEHG7TUY-TIJ3LMSH.js.map +1 -0
- package/dist/tools/working-memory.d.ts +10 -2
- package/dist/tools/working-memory.d.ts.map +1 -1
- package/package.json +19 -25
- package/dist/processors/index.cjs +0 -165
- package/dist/processors/index.cjs.map +0 -1
- package/dist/processors/index.d.ts +0 -3
- package/dist/processors/index.d.ts.map +0 -1
- package/dist/processors/index.js +0 -158
- package/dist/processors/index.js.map +0 -1
- package/dist/processors/token-limiter.d.ts +0 -32
- package/dist/processors/token-limiter.d.ts.map +0 -1
- package/dist/processors/tool-call-filter.d.ts +0 -20
- package/dist/processors/tool-call-filter.d.ts.map +0 -1
@@ -0,0 +1,972 @@
# Storage API Reference

> API reference for storage - 5 entries

---

## Reference: DynamoDB Storage

> Documentation for the DynamoDB storage implementation in Mastra, using a single-table design with ElectroDB.

The DynamoDB storage implementation provides a scalable and performant NoSQL database solution for Mastra, leveraging a single-table design pattern with [ElectroDB](https://electrodb.dev/).

## Features

- Efficient single-table design for all Mastra storage needs
- Based on ElectroDB for type-safe DynamoDB access
- Support for AWS credentials, regions, and endpoints
- Compatible with AWS DynamoDB Local for development
- Stores Thread, Message, Trace, Eval, and Workflow data
- Optimized for serverless environments

## Installation

```bash
npm install @mastra/dynamodb@beta
# or
pnpm add @mastra/dynamodb@beta
# or
yarn add @mastra/dynamodb@beta
```

## Prerequisites

Before using this package, you **must** create a DynamoDB table with a specific structure, including primary keys and Global Secondary Indexes (GSIs). This adapter expects the DynamoDB table and its GSIs to be provisioned externally.

Detailed instructions for setting up the table using AWS CloudFormation or AWS CDK are available in [TABLE_SETUP.md](https://github.com/mastra-ai/mastra/blob/main/stores/dynamodb/TABLE_SETUP.md). Please ensure your table is configured according to those instructions before proceeding.

## Usage

### Basic Usage

```typescript
import { Memory } from "@mastra/memory";
import { DynamoDBStore } from "@mastra/dynamodb";

// Initialize the DynamoDB storage
const storage = new DynamoDBStore({
  name: "dynamodb", // A name for this storage instance
  config: {
    tableName: "mastra-single-table", // Name of your DynamoDB table
    region: "us-east-1", // Optional: AWS region, defaults to 'us-east-1'
    // endpoint: "http://localhost:8000", // Optional: For local DynamoDB
    // credentials: { accessKeyId: "YOUR_ACCESS_KEY", secretAccessKey: "YOUR_SECRET_KEY" } // Optional
  },
});

// Example: Initialize Memory with DynamoDB storage
const memory = new Memory({
  storage,
  options: {
    lastMessages: 10,
  },
});
```

### Local Development with DynamoDB Local

For local development, you can use [DynamoDB Local](https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/DynamoDBLocal.html).

1. **Run DynamoDB Local (e.g., using Docker):**

   ```bash
   docker run -p 8000:8000 amazon/dynamodb-local
   ```

2. **Configure `DynamoDBStore` to use the local endpoint:**

   ```typescript
   import { DynamoDBStore } from "@mastra/dynamodb";

   const storage = new DynamoDBStore({
     name: "dynamodb-local",
     config: {
       tableName: "mastra-single-table", // Ensure this table is created in your local DynamoDB
       region: "localhost", // Can be any string for local, 'localhost' is common
       endpoint: "http://localhost:8000",
       // For DynamoDB Local, credentials are not typically required unless configured.
       // If you've configured local credentials:
       // credentials: { accessKeyId: "fakeMyKeyId", secretAccessKey: "fakeSecretAccessKey" }
     },
   });
   ```

You will still need to create the table and GSIs in your local DynamoDB instance, for example, using the AWS CLI pointed to your local endpoint.
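
As a rough illustration of that step, the sketch below creates a table against the local endpoint with the AWS SDK for JavaScript. The `pk`/`sk` attribute names are placeholders and the GSIs are omitted; use the exact attribute and index definitions from [TABLE_SETUP.md](https://github.com/mastra-ai/mastra/blob/main/stores/dynamodb/TABLE_SETUP.md).

```typescript
import { CreateTableCommand, DynamoDBClient } from "@aws-sdk/client-dynamodb";

// Hypothetical local setup; the real key schema and GSIs come from TABLE_SETUP.md.
const client = new DynamoDBClient({
  region: "localhost",
  endpoint: "http://localhost:8000",
  credentials: { accessKeyId: "fakeMyKeyId", secretAccessKey: "fakeSecretAccessKey" },
});

await client.send(
  new CreateTableCommand({
    TableName: "mastra-single-table",
    BillingMode: "PAY_PER_REQUEST",
    AttributeDefinitions: [
      { AttributeName: "pk", AttributeType: "S" },
      { AttributeName: "sk", AttributeType: "S" },
    ],
    KeySchema: [
      { AttributeName: "pk", KeyType: "HASH" },
      { AttributeName: "sk", KeyType: "RANGE" },
    ],
    // GlobalSecondaryIndexes: [...] // add the GSIs required by TABLE_SETUP.md
  }),
);
```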

## Parameters

## AWS IAM Permissions

The IAM role or user executing the code needs appropriate permissions to interact with the specified DynamoDB table and its indexes. Below is a sample policy. Replace `${YOUR_TABLE_NAME}` with your actual table name and `${YOUR_AWS_REGION}` and `${YOUR_AWS_ACCOUNT_ID}` with appropriate values.

```json
{
  "Version": "2012-10-17",
  "Statement": [
    {
      "Effect": "Allow",
      "Action": [
        "dynamodb:DescribeTable",
        "dynamodb:GetItem",
        "dynamodb:PutItem",
        "dynamodb:UpdateItem",
        "dynamodb:DeleteItem",
        "dynamodb:Query",
        "dynamodb:Scan",
        "dynamodb:BatchGetItem",
        "dynamodb:BatchWriteItem"
      ],
      "Resource": [
        "arn:aws:dynamodb:${YOUR_AWS_REGION}:${YOUR_AWS_ACCOUNT_ID}:table/${YOUR_TABLE_NAME}",
        "arn:aws:dynamodb:${YOUR_AWS_REGION}:${YOUR_AWS_ACCOUNT_ID}:table/${YOUR_TABLE_NAME}/index/*"
      ]
    }
  ]
}
```

## Key Considerations

Before diving into the architectural details, keep these key points in mind when working with the DynamoDB storage adapter:

- **External Table Provisioning:** This adapter _requires_ you to create and configure the DynamoDB table and its Global Secondary Indexes (GSIs) yourself, prior to using the adapter. Follow the guide in [TABLE_SETUP.md](https://github.com/mastra-ai/mastra/blob/main/stores/dynamodb/TABLE_SETUP.md).
- **Single-Table Design:** All Mastra data (threads, messages, etc.) is stored in one DynamoDB table. This is a deliberate design choice optimized for DynamoDB, differing from relational database approaches.
- **Understanding GSIs:** Familiarity with how the GSIs are structured (as per `TABLE_SETUP.md`) is important for understanding data retrieval and potential query patterns.
- **ElectroDB:** The adapter uses ElectroDB to manage interactions with DynamoDB, providing a layer of abstraction and type safety over raw DynamoDB operations.

## Architectural Approach

This storage adapter utilizes a **single-table design pattern** leveraging [ElectroDB](https://electrodb.dev/), a common and recommended approach for DynamoDB. This differs architecturally from relational database adapters (like `@mastra/pg` or `@mastra/libsql`) that typically use multiple tables, each dedicated to a specific entity (threads, messages, etc.).

Key aspects of this approach:

- **DynamoDB Native:** The single-table design is optimized for DynamoDB's key-value and query capabilities, often leading to better performance and scalability compared to mimicking relational models.
- **External Table Management:** Unlike some adapters that might offer helper functions to create tables via code, this adapter **expects the DynamoDB table and its associated Global Secondary Indexes (GSIs) to be provisioned externally** before use. Please refer to [TABLE_SETUP.md](https://github.com/mastra-ai/mastra/blob/main/stores/dynamodb/TABLE_SETUP.md) for detailed instructions using tools like AWS CloudFormation or CDK. The adapter focuses solely on interacting with the pre-existing table structure.
- **Consistency via Interface:** While the underlying storage model differs, this adapter adheres to the same `MastraStorage` interface as other adapters, ensuring it can be used interchangeably within the Mastra `Memory` component.

### Mastra Data in the Single Table

Within the single DynamoDB table, different Mastra data entities (such as Threads, Messages, Traces, Evals, and Workflows) are managed and distinguished using ElectroDB. ElectroDB defines specific models for each entity type, which include unique key structures and attributes. This allows the adapter to store and retrieve diverse data types efficiently within the same table.

For example, a `Thread` item might have a primary key like `THREAD#<threadId>`, while a `Message` item belonging to that thread might use `THREAD#<threadId>` as a partition key and `MESSAGE#<messageId>` as a sort key. The Global Secondary Indexes (GSIs), detailed in `TABLE_SETUP.md`, are strategically designed to support common access patterns across these different entities, such as fetching all messages for a thread or querying traces associated with a particular workflow.
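
To make that concrete, here is a minimal ElectroDB entity sketch in the spirit of that layout. It is illustrative only and is not Mastra's internal model; the attribute names and key composition are assumptions.

```typescript
import { DynamoDBClient } from "@aws-sdk/client-dynamodb";
import { Entity } from "electrodb";

// Illustrative only: a thread-like entity keyed along the lines described above.
const ThreadEntity = new Entity(
  {
    model: { entity: "thread", version: "1", service: "mastra" },
    attributes: {
      threadId: { type: "string", required: true },
      resourceId: { type: "string", required: true },
      title: { type: "string" },
    },
    indexes: {
      primary: {
        // ElectroDB composes the stored pk/sk values from these attributes
        pk: { field: "pk", composite: ["threadId"] },
        sk: { field: "sk", composite: [] },
      },
    },
  },
  { table: "mastra-single-table", client: new DynamoDBClient({}) },
);

// ElectroDB then builds the keys and the DynamoDB call for you, e.g.:
// await ThreadEntity.get({ threadId: "123" }).go();
```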

### Advantages of Single-Table Design

This implementation uses a single-table design pattern with ElectroDB, which offers several advantages within the context of DynamoDB:

1. **Lower cost (potentially):** Fewer tables can simplify Read/Write Capacity Unit (RCU/WCU) provisioning and management, especially with on-demand capacity.
2. **Better performance:** Related data can be co-located or accessed efficiently through GSIs, enabling fast lookups for common access patterns.
3. **Simplified administration:** Fewer distinct tables to monitor, back up, and manage.
4. **Reduced complexity in access patterns:** ElectroDB helps manage the complexity of item types and access patterns on a single table.
5. **Transaction support:** DynamoDB transactions can be used across different "entity" types stored within the same table if needed.

---

## Reference: libSQL Storage

> Documentation for the libSQL storage implementation in Mastra.

[libSQL](https://docs.turso.tech/libsql) is an open-source, SQLite-compatible database that supports both local and remote deployments. It can be used to store message history, workflow snapshots, traces, and eval scores.

For vector-based features such as semantic recall or traditional RAG, use [libSQL Vector](https://mastra.ai/reference/v1/vectors/libsql), which covers embeddings and vector search.

## Installation

Storage providers must be installed as separate packages:

```bash
npm install @mastra/libsql@beta
```

## Usage

```typescript
import { LibSQLStore } from "@mastra/libsql";
import { Mastra } from "@mastra/core";

const mastra = new Mastra({
  storage: new LibSQLStore({
    id: 'libsql-storage',
    url: "file:./storage.db",
  }),
});
```

Agent-level file storage:

```typescript
import { Memory } from "@mastra/memory";
import { Agent } from "@mastra/core/agent";
import { LibSQLStore } from "@mastra/libsql";

export const agent = new Agent({
  id: "example-agent",
  memory: new Memory({
    storage: new LibSQLStore({
      id: 'libsql-storage',
      url: "file:./agent.db",
    }),
  }),
});
```

> **Note:**
File storage doesn't work with serverless platforms that have ephemeral file systems. For serverless deployments, use [Turso](https://turso.tech) or a different database engine.

Production with remote database:

```typescript
storage: new LibSQLStore({
  id: 'libsql-storage',
  url: "libsql://your-db-name.aws-ap-northeast-1.turso.io",
  authToken: process.env.TURSO_AUTH_TOKEN,
})
```

For local development and testing, you can store data in memory:

```typescript
storage: new LibSQLStore({
  id: 'libsql-storage',
  url: ":memory:",
})
```
> **Note:**
In-memory storage resets when the process restarts. It is only suitable for development and testing.

## Options

## Initialization

When you pass storage to the Mastra class, `init()` is called automatically to create the [core schema](https://mastra.ai/reference/v1/storage/overview#core-schema):

```typescript
import { Mastra } from "@mastra/core";
import { LibSQLStore } from "@mastra/libsql";

const storage = new LibSQLStore({
  id: 'libsql-storage',
  url: "file:./storage.db",
});

const mastra = new Mastra({
  storage, // init() called automatically
});
```

If using storage directly without Mastra, call `init()` explicitly:

```typescript
import { LibSQLStore } from "@mastra/libsql";

const storage = new LibSQLStore({
  id: 'libsql-storage',
  url: "file:./storage.db",
});

await storage.init();

// Access domain-specific stores via getStore()
const memoryStore = await storage.getStore('memory');
const thread = await memoryStore?.getThreadById({ threadId: "..." });
```

---

## Reference: MongoDB Storage

> Documentation for the MongoDB storage implementation in Mastra.

The MongoDB storage implementation provides a scalable storage solution using MongoDB databases with support for both document storage and vector operations.

## Installation

```bash
npm install @mastra/mongodb@beta
```

## Usage

Ensure you have a MongoDB Atlas instance with Atlas Search enabled, either Atlas Local (via Docker) or Atlas Cloud. MongoDB 7.0+ is recommended.

```typescript
import { MongoDBStore } from "@mastra/mongodb";

const storage = new MongoDBStore({
  url: process.env.MONGODB_URL,
  dbName: process.env.MONGODB_DATABASE,
});
```

## Parameters

## Constructor Examples

You can instantiate `MongoDBStore` in the following ways:

```ts
import { MongoDBStore } from "@mastra/mongodb";

// Basic connection without custom options
const store1 = new MongoDBStore({
  url: "mongodb+srv://user:password@cluster.mongodb.net",
  dbName: "mastra_storage",
});

// Using connection string with options
const store2 = new MongoDBStore({
  url: "mongodb+srv://user:password@cluster.mongodb.net",
  dbName: "mastra_storage",
  options: {
    retryWrites: true,
    maxPoolSize: 10,
    serverSelectionTimeoutMS: 5000,
    socketTimeoutMS: 45000,
  },
});
```

## Additional Notes

### Collection Management

The storage implementation handles collection creation and management automatically. It creates the following collections:

- `mastra_workflow_snapshot`: Stores workflow state and execution data
- `mastra_evals`: Stores evaluation results and metadata
- `mastra_threads`: Stores conversation threads
- `mastra_messages`: Stores individual messages
- `mastra_traces`: Stores telemetry and tracing data
- `mastra_scorers`: Stores scoring and evaluation data
- `mastra_resources`: Stores resource working memory data

### Initialization

When you pass storage to the Mastra class, `init()` is called automatically before any storage operation:

```typescript
import { Mastra } from "@mastra/core";
import { MongoDBStore } from "@mastra/mongodb";

const storage = new MongoDBStore({
  url: process.env.MONGODB_URL,
  dbName: process.env.MONGODB_DATABASE,
});

const mastra = new Mastra({
  storage, // init() is called automatically
});
```

If you're using storage directly without Mastra, you must call `init()` explicitly to create the collections:

```typescript
import { MongoDBStore } from "@mastra/mongodb";

const storage = new MongoDBStore({
  id: 'mongodb-storage',
  url: process.env.MONGODB_URL,
  dbName: process.env.MONGODB_DATABASE,
});

// Required when using storage directly
await storage.init();

// Access domain-specific stores via getStore()
const memoryStore = await storage.getStore('memory');
const thread = await memoryStore?.getThreadById({ threadId: "..." });
```

> **Note:**
If `init()` is not called, collections won't be created and storage operations will fail silently or throw errors.

## Vector Search Capabilities

MongoDB storage includes built-in vector search capabilities for AI applications:

### Vector Index Creation

```typescript
import { MongoDBVector } from "@mastra/mongodb";

const vectorStore = new MongoDBVector({
  url: process.env.MONGODB_URL,
  dbName: process.env.MONGODB_DATABASE,
});

// Create a vector index for embeddings
await vectorStore.createIndex({
  indexName: "document_embeddings",
  dimension: 1536,
});
```

### Vector Operations

```typescript
// Store vectors with metadata
await vectorStore.upsert({
  indexName: "document_embeddings",
  vectors: [
    {
      id: "doc-1",
      values: [0.1, 0.2, 0.3, ...], // 1536-dimensional vector
      metadata: {
        title: "Document Title",
        category: "technical",
        source: "api-docs",
      },
    },
  ],
});

// Similarity search
const results = await vectorStore.query({
  indexName: "document_embeddings",
  vector: queryEmbedding,
  topK: 5,
  filter: {
    category: "technical",
  },
});
```

## Usage Example

### Adding memory to an agent

To add MongoDB memory to an agent, use the `Memory` class and create a new `storage` key using `MongoDBStore`. The configuration supports both local and remote MongoDB instances.

```typescript title="src/mastra/agents/example-mongodb-agent.ts"
import { Memory } from "@mastra/memory";
import { Agent } from "@mastra/core/agent";
import { MongoDBStore } from "@mastra/mongodb";

export const mongodbAgent = new Agent({
  id: "mongodb-agent",
  name: "mongodb-agent",
  instructions:
    "You are an AI agent with the ability to automatically recall memories from previous interactions.",
  model: "openai/gpt-5.1",
  memory: new Memory({
    storage: new MongoDBStore({
      url: process.env.MONGODB_URI!,
      dbName: process.env.MONGODB_DB_NAME!,
    }),
    options: {
      threads: {
        generateTitle: true,
      },
    },
  }),
});
```

### Using the agent

Use `memoryOptions` to scope recall for this request. Set `lastMessages: 5` to limit recency-based recall, and use `semanticRecall` to fetch the `topK: 3` most relevant messages, including `messageRange: 2` neighboring messages for context around each match.

```typescript title="src/test-mongodb-agent.ts"
import "dotenv/config";

import { mastra } from "./mastra";

const threadId = "123";
const resourceId = "user-456";

const agent = mastra.getAgent("mongodbAgent");

const message = await agent.stream("My name is Mastra", {
  memory: {
    thread: threadId,
    resource: resourceId,
  },
});

await message.textStream.pipeTo(new WritableStream());

const stream = await agent.stream("What's my name?", {
  memory: {
    thread: threadId,
    resource: resourceId,
  },
  memoryOptions: {
    lastMessages: 5,
    semanticRecall: {
      topK: 3,
      messageRange: 2,
    },
  },
});

for await (const chunk of stream.textStream) {
  process.stdout.write(chunk);
}
```

---

## Reference: PostgreSQL Storage

> Documentation for the PostgreSQL storage implementation in Mastra.

The PostgreSQL storage implementation provides a production-ready storage solution using PostgreSQL databases.

## Installation

```bash
npm install @mastra/pg@beta
```

## Usage

```typescript
import { PostgresStore } from "@mastra/pg";

const storage = new PostgresStore({
  id: 'pg-storage',
  connectionString: process.env.DATABASE_URL,
});
```

## Parameters

## Constructor Examples

You can instantiate `PostgresStore` in the following ways:

```ts
import { PostgresStore } from "@mastra/pg";

// Using a connection string only
const store1 = new PostgresStore({
  id: 'pg-storage-1',
  connectionString: "postgresql://user:password@localhost:5432/mydb",
});

// Using a connection string with a custom schema name
const store2 = new PostgresStore({
  id: 'pg-storage-2',
  connectionString: "postgresql://user:password@localhost:5432/mydb",
  schemaName: "custom_schema", // optional
});

// Using individual connection parameters
const store4 = new PostgresStore({
  id: 'pg-storage-3',
  host: "localhost",
  port: 5432,
  database: "mydb",
  user: "user",
  password: "password",
});

// Individual parameters with schemaName
const store5 = new PostgresStore({
  id: 'pg-storage-4',
  host: "localhost",
  port: 5432,
  database: "mydb",
  user: "user",
  password: "password",
  schemaName: "custom_schema", // optional
});
```

## Additional Notes

### Schema Management

The storage implementation handles schema creation and updates automatically. It creates the following tables:

- `mastra_workflow_snapshot`: Stores workflow state and execution data
- `mastra_evals`: Stores evaluation results and metadata
- `mastra_threads`: Stores conversation threads
- `mastra_messages`: Stores individual messages
- `mastra_traces`: Stores telemetry and tracing data
- `mastra_scorers`: Stores scoring and evaluation data
- `mastra_resources`: Stores resource working memory data

### Initialization

When you pass storage to the Mastra class, `init()` is called automatically before any storage operation:

```typescript
import { Mastra } from "@mastra/core";
import { PostgresStore } from "@mastra/pg";

const storage = new PostgresStore({
  connectionString: process.env.DATABASE_URL,
});

const mastra = new Mastra({
  storage, // init() is called automatically
});
```

If you're using storage directly without Mastra, you must call `init()` explicitly to create the tables:

```typescript
import { PostgresStore } from "@mastra/pg";

const storage = new PostgresStore({
  id: 'pg-storage',
  connectionString: process.env.DATABASE_URL,
});

// Required when using storage directly
await storage.init();

// Access domain-specific stores via getStore()
const memoryStore = await storage.getStore('memory');
const thread = await memoryStore?.getThreadById({ threadId: "..." });
```

> **Note:**
If `init()` is not called, tables won't be created and storage operations will fail silently or throw errors.

### Direct Database and Pool Access

`PostgresStore` exposes both the underlying database object and the pg-promise instance as public fields:

```typescript
store.db; // pg-promise database instance
store.pgp; // pg-promise main instance
```

This enables direct queries and custom transaction management. When using these fields:

- You are responsible for proper connection and transaction handling.
- Closing the store (`store.close()`) will destroy the associated connection pool.
- Direct access bypasses any additional logic or validation provided by PostgresStore methods.

This approach is intended for advanced scenarios where low-level access is required.
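
For instance, a minimal sketch of direct access might look like the following. The queries and the `mastra_threads`/`mastra_messages` column names are assumptions based on the default schema described in this reference; verify them against your database before relying on them.

```typescript
// Read recent threads for a resource directly via pg-promise (assumed column names).
const recentThreads = await store.db.any(
  `SELECT id, title FROM mastra_threads WHERE "resourceId" = $1 ORDER BY "createdAt" DESC LIMIT 10`,
  ["user-456"],
);

// Run several statements atomically in one pg-promise transaction.
await store.db.tx(async (t) => {
  await t.none(`UPDATE mastra_threads SET title = $2 WHERE id = $1`, ["123", "Renamed thread"]);
  await t.none(
    `DELETE FROM mastra_messages WHERE thread_id = $1 AND "createdAt" < now() - interval '90 days'`,
    ["123"],
  );
});
```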

## Usage Example

### Adding memory to an agent

To add PostgreSQL memory to an agent, use the `Memory` class and create a new `storage` key using `PostgresStore`. The `connectionString` can point to either a remote database or a local database connection.

```typescript title="src/mastra/agents/example-pg-agent.ts"
import { Memory } from "@mastra/memory";
import { Agent } from "@mastra/core/agent";
import { PostgresStore } from "@mastra/pg";

export const pgAgent = new Agent({
  id: "pg-agent",
  name: "PG Agent",
  instructions:
    "You are an AI agent with the ability to automatically recall memories from previous interactions.",
  model: "openai/gpt-5.1",
  memory: new Memory({
    storage: new PostgresStore({
      id: 'pg-agent-storage',
      connectionString: process.env.DATABASE_URL!,
    }),
    options: {
      generateTitle: true, // Explicitly enable automatic title generation
    },
  }),
});
```

### Using the agent

Use `memoryOptions` to scope recall for this request. Set `lastMessages: 5` to limit recency-based recall, and use `semanticRecall` to fetch the `topK: 3` most relevant messages, including `messageRange: 2` neighboring messages for context around each match.

```typescript title="src/test-pg-agent.ts"
import "dotenv/config";

import { mastra } from "./mastra";

const threadId = "123";
const resourceId = "user-456";

const agent = mastra.getAgent("pg-agent");

const message = await agent.stream("My name is Mastra", {
  memory: {
    thread: threadId,
    resource: resourceId,
  },
});

await message.textStream.pipeTo(new WritableStream());

const stream = await agent.stream("What's my name?", {
  memory: {
    thread: threadId,
    resource: resourceId,
  },
  memoryOptions: {
    lastMessages: 5,
    semanticRecall: {
      topK: 3,
      messageRange: 2,
    },
  },
});

for await (const chunk of stream.textStream) {
  process.stdout.write(chunk);
}
```

## Index Management

PostgreSQL storage provides index management to optimize query performance.

### Default Indexes

PostgreSQL storage creates composite indexes during initialization for common query patterns:

- `mastra_threads_resourceid_createdat_idx`: (resourceId, createdAt DESC)
- `mastra_messages_thread_id_createdat_idx`: (thread_id, createdAt DESC)
- `mastra_ai_spans_traceid_startedat_idx`: (traceId, startedAt DESC)
- `mastra_ai_spans_parentspanid_startedat_idx`: (parentSpanId, startedAt DESC)
- `mastra_ai_spans_name_startedat_idx`: (name, startedAt DESC)
- `mastra_ai_spans_scope_startedat_idx`: (scope, startedAt DESC)
- `mastra_scores_trace_id_span_id_created_at_idx`: (traceId, spanId, createdAt DESC)

These indexes improve performance for filtered queries with sorting, including `dateRange` filters on message queries.
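
As a quick way to confirm that a query actually uses one of these indexes, you can run `EXPLAIN` through the `db` accessor described later in this reference; the query below is illustrative and assumes the default table and column names.

```typescript
// Inspect the plan for a typical "recent messages in a thread" query.
const plan = await storage.db.any(
  `EXPLAIN ANALYZE
   SELECT * FROM mastra_messages
   WHERE thread_id = $1
   ORDER BY "createdAt" DESC
   LIMIT 20`,
  ["123"],
);
console.log(plan.map((row) => row["QUERY PLAN"]).join("\n"));
```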

### Configuring Indexes

You can control index creation via constructor options:

```typescript
import { PostgresStore } from "@mastra/pg";

// Skip default indexes (manage indexes separately)
const store = new PostgresStore({
  id: 'pg-storage',
  connectionString: process.env.DATABASE_URL,
  skipDefaultIndexes: true,
});

// Add custom indexes during initialization
const storeWithCustomIndexes = new PostgresStore({
  id: 'pg-storage',
  connectionString: process.env.DATABASE_URL,
  indexes: [
    {
      name: "idx_threads_metadata_type",
      table: "mastra_threads",
      columns: ["metadata->>'type'"],
    },
    {
      name: "idx_messages_status",
      table: "mastra_messages",
      columns: ["metadata->>'status'"],
    },
  ],
});
```

For advanced index types, you can specify additional options:

- `unique: true` for unique constraints
- `where: 'condition'` for partial indexes
- `method: 'brin'` for time-series data
- `storage: { fillfactor: 90 }` for update-heavy tables
- `concurrent: true` for non-blocking creation (default)
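
A sketch combining a few of these options is shown below. The index definitions themselves (names, columns, and the partial-index condition) are illustrative, not defaults shipped with `@mastra/pg`.

```typescript
import { PostgresStore } from "@mastra/pg";

const tunedStore = new PostgresStore({
  id: 'pg-storage',
  connectionString: process.env.DATABASE_URL,
  indexes: [
    {
      // BRIN suits large, time-ordered tables such as message history
      name: "idx_messages_createdat_brin",
      table: "mastra_messages",
      columns: ["createdAt"],
      method: "brin",
      concurrent: true, // non-blocking creation (default)
    },
    {
      // Partial index over a hypothetical metadata flag, tuned for frequent updates
      name: "idx_threads_archived",
      table: "mastra_threads",
      columns: ["metadata->>'archived'"],
      where: "metadata->>'archived' = 'true'",
      storage: { fillfactor: 90 },
    },
  ],
});
```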

### Index Options

### Schema-Specific Indexes

When using custom schemas, index names are prefixed with the schema name:

```typescript
const storage = new PostgresStore({
  id: 'pg-storage',
  connectionString: process.env.DATABASE_URL,
  schemaName: "custom_schema",
  indexes: [
    {
      name: "idx_threads_status",
      table: "mastra_threads",
      columns: ["status"],
    },
  ],
});

// Creates index as: custom_schema_idx_threads_status
```

### Managing Indexes via SQL

For advanced index management (listing, dropping, analyzing), use direct SQL queries via the `db` accessor:

```typescript
// List indexes for a table
const indexes = await storage.db.any(`
  SELECT indexname, indexdef
  FROM pg_indexes
  WHERE tablename = 'mastra_messages'
`);

// Drop an index
await storage.db.none('DROP INDEX IF EXISTS idx_my_custom_index');

// Analyze index usage
const stats = await storage.db.one(`
  SELECT idx_scan, idx_tup_read
  FROM pg_stat_user_indexes
  WHERE indexrelname = 'mastra_messages_thread_id_createdat_idx'
`);
```

### Index Types and Use Cases

PostgreSQL offers different index types optimized for specific scenarios:

| Index Type          | Best For                                | Storage    | Speed                      |
| ------------------- | --------------------------------------- | ---------- | -------------------------- |
| **btree** (default) | Range queries, sorting, general purpose | Moderate   | Fast                       |
| **hash**            | Equality comparisons only               | Small      | Very fast for `=`          |
| **gin**             | JSONB, arrays, full-text search         | Large      | Fast for contains          |
| **gist**            | Geometric data, full-text search        | Moderate   | Fast for nearest-neighbor  |
| **spgist**          | Non-balanced data, text patterns        | Small      | Fast for specific patterns |
| **brin**            | Large tables with natural ordering      | Very small | Fast for ranges            |

---

## Reference: Upstash Storage

> Documentation for the Upstash storage implementation in Mastra.

The Upstash storage implementation provides a serverless-friendly storage solution using Upstash's Redis-compatible key-value store.

> **Note:**

**Important:** When using Mastra with Upstash, the pay-as-you-go model can result in unexpectedly high costs due to the high volume of Redis commands generated during agent conversations. We strongly recommend using a **fixed pricing plan** for predictable costs. See [Upstash pricing](https://upstash.com/pricing/redis) for details and [GitHub issue #5850](https://github.com/mastra-ai/mastra/issues/5850) for context.

## Installation

```bash
npm install @mastra/upstash@beta
```

## Usage

```typescript
import { UpstashStore } from "@mastra/upstash";

const storage = new UpstashStore({
  id: 'upstash-storage',
  url: process.env.UPSTASH_URL,
  token: process.env.UPSTASH_TOKEN,
});
```

## Parameters

## Additional Notes

### Key Structure

The Upstash storage implementation uses a key-value structure:

- Thread keys: `{prefix}thread:{threadId}`
- Message keys: `{prefix}message:{messageId}`
- Metadata keys: `{prefix}metadata:{entityId}`

### Serverless Benefits

Upstash storage is particularly well-suited for serverless deployments:

- No connection management needed
- Pay-per-request pricing
- Global replication options
- Edge-compatible

### Data Persistence

Upstash provides:

- Automatic data persistence
- Point-in-time recovery
- Cross-region replication options

### Performance Considerations

For optimal performance:

- Use appropriate key prefixes to organize data
- Monitor Redis memory usage
- Consider data expiration policies if needed

## Usage Example

### Adding memory to an agent

To add Upstash memory to an agent, use the `Memory` class and create a new `storage` key using `UpstashStore` and a new `vector` key using `UpstashVector`. The configuration can point to either a remote service or a local setup.

```typescript title="src/mastra/agents/example-upstash-agent.ts"
import { Memory } from "@mastra/memory";
import { Agent } from "@mastra/core/agent";
import { UpstashStore } from "@mastra/upstash";

export const upstashAgent = new Agent({
  id: "upstash-agent",
  name: "Upstash Agent",
  instructions:
    "You are an AI agent with the ability to automatically recall memories from previous interactions.",
  model: "openai/gpt-5.1",
  memory: new Memory({
    storage: new UpstashStore({
      id: 'upstash-agent-storage',
      url: process.env.UPSTASH_REDIS_REST_URL!,
      token: process.env.UPSTASH_REDIS_REST_TOKEN!,
    }),
    options: {
      generateTitle: true, // Explicitly enable automatic title generation
    },
  }),
});
```
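
The example above only wires up `storage`. A minimal sketch of also registering the `vector` key mentioned in the prose is shown below; the `UpstashVector` constructor options and environment variable names are assumptions, and semantic recall additionally requires an embedder to be configured on the `Memory` instance.

```typescript
import { Memory } from "@mastra/memory";
import { UpstashStore, UpstashVector } from "@mastra/upstash";

// Sketch only: Redis-backed storage plus an Upstash Vector index for semantic recall.
const memory = new Memory({
  storage: new UpstashStore({
    id: 'upstash-agent-storage',
    url: process.env.UPSTASH_REDIS_REST_URL!,
    token: process.env.UPSTASH_REDIS_REST_TOKEN!,
  }),
  vector: new UpstashVector({
    url: process.env.UPSTASH_VECTOR_REST_URL!,
    token: process.env.UPSTASH_VECTOR_REST_TOKEN!,
  }),
  // embedder: ... // an embedding model is also needed for semantic recall
});
```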

### Using the agent

Use `memoryOptions` to scope recall for this request. Set `lastMessages: 5` to limit recency-based recall, and use `semanticRecall` to fetch the `topK: 3` most relevant messages, including `messageRange: 2` neighboring messages for context around each match.

```typescript title="src/test-upstash-agent.ts"
import "dotenv/config";

import { mastra } from "./mastra";

const threadId = "123";
const resourceId = "user-456";

const agent = mastra.getAgent("upstashAgent");

const message = await agent.stream("My name is Mastra", {
  memory: {
    thread: threadId,
    resource: resourceId,
  },
});

await message.textStream.pipeTo(new WritableStream());

const stream = await agent.stream("What's my name?", {
  memory: {
    thread: threadId,
    resource: resourceId,
  },
  memoryOptions: {
    lastMessages: 5,
    semanticRecall: {
      topK: 3,
      messageRange: 2,
    },
  },
});

for await (const chunk of stream.textStream) {
  process.stdout.write(chunk);
}
```