@mastra/libsql 1.0.0-beta.9 → 1.0.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/CHANGELOG.md +1164 -0
- package/dist/docs/README.md +39 -0
- package/dist/docs/SKILL.md +40 -0
- package/dist/docs/SOURCE_MAP.json +6 -0
- package/dist/docs/agents/01-agent-memory.md +166 -0
- package/dist/docs/agents/02-networks.md +292 -0
- package/dist/docs/agents/03-agent-approval.md +377 -0
- package/dist/docs/agents/04-network-approval.md +274 -0
- package/dist/docs/core/01-reference.md +151 -0
- package/dist/docs/guides/01-ai-sdk.md +141 -0
- package/dist/docs/memory/01-overview.md +76 -0
- package/dist/docs/memory/02-storage.md +233 -0
- package/dist/docs/memory/03-working-memory.md +390 -0
- package/dist/docs/memory/04-semantic-recall.md +233 -0
- package/dist/docs/memory/05-memory-processors.md +318 -0
- package/dist/docs/memory/06-reference.md +133 -0
- package/dist/docs/observability/01-overview.md +64 -0
- package/dist/docs/observability/02-default.md +177 -0
- package/dist/docs/rag/01-retrieval.md +548 -0
- package/dist/docs/storage/01-reference.md +542 -0
- package/dist/docs/vectors/01-reference.md +213 -0
- package/dist/docs/workflows/01-snapshots.md +240 -0
- package/dist/index.cjs +546 -107
- package/dist/index.cjs.map +1 -1
- package/dist/index.js +543 -109
- package/dist/index.js.map +1 -1
- package/dist/storage/db/index.d.ts +42 -1
- package/dist/storage/db/index.d.ts.map +1 -1
- package/dist/storage/db/utils.d.ts +16 -1
- package/dist/storage/db/utils.d.ts.map +1 -1
- package/dist/storage/domains/memory/index.d.ts +3 -2
- package/dist/storage/domains/memory/index.d.ts.map +1 -1
- package/dist/storage/domains/observability/index.d.ts +23 -0
- package/dist/storage/domains/observability/index.d.ts.map +1 -1
- package/dist/storage/domains/scores/index.d.ts +0 -1
- package/dist/storage/domains/scores/index.d.ts.map +1 -1
- package/dist/storage/domains/workflows/index.d.ts +1 -0
- package/dist/storage/domains/workflows/index.d.ts.map +1 -1
- package/dist/storage/index.d.ts +10 -4
- package/dist/storage/index.d.ts.map +1 -1
- package/dist/vector/index.d.ts +6 -2
- package/dist/vector/index.d.ts.map +1 -1
- package/dist/vector/sql-builder.d.ts.map +1 -1
- package/package.json +9 -8
@@ -0,0 +1,542 @@

# Storage API Reference

> API reference for storage - 3 entries

---

## Reference: Composite Storage

> Documentation for combining multiple storage backends in Mastra.

`MastraCompositeStore` can compose storage domains from different providers. Use it when you need different databases for different purposes. For example, use LibSQL for memory and PostgreSQL for workflows.

## Installation

`MastraCompositeStore` is included in `@mastra/core`:

```bash
npm install @mastra/core@beta
```

You'll also need to install the storage providers you want to compose:

```bash
npm install @mastra/pg@beta @mastra/libsql@beta
```

## Storage domains

Mastra organizes storage into five specialized domains, each handling a specific type of data. Each domain can be backed by a different storage adapter, and domain classes are exported from each storage package.

| Domain | Description |
|--------|-------------|
| `memory` | Conversation persistence for agents. Stores threads (conversation sessions), messages, resources (user identities), and working memory (persistent context across conversations). |
| `workflows` | Workflow execution state. When workflows suspend for human input, external events, or scheduled resumption, their state is persisted here to enable resumption after server restarts. |
| `scores` | Evaluation results from Mastra's evals system. Scores and metrics are persisted here for analysis and comparison over time. |
| `observability` | Telemetry data including traces and spans. Agent interactions, tool calls, and LLM requests generate spans collected into traces for debugging and performance analysis. |
| `agents` | Agent configurations for stored agents. Enables agents to be defined and updated at runtime without code deployments. |
## Usage

### Basic composition

Import domain classes directly from each store package and compose them:

```typescript title="src/mastra/index.ts"
import { MastraCompositeStore } from "@mastra/core/storage";
import { WorkflowsPG, ScoresPG } from "@mastra/pg";
import { MemoryLibSQL } from "@mastra/libsql";
import { Mastra } from "@mastra/core";

export const mastra = new Mastra({
  storage: new MastraCompositeStore({
    id: "composite",
    domains: {
      memory: new MemoryLibSQL({ url: "file:./local.db" }),
      workflows: new WorkflowsPG({ connectionString: process.env.DATABASE_URL }),
      scores: new ScoresPG({ connectionString: process.env.DATABASE_URL }),
    },
  }),
});
```

### With a default storage

Use `default` to specify a fallback storage, then override specific domains:

```typescript title="src/mastra/index.ts"
import { MastraCompositeStore } from "@mastra/core/storage";
import { PostgresStore } from "@mastra/pg";
import { MemoryLibSQL } from "@mastra/libsql";
import { Mastra } from "@mastra/core";

const pgStore = new PostgresStore({
  id: "pg",
  connectionString: process.env.DATABASE_URL,
});

export const mastra = new Mastra({
  storage: new MastraCompositeStore({
    id: "composite",
    default: pgStore,
    domains: {
      memory: new MemoryLibSQL({ url: "file:./local.db" }),
    },
  }),
});
```
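To make the fallback concrete, here is a small sketch. It assumes the composite instance above were held in a `storage` variable, and that the `getStore()` accessor shown under Initialization below accepts any configured domain name; resolution itself is handled by `MastraCompositeStore`:

```typescript
// "memory" is listed under `domains`, so it resolves to the MemoryLibSQL instance.
const memoryStore = await storage.getStore("memory");

// "workflows" has no entry under `domains`, so it falls back to pgStore.
const workflowsStore = await storage.getStore("workflows");
```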
## Options

| Option | Description |
|--------|-------------|
| `id` | Identifier for the composite store instance. |
| `domains` | Map from domain name (`memory`, `workflows`, `scores`, `observability`, `agents`) to the domain store that should handle it. |
| `default` | Optional fallback store used for any domain not listed in `domains`. |
## Initialization

`MastraCompositeStore` initializes each configured domain independently. When passed to the Mastra class, `init()` is called automatically:

```typescript title="src/mastra/index.ts"
import { MastraCompositeStore } from "@mastra/core/storage";
import { MemoryPG, WorkflowsPG, ScoresPG } from "@mastra/pg";
import { Mastra } from "@mastra/core";

const storage = new MastraCompositeStore({
  id: "composite",
  domains: {
    memory: new MemoryPG({ connectionString: process.env.DATABASE_URL }),
    workflows: new WorkflowsPG({ connectionString: process.env.DATABASE_URL }),
    scores: new ScoresPG({ connectionString: process.env.DATABASE_URL }),
  },
});

export const mastra = new Mastra({
  storage, // init() called automatically
});
```

If using storage directly, call `init()` explicitly:

```typescript
import { MastraCompositeStore } from "@mastra/core/storage";
import { MemoryPG } from "@mastra/pg";

const storage = new MastraCompositeStore({
  id: "composite",
  domains: {
    memory: new MemoryPG({ connectionString: process.env.DATABASE_URL }),
  },
});

await storage.init();

// Access domain-specific stores via getStore()
const memoryStore = await storage.getStore("memory");
const thread = await memoryStore?.getThreadById({ threadId: "..." });
```
## Use cases

### Separate databases for different workloads

Use a local database for development while keeping production data in a managed service:

```typescript
import { MastraCompositeStore } from "@mastra/core/storage";
import { MemoryPG, WorkflowsPG, ScoresPG } from "@mastra/pg";
import { MemoryLibSQL } from "@mastra/libsql";

const storage = new MastraCompositeStore({
  id: "composite",
  domains: {
    // Use local SQLite for development, PostgreSQL for production
    memory:
      process.env.NODE_ENV === "development"
        ? new MemoryLibSQL({ url: "file:./dev.db" })
        : new MemoryPG({ connectionString: process.env.DATABASE_URL }),
    workflows: new WorkflowsPG({ connectionString: process.env.DATABASE_URL }),
    scores: new ScoresPG({ connectionString: process.env.DATABASE_URL }),
  },
});
```

### Specialized storage for observability

Use a time-series database for traces while keeping other data in PostgreSQL:

```typescript
import { MastraCompositeStore } from "@mastra/core/storage";
import { MemoryPG, WorkflowsPG, ScoresPG } from "@mastra/pg";
import { ObservabilityStorageClickhouse } from "@mastra/clickhouse";

const storage = new MastraCompositeStore({
  id: "composite",
  domains: {
    memory: new MemoryPG({ connectionString: process.env.DATABASE_URL }),
    workflows: new WorkflowsPG({ connectionString: process.env.DATABASE_URL }),
    scores: new ScoresPG({ connectionString: process.env.DATABASE_URL }),
    observability: new ObservabilityStorageClickhouse({
      url: process.env.CLICKHOUSE_URL,
      username: process.env.CLICKHOUSE_USERNAME,
      password: process.env.CLICKHOUSE_PASSWORD,
    }),
  },
});
```
---

## Reference: DynamoDB Storage

> Documentation for the DynamoDB storage implementation in Mastra, using a single-table design with ElectroDB.

The DynamoDB storage implementation provides a scalable and performant NoSQL database solution for Mastra, leveraging a single-table design pattern with [ElectroDB](https://electrodb.dev/).

## Features

- Efficient single-table design for all Mastra storage needs
- Based on ElectroDB for type-safe DynamoDB access
- Support for AWS credentials, regions, and endpoints
- Compatible with AWS DynamoDB Local for development
- Stores Thread, Message, Trace, Eval, and Workflow data
- Optimized for serverless environments
- Configurable TTL (Time To Live) for automatic data expiration per entity type

## Installation

```bash
npm install @mastra/dynamodb@beta
# or
pnpm add @mastra/dynamodb@beta
# or
yarn add @mastra/dynamodb@beta
```

## Prerequisites

Before using this package, you **must** create a DynamoDB table with a specific structure, including primary keys and Global Secondary Indexes (GSIs). This adapter expects the DynamoDB table and its GSIs to be provisioned externally.

Detailed instructions for setting up the table using AWS CloudFormation or AWS CDK are available in [TABLE_SETUP.md](https://github.com/mastra-ai/mastra/blob/main/stores/dynamodb/TABLE_SETUP.md). Please ensure your table is configured according to those instructions before proceeding.
## Usage

### Basic Usage

```typescript
import { Memory } from "@mastra/memory";
import { DynamoDBStore } from "@mastra/dynamodb";

// Initialize the DynamoDB storage
const storage = new DynamoDBStore({
  id: "dynamodb", // Unique identifier for this storage instance
  config: {
    tableName: "mastra-single-table", // Name of your DynamoDB table
    region: "us-east-1", // Optional: AWS region, defaults to 'us-east-1'
    // endpoint: "http://localhost:8000", // Optional: For local DynamoDB
    // credentials: { accessKeyId: "YOUR_ACCESS_KEY", secretAccessKey: "YOUR_SECRET_KEY" } // Optional
  },
});

// Example: Initialize Memory with DynamoDB storage
const memory = new Memory({
  storage,
  options: {
    lastMessages: 10,
  },
});
```

### Local Development with DynamoDB Local

For local development, you can use [DynamoDB Local](https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/DynamoDBLocal.html).

1. **Run DynamoDB Local (e.g., using Docker):**

   ```bash
   docker run -p 8000:8000 amazon/dynamodb-local
   ```

2. **Configure `DynamoDBStore` to use the local endpoint:**

   ```typescript
   import { DynamoDBStore } from "@mastra/dynamodb";

   const storage = new DynamoDBStore({
     id: "dynamodb-local",
     config: {
       tableName: "mastra-single-table", // Ensure this table is created in your local DynamoDB
       region: "localhost", // Can be any string for local, 'localhost' is common
       endpoint: "http://localhost:8000",
       // For DynamoDB Local, credentials are not typically required unless configured.
       // If you've configured local credentials:
       // credentials: { accessKeyId: "fakeMyKeyId", secretAccessKey: "fakeSecretAccessKey" }
     },
   });
   ```
You will still need to create the table and GSIs in your local DynamoDB instance, for example, using the AWS CLI pointed to your local endpoint.
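As a rough sketch of what that table-creation step could look like with the AWS SDK, the script below targets DynamoDB Local. The key schema and index names used here (`pk`, `sk`, `gsi1`) are placeholders, not the adapter's actual schema; copy the real attribute and GSI definitions from [TABLE_SETUP.md](https://github.com/mastra-ai/mastra/blob/main/stores/dynamodb/TABLE_SETUP.md).

```typescript
import { DynamoDBClient, CreateTableCommand } from "@aws-sdk/client-dynamodb";

// Points at DynamoDB Local; region and credentials are arbitrary for the local emulator.
const client = new DynamoDBClient({
  region: "localhost",
  endpoint: "http://localhost:8000",
  credentials: { accessKeyId: "fakeMyKeyId", secretAccessKey: "fakeSecretAccessKey" },
});

await client.send(
  new CreateTableCommand({
    TableName: "mastra-single-table",
    BillingMode: "PAY_PER_REQUEST",
    // Placeholder key schema and GSI; use the definitions from TABLE_SETUP.md instead.
    AttributeDefinitions: [
      { AttributeName: "pk", AttributeType: "S" },
      { AttributeName: "sk", AttributeType: "S" },
      { AttributeName: "gsi1pk", AttributeType: "S" },
      { AttributeName: "gsi1sk", AttributeType: "S" },
    ],
    KeySchema: [
      { AttributeName: "pk", KeyType: "HASH" },
      { AttributeName: "sk", KeyType: "RANGE" },
    ],
    GlobalSecondaryIndexes: [
      {
        IndexName: "gsi1",
        KeySchema: [
          { AttributeName: "gsi1pk", KeyType: "HASH" },
          { AttributeName: "gsi1sk", KeyType: "RANGE" },
        ],
        Projection: { ProjectionType: "ALL" },
      },
    ],
  }),
);
```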
## Parameters

| Parameter | Description |
|-----------|-------------|
| `id` | Unique identifier for this storage instance. |
| `config.tableName` | Name of your DynamoDB table. |
| `config.region` | Optional AWS region; defaults to `us-east-1`. |
| `config.endpoint` | Optional custom endpoint, for example for DynamoDB Local. |
| `config.credentials` | Optional AWS credentials (`accessKeyId`, `secretAccessKey`). |
| `config.ttl` | Optional per-entity TTL configuration (see below). |
## TTL (Time To Live) Configuration

DynamoDB TTL allows you to automatically delete items after a specified time period. This is useful for:

- **Cost optimization**: Automatically remove old data to reduce storage costs
- **Data lifecycle management**: Implement retention policies for compliance
- **Performance**: Prevent tables from growing indefinitely
- **Privacy compliance**: Automatically purge personal data after specified periods

### Enabling TTL

To use TTL, you must:

1. **Configure TTL in DynamoDBStore** (shown below)
2. **Enable TTL on your DynamoDB table** via AWS Console or CLI, specifying the attribute name (default: `ttl`)

```typescript
import { DynamoDBStore } from "@mastra/dynamodb";

const storage = new DynamoDBStore({
  id: "dynamodb",
  config: {
    tableName: "mastra-single-table",
    region: "us-east-1",
    ttl: {
      // Messages expire after 30 days
      message: {
        enabled: true,
        defaultTtlSeconds: 30 * 24 * 60 * 60, // 30 days
      },
      // Threads expire after 90 days
      thread: {
        enabled: true,
        defaultTtlSeconds: 90 * 24 * 60 * 60, // 90 days
      },
      // Traces expire after 7 days with custom attribute name
      trace: {
        enabled: true,
        attributeName: "expiresAt", // Custom TTL attribute
        defaultTtlSeconds: 7 * 24 * 60 * 60, // 7 days
      },
      // Workflow snapshots don't expire
      workflow_snapshot: {
        enabled: false,
      },
    },
  },
});
```
### Supported Entity Types

TTL can be configured for these entity types:

| Entity | Description |
|--------|-------------|
| `thread` | Conversation threads |
| `message` | Messages within threads |
| `trace` | Observability traces |
| `eval` | Evaluation results |
| `workflow_snapshot` | Workflow state snapshots |
| `resource` | User/resource data |
| `score` | Scoring results |
### TTL Entity Configuration

Each entity type accepts the following configuration:

| Field | Description |
|-------|-------------|
| `enabled` | Whether TTL is applied to items of this entity type. |
| `defaultTtlSeconds` | Lifetime in seconds used to compute the item's expiration timestamp. |
| `attributeName` | Name of the TTL attribute on the item; defaults to `ttl`. |
### Enabling TTL on Your DynamoDB Table

After configuring TTL in your code, you must enable TTL on the DynamoDB table itself:

**Using AWS CLI:**

```bash
aws dynamodb update-time-to-live \
  --table-name mastra-single-table \
  --time-to-live-specification "Enabled=true, AttributeName=ttl"
```

**Using AWS Console:**

1. Go to the DynamoDB console
2. Select your table
3. Go to "Additional settings" tab
4. Under "Time to Live (TTL)", click "Manage TTL"
5. Enable TTL and specify the attribute name (default: `ttl`)

> **Note**: DynamoDB deletes expired items within 48 hours after expiration. Items remain queryable until actually deleted.
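For context, a DynamoDB TTL attribute holds a Unix timestamp in seconds that the service compares against the current time. The snippet below is illustrative only (the adapter writes and manages these attributes itself); it shows roughly how `defaultTtlSeconds` maps to such a value, and how expired-but-not-yet-deleted items can be excluded when querying the table directly:

```typescript
// Illustrative only; the adapter manages TTL attributes itself.
const nowSeconds = Math.floor(Date.now() / 1000);

// An item written now with a defaultTtlSeconds of 30 days would carry roughly:
const ttl = nowSeconds + 30 * 24 * 60 * 60; // Unix epoch seconds

// Because deletion can lag expiration by up to ~48 hours, direct queries against
// the table can exclude expired items with a filter (DocumentClient-style values):
const filter = {
  FilterExpression: "attribute_not_exists(#ttl) OR #ttl > :now",
  ExpressionAttributeNames: { "#ttl": "ttl" },
  ExpressionAttributeValues: { ":now": nowSeconds },
};
```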
## AWS IAM Permissions

The IAM role or user executing the code needs appropriate permissions to interact with the specified DynamoDB table and its indexes. Below is a sample policy. Replace `${YOUR_TABLE_NAME}` with your actual table name and `${YOUR_AWS_REGION}` and `${YOUR_AWS_ACCOUNT_ID}` with appropriate values.

```json
{
  "Version": "2012-10-17",
  "Statement": [
    {
      "Effect": "Allow",
      "Action": [
        "dynamodb:DescribeTable",
        "dynamodb:GetItem",
        "dynamodb:PutItem",
        "dynamodb:UpdateItem",
        "dynamodb:DeleteItem",
        "dynamodb:Query",
        "dynamodb:Scan",
        "dynamodb:BatchGetItem",
        "dynamodb:BatchWriteItem"
      ],
      "Resource": [
        "arn:aws:dynamodb:${YOUR_AWS_REGION}:${YOUR_AWS_ACCOUNT_ID}:table/${YOUR_TABLE_NAME}",
        "arn:aws:dynamodb:${YOUR_AWS_REGION}:${YOUR_AWS_ACCOUNT_ID}:table/${YOUR_TABLE_NAME}/index/*"
      ]
    }
  ]
}
```

## Key Considerations

Before diving into the architectural details, keep these key points in mind when working with the DynamoDB storage adapter:

- **External Table Provisioning:** This adapter _requires_ you to create and configure the DynamoDB table and its Global Secondary Indexes (GSIs) yourself, prior to using the adapter. Follow the guide in [TABLE_SETUP.md](https://github.com/mastra-ai/mastra/blob/main/stores/dynamodb/TABLE_SETUP.md).
- **Single-Table Design:** All Mastra data (threads, messages, etc.) is stored in one DynamoDB table. This is a deliberate design choice optimized for DynamoDB, differing from relational database approaches.
- **Understanding GSIs:** Familiarity with how the GSIs are structured (as per `TABLE_SETUP.md`) is important for understanding data retrieval and potential query patterns.
- **ElectroDB:** The adapter uses ElectroDB to manage interactions with DynamoDB, providing a layer of abstraction and type safety over raw DynamoDB operations.

## Architectural Approach

This storage adapter utilizes a **single-table design pattern** leveraging [ElectroDB](https://electrodb.dev/), a common and recommended approach for DynamoDB. This differs architecturally from relational database adapters (like `@mastra/pg` or `@mastra/libsql`) that typically use multiple tables, each dedicated to a specific entity (threads, messages, etc.).

Key aspects of this approach:

- **DynamoDB Native:** The single-table design is optimized for DynamoDB's key-value and query capabilities, often leading to better performance and scalability compared to mimicking relational models.
- **External Table Management:** Unlike some adapters that might offer helper functions to create tables via code, this adapter **expects the DynamoDB table and its associated Global Secondary Indexes (GSIs) to be provisioned externally** before use. Please refer to [TABLE_SETUP.md](https://github.com/mastra-ai/mastra/blob/main/stores/dynamodb/TABLE_SETUP.md) for detailed instructions using tools like AWS CloudFormation or CDK. The adapter focuses solely on interacting with the pre-existing table structure.
- **Consistency via Interface:** While the underlying storage model differs, this adapter adheres to the same `MastraStorage` interface as other adapters, ensuring it can be used interchangeably within the Mastra `Memory` component.

### Mastra Data in the Single Table

Within the single DynamoDB table, different Mastra data entities (such as Threads, Messages, Traces, Evals, and Workflows) are managed and distinguished using ElectroDB. ElectroDB defines specific models for each entity type, which include unique key structures and attributes. This allows the adapter to store and retrieve diverse data types efficiently within the same table.

For example, a `Thread` item might have a primary key like `THREAD#<threadId>`, while a `Message` item belonging to that thread might use `THREAD#<threadId>` as a partition key and `MESSAGE#<messageId>` as a sort key. The Global Secondary Indexes (GSIs), detailed in `TABLE_SETUP.md`, are strategically designed to support common access patterns across these different entities, such as fetching all messages for a thread or querying traces associated with a particular workflow.
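Purely to illustrate that layout, two items for the same thread could be shaped roughly as follows. The attribute names here are placeholders rather than the adapter's actual ElectroDB-generated schema, which is defined by the entity models and `TABLE_SETUP.md`:

```typescript
// Hypothetical item shapes; attribute names are placeholders, not the real schema.
const threadItem = {
  pk: "THREAD#thread_123",
  sk: "THREAD#thread_123",
  entity: "thread",
  title: "Support conversation",
};

const messageItem = {
  pk: "THREAD#thread_123", // same partition key co-locates the thread's messages
  sk: "MESSAGE#msg_456", // sort key distinguishes individual messages
  entity: "message",
  role: "user",
  content: "Hello!",
};
```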
### Advantages of Single-Table Design

This implementation uses a single-table design pattern with ElectroDB, which offers several advantages within the context of DynamoDB:

1. **Lower cost (potentially):** Fewer tables can simplify Read/Write Capacity Unit (RCU/WCU) provisioning and management, especially with on-demand capacity.
2. **Better performance:** Related data can be co-located or accessed efficiently through GSIs, enabling fast lookups for common access patterns.
3. **Simplified administration:** Fewer distinct tables to monitor, back up, and manage.
4. **Reduced complexity in access patterns:** ElectroDB helps manage the complexity of item types and access patterns on a single table.
5. **Transaction support:** DynamoDB transactions can be used across different "entity" types stored within the same table if needed.

---
## Reference: libSQL Storage

> Documentation for the libSQL storage implementation in Mastra.

[libSQL](https://docs.turso.tech/libsql) is an open-source, SQLite-compatible database that supports both local and remote deployments. It can be used to store message history, workflow snapshots, traces, and eval scores.

For vector storage used by features like semantic recall or traditional RAG, use [libSQL Vector](https://mastra.ai/reference/v1/vectors/libsql), which covers embeddings and vector search.

## Installation

Storage providers must be installed as separate packages:

```bash
npm install @mastra/libsql@beta
```

## Usage

```typescript
import { LibSQLStore } from "@mastra/libsql";
import { Mastra } from "@mastra/core";

const mastra = new Mastra({
  storage: new LibSQLStore({
    id: "libsql-storage",
    url: "file:./storage.db",
  }),
});
```

Agent-level file storage:

```typescript
import { Memory } from "@mastra/memory";
import { Agent } from "@mastra/core/agent";
import { LibSQLStore } from "@mastra/libsql";

export const agent = new Agent({
  id: "example-agent",
  memory: new Memory({
    storage: new LibSQLStore({
      id: "libsql-storage",
      url: "file:./agent.db",
    }),
  }),
});
```
> **Note:**
> File storage doesn't work with serverless platforms that have ephemeral file systems. For serverless deployments, use [Turso](https://turso.tech) or a different database engine.

Production with a remote database:

```typescript
storage: new LibSQLStore({
  id: "libsql-storage",
  url: "libsql://your-db-name.aws-ap-northeast-1.turso.io",
  authToken: process.env.TURSO_AUTH_TOKEN,
})
```

For local development and testing, you can store data in memory:

```typescript
storage: new LibSQLStore({
  id: "libsql-storage",
  url: ":memory:",
})
```

> **Note:**
> In-memory storage is reset whenever the process restarts, so it's only suitable for development and testing.
## Options

| Option | Description |
|--------|-------------|
| `id` | Identifier for this storage instance. |
| `url` | Database location: a local file (`file:./storage.db`), `:memory:`, or a remote `libsql://` URL. |
| `authToken` | Auth token for remote databases such as Turso. |
## Initialization

When you pass storage to the Mastra class, `init()` is called automatically to create the [core schema](https://mastra.ai/reference/v1/storage/overview#core-schema):

```typescript
import { Mastra } from "@mastra/core";
import { LibSQLStore } from "@mastra/libsql";

const storage = new LibSQLStore({
  id: "libsql-storage",
  url: "file:./storage.db",
});

const mastra = new Mastra({
  storage, // init() called automatically
});
```

If using storage directly without Mastra, call `init()` explicitly:

```typescript
import { LibSQLStore } from "@mastra/libsql";

const storage = new LibSQLStore({
  id: "libsql-storage",
  url: "file:./storage.db",
});

await storage.init();

// Access domain-specific stores via getStore()
const memoryStore = await storage.getStore("memory");
const thread = await memoryStore?.getThreadById({ threadId: "..." });
```