@mastra/pg 1.0.0-beta.12 → 1.0.0-beta.13

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -5,27 +5,27 @@
 
 ---
 
-## Reference: Storage Composition
+## Reference: Composite Storage
 
 > Documentation for combining multiple storage backends in Mastra.
 
-MastraStorage can compose storage domains from different adapters. Use it when you need different databases for different purposes. For example, use LibSQL for memory and PostgreSQL for workflows.
+`MastraStorage` can compose storage domains from different providers. Use it when you need different databases for different purposes. For example, use LibSQL for memory and PostgreSQL for workflows.
 
 ## Installation
 
-MastraStorage is included in `@mastra/core`:
+`MastraStorage` is included in `@mastra/core`:
 
 ```bash
 npm install @mastra/core@beta
 ```
 
-You'll also need to install the storage adapters you want to compose:
+You'll also need to install the storage providers you want to compose:
 
 ```bash
 npm install @mastra/pg@beta @mastra/libsql@beta
 ```
 
-## Storage Domains
+## Storage domains
 
 Mastra organizes storage into five specialized domains, each handling a specific type of data. Each domain can be backed by a different storage adapter, and domain classes are exported from each storage package.
 
@@ -43,13 +43,13 @@ Mastra organizes storage into five specialized domains, each handling a specific
 
 Import domain classes directly from each store package and compose them:
 
-```typescript
+```typescript title="src/mastra/index.ts"
 import { MastraStorage } from "@mastra/core/storage";
 import { WorkflowsPG, ScoresPG } from "@mastra/pg";
 import { MemoryLibSQL } from "@mastra/libsql";
 import { Mastra } from "@mastra/core";
 
-const mastra = new Mastra({
+export const mastra = new Mastra({
   storage: new MastraStorage({
     id: "composite",
     domains: {
@@ -65,7 +65,7 @@ const mastra = new Mastra({
 
 Use `default` to specify a fallback storage, then override specific domains:
 
-```typescript
+```typescript title="src/mastra/index.ts"
 import { MastraStorage } from "@mastra/core/storage";
 import { PostgresStore } from "@mastra/pg";
 import { MemoryLibSQL } from "@mastra/libsql";
@@ -76,7 +76,7 @@ const pgStore = new PostgresStore({
   connectionString: process.env.DATABASE_URL,
 });
 
-const mastra = new Mastra({
+export const mastra = new Mastra({
   storage: new MastraStorage({
     id: "composite",
     default: pgStore,
@@ -91,9 +91,9 @@ const mastra = new Mastra({
 
 ## Initialization
 
-MastraStorage initializes each configured domain independently. When passed to the Mastra class, `init()` is called automatically:
+`MastraStorage` initializes each configured domain independently. When passed to the Mastra class, `init()` is called automatically:
 
-```typescript
+```typescript title="src/mastra/index.ts"
 import { MastraStorage } from "@mastra/core/storage";
 import { MemoryPG, WorkflowsPG, ScoresPG } from "@mastra/pg";
 import { Mastra } from "@mastra/core";
@@ -107,7 +107,7 @@ const storage = new MastraStorage({
   },
 });
 
-const mastra = new Mastra({
+export const mastra = new Mastra({
   storage, // init() called automatically
 });
 ```
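+
+If you construct `MastraStorage` without passing it to `Mastra`, you can presumably run the same setup step yourself. A minimal sketch, assuming `init()` is public and awaitable (the automatic call described above implies as much):
+
+```typescript
+// Hypothetical manual initialization, mirroring what the Mastra class does.
+await storage.init();
+```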
@@ -132,7 +132,7 @@ const memoryStore = await storage.getStore("memory");
 const thread = await memoryStore?.getThreadById({ threadId: "..." });
 ```
 
-## Use Cases
+## Use cases
 
 ### Separate databases for different workloads
 
@@ -197,6 +197,7 @@ The DynamoDB storage implementation provides a scalable and performant NoSQL dat
 - Compatible with AWS DynamoDB Local for development
 - Stores Thread, Message, Trace, Eval, and Workflow data
 - Optimized for serverless environments
+- Configurable TTL (Time To Live) for automatic data expiration per entity type
 
 ## Installation
 
@@ -224,7 +225,7 @@ import { DynamoDBStore } from "@mastra/dynamodb";
 
 // Initialize the DynamoDB storage
 const storage = new DynamoDBStore({
-  name: "dynamodb", // A name for this storage instance
+  id: "dynamodb", // Unique identifier for this storage instance
   config: {
     tableName: "mastra-single-table", // Name of your DynamoDB table
     region: "us-east-1", // Optional: AWS region, defaults to 'us-east-1'
@@ -258,7 +259,7 @@ For local development, you can use [DynamoDB Local](https://docs.aws.amazon.com/
 import { DynamoDBStore } from "@mastra/dynamodb";
 
 const storage = new DynamoDBStore({
-  name: "dynamodb-local",
+  id: "dynamodb-local",
   config: {
     tableName: "mastra-single-table", // Ensure this table is created in your local DynamoDB
     region: "localhost", // Can be any string for local, 'localhost' is common
@@ -274,6 +275,96 @@ For local development, you can use [DynamoDB Local](https://docs.aws.amazon.com/
 
 ## Parameters
 
+## TTL (Time To Live) Configuration
+
+DynamoDB TTL allows you to automatically delete items after a specified time period. This is useful for:
+
+- **Cost optimization**: Automatically remove old data to reduce storage costs
+- **Data lifecycle management**: Implement retention policies for compliance
+- **Performance**: Prevent tables from growing indefinitely
+- **Privacy compliance**: Automatically purge personal data after specified periods
+
+### Enabling TTL
+
+To use TTL, you must:
+
+1. **Configure TTL in DynamoDBStore** (shown below)
+2. **Enable TTL on your DynamoDB table** via AWS Console or CLI, specifying the attribute name (default: `ttl`)
+
+```typescript
+import { DynamoDBStore } from "@mastra/dynamodb";
+
+const storage = new DynamoDBStore({
+  id: "dynamodb",
+  config: {
+    tableName: "mastra-single-table",
+    region: "us-east-1",
+    ttl: {
+      // Messages expire after 30 days
+      message: {
+        enabled: true,
+        defaultTtlSeconds: 30 * 24 * 60 * 60, // 30 days
+      },
+      // Threads expire after 90 days
+      thread: {
+        enabled: true,
+        defaultTtlSeconds: 90 * 24 * 60 * 60, // 90 days
+      },
+      // Traces expire after 7 days with custom attribute name
+      trace: {
+        enabled: true,
+        attributeName: "expiresAt", // Custom TTL attribute
+        defaultTtlSeconds: 7 * 24 * 60 * 60, // 7 days
+      },
+      // Workflow snapshots don't expire
+      workflow_snapshot: {
+        enabled: false,
+      },
+    },
+  },
+});
+```
+
+### Supported Entity Types
+
+TTL can be configured for these entity types:
+
+| Entity | Description |
+|--------|-------------|
+| `thread` | Conversation threads |
+| `message` | Messages within threads |
+| `trace` | Observability traces |
+| `eval` | Evaluation results |
+| `workflow_snapshot` | Workflow state snapshots |
+| `resource` | User/resource data |
+| `score` | Scoring results |
+
+### TTL Entity Configuration
+
+Each entity type accepts the following configuration:
+
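+The example above sets three fields per entity. A minimal sketch of that shape, inferred from the example (the type name `TtlEntityConfig` is illustrative, not a documented export of `@mastra/dynamodb`):
+
+```typescript
+// Illustrative shape only, derived from the configuration example above.
+interface TtlEntityConfig {
+  enabled: boolean; // turn TTL on or off for this entity type
+  attributeName?: string; // TTL attribute written to items (default: "ttl")
+  defaultTtlSeconds?: number; // lifetime applied to new items, in seconds
+}
+```
+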
+### Enabling TTL on Your DynamoDB Table
+
+After configuring TTL in your code, you must enable TTL on the DynamoDB table itself:
+
+**Using AWS CLI:**
+
+```bash
+aws dynamodb update-time-to-live \
+  --table-name mastra-single-table \
+  --time-to-live-specification "Enabled=true, AttributeName=ttl"
+```
+
+**Using AWS Console:**
+
+1. Go to the DynamoDB console
+2. Select your table
+3. Go to "Additional settings" tab
+4. Under "Time to Live (TTL)", click "Manage TTL"
+5. Enable TTL and specify the attribute name (default: `ttl`)
+
+> **Note**: DynamoDB deletes expired items within 48 hours after expiration. Items remain queryable until actually deleted.
+
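+Since DynamoDB TTL attributes hold Unix epoch timestamps in seconds, the value written to an item would presumably be derived along these lines (a sketch, not the package's actual code):
+
+```typescript
+// Sketch: deriving a TTL attribute value from defaultTtlSeconds.
+const defaultTtlSeconds = 30 * 24 * 60 * 60; // 30 days, as in the example above
+const expiresAtEpochSeconds = Math.floor(Date.now() / 1000) + defaultTtlSeconds;
+```
+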
 ## AWS IAM Permissions
 
 The IAM role or user executing the code needs appropriate permissions to interact with the specified DynamoDB table and its indexes. Below is a sample policy. Replace `${YOUR_TABLE_NAME}` with your actual table name and `${YOUR_AWS_REGION}` and `${YOUR_AWS_ACCOUNT_ID}` with appropriate values.
@@ -431,6 +522,7 @@ import { Mastra } from "@mastra/core";
 import { PostgresStore } from "@mastra/pg";
 
 const storage = new PostgresStore({
+  id: 'pg-storage',
   connectionString: process.env.DATABASE_URL,
 });
 
@@ -477,6 +569,75 @@ This enables direct queries and custom transaction management. When using these
 
 This approach is intended for advanced scenarios where low-level access is required.
 
+### Using with Next.js
+
+When using `PostgresStore` in Next.js applications, [Hot Module Replacement (HMR)](https://nextjs.org/docs/architecture/fast-refresh) during development can cause multiple storage instances to be created, resulting in this warning:
+
+```
+WARNING: Creating a duplicate database object for the same connection.
+```
+
+To prevent this, store the `PostgresStore` instance on the global object so it persists across HMR reloads:
+
+```typescript title="src/mastra/storage.ts"
+import { PostgresStore } from "@mastra/pg";
+import { Memory } from "@mastra/memory";
+
+// Extend the global type to include our instances
+declare global {
+  var pgStore: PostgresStore | undefined;
+  var memory: Memory | undefined;
+}
+
+// Get or create the PostgresStore instance
+function getPgStore(): PostgresStore {
+  if (!global.pgStore) {
+    if (!process.env.DATABASE_URL) {
+      throw new Error("DATABASE_URL is not defined in environment variables");
+    }
+    global.pgStore = new PostgresStore({
+      id: "pg-storage",
+      connectionString: process.env.DATABASE_URL,
+      ssl:
+        process.env.DATABASE_SSL === "true"
+          ? { rejectUnauthorized: false }
+          : false,
+    });
+  }
+  return global.pgStore;
+}
+
+// Get or create the Memory instance
+function getMemory(): Memory {
+  if (!global.memory) {
+    global.memory = new Memory({
+      storage: getPgStore(),
+    });
+  }
+  return global.memory;
+}
+
+export const storage = getPgStore();
+export const memory = getMemory();
+```
+
+Then use the exported instances in your Mastra configuration:
+
+```typescript title="src/mastra/index.ts"
+import { Mastra } from "@mastra/core/mastra";
+import { storage } from "./storage";
+
+export const mastra = new Mastra({
+  storage,
+  // ...other config
+});
+```
+
+This pattern ensures only one `PostgresStore` instance is created regardless of how many times the module is reloaded during development. The same pattern can be applied to other storage providers like `LibSQLStore`.
+
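+As a sketch, here is the same pattern applied to `LibSQLStore` (the constructor options are assumed by analogy with the `PostgresStore` example above; check the LibSQL provider docs for the exact fields):
+
+```typescript title="src/mastra/storage.ts"
+import { LibSQLStore } from "@mastra/libsql";
+
+declare global {
+  var libsqlStore: LibSQLStore | undefined;
+}
+
+// Get or create the LibSQLStore instance, surviving HMR reloads
+export function getLibSQLStore(): LibSQLStore {
+  if (!global.libsqlStore) {
+    global.libsqlStore = new LibSQLStore({
+      id: "libsql-storage", // hypothetical id, mirroring the pg example
+      url: process.env.DATABASE_URL ?? "file:../mastra.db", // assumed option
+    });
+  }
+  return global.libsqlStore;
+}
+```
+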
+> **Note:**
+> This singleton pattern is only necessary during local development with HMR. In production builds, modules are only loaded once.
+
 ## Usage Example
 
 ### Adding memory to an agent
package/dist/index.cjs CHANGED
@@ -2102,7 +2102,7 @@ var PgDB = class extends base.MastraBase {
       SELECT 1 FROM information_schema.tables
       WHERE table_schema = $1 AND table_name = $2
     )`,
-    [this.schemaName || "mastra", tableName]
+    [this.schemaName || "public", tableName]
   );
   if (tableExists?.exists) {
     await this.client.none(`TRUNCATE TABLE ${tableNameWithSchema} CASCADE`);
@@ -4757,6 +4757,11 @@ var ScoresPG = class _ScoresPG extends storage.ScoresStorage {
   }
   async init() {
     await this.#db.createTable({ tableName: storage.TABLE_SCORERS, schema: storage.TABLE_SCHEMAS[storage.TABLE_SCORERS] });
+    await this.#db.alterTable({
+      tableName: storage.TABLE_SCORERS,
+      schema: storage.TABLE_SCHEMAS[storage.TABLE_SCORERS],
+      ifNotExists: ["spanId", "requestContext"]
+    });
     await this.createDefaultIndexes();
     await this.createCustomIndexes();
   }
@@ -5108,23 +5113,8 @@ function getTableName5({ indexName, schemaName }) {
   const quotedIndexName = `"${indexName}"`;
   return schemaName ? `${schemaName}.${quotedIndexName}` : quotedIndexName;
 }
-function parseWorkflowRun(row) {
-  let parsedSnapshot = row.snapshot;
-  if (typeof parsedSnapshot === "string") {
-    try {
-      parsedSnapshot = JSON.parse(row.snapshot);
-    } catch (e) {
-      console.warn(`Failed to parse snapshot for workflow ${row.workflow_name}: ${e}`);
-    }
-  }
-  return {
-    workflowName: row.workflow_name,
-    runId: row.run_id,
-    snapshot: parsedSnapshot,
-    resourceId: row.resourceId,
-    createdAt: new Date(row.createdAtZ || row.createdAt),
-    updatedAt: new Date(row.updatedAtZ || row.updatedAt)
-  };
+function sanitizeJsonForPg(jsonString) {
+  return jsonString.replace(/\\u(0000|[Dd][89A-Fa-f][0-9A-Fa-f]{2})/g, "");
 }
 var WorkflowsPG = class _WorkflowsPG extends storage.WorkflowsStorage {
   #db;
@@ -5141,6 +5131,24 @@ var WorkflowsPG = class _WorkflowsPG extends storage.WorkflowsStorage {
     this.#skipDefaultIndexes = skipDefaultIndexes;
     this.#indexes = indexes?.filter((idx) => _WorkflowsPG.MANAGED_TABLES.includes(idx.table));
   }
+  parseWorkflowRun(row) {
+    let parsedSnapshot = row.snapshot;
+    if (typeof parsedSnapshot === "string") {
+      try {
+        parsedSnapshot = JSON.parse(row.snapshot);
+      } catch (e) {
+        this.logger.warn(`Failed to parse snapshot for workflow ${row.workflow_name}: ${e}`);
+      }
+    }
+    return {
+      workflowName: row.workflow_name,
+      runId: row.run_id,
+      snapshot: parsedSnapshot,
+      resourceId: row.resourceId,
+      createdAt: new Date(row.createdAtZ || row.createdAt),
+      updatedAt: new Date(row.updatedAtZ || row.updatedAt)
+    };
+  }
   /**
    * Returns default index definitions for the workflows domain tables.
    * Currently no default indexes are defined for workflows.
@@ -5213,12 +5221,13 @@ var WorkflowsPG = class _WorkflowsPG extends storage.WorkflowsStorage {
       const now = /* @__PURE__ */ new Date();
       const createdAtValue = createdAt ? createdAt : now;
       const updatedAtValue = updatedAt ? updatedAt : now;
+      const sanitizedSnapshot = sanitizeJsonForPg(JSON.stringify(snapshot));
       await this.#db.client.none(
         `INSERT INTO ${getTableName5({ indexName: storage.TABLE_WORKFLOW_SNAPSHOT, schemaName: getSchemaName5(this.#schema) })} (workflow_name, run_id, "resourceId", snapshot, "createdAt", "updatedAt")
         VALUES ($1, $2, $3, $4, $5, $6)
         ON CONFLICT (workflow_name, run_id) DO UPDATE
         SET "resourceId" = $3, snapshot = $4, "updatedAt" = $6`,
-        [workflowName, runId, resourceId, JSON.stringify(snapshot), createdAtValue, updatedAtValue]
+        [workflowName, runId, resourceId, sanitizedSnapshot, createdAtValue, updatedAtValue]
       );
     } catch (error$1) {
       throw new error.MastraError(
@@ -5281,7 +5290,7 @@ var WorkflowsPG = class _WorkflowsPG extends storage.WorkflowsStorage {
       if (!result) {
        return null;
      }
-      return parseWorkflowRun(result);
+      return this.parseWorkflowRun(result);
    } catch (error$1) {
      throw new error.MastraError(
        {
@@ -5337,7 +5346,9 @@ var WorkflowsPG = class _WorkflowsPG extends storage.WorkflowsStorage {
       paramIndex++;
     }
     if (status) {
-      conditions.push(`snapshot::jsonb ->> 'status' = $${paramIndex}`);
+      conditions.push(
+        `regexp_replace(snapshot::text, '\\\\u(0000|[Dd][89A-Fa-f][0-9A-Fa-f]{2})', '', 'g')::jsonb ->> 'status' = $${paramIndex}`
+      );
       values.push(status);
       paramIndex++;
     }
@@ -5382,7 +5393,7 @@ var WorkflowsPG = class _WorkflowsPG extends storage.WorkflowsStorage {
       const queryValues = usePagination ? [...values, normalizedPerPage, offset] : values;
       const result = await this.#db.client.manyOrNone(query, queryValues);
       const runs = (result || []).map((row) => {
-        return parseWorkflowRun(row);
+        return this.parseWorkflowRun(row);
       });
       return { runs, total: total || runs.length };
     } catch (error$1) {
@@ -5415,7 +5426,7 @@ var PostgresStore = class extends storage.MastraStorage {
     try {
       validateConfig("PostgresStore", config);
       super({ id: config.id, name: "PostgresStore", disableInit: config.disableInit });
-      this.schema = config.schemaName || "public";
+      this.schema = utils.parseSqlIdentifier(config.schemaName || "public", "schema name");
       if (isPoolConfig(config)) {
        this.#pool = config.pool;
        this.#ownsPool = false;