@mastra/pg 1.0.0-beta.12 → 1.0.0-beta.14

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -5,27 +5,27 @@
  
  ---
  
- ## Reference: Storage Composition
+ ## Reference: Composite Storage
  
  > Documentation for combining multiple storage backends in Mastra.
  
- MastraStorage can compose storage domains from different adapters. Use it when you need different databases for different purposes. For example, use LibSQL for memory and PostgreSQL for workflows.
+ `MastraStorage` can compose storage domains from different providers. Use it when you need different databases for different purposes. For example, use LibSQL for memory and PostgreSQL for workflows.
  
  ## Installation
  
- MastraStorage is included in `@mastra/core`:
+ `MastraStorage` is included in `@mastra/core`:
  
  ```bash
  npm install @mastra/core@beta
  ```
  
- You'll also need to install the storage adapters you want to compose:
+ You'll also need to install the storage providers you want to compose:
  
  ```bash
  npm install @mastra/pg@beta @mastra/libsql@beta
  ```
  
- ## Storage Domains
+ ## Storage domains
  
  Mastra organizes storage into five specialized domains, each handling a specific type of data. Each domain can be backed by a different storage adapter, and domain classes are exported from each storage package.
  
@@ -43,13 +43,13 @@ Mastra organizes storage into five specialized domains, each handling a specific
  
  Import domain classes directly from each store package and compose them:
  
- ```typescript
+ ```typescript title="src/mastra/index.ts"
  import { MastraStorage } from "@mastra/core/storage";
  import { WorkflowsPG, ScoresPG } from "@mastra/pg";
  import { MemoryLibSQL } from "@mastra/libsql";
  import { Mastra } from "@mastra/core";
  
- const mastra = new Mastra({
+ export const mastra = new Mastra({
    storage: new MastraStorage({
      id: "composite",
      domains: {
@@ -65,7 +65,7 @@ const mastra = new Mastra({
  
  Use `default` to specify a fallback storage, then override specific domains:
  
- ```typescript
+ ```typescript title="src/mastra/index.ts"
  import { MastraStorage } from "@mastra/core/storage";
  import { PostgresStore } from "@mastra/pg";
  import { MemoryLibSQL } from "@mastra/libsql";
@@ -76,7 +76,7 @@ const pgStore = new PostgresStore({
    connectionString: process.env.DATABASE_URL,
  });
  
- const mastra = new Mastra({
+ export const mastra = new Mastra({
    storage: new MastraStorage({
      id: "composite",
      default: pgStore,
@@ -91,9 +91,9 @@ const mastra = new Mastra({
  
  ## Initialization
  
- MastraStorage initializes each configured domain independently. When passed to the Mastra class, `init()` is called automatically:
+ `MastraStorage` initializes each configured domain independently. When passed to the Mastra class, `init()` is called automatically:
  
- ```typescript
+ ```typescript title="src/mastra/index.ts"
  import { MastraStorage } from "@mastra/core/storage";
  import { MemoryPG, WorkflowsPG, ScoresPG } from "@mastra/pg";
  import { Mastra } from "@mastra/core";
@@ -107,7 +107,7 @@ const storage = new MastraStorage({
    },
  });
  
- const mastra = new Mastra({
+ export const mastra = new Mastra({
    storage, // init() called automatically
  });
  ```
@@ -132,7 +132,7 @@ const memoryStore = await storage.getStore("memory");
  const thread = await memoryStore?.getThreadById({ threadId: "..." });
  ```
  
- ## Use Cases
+ ## Use cases
  
  ### Separate databases for different workloads
  
@@ -197,6 +197,7 @@ The DynamoDB storage implementation provides a scalable and performant NoSQL dat
  - Compatible with AWS DynamoDB Local for development
  - Stores Thread, Message, Trace, Eval, and Workflow data
  - Optimized for serverless environments
+ - Configurable TTL (Time To Live) for automatic data expiration per entity type
  
  ## Installation
  
@@ -224,7 +225,7 @@ import { DynamoDBStore } from "@mastra/dynamodb";
  
  // Initialize the DynamoDB storage
  const storage = new DynamoDBStore({
-   name: "dynamodb", // A name for this storage instance
+   id: "dynamodb", // Unique identifier for this storage instance
    config: {
      tableName: "mastra-single-table", // Name of your DynamoDB table
      region: "us-east-1", // Optional: AWS region, defaults to 'us-east-1'
@@ -258,7 +259,7 @@ For local development, you can use [DynamoDB Local](https://docs.aws.amazon.com/
  import { DynamoDBStore } from "@mastra/dynamodb";
  
  const storage = new DynamoDBStore({
-   name: "dynamodb-local",
+   id: "dynamodb-local",
    config: {
      tableName: "mastra-single-table", // Ensure this table is created in your local DynamoDB
      region: "localhost", // Can be any string for local, 'localhost' is common
@@ -274,6 +275,96 @@ For local development, you can use [DynamoDB Local](https://docs.aws.amazon.com/
  
  ## Parameters
  
+ ## TTL (Time To Live) Configuration
+
+ DynamoDB TTL allows you to automatically delete items after a specified time period. This is useful for:
+
+ - **Cost optimization**: Automatically remove old data to reduce storage costs
+ - **Data lifecycle management**: Implement retention policies for compliance
+ - **Performance**: Prevent tables from growing indefinitely
+ - **Privacy compliance**: Automatically purge personal data after specified periods
+
+ ### Enabling TTL
+
+ To use TTL, you must:
+
+ 1. **Configure TTL in DynamoDBStore** (shown below)
+ 2. **Enable TTL on your DynamoDB table** via AWS Console or CLI, specifying the attribute name (default: `ttl`)
+
+ ```typescript
+ import { DynamoDBStore } from "@mastra/dynamodb";
+
+ const storage = new DynamoDBStore({
+   id: "dynamodb", // Unique identifier for this storage instance
+   config: {
+     tableName: "mastra-single-table",
+     region: "us-east-1",
+     ttl: {
+       // Messages expire after 30 days
+       message: {
+         enabled: true,
+         defaultTtlSeconds: 30 * 24 * 60 * 60, // 30 days
+       },
+       // Threads expire after 90 days
+       thread: {
+         enabled: true,
+         defaultTtlSeconds: 90 * 24 * 60 * 60, // 90 days
+       },
+       // Traces expire after 7 days with custom attribute name
+       trace: {
+         enabled: true,
+         attributeName: "expiresAt", // Custom TTL attribute
+         defaultTtlSeconds: 7 * 24 * 60 * 60, // 7 days
+       },
+       // Workflow snapshots don't expire
+       workflow_snapshot: {
+         enabled: false,
+       },
+     },
+   },
+ });
+ ```
+
+ ### Supported Entity Types
+
+ TTL can be configured for these entity types:
+
+ | Entity | Description |
+ |--------|-------------|
+ | `thread` | Conversation threads |
+ | `message` | Messages within threads |
+ | `trace` | Observability traces |
+ | `eval` | Evaluation results |
+ | `workflow_snapshot` | Workflow state snapshots |
+ | `resource` | User/resource data |
+ | `score` | Scoring results |
+
+ ### TTL Entity Configuration
+
+ Each entity type accepts the following configuration:
+
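+ As a sketch of the shape (the interface name is illustrative, not an exported type; the fields are those used in the example above):
+
+ ```typescript
+ interface TtlEntityConfig {
+   enabled: boolean; // turn TTL on or off for this entity type
+   defaultTtlSeconds?: number; // default TTL applied to new items, in seconds
+   attributeName?: string; // DynamoDB attribute holding the expiry (default: "ttl")
+ }
+ ```
+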
+ ### Enabling TTL on Your DynamoDB Table
+
+ After configuring TTL in your code, you must enable TTL on the DynamoDB table itself:
+
+ **Using AWS CLI:**
+
+ ```bash
+ aws dynamodb update-time-to-live \
+   --table-name mastra-single-table \
+   --time-to-live-specification "Enabled=true, AttributeName=ttl"
+ ```
+
+ **Using AWS Console:**
+
+ 1. Go to the DynamoDB console
+ 2. Select your table
+ 3. Go to the "Additional settings" tab
+ 4. Under "Time to Live (TTL)", click "Manage TTL"
+ 5. Enable TTL and specify the attribute name (default: `ttl`)
+
+ > **Note**: DynamoDB deletes expired items within 48 hours after expiration. Items remain queryable until actually deleted.
+
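+ For reference, the TTL attribute DynamoDB reads is an absolute expiry time in epoch seconds. A minimal sketch of that arithmetic, assuming the store derives the value at write time from `defaultTtlSeconds` (the helper below is illustrative, not part of the package API):
+
+ ```typescript
+ // Absolute expiry in epoch seconds, the format DynamoDB's TTL feature expects.
+ function computeExpiry(defaultTtlSeconds: number, now: Date = new Date()): number {
+   return Math.floor(now.getTime() / 1000) + defaultTtlSeconds;
+ }
+
+ // e.g. an item written now under the 30-day message TTL above:
+ const ttl = computeExpiry(30 * 24 * 60 * 60);
+ ```
+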
  ## AWS IAM Permissions
  
  The IAM role or user executing the code needs appropriate permissions to interact with the specified DynamoDB table and its indexes. Below is a sample policy. Replace `${YOUR_TABLE_NAME}` with your actual table name and `${YOUR_AWS_REGION}` and `${YOUR_AWS_ACCOUNT_ID}` with appropriate values.
@@ -431,6 +522,7 @@ import { Mastra } from "@mastra/core";
  import { PostgresStore } from "@mastra/pg";
  
  const storage = new PostgresStore({
+   id: 'pg-storage',
    connectionString: process.env.DATABASE_URL,
  });
  
@@ -477,6 +569,75 @@ This enables direct queries and custom transaction management. When using these
  
  This approach is intended for advanced scenarios where low-level access is required.
  
+ ### Using with Next.js
+
+ When using `PostgresStore` in Next.js applications, [Hot Module Replacement (HMR)](https://nextjs.org/docs/architecture/fast-refresh) during development can cause multiple storage instances to be created, resulting in this warning:
+
+ ```
+ WARNING: Creating a duplicate database object for the same connection.
+ ```
+
+ To prevent this, store the `PostgresStore` instance on the global object so it persists across HMR reloads:
+
+ ```typescript title="src/mastra/storage.ts"
+ import { PostgresStore } from "@mastra/pg";
+ import { Memory } from "@mastra/memory";
+
+ // Extend the global type to include our instances
+ declare global {
+   var pgStore: PostgresStore | undefined;
+   var memory: Memory | undefined;
+ }
+
+ // Get or create the PostgresStore instance
+ function getPgStore(): PostgresStore {
+   if (!global.pgStore) {
+     if (!process.env.DATABASE_URL) {
+       throw new Error("DATABASE_URL is not defined in environment variables");
+     }
+     global.pgStore = new PostgresStore({
+       id: "pg-storage",
+       connectionString: process.env.DATABASE_URL,
+       ssl:
+         process.env.DATABASE_SSL === "true"
+           ? { rejectUnauthorized: false }
+           : false,
+     });
+   }
+   return global.pgStore;
+ }
+
+ // Get or create the Memory instance
+ function getMemory(): Memory {
+   if (!global.memory) {
+     global.memory = new Memory({
+       storage: getPgStore(),
+     });
+   }
+   return global.memory;
+ }
+
+ export const storage = getPgStore();
+ export const memory = getMemory();
+ ```
+
+ Then use the exported instances in your Mastra configuration:
+
+ ```typescript title="src/mastra/index.ts"
+ import { Mastra } from "@mastra/core/mastra";
+ import { storage } from "./storage";
+
+ export const mastra = new Mastra({
+   storage,
+   // ...other config
+ });
+ ```
+
+ This pattern ensures only one `PostgresStore` instance is created regardless of how many times the module is reloaded during development. The same pattern can be applied to other storage providers like `LibSQLStore`.
+
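+ For example, a minimal sketch of the same global-singleton pattern for `LibSQLStore` (the constructor options shown are assumptions; check the `@mastra/libsql` reference for the exact config):
+
+ ```typescript
+ import { LibSQLStore } from "@mastra/libsql";
+
+ declare global {
+   var libsqlStore: LibSQLStore | undefined;
+ }
+
+ function getLibSQLStore(): LibSQLStore {
+   if (!global.libsqlStore) {
+     // Options are illustrative; a file URL is common for local development.
+     global.libsqlStore = new LibSQLStore({
+       id: "libsql-storage",
+       url: "file:../mastra.db",
+     });
+   }
+   return global.libsqlStore;
+ }
+ ```
+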
+ > **Note:** This singleton pattern is only necessary during local development with HMR. In production builds, modules are only loaded once.
+
  ## Usage Example
  
  ### Adding memory to an agent
package/dist/index.cjs CHANGED
@@ -2102,7 +2102,7 @@ var PgDB = class extends base.MastraBase {
        SELECT 1 FROM information_schema.tables
        WHERE table_schema = $1 AND table_name = $2
      )`,
-     [this.schemaName || "mastra", tableName]
+     [this.schemaName || "public", tableName]
    );
    if (tableExists?.exists) {
      await this.client.none(`TRUNCATE TABLE ${tableNameWithSchema} CASCADE`);
@@ -3219,27 +3219,52 @@ var MemoryPG = class _MemoryPG extends storage.MemoryStorage {
        );
      }
    }
-   async listThreadsByResourceId(args) {
-     const { resourceId, page = 0, perPage: perPageInput, orderBy } = args;
-     if (page < 0) {
+   async listThreads(args) {
+     const { page = 0, perPage: perPageInput, orderBy, filter } = args;
+     try {
+       this.validatePaginationInput(page, perPageInput ?? 100);
+     } catch (error$1) {
        throw new error.MastraError({
-         id: storage.createStorageErrorId("PG", "LIST_THREADS_BY_RESOURCE_ID", "INVALID_PAGE"),
+         id: storage.createStorageErrorId("PG", "LIST_THREADS", "INVALID_PAGE"),
          domain: error.ErrorDomain.STORAGE,
          category: error.ErrorCategory.USER,
-         text: "Page number must be non-negative",
-         details: {
-           resourceId,
-           page
-         }
+         text: error$1 instanceof Error ? error$1.message : "Invalid pagination parameters",
+         details: { page, ...perPageInput !== void 0 && { perPage: perPageInput } }
        });
      }
-     const { field, direction } = this.parseOrderBy(orderBy);
      const perPage = storage.normalizePerPage(perPageInput, 100);
+     try {
+       this.validateMetadataKeys(filter?.metadata);
+     } catch (error$1) {
+       throw new error.MastraError({
+         id: storage.createStorageErrorId("PG", "LIST_THREADS", "INVALID_METADATA_KEY"),
+         domain: error.ErrorDomain.STORAGE,
+         category: error.ErrorCategory.USER,
+         text: error$1 instanceof Error ? error$1.message : "Invalid metadata key",
+         details: { metadataKeys: filter?.metadata ? Object.keys(filter.metadata).join(", ") : "" }
+       });
+     }
+     const { field, direction } = this.parseOrderBy(orderBy);
      const { offset, perPage: perPageForResponse } = storage.calculatePagination(page, perPageInput, perPage);
      try {
        const tableName = getTableName3({ indexName: storage.TABLE_THREADS, schemaName: getSchemaName3(this.#schema) });
-       const baseQuery = `FROM ${tableName} WHERE "resourceId" = $1`;
-       const queryParams = [resourceId];
+       const whereClauses = [];
+       const queryParams = [];
+       let paramIndex = 1;
+       if (filter?.resourceId) {
+         whereClauses.push(`"resourceId" = $${paramIndex}`);
+         queryParams.push(filter.resourceId);
+         paramIndex++;
+       }
+       if (filter?.metadata && Object.keys(filter.metadata).length > 0) {
+         for (const [key, value] of Object.entries(filter.metadata)) {
+           whereClauses.push(`metadata::jsonb @> $${paramIndex}::jsonb`);
+           queryParams.push(JSON.stringify({ [key]: value }));
+           paramIndex++;
+         }
+       }
+       const whereClause = whereClauses.length > 0 ? `WHERE ${whereClauses.join(" AND ")}` : "";
+       const baseQuery = `FROM ${tableName} ${whereClause}`;
        const countQuery = `SELECT COUNT(*) ${baseQuery}`;
        const countResult = await this.#db.client.one(countQuery, queryParams);
        const total = parseInt(countResult.count, 10);
@@ -3253,7 +3278,7 @@ var MemoryPG = class _MemoryPG extends storage.MemoryStorage {
        };
      }
      const limitValue = perPageInput === false ? total : perPage;
-     const dataQuery = `SELECT id, "resourceId", title, metadata, "createdAt", "createdAtZ", "updatedAt", "updatedAtZ" ${baseQuery} ORDER BY "${field}" ${direction} LIMIT $2 OFFSET $3`;
+     const dataQuery = `SELECT id, "resourceId", title, metadata, "createdAt", "createdAtZ", "updatedAt", "updatedAtZ" ${baseQuery} ORDER BY "${field}" ${direction} LIMIT $${paramIndex} OFFSET $${paramIndex + 1}`;
      const rows = await this.#db.client.manyOrNone(
        dataQuery,
        [...queryParams, limitValue, offset]
@@ -3277,11 +3302,12 @@ var MemoryPG = class _MemoryPG extends storage.MemoryStorage {
    } catch (error$1) {
      const mastraError = new error.MastraError(
        {
-         id: storage.createStorageErrorId("PG", "LIST_THREADS_BY_RESOURCE_ID", "FAILED"),
+         id: storage.createStorageErrorId("PG", "LIST_THREADS", "FAILED"),
          domain: error.ErrorDomain.STORAGE,
          category: error.ErrorCategory.THIRD_PARTY,
          details: {
-           resourceId,
+           ...filter?.resourceId && { resourceId: filter.resourceId },
+           hasMetadataFilter: !!filter?.metadata,
            page
          }
        },
@@ -4757,6 +4783,11 @@ var ScoresPG = class _ScoresPG extends storage.ScoresStorage {
    }
    async init() {
      await this.#db.createTable({ tableName: storage.TABLE_SCORERS, schema: storage.TABLE_SCHEMAS[storage.TABLE_SCORERS] });
+     await this.#db.alterTable({
+       tableName: storage.TABLE_SCORERS,
+       schema: storage.TABLE_SCHEMAS[storage.TABLE_SCORERS],
+       ifNotExists: ["spanId", "requestContext"]
+     });
      await this.createDefaultIndexes();
      await this.createCustomIndexes();
    }
@@ -5108,23 +5139,8 @@ function getTableName5({ indexName, schemaName }) {
    const quotedIndexName = `"${indexName}"`;
    return schemaName ? `${schemaName}.${quotedIndexName}` : quotedIndexName;
  }
- function parseWorkflowRun(row) {
-   let parsedSnapshot = row.snapshot;
-   if (typeof parsedSnapshot === "string") {
-     try {
-       parsedSnapshot = JSON.parse(row.snapshot);
-     } catch (e) {
-       console.warn(`Failed to parse snapshot for workflow ${row.workflow_name}: ${e}`);
-     }
-   }
-   return {
-     workflowName: row.workflow_name,
-     runId: row.run_id,
-     snapshot: parsedSnapshot,
-     resourceId: row.resourceId,
-     createdAt: new Date(row.createdAtZ || row.createdAt),
-     updatedAt: new Date(row.updatedAtZ || row.updatedAt)
-   };
+ function sanitizeJsonForPg(jsonString) {
+   return jsonString.replace(/\\u(0000|[Dd][89A-Fa-f][0-9A-Fa-f]{2})/g, "");
  }
  var WorkflowsPG = class _WorkflowsPG extends storage.WorkflowsStorage {
    #db;
@@ -5141,6 +5157,24 @@ var WorkflowsPG = class _WorkflowsPG extends storage.WorkflowsStorage {
      this.#skipDefaultIndexes = skipDefaultIndexes;
      this.#indexes = indexes?.filter((idx) => _WorkflowsPG.MANAGED_TABLES.includes(idx.table));
    }
+   parseWorkflowRun(row) {
+     let parsedSnapshot = row.snapshot;
+     if (typeof parsedSnapshot === "string") {
+       try {
+         parsedSnapshot = JSON.parse(row.snapshot);
+       } catch (e) {
+         this.logger.warn(`Failed to parse snapshot for workflow ${row.workflow_name}: ${e}`);
+       }
+     }
+     return {
+       workflowName: row.workflow_name,
+       runId: row.run_id,
+       snapshot: parsedSnapshot,
+       resourceId: row.resourceId,
+       createdAt: new Date(row.createdAtZ || row.createdAt),
+       updatedAt: new Date(row.updatedAtZ || row.updatedAt)
+     };
+   }
    /**
    * Returns default index definitions for the workflows domain tables.
    * Currently no default indexes are defined for workflows.
@@ -5213,12 +5247,13 @@ var WorkflowsPG = class _WorkflowsPG extends storage.WorkflowsStorage {
      const now = /* @__PURE__ */ new Date();
      const createdAtValue = createdAt ? createdAt : now;
      const updatedAtValue = updatedAt ? updatedAt : now;
+     const sanitizedSnapshot = sanitizeJsonForPg(JSON.stringify(snapshot));
      await this.#db.client.none(
        `INSERT INTO ${getTableName5({ indexName: storage.TABLE_WORKFLOW_SNAPSHOT, schemaName: getSchemaName5(this.#schema) })} (workflow_name, run_id, "resourceId", snapshot, "createdAt", "updatedAt")
        VALUES ($1, $2, $3, $4, $5, $6)
        ON CONFLICT (workflow_name, run_id) DO UPDATE
        SET "resourceId" = $3, snapshot = $4, "updatedAt" = $6`,
-       [workflowName, runId, resourceId, JSON.stringify(snapshot), createdAtValue, updatedAtValue]
+       [workflowName, runId, resourceId, sanitizedSnapshot, createdAtValue, updatedAtValue]
      );
    } catch (error$1) {
      throw new error.MastraError(
@@ -5281,7 +5316,7 @@ var WorkflowsPG = class _WorkflowsPG extends storage.WorkflowsStorage {
      if (!result) {
        return null;
      }
-     return parseWorkflowRun(result);
+     return this.parseWorkflowRun(result);
    } catch (error$1) {
      throw new error.MastraError(
        {
@@ -5337,7 +5372,9 @@ var WorkflowsPG = class _WorkflowsPG extends storage.WorkflowsStorage {
        paramIndex++;
      }
      if (status) {
-       conditions.push(`snapshot::jsonb ->> 'status' = $${paramIndex}`);
+       conditions.push(
+         `regexp_replace(snapshot::text, '\\\\u(0000|[Dd][89A-Fa-f][0-9A-Fa-f]{2})', '', 'g')::jsonb ->> 'status' = $${paramIndex}`
+       );
        values.push(status);
        paramIndex++;
      }
@@ -5382,7 +5419,7 @@ var WorkflowsPG = class _WorkflowsPG extends storage.WorkflowsStorage {
      const queryValues = usePagination ? [...values, normalizedPerPage, offset] : values;
      const result = await this.#db.client.manyOrNone(query, queryValues);
      const runs = (result || []).map((row) => {
-       return parseWorkflowRun(row);
+       return this.parseWorkflowRun(row);
      });
      return { runs, total: total || runs.length };
    } catch (error$1) {
@@ -5415,7 +5452,7 @@ var PostgresStore = class extends storage.MastraStorage {
    try {
      validateConfig("PostgresStore", config);
      super({ id: config.id, name: "PostgresStore", disableInit: config.disableInit });
-     this.schema = config.schemaName || "public";
+     this.schema = utils.parseSqlIdentifier(config.schemaName || "public", "schema name");
      if (isPoolConfig(config)) {
        this.#pool = config.pool;
        this.#ownsPool = false;