@mastra/pg 1.0.0-beta.11 → 1.0.0-beta.13

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,307 @@
+ # Vectors API Reference
+
+ > API reference for vectors - 1 entry
+
+
+ ---
+
+ ## Reference: PG Vector Store
+
+ > Documentation for the PgVector class in Mastra, which provides vector search using PostgreSQL with the pgvector extension.
+
+ The PgVector class provides vector search using [PostgreSQL](https://www.postgresql.org/) with the [pgvector](https://github.com/pgvector/pgvector) extension, delivering robust vector similarity search within your existing PostgreSQL database.
+
+ ## Constructor Options
+
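+ As a rough sketch, the options exercised by the constructor examples below can be summarized as follows. This interface is illustrative, inferred from usage rather than taken from the package's published typings:
+
+ ```ts
+ // Hypothetical shape inferred from the examples below, not the package's actual typings.
+ interface PgVectorOptionsSketch {
+   id: string; // identifier for this store instance
+   connectionString?: string; // either a full connection string...
+   host?: string; // ...or discrete connection fields
+   port?: number;
+   database?: string;
+   user?: string;
+   password?: string;
+   schemaName?: string; // optional schema, e.g. "custom_schema"
+   max?: number; // pg.Pool: maximum pool size
+   idleTimeoutMillis?: number; // pg.Pool: idle client timeout
+   pgPoolOptions?: Record<string, unknown>; // passed through to pg.Pool
+ }
+ ```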
+ ## Constructor Examples
+
+ ### Connection String
+
+ ```ts
+ import { PgVector } from "@mastra/pg";
+
+ const vectorStore = new PgVector({
+   id: 'pg-vector',
+   connectionString: "postgresql://user:password@localhost:5432/mydb",
+ });
+ ```
+
+ ### Host/Port/Database Configuration
+
+ ```ts
+ const vectorStore = new PgVector({
+   id: 'pg-vector',
+   host: "localhost",
+   port: 5432,
+   database: "mydb",
+   user: "postgres",
+   password: "password",
+ });
+ ```
+
+ ### Advanced Configuration
+
+ ```ts
+ const vectorStore = new PgVector({
+   id: 'pg-vector',
+   connectionString: "postgresql://user:password@localhost:5432/mydb",
+   schemaName: "custom_schema",
+   max: 30,
+   idleTimeoutMillis: 60000,
+   pgPoolOptions: {
+     connectionTimeoutMillis: 5000,
+     allowExitOnIdle: true,
+   },
+ });
+ ```
+
+ ## Methods
+
+ ### createIndex()
+
+ #### IndexConfig
+
+ #### Memory Requirements
+
+ HNSW indexes require significant shared memory during construction. For 100K vectors:
+
+ - Small dimensions (64d): ~60MB with default settings
+ - Medium dimensions (256d): ~180MB with default settings
+ - Large dimensions (384d+): ~250MB+ with default settings
+
+ Higher `m` or `efConstruction` values will increase memory requirements significantly. Adjust your system's shared memory limits if needed.
+
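+ As a sketch, a typical call might look like the following, assuming an object argument whose `indexConfig` mirrors the `buildIndex()` configuration shown below; the exact parameter names are assumptions:
+
+ ```typescript
+ // Hypothetical call shape; verify against the package's typings.
+ await pgVector.createIndex({
+   indexName: "my_vectors",
+   dimension: 1536, // must match your embedding model's output size
+   metric: "cosine",
+   indexConfig: {
+     type: "hnsw",
+     hnsw: { m: 16, efConstruction: 64 },
+   },
+ });
+ ```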
+ ### upsert()
+
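+ A sketch of a typical call; the parameter names follow the object-argument pattern used by the other methods here and should be treated as assumptions:
+
+ ```typescript
+ // Hypothetical call shape: one metadata object per vector, ids optional.
+ await pgVector.upsert({
+   indexName: "my_vectors",
+   vectors: [
+     [0.1, 0.2, 0.3],
+     [0.4, 0.5, 0.6],
+   ],
+   metadata: [{ label: "a" }, { label: "b" }],
+   ids: ["vec-1", "vec-2"],
+ });
+ ```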
+ ### query()
+
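+ Part of the call shape is visible in the Error Handling example below; a fuller sketch, where the optional parameters are assumptions:
+
+ ```typescript
+ const results = await pgVector.query({
+   indexName: "my_vectors",
+   queryVector: [0.1, 0.2, 0.3],
+   topK: 10, // assumed optional: number of results to return
+   filter: { category: "product" }, // assumed optional: metadata filter
+   includeVector: false, // see QueryResult under Response Types
+ });
+ ```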
+ ### listIndexes()
+
+ Returns an array of index names as strings.
+
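+ For example:
+
+ ```typescript
+ const indexes = await pgVector.listIndexes();
+ console.log(indexes); // e.g. ["my_vectors"]
+ ```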
+ ### describeIndex()
+
+ Returns:
+
+ ```typescript
+ interface PGIndexStats {
+   dimension: number;
+   count: number;
+   metric: "cosine" | "euclidean" | "dotproduct";
+   type: "flat" | "hnsw" | "ivfflat";
+   config: {
+     m?: number;
+     efConstruction?: number;
+     lists?: number;
+     probes?: number;
+   };
+ }
+ ```
+
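+ A call sketch; the object-argument form is an assumption, consistent with the other methods shown here:
+
+ ```typescript
+ const stats = await pgVector.describeIndex({ indexName: "my_vectors" });
+ console.log(stats.count, stats.metric); // e.g. 1000 "cosine"
+ ```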
+ ### deleteIndex()
+
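+ A call sketch, with the argument shape assumed to match `deleteVector()` below:
+
+ ```typescript
+ await pgVector.deleteIndex({ indexName: "my_vectors" });
+ ```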
+ ### updateVector()
+
+ Updates a single vector by ID or by metadata filter. Either `id` or `filter` must be provided, but not both, and the `update` object must contain at least one of `vector` or `metadata`.
+
+ ```typescript
+ // Update by ID
+ await pgVector.updateVector({
+   indexName: "my_vectors",
+   id: "vector123",
+   update: {
+     vector: [0.1, 0.2, 0.3],
+     metadata: { label: "updated" },
+   },
+ });
+
+ // Update by filter
+ await pgVector.updateVector({
+   indexName: "my_vectors",
+   filter: { category: "product" },
+   update: {
+     metadata: { status: "reviewed" },
+   },
+ });
+ ```
+
+ ### deleteVector()
+
+ Deletes a single vector by ID from the specified index.
+
+ ```typescript
+ await pgVector.deleteVector({ indexName: "my_vectors", id: "vector123" });
+ ```
+
+ ### deleteVectors()
+
+ Deletes multiple vectors by IDs or by metadata filter. Either `ids` or `filter` must be provided, but not both.
+
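+ A sketch of both forms, assuming the same argument style as `updateVector()`:
+
+ ```typescript
+ // Delete by IDs
+ await pgVector.deleteVectors({ indexName: "my_vectors", ids: ["vec-1", "vec-2"] });
+
+ // Delete by metadata filter
+ await pgVector.deleteVectors({ indexName: "my_vectors", filter: { category: "archived" } });
+ ```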
+ ### disconnect()
+
+ Closes the database connection pool. Should be called when you are done using the store.
+
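+ For example:
+
+ ```typescript
+ await pgVector.disconnect();
+ ```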
+ ### buildIndex()
+
+ Builds or rebuilds an index with the specified metric and configuration. Any existing index is dropped before the new one is created.
+
+ ```typescript
+ // Define HNSW index
+ await pgVector.buildIndex("my_vectors", "cosine", {
+   type: "hnsw",
+   hnsw: {
+     m: 8,
+     efConstruction: 32,
+   },
+ });
+
+ // Define IVF index
+ await pgVector.buildIndex("my_vectors", "cosine", {
+   type: "ivfflat",
+   ivf: {
+     lists: 100,
+   },
+ });
+
+ // Define flat index
+ await pgVector.buildIndex("my_vectors", "cosine", {
+   type: "flat",
+ });
+ ```
+
+ ## Response Types
+
+ Query results are returned in this format:
+
+ ```typescript
+ interface QueryResult {
+   id: string;
+   score: number;
+   metadata: Record<string, any>;
+   vector?: number[]; // Only included if includeVector is true
+ }
+ ```
+
+ ## Error Handling
+
+ The store throws typed errors that can be caught:
+
+ ```typescript
+ try {
+   await store.query({
+     indexName: "index_name",
+     queryVector: queryVector,
+   });
+ } catch (error) {
+   if (error instanceof VectorStoreError) {
+     console.log(error.code); // 'connection_failed' | 'invalid_dimension' | etc.
+     console.log(error.details); // Additional error context
+   }
+ }
+ ```
+
+ ## Index Configuration Guide
+
+ ### Performance Optimization
+
+ #### IVFFlat Tuning
+
+ - **lists parameter**: Set to `sqrt(n) * 2`, where `n` is the number of vectors (see the sketch after this list)
+   - More lists = better accuracy but slower build time
+   - Fewer lists = faster build but potentially lower accuracy
+
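+ A minimal sketch of applying that rule of thumb, assuming direct access to the pool (see "Direct Pool Access" below) and an illustrative table name:
+
+ ```typescript
+ // Estimate `lists` from the current row count, then rebuild the index.
+ const { rows } = await pgVector.pool.query('SELECT count(*)::int AS n FROM "my_vectors"');
+ const lists = Math.max(1, Math.round(Math.sqrt(rows[0].n) * 2));
+
+ await pgVector.buildIndex("my_vectors", "cosine", {
+   type: "ivfflat",
+   ivf: { lists },
+ });
+ ```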
+ #### HNSW Tuning
+
+ - **m parameter**:
+   - 8-16: Moderate accuracy, lower memory
+   - 16-32: High accuracy, moderate memory
+   - 32-64: Very high accuracy, high memory
+ - **efConstruction**:
+   - 32-64: Fast build, good quality
+   - 64-128: Slower build, better quality
+   - 128-256: Slowest build, best quality
+
+ ### Index Recreation Behavior
+
+ The system automatically detects configuration changes and only rebuilds indexes when necessary:
+
+ - Same configuration: the index is kept (no recreation)
+ - Changed configuration: the index is dropped and rebuilt
+
+ This avoids the performance cost of unnecessary index recreation.
+
+ ## Best Practices
+
+ - Regularly evaluate your index configuration to ensure optimal performance.
+ - Adjust parameters like `lists` and `m` based on dataset size and query requirements.
+ - Monitor index performance using `describeIndex()` to track usage.
+ - Rebuild indexes periodically to maintain efficiency, especially after significant data changes.
+
+ ## Direct Pool Access
+
+ The `PgVector` class exposes its underlying PostgreSQL connection pool as a public field:
+
+ ```typescript
+ pgVector.pool; // instance of pg.Pool
+ ```
+
+ This enables advanced usage such as running direct SQL queries, managing transactions, or monitoring pool state. When using the pool directly:
+
+ - You are responsible for releasing clients (`client.release()`) after use.
+ - The pool remains accessible after calling `disconnect()`, but new queries will fail.
+ - Direct access bypasses any validation or transaction logic provided by PgVector methods.
+
+ This design supports advanced use cases but requires careful resource management on your part.
+
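+ A minimal sketch of checking out a client and releasing it; this is standard `pg.Pool` usage, and the query is illustrative:
+
+ ```typescript
+ const client = await pgVector.pool.connect();
+ try {
+   const { rows } = await client.query("SELECT version()");
+   console.log(rows[0]);
+ } finally {
+   client.release(); // always release the client back to the pool
+ }
+ ```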
+ ## Usage Example
+
+ ### Local embeddings with fastembed
+
+ Embeddings are numeric vectors used by memory's `semanticRecall` to retrieve related messages by meaning (not keywords). This setup uses `@mastra/fastembed` to generate vector embeddings.
+
+ Install `@mastra/fastembed` to get started:
+
+ ```bash
+ npm install @mastra/fastembed@beta
+ ```
+
+ Add the following to your agent:
+
+ ```typescript title="src/mastra/agents/example-pg-agent.ts"
+ import { Memory } from "@mastra/memory";
+ import { Agent } from "@mastra/core/agent";
+ import { PostgresStore, PgVector } from "@mastra/pg";
+ import { fastembed } from "@mastra/fastembed";
+
+ export const pgAgent = new Agent({
+   id: "pg-agent",
+   name: "PG Agent",
+   instructions:
+     "You are an AI agent with the ability to automatically recall memories from previous interactions.",
+   model: "openai/gpt-5.1",
+   memory: new Memory({
+     storage: new PostgresStore({
+       id: 'pg-agent-storage',
+       connectionString: process.env.DATABASE_URL!,
+     }),
+     vector: new PgVector({
+       id: 'pg-agent-vector',
+       connectionString: process.env.DATABASE_URL!,
+     }),
+     embedder: fastembed,
+     options: {
+       lastMessages: 10,
+       semanticRecall: {
+         topK: 3,
+         messageRange: 2,
+       },
+     },
+   }),
+ });
+ ```
+
+ ## Related
+
+ - [Metadata Filters](../rag/metadata-filters)
package/dist/index.cjs CHANGED
@@ -2102,7 +2102,7 @@ var PgDB = class extends base.MastraBase {
  SELECT 1 FROM information_schema.tables
  WHERE table_schema = $1 AND table_name = $2
  )`,
- [this.schemaName || "mastra", tableName]
+ [this.schemaName || "public", tableName]
  );
  if (tableExists?.exists) {
  await this.client.none(`TRUNCATE TABLE ${tableNameWithSchema} CASCADE`);
@@ -3253,13 +3253,19 @@ var MemoryPG = class _MemoryPG extends storage.MemoryStorage {
  };
  }
  const limitValue = perPageInput === false ? total : perPage;
- const dataQuery = `SELECT id, "resourceId", title, metadata, "createdAt", "updatedAt" ${baseQuery} ORDER BY "${field}" ${direction} LIMIT $2 OFFSET $3`;
- const rows = await this.#db.client.manyOrNone(dataQuery, [...queryParams, limitValue, offset]);
+ const dataQuery = `SELECT id, "resourceId", title, metadata, "createdAt", "createdAtZ", "updatedAt", "updatedAtZ" ${baseQuery} ORDER BY "${field}" ${direction} LIMIT $2 OFFSET $3`;
+ const rows = await this.#db.client.manyOrNone(
+ dataQuery,
+ [...queryParams, limitValue, offset]
+ );
  const threads = (rows || []).map((thread) => ({
- ...thread,
+ id: thread.id,
+ resourceId: thread.resourceId,
+ title: thread.title,
  metadata: typeof thread.metadata === "string" ? JSON.parse(thread.metadata) : thread.metadata,
- createdAt: thread.createdAt,
- updatedAt: thread.updatedAt
+ // Use timezone-aware columns (*Z) for correct UTC timestamps, with fallback for legacy data
+ createdAt: thread.createdAtZ || thread.createdAt,
+ updatedAt: thread.updatedAtZ || thread.updatedAt
  }));
  return {
  threads,
@@ -3587,11 +3593,13 @@ var MemoryPG = class _MemoryPG extends storage.MemoryStorage {
  queryParams.push(resourceId);
  }
  if (filter?.dateRange?.start) {
- conditions.push(`"createdAt" >= $${paramIndex++}`);
+ const startOp = filter.dateRange.startExclusive ? ">" : ">=";
+ conditions.push(`"createdAt" ${startOp} $${paramIndex++}`);
  queryParams.push(filter.dateRange.start);
  }
  if (filter?.dateRange?.end) {
- conditions.push(`"createdAt" <= $${paramIndex++}`);
+ const endOp = filter.dateRange.endExclusive ? "<" : "<=";
+ conditions.push(`"createdAt" ${endOp} $${paramIndex++}`);
  queryParams.push(filter.dateRange.end);
  }
  const whereClause = conditions.length > 0 ? `WHERE ${conditions.join(" AND ")}` : "";
@@ -3976,6 +3984,150 @@ var MemoryPG = class _MemoryPG extends storage.MemoryStorage {
  await this.#db.client.none(`UPDATE ${tableName} SET ${updates.join(", ")} WHERE id = $${paramIndex}`, values);
  return updatedResource;
  }
+ async cloneThread(args) {
+ const { sourceThreadId, newThreadId: providedThreadId, resourceId, title, metadata, options } = args;
+ const sourceThread = await this.getThreadById({ threadId: sourceThreadId });
+ if (!sourceThread) {
+ throw new error.MastraError({
+ id: storage.createStorageErrorId("PG", "CLONE_THREAD", "SOURCE_NOT_FOUND"),
+ domain: error.ErrorDomain.STORAGE,
+ category: error.ErrorCategory.USER,
+ text: `Source thread with id ${sourceThreadId} not found`,
+ details: { sourceThreadId }
+ });
+ }
+ const newThreadId = providedThreadId || crypto.randomUUID();
+ const existingThread = await this.getThreadById({ threadId: newThreadId });
+ if (existingThread) {
+ throw new error.MastraError({
+ id: storage.createStorageErrorId("PG", "CLONE_THREAD", "THREAD_EXISTS"),
+ domain: error.ErrorDomain.STORAGE,
+ category: error.ErrorCategory.USER,
+ text: `Thread with id ${newThreadId} already exists`,
+ details: { newThreadId }
+ });
+ }
+ const threadTableName = getTableName3({ indexName: storage.TABLE_THREADS, schemaName: getSchemaName3(this.#schema) });
+ const messageTableName = getTableName3({ indexName: storage.TABLE_MESSAGES, schemaName: getSchemaName3(this.#schema) });
+ try {
+ return await this.#db.client.tx(async (t) => {
+ let messageQuery = `SELECT id, content, role, type, "createdAt", "createdAtZ", thread_id AS "threadId", "resourceId"
+ FROM ${messageTableName} WHERE thread_id = $1`;
+ const messageParams = [sourceThreadId];
+ let paramIndex = 2;
+ if (options?.messageFilter?.startDate) {
+ messageQuery += ` AND "createdAt" >= $${paramIndex++}`;
+ messageParams.push(options.messageFilter.startDate);
+ }
+ if (options?.messageFilter?.endDate) {
+ messageQuery += ` AND "createdAt" <= $${paramIndex++}`;
+ messageParams.push(options.messageFilter.endDate);
+ }
+ if (options?.messageFilter?.messageIds && options.messageFilter.messageIds.length > 0) {
+ messageQuery += ` AND id IN (${options.messageFilter.messageIds.map(() => `$${paramIndex++}`).join(", ")})`;
+ messageParams.push(...options.messageFilter.messageIds);
+ }
+ messageQuery += ` ORDER BY "createdAt" ASC`;
+ if (options?.messageLimit && options.messageLimit > 0) {
+ const limitQuery = `SELECT * FROM (${messageQuery.replace('ORDER BY "createdAt" ASC', 'ORDER BY "createdAt" DESC')} LIMIT $${paramIndex}) AS limited ORDER BY "createdAt" ASC`;
+ messageParams.push(options.messageLimit);
+ messageQuery = limitQuery;
+ }
+ const sourceMessages = await t.manyOrNone(messageQuery, messageParams);
+ const now = /* @__PURE__ */ new Date();
+ const lastMessageId = sourceMessages.length > 0 ? sourceMessages[sourceMessages.length - 1].id : void 0;
+ const cloneMetadata = {
+ sourceThreadId,
+ clonedAt: now,
+ ...lastMessageId && { lastMessageId }
+ };
+ const newThread = {
+ id: newThreadId,
+ resourceId: resourceId || sourceThread.resourceId,
+ title: title || (sourceThread.title ? `Clone of ${sourceThread.title}` : void 0),
+ metadata: {
+ ...metadata,
+ clone: cloneMetadata
+ },
+ createdAt: now,
+ updatedAt: now
+ };
+ await t.none(
+ `INSERT INTO ${threadTableName} (
+ id,
+ "resourceId",
+ title,
+ metadata,
+ "createdAt",
+ "createdAtZ",
+ "updatedAt",
+ "updatedAtZ"
+ ) VALUES ($1, $2, $3, $4, $5, $6, $7, $8)`,
+ [
+ newThread.id,
+ newThread.resourceId,
+ newThread.title,
+ newThread.metadata ? JSON.stringify(newThread.metadata) : null,
+ now,
+ now,
+ now,
+ now
+ ]
+ );
+ const clonedMessages = [];
+ const targetResourceId = resourceId || sourceThread.resourceId;
+ for (const sourceMsg of sourceMessages) {
+ const newMessageId = crypto.randomUUID();
+ const normalizedMsg = this.normalizeMessageRow(sourceMsg);
+ let parsedContent = normalizedMsg.content;
+ try {
+ parsedContent = JSON.parse(normalizedMsg.content);
+ } catch {
+ }
+ await t.none(
+ `INSERT INTO ${messageTableName} (id, thread_id, content, "createdAt", "createdAtZ", role, type, "resourceId")
+ VALUES ($1, $2, $3, $4, $5, $6, $7, $8)`,
+ [
+ newMessageId,
+ newThreadId,
+ typeof normalizedMsg.content === "string" ? normalizedMsg.content : JSON.stringify(normalizedMsg.content),
+ normalizedMsg.createdAt,
+ normalizedMsg.createdAt,
+ normalizedMsg.role,
+ normalizedMsg.type || "v2",
+ targetResourceId
+ ]
+ );
+ clonedMessages.push({
+ id: newMessageId,
+ threadId: newThreadId,
+ content: parsedContent,
+ role: normalizedMsg.role,
+ type: normalizedMsg.type,
+ createdAt: new Date(normalizedMsg.createdAt),
+ resourceId: targetResourceId
+ });
+ }
+ return {
+ thread: newThread,
+ clonedMessages
+ };
+ });
+ } catch (error$1) {
+ if (error$1 instanceof error.MastraError) {
+ throw error$1;
+ }
+ throw new error.MastraError(
+ {
+ id: storage.createStorageErrorId("PG", "CLONE_THREAD", "FAILED"),
+ domain: error.ErrorDomain.STORAGE,
+ category: error.ErrorCategory.THIRD_PARTY,
+ details: { sourceThreadId, newThreadId }
+ },
+ error$1
+ );
+ }
+ }
  };
  var ObservabilityPG = class _ObservabilityPG extends storage.ObservabilityStorage {
  #db;
@@ -4605,6 +4757,11 @@ var ScoresPG = class _ScoresPG extends storage.ScoresStorage {
  }
  async init() {
  await this.#db.createTable({ tableName: storage.TABLE_SCORERS, schema: storage.TABLE_SCHEMAS[storage.TABLE_SCORERS] });
+ await this.#db.alterTable({
+ tableName: storage.TABLE_SCORERS,
+ schema: storage.TABLE_SCHEMAS[storage.TABLE_SCORERS],
+ ifNotExists: ["spanId", "requestContext"]
+ });
  await this.createDefaultIndexes();
  await this.createCustomIndexes();
  }
@@ -4956,23 +5113,8 @@ function getTableName5({ indexName, schemaName }) {
  const quotedIndexName = `"${indexName}"`;
  return schemaName ? `${schemaName}.${quotedIndexName}` : quotedIndexName;
  }
- function parseWorkflowRun(row) {
- let parsedSnapshot = row.snapshot;
- if (typeof parsedSnapshot === "string") {
- try {
- parsedSnapshot = JSON.parse(row.snapshot);
- } catch (e) {
- console.warn(`Failed to parse snapshot for workflow ${row.workflow_name}: ${e}`);
- }
- }
- return {
- workflowName: row.workflow_name,
- runId: row.run_id,
- snapshot: parsedSnapshot,
- resourceId: row.resourceId,
- createdAt: new Date(row.createdAtZ || row.createdAt),
- updatedAt: new Date(row.updatedAtZ || row.updatedAt)
- };
+ function sanitizeJsonForPg(jsonString) {
+ return jsonString.replace(/\\u(0000|[Dd][89A-Fa-f][0-9A-Fa-f]{2})/g, "");
  }
  var WorkflowsPG = class _WorkflowsPG extends storage.WorkflowsStorage {
  #db;
@@ -4989,6 +5131,24 @@ var WorkflowsPG = class _WorkflowsPG extends storage.WorkflowsStorage {
  this.#skipDefaultIndexes = skipDefaultIndexes;
  this.#indexes = indexes?.filter((idx) => _WorkflowsPG.MANAGED_TABLES.includes(idx.table));
  }
+ parseWorkflowRun(row) {
+ let parsedSnapshot = row.snapshot;
+ if (typeof parsedSnapshot === "string") {
+ try {
+ parsedSnapshot = JSON.parse(row.snapshot);
+ } catch (e) {
+ this.logger.warn(`Failed to parse snapshot for workflow ${row.workflow_name}: ${e}`);
+ }
+ }
+ return {
+ workflowName: row.workflow_name,
+ runId: row.run_id,
+ snapshot: parsedSnapshot,
+ resourceId: row.resourceId,
+ createdAt: new Date(row.createdAtZ || row.createdAt),
+ updatedAt: new Date(row.updatedAtZ || row.updatedAt)
+ };
+ }
  /**
  * Returns default index definitions for the workflows domain tables.
  * Currently no default indexes are defined for workflows.
@@ -5061,12 +5221,13 @@ var WorkflowsPG = class _WorkflowsPG extends storage.WorkflowsStorage {
  const now = /* @__PURE__ */ new Date();
  const createdAtValue = createdAt ? createdAt : now;
  const updatedAtValue = updatedAt ? updatedAt : now;
+ const sanitizedSnapshot = sanitizeJsonForPg(JSON.stringify(snapshot));
  await this.#db.client.none(
  `INSERT INTO ${getTableName5({ indexName: storage.TABLE_WORKFLOW_SNAPSHOT, schemaName: getSchemaName5(this.#schema) })} (workflow_name, run_id, "resourceId", snapshot, "createdAt", "updatedAt")
  VALUES ($1, $2, $3, $4, $5, $6)
  ON CONFLICT (workflow_name, run_id) DO UPDATE
  SET "resourceId" = $3, snapshot = $4, "updatedAt" = $6`,
- [workflowName, runId, resourceId, JSON.stringify(snapshot), createdAtValue, updatedAtValue]
+ [workflowName, runId, resourceId, sanitizedSnapshot, createdAtValue, updatedAtValue]
  );
  } catch (error$1) {
  throw new error.MastraError(
@@ -5129,7 +5290,7 @@ var WorkflowsPG = class _WorkflowsPG extends storage.WorkflowsStorage {
  if (!result) {
  return null;
  }
- return parseWorkflowRun(result);
+ return this.parseWorkflowRun(result);
  } catch (error$1) {
  throw new error.MastraError(
  {
@@ -5185,7 +5346,9 @@ var WorkflowsPG = class _WorkflowsPG extends storage.WorkflowsStorage {
  paramIndex++;
  }
  if (status) {
- conditions.push(`snapshot::jsonb ->> 'status' = $${paramIndex}`);
+ conditions.push(
+ `regexp_replace(snapshot::text, '\\\\u(0000|[Dd][89A-Fa-f][0-9A-Fa-f]{2})', '', 'g')::jsonb ->> 'status' = $${paramIndex}`
+ );
  values.push(status);
  paramIndex++;
  }
@@ -5230,7 +5393,7 @@ var WorkflowsPG = class _WorkflowsPG extends storage.WorkflowsStorage {
  const queryValues = usePagination ? [...values, normalizedPerPage, offset] : values;
  const result = await this.#db.client.manyOrNone(query, queryValues);
  const runs = (result || []).map((row) => {
- return parseWorkflowRun(row);
+ return this.parseWorkflowRun(row);
  });
  return { runs, total: total || runs.length };
  } catch (error$1) {
@@ -5263,7 +5426,7 @@ var PostgresStore = class extends storage.MastraStorage {
  try {
  validateConfig("PostgresStore", config);
  super({ id: config.id, name: "PostgresStore", disableInit: config.disableInit });
- this.schema = config.schemaName || "public";
+ this.schema = utils.parseSqlIdentifier(config.schemaName || "public", "schema name");
  if (isPoolConfig(config)) {
  this.#pool = config.pool;
  this.#ownsPool = false;