@mastra/pg 1.0.0-beta.11 → 1.0.0-beta.13

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/dist/index.js CHANGED
@@ -2078,7 +2078,7 @@ var PgDB = class extends MastraBase {
  SELECT 1 FROM information_schema.tables
  WHERE table_schema = $1 AND table_name = $2
  )`,
- [this.schemaName || "mastra", tableName]
+ [this.schemaName || "public", tableName]
  );
  if (tableExists?.exists) {
  await this.client.none(`TRUNCATE TABLE ${tableNameWithSchema} CASCADE`);
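The fallback schema for the existence check now matches Postgres's actual default. Previously, when no `schemaName` was configured, tables created in `public` were looked up under a nonexistent `mastra` schema, so the `TRUNCATE` branch never ran. A minimal sketch of the corrected check, assuming a standalone pg-promise-style client (`db`) with the same `oneOrNone`/`none` API used above:

```js
// Sketch only — `db`, `schemaName`, and `tableName` are placeholders.
async function truncateIfExists(db, schemaName, tableName) {
  const tableExists = await db.oneOrNone(
    `SELECT EXISTS (
      SELECT 1 FROM information_schema.tables
      WHERE table_schema = $1 AND table_name = $2
    )`,
    // "public" is the schema Postgres uses when none is specified.
    [schemaName || "public", tableName]
  );
  if (tableExists?.exists) {
    await db.none(`TRUNCATE TABLE "${schemaName || "public"}"."${tableName}" CASCADE`);
  }
}
```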
@@ -3229,13 +3229,19 @@ var MemoryPG = class _MemoryPG extends MemoryStorage {
  };
  }
  const limitValue = perPageInput === false ? total : perPage;
- const dataQuery = `SELECT id, "resourceId", title, metadata, "createdAt", "updatedAt" ${baseQuery} ORDER BY "${field}" ${direction} LIMIT $2 OFFSET $3`;
- const rows = await this.#db.client.manyOrNone(dataQuery, [...queryParams, limitValue, offset]);
+ const dataQuery = `SELECT id, "resourceId", title, metadata, "createdAt", "createdAtZ", "updatedAt", "updatedAtZ" ${baseQuery} ORDER BY "${field}" ${direction} LIMIT $2 OFFSET $3`;
+ const rows = await this.#db.client.manyOrNone(
+ dataQuery,
+ [...queryParams, limitValue, offset]
+ );
  const threads = (rows || []).map((thread) => ({
- ...thread,
+ id: thread.id,
+ resourceId: thread.resourceId,
+ title: thread.title,
  metadata: typeof thread.metadata === "string" ? JSON.parse(thread.metadata) : thread.metadata,
- createdAt: thread.createdAt,
- updatedAt: thread.updatedAt
+ // Use timezone-aware columns (*Z) for correct UTC timestamps, with fallback for legacy data
+ createdAt: thread.createdAtZ || thread.createdAt,
+ updatedAt: thread.updatedAtZ || thread.updatedAt
  }));
  return {
  threads,
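The pagination query now selects the timezone-aware `createdAtZ`/`updatedAtZ` columns (`timestamptz`) alongside the legacy `timestamp` columns, and the row mapper prefers them. A plain `timestamp` column drops the UTC offset, so a driver like node-postgres re-interprets the value in the process's local zone; the `*Z` columns preserve the instant. A hedged sketch of the fallback pattern, with field names taken from the diff:

```js
// Prefer the timezone-aware columns; rows written before the "*Z"
// columns existed fall back to the legacy values.
function mapThreadRow(row) {
  return {
    id: row.id,
    resourceId: row.resourceId,
    title: row.title,
    metadata: typeof row.metadata === "string" ? JSON.parse(row.metadata) : row.metadata,
    createdAt: row.createdAtZ || row.createdAt,
    updatedAt: row.updatedAtZ || row.updatedAt,
  };
}
```

Note that the explicit field list (replacing the old `...thread` spread) also keeps the raw `*Z` columns from leaking into the returned thread objects.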
@@ -3563,11 +3569,13 @@ var MemoryPG = class _MemoryPG extends MemoryStorage {
  queryParams.push(resourceId);
  }
  if (filter?.dateRange?.start) {
- conditions.push(`"createdAt" >= $${paramIndex++}`);
+ const startOp = filter.dateRange.startExclusive ? ">" : ">=";
+ conditions.push(`"createdAt" ${startOp} $${paramIndex++}`);
  queryParams.push(filter.dateRange.start);
  }
  if (filter?.dateRange?.end) {
- conditions.push(`"createdAt" <= $${paramIndex++}`);
+ const endOp = filter.dateRange.endExclusive ? "<" : "<=";
+ conditions.push(`"createdAt" ${endOp} $${paramIndex++}`);
  queryParams.push(filter.dateRange.end);
  }
  const whereClause = conditions.length > 0 ? `WHERE ${conditions.join(" AND ")}` : "";
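The new `startExclusive`/`endExclusive` flags switch the boundary comparisons from `>=`/`<=` to `>`/`<`. This matters for cursor-style pagination: passing the last-seen `createdAt` as an exclusive start keeps the boundary row from being returned twice. A standalone sketch of the builder pattern, assuming the same `filter.dateRange` shape as above:

```js
// Hypothetical standalone version of the condition builder shown above.
function dateRangeConditions(dateRange, paramIndex = 1) {
  const conditions = [];
  const params = [];
  if (dateRange?.start) {
    conditions.push(`"createdAt" ${dateRange.startExclusive ? ">" : ">="} $${paramIndex++}`);
    params.push(dateRange.start);
  }
  if (dateRange?.end) {
    conditions.push(`"createdAt" ${dateRange.endExclusive ? "<" : "<="} $${paramIndex++}`);
    params.push(dateRange.end);
  }
  return { conditions, params, nextParamIndex: paramIndex };
}
```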
@@ -3952,6 +3960,150 @@ var MemoryPG = class _MemoryPG extends MemoryStorage {
  await this.#db.client.none(`UPDATE ${tableName} SET ${updates.join(", ")} WHERE id = $${paramIndex}`, values);
  return updatedResource;
  }
+ async cloneThread(args) {
+ const { sourceThreadId, newThreadId: providedThreadId, resourceId, title, metadata, options } = args;
+ const sourceThread = await this.getThreadById({ threadId: sourceThreadId });
+ if (!sourceThread) {
+ throw new MastraError({
+ id: createStorageErrorId("PG", "CLONE_THREAD", "SOURCE_NOT_FOUND"),
+ domain: ErrorDomain.STORAGE,
+ category: ErrorCategory.USER,
+ text: `Source thread with id ${sourceThreadId} not found`,
+ details: { sourceThreadId }
+ });
+ }
+ const newThreadId = providedThreadId || crypto.randomUUID();
+ const existingThread = await this.getThreadById({ threadId: newThreadId });
+ if (existingThread) {
+ throw new MastraError({
+ id: createStorageErrorId("PG", "CLONE_THREAD", "THREAD_EXISTS"),
+ domain: ErrorDomain.STORAGE,
+ category: ErrorCategory.USER,
+ text: `Thread with id ${newThreadId} already exists`,
+ details: { newThreadId }
+ });
+ }
+ const threadTableName = getTableName3({ indexName: TABLE_THREADS, schemaName: getSchemaName3(this.#schema) });
+ const messageTableName = getTableName3({ indexName: TABLE_MESSAGES, schemaName: getSchemaName3(this.#schema) });
+ try {
+ return await this.#db.client.tx(async (t) => {
+ let messageQuery = `SELECT id, content, role, type, "createdAt", "createdAtZ", thread_id AS "threadId", "resourceId"
+ FROM ${messageTableName} WHERE thread_id = $1`;
+ const messageParams = [sourceThreadId];
+ let paramIndex = 2;
+ if (options?.messageFilter?.startDate) {
+ messageQuery += ` AND "createdAt" >= $${paramIndex++}`;
+ messageParams.push(options.messageFilter.startDate);
+ }
+ if (options?.messageFilter?.endDate) {
+ messageQuery += ` AND "createdAt" <= $${paramIndex++}`;
+ messageParams.push(options.messageFilter.endDate);
+ }
+ if (options?.messageFilter?.messageIds && options.messageFilter.messageIds.length > 0) {
+ messageQuery += ` AND id IN (${options.messageFilter.messageIds.map(() => `$${paramIndex++}`).join(", ")})`;
+ messageParams.push(...options.messageFilter.messageIds);
+ }
+ messageQuery += ` ORDER BY "createdAt" ASC`;
+ if (options?.messageLimit && options.messageLimit > 0) {
+ const limitQuery = `SELECT * FROM (${messageQuery.replace('ORDER BY "createdAt" ASC', 'ORDER BY "createdAt" DESC')} LIMIT $${paramIndex}) AS limited ORDER BY "createdAt" ASC`;
+ messageParams.push(options.messageLimit);
+ messageQuery = limitQuery;
+ }
+ const sourceMessages = await t.manyOrNone(messageQuery, messageParams);
+ const now = /* @__PURE__ */ new Date();
+ const lastMessageId = sourceMessages.length > 0 ? sourceMessages[sourceMessages.length - 1].id : void 0;
+ const cloneMetadata = {
+ sourceThreadId,
+ clonedAt: now,
+ ...lastMessageId && { lastMessageId }
+ };
+ const newThread = {
+ id: newThreadId,
+ resourceId: resourceId || sourceThread.resourceId,
+ title: title || (sourceThread.title ? `Clone of ${sourceThread.title}` : void 0),
+ metadata: {
+ ...metadata,
+ clone: cloneMetadata
+ },
+ createdAt: now,
+ updatedAt: now
+ };
+ await t.none(
+ `INSERT INTO ${threadTableName} (
+ id,
+ "resourceId",
+ title,
+ metadata,
+ "createdAt",
+ "createdAtZ",
+ "updatedAt",
+ "updatedAtZ"
+ ) VALUES ($1, $2, $3, $4, $5, $6, $7, $8)`,
+ [
+ newThread.id,
+ newThread.resourceId,
+ newThread.title,
+ newThread.metadata ? JSON.stringify(newThread.metadata) : null,
+ now,
+ now,
+ now,
+ now
+ ]
+ );
+ const clonedMessages = [];
+ const targetResourceId = resourceId || sourceThread.resourceId;
+ for (const sourceMsg of sourceMessages) {
+ const newMessageId = crypto.randomUUID();
+ const normalizedMsg = this.normalizeMessageRow(sourceMsg);
+ let parsedContent = normalizedMsg.content;
+ try {
+ parsedContent = JSON.parse(normalizedMsg.content);
+ } catch {
+ }
+ await t.none(
+ `INSERT INTO ${messageTableName} (id, thread_id, content, "createdAt", "createdAtZ", role, type, "resourceId")
+ VALUES ($1, $2, $3, $4, $5, $6, $7, $8)`,
+ [
+ newMessageId,
+ newThreadId,
+ typeof normalizedMsg.content === "string" ? normalizedMsg.content : JSON.stringify(normalizedMsg.content),
+ normalizedMsg.createdAt,
+ normalizedMsg.createdAt,
+ normalizedMsg.role,
+ normalizedMsg.type || "v2",
+ targetResourceId
+ ]
+ );
+ clonedMessages.push({
+ id: newMessageId,
+ threadId: newThreadId,
+ content: parsedContent,
+ role: normalizedMsg.role,
+ type: normalizedMsg.type,
+ createdAt: new Date(normalizedMsg.createdAt),
+ resourceId: targetResourceId
+ });
+ }
+ return {
+ thread: newThread,
+ clonedMessages
+ };
+ });
+ } catch (error) {
+ if (error instanceof MastraError) {
+ throw error;
+ }
+ throw new MastraError(
+ {
+ id: createStorageErrorId("PG", "CLONE_THREAD", "FAILED"),
+ domain: ErrorDomain.STORAGE,
+ category: ErrorCategory.THIRD_PARTY,
+ details: { sourceThreadId, newThreadId }
+ },
+ error
+ );
+ }
+ }
  };
  var ObservabilityPG = class _ObservabilityPG extends ObservabilityStorage {
  #db;
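The new `cloneThread` copies a thread and its (optionally filtered) messages into a fresh thread inside a single transaction, recording provenance under `metadata.clone`. When `messageLimit` is set, the query is flipped to `DESC`, limited in a subquery, then re-sorted `ASC`, so the newest N messages come back in chronological order. A hedged usage sketch — the `memory` instance and ids are placeholders:

```js
const { thread, clonedMessages } = await memory.cloneThread({
  sourceThreadId: "thread-123",
  resourceId: "user-456",      // optional; defaults to the source thread's resourceId
  title: "Escalation copy",    // optional; defaults to `Clone of <source title>`
  options: {
    messageFilter: { startDate: new Date("2024-06-01") },
    messageLimit: 50,          // keep only the 50 most recent matching messages
  },
});
// Provenance is recorded on the clone:
// thread.metadata.clone -> { sourceThreadId, clonedAt, lastMessageId }
```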
@@ -4581,6 +4733,11 @@ var ScoresPG = class _ScoresPG extends ScoresStorage {
  }
  async init() {
  await this.#db.createTable({ tableName: TABLE_SCORERS, schema: TABLE_SCHEMAS[TABLE_SCORERS] });
+ await this.#db.alterTable({
+ tableName: TABLE_SCORERS,
+ schema: TABLE_SCHEMAS[TABLE_SCORERS],
+ ifNotExists: ["spanId", "requestContext"]
+ });
  await this.createDefaultIndexes();
  await this.createCustomIndexes();
  }
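`init()` for the scores domain is now self-migrating: after `createTable`, `alterTable` adds the `spanId` and `requestContext` columns when they are missing, so databases created by earlier betas upgrade without a manual migration. A sketch of the guard such a helper presumably reduces to in Postgres — the table name and column types here are illustrative, not the package's actual schema definitions:

```js
// ADD COLUMN IF NOT EXISTS makes the call safe to run on every startup.
async function addMissingColumns(db, table, columns /* { name: sqlType } */) {
  for (const [name, sqlType] of Object.entries(columns)) {
    await db.none(`ALTER TABLE ${table} ADD COLUMN IF NOT EXISTS "${name}" ${sqlType}`);
  }
}

// Hypothetical types, for illustration only.
await addMissingColumns(db, '"mastra_scorers"', { spanId: "TEXT", requestContext: "JSONB" });
```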
@@ -4932,23 +5089,8 @@ function getTableName5({ indexName, schemaName }) {
  const quotedIndexName = `"${indexName}"`;
  return schemaName ? `${schemaName}.${quotedIndexName}` : quotedIndexName;
  }
- function parseWorkflowRun(row) {
- let parsedSnapshot = row.snapshot;
- if (typeof parsedSnapshot === "string") {
- try {
- parsedSnapshot = JSON.parse(row.snapshot);
- } catch (e) {
- console.warn(`Failed to parse snapshot for workflow ${row.workflow_name}: ${e}`);
- }
- }
- return {
- workflowName: row.workflow_name,
- runId: row.run_id,
- snapshot: parsedSnapshot,
- resourceId: row.resourceId,
- createdAt: new Date(row.createdAtZ || row.createdAt),
- updatedAt: new Date(row.updatedAtZ || row.updatedAt)
- };
+ function sanitizeJsonForPg(jsonString) {
+ return jsonString.replace(/\\u(0000|[Dd][89A-Fa-f][0-9A-Fa-f]{2})/g, "");
  }
  var WorkflowsPG = class _WorkflowsPG extends WorkflowsStorage {
  #db;
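`sanitizeJsonForPg` takes the place of the removed module-level `parseWorkflowRun` (that helper moves onto `WorkflowsPG` below). Postgres `jsonb` rejects `\u0000` and unpaired UTF-16 surrogate escapes (`\uD800`–`\uDFFF`) with an "unsupported Unicode escape sequence" error, so the helper strips those escape sequences from already-serialized JSON. Since ES2019, `JSON.stringify` emits lone surrogates as `\uXXXX` escapes while leaving well-formed pairs as literal characters, so in practice only NULs and invalid surrogate halves are dropped. A worked example:

```js
function sanitizeJsonForPg(jsonString) {
  // Strips the escape sequences \u0000 and \uD800–\uDFFF (either case).
  return jsonString.replace(/\\u(0000|[Dd][89A-Fa-f][0-9A-Fa-f]{2})/g, "");
}

sanitizeJsonForPg(JSON.stringify({ out: "a\u0000b" }));        // '{"out":"ab"}'
sanitizeJsonForPg(JSON.stringify({ out: "ok \uD83D\uDE00" })); // valid emoji pair untouched
```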
@@ -4965,6 +5107,24 @@ var WorkflowsPG = class _WorkflowsPG extends WorkflowsStorage {
  this.#skipDefaultIndexes = skipDefaultIndexes;
  this.#indexes = indexes?.filter((idx) => _WorkflowsPG.MANAGED_TABLES.includes(idx.table));
  }
+ parseWorkflowRun(row) {
+ let parsedSnapshot = row.snapshot;
+ if (typeof parsedSnapshot === "string") {
+ try {
+ parsedSnapshot = JSON.parse(row.snapshot);
+ } catch (e) {
+ this.logger.warn(`Failed to parse snapshot for workflow ${row.workflow_name}: ${e}`);
+ }
+ }
+ return {
+ workflowName: row.workflow_name,
+ runId: row.run_id,
+ snapshot: parsedSnapshot,
+ resourceId: row.resourceId,
+ createdAt: new Date(row.createdAtZ || row.createdAt),
+ updatedAt: new Date(row.updatedAtZ || row.updatedAt)
+ };
+ }
  /**
  * Returns default index definitions for the workflows domain tables.
  * Currently no default indexes are defined for workflows.
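Moving `parseWorkflowRun` onto the class lets it report through `this.logger` instead of `console.warn`; the parse stays deliberately non-fatal, so one corrupt snapshot row degrades to a warning plus the raw string rather than breaking every listing. An illustrative call against a hypothetical row (the method is internal to the bundle, not public API):

```js
const run = workflows.parseWorkflowRun({
  workflow_name: "ingest",
  run_id: "run-1",
  snapshot: "{not valid json",       // left as-is; a warning is logged
  createdAtZ: "2024-01-01T00:00:00Z",
  updatedAtZ: "2024-01-02T00:00:00Z",
});
// run.snapshot === "{not valid json"
// run.createdAt instanceof Date === true
```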
@@ -5037,12 +5197,13 @@ var WorkflowsPG = class _WorkflowsPG extends WorkflowsStorage {
  const now = /* @__PURE__ */ new Date();
  const createdAtValue = createdAt ? createdAt : now;
  const updatedAtValue = updatedAt ? updatedAt : now;
+ const sanitizedSnapshot = sanitizeJsonForPg(JSON.stringify(snapshot));
  await this.#db.client.none(
  `INSERT INTO ${getTableName5({ indexName: TABLE_WORKFLOW_SNAPSHOT, schemaName: getSchemaName5(this.#schema) })} (workflow_name, run_id, "resourceId", snapshot, "createdAt", "updatedAt")
  VALUES ($1, $2, $3, $4, $5, $6)
  ON CONFLICT (workflow_name, run_id) DO UPDATE
  SET "resourceId" = $3, snapshot = $4, "updatedAt" = $6`,
- [workflowName, runId, resourceId, JSON.stringify(snapshot), createdAtValue, updatedAtValue]
+ [workflowName, runId, resourceId, sanitizedSnapshot, createdAtValue, updatedAtValue]
  );
  } catch (error) {
  throw new MastraError(
@@ -5105,7 +5266,7 @@ var WorkflowsPG = class _WorkflowsPG extends WorkflowsStorage {
  if (!result) {
  return null;
  }
- return parseWorkflowRun(result);
+ return this.parseWorkflowRun(result);
  } catch (error) {
  throw new MastraError(
  {
@@ -5161,7 +5322,9 @@ var WorkflowsPG = class _WorkflowsPG extends WorkflowsStorage {
  paramIndex++;
  }
  if (status) {
- conditions.push(`snapshot::jsonb ->> 'status' = $${paramIndex}`);
+ conditions.push(
+ `regexp_replace(snapshot::text, '\\\\u(0000|[Dd][89A-Fa-f][0-9A-Fa-f]{2})', '', 'g')::jsonb ->> 'status' = $${paramIndex}`
+ );
  values.push(status);
  paramIndex++;
  }
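Snapshots written before `sanitizeJsonForPg` existed may still contain the forbidden escapes, and the `::jsonb` cast in this filter would fail on them. Applying the same regex in SQL via `regexp_replace` lets legacy rows be cast and filtered: the four backslashes in the JS source become two inside the SQL literal, which the POSIX regex engine reads as one escaped literal backslash followed by `u`. A hedged standalone equivalent; the table name is illustrative:

```js
const rows = await db.manyOrNone(
  `SELECT * FROM "mastra_workflow_snapshot"
   WHERE regexp_replace(
     snapshot::text,
     '\\\\u(0000|[Dd][89A-Fa-f][0-9A-Fa-f]{2})', '', 'g'
   )::jsonb ->> 'status' = $1`,
  ["success"]
);
```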
@@ -5206,7 +5369,7 @@ var WorkflowsPG = class _WorkflowsPG extends WorkflowsStorage {
  const queryValues = usePagination ? [...values, normalizedPerPage, offset] : values;
  const result = await this.#db.client.manyOrNone(query, queryValues);
  const runs = (result || []).map((row) => {
- return parseWorkflowRun(row);
+ return this.parseWorkflowRun(row);
  });
  return { runs, total: total || runs.length };
  } catch (error) {
@@ -5239,7 +5402,7 @@ var PostgresStore = class extends MastraStorage {
  try {
  validateConfig("PostgresStore", config);
  super({ id: config.id, name: "PostgresStore", disableInit: config.disableInit });
- this.schema = config.schemaName || "public";
+ this.schema = parseSqlIdentifier(config.schemaName || "public", "schema name");
  if (isPoolConfig(config)) {
  this.#pool = config.pool;
  this.#ownsPool = false;
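Validating `schemaName` through `parseSqlIdentifier` closes an injection vector: the schema string is interpolated directly into SQL (see `getTableName` above), so a malicious config value could previously smuggle arbitrary statements into queries. A sketch of what such a guard typically checks — the real helper is imported from elsewhere in the bundle, so this only illustrates the idea:

```js
// Accept only unquoted Postgres identifiers: letters, digits, underscore,
// not starting with a digit, at most 63 characters (Postgres's name limit).
function parseSqlIdentifierSketch(name, kind) {
  if (!/^[A-Za-z_][A-Za-z0-9_]*$/.test(name) || name.length > 63) {
    throw new Error(`Invalid ${kind}: ${JSON.stringify(name)}`);
  }
  return name;
}

parseSqlIdentifierSketch("analytics", "schema name");                 // "analytics"
parseSqlIdentifierSketch('public"; DROP TABLE t; --', "schema name"); // throws
```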