@mastra/pg 1.0.0-beta.9 → 1.0.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (41) hide show
  1. package/CHANGELOG.md +1404 -0
  2. package/dist/docs/README.md +36 -0
  3. package/dist/docs/SKILL.md +37 -0
  4. package/dist/docs/SOURCE_MAP.json +6 -0
  5. package/dist/docs/memory/01-storage.md +233 -0
  6. package/dist/docs/memory/02-working-memory.md +390 -0
  7. package/dist/docs/memory/03-semantic-recall.md +233 -0
  8. package/dist/docs/memory/04-reference.md +133 -0
  9. package/dist/docs/processors/01-reference.md +297 -0
  10. package/dist/docs/rag/01-overview.md +74 -0
  11. package/dist/docs/rag/02-vector-databases.md +643 -0
  12. package/dist/docs/rag/03-retrieval.md +548 -0
  13. package/dist/docs/rag/04-reference.md +369 -0
  14. package/dist/docs/storage/01-reference.md +828 -0
  15. package/dist/docs/tools/01-reference.md +440 -0
  16. package/dist/docs/vectors/01-reference.md +307 -0
  17. package/dist/index.cjs +1003 -223
  18. package/dist/index.cjs.map +1 -1
  19. package/dist/index.d.ts +1 -1
  20. package/dist/index.d.ts.map +1 -1
  21. package/dist/index.js +1000 -225
  22. package/dist/index.js.map +1 -1
  23. package/dist/shared/config.d.ts +61 -66
  24. package/dist/shared/config.d.ts.map +1 -1
  25. package/dist/storage/client.d.ts +91 -0
  26. package/dist/storage/client.d.ts.map +1 -0
  27. package/dist/storage/db/index.d.ts +82 -17
  28. package/dist/storage/db/index.d.ts.map +1 -1
  29. package/dist/storage/domains/memory/index.d.ts +3 -2
  30. package/dist/storage/domains/memory/index.d.ts.map +1 -1
  31. package/dist/storage/domains/observability/index.d.ts +23 -0
  32. package/dist/storage/domains/observability/index.d.ts.map +1 -1
  33. package/dist/storage/domains/scores/index.d.ts.map +1 -1
  34. package/dist/storage/domains/workflows/index.d.ts +1 -0
  35. package/dist/storage/domains/workflows/index.d.ts.map +1 -1
  36. package/dist/storage/index.d.ts +44 -17
  37. package/dist/storage/index.d.ts.map +1 -1
  38. package/dist/storage/test-utils.d.ts.map +1 -1
  39. package/dist/vector/index.d.ts.map +1 -1
  40. package/dist/vector/sql-builder.d.ts.map +1 -1
  41. package/package.json +11 -11
package/dist/index.cjs CHANGED
@@ -8,7 +8,6 @@ var asyncMutex = require('async-mutex');
8
8
  var pg = require('pg');
9
9
  var xxhash = require('xxhash-wasm');
10
10
  var filter = require('@mastra/core/vector/filter');
11
- var pgPromise = require('pg-promise');
12
11
  var base = require('@mastra/core/base');
13
12
  var agent = require('@mastra/core/agent');
14
13
  var evals = require('@mastra/core/evals');
@@ -35,13 +34,15 @@ function _interopNamespace(e) {
35
34
 
36
35
  var pg__namespace = /*#__PURE__*/_interopNamespace(pg);
37
36
  var xxhash__default = /*#__PURE__*/_interopDefault(xxhash);
38
- var pgPromise__default = /*#__PURE__*/_interopDefault(pgPromise);
39
37
 
40
38
  // src/vector/index.ts
41
39
 
42
40
  // src/shared/config.ts
41
+ var isPoolConfig = (cfg) => {
42
+ return "pool" in cfg;
43
+ };
43
44
  var isConnectionStringConfig = (cfg) => {
44
- return "connectionString" in cfg;
45
+ return "connectionString" in cfg && typeof cfg.connectionString === "string";
45
46
  };
46
47
  var isHostConfig = (cfg) => {
47
48
  return "host" in cfg && "database" in cfg && "user" in cfg && "password" in cfg;
@@ -49,16 +50,13 @@ var isHostConfig = (cfg) => {
49
50
  var isCloudSqlConfig = (cfg) => {
50
51
  return "stream" in cfg || "password" in cfg && typeof cfg.password === "function";
51
52
  };
52
- var isClientConfig = (cfg) => {
53
- return "client" in cfg;
54
- };
55
53
  var validateConfig = (name, config) => {
56
54
  if (!config.id || typeof config.id !== "string" || config.id.trim() === "") {
57
55
  throw new Error(`${name}: id must be provided and cannot be empty.`);
58
56
  }
59
- if (isClientConfig(config)) {
60
- if (!config.client) {
61
- throw new Error(`${name}: client must be provided when using client config.`);
57
+ if (isPoolConfig(config)) {
58
+ if (!config.pool) {
59
+ throw new Error(`${name}: pool must be provided when using pool config.`);
62
60
  }
63
61
  return;
64
62
  }
@@ -79,7 +77,7 @@ var validateConfig = (name, config) => {
79
77
  }
80
78
  } else {
81
79
  throw new Error(
82
- `${name}: invalid config. Provide either {client}, {connectionString}, {host,port,database,user,password}, or a pg ClientConfig (e.g., Cloud SQL connector with \`stream\`).`
80
+ `${name}: invalid config. Provide either {pool}, {connectionString}, {host,port,database,user,password}, or a pg ClientConfig (e.g., Cloud SQL connector with \`stream\`).`
83
81
  );
84
82
  }
85
83
  };
@@ -287,8 +285,14 @@ var FILTER_OPERATORS = {
287
285
  };
288
286
  },
289
287
  // Element Operators
290
- $exists: (key) => {
288
+ $exists: (key, paramIndex, value) => {
291
289
  const jsonPathKey = parseJsonPathKey(key);
290
+ if (value === false) {
291
+ return {
292
+ sql: `NOT (metadata ? '${jsonPathKey}')`,
293
+ needsValue: false
294
+ };
295
+ }
292
296
  return {
293
297
  sql: `metadata ? '${jsonPathKey}'`,
294
298
  needsValue: false
@@ -366,17 +370,62 @@ function buildDeleteFilterQuery(filter) {
366
370
  values.push(value);
367
371
  return `metadata#>>'{${parseJsonPathKey(key)}}' = $${values.length}`;
368
372
  }
369
- const [[operator, operatorValue] = []] = Object.entries(value);
373
+ const entries = Object.entries(value);
374
+ if (entries.length > 1) {
375
+ const conditions2 = entries.map(([operator2, operatorValue2]) => {
376
+ if (operator2 === "$not") {
377
+ const nestedEntries = Object.entries(operatorValue2);
378
+ const nestedConditions = nestedEntries.map(([nestedOp, nestedValue]) => {
379
+ if (!FILTER_OPERATORS[nestedOp]) {
380
+ throw new Error(`Invalid operator in $not condition: ${nestedOp}`);
381
+ }
382
+ const operatorFn3 = FILTER_OPERATORS[nestedOp];
383
+ const operatorResult3 = operatorFn3(key, values.length + 1, nestedValue);
384
+ if (operatorResult3.needsValue) {
385
+ const transformedValue = operatorResult3.transformValue ? operatorResult3.transformValue() : nestedValue;
386
+ if (Array.isArray(transformedValue) && nestedOp === "$elemMatch") {
387
+ values.push(...transformedValue);
388
+ } else {
389
+ values.push(transformedValue);
390
+ }
391
+ }
392
+ return operatorResult3.sql;
393
+ }).join(" AND ");
394
+ return `NOT (${nestedConditions})`;
395
+ }
396
+ if (!FILTER_OPERATORS[operator2]) {
397
+ throw new Error(`Invalid operator: ${operator2}`);
398
+ }
399
+ const operatorFn2 = FILTER_OPERATORS[operator2];
400
+ const operatorResult2 = operatorFn2(key, values.length + 1, operatorValue2);
401
+ if (operatorResult2.needsValue) {
402
+ const transformedValue = operatorResult2.transformValue ? operatorResult2.transformValue() : operatorValue2;
403
+ if (Array.isArray(transformedValue) && operator2 === "$elemMatch") {
404
+ values.push(...transformedValue);
405
+ } else {
406
+ values.push(transformedValue);
407
+ }
408
+ }
409
+ return operatorResult2.sql;
410
+ });
411
+ return conditions2.join(" AND ");
412
+ }
413
+ const [[operator, operatorValue] = []] = entries;
370
414
  if (operator === "$not") {
371
- const entries = Object.entries(operatorValue);
372
- const conditions2 = entries.map(([nestedOp, nestedValue]) => {
415
+ const nestedEntries = Object.entries(operatorValue);
416
+ const conditions2 = nestedEntries.map(([nestedOp, nestedValue]) => {
373
417
  if (!FILTER_OPERATORS[nestedOp]) {
374
418
  throw new Error(`Invalid operator in $not condition: ${nestedOp}`);
375
419
  }
376
420
  const operatorFn2 = FILTER_OPERATORS[nestedOp];
377
421
  const operatorResult2 = operatorFn2(key, values.length + 1, nestedValue);
378
422
  if (operatorResult2.needsValue) {
379
- values.push(nestedValue);
423
+ const transformedValue = operatorResult2.transformValue ? operatorResult2.transformValue() : nestedValue;
424
+ if (Array.isArray(transformedValue) && nestedOp === "$elemMatch") {
425
+ values.push(...transformedValue);
426
+ } else {
427
+ values.push(transformedValue);
428
+ }
380
429
  }
381
430
  return operatorResult2.sql;
382
431
  }).join(" AND ");
@@ -443,17 +492,62 @@ function buildFilterQuery(filter, minScore, topK) {
443
492
  values.push(value);
444
493
  return `metadata#>>'{${parseJsonPathKey(key)}}' = $${values.length}`;
445
494
  }
446
- const [[operator, operatorValue] = []] = Object.entries(value);
495
+ const entries = Object.entries(value);
496
+ if (entries.length > 1) {
497
+ const conditions2 = entries.map(([operator2, operatorValue2]) => {
498
+ if (operator2 === "$not") {
499
+ const nestedEntries = Object.entries(operatorValue2);
500
+ const nestedConditions = nestedEntries.map(([nestedOp, nestedValue]) => {
501
+ if (!FILTER_OPERATORS[nestedOp]) {
502
+ throw new Error(`Invalid operator in $not condition: ${nestedOp}`);
503
+ }
504
+ const operatorFn3 = FILTER_OPERATORS[nestedOp];
505
+ const operatorResult3 = operatorFn3(key, values.length + 1, nestedValue);
506
+ if (operatorResult3.needsValue) {
507
+ const transformedValue = operatorResult3.transformValue ? operatorResult3.transformValue() : nestedValue;
508
+ if (Array.isArray(transformedValue) && nestedOp === "$elemMatch") {
509
+ values.push(...transformedValue);
510
+ } else {
511
+ values.push(transformedValue);
512
+ }
513
+ }
514
+ return operatorResult3.sql;
515
+ }).join(" AND ");
516
+ return `NOT (${nestedConditions})`;
517
+ }
518
+ if (!FILTER_OPERATORS[operator2]) {
519
+ throw new Error(`Invalid operator: ${operator2}`);
520
+ }
521
+ const operatorFn2 = FILTER_OPERATORS[operator2];
522
+ const operatorResult2 = operatorFn2(key, values.length + 1, operatorValue2);
523
+ if (operatorResult2.needsValue) {
524
+ const transformedValue = operatorResult2.transformValue ? operatorResult2.transformValue() : operatorValue2;
525
+ if (Array.isArray(transformedValue) && operator2 === "$elemMatch") {
526
+ values.push(...transformedValue);
527
+ } else {
528
+ values.push(transformedValue);
529
+ }
530
+ }
531
+ return operatorResult2.sql;
532
+ });
533
+ return conditions2.join(" AND ");
534
+ }
535
+ const [[operator, operatorValue] = []] = entries;
447
536
  if (operator === "$not") {
448
- const entries = Object.entries(operatorValue);
449
- const conditions2 = entries.map(([nestedOp, nestedValue]) => {
537
+ const nestedEntries = Object.entries(operatorValue);
538
+ const conditions2 = nestedEntries.map(([nestedOp, nestedValue]) => {
450
539
  if (!FILTER_OPERATORS[nestedOp]) {
451
540
  throw new Error(`Invalid operator in $not condition: ${nestedOp}`);
452
541
  }
453
542
  const operatorFn2 = FILTER_OPERATORS[nestedOp];
454
543
  const operatorResult2 = operatorFn2(key, values.length + 1, nestedValue);
455
544
  if (operatorResult2.needsValue) {
456
- values.push(nestedValue);
545
+ const transformedValue = operatorResult2.transformValue ? operatorResult2.transformValue() : nestedValue;
546
+ if (Array.isArray(transformedValue) && nestedOp === "$elemMatch") {
547
+ values.push(...transformedValue);
548
+ } else {
549
+ values.push(transformedValue);
550
+ }
457
551
  }
458
552
  return operatorResult2.sql;
459
553
  }).join(" AND ");
@@ -544,8 +638,8 @@ var PgVector = class extends vector.MastraVector {
544
638
  } else if (isCloudSqlConfig(config)) {
545
639
  poolConfig = {
546
640
  ...config,
547
- max: config.max ?? 20,
548
- idleTimeoutMillis: config.idleTimeoutMillis ?? 3e4,
641
+ max: config.pgPoolOptions?.max ?? 20,
642
+ idleTimeoutMillis: config.pgPoolOptions?.idleTimeoutMillis ?? 3e4,
549
643
  connectionTimeoutMillis: 2e3,
550
644
  ...config.pgPoolOptions
551
645
  };
@@ -713,9 +807,7 @@ var PgVector = class extends vector.MastraVector {
713
807
  probes
714
808
  }) {
715
809
  try {
716
- if (!Number.isInteger(topK) || topK <= 0) {
717
- throw new Error("topK must be a positive integer");
718
- }
810
+ vector.validateTopK("PG", topK);
719
811
  if (!Array.isArray(queryVector) || !queryVector.every((x) => typeof x === "number" && Number.isFinite(x))) {
720
812
  throw new Error("queryVector must be an array of finite numbers");
721
813
  }
@@ -800,6 +892,7 @@ var PgVector = class extends vector.MastraVector {
800
892
  ids,
801
893
  deleteFilter
802
894
  }) {
895
+ vector.validateUpsertInput("PG", vectors, metadata, ids);
803
896
  const { tableName } = this.getTableName(indexName);
804
897
  const client = await this.pool.connect();
805
898
  try {
@@ -1687,6 +1780,132 @@ var PgVector = class extends vector.MastraVector {
1687
1780
  }
1688
1781
  }
1689
1782
  };
1783
+
1784
+ // src/storage/client.ts
1785
+ function truncateQuery(query, maxLength = 100) {
1786
+ const normalized = query.replace(/\s+/g, " ").trim();
1787
+ if (normalized.length <= maxLength) {
1788
+ return normalized;
1789
+ }
1790
+ return normalized.slice(0, maxLength) + "...";
1791
+ }
1792
+ var PoolAdapter = class {
1793
+ constructor($pool) {
1794
+ this.$pool = $pool;
1795
+ }
1796
+ connect() {
1797
+ return this.$pool.connect();
1798
+ }
1799
+ async none(query, values) {
1800
+ await this.$pool.query(query, values);
1801
+ return null;
1802
+ }
1803
+ async one(query, values) {
1804
+ const result = await this.$pool.query(query, values);
1805
+ if (result.rows.length === 0) {
1806
+ throw new Error(`No data returned from query: ${truncateQuery(query)}`);
1807
+ }
1808
+ if (result.rows.length > 1) {
1809
+ throw new Error(`Multiple rows returned when one was expected: ${truncateQuery(query)}`);
1810
+ }
1811
+ return result.rows[0];
1812
+ }
1813
+ async oneOrNone(query, values) {
1814
+ const result = await this.$pool.query(query, values);
1815
+ if (result.rows.length === 0) {
1816
+ return null;
1817
+ }
1818
+ if (result.rows.length > 1) {
1819
+ throw new Error(`Multiple rows returned when one or none was expected: ${truncateQuery(query)}`);
1820
+ }
1821
+ return result.rows[0];
1822
+ }
1823
+ async any(query, values) {
1824
+ const result = await this.$pool.query(query, values);
1825
+ return result.rows;
1826
+ }
1827
+ async manyOrNone(query, values) {
1828
+ return this.any(query, values);
1829
+ }
1830
+ async many(query, values) {
1831
+ const result = await this.$pool.query(query, values);
1832
+ if (result.rows.length === 0) {
1833
+ throw new Error(`No data returned from query: ${truncateQuery(query)}`);
1834
+ }
1835
+ return result.rows;
1836
+ }
1837
+ async query(query, values) {
1838
+ return this.$pool.query(query, values);
1839
+ }
1840
+ async tx(callback) {
1841
+ const client = await this.$pool.connect();
1842
+ try {
1843
+ await client.query("BEGIN");
1844
+ const txClient = new TransactionClient(client);
1845
+ const result = await callback(txClient);
1846
+ await client.query("COMMIT");
1847
+ return result;
1848
+ } catch (error) {
1849
+ try {
1850
+ await client.query("ROLLBACK");
1851
+ } catch (rollbackError) {
1852
+ console.error("Transaction rollback failed:", rollbackError);
1853
+ }
1854
+ throw error;
1855
+ } finally {
1856
+ client.release();
1857
+ }
1858
+ }
1859
+ };
1860
+ var TransactionClient = class {
1861
+ constructor(client) {
1862
+ this.client = client;
1863
+ }
1864
+ async none(query, values) {
1865
+ await this.client.query(query, values);
1866
+ return null;
1867
+ }
1868
+ async one(query, values) {
1869
+ const result = await this.client.query(query, values);
1870
+ if (result.rows.length === 0) {
1871
+ throw new Error(`No data returned from query: ${truncateQuery(query)}`);
1872
+ }
1873
+ if (result.rows.length > 1) {
1874
+ throw new Error(`Multiple rows returned when one was expected: ${truncateQuery(query)}`);
1875
+ }
1876
+ return result.rows[0];
1877
+ }
1878
+ async oneOrNone(query, values) {
1879
+ const result = await this.client.query(query, values);
1880
+ if (result.rows.length === 0) {
1881
+ return null;
1882
+ }
1883
+ if (result.rows.length > 1) {
1884
+ throw new Error(`Multiple rows returned when one or none was expected: ${truncateQuery(query)}`);
1885
+ }
1886
+ return result.rows[0];
1887
+ }
1888
+ async any(query, values) {
1889
+ const result = await this.client.query(query, values);
1890
+ return result.rows;
1891
+ }
1892
+ async manyOrNone(query, values) {
1893
+ return this.any(query, values);
1894
+ }
1895
+ async many(query, values) {
1896
+ const result = await this.client.query(query, values);
1897
+ if (result.rows.length === 0) {
1898
+ throw new Error(`No data returned from query: ${truncateQuery(query)}`);
1899
+ }
1900
+ return result.rows;
1901
+ }
1902
+ async query(query, values) {
1903
+ return this.client.query(query, values);
1904
+ }
1905
+ async batch(promises) {
1906
+ return Promise.all(promises);
1907
+ }
1908
+ };
1690
1909
  function resolvePgConfig(config) {
1691
1910
  if ("client" in config) {
1692
1911
  return {
@@ -1696,10 +1915,32 @@ function resolvePgConfig(config) {
1696
1915
  indexes: config.indexes
1697
1916
  };
1698
1917
  }
1699
- const pgp = pgPromise__default.default();
1700
- const client = pgp(config);
1918
+ if ("pool" in config) {
1919
+ return {
1920
+ client: new PoolAdapter(config.pool),
1921
+ schemaName: config.schemaName,
1922
+ skipDefaultIndexes: config.skipDefaultIndexes,
1923
+ indexes: config.indexes
1924
+ };
1925
+ }
1926
+ let pool;
1927
+ if ("connectionString" in config) {
1928
+ pool = new pg.Pool({
1929
+ connectionString: config.connectionString,
1930
+ ssl: config.ssl
1931
+ });
1932
+ } else {
1933
+ pool = new pg.Pool({
1934
+ host: config.host,
1935
+ port: config.port,
1936
+ database: config.database,
1937
+ user: config.user,
1938
+ password: config.password,
1939
+ ssl: config.ssl
1940
+ });
1941
+ }
1701
1942
  return {
1702
- client,
1943
+ client: new PoolAdapter(pool),
1703
1944
  schemaName: config.schemaName,
1704
1945
  skipDefaultIndexes: config.skipDefaultIndexes,
1705
1946
  indexes: config.indexes
@@ -1714,6 +1955,91 @@ function getTableName({ indexName, schemaName }) {
1714
1955
  const quotedSchemaName = schemaName;
1715
1956
  return quotedSchemaName ? `${quotedSchemaName}.${quotedIndexName}` : quotedIndexName;
1716
1957
  }
1958
+ function mapToSqlType(type) {
1959
+ switch (type) {
1960
+ case "uuid":
1961
+ return "UUID";
1962
+ case "boolean":
1963
+ return "BOOLEAN";
1964
+ default:
1965
+ return storage.getSqlType(type);
1966
+ }
1967
+ }
1968
+ function generateTableSQL({
1969
+ tableName,
1970
+ schema,
1971
+ schemaName,
1972
+ includeAllConstraints = false
1973
+ }) {
1974
+ const timeZColumns = Object.entries(schema).filter(([_, def]) => def.type === "timestamp").map(([name]) => {
1975
+ const parsedName = utils.parseSqlIdentifier(name, "column name");
1976
+ return `"${parsedName}Z" TIMESTAMPTZ DEFAULT NOW()`;
1977
+ });
1978
+ const columns = Object.entries(schema).map(([name, def]) => {
1979
+ const parsedName = utils.parseSqlIdentifier(name, "column name");
1980
+ const constraints = [];
1981
+ if (def.primaryKey) constraints.push("PRIMARY KEY");
1982
+ if (!def.nullable) constraints.push("NOT NULL");
1983
+ return `"${parsedName}" ${mapToSqlType(def.type)} ${constraints.join(" ")}`;
1984
+ });
1985
+ const finalColumns = [...columns, ...timeZColumns].join(",\n");
1986
+ const parsedSchemaName = schemaName ? utils.parseSqlIdentifier(schemaName, "schema name") : "";
1987
+ const constraintPrefix = parsedSchemaName ? `${parsedSchemaName}_` : "";
1988
+ const quotedSchemaName = getSchemaName(schemaName);
1989
+ const sql = `
1990
+ CREATE TABLE IF NOT EXISTS ${getTableName({ indexName: tableName, schemaName: quotedSchemaName })} (
1991
+ ${finalColumns}
1992
+ );
1993
+ ${tableName === storage.TABLE_WORKFLOW_SNAPSHOT ? `
1994
+ DO $$ BEGIN
1995
+ IF NOT EXISTS (
1996
+ SELECT 1 FROM pg_constraint WHERE conname = lower('${constraintPrefix}mastra_workflow_snapshot_workflow_name_run_id_key')
1997
+ ) AND NOT EXISTS (
1998
+ SELECT 1 FROM pg_indexes WHERE indexname = lower('${constraintPrefix}mastra_workflow_snapshot_workflow_name_run_id_key')
1999
+ ) THEN
2000
+ ALTER TABLE ${getTableName({ indexName: tableName, schemaName: quotedSchemaName })}
2001
+ ADD CONSTRAINT ${constraintPrefix}mastra_workflow_snapshot_workflow_name_run_id_key
2002
+ UNIQUE (workflow_name, run_id);
2003
+ END IF;
2004
+ END $$;
2005
+ ` : ""}
2006
+ ${// For spans table: Include PRIMARY KEY in exports, but not in runtime (handled after deduplication)
2007
+ tableName === storage.TABLE_SPANS && includeAllConstraints ? `
2008
+ DO $$ BEGIN
2009
+ IF NOT EXISTS (
2010
+ SELECT 1 FROM pg_constraint WHERE conname = lower('${constraintPrefix}mastra_ai_spans_traceid_spanid_pk')
2011
+ ) THEN
2012
+ ALTER TABLE ${getTableName({ indexName: tableName, schemaName: quotedSchemaName })}
2013
+ ADD CONSTRAINT ${constraintPrefix}mastra_ai_spans_traceid_spanid_pk
2014
+ PRIMARY KEY ("traceId", "spanId");
2015
+ END IF;
2016
+ END $$;
2017
+ ` : ""}
2018
+ `;
2019
+ return sql;
2020
+ }
2021
+ function exportSchemas(schemaName) {
2022
+ const statements = [];
2023
+ if (schemaName) {
2024
+ const quotedSchemaName = getSchemaName(schemaName);
2025
+ statements.push(`-- Create schema if it doesn't exist`);
2026
+ statements.push(`CREATE SCHEMA IF NOT EXISTS ${quotedSchemaName};`);
2027
+ statements.push("");
2028
+ }
2029
+ for (const [tableName, schema] of Object.entries(storage.TABLE_SCHEMAS)) {
2030
+ statements.push(`-- Table: ${tableName}`);
2031
+ const sql = generateTableSQL({
2032
+ tableName,
2033
+ schema,
2034
+ schemaName,
2035
+ includeAllConstraints: true
2036
+ // Include all constraints for exports/documentation
2037
+ });
2038
+ statements.push(sql.trim());
2039
+ statements.push("");
2040
+ }
2041
+ return statements.join("\n");
2042
+ }
1717
2043
  var schemaSetupRegistry = /* @__PURE__ */ new Map();
1718
2044
  var PgDB = class extends base.MastraBase {
1719
2045
  client;
@@ -1831,16 +2157,6 @@ var PgDB = class extends base.MastraBase {
1831
2157
  }
1832
2158
  await registryEntry.promise;
1833
2159
  }
1834
- getSqlType(type) {
1835
- switch (type) {
1836
- case "uuid":
1837
- return "UUID";
1838
- case "boolean":
1839
- return "BOOLEAN";
1840
- default:
1841
- return storage.getSqlType(type);
1842
- }
1843
- }
1844
2160
  getDefaultValue(type) {
1845
2161
  switch (type) {
1846
2162
  case "timestamp":
@@ -1858,10 +2174,27 @@ var PgDB = class extends base.MastraBase {
1858
2174
  const columns = Object.keys(record).map((col) => utils.parseSqlIdentifier(col, "column name"));
1859
2175
  const values = this.prepareValuesForInsert(record, tableName);
1860
2176
  const placeholders = values.map((_, i) => `$${i + 1}`).join(", ");
1861
- await this.client.none(
1862
- `INSERT INTO ${getTableName({ indexName: tableName, schemaName })} (${columns.map((c) => `"${c}"`).join(", ")}) VALUES (${placeholders})`,
1863
- values
1864
- );
2177
+ const fullTableName = getTableName({ indexName: tableName, schemaName });
2178
+ const columnList = columns.map((c) => `"${c}"`).join(", ");
2179
+ if (tableName === storage.TABLE_SPANS) {
2180
+ const updateColumns = columns.filter((c) => c !== "traceId" && c !== "spanId");
2181
+ if (updateColumns.length > 0) {
2182
+ const updateClause = updateColumns.map((c) => `"${c}" = EXCLUDED."${c}"`).join(", ");
2183
+ await this.client.none(
2184
+ `INSERT INTO ${fullTableName} (${columnList}) VALUES (${placeholders})
2185
+ ON CONFLICT ("traceId", "spanId") DO UPDATE SET ${updateClause}`,
2186
+ values
2187
+ );
2188
+ } else {
2189
+ await this.client.none(
2190
+ `INSERT INTO ${fullTableName} (${columnList}) VALUES (${placeholders})
2191
+ ON CONFLICT ("traceId", "spanId") DO NOTHING`,
2192
+ values
2193
+ );
2194
+ }
2195
+ } else {
2196
+ await this.client.none(`INSERT INTO ${fullTableName} (${columnList}) VALUES (${placeholders})`, values);
2197
+ }
1865
2198
  } catch (error$1) {
1866
2199
  throw new error.MastraError(
1867
2200
  {
@@ -1885,7 +2218,7 @@ var PgDB = class extends base.MastraBase {
1885
2218
  SELECT 1 FROM information_schema.tables
1886
2219
  WHERE table_schema = $1 AND table_name = $2
1887
2220
  )`,
1888
- [this.schemaName || "mastra", tableName]
2221
+ [this.schemaName || "public", tableName]
1889
2222
  );
1890
2223
  if (tableExists?.exists) {
1891
2224
  await this.client.none(`TRUNCATE TABLE ${tableNameWithSchema} CASCADE`);
@@ -1910,52 +2243,10 @@ var PgDB = class extends base.MastraBase {
1910
2243
  }) {
1911
2244
  try {
1912
2245
  const timeZColumnNames = Object.entries(schema).filter(([_, def]) => def.type === "timestamp").map(([name]) => name);
1913
- const timeZColumns = Object.entries(schema).filter(([_, def]) => def.type === "timestamp").map(([name]) => {
1914
- const parsedName = utils.parseSqlIdentifier(name, "column name");
1915
- return `"${parsedName}Z" TIMESTAMPTZ DEFAULT NOW()`;
1916
- });
1917
- const columns = Object.entries(schema).map(([name, def]) => {
1918
- const parsedName = utils.parseSqlIdentifier(name, "column name");
1919
- const constraints = [];
1920
- if (def.primaryKey) constraints.push("PRIMARY KEY");
1921
- if (!def.nullable) constraints.push("NOT NULL");
1922
- return `"${parsedName}" ${this.getSqlType(def.type)} ${constraints.join(" ")}`;
1923
- });
1924
2246
  if (this.schemaName) {
1925
2247
  await this.setupSchema();
1926
2248
  }
1927
- const finalColumns = [...columns, ...timeZColumns].join(",\n");
1928
- const constraintPrefix = this.schemaName ? `${this.schemaName}_` : "";
1929
- const schemaName = getSchemaName(this.schemaName);
1930
- const sql = `
1931
- CREATE TABLE IF NOT EXISTS ${getTableName({ indexName: tableName, schemaName })} (
1932
- ${finalColumns}
1933
- );
1934
- ${tableName === storage.TABLE_WORKFLOW_SNAPSHOT ? `
1935
- DO $$ BEGIN
1936
- IF NOT EXISTS (
1937
- SELECT 1 FROM pg_constraint WHERE conname = '${constraintPrefix}mastra_workflow_snapshot_workflow_name_run_id_key'
1938
- ) AND NOT EXISTS (
1939
- SELECT 1 FROM pg_indexes WHERE indexname = '${constraintPrefix}mastra_workflow_snapshot_workflow_name_run_id_key'
1940
- ) THEN
1941
- ALTER TABLE ${getTableName({ indexName: tableName, schemaName })}
1942
- ADD CONSTRAINT ${constraintPrefix}mastra_workflow_snapshot_workflow_name_run_id_key
1943
- UNIQUE (workflow_name, run_id);
1944
- END IF;
1945
- END $$;
1946
- ` : ""}
1947
- ${tableName === storage.TABLE_SPANS ? `
1948
- DO $$ BEGIN
1949
- IF NOT EXISTS (
1950
- SELECT 1 FROM pg_constraint WHERE conname = '${constraintPrefix}mastra_ai_spans_traceid_spanid_pk'
1951
- ) THEN
1952
- ALTER TABLE ${getTableName({ indexName: tableName, schemaName: getSchemaName(this.schemaName) })}
1953
- ADD CONSTRAINT ${constraintPrefix}mastra_ai_spans_traceid_spanid_pk
1954
- PRIMARY KEY ("traceId", "spanId");
1955
- END IF;
1956
- END $$;
1957
- ` : ""}
1958
- `;
2249
+ const sql = generateTableSQL({ tableName, schema, schemaName: this.schemaName });
1959
2250
  await this.client.none(sql);
1960
2251
  await this.alterTable({
1961
2252
  tableName,
@@ -1965,8 +2256,46 @@ var PgDB = class extends base.MastraBase {
1965
2256
  if (tableName === storage.TABLE_SPANS) {
1966
2257
  await this.setupTimestampTriggers(tableName);
1967
2258
  await this.migrateSpansTable();
2259
+ const pkExists = await this.spansPrimaryKeyExists();
2260
+ if (!pkExists) {
2261
+ const duplicateInfo = await this.checkForDuplicateSpans();
2262
+ if (duplicateInfo.hasDuplicates) {
2263
+ const errorMessage = `
2264
+ ===========================================================================
2265
+ MIGRATION REQUIRED: Duplicate spans detected in ${duplicateInfo.tableName}
2266
+ ===========================================================================
2267
+
2268
+ Found ${duplicateInfo.duplicateCount} duplicate (traceId, spanId) combinations.
2269
+
2270
+ The spans table requires a unique constraint on (traceId, spanId), but your
2271
+ database contains duplicate entries that must be resolved first.
2272
+
2273
+ To fix this, run the manual migration command:
2274
+
2275
+ npx mastra migrate
2276
+
2277
+ This command will:
2278
+ 1. Remove duplicate spans (keeping the most complete/recent version)
2279
+ 2. Add the required unique constraint
2280
+
2281
+ Note: This migration may take some time for large tables.
2282
+ ===========================================================================
2283
+ `;
2284
+ throw new error.MastraError({
2285
+ id: storage.createStorageErrorId("PG", "MIGRATION_REQUIRED", "DUPLICATE_SPANS"),
2286
+ domain: error.ErrorDomain.STORAGE,
2287
+ category: error.ErrorCategory.USER,
2288
+ text: errorMessage
2289
+ });
2290
+ } else {
2291
+ await this.addSpansPrimaryKey();
2292
+ }
2293
+ }
1968
2294
  }
1969
2295
  } catch (error$1) {
2296
+ if (error$1 instanceof error.MastraError) {
2297
+ throw error$1;
2298
+ }
1970
2299
  throw new error.MastraError(
1971
2300
  {
1972
2301
  id: storage.createStorageErrorId("PG", "CREATE_TABLE", "FAILED"),
@@ -2029,12 +2358,29 @@ var PgDB = class extends base.MastraBase {
2029
2358
  const columnExists = await this.hasColumn(storage.TABLE_SPANS, columnName);
2030
2359
  if (!columnExists) {
2031
2360
  const parsedColumnName = utils.parseSqlIdentifier(columnName, "column name");
2032
- const sqlType = this.getSqlType(columnDef.type);
2361
+ const sqlType = mapToSqlType(columnDef.type);
2033
2362
  const nullable = columnDef.nullable ? "" : "NOT NULL";
2034
2363
  const defaultValue = !columnDef.nullable ? this.getDefaultValue(columnDef.type) : "";
2035
2364
  const alterSql = `ALTER TABLE ${fullTableName} ADD COLUMN IF NOT EXISTS "${parsedColumnName}" ${sqlType} ${nullable} ${defaultValue}`.trim();
2036
2365
  await this.client.none(alterSql);
2037
2366
  this.logger?.debug?.(`Added column '${columnName}' to ${fullTableName}`);
2367
+ if (sqlType === "TIMESTAMP") {
2368
+ const timestampZSql = `ALTER TABLE ${fullTableName} ADD COLUMN IF NOT EXISTS "${parsedColumnName}Z" TIMESTAMPTZ DEFAULT NOW()`.trim();
2369
+ await this.client.none(timestampZSql);
2370
+ this.logger?.debug?.(`Added timezone column '${columnName}Z' to ${fullTableName}`);
2371
+ }
2372
+ }
2373
+ }
2374
+ for (const [columnName, columnDef] of Object.entries(schema)) {
2375
+ if (columnDef.type === "timestamp") {
2376
+ const tzColumnName = `${columnName}Z`;
2377
+ const tzColumnExists = await this.hasColumn(storage.TABLE_SPANS, tzColumnName);
2378
+ if (!tzColumnExists) {
2379
+ const parsedTzColumnName = utils.parseSqlIdentifier(tzColumnName, "column name");
2380
+ const timestampZSql = `ALTER TABLE ${fullTableName} ADD COLUMN IF NOT EXISTS "${parsedTzColumnName}" TIMESTAMPTZ DEFAULT NOW()`.trim();
2381
+ await this.client.none(timestampZSql);
2382
+ this.logger?.debug?.(`Added timezone column '${tzColumnName}' to ${fullTableName}`);
2383
+ }
2038
2384
  }
2039
2385
  }
2040
2386
  this.logger?.info?.(`Migration completed for ${fullTableName}`);
@@ -2042,6 +2388,224 @@ var PgDB = class extends base.MastraBase {
2042
2388
  this.logger?.warn?.(`Failed to migrate spans table ${fullTableName}:`, error);
2043
2389
  }
2044
2390
  }
2391
+ /**
2392
+ * Deduplicates spans in the mastra_ai_spans table before adding the PRIMARY KEY constraint.
2393
+ * Keeps spans based on priority: completed (endedAt NOT NULL) > most recent updatedAt > most recent createdAt.
2394
+ *
2395
+ * Note: This prioritizes migration completion over perfect data preservation.
2396
+ * Old trace data may be lost, which is acceptable for this use case.
2397
+ */
2398
+ async deduplicateSpans() {
2399
+ const fullTableName = getTableName({ indexName: storage.TABLE_SPANS, schemaName: getSchemaName(this.schemaName) });
2400
+ try {
2401
+ const duplicateCheck = await this.client.oneOrNone(`
2402
+ SELECT EXISTS (
2403
+ SELECT 1
2404
+ FROM ${fullTableName}
2405
+ GROUP BY "traceId", "spanId"
2406
+ HAVING COUNT(*) > 1
2407
+ LIMIT 1
2408
+ ) as has_duplicates
2409
+ `);
2410
+ if (!duplicateCheck?.has_duplicates) {
2411
+ this.logger?.debug?.(`No duplicate spans found in ${fullTableName}`);
2412
+ return;
2413
+ }
2414
+ this.logger?.info?.(`Duplicate spans detected in ${fullTableName}, starting deduplication...`);
2415
+ const result = await this.client.query(`
2416
+ DELETE FROM ${fullTableName} t1
2417
+ USING ${fullTableName} t2
2418
+ WHERE t1."traceId" = t2."traceId"
2419
+ AND t1."spanId" = t2."spanId"
2420
+ AND (
2421
+ -- Keep completed spans over incomplete
2422
+ (t1."endedAt" IS NULL AND t2."endedAt" IS NOT NULL)
2423
+ OR
2424
+ -- If both have same completion status, keep more recent updatedAt
2425
+ (
2426
+ (t1."endedAt" IS NULL) = (t2."endedAt" IS NULL)
2427
+ AND (
2428
+ (t1."updatedAt" < t2."updatedAt")
2429
+ OR (t1."updatedAt" IS NULL AND t2."updatedAt" IS NOT NULL)
2430
+ OR
2431
+ -- If updatedAt is the same, keep more recent createdAt
2432
+ (
2433
+ (t1."updatedAt" = t2."updatedAt" OR (t1."updatedAt" IS NULL AND t2."updatedAt" IS NULL))
2434
+ AND (
2435
+ (t1."createdAt" < t2."createdAt")
2436
+ OR (t1."createdAt" IS NULL AND t2."createdAt" IS NOT NULL)
2437
+ OR
2438
+ -- If all else equal, use ctid as tiebreaker
2439
+ (
2440
+ (t1."createdAt" = t2."createdAt" OR (t1."createdAt" IS NULL AND t2."createdAt" IS NULL))
2441
+ AND t1.ctid < t2.ctid
2442
+ )
2443
+ )
2444
+ )
2445
+ )
2446
+ )
2447
+ )
2448
+ `);
2449
+ this.logger?.info?.(
2450
+ `Deduplication complete: removed ${result.rowCount ?? 0} duplicate spans from ${fullTableName}`
2451
+ );
2452
+ } catch (error$1) {
2453
+ throw new error.MastraError(
2454
+ {
2455
+ id: storage.createStorageErrorId("PG", "DEDUPLICATE_SPANS", "FAILED"),
2456
+ domain: error.ErrorDomain.STORAGE,
2457
+ category: error.ErrorCategory.THIRD_PARTY,
2458
+ details: {
2459
+ tableName: storage.TABLE_SPANS
2460
+ }
2461
+ },
2462
+ error$1
2463
+ );
2464
+ }
2465
+ }
2466
+ /**
2467
+ * Checks for duplicate (traceId, spanId) combinations in the spans table.
2468
+ * Returns information about duplicates for logging/CLI purposes.
2469
+ */
2470
+ async checkForDuplicateSpans() {
2471
+ const fullTableName = getTableName({ indexName: storage.TABLE_SPANS, schemaName: getSchemaName(this.schemaName) });
2472
+ try {
2473
+ const result = await this.client.oneOrNone(`
2474
+ SELECT COUNT(*) as duplicate_count
2475
+ FROM (
2476
+ SELECT "traceId", "spanId"
2477
+ FROM ${fullTableName}
2478
+ GROUP BY "traceId", "spanId"
2479
+ HAVING COUNT(*) > 1
2480
+ ) duplicates
2481
+ `);
2482
+ const duplicateCount = parseInt(result?.duplicate_count ?? "0", 10);
2483
+ return {
2484
+ hasDuplicates: duplicateCount > 0,
2485
+ duplicateCount,
2486
+ tableName: fullTableName
2487
+ };
2488
+ } catch (error) {
2489
+ this.logger?.debug?.(`Could not check for duplicates: ${error}`);
2490
+ return { hasDuplicates: false, duplicateCount: 0, tableName: fullTableName };
2491
+ }
2492
+ }
2493
+ /**
2494
+ * Checks if the PRIMARY KEY constraint on (traceId, spanId) already exists on the spans table.
2495
+ * Used to skip deduplication when the constraint already exists (migration already complete).
2496
+ */
2497
+ async spansPrimaryKeyExists() {
2498
+ const parsedSchemaName = this.schemaName ? utils.parseSqlIdentifier(this.schemaName, "schema name") : "";
2499
+ const constraintPrefix = parsedSchemaName ? `${parsedSchemaName}_` : "";
2500
+ const constraintName = `${constraintPrefix}mastra_ai_spans_traceid_spanid_pk`;
2501
+ const result = await this.client.oneOrNone(
2502
+ `SELECT EXISTS (SELECT 1 FROM pg_constraint WHERE conname = $1) as exists`,
2503
+ [constraintName]
2504
+ );
2505
+ return result?.exists ?? false;
2506
+ }
2507
+ /**
2508
+ * Adds the PRIMARY KEY constraint on (traceId, spanId) to the spans table.
2509
+ * Should be called AFTER deduplication to ensure no duplicate key violations.
2510
+ */
2511
+ async addSpansPrimaryKey() {
2512
+ const fullTableName = getTableName({ indexName: storage.TABLE_SPANS, schemaName: getSchemaName(this.schemaName) });
2513
+ const parsedSchemaName = this.schemaName ? utils.parseSqlIdentifier(this.schemaName, "schema name") : "";
2514
+ const constraintPrefix = parsedSchemaName ? `${parsedSchemaName}_` : "";
2515
+ const constraintName = `${constraintPrefix}mastra_ai_spans_traceid_spanid_pk`;
2516
+ try {
2517
+ const constraintExists = await this.client.oneOrNone(
2518
+ `
2519
+ SELECT EXISTS (
2520
+ SELECT 1 FROM pg_constraint WHERE conname = $1
2521
+ ) as exists
2522
+ `,
2523
+ [constraintName]
2524
+ );
2525
+ if (constraintExists?.exists) {
2526
+ this.logger?.debug?.(`PRIMARY KEY constraint ${constraintName} already exists on ${fullTableName}`);
2527
+ return;
2528
+ }
2529
+ await this.client.none(`
2530
+ ALTER TABLE ${fullTableName}
2531
+ ADD CONSTRAINT ${constraintName}
2532
+ PRIMARY KEY ("traceId", "spanId")
2533
+ `);
2534
+ this.logger?.info?.(`Added PRIMARY KEY constraint ${constraintName} to ${fullTableName}`);
2535
+ } catch (error$1) {
2536
+ throw new error.MastraError(
2537
+ {
2538
+ id: storage.createStorageErrorId("PG", "ADD_SPANS_PRIMARY_KEY", "FAILED"),
2539
+ domain: error.ErrorDomain.STORAGE,
2540
+ category: error.ErrorCategory.THIRD_PARTY,
2541
+ details: {
2542
+ tableName: storage.TABLE_SPANS,
2543
+ constraintName
2544
+ }
2545
+ },
2546
+ error$1
2547
+ );
2548
+ }
2549
+ }
2550
+ /**
2551
+ * Manually run the spans migration to deduplicate and add the unique constraint.
2552
+ * This is intended to be called from the CLI when duplicates are detected.
2553
+ *
2554
+ * @returns Migration result with status and details
2555
+ */
2556
+ async migrateSpans() {
2557
+ const fullTableName = getTableName({ indexName: storage.TABLE_SPANS, schemaName: getSchemaName(this.schemaName) });
2558
+ const pkExists = await this.spansPrimaryKeyExists();
2559
+ if (pkExists) {
2560
+ return {
2561
+ success: true,
2562
+ alreadyMigrated: true,
2563
+ duplicatesRemoved: 0,
2564
+ message: `Migration already complete. PRIMARY KEY constraint exists on ${fullTableName}.`
2565
+ };
2566
+ }
2567
+ const duplicateInfo = await this.checkForDuplicateSpans();
2568
+ if (duplicateInfo.hasDuplicates) {
2569
+ this.logger?.info?.(
2570
+ `Found ${duplicateInfo.duplicateCount} duplicate (traceId, spanId) combinations. Starting deduplication...`
2571
+ );
2572
+ await this.deduplicateSpans();
2573
+ } else {
2574
+ this.logger?.info?.(`No duplicate spans found.`);
2575
+ }
2576
+ await this.addSpansPrimaryKey();
2577
+ return {
2578
+ success: true,
2579
+ alreadyMigrated: false,
2580
+ duplicatesRemoved: duplicateInfo.duplicateCount,
2581
+ message: duplicateInfo.hasDuplicates ? `Migration complete. Removed duplicates and added PRIMARY KEY constraint to ${fullTableName}.` : `Migration complete. Added PRIMARY KEY constraint to ${fullTableName}.`
2582
+ };
2583
+ }
2584
+ /**
2585
+ * Check migration status for the spans table.
2586
+ * Returns information about whether migration is needed.
2587
+ */
2588
+ async checkSpansMigrationStatus() {
2589
+ const fullTableName = getTableName({ indexName: storage.TABLE_SPANS, schemaName: getSchemaName(this.schemaName) });
2590
+ const pkExists = await this.spansPrimaryKeyExists();
2591
+ if (pkExists) {
2592
+ return {
2593
+ needsMigration: false,
2594
+ hasDuplicates: false,
2595
+ duplicateCount: 0,
2596
+ constraintExists: true,
2597
+ tableName: fullTableName
2598
+ };
2599
+ }
2600
+ const duplicateInfo = await this.checkForDuplicateSpans();
2601
+ return {
2602
+ needsMigration: true,
2603
+ hasDuplicates: duplicateInfo.hasDuplicates,
2604
+ duplicateCount: duplicateInfo.duplicateCount,
2605
+ constraintExists: false,
2606
+ tableName: fullTableName
2607
+ };
2608
+ }
2045
2609
  /**
2046
2610
  * Alters table schema to add columns if they don't exist
2047
2611
  * @param tableName Name of the table
@@ -2059,7 +2623,7 @@ var PgDB = class extends base.MastraBase {
2059
2623
  if (schema[columnName]) {
2060
2624
  const columnDef = schema[columnName];
2061
2625
  const parsedColumnName = utils.parseSqlIdentifier(columnName, "column name");
2062
- const sqlType = this.getSqlType(columnDef.type);
2626
+ const sqlType = mapToSqlType(columnDef.type);
2063
2627
  const nullable = columnDef.nullable ? "" : "NOT NULL";
2064
2628
  const defaultValue = !columnDef.nullable ? this.getDefaultValue(columnDef.type) : "";
2065
2629
  const alterSql = `ALTER TABLE ${fullTableName} ADD COLUMN IF NOT EXISTS "${parsedColumnName}" ${sqlType} ${nullable} ${defaultValue}`.trim();
@@ -2379,9 +2943,9 @@ var PgDB = class extends base.MastraBase {
2379
2943
  size: result.size || "0",
2380
2944
  definition: result.definition || "",
2381
2945
  method: result.method || "btree",
2382
- scans: parseInt(result.scans) || 0,
2383
- tuples_read: parseInt(result.tuples_read) || 0,
2384
- tuples_fetched: parseInt(result.tuples_fetched) || 0
2946
+ scans: parseInt(String(result.scans)) || 0,
2947
+ tuples_read: parseInt(String(result.tuples_read)) || 0,
2948
+ tuples_fetched: parseInt(String(result.tuples_fetched)) || 0
2385
2949
  };
2386
2950
  } catch (error$1) {
2387
2951
  throw new error.MastraError(
@@ -2898,6 +3462,9 @@ function getTableName3({ indexName, schemaName }) {
2898
3462
  const quotedIndexName = `"${indexName}"`;
2899
3463
  return schemaName ? `${schemaName}.${quotedIndexName}` : quotedIndexName;
2900
3464
  }
3465
+ function inPlaceholders(count, startIndex = 1) {
3466
+ return Array.from({ length: count }, (_, i) => `$${i + startIndex}`).join(", ");
3467
+ }
2901
3468
  var MemoryPG = class _MemoryPG extends storage.MemoryStorage {
2902
3469
  #db;
2903
3470
  #schema;
@@ -3024,27 +3591,52 @@ var MemoryPG = class _MemoryPG extends storage.MemoryStorage {
3024
3591
  );
3025
3592
  }
3026
3593
  }
3027
- async listThreadsByResourceId(args) {
3028
- const { resourceId, page = 0, perPage: perPageInput, orderBy } = args;
3029
- if (page < 0) {
3594
+ async listThreads(args) {
3595
+ const { page = 0, perPage: perPageInput, orderBy, filter } = args;
3596
+ try {
3597
+ this.validatePaginationInput(page, perPageInput ?? 100);
3598
+ } catch (error$1) {
3030
3599
  throw new error.MastraError({
3031
- id: storage.createStorageErrorId("PG", "LIST_THREADS_BY_RESOURCE_ID", "INVALID_PAGE"),
3600
+ id: storage.createStorageErrorId("PG", "LIST_THREADS", "INVALID_PAGE"),
3032
3601
  domain: error.ErrorDomain.STORAGE,
3033
3602
  category: error.ErrorCategory.USER,
3034
- text: "Page number must be non-negative",
3035
- details: {
3036
- resourceId,
3037
- page
3038
- }
3603
+ text: error$1 instanceof Error ? error$1.message : "Invalid pagination parameters",
3604
+ details: { page, ...perPageInput !== void 0 && { perPage: perPageInput } }
3039
3605
  });
3040
3606
  }
3041
- const { field, direction } = this.parseOrderBy(orderBy);
3042
3607
  const perPage = storage.normalizePerPage(perPageInput, 100);
3608
+ try {
3609
+ this.validateMetadataKeys(filter?.metadata);
3610
+ } catch (error$1) {
3611
+ throw new error.MastraError({
3612
+ id: storage.createStorageErrorId("PG", "LIST_THREADS", "INVALID_METADATA_KEY"),
3613
+ domain: error.ErrorDomain.STORAGE,
3614
+ category: error.ErrorCategory.USER,
3615
+ text: error$1 instanceof Error ? error$1.message : "Invalid metadata key",
3616
+ details: { metadataKeys: filter?.metadata ? Object.keys(filter.metadata).join(", ") : "" }
3617
+ });
3618
+ }
3619
+ const { field, direction } = this.parseOrderBy(orderBy);
3043
3620
  const { offset, perPage: perPageForResponse } = storage.calculatePagination(page, perPageInput, perPage);
3044
3621
  try {
3045
3622
  const tableName = getTableName3({ indexName: storage.TABLE_THREADS, schemaName: getSchemaName3(this.#schema) });
3046
- const baseQuery = `FROM ${tableName} WHERE "resourceId" = $1`;
3047
- const queryParams = [resourceId];
3623
+ const whereClauses = [];
3624
+ const queryParams = [];
3625
+ let paramIndex = 1;
3626
+ if (filter?.resourceId) {
3627
+ whereClauses.push(`"resourceId" = $${paramIndex}`);
3628
+ queryParams.push(filter.resourceId);
3629
+ paramIndex++;
3630
+ }
3631
+ if (filter?.metadata && Object.keys(filter.metadata).length > 0) {
3632
+ for (const [key, value] of Object.entries(filter.metadata)) {
3633
+ whereClauses.push(`metadata::jsonb @> $${paramIndex}::jsonb`);
3634
+ queryParams.push(JSON.stringify({ [key]: value }));
3635
+ paramIndex++;
3636
+ }
3637
+ }
3638
+ const whereClause = whereClauses.length > 0 ? `WHERE ${whereClauses.join(" AND ")}` : "";
3639
+ const baseQuery = `FROM ${tableName} ${whereClause}`;
3048
3640
  const countQuery = `SELECT COUNT(*) ${baseQuery}`;
3049
3641
  const countResult = await this.#db.client.one(countQuery, queryParams);
3050
3642
  const total = parseInt(countResult.count, 10);
@@ -3058,13 +3650,19 @@ var MemoryPG = class _MemoryPG extends storage.MemoryStorage {
3058
3650
  };
3059
3651
  }
3060
3652
  const limitValue = perPageInput === false ? total : perPage;
3061
- const dataQuery = `SELECT id, "resourceId", title, metadata, "createdAt", "updatedAt" ${baseQuery} ORDER BY "${field}" ${direction} LIMIT $2 OFFSET $3`;
3062
- const rows = await this.#db.client.manyOrNone(dataQuery, [...queryParams, limitValue, offset]);
3653
+ const dataQuery = `SELECT id, "resourceId", title, metadata, "createdAt", "createdAtZ", "updatedAt", "updatedAtZ" ${baseQuery} ORDER BY "${field}" ${direction} LIMIT $${paramIndex} OFFSET $${paramIndex + 1}`;
3654
+ const rows = await this.#db.client.manyOrNone(
3655
+ dataQuery,
3656
+ [...queryParams, limitValue, offset]
3657
+ );
3063
3658
  const threads = (rows || []).map((thread) => ({
3064
- ...thread,
3659
+ id: thread.id,
3660
+ resourceId: thread.resourceId,
3661
+ title: thread.title,
3065
3662
  metadata: typeof thread.metadata === "string" ? JSON.parse(thread.metadata) : thread.metadata,
3066
- createdAt: thread.createdAt,
3067
- updatedAt: thread.updatedAt
3663
+ // Use timezone-aware columns (*Z) for correct UTC timestamps, with fallback for legacy data
3664
+ createdAt: thread.createdAtZ || thread.createdAt,
3665
+ updatedAt: thread.updatedAtZ || thread.updatedAt
3068
3666
  }));
3069
3667
  return {
3070
3668
  threads,
@@ -3076,11 +3674,12 @@ var MemoryPG = class _MemoryPG extends storage.MemoryStorage {
3076
3674
  } catch (error$1) {
3077
3675
  const mastraError = new error.MastraError(
3078
3676
  {
3079
- id: storage.createStorageErrorId("PG", "LIST_THREADS_BY_RESOURCE_ID", "FAILED"),
3677
+ id: storage.createStorageErrorId("PG", "LIST_THREADS", "FAILED"),
3080
3678
  domain: error.ErrorDomain.STORAGE,
3081
3679
  category: error.ErrorCategory.THIRD_PARTY,
3082
3680
  details: {
3083
- resourceId,
3681
+ ...filter?.resourceId && { resourceId: filter.resourceId },
3682
+ hasMetadataFilter: !!filter?.metadata,
3084
3683
  page
3085
3684
  }
3086
3685
  },
@@ -3169,17 +3768,18 @@ var MemoryPG = class _MemoryPG extends storage.MemoryStorage {
3169
3768
  ...metadata
3170
3769
  };
3171
3770
  try {
3771
+ const now = (/* @__PURE__ */ new Date()).toISOString();
3172
3772
  const thread = await this.#db.client.one(
3173
3773
  `UPDATE ${threadTableName}
3174
3774
  SET
3175
3775
  title = $1,
3176
3776
  metadata = $2,
3177
3777
  "updatedAt" = $3,
3178
- "updatedAtZ" = $3
3179
- WHERE id = $4
3778
+ "updatedAtZ" = $4
3779
+ WHERE id = $5
3180
3780
  RETURNING *
3181
3781
  `,
3182
- [title, mergedMetadata, (/* @__PURE__ */ new Date()).toISOString(), id]
3782
+ [title, mergedMetadata, now, now, id]
3183
3783
  );
3184
3784
  return {
3185
3785
  id: thread.id,
@@ -3322,7 +3922,7 @@ var MemoryPG = class _MemoryPG extends storage.MemoryStorage {
3322
3922
  const tableName = getTableName3({ indexName: storage.TABLE_MESSAGES, schemaName: getSchemaName3(this.#schema) });
3323
3923
  const query = `
3324
3924
  ${selectStatement} FROM ${tableName}
3325
- WHERE id IN (${messageIds.map((_, i) => `$${i + 1}`).join(", ")})
3925
+ WHERE id IN (${inPlaceholders(messageIds.length)})
3326
3926
  ORDER BY "createdAt" DESC
3327
3927
  `;
3328
3928
  const resultRows = await this.#db.client.manyOrNone(query, messageIds);
@@ -3383,8 +3983,7 @@ var MemoryPG = class _MemoryPG extends storage.MemoryStorage {
3383
3983
  const orderByStatement = `ORDER BY "${field}" ${direction}`;
3384
3984
  const selectStatement = `SELECT id, content, role, type, "createdAt", "createdAtZ", thread_id AS "threadId", "resourceId"`;
3385
3985
  const tableName = getTableName3({ indexName: storage.TABLE_MESSAGES, schemaName: getSchemaName3(this.#schema) });
3386
- const threadPlaceholders = threadIds.map((_, i) => `$${i + 1}`).join(", ");
3387
- const conditions = [`thread_id IN (${threadPlaceholders})`];
3986
+ const conditions = [`thread_id IN (${inPlaceholders(threadIds.length)})`];
3388
3987
  const queryParams = [...threadIds];
3389
3988
  let paramIndex = threadIds.length + 1;
3390
3989
  if (resourceId) {
@@ -3392,11 +3991,13 @@ var MemoryPG = class _MemoryPG extends storage.MemoryStorage {
3392
3991
  queryParams.push(resourceId);
3393
3992
  }
3394
3993
  if (filter?.dateRange?.start) {
3395
- conditions.push(`"createdAt" >= $${paramIndex++}`);
3994
+ const startOp = filter.dateRange.startExclusive ? ">" : ">=";
3995
+ conditions.push(`"createdAt" ${startOp} $${paramIndex++}`);
3396
3996
  queryParams.push(filter.dateRange.start);
3397
3997
  }
3398
3998
  if (filter?.dateRange?.end) {
3399
- conditions.push(`"createdAt" <= $${paramIndex++}`);
3999
+ const endOp = filter.dateRange.endExclusive ? "<" : "<=";
4000
+ conditions.push(`"createdAt" ${endOp} $${paramIndex++}`);
3400
4001
  queryParams.push(filter.dateRange.end);
3401
4002
  }
3402
4003
  const whereClause = conditions.length > 0 ? `WHERE ${conditions.join(" AND ")}` : "";
@@ -3541,14 +4142,15 @@ var MemoryPG = class _MemoryPG extends storage.MemoryStorage {
3541
4142
  );
3542
4143
  });
3543
4144
  const threadTableName = getTableName3({ indexName: storage.TABLE_THREADS, schemaName: getSchemaName3(this.#schema) });
4145
+ const nowStr = (/* @__PURE__ */ new Date()).toISOString();
3544
4146
  const threadUpdate = t.none(
3545
4147
  `UPDATE ${threadTableName}
3546
4148
  SET
3547
4149
  "updatedAt" = $1,
3548
- "updatedAtZ" = $1
3549
- WHERE id = $2
4150
+ "updatedAtZ" = $2
4151
+ WHERE id = $3
3550
4152
  `,
3551
- [(/* @__PURE__ */ new Date()).toISOString(), threadId]
4153
+ [nowStr, nowStr, threadId]
3552
4154
  );
3553
4155
  await Promise.all([...messageInserts, threadUpdate]);
3554
4156
  });
@@ -3585,8 +4187,8 @@ var MemoryPG = class _MemoryPG extends storage.MemoryStorage {
3585
4187
  return [];
3586
4188
  }
3587
4189
  const messageIds = messages.map((m) => m.id);
3588
- const selectQuery = `SELECT id, content, role, type, "createdAt", "createdAtZ", thread_id AS "threadId", "resourceId" FROM ${getTableName3({ indexName: storage.TABLE_MESSAGES, schemaName: getSchemaName3(this.#schema) })} WHERE id IN ($1:list)`;
3589
- const existingMessagesDb = await this.#db.client.manyOrNone(selectQuery, [messageIds]);
4190
+ const selectQuery = `SELECT id, content, role, type, "createdAt", "createdAtZ", thread_id AS "threadId", "resourceId" FROM ${getTableName3({ indexName: storage.TABLE_MESSAGES, schemaName: getSchemaName3(this.#schema) })} WHERE id IN (${inPlaceholders(messageIds.length)})`;
4191
+ const existingMessagesDb = await this.#db.client.manyOrNone(selectQuery, messageIds);
3590
4192
  if (existingMessagesDb.length === 0) {
3591
4193
  return [];
3592
4194
  }
@@ -3647,10 +4249,11 @@ var MemoryPG = class _MemoryPG extends storage.MemoryStorage {
3647
4249
  }
3648
4250
  }
3649
4251
  if (threadIdsToUpdate.size > 0) {
4252
+ const threadIds = Array.from(threadIdsToUpdate);
3650
4253
  queries.push(
3651
4254
  t.none(
3652
- `UPDATE ${getTableName3({ indexName: storage.TABLE_THREADS, schemaName: getSchemaName3(this.#schema) })} SET "updatedAt" = NOW(), "updatedAtZ" = NOW() WHERE id IN ($1:list)`,
3653
- [Array.from(threadIdsToUpdate)]
4255
+ `UPDATE ${getTableName3({ indexName: storage.TABLE_THREADS, schemaName: getSchemaName3(this.#schema) })} SET "updatedAt" = NOW(), "updatedAtZ" = NOW() WHERE id IN (${inPlaceholders(threadIds.length)})`,
4256
+ threadIds
3654
4257
  )
3655
4258
  );
3656
4259
  }
@@ -3658,7 +4261,7 @@ var MemoryPG = class _MemoryPG extends storage.MemoryStorage {
3658
4261
  await t.batch(queries);
3659
4262
  }
3660
4263
  });
3661
- const updatedMessages = await this.#db.client.manyOrNone(selectQuery, [messageIds]);
4264
+ const updatedMessages = await this.#db.client.manyOrNone(selectQuery, messageIds);
3662
4265
  return (updatedMessages || []).map((row) => {
3663
4266
  const message = this.normalizeMessageRow(row);
3664
4267
  if (typeof message.content === "string") {
@@ -3770,15 +4373,159 @@ var MemoryPG = class _MemoryPG extends storage.MemoryStorage {
3770
4373
  values.push(JSON.stringify(updatedResource.metadata));
3771
4374
  paramIndex++;
3772
4375
  }
3773
- updates.push(`"updatedAt" = $${paramIndex}`);
3774
- values.push(updatedResource.updatedAt.toISOString());
4376
+ const updatedAtStr = updatedResource.updatedAt.toISOString();
4377
+ updates.push(`"updatedAt" = $${paramIndex++}`);
4378
+ values.push(updatedAtStr);
3775
4379
  updates.push(`"updatedAtZ" = $${paramIndex++}`);
3776
- values.push(updatedResource.updatedAt.toISOString());
3777
- paramIndex++;
4380
+ values.push(updatedAtStr);
3778
4381
  values.push(resourceId);
3779
4382
  await this.#db.client.none(`UPDATE ${tableName} SET ${updates.join(", ")} WHERE id = $${paramIndex}`, values);
3780
4383
  return updatedResource;
3781
4384
  }
4385
+ async cloneThread(args) {
4386
+ const { sourceThreadId, newThreadId: providedThreadId, resourceId, title, metadata, options } = args;
4387
+ const sourceThread = await this.getThreadById({ threadId: sourceThreadId });
4388
+ if (!sourceThread) {
4389
+ throw new error.MastraError({
4390
+ id: storage.createStorageErrorId("PG", "CLONE_THREAD", "SOURCE_NOT_FOUND"),
4391
+ domain: error.ErrorDomain.STORAGE,
4392
+ category: error.ErrorCategory.USER,
4393
+ text: `Source thread with id ${sourceThreadId} not found`,
4394
+ details: { sourceThreadId }
4395
+ });
4396
+ }
4397
+ const newThreadId = providedThreadId || crypto.randomUUID();
4398
+ const existingThread = await this.getThreadById({ threadId: newThreadId });
4399
+ if (existingThread) {
4400
+ throw new error.MastraError({
4401
+ id: storage.createStorageErrorId("PG", "CLONE_THREAD", "THREAD_EXISTS"),
4402
+ domain: error.ErrorDomain.STORAGE,
4403
+ category: error.ErrorCategory.USER,
4404
+ text: `Thread with id ${newThreadId} already exists`,
4405
+ details: { newThreadId }
4406
+ });
4407
+ }
4408
+ const threadTableName = getTableName3({ indexName: storage.TABLE_THREADS, schemaName: getSchemaName3(this.#schema) });
4409
+ const messageTableName = getTableName3({ indexName: storage.TABLE_MESSAGES, schemaName: getSchemaName3(this.#schema) });
4410
+ try {
4411
+ return await this.#db.client.tx(async (t) => {
4412
+ let messageQuery = `SELECT id, content, role, type, "createdAt", "createdAtZ", thread_id AS "threadId", "resourceId"
4413
+ FROM ${messageTableName} WHERE thread_id = $1`;
4414
+ const messageParams = [sourceThreadId];
4415
+ let paramIndex = 2;
4416
+ if (options?.messageFilter?.startDate) {
4417
+ messageQuery += ` AND "createdAt" >= $${paramIndex++}`;
4418
+ messageParams.push(options.messageFilter.startDate);
4419
+ }
4420
+ if (options?.messageFilter?.endDate) {
4421
+ messageQuery += ` AND "createdAt" <= $${paramIndex++}`;
4422
+ messageParams.push(options.messageFilter.endDate);
4423
+ }
4424
+ if (options?.messageFilter?.messageIds && options.messageFilter.messageIds.length > 0) {
4425
+ messageQuery += ` AND id IN (${options.messageFilter.messageIds.map(() => `$${paramIndex++}`).join(", ")})`;
4426
+ messageParams.push(...options.messageFilter.messageIds);
4427
+ }
4428
+ messageQuery += ` ORDER BY "createdAt" ASC`;
4429
+ if (options?.messageLimit && options.messageLimit > 0) {
4430
+ const limitQuery = `SELECT * FROM (${messageQuery.replace('ORDER BY "createdAt" ASC', 'ORDER BY "createdAt" DESC')} LIMIT $${paramIndex}) AS limited ORDER BY "createdAt" ASC`;
4431
+ messageParams.push(options.messageLimit);
4432
+ messageQuery = limitQuery;
4433
+ }
4434
+ const sourceMessages = await t.manyOrNone(messageQuery, messageParams);
4435
+ const now = /* @__PURE__ */ new Date();
4436
+ const lastMessageId = sourceMessages.length > 0 ? sourceMessages[sourceMessages.length - 1].id : void 0;
4437
+ const cloneMetadata = {
4438
+ sourceThreadId,
4439
+ clonedAt: now,
4440
+ ...lastMessageId && { lastMessageId }
4441
+ };
4442
+ const newThread = {
4443
+ id: newThreadId,
4444
+ resourceId: resourceId || sourceThread.resourceId,
4445
+ title: title || (sourceThread.title ? `Clone of ${sourceThread.title}` : void 0),
4446
+ metadata: {
4447
+ ...metadata,
4448
+ clone: cloneMetadata
4449
+ },
4450
+ createdAt: now,
4451
+ updatedAt: now
4452
+ };
4453
+ await t.none(
4454
+ `INSERT INTO ${threadTableName} (
4455
+ id,
4456
+ "resourceId",
4457
+ title,
4458
+ metadata,
4459
+ "createdAt",
4460
+ "createdAtZ",
4461
+ "updatedAt",
4462
+ "updatedAtZ"
4463
+ ) VALUES ($1, $2, $3, $4, $5, $6, $7, $8)`,
4464
+ [
4465
+ newThread.id,
4466
+ newThread.resourceId,
4467
+ newThread.title,
4468
+ newThread.metadata ? JSON.stringify(newThread.metadata) : null,
4469
+ now,
4470
+ now,
4471
+ now,
4472
+ now
4473
+ ]
4474
+ );
4475
+ const clonedMessages = [];
4476
+ const targetResourceId = resourceId || sourceThread.resourceId;
4477
+ for (const sourceMsg of sourceMessages) {
4478
+ const newMessageId = crypto.randomUUID();
4479
+ const normalizedMsg = this.normalizeMessageRow(sourceMsg);
4480
+ let parsedContent = normalizedMsg.content;
4481
+ try {
4482
+ parsedContent = JSON.parse(normalizedMsg.content);
4483
+ } catch {
4484
+ }
4485
+ await t.none(
4486
+ `INSERT INTO ${messageTableName} (id, thread_id, content, "createdAt", "createdAtZ", role, type, "resourceId")
4487
+ VALUES ($1, $2, $3, $4, $5, $6, $7, $8)`,
4488
+ [
4489
+ newMessageId,
4490
+ newThreadId,
4491
+ typeof normalizedMsg.content === "string" ? normalizedMsg.content : JSON.stringify(normalizedMsg.content),
4492
+ normalizedMsg.createdAt,
4493
+ normalizedMsg.createdAt,
4494
+ normalizedMsg.role,
4495
+ normalizedMsg.type || "v2",
4496
+ targetResourceId
4497
+ ]
4498
+ );
4499
+ clonedMessages.push({
4500
+ id: newMessageId,
4501
+ threadId: newThreadId,
4502
+ content: parsedContent,
4503
+ role: normalizedMsg.role,
4504
+ type: normalizedMsg.type,
4505
+ createdAt: new Date(normalizedMsg.createdAt),
4506
+ resourceId: targetResourceId
4507
+ });
4508
+ }
4509
+ return {
4510
+ thread: newThread,
4511
+ clonedMessages
4512
+ };
4513
+ });
4514
+ } catch (error$1) {
4515
+ if (error$1 instanceof error.MastraError) {
4516
+ throw error$1;
4517
+ }
4518
+ throw new error.MastraError(
4519
+ {
4520
+ id: storage.createStorageErrorId("PG", "CLONE_THREAD", "FAILED"),
4521
+ domain: error.ErrorDomain.STORAGE,
4522
+ category: error.ErrorCategory.THIRD_PARTY,
4523
+ details: { sourceThreadId, newThreadId }
4524
+ },
4525
+ error$1
4526
+ );
4527
+ }
4528
+ }
3782
4529
  };
3783
4530
  var ObservabilityPG = class _ObservabilityPG extends storage.ObservabilityStorage {
3784
4531
  #db;
@@ -3896,6 +4643,22 @@ var ObservabilityPG = class _ObservabilityPG extends storage.ObservabilityStorag
3896
4643
  }
3897
4644
  }
3898
4645
  }
4646
+ /**
4647
+ * Manually run the spans migration to deduplicate and add the unique constraint.
4648
+ * This is intended to be called from the CLI when duplicates are detected.
4649
+ *
4650
+ * @returns Migration result with status and details
4651
+ */
4652
+ async migrateSpans() {
4653
+ return this.#db.migrateSpans();
4654
+ }
4655
+ /**
4656
+ * Check migration status for the spans table.
4657
+ * Returns information about whether migration is needed.
4658
+ */
4659
+ async checkSpansMigrationStatus() {
4660
+ return this.#db.checkSpansMigrationStatus();
4661
+ }
3899
4662
  async dangerouslyClearAll() {
3900
4663
  await this.#db.clearTable({ tableName: storage.TABLE_SPANS });
3901
4664
  }
@@ -4408,6 +5171,11 @@ var ScoresPG = class _ScoresPG extends storage.ScoresStorage {
4408
5171
  }
4409
5172
  async init() {
4410
5173
  await this.#db.createTable({ tableName: storage.TABLE_SCORERS, schema: storage.TABLE_SCHEMAS[storage.TABLE_SCORERS] });
5174
+ await this.#db.alterTable({
5175
+ tableName: storage.TABLE_SCORERS,
5176
+ schema: storage.TABLE_SCHEMAS[storage.TABLE_SCORERS],
5177
+ ifNotExists: ["spanId", "requestContext"]
5178
+ });
4411
5179
  await this.createDefaultIndexes();
4412
5180
  await this.createCustomIndexes();
4413
5181
  }
@@ -4759,23 +5527,8 @@ function getTableName5({ indexName, schemaName }) {
4759
5527
  const quotedIndexName = `"${indexName}"`;
4760
5528
  return schemaName ? `${schemaName}.${quotedIndexName}` : quotedIndexName;
4761
5529
  }
4762
- function parseWorkflowRun(row) {
4763
- let parsedSnapshot = row.snapshot;
4764
- if (typeof parsedSnapshot === "string") {
4765
- try {
4766
- parsedSnapshot = JSON.parse(row.snapshot);
4767
- } catch (e) {
4768
- console.warn(`Failed to parse snapshot for workflow ${row.workflow_name}: ${e}`);
4769
- }
4770
- }
4771
- return {
4772
- workflowName: row.workflow_name,
4773
- runId: row.run_id,
4774
- snapshot: parsedSnapshot,
4775
- resourceId: row.resourceId,
4776
- createdAt: new Date(row.createdAtZ || row.createdAt),
4777
- updatedAt: new Date(row.updatedAtZ || row.updatedAt)
4778
- };
5530
+ function sanitizeJsonForPg(jsonString) {
5531
+ return jsonString.replace(/\\u(0000|[Dd][89A-Fa-f][0-9A-Fa-f]{2})/g, "");
4779
5532
  }
4780
5533
  var WorkflowsPG = class _WorkflowsPG extends storage.WorkflowsStorage {
4781
5534
  #db;
@@ -4792,6 +5545,24 @@ var WorkflowsPG = class _WorkflowsPG extends storage.WorkflowsStorage {
4792
5545
  this.#skipDefaultIndexes = skipDefaultIndexes;
4793
5546
  this.#indexes = indexes?.filter((idx) => _WorkflowsPG.MANAGED_TABLES.includes(idx.table));
4794
5547
  }
5548
+ parseWorkflowRun(row) {
5549
+ let parsedSnapshot = row.snapshot;
5550
+ if (typeof parsedSnapshot === "string") {
5551
+ try {
5552
+ parsedSnapshot = JSON.parse(row.snapshot);
5553
+ } catch (e) {
5554
+ this.logger.warn(`Failed to parse snapshot for workflow ${row.workflow_name}: ${e}`);
5555
+ }
5556
+ }
5557
+ return {
5558
+ workflowName: row.workflow_name,
5559
+ runId: row.run_id,
5560
+ snapshot: parsedSnapshot,
5561
+ resourceId: row.resourceId,
5562
+ createdAt: new Date(row.createdAtZ || row.createdAt),
5563
+ updatedAt: new Date(row.updatedAtZ || row.updatedAt)
5564
+ };
5565
+ }
4795
5566
  /**
4796
5567
  * Returns default index definitions for the workflows domain tables.
4797
5568
  * Currently no default indexes are defined for workflows.
@@ -4864,12 +5635,13 @@ var WorkflowsPG = class _WorkflowsPG extends storage.WorkflowsStorage {
4864
5635
  const now = /* @__PURE__ */ new Date();
4865
5636
  const createdAtValue = createdAt ? createdAt : now;
4866
5637
  const updatedAtValue = updatedAt ? updatedAt : now;
5638
+ const sanitizedSnapshot = sanitizeJsonForPg(JSON.stringify(snapshot));
4867
5639
  await this.#db.client.none(
4868
5640
  `INSERT INTO ${getTableName5({ indexName: storage.TABLE_WORKFLOW_SNAPSHOT, schemaName: getSchemaName5(this.#schema) })} (workflow_name, run_id, "resourceId", snapshot, "createdAt", "updatedAt")
4869
5641
  VALUES ($1, $2, $3, $4, $5, $6)
4870
5642
  ON CONFLICT (workflow_name, run_id) DO UPDATE
4871
5643
  SET "resourceId" = $3, snapshot = $4, "updatedAt" = $6`,
4872
- [workflowName, runId, resourceId, JSON.stringify(snapshot), createdAtValue, updatedAtValue]
5644
+ [workflowName, runId, resourceId, sanitizedSnapshot, createdAtValue, updatedAtValue]
4873
5645
  );
4874
5646
  } catch (error$1) {
4875
5647
  throw new error.MastraError(
@@ -4932,7 +5704,7 @@ var WorkflowsPG = class _WorkflowsPG extends storage.WorkflowsStorage {
4932
5704
  if (!result) {
4933
5705
  return null;
4934
5706
  }
4935
- return parseWorkflowRun(result);
5707
+ return this.parseWorkflowRun(result);
4936
5708
  } catch (error$1) {
4937
5709
  throw new error.MastraError(
4938
5710
  {
@@ -4988,7 +5760,9 @@ var WorkflowsPG = class _WorkflowsPG extends storage.WorkflowsStorage {
4988
5760
  paramIndex++;
4989
5761
  }
4990
5762
  if (status) {
4991
- conditions.push(`snapshot::jsonb ->> 'status' = $${paramIndex}`);
5763
+ conditions.push(
5764
+ `regexp_replace(snapshot::text, '\\\\u(0000|[Dd][89A-Fa-f][0-9A-Fa-f]{2})', '', 'g')::jsonb ->> 'status' = $${paramIndex}`
5765
+ );
4992
5766
  values.push(status);
4993
5767
  paramIndex++;
4994
5768
  }
@@ -5033,7 +5807,7 @@ var WorkflowsPG = class _WorkflowsPG extends storage.WorkflowsStorage {
5033
5807
  const queryValues = usePagination ? [...values, normalizedPerPage, offset] : values;
5034
5808
  const result = await this.#db.client.manyOrNone(query, queryValues);
5035
5809
  const runs = (result || []).map((row) => {
5036
- return parseWorkflowRun(row);
5810
+ return this.parseWorkflowRun(row);
5037
5811
  });
5038
5812
  return { runs, total: total || runs.length };
5039
5813
  } catch (error$1) {
@@ -5053,9 +5827,12 @@ var WorkflowsPG = class _WorkflowsPG extends storage.WorkflowsStorage {
5053
5827
  };
5054
5828
 
5055
5829
  // src/storage/index.ts
5056
- var PostgresStore = class extends storage.MastraStorage {
5830
// Pool defaults applied when the caller does not specify max / idleTimeoutMillis.
var DEFAULT_MAX_CONNECTIONS = 20;
var DEFAULT_IDLE_TIMEOUT_MS = 30000;
5832
+ var PostgresStore = class extends storage.MastraCompositeStore {
5833
+ #pool;
5057
5834
  #db;
5058
- #pgp;
5835
+ #ownsPool;
5059
5836
  schema;
5060
5837
  isInitialized = false;
5061
5838
  stores;
@@ -5063,60 +5840,27 @@ var PostgresStore = class extends storage.MastraStorage {
5063
5840
  try {
5064
5841
  validateConfig("PostgresStore", config);
5065
5842
  super({ id: config.id, name: "PostgresStore", disableInit: config.disableInit });
5066
- this.schema = config.schemaName || "public";
5067
- this.#pgp = pgPromise__default.default();
5068
- if (isClientConfig(config)) {
5069
- this.#db = config.client;
5843
+ this.schema = utils.parseSqlIdentifier(config.schemaName || "public", "schema name");
5844
+ if (isPoolConfig(config)) {
5845
+ this.#pool = config.pool;
5846
+ this.#ownsPool = false;
5070
5847
  } else {
5071
- let pgConfig;
5072
- if (isConnectionStringConfig(config)) {
5073
- pgConfig = {
5074
- id: config.id,
5075
- connectionString: config.connectionString,
5076
- max: config.max,
5077
- idleTimeoutMillis: config.idleTimeoutMillis,
5078
- ssl: config.ssl
5079
- };
5080
- } else if (isCloudSqlConfig(config)) {
5081
- pgConfig = {
5082
- ...config,
5083
- id: config.id,
5084
- max: config.max,
5085
- idleTimeoutMillis: config.idleTimeoutMillis
5086
- };
5087
- } else if (isHostConfig(config)) {
5088
- pgConfig = {
5089
- id: config.id,
5090
- host: config.host,
5091
- port: config.port,
5092
- database: config.database,
5093
- user: config.user,
5094
- password: config.password,
5095
- ssl: config.ssl,
5096
- max: config.max,
5097
- idleTimeoutMillis: config.idleTimeoutMillis
5098
- };
5099
- } else {
5100
- throw new Error(
5101
- "PostgresStore: invalid config. Provide either {client}, {connectionString}, {host,port,database,user,password}, or a pg ClientConfig (e.g., Cloud SQL connector with `stream`)."
5102
- );
5103
- }
5104
- this.#db = this.#pgp(pgConfig);
5105
- }
5106
- const skipDefaultIndexes = config.skipDefaultIndexes;
5107
- const indexes = config.indexes;
5108
- const domainConfig = { client: this.#db, schemaName: this.schema, skipDefaultIndexes, indexes };
5109
- const scores = new ScoresPG(domainConfig);
5110
- const workflows = new WorkflowsPG(domainConfig);
5111
- const memory = new MemoryPG(domainConfig);
5112
- const observability = new ObservabilityPG(domainConfig);
5113
- const agents = new AgentsPG(domainConfig);
5848
+ this.#pool = this.createPool(config);
5849
+ this.#ownsPool = true;
5850
+ }
5851
+ this.#db = new PoolAdapter(this.#pool);
5852
+ const domainConfig = {
5853
+ client: this.#db,
5854
+ schemaName: this.schema,
5855
+ skipDefaultIndexes: config.skipDefaultIndexes,
5856
+ indexes: config.indexes
5857
+ };
5114
5858
  this.stores = {
5115
- scores,
5116
- workflows,
5117
- memory,
5118
- observability,
5119
- agents
5859
+ scores: new ScoresPG(domainConfig),
5860
+ workflows: new WorkflowsPG(domainConfig),
5861
+ memory: new MemoryPG(domainConfig),
5862
+ observability: new ObservabilityPG(domainConfig),
5863
+ agents: new AgentsPG(domainConfig)
5120
5864
  };
5121
5865
  } catch (e) {
5122
5866
  throw new error.MastraError(
@@ -5129,6 +5873,32 @@ var PostgresStore = class extends storage.MastraStorage {
5129
5873
  );
5130
5874
  }
5131
5875
  }
5876
+ createPool(config) {
5877
+ if (isConnectionStringConfig(config)) {
5878
+ return new pg.Pool({
5879
+ connectionString: config.connectionString,
5880
+ ssl: config.ssl,
5881
+ max: config.max ?? DEFAULT_MAX_CONNECTIONS,
5882
+ idleTimeoutMillis: config.idleTimeoutMillis ?? DEFAULT_IDLE_TIMEOUT_MS
5883
+ });
5884
+ }
5885
+ if (isHostConfig(config)) {
5886
+ return new pg.Pool({
5887
+ host: config.host,
5888
+ port: config.port,
5889
+ database: config.database,
5890
+ user: config.user,
5891
+ password: config.password,
5892
+ ssl: config.ssl,
5893
+ max: config.max ?? DEFAULT_MAX_CONNECTIONS,
5894
+ idleTimeoutMillis: config.idleTimeoutMillis ?? DEFAULT_IDLE_TIMEOUT_MS
5895
+ });
5896
+ }
5897
+ if (isCloudSqlConfig(config)) {
5898
+ return new pg.Pool(config);
5899
+ }
5900
+ throw new Error("PostgresStore: invalid config");
5901
+ }
5132
5902
  async init() {
5133
5903
  if (this.isInitialized) {
5134
5904
  return;
@@ -5138,6 +5908,9 @@ var PostgresStore = class extends storage.MastraStorage {
5138
5908
  await super.init();
5139
5909
  } catch (error$1) {
5140
5910
  this.isInitialized = false;
5911
+ if (error$1 instanceof error.MastraError) {
5912
+ throw error$1;
5913
+ }
5141
5914
  throw new error.MastraError(
5142
5915
  {
5143
5916
  id: storage.createStorageErrorId("PG", "INIT", "FAILED"),
@@ -5148,32 +5921,32 @@ var PostgresStore = class extends storage.MastraStorage {
5148
5921
  );
5149
5922
  }
5150
5923
  }
5924
+ /**
5925
+ * Database client for executing queries.
5926
+ *
5927
+ * @example
5928
+ * ```typescript
5929
+ * const rows = await store.db.any('SELECT * FROM users WHERE active = $1', [true]);
5930
+ * const user = await store.db.one('SELECT * FROM users WHERE id = $1', [userId]);
5931
+ * ```
5932
+ */
5151
5933
  get db() {
5152
5934
  return this.#db;
5153
5935
  }
5154
- get pgp() {
5155
- return this.#pgp;
5156
- }
5157
- get supports() {
5158
- return {
5159
- selectByIncludeResourceScope: true,
5160
- resourceWorkingMemory: true,
5161
- hasColumn: true,
5162
- createTable: true,
5163
- deleteMessages: true,
5164
- observability: true,
5165
- indexManagement: true,
5166
- listScoresBySpan: true,
5167
- agents: true
5168
- };
5936
+ /**
5937
+ * The underlying pg.Pool for direct database access or ORM integration.
5938
+ */
5939
+ get pool() {
5940
+ return this.#pool;
5169
5941
  }
5170
5942
  /**
5171
- * Closes the pg-promise connection pool.
5172
- *
5173
- * This will close ALL connections in the pool, including pre-configured clients.
5943
+ * Closes the connection pool if it was created by this store.
5944
+ * If a pool was passed in via config, it will not be closed.
5174
5945
  */
5175
5946
  async close() {
5176
- this.pgp.end();
5947
+ if (this.#ownsPool) {
5948
+ await this.#pool.end();
5949
+ }
5177
5950
  }
5178
5951
  };
5179
5952
 
@@ -5276,8 +6049,15 @@ Example Complex Query:
5276
6049
  ]
5277
6050
  }`;
5278
6051
 
6052
// Public CommonJS surface: storage domains, vector store, pool adapter, and helpers.
Object.assign(exports, {
  AgentsPG,
  MemoryPG,
  ObservabilityPG,
  PGVECTOR_PROMPT,
  PgVector,
  PoolAdapter,
  PostgresStore,
  ScoresPG,
  WorkflowsPG,
  exportSchemas
});
5282
6062
  //# sourceMappingURL=index.cjs.map
5283
6063
  //# sourceMappingURL=index.cjs.map