@mastra/pg 1.0.0-beta.9 → 1.1.0-alpha.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (43)
  1. package/CHANGELOG.md +1481 -0
  2. package/dist/docs/README.md +36 -0
  3. package/dist/docs/SKILL.md +37 -0
  4. package/dist/docs/SOURCE_MAP.json +6 -0
  5. package/dist/docs/memory/01-storage.md +261 -0
  6. package/dist/docs/memory/02-working-memory.md +411 -0
  7. package/dist/docs/memory/03-semantic-recall.md +256 -0
  8. package/dist/docs/memory/04-reference.md +133 -0
  9. package/dist/docs/processors/01-reference.md +296 -0
  10. package/dist/docs/rag/01-overview.md +74 -0
  11. package/dist/docs/rag/02-vector-databases.md +643 -0
  12. package/dist/docs/rag/03-retrieval.md +548 -0
  13. package/dist/docs/rag/04-reference.md +369 -0
  14. package/dist/docs/storage/01-reference.md +905 -0
  15. package/dist/docs/tools/01-reference.md +440 -0
  16. package/dist/docs/vectors/01-reference.md +307 -0
  17. package/dist/index.cjs +1293 -260
  18. package/dist/index.cjs.map +1 -1
  19. package/dist/index.d.ts +1 -1
  20. package/dist/index.d.ts.map +1 -1
  21. package/dist/index.js +1290 -262
  22. package/dist/index.js.map +1 -1
  23. package/dist/shared/config.d.ts +61 -66
  24. package/dist/shared/config.d.ts.map +1 -1
  25. package/dist/storage/client.d.ts +91 -0
  26. package/dist/storage/client.d.ts.map +1 -0
  27. package/dist/storage/db/index.d.ts +82 -17
  28. package/dist/storage/db/index.d.ts.map +1 -1
  29. package/dist/storage/domains/agents/index.d.ts +11 -1
  30. package/dist/storage/domains/agents/index.d.ts.map +1 -1
  31. package/dist/storage/domains/memory/index.d.ts +3 -2
  32. package/dist/storage/domains/memory/index.d.ts.map +1 -1
  33. package/dist/storage/domains/observability/index.d.ts +24 -1
  34. package/dist/storage/domains/observability/index.d.ts.map +1 -1
  35. package/dist/storage/domains/scores/index.d.ts.map +1 -1
  36. package/dist/storage/domains/workflows/index.d.ts +1 -0
  37. package/dist/storage/domains/workflows/index.d.ts.map +1 -1
  38. package/dist/storage/index.d.ts +44 -17
  39. package/dist/storage/index.d.ts.map +1 -1
  40. package/dist/storage/test-utils.d.ts.map +1 -1
  41. package/dist/vector/index.d.ts.map +1 -1
  42. package/dist/vector/sql-builder.d.ts.map +1 -1
  43. package/package.json +14 -14
package/dist/index.cjs CHANGED
@@ -8,7 +8,6 @@ var asyncMutex = require('async-mutex');
8
8
  var pg = require('pg');
9
9
  var xxhash = require('xxhash-wasm');
10
10
  var filter = require('@mastra/core/vector/filter');
11
- var pgPromise = require('pg-promise');
12
11
  var base = require('@mastra/core/base');
13
12
  var agent = require('@mastra/core/agent');
14
13
  var evals = require('@mastra/core/evals');
@@ -35,13 +34,15 @@ function _interopNamespace(e) {
35
34
 
36
35
  var pg__namespace = /*#__PURE__*/_interopNamespace(pg);
37
36
  var xxhash__default = /*#__PURE__*/_interopDefault(xxhash);
38
- var pgPromise__default = /*#__PURE__*/_interopDefault(pgPromise);
39
37
 
40
38
  // src/vector/index.ts
41
39
 
42
40
  // src/shared/config.ts
41
+ var isPoolConfig = (cfg) => {
42
+ return "pool" in cfg;
43
+ };
43
44
  var isConnectionStringConfig = (cfg) => {
44
- return "connectionString" in cfg;
45
+ return "connectionString" in cfg && typeof cfg.connectionString === "string";
45
46
  };
46
47
  var isHostConfig = (cfg) => {
47
48
  return "host" in cfg && "database" in cfg && "user" in cfg && "password" in cfg;
@@ -49,16 +50,13 @@ var isHostConfig = (cfg) => {
49
50
  var isCloudSqlConfig = (cfg) => {
50
51
  return "stream" in cfg || "password" in cfg && typeof cfg.password === "function";
51
52
  };
52
- var isClientConfig = (cfg) => {
53
- return "client" in cfg;
54
- };
55
53
  var validateConfig = (name, config) => {
56
54
  if (!config.id || typeof config.id !== "string" || config.id.trim() === "") {
57
55
  throw new Error(`${name}: id must be provided and cannot be empty.`);
58
56
  }
59
- if (isClientConfig(config)) {
60
- if (!config.client) {
61
- throw new Error(`${name}: client must be provided when using client config.`);
57
+ if (isPoolConfig(config)) {
58
+ if (!config.pool) {
59
+ throw new Error(`${name}: pool must be provided when using pool config.`);
62
60
  }
63
61
  return;
64
62
  }
@@ -79,7 +77,7 @@ var validateConfig = (name, config) => {
79
77
  }
80
78
  } else {
81
79
  throw new Error(
82
- `${name}: invalid config. Provide either {client}, {connectionString}, {host,port,database,user,password}, or a pg ClientConfig (e.g., Cloud SQL connector with \`stream\`).`
80
+ `${name}: invalid config. Provide either {pool}, {connectionString}, {host,port,database,user,password}, or a pg ClientConfig (e.g., Cloud SQL connector with \`stream\`).`
83
81
  );
84
82
  }
85
83
  };
@@ -287,8 +285,14 @@ var FILTER_OPERATORS = {
287
285
  };
288
286
  },
289
287
  // Element Operators
290
- $exists: (key) => {
288
+ $exists: (key, paramIndex, value) => {
291
289
  const jsonPathKey = parseJsonPathKey(key);
290
+ if (value === false) {
291
+ return {
292
+ sql: `NOT (metadata ? '${jsonPathKey}')`,
293
+ needsValue: false
294
+ };
295
+ }
292
296
  return {
293
297
  sql: `metadata ? '${jsonPathKey}'`,
294
298
  needsValue: false
@@ -366,17 +370,62 @@ function buildDeleteFilterQuery(filter) {
366
370
  values.push(value);
367
371
  return `metadata#>>'{${parseJsonPathKey(key)}}' = $${values.length}`;
368
372
  }
369
- const [[operator, operatorValue] = []] = Object.entries(value);
373
+ const entries = Object.entries(value);
374
+ if (entries.length > 1) {
375
+ const conditions2 = entries.map(([operator2, operatorValue2]) => {
376
+ if (operator2 === "$not") {
377
+ const nestedEntries = Object.entries(operatorValue2);
378
+ const nestedConditions = nestedEntries.map(([nestedOp, nestedValue]) => {
379
+ if (!FILTER_OPERATORS[nestedOp]) {
380
+ throw new Error(`Invalid operator in $not condition: ${nestedOp}`);
381
+ }
382
+ const operatorFn3 = FILTER_OPERATORS[nestedOp];
383
+ const operatorResult3 = operatorFn3(key, values.length + 1, nestedValue);
384
+ if (operatorResult3.needsValue) {
385
+ const transformedValue = operatorResult3.transformValue ? operatorResult3.transformValue() : nestedValue;
386
+ if (Array.isArray(transformedValue) && nestedOp === "$elemMatch") {
387
+ values.push(...transformedValue);
388
+ } else {
389
+ values.push(transformedValue);
390
+ }
391
+ }
392
+ return operatorResult3.sql;
393
+ }).join(" AND ");
394
+ return `NOT (${nestedConditions})`;
395
+ }
396
+ if (!FILTER_OPERATORS[operator2]) {
397
+ throw new Error(`Invalid operator: ${operator2}`);
398
+ }
399
+ const operatorFn2 = FILTER_OPERATORS[operator2];
400
+ const operatorResult2 = operatorFn2(key, values.length + 1, operatorValue2);
401
+ if (operatorResult2.needsValue) {
402
+ const transformedValue = operatorResult2.transformValue ? operatorResult2.transformValue() : operatorValue2;
403
+ if (Array.isArray(transformedValue) && operator2 === "$elemMatch") {
404
+ values.push(...transformedValue);
405
+ } else {
406
+ values.push(transformedValue);
407
+ }
408
+ }
409
+ return operatorResult2.sql;
410
+ });
411
+ return conditions2.join(" AND ");
412
+ }
413
+ const [[operator, operatorValue] = []] = entries;
370
414
  if (operator === "$not") {
371
- const entries = Object.entries(operatorValue);
372
- const conditions2 = entries.map(([nestedOp, nestedValue]) => {
415
+ const nestedEntries = Object.entries(operatorValue);
416
+ const conditions2 = nestedEntries.map(([nestedOp, nestedValue]) => {
373
417
  if (!FILTER_OPERATORS[nestedOp]) {
374
418
  throw new Error(`Invalid operator in $not condition: ${nestedOp}`);
375
419
  }
376
420
  const operatorFn2 = FILTER_OPERATORS[nestedOp];
377
421
  const operatorResult2 = operatorFn2(key, values.length + 1, nestedValue);
378
422
  if (operatorResult2.needsValue) {
379
- values.push(nestedValue);
423
+ const transformedValue = operatorResult2.transformValue ? operatorResult2.transformValue() : nestedValue;
424
+ if (Array.isArray(transformedValue) && nestedOp === "$elemMatch") {
425
+ values.push(...transformedValue);
426
+ } else {
427
+ values.push(transformedValue);
428
+ }
380
429
  }
381
430
  return operatorResult2.sql;
382
431
  }).join(" AND ");
@@ -443,17 +492,62 @@ function buildFilterQuery(filter, minScore, topK) {
443
492
  values.push(value);
444
493
  return `metadata#>>'{${parseJsonPathKey(key)}}' = $${values.length}`;
445
494
  }
446
- const [[operator, operatorValue] = []] = Object.entries(value);
495
+ const entries = Object.entries(value);
496
+ if (entries.length > 1) {
497
+ const conditions2 = entries.map(([operator2, operatorValue2]) => {
498
+ if (operator2 === "$not") {
499
+ const nestedEntries = Object.entries(operatorValue2);
500
+ const nestedConditions = nestedEntries.map(([nestedOp, nestedValue]) => {
501
+ if (!FILTER_OPERATORS[nestedOp]) {
502
+ throw new Error(`Invalid operator in $not condition: ${nestedOp}`);
503
+ }
504
+ const operatorFn3 = FILTER_OPERATORS[nestedOp];
505
+ const operatorResult3 = operatorFn3(key, values.length + 1, nestedValue);
506
+ if (operatorResult3.needsValue) {
507
+ const transformedValue = operatorResult3.transformValue ? operatorResult3.transformValue() : nestedValue;
508
+ if (Array.isArray(transformedValue) && nestedOp === "$elemMatch") {
509
+ values.push(...transformedValue);
510
+ } else {
511
+ values.push(transformedValue);
512
+ }
513
+ }
514
+ return operatorResult3.sql;
515
+ }).join(" AND ");
516
+ return `NOT (${nestedConditions})`;
517
+ }
518
+ if (!FILTER_OPERATORS[operator2]) {
519
+ throw new Error(`Invalid operator: ${operator2}`);
520
+ }
521
+ const operatorFn2 = FILTER_OPERATORS[operator2];
522
+ const operatorResult2 = operatorFn2(key, values.length + 1, operatorValue2);
523
+ if (operatorResult2.needsValue) {
524
+ const transformedValue = operatorResult2.transformValue ? operatorResult2.transformValue() : operatorValue2;
525
+ if (Array.isArray(transformedValue) && operator2 === "$elemMatch") {
526
+ values.push(...transformedValue);
527
+ } else {
528
+ values.push(transformedValue);
529
+ }
530
+ }
531
+ return operatorResult2.sql;
532
+ });
533
+ return conditions2.join(" AND ");
534
+ }
535
+ const [[operator, operatorValue] = []] = entries;
447
536
  if (operator === "$not") {
448
- const entries = Object.entries(operatorValue);
449
- const conditions2 = entries.map(([nestedOp, nestedValue]) => {
537
+ const nestedEntries = Object.entries(operatorValue);
538
+ const conditions2 = nestedEntries.map(([nestedOp, nestedValue]) => {
450
539
  if (!FILTER_OPERATORS[nestedOp]) {
451
540
  throw new Error(`Invalid operator in $not condition: ${nestedOp}`);
452
541
  }
453
542
  const operatorFn2 = FILTER_OPERATORS[nestedOp];
454
543
  const operatorResult2 = operatorFn2(key, values.length + 1, nestedValue);
455
544
  if (operatorResult2.needsValue) {
456
- values.push(nestedValue);
545
+ const transformedValue = operatorResult2.transformValue ? operatorResult2.transformValue() : nestedValue;
546
+ if (Array.isArray(transformedValue) && nestedOp === "$elemMatch") {
547
+ values.push(...transformedValue);
548
+ } else {
549
+ values.push(transformedValue);
550
+ }
457
551
  }
458
552
  return operatorResult2.sql;
459
553
  }).join(" AND ");
@@ -544,8 +638,8 @@ var PgVector = class extends vector.MastraVector {
544
638
  } else if (isCloudSqlConfig(config)) {
545
639
  poolConfig = {
546
640
  ...config,
547
- max: config.max ?? 20,
548
- idleTimeoutMillis: config.idleTimeoutMillis ?? 3e4,
641
+ max: config.pgPoolOptions?.max ?? 20,
642
+ idleTimeoutMillis: config.pgPoolOptions?.idleTimeoutMillis ?? 3e4,
549
643
  connectionTimeoutMillis: 2e3,
550
644
  ...config.pgPoolOptions
551
645
  };
@@ -713,9 +807,7 @@ var PgVector = class extends vector.MastraVector {
713
807
  probes
714
808
  }) {
715
809
  try {
716
- if (!Number.isInteger(topK) || topK <= 0) {
717
- throw new Error("topK must be a positive integer");
718
- }
810
+ vector.validateTopK("PG", topK);
719
811
  if (!Array.isArray(queryVector) || !queryVector.every((x) => typeof x === "number" && Number.isFinite(x))) {
720
812
  throw new Error("queryVector must be an array of finite numbers");
721
813
  }
@@ -800,6 +892,7 @@ var PgVector = class extends vector.MastraVector {
800
892
  ids,
801
893
  deleteFilter
802
894
  }) {
895
+ vector.validateUpsertInput("PG", vectors, metadata, ids);
803
896
  const { tableName } = this.getTableName(indexName);
804
897
  const client = await this.pool.connect();
805
898
  try {
@@ -1687,6 +1780,132 @@ var PgVector = class extends vector.MastraVector {
1687
1780
  }
1688
1781
  }
1689
1782
  };
1783
+
1784
+ // src/storage/client.ts
1785
+ function truncateQuery(query, maxLength = 100) {
1786
+ const normalized = query.replace(/\s+/g, " ").trim();
1787
+ if (normalized.length <= maxLength) {
1788
+ return normalized;
1789
+ }
1790
+ return normalized.slice(0, maxLength) + "...";
1791
+ }
1792
+ var PoolAdapter = class {
1793
+ constructor($pool) {
1794
+ this.$pool = $pool;
1795
+ }
1796
+ connect() {
1797
+ return this.$pool.connect();
1798
+ }
1799
+ async none(query, values) {
1800
+ await this.$pool.query(query, values);
1801
+ return null;
1802
+ }
1803
+ async one(query, values) {
1804
+ const result = await this.$pool.query(query, values);
1805
+ if (result.rows.length === 0) {
1806
+ throw new Error(`No data returned from query: ${truncateQuery(query)}`);
1807
+ }
1808
+ if (result.rows.length > 1) {
1809
+ throw new Error(`Multiple rows returned when one was expected: ${truncateQuery(query)}`);
1810
+ }
1811
+ return result.rows[0];
1812
+ }
1813
+ async oneOrNone(query, values) {
1814
+ const result = await this.$pool.query(query, values);
1815
+ if (result.rows.length === 0) {
1816
+ return null;
1817
+ }
1818
+ if (result.rows.length > 1) {
1819
+ throw new Error(`Multiple rows returned when one or none was expected: ${truncateQuery(query)}`);
1820
+ }
1821
+ return result.rows[0];
1822
+ }
1823
+ async any(query, values) {
1824
+ const result = await this.$pool.query(query, values);
1825
+ return result.rows;
1826
+ }
1827
+ async manyOrNone(query, values) {
1828
+ return this.any(query, values);
1829
+ }
1830
+ async many(query, values) {
1831
+ const result = await this.$pool.query(query, values);
1832
+ if (result.rows.length === 0) {
1833
+ throw new Error(`No data returned from query: ${truncateQuery(query)}`);
1834
+ }
1835
+ return result.rows;
1836
+ }
1837
+ async query(query, values) {
1838
+ return this.$pool.query(query, values);
1839
+ }
1840
+ async tx(callback) {
1841
+ const client = await this.$pool.connect();
1842
+ try {
1843
+ await client.query("BEGIN");
1844
+ const txClient = new TransactionClient(client);
1845
+ const result = await callback(txClient);
1846
+ await client.query("COMMIT");
1847
+ return result;
1848
+ } catch (error) {
1849
+ try {
1850
+ await client.query("ROLLBACK");
1851
+ } catch (rollbackError) {
1852
+ console.error("Transaction rollback failed:", rollbackError);
1853
+ }
1854
+ throw error;
1855
+ } finally {
1856
+ client.release();
1857
+ }
1858
+ }
1859
+ };
1860
+ var TransactionClient = class {
1861
+ constructor(client) {
1862
+ this.client = client;
1863
+ }
1864
+ async none(query, values) {
1865
+ await this.client.query(query, values);
1866
+ return null;
1867
+ }
1868
+ async one(query, values) {
1869
+ const result = await this.client.query(query, values);
1870
+ if (result.rows.length === 0) {
1871
+ throw new Error(`No data returned from query: ${truncateQuery(query)}`);
1872
+ }
1873
+ if (result.rows.length > 1) {
1874
+ throw new Error(`Multiple rows returned when one was expected: ${truncateQuery(query)}`);
1875
+ }
1876
+ return result.rows[0];
1877
+ }
1878
+ async oneOrNone(query, values) {
1879
+ const result = await this.client.query(query, values);
1880
+ if (result.rows.length === 0) {
1881
+ return null;
1882
+ }
1883
+ if (result.rows.length > 1) {
1884
+ throw new Error(`Multiple rows returned when one or none was expected: ${truncateQuery(query)}`);
1885
+ }
1886
+ return result.rows[0];
1887
+ }
1888
+ async any(query, values) {
1889
+ const result = await this.client.query(query, values);
1890
+ return result.rows;
1891
+ }
1892
+ async manyOrNone(query, values) {
1893
+ return this.any(query, values);
1894
+ }
1895
+ async many(query, values) {
1896
+ const result = await this.client.query(query, values);
1897
+ if (result.rows.length === 0) {
1898
+ throw new Error(`No data returned from query: ${truncateQuery(query)}`);
1899
+ }
1900
+ return result.rows;
1901
+ }
1902
+ async query(query, values) {
1903
+ return this.client.query(query, values);
1904
+ }
1905
+ async batch(promises) {
1906
+ return Promise.all(promises);
1907
+ }
1908
+ };
1690
1909
  function resolvePgConfig(config) {
1691
1910
  if ("client" in config) {
1692
1911
  return {
@@ -1696,10 +1915,32 @@ function resolvePgConfig(config) {
1696
1915
  indexes: config.indexes
1697
1916
  };
1698
1917
  }
1699
- const pgp = pgPromise__default.default();
1700
- const client = pgp(config);
1918
+ if ("pool" in config) {
1919
+ return {
1920
+ client: new PoolAdapter(config.pool),
1921
+ schemaName: config.schemaName,
1922
+ skipDefaultIndexes: config.skipDefaultIndexes,
1923
+ indexes: config.indexes
1924
+ };
1925
+ }
1926
+ let pool;
1927
+ if ("connectionString" in config) {
1928
+ pool = new pg.Pool({
1929
+ connectionString: config.connectionString,
1930
+ ssl: config.ssl
1931
+ });
1932
+ } else {
1933
+ pool = new pg.Pool({
1934
+ host: config.host,
1935
+ port: config.port,
1936
+ database: config.database,
1937
+ user: config.user,
1938
+ password: config.password,
1939
+ ssl: config.ssl
1940
+ });
1941
+ }
1701
1942
  return {
1702
- client,
1943
+ client: new PoolAdapter(pool),
1703
1944
  schemaName: config.schemaName,
1704
1945
  skipDefaultIndexes: config.skipDefaultIndexes,
1705
1946
  indexes: config.indexes
@@ -1714,6 +1955,91 @@ function getTableName({ indexName, schemaName }) {
1714
1955
  const quotedSchemaName = schemaName;
1715
1956
  return quotedSchemaName ? `${quotedSchemaName}.${quotedIndexName}` : quotedIndexName;
1716
1957
  }
1958
+ function mapToSqlType(type) {
1959
+ switch (type) {
1960
+ case "uuid":
1961
+ return "UUID";
1962
+ case "boolean":
1963
+ return "BOOLEAN";
1964
+ default:
1965
+ return storage.getSqlType(type);
1966
+ }
1967
+ }
1968
+ function generateTableSQL({
1969
+ tableName,
1970
+ schema,
1971
+ schemaName,
1972
+ includeAllConstraints = false
1973
+ }) {
1974
+ const timeZColumns = Object.entries(schema).filter(([_, def]) => def.type === "timestamp").map(([name]) => {
1975
+ const parsedName = utils.parseSqlIdentifier(name, "column name");
1976
+ return `"${parsedName}Z" TIMESTAMPTZ DEFAULT NOW()`;
1977
+ });
1978
+ const columns = Object.entries(schema).map(([name, def]) => {
1979
+ const parsedName = utils.parseSqlIdentifier(name, "column name");
1980
+ const constraints = [];
1981
+ if (def.primaryKey) constraints.push("PRIMARY KEY");
1982
+ if (!def.nullable) constraints.push("NOT NULL");
1983
+ return `"${parsedName}" ${mapToSqlType(def.type)} ${constraints.join(" ")}`;
1984
+ });
1985
+ const finalColumns = [...columns, ...timeZColumns].join(",\n");
1986
+ const parsedSchemaName = schemaName ? utils.parseSqlIdentifier(schemaName, "schema name") : "";
1987
+ const constraintPrefix = parsedSchemaName ? `${parsedSchemaName}_` : "";
1988
+ const quotedSchemaName = getSchemaName(schemaName);
1989
+ const sql = `
1990
+ CREATE TABLE IF NOT EXISTS ${getTableName({ indexName: tableName, schemaName: quotedSchemaName })} (
1991
+ ${finalColumns}
1992
+ );
1993
+ ${tableName === storage.TABLE_WORKFLOW_SNAPSHOT ? `
1994
+ DO $$ BEGIN
1995
+ IF NOT EXISTS (
1996
+ SELECT 1 FROM pg_constraint WHERE conname = lower('${constraintPrefix}mastra_workflow_snapshot_workflow_name_run_id_key')
1997
+ ) AND NOT EXISTS (
1998
+ SELECT 1 FROM pg_indexes WHERE indexname = lower('${constraintPrefix}mastra_workflow_snapshot_workflow_name_run_id_key')
1999
+ ) THEN
2000
+ ALTER TABLE ${getTableName({ indexName: tableName, schemaName: quotedSchemaName })}
2001
+ ADD CONSTRAINT ${constraintPrefix}mastra_workflow_snapshot_workflow_name_run_id_key
2002
+ UNIQUE (workflow_name, run_id);
2003
+ END IF;
2004
+ END $$;
2005
+ ` : ""}
2006
+ ${// For spans table: Include PRIMARY KEY in exports, but not in runtime (handled after deduplication)
2007
+ tableName === storage.TABLE_SPANS && includeAllConstraints ? `
2008
+ DO $$ BEGIN
2009
+ IF NOT EXISTS (
2010
+ SELECT 1 FROM pg_constraint WHERE conname = lower('${constraintPrefix}mastra_ai_spans_traceid_spanid_pk')
2011
+ ) THEN
2012
+ ALTER TABLE ${getTableName({ indexName: tableName, schemaName: quotedSchemaName })}
2013
+ ADD CONSTRAINT ${constraintPrefix}mastra_ai_spans_traceid_spanid_pk
2014
+ PRIMARY KEY ("traceId", "spanId");
2015
+ END IF;
2016
+ END $$;
2017
+ ` : ""}
2018
+ `;
2019
+ return sql;
2020
+ }
2021
+ function exportSchemas(schemaName) {
2022
+ const statements = [];
2023
+ if (schemaName) {
2024
+ const quotedSchemaName = getSchemaName(schemaName);
2025
+ statements.push(`-- Create schema if it doesn't exist`);
2026
+ statements.push(`CREATE SCHEMA IF NOT EXISTS ${quotedSchemaName};`);
2027
+ statements.push("");
2028
+ }
2029
+ for (const [tableName, schema] of Object.entries(storage.TABLE_SCHEMAS)) {
2030
+ statements.push(`-- Table: ${tableName}`);
2031
+ const sql = generateTableSQL({
2032
+ tableName,
2033
+ schema,
2034
+ schemaName,
2035
+ includeAllConstraints: true
2036
+ // Include all constraints for exports/documentation
2037
+ });
2038
+ statements.push(sql.trim());
2039
+ statements.push("");
2040
+ }
2041
+ return statements.join("\n");
2042
+ }
1717
2043
  var schemaSetupRegistry = /* @__PURE__ */ new Map();
1718
2044
  var PgDB = class extends base.MastraBase {
1719
2045
  client;
@@ -1831,16 +2157,6 @@ var PgDB = class extends base.MastraBase {
1831
2157
  }
1832
2158
  await registryEntry.promise;
1833
2159
  }
1834
- getSqlType(type) {
1835
- switch (type) {
1836
- case "uuid":
1837
- return "UUID";
1838
- case "boolean":
1839
- return "BOOLEAN";
1840
- default:
1841
- return storage.getSqlType(type);
1842
- }
1843
- }
1844
2160
  getDefaultValue(type) {
1845
2161
  switch (type) {
1846
2162
  case "timestamp":
@@ -1858,10 +2174,27 @@ var PgDB = class extends base.MastraBase {
1858
2174
  const columns = Object.keys(record).map((col) => utils.parseSqlIdentifier(col, "column name"));
1859
2175
  const values = this.prepareValuesForInsert(record, tableName);
1860
2176
  const placeholders = values.map((_, i) => `$${i + 1}`).join(", ");
1861
- await this.client.none(
1862
- `INSERT INTO ${getTableName({ indexName: tableName, schemaName })} (${columns.map((c) => `"${c}"`).join(", ")}) VALUES (${placeholders})`,
1863
- values
1864
- );
2177
+ const fullTableName = getTableName({ indexName: tableName, schemaName });
2178
+ const columnList = columns.map((c) => `"${c}"`).join(", ");
2179
+ if (tableName === storage.TABLE_SPANS) {
2180
+ const updateColumns = columns.filter((c) => c !== "traceId" && c !== "spanId");
2181
+ if (updateColumns.length > 0) {
2182
+ const updateClause = updateColumns.map((c) => `"${c}" = EXCLUDED."${c}"`).join(", ");
2183
+ await this.client.none(
2184
+ `INSERT INTO ${fullTableName} (${columnList}) VALUES (${placeholders})
2185
+ ON CONFLICT ("traceId", "spanId") DO UPDATE SET ${updateClause}`,
2186
+ values
2187
+ );
2188
+ } else {
2189
+ await this.client.none(
2190
+ `INSERT INTO ${fullTableName} (${columnList}) VALUES (${placeholders})
2191
+ ON CONFLICT ("traceId", "spanId") DO NOTHING`,
2192
+ values
2193
+ );
2194
+ }
2195
+ } else {
2196
+ await this.client.none(`INSERT INTO ${fullTableName} (${columnList}) VALUES (${placeholders})`, values);
2197
+ }
1865
2198
  } catch (error$1) {
1866
2199
  throw new error.MastraError(
1867
2200
  {
@@ -1885,7 +2218,7 @@ var PgDB = class extends base.MastraBase {
1885
2218
  SELECT 1 FROM information_schema.tables
1886
2219
  WHERE table_schema = $1 AND table_name = $2
1887
2220
  )`,
1888
- [this.schemaName || "mastra", tableName]
2221
+ [this.schemaName || "public", tableName]
1889
2222
  );
1890
2223
  if (tableExists?.exists) {
1891
2224
  await this.client.none(`TRUNCATE TABLE ${tableNameWithSchema} CASCADE`);
@@ -1910,52 +2243,10 @@ var PgDB = class extends base.MastraBase {
1910
2243
  }) {
1911
2244
  try {
1912
2245
  const timeZColumnNames = Object.entries(schema).filter(([_, def]) => def.type === "timestamp").map(([name]) => name);
1913
- const timeZColumns = Object.entries(schema).filter(([_, def]) => def.type === "timestamp").map(([name]) => {
1914
- const parsedName = utils.parseSqlIdentifier(name, "column name");
1915
- return `"${parsedName}Z" TIMESTAMPTZ DEFAULT NOW()`;
1916
- });
1917
- const columns = Object.entries(schema).map(([name, def]) => {
1918
- const parsedName = utils.parseSqlIdentifier(name, "column name");
1919
- const constraints = [];
1920
- if (def.primaryKey) constraints.push("PRIMARY KEY");
1921
- if (!def.nullable) constraints.push("NOT NULL");
1922
- return `"${parsedName}" ${this.getSqlType(def.type)} ${constraints.join(" ")}`;
1923
- });
1924
2246
  if (this.schemaName) {
1925
2247
  await this.setupSchema();
1926
2248
  }
1927
- const finalColumns = [...columns, ...timeZColumns].join(",\n");
1928
- const constraintPrefix = this.schemaName ? `${this.schemaName}_` : "";
1929
- const schemaName = getSchemaName(this.schemaName);
1930
- const sql = `
1931
- CREATE TABLE IF NOT EXISTS ${getTableName({ indexName: tableName, schemaName })} (
1932
- ${finalColumns}
1933
- );
1934
- ${tableName === storage.TABLE_WORKFLOW_SNAPSHOT ? `
1935
- DO $$ BEGIN
1936
- IF NOT EXISTS (
1937
- SELECT 1 FROM pg_constraint WHERE conname = '${constraintPrefix}mastra_workflow_snapshot_workflow_name_run_id_key'
1938
- ) AND NOT EXISTS (
1939
- SELECT 1 FROM pg_indexes WHERE indexname = '${constraintPrefix}mastra_workflow_snapshot_workflow_name_run_id_key'
1940
- ) THEN
1941
- ALTER TABLE ${getTableName({ indexName: tableName, schemaName })}
1942
- ADD CONSTRAINT ${constraintPrefix}mastra_workflow_snapshot_workflow_name_run_id_key
1943
- UNIQUE (workflow_name, run_id);
1944
- END IF;
1945
- END $$;
1946
- ` : ""}
1947
- ${tableName === storage.TABLE_SPANS ? `
1948
- DO $$ BEGIN
1949
- IF NOT EXISTS (
1950
- SELECT 1 FROM pg_constraint WHERE conname = '${constraintPrefix}mastra_ai_spans_traceid_spanid_pk'
1951
- ) THEN
1952
- ALTER TABLE ${getTableName({ indexName: tableName, schemaName: getSchemaName(this.schemaName) })}
1953
- ADD CONSTRAINT ${constraintPrefix}mastra_ai_spans_traceid_spanid_pk
1954
- PRIMARY KEY ("traceId", "spanId");
1955
- END IF;
1956
- END $$;
1957
- ` : ""}
1958
- `;
2249
+ const sql = generateTableSQL({ tableName, schema, schemaName: this.schemaName });
1959
2250
  await this.client.none(sql);
1960
2251
  await this.alterTable({
1961
2252
  tableName,
@@ -1965,8 +2256,46 @@ var PgDB = class extends base.MastraBase {
1965
2256
  if (tableName === storage.TABLE_SPANS) {
1966
2257
  await this.setupTimestampTriggers(tableName);
1967
2258
  await this.migrateSpansTable();
2259
+ const pkExists = await this.spansPrimaryKeyExists();
2260
+ if (!pkExists) {
2261
+ const duplicateInfo = await this.checkForDuplicateSpans();
2262
+ if (duplicateInfo.hasDuplicates) {
2263
+ const errorMessage = `
2264
+ ===========================================================================
2265
+ MIGRATION REQUIRED: Duplicate spans detected in ${duplicateInfo.tableName}
2266
+ ===========================================================================
2267
+
2268
+ Found ${duplicateInfo.duplicateCount} duplicate (traceId, spanId) combinations.
2269
+
2270
+ The spans table requires a unique constraint on (traceId, spanId), but your
2271
+ database contains duplicate entries that must be resolved first.
2272
+
2273
+ To fix this, run the manual migration command:
2274
+
2275
+ npx mastra migrate
2276
+
2277
+ This command will:
2278
+ 1. Remove duplicate spans (keeping the most complete/recent version)
2279
+ 2. Add the required unique constraint
2280
+
2281
+ Note: This migration may take some time for large tables.
2282
+ ===========================================================================
2283
+ `;
2284
+ throw new error.MastraError({
2285
+ id: storage.createStorageErrorId("PG", "MIGRATION_REQUIRED", "DUPLICATE_SPANS"),
2286
+ domain: error.ErrorDomain.STORAGE,
2287
+ category: error.ErrorCategory.USER,
2288
+ text: errorMessage
2289
+ });
2290
+ } else {
2291
+ await this.addSpansPrimaryKey();
2292
+ }
2293
+ }
1968
2294
  }
1969
2295
  } catch (error$1) {
2296
+ if (error$1 instanceof error.MastraError) {
2297
+ throw error$1;
2298
+ }
1970
2299
  throw new error.MastraError(
1971
2300
  {
1972
2301
  id: storage.createStorageErrorId("PG", "CREATE_TABLE", "FAILED"),
@@ -2029,12 +2358,29 @@ var PgDB = class extends base.MastraBase {
2029
2358
  const columnExists = await this.hasColumn(storage.TABLE_SPANS, columnName);
2030
2359
  if (!columnExists) {
2031
2360
  const parsedColumnName = utils.parseSqlIdentifier(columnName, "column name");
2032
- const sqlType = this.getSqlType(columnDef.type);
2361
+ const sqlType = mapToSqlType(columnDef.type);
2033
2362
  const nullable = columnDef.nullable ? "" : "NOT NULL";
2034
2363
  const defaultValue = !columnDef.nullable ? this.getDefaultValue(columnDef.type) : "";
2035
2364
  const alterSql = `ALTER TABLE ${fullTableName} ADD COLUMN IF NOT EXISTS "${parsedColumnName}" ${sqlType} ${nullable} ${defaultValue}`.trim();
2036
2365
  await this.client.none(alterSql);
2037
2366
  this.logger?.debug?.(`Added column '${columnName}' to ${fullTableName}`);
2367
+ if (sqlType === "TIMESTAMP") {
2368
+ const timestampZSql = `ALTER TABLE ${fullTableName} ADD COLUMN IF NOT EXISTS "${parsedColumnName}Z" TIMESTAMPTZ DEFAULT NOW()`.trim();
2369
+ await this.client.none(timestampZSql);
2370
+ this.logger?.debug?.(`Added timezone column '${columnName}Z' to ${fullTableName}`);
2371
+ }
2372
+ }
2373
+ }
2374
+ for (const [columnName, columnDef] of Object.entries(schema)) {
2375
+ if (columnDef.type === "timestamp") {
2376
+ const tzColumnName = `${columnName}Z`;
2377
+ const tzColumnExists = await this.hasColumn(storage.TABLE_SPANS, tzColumnName);
2378
+ if (!tzColumnExists) {
2379
+ const parsedTzColumnName = utils.parseSqlIdentifier(tzColumnName, "column name");
2380
+ const timestampZSql = `ALTER TABLE ${fullTableName} ADD COLUMN IF NOT EXISTS "${parsedTzColumnName}" TIMESTAMPTZ DEFAULT NOW()`.trim();
2381
+ await this.client.none(timestampZSql);
2382
+ this.logger?.debug?.(`Added timezone column '${tzColumnName}' to ${fullTableName}`);
2383
+ }
2038
2384
  }
2039
2385
  }
2040
2386
  this.logger?.info?.(`Migration completed for ${fullTableName}`);
@@ -2042,6 +2388,224 @@ var PgDB = class extends base.MastraBase {
2042
2388
  this.logger?.warn?.(`Failed to migrate spans table ${fullTableName}:`, error);
2043
2389
  }
2044
2390
  }
2391
+ /**
2392
+ * Deduplicates spans in the mastra_ai_spans table before adding the PRIMARY KEY constraint.
2393
+ * Keeps spans based on priority: completed (endedAt NOT NULL) > most recent updatedAt > most recent createdAt.
2394
+ *
2395
+ * Note: This prioritizes migration completion over perfect data preservation.
2396
+ * Old trace data may be lost, which is acceptable for this use case.
2397
+ */
2398
+ async deduplicateSpans() {
2399
+ const fullTableName = getTableName({ indexName: storage.TABLE_SPANS, schemaName: getSchemaName(this.schemaName) });
2400
+ try {
2401
+ const duplicateCheck = await this.client.oneOrNone(`
2402
+ SELECT EXISTS (
2403
+ SELECT 1
2404
+ FROM ${fullTableName}
2405
+ GROUP BY "traceId", "spanId"
2406
+ HAVING COUNT(*) > 1
2407
+ LIMIT 1
2408
+ ) as has_duplicates
2409
+ `);
2410
+ if (!duplicateCheck?.has_duplicates) {
2411
+ this.logger?.debug?.(`No duplicate spans found in ${fullTableName}`);
2412
+ return;
2413
+ }
2414
+ this.logger?.info?.(`Duplicate spans detected in ${fullTableName}, starting deduplication...`);
2415
+ const result = await this.client.query(`
2416
+ DELETE FROM ${fullTableName} t1
2417
+ USING ${fullTableName} t2
2418
+ WHERE t1."traceId" = t2."traceId"
2419
+ AND t1."spanId" = t2."spanId"
2420
+ AND (
2421
+ -- Keep completed spans over incomplete
2422
+ (t1."endedAt" IS NULL AND t2."endedAt" IS NOT NULL)
2423
+ OR
2424
+ -- If both have same completion status, keep more recent updatedAt
2425
+ (
2426
+ (t1."endedAt" IS NULL) = (t2."endedAt" IS NULL)
2427
+ AND (
2428
+ (t1."updatedAt" < t2."updatedAt")
2429
+ OR (t1."updatedAt" IS NULL AND t2."updatedAt" IS NOT NULL)
2430
+ OR
2431
+ -- If updatedAt is the same, keep more recent createdAt
2432
+ (
2433
+ (t1."updatedAt" = t2."updatedAt" OR (t1."updatedAt" IS NULL AND t2."updatedAt" IS NULL))
2434
+ AND (
2435
+ (t1."createdAt" < t2."createdAt")
2436
+ OR (t1."createdAt" IS NULL AND t2."createdAt" IS NOT NULL)
2437
+ OR
2438
+ -- If all else equal, use ctid as tiebreaker
2439
+ (
2440
+ (t1."createdAt" = t2."createdAt" OR (t1."createdAt" IS NULL AND t2."createdAt" IS NULL))
2441
+ AND t1.ctid < t2.ctid
2442
+ )
2443
+ )
2444
+ )
2445
+ )
2446
+ )
2447
+ )
2448
+ `);
2449
+ this.logger?.info?.(
2450
+ `Deduplication complete: removed ${result.rowCount ?? 0} duplicate spans from ${fullTableName}`
2451
+ );
2452
+ } catch (error$1) {
2453
+ throw new error.MastraError(
2454
+ {
2455
+ id: storage.createStorageErrorId("PG", "DEDUPLICATE_SPANS", "FAILED"),
2456
+ domain: error.ErrorDomain.STORAGE,
2457
+ category: error.ErrorCategory.THIRD_PARTY,
2458
+ details: {
2459
+ tableName: storage.TABLE_SPANS
2460
+ }
2461
+ },
2462
+ error$1
2463
+ );
2464
+ }
2465
+ }
2466
+ /**
2467
+ * Checks for duplicate (traceId, spanId) combinations in the spans table.
2468
+ * Returns information about duplicates for logging/CLI purposes.
2469
+ */
2470
+ async checkForDuplicateSpans() {
2471
+ const fullTableName = getTableName({ indexName: storage.TABLE_SPANS, schemaName: getSchemaName(this.schemaName) });
2472
+ try {
2473
+ const result = await this.client.oneOrNone(`
2474
+ SELECT COUNT(*) as duplicate_count
2475
+ FROM (
2476
+ SELECT "traceId", "spanId"
2477
+ FROM ${fullTableName}
2478
+ GROUP BY "traceId", "spanId"
2479
+ HAVING COUNT(*) > 1
2480
+ ) duplicates
2481
+ `);
2482
+ const duplicateCount = parseInt(result?.duplicate_count ?? "0", 10);
2483
+ return {
2484
+ hasDuplicates: duplicateCount > 0,
2485
+ duplicateCount,
2486
+ tableName: fullTableName
2487
+ };
2488
+ } catch (error) {
2489
+ this.logger?.debug?.(`Could not check for duplicates: ${error}`);
2490
+ return { hasDuplicates: false, duplicateCount: 0, tableName: fullTableName };
2491
+ }
2492
+ }
2493
+ /**
2494
+ * Checks if the PRIMARY KEY constraint on (traceId, spanId) already exists on the spans table.
2495
+ * Used to skip deduplication when the constraint already exists (migration already complete).
2496
+ */
2497
+ async spansPrimaryKeyExists() {
2498
+ const parsedSchemaName = this.schemaName ? utils.parseSqlIdentifier(this.schemaName, "schema name") : "";
2499
+ const constraintPrefix = parsedSchemaName ? `${parsedSchemaName}_` : "";
2500
+ const constraintName = `${constraintPrefix}mastra_ai_spans_traceid_spanid_pk`;
2501
+ const result = await this.client.oneOrNone(
2502
+ `SELECT EXISTS (SELECT 1 FROM pg_constraint WHERE conname = $1) as exists`,
2503
+ [constraintName]
2504
+ );
2505
+ return result?.exists ?? false;
2506
+ }
2507
+ /**
2508
+ * Adds the PRIMARY KEY constraint on (traceId, spanId) to the spans table.
2509
+ * Should be called AFTER deduplication to ensure no duplicate key violations.
2510
+ */
2511
+ async addSpansPrimaryKey() {
2512
+ const fullTableName = getTableName({ indexName: storage.TABLE_SPANS, schemaName: getSchemaName(this.schemaName) });
2513
+ const parsedSchemaName = this.schemaName ? utils.parseSqlIdentifier(this.schemaName, "schema name") : "";
2514
+ const constraintPrefix = parsedSchemaName ? `${parsedSchemaName}_` : "";
2515
+ const constraintName = `${constraintPrefix}mastra_ai_spans_traceid_spanid_pk`;
2516
+ try {
2517
+ const constraintExists = await this.client.oneOrNone(
2518
+ `
2519
+ SELECT EXISTS (
2520
+ SELECT 1 FROM pg_constraint WHERE conname = $1
2521
+ ) as exists
2522
+ `,
2523
+ [constraintName]
2524
+ );
2525
+ if (constraintExists?.exists) {
2526
+ this.logger?.debug?.(`PRIMARY KEY constraint ${constraintName} already exists on ${fullTableName}`);
2527
+ return;
2528
+ }
2529
+ await this.client.none(`
2530
+ ALTER TABLE ${fullTableName}
2531
+ ADD CONSTRAINT ${constraintName}
2532
+ PRIMARY KEY ("traceId", "spanId")
2533
+ `);
2534
+ this.logger?.info?.(`Added PRIMARY KEY constraint ${constraintName} to ${fullTableName}`);
2535
+ } catch (error$1) {
2536
+ throw new error.MastraError(
2537
+ {
2538
+ id: storage.createStorageErrorId("PG", "ADD_SPANS_PRIMARY_KEY", "FAILED"),
2539
+ domain: error.ErrorDomain.STORAGE,
2540
+ category: error.ErrorCategory.THIRD_PARTY,
2541
+ details: {
2542
+ tableName: storage.TABLE_SPANS,
2543
+ constraintName
2544
+ }
2545
+ },
2546
+ error$1
2547
+ );
2548
+ }
2549
+ }
2550
+ /**
2551
+ * Manually run the spans migration to deduplicate and add the unique constraint.
2552
+ * This is intended to be called from the CLI when duplicates are detected.
2553
+ *
2554
+ * @returns Migration result with status and details
2555
+ */
2556
+ async migrateSpans() {
2557
+ const fullTableName = getTableName({ indexName: storage.TABLE_SPANS, schemaName: getSchemaName(this.schemaName) });
2558
+ const pkExists = await this.spansPrimaryKeyExists();
2559
+ if (pkExists) {
2560
+ return {
2561
+ success: true,
2562
+ alreadyMigrated: true,
2563
+ duplicatesRemoved: 0,
2564
+ message: `Migration already complete. PRIMARY KEY constraint exists on ${fullTableName}.`
2565
+ };
2566
+ }
2567
+ const duplicateInfo = await this.checkForDuplicateSpans();
2568
+ if (duplicateInfo.hasDuplicates) {
2569
+ this.logger?.info?.(
2570
+ `Found ${duplicateInfo.duplicateCount} duplicate (traceId, spanId) combinations. Starting deduplication...`
2571
+ );
2572
+ await this.deduplicateSpans();
2573
+ } else {
2574
+ this.logger?.info?.(`No duplicate spans found.`);
2575
+ }
2576
+ await this.addSpansPrimaryKey();
2577
+ return {
2578
+ success: true,
2579
+ alreadyMigrated: false,
2580
+ duplicatesRemoved: duplicateInfo.duplicateCount,
2581
+ message: duplicateInfo.hasDuplicates ? `Migration complete. Removed duplicates and added PRIMARY KEY constraint to ${fullTableName}.` : `Migration complete. Added PRIMARY KEY constraint to ${fullTableName}.`
2582
+ };
2583
+ }
2584
+ /**
2585
+ * Check migration status for the spans table.
2586
+ * Returns information about whether migration is needed.
2587
+ */
2588
+ async checkSpansMigrationStatus() {
2589
+ const fullTableName = getTableName({ indexName: storage.TABLE_SPANS, schemaName: getSchemaName(this.schemaName) });
2590
+ const pkExists = await this.spansPrimaryKeyExists();
2591
+ if (pkExists) {
2592
+ return {
2593
+ needsMigration: false,
2594
+ hasDuplicates: false,
2595
+ duplicateCount: 0,
2596
+ constraintExists: true,
2597
+ tableName: fullTableName
2598
+ };
2599
+ }
2600
+ const duplicateInfo = await this.checkForDuplicateSpans();
2601
+ return {
2602
+ needsMigration: true,
2603
+ hasDuplicates: duplicateInfo.hasDuplicates,
2604
+ duplicateCount: duplicateInfo.duplicateCount,
2605
+ constraintExists: false,
2606
+ tableName: fullTableName
2607
+ };
2608
+ }
2045
2609
  /**
2046
2610
  * Alters table schema to add columns if they don't exist
2047
2611
  * @param tableName Name of the table
@@ -2059,7 +2623,7 @@ var PgDB = class extends base.MastraBase {
2059
2623
  if (schema[columnName]) {
2060
2624
  const columnDef = schema[columnName];
2061
2625
  const parsedColumnName = utils.parseSqlIdentifier(columnName, "column name");
2062
- const sqlType = this.getSqlType(columnDef.type);
2626
+ const sqlType = mapToSqlType(columnDef.type);
2063
2627
  const nullable = columnDef.nullable ? "" : "NOT NULL";
2064
2628
  const defaultValue = !columnDef.nullable ? this.getDefaultValue(columnDef.type) : "";
2065
2629
  const alterSql = `ALTER TABLE ${fullTableName} ADD COLUMN IF NOT EXISTS "${parsedColumnName}" ${sqlType} ${nullable} ${defaultValue}`.trim();
@@ -2379,9 +2943,9 @@ var PgDB = class extends base.MastraBase {
2379
2943
  size: result.size || "0",
2380
2944
  definition: result.definition || "",
2381
2945
  method: result.method || "btree",
2382
- scans: parseInt(result.scans) || 0,
2383
- tuples_read: parseInt(result.tuples_read) || 0,
2384
- tuples_fetched: parseInt(result.tuples_fetched) || 0
2946
+ scans: parseInt(String(result.scans)) || 0,
2947
+ tuples_read: parseInt(String(result.tuples_read)) || 0,
2948
+ tuples_fetched: parseInt(String(result.tuples_fetched)) || 0
2385
2949
  };
2386
2950
  } catch (error$1) {
2387
2951
  throw new error.MastraError(
@@ -2553,7 +3117,7 @@ var AgentsPG = class _AgentsPG extends storage.AgentsStorage {
2553
3117
  #skipDefaultIndexes;
2554
3118
  #indexes;
2555
3119
  /** Tables managed by this domain */
2556
- static MANAGED_TABLES = [storage.TABLE_AGENTS];
3120
+ static MANAGED_TABLES = [storage.TABLE_AGENTS, storage.TABLE_AGENT_VERSIONS];
2557
3121
  constructor(config) {
2558
3122
  super();
2559
3123
  const { client, schemaName, skipDefaultIndexes, indexes } = resolvePgConfig(config);
@@ -2580,6 +3144,7 @@ var AgentsPG = class _AgentsPG extends storage.AgentsStorage {
2580
3144
  }
2581
3145
  async init() {
2582
3146
  await this.#db.createTable({ tableName: storage.TABLE_AGENTS, schema: storage.TABLE_SCHEMAS[storage.TABLE_AGENTS] });
3147
+ await this.#db.createTable({ tableName: storage.TABLE_AGENT_VERSIONS, schema: storage.TABLE_SCHEMAS[storage.TABLE_AGENT_VERSIONS] });
2583
3148
  await this.createDefaultIndexes();
2584
3149
  await this.createCustomIndexes();
2585
3150
  }
@@ -2599,6 +3164,7 @@ var AgentsPG = class _AgentsPG extends storage.AgentsStorage {
2599
3164
  }
2600
3165
  }
2601
3166
  async dangerouslyClearAll() {
3167
+ await this.#db.clearTable({ tableName: storage.TABLE_AGENT_VERSIONS });
2602
3168
  await this.#db.clearTable({ tableName: storage.TABLE_AGENTS });
2603
3169
  }
2604
3170
  parseJson(value, fieldName) {
@@ -2636,11 +3202,14 @@ var AgentsPG = class _AgentsPG extends storage.AgentsStorage {
2636
3202
  defaultOptions: this.parseJson(row.defaultOptions, "defaultOptions"),
2637
3203
  workflows: this.parseJson(row.workflows, "workflows"),
2638
3204
  agents: this.parseJson(row.agents, "agents"),
3205
+ integrationTools: this.parseJson(row.integrationTools, "integrationTools"),
2639
3206
  inputProcessors: this.parseJson(row.inputProcessors, "inputProcessors"),
2640
3207
  outputProcessors: this.parseJson(row.outputProcessors, "outputProcessors"),
2641
3208
  memory: this.parseJson(row.memory, "memory"),
2642
3209
  scorers: this.parseJson(row.scorers, "scorers"),
2643
3210
  metadata: this.parseJson(row.metadata, "metadata"),
3211
+ ownerId: row.ownerId,
3212
+ activeVersionId: row.activeVersionId,
2644
3213
  createdAt: row.createdAtZ || row.createdAt,
2645
3214
  updatedAt: row.updatedAtZ || row.updatedAt
2646
3215
  };
@@ -2672,10 +3241,12 @@ var AgentsPG = class _AgentsPG extends storage.AgentsStorage {
2672
3241
  const nowIso = now.toISOString();
2673
3242
  await this.#db.client.none(
2674
3243
  `INSERT INTO ${tableName} (
2675
- id, name, description, instructions, model, tools,
2676
- "defaultOptions", workflows, agents, "inputProcessors", "outputProcessors", memory, scorers, metadata,
3244
+ id, name, description, instructions, model, tools,
3245
+ "defaultOptions", workflows, agents, "integrationTools",
3246
+ "inputProcessors", "outputProcessors", memory, scorers, metadata,
3247
+ "ownerId", "activeVersionId",
2677
3248
  "createdAt", "createdAtZ", "updatedAt", "updatedAtZ"
2678
- ) VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11, $12, $13, $14, $15, $16, $17, $18)`,
3249
+ ) VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11, $12, $13, $14, $15, $16, $17, $18, $19, $20, $21)`,
2679
3250
  [
2680
3251
  agent.id,
2681
3252
  agent.name,
@@ -2686,11 +3257,14 @@ var AgentsPG = class _AgentsPG extends storage.AgentsStorage {
2686
3257
  agent.defaultOptions ? JSON.stringify(agent.defaultOptions) : null,
2687
3258
  agent.workflows ? JSON.stringify(agent.workflows) : null,
2688
3259
  agent.agents ? JSON.stringify(agent.agents) : null,
3260
+ agent.integrationTools ? JSON.stringify(agent.integrationTools) : null,
2689
3261
  agent.inputProcessors ? JSON.stringify(agent.inputProcessors) : null,
2690
3262
  agent.outputProcessors ? JSON.stringify(agent.outputProcessors) : null,
2691
3263
  agent.memory ? JSON.stringify(agent.memory) : null,
2692
3264
  agent.scorers ? JSON.stringify(agent.scorers) : null,
2693
3265
  agent.metadata ? JSON.stringify(agent.metadata) : null,
3266
+ agent.ownerId ?? null,
3267
+ agent.activeVersionId ?? null,
2694
3268
  nowIso,
2695
3269
  nowIso,
2696
3270
  nowIso,
@@ -2778,6 +3352,18 @@ var AgentsPG = class _AgentsPG extends storage.AgentsStorage {
2778
3352
  setClauses.push(`scorers = $${paramIndex++}`);
2779
3353
  values.push(JSON.stringify(updates.scorers));
2780
3354
  }
3355
+ if (updates.integrationTools !== void 0) {
3356
+ setClauses.push(`"integrationTools" = $${paramIndex++}`);
3357
+ values.push(JSON.stringify(updates.integrationTools));
3358
+ }
3359
+ if (updates.ownerId !== void 0) {
3360
+ setClauses.push(`"ownerId" = $${paramIndex++}`);
3361
+ values.push(updates.ownerId);
3362
+ }
3363
+ if (updates.activeVersionId !== void 0) {
3364
+ setClauses.push(`"activeVersionId" = $${paramIndex++}`);
3365
+ values.push(updates.activeVersionId);
3366
+ }
2781
3367
  if (updates.metadata !== void 0) {
2782
3368
  const mergedMetadata = { ...existingAgent.metadata, ...updates.metadata };
2783
3369
  setClauses.push(`metadata = $${paramIndex++}`);
@@ -2800,50 +3386,209 @@ var AgentsPG = class _AgentsPG extends storage.AgentsStorage {
2800
3386
  throw new error.MastraError({
2801
3387
  id: storage.createStorageErrorId("PG", "UPDATE_AGENT", "NOT_FOUND_AFTER_UPDATE"),
2802
3388
  domain: error.ErrorDomain.STORAGE,
2803
- category: error.ErrorCategory.SYSTEM,
2804
- text: `Agent ${id} not found after update`,
2805
- details: { agentId: id }
2806
- });
3389
+ category: error.ErrorCategory.SYSTEM,
3390
+ text: `Agent ${id} not found after update`,
3391
+ details: { agentId: id }
3392
+ });
3393
+ }
3394
+ return updatedAgent;
3395
+ } catch (error$1) {
3396
+ if (error$1 instanceof error.MastraError) {
3397
+ throw error$1;
3398
+ }
3399
+ throw new error.MastraError(
3400
+ {
3401
+ id: storage.createStorageErrorId("PG", "UPDATE_AGENT", "FAILED"),
3402
+ domain: error.ErrorDomain.STORAGE,
3403
+ category: error.ErrorCategory.THIRD_PARTY,
3404
+ details: { agentId: id }
3405
+ },
3406
+ error$1
3407
+ );
3408
+ }
3409
+ }
3410
+ async deleteAgent({ id }) {
3411
+ try {
3412
+ const tableName = getTableName2({ indexName: storage.TABLE_AGENTS, schemaName: getSchemaName2(this.#schema) });
3413
+ await this.deleteVersionsByAgentId(id);
3414
+ await this.#db.client.none(`DELETE FROM ${tableName} WHERE id = $1`, [id]);
3415
+ } catch (error$1) {
3416
+ throw new error.MastraError(
3417
+ {
3418
+ id: storage.createStorageErrorId("PG", "DELETE_AGENT", "FAILED"),
3419
+ domain: error.ErrorDomain.STORAGE,
3420
+ category: error.ErrorCategory.THIRD_PARTY,
3421
+ details: { agentId: id }
3422
+ },
3423
+ error$1
3424
+ );
3425
+ }
3426
+ }
3427
+ async listAgents(args) {
3428
+ const { page = 0, perPage: perPageInput, orderBy } = args || {};
3429
+ const { field, direction } = this.parseOrderBy(orderBy);
3430
+ if (page < 0) {
3431
+ throw new error.MastraError(
3432
+ {
3433
+ id: storage.createStorageErrorId("PG", "LIST_AGENTS", "INVALID_PAGE"),
3434
+ domain: error.ErrorDomain.STORAGE,
3435
+ category: error.ErrorCategory.USER,
3436
+ details: { page }
3437
+ },
3438
+ new Error("page must be >= 0")
3439
+ );
3440
+ }
3441
+ const perPage = storage.normalizePerPage(perPageInput, 100);
3442
+ const { offset, perPage: perPageForResponse } = storage.calculatePagination(page, perPageInput, perPage);
3443
+ try {
3444
+ const tableName = getTableName2({ indexName: storage.TABLE_AGENTS, schemaName: getSchemaName2(this.#schema) });
3445
+ const countResult = await this.#db.client.one(`SELECT COUNT(*) as count FROM ${tableName}`);
3446
+ const total = parseInt(countResult.count, 10);
3447
+ if (total === 0) {
3448
+ return {
3449
+ agents: [],
3450
+ total: 0,
3451
+ page,
3452
+ perPage: perPageForResponse,
3453
+ hasMore: false
3454
+ };
3455
+ }
3456
+ const limitValue = perPageInput === false ? total : perPage;
3457
+ const dataResult = await this.#db.client.manyOrNone(
3458
+ `SELECT * FROM ${tableName} ORDER BY "${field}" ${direction} LIMIT $1 OFFSET $2`,
3459
+ [limitValue, offset]
3460
+ );
3461
+ const agents = (dataResult || []).map((row) => this.parseRow(row));
3462
+ return {
3463
+ agents,
3464
+ total,
3465
+ page,
3466
+ perPage: perPageForResponse,
3467
+ hasMore: perPageInput === false ? false : offset + perPage < total
3468
+ };
3469
+ } catch (error$1) {
3470
+ throw new error.MastraError(
3471
+ {
3472
+ id: storage.createStorageErrorId("PG", "LIST_AGENTS", "FAILED"),
3473
+ domain: error.ErrorDomain.STORAGE,
3474
+ category: error.ErrorCategory.THIRD_PARTY
3475
+ },
3476
+ error$1
3477
+ );
3478
+ }
3479
+ }
3480
+ // ==========================================================================
3481
+ // Agent Version Methods
3482
+ // ==========================================================================
3483
+ async createVersion(input) {
3484
+ try {
3485
+ const tableName = getTableName2({ indexName: storage.TABLE_AGENT_VERSIONS, schemaName: getSchemaName2(this.#schema) });
3486
+ const now = /* @__PURE__ */ new Date();
3487
+ const nowIso = now.toISOString();
3488
+ await this.#db.client.none(
3489
+ `INSERT INTO ${tableName} (
3490
+ id, "agentId", "versionNumber", name, snapshot, "changedFields", "changeMessage", "createdAt", "createdAtZ"
3491
+ ) VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9)`,
3492
+ [
3493
+ input.id,
3494
+ input.agentId,
3495
+ input.versionNumber,
3496
+ input.name ?? null,
3497
+ JSON.stringify(input.snapshot),
3498
+ input.changedFields ? JSON.stringify(input.changedFields) : null,
3499
+ input.changeMessage ?? null,
3500
+ nowIso,
3501
+ nowIso
3502
+ ]
3503
+ );
3504
+ return {
3505
+ ...input,
3506
+ createdAt: now
3507
+ };
3508
+ } catch (error$1) {
3509
+ throw new error.MastraError(
3510
+ {
3511
+ id: storage.createStorageErrorId("PG", "CREATE_VERSION", "FAILED"),
3512
+ domain: error.ErrorDomain.STORAGE,
3513
+ category: error.ErrorCategory.THIRD_PARTY,
3514
+ details: { versionId: input.id, agentId: input.agentId }
3515
+ },
3516
+ error$1
3517
+ );
3518
+ }
3519
+ }
3520
+ async getVersion(id) {
3521
+ try {
3522
+ const tableName = getTableName2({ indexName: storage.TABLE_AGENT_VERSIONS, schemaName: getSchemaName2(this.#schema) });
3523
+ const result = await this.#db.client.oneOrNone(`SELECT * FROM ${tableName} WHERE id = $1`, [id]);
3524
+ if (!result) {
3525
+ return null;
2807
3526
  }
2808
- return updatedAgent;
3527
+ return this.parseVersionRow(result);
2809
3528
  } catch (error$1) {
2810
- if (error$1 instanceof error.MastraError) {
2811
- throw error$1;
3529
+ throw new error.MastraError(
3530
+ {
3531
+ id: storage.createStorageErrorId("PG", "GET_VERSION", "FAILED"),
3532
+ domain: error.ErrorDomain.STORAGE,
3533
+ category: error.ErrorCategory.THIRD_PARTY,
3534
+ details: { versionId: id }
3535
+ },
3536
+ error$1
3537
+ );
3538
+ }
3539
+ }
3540
+ async getVersionByNumber(agentId, versionNumber) {
3541
+ try {
3542
+ const tableName = getTableName2({ indexName: storage.TABLE_AGENT_VERSIONS, schemaName: getSchemaName2(this.#schema) });
3543
+ const result = await this.#db.client.oneOrNone(
3544
+ `SELECT * FROM ${tableName} WHERE "agentId" = $1 AND "versionNumber" = $2`,
3545
+ [agentId, versionNumber]
3546
+ );
3547
+ if (!result) {
3548
+ return null;
2812
3549
  }
3550
+ return this.parseVersionRow(result);
3551
+ } catch (error$1) {
2813
3552
  throw new error.MastraError(
2814
3553
  {
2815
- id: storage.createStorageErrorId("PG", "UPDATE_AGENT", "FAILED"),
3554
+ id: storage.createStorageErrorId("PG", "GET_VERSION_BY_NUMBER", "FAILED"),
2816
3555
  domain: error.ErrorDomain.STORAGE,
2817
3556
  category: error.ErrorCategory.THIRD_PARTY,
2818
- details: { agentId: id }
3557
+ details: { agentId, versionNumber }
2819
3558
  },
2820
3559
  error$1
2821
3560
  );
2822
3561
  }
2823
3562
  }
2824
- async deleteAgent({ id }) {
3563
+ async getLatestVersion(agentId) {
2825
3564
  try {
2826
- const tableName = getTableName2({ indexName: storage.TABLE_AGENTS, schemaName: getSchemaName2(this.#schema) });
2827
- await this.#db.client.none(`DELETE FROM ${tableName} WHERE id = $1`, [id]);
3565
+ const tableName = getTableName2({ indexName: storage.TABLE_AGENT_VERSIONS, schemaName: getSchemaName2(this.#schema) });
3566
+ const result = await this.#db.client.oneOrNone(
3567
+ `SELECT * FROM ${tableName} WHERE "agentId" = $1 ORDER BY "versionNumber" DESC LIMIT 1`,
3568
+ [agentId]
3569
+ );
3570
+ if (!result) {
3571
+ return null;
3572
+ }
3573
+ return this.parseVersionRow(result);
2828
3574
  } catch (error$1) {
2829
3575
  throw new error.MastraError(
2830
3576
  {
2831
- id: storage.createStorageErrorId("PG", "DELETE_AGENT", "FAILED"),
3577
+ id: storage.createStorageErrorId("PG", "GET_LATEST_VERSION", "FAILED"),
2832
3578
  domain: error.ErrorDomain.STORAGE,
2833
3579
  category: error.ErrorCategory.THIRD_PARTY,
2834
- details: { agentId: id }
3580
+ details: { agentId }
2835
3581
  },
2836
3582
  error$1
2837
3583
  );
2838
3584
  }
2839
3585
  }
2840
- async listAgents(args) {
2841
- const { page = 0, perPage: perPageInput, orderBy } = args || {};
2842
- const { field, direction } = this.parseOrderBy(orderBy);
3586
+ async listVersions(input) {
3587
+ const { agentId, page = 0, perPage: perPageInput, orderBy } = input;
2843
3588
  if (page < 0) {
2844
3589
  throw new error.MastraError(
2845
3590
  {
2846
- id: storage.createStorageErrorId("PG", "LIST_AGENTS", "INVALID_PAGE"),
3591
+ id: storage.createStorageErrorId("PG", "LIST_VERSIONS", "INVALID_PAGE"),
2847
3592
  domain: error.ErrorDomain.STORAGE,
2848
3593
  category: error.ErrorCategory.USER,
2849
3594
  details: { page }
@@ -2851,15 +3596,18 @@ var AgentsPG = class _AgentsPG extends storage.AgentsStorage {
2851
3596
  new Error("page must be >= 0")
2852
3597
  );
2853
3598
  }
2854
- const perPage = storage.normalizePerPage(perPageInput, 100);
3599
+ const perPage = storage.normalizePerPage(perPageInput, 20);
2855
3600
  const { offset, perPage: perPageForResponse } = storage.calculatePagination(page, perPageInput, perPage);
2856
3601
  try {
2857
- const tableName = getTableName2({ indexName: storage.TABLE_AGENTS, schemaName: getSchemaName2(this.#schema) });
2858
- const countResult = await this.#db.client.one(`SELECT COUNT(*) as count FROM ${tableName}`);
3602
+ const { field, direction } = this.parseVersionOrderBy(orderBy);
3603
+ const tableName = getTableName2({ indexName: storage.TABLE_AGENT_VERSIONS, schemaName: getSchemaName2(this.#schema) });
3604
+ const countResult = await this.#db.client.one(`SELECT COUNT(*) as count FROM ${tableName} WHERE "agentId" = $1`, [
3605
+ agentId
3606
+ ]);
2859
3607
  const total = parseInt(countResult.count, 10);
2860
3608
  if (total === 0) {
2861
3609
  return {
2862
- agents: [],
3610
+ versions: [],
2863
3611
  total: 0,
2864
3612
  page,
2865
3613
  perPage: perPageForResponse,
@@ -2868,12 +3616,12 @@ var AgentsPG = class _AgentsPG extends storage.AgentsStorage {
2868
3616
  }
2869
3617
  const limitValue = perPageInput === false ? total : perPage;
2870
3618
  const dataResult = await this.#db.client.manyOrNone(
2871
- `SELECT * FROM ${tableName} ORDER BY "${field}" ${direction} LIMIT $1 OFFSET $2`,
2872
- [limitValue, offset]
3619
+ `SELECT * FROM ${tableName} WHERE "agentId" = $1 ORDER BY "${field}" ${direction} LIMIT $2 OFFSET $3`,
3620
+ [agentId, limitValue, offset]
2873
3621
  );
2874
- const agents = (dataResult || []).map((row) => this.parseRow(row));
3622
+ const versions = (dataResult || []).map((row) => this.parseVersionRow(row));
2875
3623
  return {
2876
- agents,
3624
+ versions,
2877
3625
  total,
2878
3626
  page,
2879
3627
  perPage: perPageForResponse,
@@ -2882,14 +3630,81 @@ var AgentsPG = class _AgentsPG extends storage.AgentsStorage {
2882
3630
  } catch (error$1) {
2883
3631
  throw new error.MastraError(
2884
3632
  {
2885
- id: storage.createStorageErrorId("PG", "LIST_AGENTS", "FAILED"),
3633
+ id: storage.createStorageErrorId("PG", "LIST_VERSIONS", "FAILED"),
2886
3634
  domain: error.ErrorDomain.STORAGE,
2887
- category: error.ErrorCategory.THIRD_PARTY
3635
+ category: error.ErrorCategory.THIRD_PARTY,
3636
+ details: { agentId }
3637
+ },
3638
+ error$1
3639
+ );
3640
+ }
3641
+ }
3642
+ async deleteVersion(id) {
3643
+ try {
3644
+ const tableName = getTableName2({ indexName: storage.TABLE_AGENT_VERSIONS, schemaName: getSchemaName2(this.#schema) });
3645
+ await this.#db.client.none(`DELETE FROM ${tableName} WHERE id = $1`, [id]);
3646
+ } catch (error$1) {
3647
+ throw new error.MastraError(
3648
+ {
3649
+ id: storage.createStorageErrorId("PG", "DELETE_VERSION", "FAILED"),
3650
+ domain: error.ErrorDomain.STORAGE,
3651
+ category: error.ErrorCategory.THIRD_PARTY,
3652
+ details: { versionId: id }
3653
+ },
3654
+ error$1
3655
+ );
3656
+ }
3657
+ }
3658
+ async deleteVersionsByAgentId(agentId) {
3659
+ try {
3660
+ const tableName = getTableName2({ indexName: storage.TABLE_AGENT_VERSIONS, schemaName: getSchemaName2(this.#schema) });
3661
+ await this.#db.client.none(`DELETE FROM ${tableName} WHERE "agentId" = $1`, [agentId]);
3662
+ } catch (error$1) {
3663
+ throw new error.MastraError(
3664
+ {
3665
+ id: storage.createStorageErrorId("PG", "DELETE_VERSIONS_BY_AGENT_ID", "FAILED"),
3666
+ domain: error.ErrorDomain.STORAGE,
3667
+ category: error.ErrorCategory.THIRD_PARTY,
3668
+ details: { agentId }
3669
+ },
3670
+ error$1
3671
+ );
3672
+ }
3673
+ }
3674
+ async countVersions(agentId) {
3675
+ try {
3676
+ const tableName = getTableName2({ indexName: storage.TABLE_AGENT_VERSIONS, schemaName: getSchemaName2(this.#schema) });
3677
+ const result = await this.#db.client.one(`SELECT COUNT(*) as count FROM ${tableName} WHERE "agentId" = $1`, [
3678
+ agentId
3679
+ ]);
3680
+ return parseInt(result.count, 10);
3681
+ } catch (error$1) {
3682
+ throw new error.MastraError(
3683
+ {
3684
+ id: storage.createStorageErrorId("PG", "COUNT_VERSIONS", "FAILED"),
3685
+ domain: error.ErrorDomain.STORAGE,
3686
+ category: error.ErrorCategory.THIRD_PARTY,
3687
+ details: { agentId }
2888
3688
  },
2889
3689
  error$1
2890
3690
  );
2891
3691
  }
2892
3692
  }
3693
+ // ==========================================================================
3694
+ // Private Helper Methods
3695
+ // ==========================================================================
3696
+ parseVersionRow(row) {
3697
+ return {
3698
+ id: row.id,
3699
+ agentId: row.agentId,
3700
+ versionNumber: row.versionNumber,
3701
+ name: row.name,
3702
+ snapshot: this.parseJson(row.snapshot, "snapshot"),
3703
+ changedFields: this.parseJson(row.changedFields, "changedFields"),
3704
+ changeMessage: row.changeMessage,
3705
+ createdAt: row.createdAtZ || row.createdAt
3706
+ };
3707
+ }
2893
3708
  };
2894
3709
  function getSchemaName3(schema) {
2895
3710
  return schema ? `"${schema}"` : '"public"';
@@ -2898,6 +3713,9 @@ function getTableName3({ indexName, schemaName }) {
2898
3713
  const quotedIndexName = `"${indexName}"`;
2899
3714
  return schemaName ? `${schemaName}.${quotedIndexName}` : quotedIndexName;
2900
3715
  }
3716
+ function inPlaceholders(count, startIndex = 1) {
3717
+ return Array.from({ length: count }, (_, i) => `$${i + startIndex}`).join(", ");
3718
+ }
2901
3719
  var MemoryPG = class _MemoryPG extends storage.MemoryStorage {
2902
3720
  #db;
2903
3721
  #schema;
@@ -3024,27 +3842,52 @@ var MemoryPG = class _MemoryPG extends storage.MemoryStorage {
3024
3842
  );
3025
3843
  }
3026
3844
  }
3027
- async listThreadsByResourceId(args) {
3028
- const { resourceId, page = 0, perPage: perPageInput, orderBy } = args;
3029
- if (page < 0) {
3845
+ async listThreads(args) {
3846
+ const { page = 0, perPage: perPageInput, orderBy, filter } = args;
3847
+ try {
3848
+ this.validatePaginationInput(page, perPageInput ?? 100);
3849
+ } catch (error$1) {
3030
3850
  throw new error.MastraError({
3031
- id: storage.createStorageErrorId("PG", "LIST_THREADS_BY_RESOURCE_ID", "INVALID_PAGE"),
3851
+ id: storage.createStorageErrorId("PG", "LIST_THREADS", "INVALID_PAGE"),
3032
3852
  domain: error.ErrorDomain.STORAGE,
3033
3853
  category: error.ErrorCategory.USER,
3034
- text: "Page number must be non-negative",
3035
- details: {
3036
- resourceId,
3037
- page
3038
- }
3854
+ text: error$1 instanceof Error ? error$1.message : "Invalid pagination parameters",
3855
+ details: { page, ...perPageInput !== void 0 && { perPage: perPageInput } }
3039
3856
  });
3040
3857
  }
3041
- const { field, direction } = this.parseOrderBy(orderBy);
3042
3858
  const perPage = storage.normalizePerPage(perPageInput, 100);
3859
+ try {
3860
+ this.validateMetadataKeys(filter?.metadata);
3861
+ } catch (error$1) {
3862
+ throw new error.MastraError({
3863
+ id: storage.createStorageErrorId("PG", "LIST_THREADS", "INVALID_METADATA_KEY"),
3864
+ domain: error.ErrorDomain.STORAGE,
3865
+ category: error.ErrorCategory.USER,
3866
+ text: error$1 instanceof Error ? error$1.message : "Invalid metadata key",
3867
+ details: { metadataKeys: filter?.metadata ? Object.keys(filter.metadata).join(", ") : "" }
3868
+ });
3869
+ }
3870
+ const { field, direction } = this.parseOrderBy(orderBy);
3043
3871
  const { offset, perPage: perPageForResponse } = storage.calculatePagination(page, perPageInput, perPage);
3044
3872
  try {
3045
3873
  const tableName = getTableName3({ indexName: storage.TABLE_THREADS, schemaName: getSchemaName3(this.#schema) });
3046
- const baseQuery = `FROM ${tableName} WHERE "resourceId" = $1`;
3047
- const queryParams = [resourceId];
3874
+ const whereClauses = [];
3875
+ const queryParams = [];
3876
+ let paramIndex = 1;
3877
+ if (filter?.resourceId) {
3878
+ whereClauses.push(`"resourceId" = $${paramIndex}`);
3879
+ queryParams.push(filter.resourceId);
3880
+ paramIndex++;
3881
+ }
3882
+ if (filter?.metadata && Object.keys(filter.metadata).length > 0) {
3883
+ for (const [key, value] of Object.entries(filter.metadata)) {
3884
+ whereClauses.push(`metadata::jsonb @> $${paramIndex}::jsonb`);
3885
+ queryParams.push(JSON.stringify({ [key]: value }));
3886
+ paramIndex++;
3887
+ }
3888
+ }
3889
+ const whereClause = whereClauses.length > 0 ? `WHERE ${whereClauses.join(" AND ")}` : "";
3890
+ const baseQuery = `FROM ${tableName} ${whereClause}`;
3048
3891
  const countQuery = `SELECT COUNT(*) ${baseQuery}`;
3049
3892
  const countResult = await this.#db.client.one(countQuery, queryParams);
3050
3893
  const total = parseInt(countResult.count, 10);
@@ -3058,13 +3901,19 @@ var MemoryPG = class _MemoryPG extends storage.MemoryStorage {
3058
3901
  };
3059
3902
  }
3060
3903
  const limitValue = perPageInput === false ? total : perPage;
3061
- const dataQuery = `SELECT id, "resourceId", title, metadata, "createdAt", "updatedAt" ${baseQuery} ORDER BY "${field}" ${direction} LIMIT $2 OFFSET $3`;
3062
- const rows = await this.#db.client.manyOrNone(dataQuery, [...queryParams, limitValue, offset]);
3904
+ const dataQuery = `SELECT id, "resourceId", title, metadata, "createdAt", "createdAtZ", "updatedAt", "updatedAtZ" ${baseQuery} ORDER BY "${field}" ${direction} LIMIT $${paramIndex} OFFSET $${paramIndex + 1}`;
3905
+ const rows = await this.#db.client.manyOrNone(
3906
+ dataQuery,
3907
+ [...queryParams, limitValue, offset]
3908
+ );
3063
3909
  const threads = (rows || []).map((thread) => ({
3064
- ...thread,
3910
+ id: thread.id,
3911
+ resourceId: thread.resourceId,
3912
+ title: thread.title,
3065
3913
  metadata: typeof thread.metadata === "string" ? JSON.parse(thread.metadata) : thread.metadata,
3066
- createdAt: thread.createdAt,
3067
- updatedAt: thread.updatedAt
3914
+ // Use timezone-aware columns (*Z) for correct UTC timestamps, with fallback for legacy data
3915
+ createdAt: thread.createdAtZ || thread.createdAt,
3916
+ updatedAt: thread.updatedAtZ || thread.updatedAt
3068
3917
  }));
3069
3918
  return {
3070
3919
  threads,
@@ -3076,11 +3925,12 @@ var MemoryPG = class _MemoryPG extends storage.MemoryStorage {
3076
3925
  } catch (error$1) {
3077
3926
  const mastraError = new error.MastraError(
3078
3927
  {
3079
- id: storage.createStorageErrorId("PG", "LIST_THREADS_BY_RESOURCE_ID", "FAILED"),
3928
+ id: storage.createStorageErrorId("PG", "LIST_THREADS", "FAILED"),
3080
3929
  domain: error.ErrorDomain.STORAGE,
3081
3930
  category: error.ErrorCategory.THIRD_PARTY,
3082
3931
  details: {
3083
- resourceId,
3932
+ ...filter?.resourceId && { resourceId: filter.resourceId },
3933
+ hasMetadataFilter: !!filter?.metadata,
3084
3934
  page
3085
3935
  }
3086
3936
  },
@@ -3169,17 +4019,18 @@ var MemoryPG = class _MemoryPG extends storage.MemoryStorage {
3169
4019
  ...metadata
3170
4020
  };
3171
4021
  try {
4022
+ const now = (/* @__PURE__ */ new Date()).toISOString();
3172
4023
  const thread = await this.#db.client.one(
3173
4024
  `UPDATE ${threadTableName}
3174
4025
  SET
3175
4026
  title = $1,
3176
4027
  metadata = $2,
3177
4028
  "updatedAt" = $3,
3178
- "updatedAtZ" = $3
3179
- WHERE id = $4
4029
+ "updatedAtZ" = $4
4030
+ WHERE id = $5
3180
4031
  RETURNING *
3181
4032
  `,
3182
- [title, mergedMetadata, (/* @__PURE__ */ new Date()).toISOString(), id]
4033
+ [title, mergedMetadata, now, now, id]
3183
4034
  );
3184
4035
  return {
3185
4036
  id: thread.id,
@@ -3322,7 +4173,7 @@ var MemoryPG = class _MemoryPG extends storage.MemoryStorage {
3322
4173
  const tableName = getTableName3({ indexName: storage.TABLE_MESSAGES, schemaName: getSchemaName3(this.#schema) });
3323
4174
  const query = `
3324
4175
  ${selectStatement} FROM ${tableName}
3325
- WHERE id IN (${messageIds.map((_, i) => `$${i + 1}`).join(", ")})
4176
+ WHERE id IN (${inPlaceholders(messageIds.length)})
3326
4177
  ORDER BY "createdAt" DESC
3327
4178
  `;
3328
4179
  const resultRows = await this.#db.client.manyOrNone(query, messageIds);
@@ -3383,8 +4234,7 @@ var MemoryPG = class _MemoryPG extends storage.MemoryStorage {
3383
4234
  const orderByStatement = `ORDER BY "${field}" ${direction}`;
3384
4235
  const selectStatement = `SELECT id, content, role, type, "createdAt", "createdAtZ", thread_id AS "threadId", "resourceId"`;
3385
4236
  const tableName = getTableName3({ indexName: storage.TABLE_MESSAGES, schemaName: getSchemaName3(this.#schema) });
3386
- const threadPlaceholders = threadIds.map((_, i) => `$${i + 1}`).join(", ");
3387
- const conditions = [`thread_id IN (${threadPlaceholders})`];
4237
+ const conditions = [`thread_id IN (${inPlaceholders(threadIds.length)})`];
3388
4238
  const queryParams = [...threadIds];
3389
4239
  let paramIndex = threadIds.length + 1;
3390
4240
  if (resourceId) {
@@ -3392,11 +4242,13 @@ var MemoryPG = class _MemoryPG extends storage.MemoryStorage {
3392
4242
  queryParams.push(resourceId);
3393
4243
  }
3394
4244
  if (filter?.dateRange?.start) {
3395
- conditions.push(`"createdAt" >= $${paramIndex++}`);
4245
+ const startOp = filter.dateRange.startExclusive ? ">" : ">=";
4246
+ conditions.push(`"createdAt" ${startOp} $${paramIndex++}`);
3396
4247
  queryParams.push(filter.dateRange.start);
3397
4248
  }
3398
4249
  if (filter?.dateRange?.end) {
3399
- conditions.push(`"createdAt" <= $${paramIndex++}`);
4250
+ const endOp = filter.dateRange.endExclusive ? "<" : "<=";
4251
+ conditions.push(`"createdAt" ${endOp} $${paramIndex++}`);
3400
4252
  queryParams.push(filter.dateRange.end);
3401
4253
  }
3402
4254
  const whereClause = conditions.length > 0 ? `WHERE ${conditions.join(" AND ")}` : "";
@@ -3541,14 +4393,15 @@ var MemoryPG = class _MemoryPG extends storage.MemoryStorage {
3541
4393
  );
3542
4394
  });
3543
4395
  const threadTableName = getTableName3({ indexName: storage.TABLE_THREADS, schemaName: getSchemaName3(this.#schema) });
4396
+ const nowStr = (/* @__PURE__ */ new Date()).toISOString();
3544
4397
  const threadUpdate = t.none(
3545
4398
  `UPDATE ${threadTableName}
3546
4399
  SET
3547
4400
  "updatedAt" = $1,
3548
- "updatedAtZ" = $1
3549
- WHERE id = $2
4401
+ "updatedAtZ" = $2
4402
+ WHERE id = $3
3550
4403
  `,
3551
- [(/* @__PURE__ */ new Date()).toISOString(), threadId]
4404
+ [nowStr, nowStr, threadId]
3552
4405
  );
3553
4406
  await Promise.all([...messageInserts, threadUpdate]);
3554
4407
  });
@@ -3585,8 +4438,8 @@ var MemoryPG = class _MemoryPG extends storage.MemoryStorage {
3585
4438
  return [];
3586
4439
  }
3587
4440
  const messageIds = messages.map((m) => m.id);
3588
- const selectQuery = `SELECT id, content, role, type, "createdAt", "createdAtZ", thread_id AS "threadId", "resourceId" FROM ${getTableName3({ indexName: storage.TABLE_MESSAGES, schemaName: getSchemaName3(this.#schema) })} WHERE id IN ($1:list)`;
3589
- const existingMessagesDb = await this.#db.client.manyOrNone(selectQuery, [messageIds]);
4441
+ const selectQuery = `SELECT id, content, role, type, "createdAt", "createdAtZ", thread_id AS "threadId", "resourceId" FROM ${getTableName3({ indexName: storage.TABLE_MESSAGES, schemaName: getSchemaName3(this.#schema) })} WHERE id IN (${inPlaceholders(messageIds.length)})`;
4442
+ const existingMessagesDb = await this.#db.client.manyOrNone(selectQuery, messageIds);
3590
4443
  if (existingMessagesDb.length === 0) {
3591
4444
  return [];
3592
4445
  }
@@ -3647,10 +4500,11 @@ var MemoryPG = class _MemoryPG extends storage.MemoryStorage {
3647
4500
  }
3648
4501
  }
3649
4502
  if (threadIdsToUpdate.size > 0) {
4503
+ const threadIds = Array.from(threadIdsToUpdate);
3650
4504
  queries.push(
3651
4505
  t.none(
3652
- `UPDATE ${getTableName3({ indexName: storage.TABLE_THREADS, schemaName: getSchemaName3(this.#schema) })} SET "updatedAt" = NOW(), "updatedAtZ" = NOW() WHERE id IN ($1:list)`,
3653
- [Array.from(threadIdsToUpdate)]
4506
+ `UPDATE ${getTableName3({ indexName: storage.TABLE_THREADS, schemaName: getSchemaName3(this.#schema) })} SET "updatedAt" = NOW(), "updatedAtZ" = NOW() WHERE id IN (${inPlaceholders(threadIds.length)})`,
4507
+ threadIds
3654
4508
  )
3655
4509
  );
3656
4510
  }
@@ -3658,7 +4512,7 @@ var MemoryPG = class _MemoryPG extends storage.MemoryStorage {
3658
4512
  await t.batch(queries);
3659
4513
  }
3660
4514
  });
3661
- const updatedMessages = await this.#db.client.manyOrNone(selectQuery, [messageIds]);
4515
+ const updatedMessages = await this.#db.client.manyOrNone(selectQuery, messageIds);
3662
4516
  return (updatedMessages || []).map((row) => {
3663
4517
  const message = this.normalizeMessageRow(row);
3664
4518
  if (typeof message.content === "string") {
@@ -3770,15 +4624,159 @@ var MemoryPG = class _MemoryPG extends storage.MemoryStorage {
3770
4624
  values.push(JSON.stringify(updatedResource.metadata));
3771
4625
  paramIndex++;
3772
4626
  }
3773
- updates.push(`"updatedAt" = $${paramIndex}`);
3774
- values.push(updatedResource.updatedAt.toISOString());
4627
+ const updatedAtStr = updatedResource.updatedAt.toISOString();
4628
+ updates.push(`"updatedAt" = $${paramIndex++}`);
4629
+ values.push(updatedAtStr);
3775
4630
  updates.push(`"updatedAtZ" = $${paramIndex++}`);
3776
- values.push(updatedResource.updatedAt.toISOString());
3777
- paramIndex++;
4631
+ values.push(updatedAtStr);
3778
4632
  values.push(resourceId);
3779
4633
  await this.#db.client.none(`UPDATE ${tableName} SET ${updates.join(", ")} WHERE id = $${paramIndex}`, values);
3780
4634
  return updatedResource;
3781
4635
  }
4636
+ async cloneThread(args) {
4637
+ const { sourceThreadId, newThreadId: providedThreadId, resourceId, title, metadata, options } = args;
4638
+ const sourceThread = await this.getThreadById({ threadId: sourceThreadId });
4639
+ if (!sourceThread) {
4640
+ throw new error.MastraError({
4641
+ id: storage.createStorageErrorId("PG", "CLONE_THREAD", "SOURCE_NOT_FOUND"),
4642
+ domain: error.ErrorDomain.STORAGE,
4643
+ category: error.ErrorCategory.USER,
4644
+ text: `Source thread with id ${sourceThreadId} not found`,
4645
+ details: { sourceThreadId }
4646
+ });
4647
+ }
4648
+ const newThreadId = providedThreadId || crypto.randomUUID();
4649
+ const existingThread = await this.getThreadById({ threadId: newThreadId });
4650
+ if (existingThread) {
4651
+ throw new error.MastraError({
4652
+ id: storage.createStorageErrorId("PG", "CLONE_THREAD", "THREAD_EXISTS"),
4653
+ domain: error.ErrorDomain.STORAGE,
4654
+ category: error.ErrorCategory.USER,
4655
+ text: `Thread with id ${newThreadId} already exists`,
4656
+ details: { newThreadId }
4657
+ });
4658
+ }
4659
+ const threadTableName = getTableName3({ indexName: storage.TABLE_THREADS, schemaName: getSchemaName3(this.#schema) });
4660
+ const messageTableName = getTableName3({ indexName: storage.TABLE_MESSAGES, schemaName: getSchemaName3(this.#schema) });
4661
+ try {
4662
+ return await this.#db.client.tx(async (t) => {
4663
+ let messageQuery = `SELECT id, content, role, type, "createdAt", "createdAtZ", thread_id AS "threadId", "resourceId"
4664
+ FROM ${messageTableName} WHERE thread_id = $1`;
4665
+ const messageParams = [sourceThreadId];
4666
+ let paramIndex = 2;
4667
+ if (options?.messageFilter?.startDate) {
4668
+ messageQuery += ` AND "createdAt" >= $${paramIndex++}`;
4669
+ messageParams.push(options.messageFilter.startDate);
4670
+ }
4671
+ if (options?.messageFilter?.endDate) {
4672
+ messageQuery += ` AND "createdAt" <= $${paramIndex++}`;
4673
+ messageParams.push(options.messageFilter.endDate);
4674
+ }
4675
+ if (options?.messageFilter?.messageIds && options.messageFilter.messageIds.length > 0) {
4676
+ messageQuery += ` AND id IN (${options.messageFilter.messageIds.map(() => `$${paramIndex++}`).join(", ")})`;
4677
+ messageParams.push(...options.messageFilter.messageIds);
4678
+ }
4679
+ messageQuery += ` ORDER BY "createdAt" ASC`;
4680
+ if (options?.messageLimit && options.messageLimit > 0) {
4681
+ const limitQuery = `SELECT * FROM (${messageQuery.replace('ORDER BY "createdAt" ASC', 'ORDER BY "createdAt" DESC')} LIMIT $${paramIndex}) AS limited ORDER BY "createdAt" ASC`;
4682
+ messageParams.push(options.messageLimit);
4683
+ messageQuery = limitQuery;
4684
+ }
4685
+ const sourceMessages = await t.manyOrNone(messageQuery, messageParams);
4686
+ const now = /* @__PURE__ */ new Date();
4687
+ const lastMessageId = sourceMessages.length > 0 ? sourceMessages[sourceMessages.length - 1].id : void 0;
4688
+ const cloneMetadata = {
4689
+ sourceThreadId,
4690
+ clonedAt: now,
4691
+ ...lastMessageId && { lastMessageId }
4692
+ };
4693
+ const newThread = {
4694
+ id: newThreadId,
4695
+ resourceId: resourceId || sourceThread.resourceId,
4696
+ title: title || (sourceThread.title ? `Clone of ${sourceThread.title}` : void 0),
4697
+ metadata: {
4698
+ ...metadata,
4699
+ clone: cloneMetadata
4700
+ },
4701
+ createdAt: now,
4702
+ updatedAt: now
4703
+ };
4704
+ await t.none(
4705
+ `INSERT INTO ${threadTableName} (
4706
+ id,
4707
+ "resourceId",
4708
+ title,
4709
+ metadata,
4710
+ "createdAt",
4711
+ "createdAtZ",
4712
+ "updatedAt",
4713
+ "updatedAtZ"
4714
+ ) VALUES ($1, $2, $3, $4, $5, $6, $7, $8)`,
4715
+ [
4716
+ newThread.id,
4717
+ newThread.resourceId,
4718
+ newThread.title,
4719
+ newThread.metadata ? JSON.stringify(newThread.metadata) : null,
4720
+ now,
4721
+ now,
4722
+ now,
4723
+ now
4724
+ ]
4725
+ );
4726
+ const clonedMessages = [];
4727
+ const targetResourceId = resourceId || sourceThread.resourceId;
4728
+ for (const sourceMsg of sourceMessages) {
4729
+ const newMessageId = crypto.randomUUID();
4730
+ const normalizedMsg = this.normalizeMessageRow(sourceMsg);
4731
+ let parsedContent = normalizedMsg.content;
4732
+ try {
4733
+ parsedContent = JSON.parse(normalizedMsg.content);
4734
+ } catch {
4735
+ }
4736
+ await t.none(
4737
+ `INSERT INTO ${messageTableName} (id, thread_id, content, "createdAt", "createdAtZ", role, type, "resourceId")
4738
+ VALUES ($1, $2, $3, $4, $5, $6, $7, $8)`,
4739
+ [
4740
+ newMessageId,
4741
+ newThreadId,
4742
+ typeof normalizedMsg.content === "string" ? normalizedMsg.content : JSON.stringify(normalizedMsg.content),
4743
+ normalizedMsg.createdAt,
4744
+ normalizedMsg.createdAt,
4745
+ normalizedMsg.role,
4746
+ normalizedMsg.type || "v2",
4747
+ targetResourceId
4748
+ ]
4749
+ );
4750
+ clonedMessages.push({
4751
+ id: newMessageId,
4752
+ threadId: newThreadId,
4753
+ content: parsedContent,
4754
+ role: normalizedMsg.role,
4755
+ type: normalizedMsg.type,
4756
+ createdAt: new Date(normalizedMsg.createdAt),
4757
+ resourceId: targetResourceId
4758
+ });
4759
+ }
4760
+ return {
4761
+ thread: newThread,
4762
+ clonedMessages
4763
+ };
4764
+ });
4765
+ } catch (error$1) {
4766
+ if (error$1 instanceof error.MastraError) {
4767
+ throw error$1;
4768
+ }
4769
+ throw new error.MastraError(
4770
+ {
4771
+ id: storage.createStorageErrorId("PG", "CLONE_THREAD", "FAILED"),
4772
+ domain: error.ErrorDomain.STORAGE,
4773
+ category: error.ErrorCategory.THIRD_PARTY,
4774
+ details: { sourceThreadId, newThreadId }
4775
+ },
4776
+ error$1
4777
+ );
4778
+ }
4779
+ }
3782
4780
  };
3783
4781
  var ObservabilityPG = class _ObservabilityPG extends storage.ObservabilityStorage {
3784
4782
  #db;
@@ -3896,6 +4894,22 @@ var ObservabilityPG = class _ObservabilityPG extends storage.ObservabilityStorag
3896
4894
  }
3897
4895
  }
3898
4896
  }
4897
+ /**
4898
+ * Manually run the spans migration to deduplicate and add the unique constraint.
4899
+ * This is intended to be called from the CLI when duplicates are detected.
4900
+ *
4901
+ * @returns Migration result with status and details
4902
+ */
4903
+ async migrateSpans() {
4904
+ return this.#db.migrateSpans();
4905
+ }
4906
+ /**
4907
+ * Check migration status for the spans table.
4908
+ * Returns information about whether migration is needed.
4909
+ */
4910
+ async checkSpansMigrationStatus() {
4911
+ return this.#db.checkSpansMigrationStatus();
4912
+ }
3899
4913
  async dangerouslyClearAll() {
3900
4914
  await this.#db.clearTable({ tableName: storage.TABLE_SPANS });
3901
4915
  }
@@ -4277,11 +5291,13 @@ var ObservabilityPG = class _ObservabilityPG extends storage.ObservabilityStorag
4277
5291
  perPage,
4278
5292
  hasMore: (page + 1) * perPage < count
4279
5293
  },
4280
- spans: spans.map(
4281
- (span) => transformFromSqlRow({
4282
- tableName: storage.TABLE_SPANS,
4283
- sqlRow: span
4284
- })
5294
+ spans: storage.toTraceSpans(
5295
+ spans.map(
5296
+ (span) => transformFromSqlRow({
5297
+ tableName: storage.TABLE_SPANS,
5298
+ sqlRow: span
5299
+ })
5300
+ )
4285
5301
  )
4286
5302
  };
4287
5303
  } catch (error$1) {
@@ -4408,6 +5424,11 @@ var ScoresPG = class _ScoresPG extends storage.ScoresStorage {
4408
5424
  }
4409
5425
  async init() {
4410
5426
  await this.#db.createTable({ tableName: storage.TABLE_SCORERS, schema: storage.TABLE_SCHEMAS[storage.TABLE_SCORERS] });
5427
+ await this.#db.alterTable({
5428
+ tableName: storage.TABLE_SCORERS,
5429
+ schema: storage.TABLE_SCHEMAS[storage.TABLE_SCORERS],
5430
+ ifNotExists: ["spanId", "requestContext"]
5431
+ });
4411
5432
  await this.createDefaultIndexes();
4412
5433
  await this.createCustomIndexes();
4413
5434
  }
@@ -4759,23 +5780,8 @@ function getTableName5({ indexName, schemaName }) {
4759
5780
  const quotedIndexName = `"${indexName}"`;
4760
5781
  return schemaName ? `${schemaName}.${quotedIndexName}` : quotedIndexName;
4761
5782
  }
4762
- function parseWorkflowRun(row) {
4763
- let parsedSnapshot = row.snapshot;
4764
- if (typeof parsedSnapshot === "string") {
4765
- try {
4766
- parsedSnapshot = JSON.parse(row.snapshot);
4767
- } catch (e) {
4768
- console.warn(`Failed to parse snapshot for workflow ${row.workflow_name}: ${e}`);
4769
- }
4770
- }
4771
- return {
4772
- workflowName: row.workflow_name,
4773
- runId: row.run_id,
4774
- snapshot: parsedSnapshot,
4775
- resourceId: row.resourceId,
4776
- createdAt: new Date(row.createdAtZ || row.createdAt),
4777
- updatedAt: new Date(row.updatedAtZ || row.updatedAt)
4778
- };
5783
+ function sanitizeJsonForPg(jsonString) {
5784
+ return jsonString.replace(/\\u(0000|[Dd][89A-Fa-f][0-9A-Fa-f]{2})/g, "");
4779
5785
  }
4780
5786
  var WorkflowsPG = class _WorkflowsPG extends storage.WorkflowsStorage {
4781
5787
  #db;
@@ -4792,6 +5798,24 @@ var WorkflowsPG = class _WorkflowsPG extends storage.WorkflowsStorage {
4792
5798
  this.#skipDefaultIndexes = skipDefaultIndexes;
4793
5799
  this.#indexes = indexes?.filter((idx) => _WorkflowsPG.MANAGED_TABLES.includes(idx.table));
4794
5800
  }
5801
+ parseWorkflowRun(row) {
5802
+ let parsedSnapshot = row.snapshot;
5803
+ if (typeof parsedSnapshot === "string") {
5804
+ try {
5805
+ parsedSnapshot = JSON.parse(row.snapshot);
5806
+ } catch (e) {
5807
+ this.logger.warn(`Failed to parse snapshot for workflow ${row.workflow_name}: ${e}`);
5808
+ }
5809
+ }
5810
+ return {
5811
+ workflowName: row.workflow_name,
5812
+ runId: row.run_id,
5813
+ snapshot: parsedSnapshot,
5814
+ resourceId: row.resourceId,
5815
+ createdAt: new Date(row.createdAtZ || row.createdAt),
5816
+ updatedAt: new Date(row.updatedAtZ || row.updatedAt)
5817
+ };
5818
+ }
4795
5819
  /**
4796
5820
  * Returns default index definitions for the workflows domain tables.
4797
5821
  * Currently no default indexes are defined for workflows.
@@ -4864,12 +5888,13 @@ var WorkflowsPG = class _WorkflowsPG extends storage.WorkflowsStorage {
4864
5888
  const now = /* @__PURE__ */ new Date();
4865
5889
  const createdAtValue = createdAt ? createdAt : now;
4866
5890
  const updatedAtValue = updatedAt ? updatedAt : now;
5891
+ const sanitizedSnapshot = sanitizeJsonForPg(JSON.stringify(snapshot));
4867
5892
  await this.#db.client.none(
4868
5893
  `INSERT INTO ${getTableName5({ indexName: storage.TABLE_WORKFLOW_SNAPSHOT, schemaName: getSchemaName5(this.#schema) })} (workflow_name, run_id, "resourceId", snapshot, "createdAt", "updatedAt")
4869
5894
  VALUES ($1, $2, $3, $4, $5, $6)
4870
5895
  ON CONFLICT (workflow_name, run_id) DO UPDATE
4871
5896
  SET "resourceId" = $3, snapshot = $4, "updatedAt" = $6`,
4872
- [workflowName, runId, resourceId, JSON.stringify(snapshot), createdAtValue, updatedAtValue]
5897
+ [workflowName, runId, resourceId, sanitizedSnapshot, createdAtValue, updatedAtValue]
4873
5898
  );
4874
5899
  } catch (error$1) {
4875
5900
  throw new error.MastraError(
@@ -4932,7 +5957,7 @@ var WorkflowsPG = class _WorkflowsPG extends storage.WorkflowsStorage {
4932
5957
  if (!result) {
4933
5958
  return null;
4934
5959
  }
4935
- return parseWorkflowRun(result);
5960
+ return this.parseWorkflowRun(result);
4936
5961
  } catch (error$1) {
4937
5962
  throw new error.MastraError(
4938
5963
  {
@@ -4988,7 +6013,9 @@ var WorkflowsPG = class _WorkflowsPG extends storage.WorkflowsStorage {
4988
6013
  paramIndex++;
4989
6014
  }
4990
6015
  if (status) {
4991
- conditions.push(`snapshot::jsonb ->> 'status' = $${paramIndex}`);
6016
+ conditions.push(
6017
+ `regexp_replace(snapshot::text, '\\\\u(0000|[Dd][89A-Fa-f][0-9A-Fa-f]{2})', '', 'g')::jsonb ->> 'status' = $${paramIndex}`
6018
+ );
4992
6019
  values.push(status);
4993
6020
  paramIndex++;
4994
6021
  }
@@ -5033,7 +6060,7 @@ var WorkflowsPG = class _WorkflowsPG extends storage.WorkflowsStorage {
5033
6060
  const queryValues = usePagination ? [...values, normalizedPerPage, offset] : values;
5034
6061
  const result = await this.#db.client.manyOrNone(query, queryValues);
5035
6062
  const runs = (result || []).map((row) => {
5036
- return parseWorkflowRun(row);
6063
+ return this.parseWorkflowRun(row);
5037
6064
  });
5038
6065
  return { runs, total: total || runs.length };
5039
6066
  } catch (error$1) {
@@ -5053,9 +6080,12 @@ var WorkflowsPG = class _WorkflowsPG extends storage.WorkflowsStorage {
5053
6080
  };
5054
6081
 
5055
6082
  // src/storage/index.ts
5056
- var PostgresStore = class extends storage.MastraStorage {
6083
+ var DEFAULT_MAX_CONNECTIONS = 20;
6084
+ var DEFAULT_IDLE_TIMEOUT_MS = 3e4;
6085
+ var PostgresStore = class extends storage.MastraCompositeStore {
6086
+ #pool;
5057
6087
  #db;
5058
- #pgp;
6088
+ #ownsPool;
5059
6089
  schema;
5060
6090
  isInitialized = false;
5061
6091
  stores;
@@ -5063,60 +6093,27 @@ var PostgresStore = class extends storage.MastraStorage {
5063
6093
  try {
5064
6094
  validateConfig("PostgresStore", config);
5065
6095
  super({ id: config.id, name: "PostgresStore", disableInit: config.disableInit });
5066
- this.schema = config.schemaName || "public";
5067
- this.#pgp = pgPromise__default.default();
5068
- if (isClientConfig(config)) {
5069
- this.#db = config.client;
6096
+ this.schema = utils.parseSqlIdentifier(config.schemaName || "public", "schema name");
6097
+ if (isPoolConfig(config)) {
6098
+ this.#pool = config.pool;
6099
+ this.#ownsPool = false;
5070
6100
  } else {
5071
- let pgConfig;
5072
- if (isConnectionStringConfig(config)) {
5073
- pgConfig = {
5074
- id: config.id,
5075
- connectionString: config.connectionString,
5076
- max: config.max,
5077
- idleTimeoutMillis: config.idleTimeoutMillis,
5078
- ssl: config.ssl
5079
- };
5080
- } else if (isCloudSqlConfig(config)) {
5081
- pgConfig = {
5082
- ...config,
5083
- id: config.id,
5084
- max: config.max,
5085
- idleTimeoutMillis: config.idleTimeoutMillis
5086
- };
5087
- } else if (isHostConfig(config)) {
5088
- pgConfig = {
5089
- id: config.id,
5090
- host: config.host,
5091
- port: config.port,
5092
- database: config.database,
5093
- user: config.user,
5094
- password: config.password,
5095
- ssl: config.ssl,
5096
- max: config.max,
5097
- idleTimeoutMillis: config.idleTimeoutMillis
5098
- };
5099
- } else {
5100
- throw new Error(
5101
- "PostgresStore: invalid config. Provide either {client}, {connectionString}, {host,port,database,user,password}, or a pg ClientConfig (e.g., Cloud SQL connector with `stream`)."
5102
- );
5103
- }
5104
- this.#db = this.#pgp(pgConfig);
5105
- }
5106
- const skipDefaultIndexes = config.skipDefaultIndexes;
5107
- const indexes = config.indexes;
5108
- const domainConfig = { client: this.#db, schemaName: this.schema, skipDefaultIndexes, indexes };
5109
- const scores = new ScoresPG(domainConfig);
5110
- const workflows = new WorkflowsPG(domainConfig);
5111
- const memory = new MemoryPG(domainConfig);
5112
- const observability = new ObservabilityPG(domainConfig);
5113
- const agents = new AgentsPG(domainConfig);
6101
+ this.#pool = this.createPool(config);
6102
+ this.#ownsPool = true;
6103
+ }
6104
+ this.#db = new PoolAdapter(this.#pool);
6105
+ const domainConfig = {
6106
+ client: this.#db,
6107
+ schemaName: this.schema,
6108
+ skipDefaultIndexes: config.skipDefaultIndexes,
6109
+ indexes: config.indexes
6110
+ };
5114
6111
  this.stores = {
5115
- scores,
5116
- workflows,
5117
- memory,
5118
- observability,
5119
- agents
6112
+ scores: new ScoresPG(domainConfig),
6113
+ workflows: new WorkflowsPG(domainConfig),
6114
+ memory: new MemoryPG(domainConfig),
6115
+ observability: new ObservabilityPG(domainConfig),
6116
+ agents: new AgentsPG(domainConfig)
5120
6117
  };
5121
6118
  } catch (e) {
5122
6119
  throw new error.MastraError(
@@ -5129,6 +6126,32 @@ var PostgresStore = class extends storage.MastraStorage {
5129
6126
  );
5130
6127
  }
5131
6128
  }
6129
+ createPool(config) {
6130
+ if (isConnectionStringConfig(config)) {
6131
+ return new pg.Pool({
6132
+ connectionString: config.connectionString,
6133
+ ssl: config.ssl,
6134
+ max: config.max ?? DEFAULT_MAX_CONNECTIONS,
6135
+ idleTimeoutMillis: config.idleTimeoutMillis ?? DEFAULT_IDLE_TIMEOUT_MS
6136
+ });
6137
+ }
6138
+ if (isHostConfig(config)) {
6139
+ return new pg.Pool({
6140
+ host: config.host,
6141
+ port: config.port,
6142
+ database: config.database,
6143
+ user: config.user,
6144
+ password: config.password,
6145
+ ssl: config.ssl,
6146
+ max: config.max ?? DEFAULT_MAX_CONNECTIONS,
6147
+ idleTimeoutMillis: config.idleTimeoutMillis ?? DEFAULT_IDLE_TIMEOUT_MS
6148
+ });
6149
+ }
6150
+ if (isCloudSqlConfig(config)) {
6151
+ return new pg.Pool(config);
6152
+ }
6153
+ throw new Error("PostgresStore: invalid config");
6154
+ }
5132
6155
  async init() {
5133
6156
  if (this.isInitialized) {
5134
6157
  return;
@@ -5138,6 +6161,9 @@ var PostgresStore = class extends storage.MastraStorage {
5138
6161
  await super.init();
5139
6162
  } catch (error$1) {
5140
6163
  this.isInitialized = false;
6164
+ if (error$1 instanceof error.MastraError) {
6165
+ throw error$1;
6166
+ }
5141
6167
  throw new error.MastraError(
5142
6168
  {
5143
6169
  id: storage.createStorageErrorId("PG", "INIT", "FAILED"),
@@ -5148,32 +6174,32 @@ var PostgresStore = class extends storage.MastraStorage {
5148
6174
  );
5149
6175
  }
5150
6176
  }
6177
+ /**
6178
+ * Database client for executing queries.
6179
+ *
6180
+ * @example
6181
+ * ```typescript
6182
+ * const rows = await store.db.any('SELECT * FROM users WHERE active = $1', [true]);
6183
+ * const user = await store.db.one('SELECT * FROM users WHERE id = $1', [userId]);
6184
+ * ```
6185
+ */
5151
6186
  get db() {
5152
6187
  return this.#db;
5153
6188
  }
5154
- get pgp() {
5155
- return this.#pgp;
5156
- }
5157
- get supports() {
5158
- return {
5159
- selectByIncludeResourceScope: true,
5160
- resourceWorkingMemory: true,
5161
- hasColumn: true,
5162
- createTable: true,
5163
- deleteMessages: true,
5164
- observability: true,
5165
- indexManagement: true,
5166
- listScoresBySpan: true,
5167
- agents: true
5168
- };
6189
+ /**
6190
+ * The underlying pg.Pool for direct database access or ORM integration.
6191
+ */
6192
+ get pool() {
6193
+ return this.#pool;
5169
6194
  }
5170
6195
  /**
5171
- * Closes the pg-promise connection pool.
5172
- *
5173
- * This will close ALL connections in the pool, including pre-configured clients.
6196
+ * Closes the connection pool if it was created by this store.
6197
+ * If a pool was passed in via config, it will not be closed.
5174
6198
  */
5175
6199
  async close() {
5176
- this.pgp.end();
6200
+ if (this.#ownsPool) {
6201
+ await this.#pool.end();
6202
+ }
5177
6203
  }
5178
6204
  };
5179
6205
 
@@ -5276,8 +6302,15 @@ Example Complex Query:
5276
6302
  ]
5277
6303
  }`;
5278
6304
 
6305
+ exports.AgentsPG = AgentsPG;
6306
+ exports.MemoryPG = MemoryPG;
6307
+ exports.ObservabilityPG = ObservabilityPG;
5279
6308
  exports.PGVECTOR_PROMPT = PGVECTOR_PROMPT;
5280
6309
  exports.PgVector = PgVector;
6310
+ exports.PoolAdapter = PoolAdapter;
5281
6311
  exports.PostgresStore = PostgresStore;
6312
+ exports.ScoresPG = ScoresPG;
6313
+ exports.WorkflowsPG = WorkflowsPG;
6314
+ exports.exportSchemas = exportSchemas;
5282
6315
  //# sourceMappingURL=index.cjs.map
5283
6316
  //# sourceMappingURL=index.cjs.map