@mastra/pg 1.0.0-beta.9 → 1.0.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/CHANGELOG.md +1404 -0
- package/dist/docs/README.md +36 -0
- package/dist/docs/SKILL.md +37 -0
- package/dist/docs/SOURCE_MAP.json +6 -0
- package/dist/docs/memory/01-storage.md +233 -0
- package/dist/docs/memory/02-working-memory.md +390 -0
- package/dist/docs/memory/03-semantic-recall.md +233 -0
- package/dist/docs/memory/04-reference.md +133 -0
- package/dist/docs/processors/01-reference.md +297 -0
- package/dist/docs/rag/01-overview.md +74 -0
- package/dist/docs/rag/02-vector-databases.md +643 -0
- package/dist/docs/rag/03-retrieval.md +548 -0
- package/dist/docs/rag/04-reference.md +369 -0
- package/dist/docs/storage/01-reference.md +828 -0
- package/dist/docs/tools/01-reference.md +440 -0
- package/dist/docs/vectors/01-reference.md +307 -0
- package/dist/index.cjs +1003 -223
- package/dist/index.cjs.map +1 -1
- package/dist/index.d.ts +1 -1
- package/dist/index.d.ts.map +1 -1
- package/dist/index.js +1000 -225
- package/dist/index.js.map +1 -1
- package/dist/shared/config.d.ts +61 -66
- package/dist/shared/config.d.ts.map +1 -1
- package/dist/storage/client.d.ts +91 -0
- package/dist/storage/client.d.ts.map +1 -0
- package/dist/storage/db/index.d.ts +82 -17
- package/dist/storage/db/index.d.ts.map +1 -1
- package/dist/storage/domains/memory/index.d.ts +3 -2
- package/dist/storage/domains/memory/index.d.ts.map +1 -1
- package/dist/storage/domains/observability/index.d.ts +23 -0
- package/dist/storage/domains/observability/index.d.ts.map +1 -1
- package/dist/storage/domains/scores/index.d.ts.map +1 -1
- package/dist/storage/domains/workflows/index.d.ts +1 -0
- package/dist/storage/domains/workflows/index.d.ts.map +1 -1
- package/dist/storage/index.d.ts +44 -17
- package/dist/storage/index.d.ts.map +1 -1
- package/dist/storage/test-utils.d.ts.map +1 -1
- package/dist/vector/index.d.ts.map +1 -1
- package/dist/vector/sql-builder.d.ts.map +1 -1
- package/package.json +11 -11
package/dist/index.js CHANGED

@@ -1,12 +1,12 @@
 import { MastraError, ErrorCategory, ErrorDomain } from '@mastra/core/error';
-import { createVectorErrorId,
+import { createVectorErrorId, TABLE_SCHEMAS, AgentsStorage, TABLE_AGENTS, createStorageErrorId, normalizePerPage, calculatePagination, MemoryStorage, TABLE_THREADS, TABLE_MESSAGES, TABLE_RESOURCES, ObservabilityStorage, TABLE_SPANS, listTracesArgsSchema, ScoresStorage, TABLE_SCORERS, WorkflowsStorage, TABLE_WORKFLOW_SNAPSHOT, MastraCompositeStore, TraceStatus, getDefaultValue, transformScoreRow as transformScoreRow$1, getSqlType } from '@mastra/core/storage';
 import { parseSqlIdentifier, parseFieldKey } from '@mastra/core/utils';
-import { MastraVector } from '@mastra/core/vector';
+import { MastraVector, validateTopK, validateUpsertInput } from '@mastra/core/vector';
 import { Mutex } from 'async-mutex';
 import * as pg from 'pg';
+import { Pool } from 'pg';
 import xxhash from 'xxhash-wasm';
 import { BaseFilterTranslator } from '@mastra/core/vector/filter';
-import pgPromise from 'pg-promise';
 import { MastraBase } from '@mastra/core/base';
 import { MessageList } from '@mastra/core/agent';
 import { saveScorePayloadSchema } from '@mastra/core/evals';
@@ -14,8 +14,11 @@ import { saveScorePayloadSchema } from '@mastra/core/evals';
 // src/vector/index.ts
 
 // src/shared/config.ts
+var isPoolConfig = (cfg) => {
+return "pool" in cfg;
+};
 var isConnectionStringConfig = (cfg) => {
-return "connectionString" in cfg;
+return "connectionString" in cfg && typeof cfg.connectionString === "string";
 };
 var isHostConfig = (cfg) => {
 return "host" in cfg && "database" in cfg && "user" in cfg && "password" in cfg;
@@ -23,16 +26,13 @@ var isHostConfig = (cfg) => {
 var isCloudSqlConfig = (cfg) => {
 return "stream" in cfg || "password" in cfg && typeof cfg.password === "function";
 };
-var isClientConfig = (cfg) => {
-return "client" in cfg;
-};
 var validateConfig = (name, config) => {
 if (!config.id || typeof config.id !== "string" || config.id.trim() === "") {
 throw new Error(`${name}: id must be provided and cannot be empty.`);
 }
-if (
-if (!config.
-throw new Error(`${name}:
+if (isPoolConfig(config)) {
+if (!config.pool) {
+throw new Error(`${name}: pool must be provided when using pool config.`);
 }
 return;
 }
@@ -53,7 +53,7 @@ var validateConfig = (name, config) => {
 }
 } else {
 throw new Error(
-`${name}: invalid config. Provide either {
+`${name}: invalid config. Provide either {pool}, {connectionString}, {host,port,database,user,password}, or a pg ClientConfig (e.g., Cloud SQL connector with \`stream\`).`
 );
 }
 };
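The rewritten error message spells out the accepted config shapes: `{pool}`, `{connectionString}`, `{host,port,database,user,password}`, or a pg ClientConfig (Cloud SQL connector). A minimal sketch of the two most common shapes using the `PgVector` class that appears later in this diff; only the fields checked by `validateConfig` are shown, anything beyond them would be an assumption:

```ts
import { PgVector } from '@mastra/pg';

// Connection-string form: a non-empty `id` plus a string `connectionString`.
const byConnectionString = new PgVector({
  id: 'pg-vector',
  connectionString: process.env.DATABASE_URL!,
});

// Host form: discrete host/port/database/user/password fields.
const byHost = new PgVector({
  id: 'pg-vector',
  host: 'localhost',
  port: 5432,
  database: 'mastra',
  user: 'postgres',
  password: 'postgres',
});
```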
@@ -261,8 +261,14 @@ var FILTER_OPERATORS = {
 };
 },
 // Element Operators
-$exists: (key) => {
+$exists: (key, paramIndex, value) => {
 const jsonPathKey = parseJsonPathKey(key);
+if (value === false) {
+return {
+sql: `NOT (metadata ? '${jsonPathKey}')`,
+needsValue: false
+};
+}
 return {
 sql: `metadata ? '${jsonPathKey}'`,
 needsValue: false
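`$exists` now receives the filter value and emits a negated key check when it is `false`. A sketch of a query exercising both branches; the `query()` argument names (`indexName`, `queryVector`, `topK`, `filter`) match the validation code visible elsewhere in this diff, while the concrete index name and vector are illustrative:

```ts
import { PgVector } from '@mastra/pg';

const pgVector = new PgVector({ id: 'pg-vector', connectionString: process.env.DATABASE_URL! });

const results = await pgVector.query({
  indexName: 'embeddings',
  queryVector: [0.12, -0.03, 0.88 /* ... */],
  topK: 10,
  filter: {
    category: { $exists: true },    // SQL: metadata ? 'category'
    archivedAt: { $exists: false }, // SQL: NOT (metadata ? 'archivedAt')  -- new in 1.0
  },
});
```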
@@ -340,17 +346,62 @@ function buildDeleteFilterQuery(filter) {
 values.push(value);
 return `metadata#>>'{${parseJsonPathKey(key)}}' = $${values.length}`;
 }
-const
+const entries = Object.entries(value);
+if (entries.length > 1) {
+const conditions2 = entries.map(([operator2, operatorValue2]) => {
+if (operator2 === "$not") {
+const nestedEntries = Object.entries(operatorValue2);
+const nestedConditions = nestedEntries.map(([nestedOp, nestedValue]) => {
+if (!FILTER_OPERATORS[nestedOp]) {
+throw new Error(`Invalid operator in $not condition: ${nestedOp}`);
+}
+const operatorFn3 = FILTER_OPERATORS[nestedOp];
+const operatorResult3 = operatorFn3(key, values.length + 1, nestedValue);
+if (operatorResult3.needsValue) {
+const transformedValue = operatorResult3.transformValue ? operatorResult3.transformValue() : nestedValue;
+if (Array.isArray(transformedValue) && nestedOp === "$elemMatch") {
+values.push(...transformedValue);
+} else {
+values.push(transformedValue);
+}
+}
+return operatorResult3.sql;
+}).join(" AND ");
+return `NOT (${nestedConditions})`;
+}
+if (!FILTER_OPERATORS[operator2]) {
+throw new Error(`Invalid operator: ${operator2}`);
+}
+const operatorFn2 = FILTER_OPERATORS[operator2];
+const operatorResult2 = operatorFn2(key, values.length + 1, operatorValue2);
+if (operatorResult2.needsValue) {
+const transformedValue = operatorResult2.transformValue ? operatorResult2.transformValue() : operatorValue2;
+if (Array.isArray(transformedValue) && operator2 === "$elemMatch") {
+values.push(...transformedValue);
+} else {
+values.push(transformedValue);
+}
+}
+return operatorResult2.sql;
+});
+return conditions2.join(" AND ");
+}
+const [[operator, operatorValue] = []] = entries;
 if (operator === "$not") {
-const
-const conditions2 =
+const nestedEntries = Object.entries(operatorValue);
+const conditions2 = nestedEntries.map(([nestedOp, nestedValue]) => {
 if (!FILTER_OPERATORS[nestedOp]) {
 throw new Error(`Invalid operator in $not condition: ${nestedOp}`);
 }
 const operatorFn2 = FILTER_OPERATORS[nestedOp];
 const operatorResult2 = operatorFn2(key, values.length + 1, nestedValue);
 if (operatorResult2.needsValue) {
-
+const transformedValue = operatorResult2.transformValue ? operatorResult2.transformValue() : nestedValue;
+if (Array.isArray(transformedValue) && nestedOp === "$elemMatch") {
+values.push(...transformedValue);
+} else {
+values.push(transformedValue);
+}
 }
 return operatorResult2.sql;
 }).join(" AND ");
@@ -417,17 +468,62 @@ function buildFilterQuery(filter, minScore, topK) {
 values.push(value);
 return `metadata#>>'{${parseJsonPathKey(key)}}' = $${values.length}`;
 }
-const
+const entries = Object.entries(value);
+if (entries.length > 1) {
+const conditions2 = entries.map(([operator2, operatorValue2]) => {
+if (operator2 === "$not") {
+const nestedEntries = Object.entries(operatorValue2);
+const nestedConditions = nestedEntries.map(([nestedOp, nestedValue]) => {
+if (!FILTER_OPERATORS[nestedOp]) {
+throw new Error(`Invalid operator in $not condition: ${nestedOp}`);
+}
+const operatorFn3 = FILTER_OPERATORS[nestedOp];
+const operatorResult3 = operatorFn3(key, values.length + 1, nestedValue);
+if (operatorResult3.needsValue) {
+const transformedValue = operatorResult3.transformValue ? operatorResult3.transformValue() : nestedValue;
+if (Array.isArray(transformedValue) && nestedOp === "$elemMatch") {
+values.push(...transformedValue);
+} else {
+values.push(transformedValue);
+}
+}
+return operatorResult3.sql;
+}).join(" AND ");
+return `NOT (${nestedConditions})`;
+}
+if (!FILTER_OPERATORS[operator2]) {
+throw new Error(`Invalid operator: ${operator2}`);
+}
+const operatorFn2 = FILTER_OPERATORS[operator2];
+const operatorResult2 = operatorFn2(key, values.length + 1, operatorValue2);
+if (operatorResult2.needsValue) {
+const transformedValue = operatorResult2.transformValue ? operatorResult2.transformValue() : operatorValue2;
+if (Array.isArray(transformedValue) && operator2 === "$elemMatch") {
+values.push(...transformedValue);
+} else {
+values.push(transformedValue);
+}
+}
+return operatorResult2.sql;
+});
+return conditions2.join(" AND ");
+}
+const [[operator, operatorValue] = []] = entries;
 if (operator === "$not") {
-const
-const conditions2 =
+const nestedEntries = Object.entries(operatorValue);
+const conditions2 = nestedEntries.map(([nestedOp, nestedValue]) => {
 if (!FILTER_OPERATORS[nestedOp]) {
 throw new Error(`Invalid operator in $not condition: ${nestedOp}`);
 }
 const operatorFn2 = FILTER_OPERATORS[nestedOp];
 const operatorResult2 = operatorFn2(key, values.length + 1, nestedValue);
 if (operatorResult2.needsValue) {
-
+const transformedValue = operatorResult2.transformValue ? operatorResult2.transformValue() : nestedValue;
+if (Array.isArray(transformedValue) && nestedOp === "$elemMatch") {
+values.push(...transformedValue);
+} else {
+values.push(transformedValue);
+}
 }
 return operatorResult2.sql;
 }).join(" AND ");
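Both `buildDeleteFilterQuery` and `buildFilterQuery` now AND together every operator applied to a single metadata field instead of reading only the first entry. A filter sketch that relies on this (field names are illustrative; `$gte`, `$lte`, and `$in` are assumed to be among the registered `FILTER_OPERATORS`):

```ts
// Before 1.0 only the first operator per field took effect; now all of them
// are combined with AND, and $not can still wrap nested operators.
const filter = {
  score: { $gte: 0.5, $lte: 0.9 },
  status: { $not: { $in: ['archived', 'deleted'] } },
};
```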
@@ -518,8 +614,8 @@ var PgVector = class extends MastraVector {
 } else if (isCloudSqlConfig(config)) {
 poolConfig = {
 ...config,
-max: config.max ?? 20,
-idleTimeoutMillis: config.idleTimeoutMillis ?? 3e4,
+max: config.pgPoolOptions?.max ?? 20,
+idleTimeoutMillis: config.pgPoolOptions?.idleTimeoutMillis ?? 3e4,
 connectionTimeoutMillis: 2e3,
 ...config.pgPoolOptions
 };
@@ -687,9 +783,7 @@ var PgVector = class extends MastraVector {
 probes
 }) {
 try {
-
-throw new Error("topK must be a positive integer");
-}
+validateTopK("PG", topK);
 if (!Array.isArray(queryVector) || !queryVector.every((x) => typeof x === "number" && Number.isFinite(x))) {
 throw new Error("queryVector must be an array of finite numbers");
 }
@@ -774,6 +868,7 @@ var PgVector = class extends MastraVector {
 ids,
 deleteFilter
 }) {
+validateUpsertInput("PG", vectors, metadata, ids);
 const { tableName } = this.getTableName(indexName);
 const client = await this.pool.connect();
 try {
@@ -1661,6 +1756,132 @@ var PgVector = class extends MastraVector {
 }
 }
 };
+
+// src/storage/client.ts
+function truncateQuery(query, maxLength = 100) {
+const normalized = query.replace(/\s+/g, " ").trim();
+if (normalized.length <= maxLength) {
+return normalized;
+}
+return normalized.slice(0, maxLength) + "...";
+}
+var PoolAdapter = class {
+constructor($pool) {
+this.$pool = $pool;
+}
+connect() {
+return this.$pool.connect();
+}
+async none(query, values) {
+await this.$pool.query(query, values);
+return null;
+}
+async one(query, values) {
+const result = await this.$pool.query(query, values);
+if (result.rows.length === 0) {
+throw new Error(`No data returned from query: ${truncateQuery(query)}`);
+}
+if (result.rows.length > 1) {
+throw new Error(`Multiple rows returned when one was expected: ${truncateQuery(query)}`);
+}
+return result.rows[0];
+}
+async oneOrNone(query, values) {
+const result = await this.$pool.query(query, values);
+if (result.rows.length === 0) {
+return null;
+}
+if (result.rows.length > 1) {
+throw new Error(`Multiple rows returned when one or none was expected: ${truncateQuery(query)}`);
+}
+return result.rows[0];
+}
+async any(query, values) {
+const result = await this.$pool.query(query, values);
+return result.rows;
+}
+async manyOrNone(query, values) {
+return this.any(query, values);
+}
+async many(query, values) {
+const result = await this.$pool.query(query, values);
+if (result.rows.length === 0) {
+throw new Error(`No data returned from query: ${truncateQuery(query)}`);
+}
+return result.rows;
+}
+async query(query, values) {
+return this.$pool.query(query, values);
+}
+async tx(callback) {
+const client = await this.$pool.connect();
+try {
+await client.query("BEGIN");
+const txClient = new TransactionClient(client);
+const result = await callback(txClient);
+await client.query("COMMIT");
+return result;
+} catch (error) {
+try {
+await client.query("ROLLBACK");
+} catch (rollbackError) {
+console.error("Transaction rollback failed:", rollbackError);
+}
+throw error;
+} finally {
+client.release();
+}
+}
+};
+var TransactionClient = class {
+constructor(client) {
+this.client = client;
+}
+async none(query, values) {
+await this.client.query(query, values);
+return null;
+}
+async one(query, values) {
+const result = await this.client.query(query, values);
+if (result.rows.length === 0) {
+throw new Error(`No data returned from query: ${truncateQuery(query)}`);
+}
+if (result.rows.length > 1) {
+throw new Error(`Multiple rows returned when one was expected: ${truncateQuery(query)}`);
+}
+return result.rows[0];
+}
+async oneOrNone(query, values) {
+const result = await this.client.query(query, values);
+if (result.rows.length === 0) {
+return null;
+}
+if (result.rows.length > 1) {
+throw new Error(`Multiple rows returned when one or none was expected: ${truncateQuery(query)}`);
+}
+return result.rows[0];
+}
+async any(query, values) {
+const result = await this.client.query(query, values);
+return result.rows;
+}
+async manyOrNone(query, values) {
+return this.any(query, values);
+}
+async many(query, values) {
+const result = await this.client.query(query, values);
+if (result.rows.length === 0) {
+throw new Error(`No data returned from query: ${truncateQuery(query)}`);
+}
+return result.rows;
+}
+async query(query, values) {
+return this.client.query(query, values);
+}
+async batch(promises) {
+return Promise.all(promises);
+}
+};
 function resolvePgConfig(config) {
 if ("client" in config) {
 return {
@@ -1670,10 +1891,32 @@ function resolvePgConfig(config) {
 indexes: config.indexes
 };
 }
-
-
+if ("pool" in config) {
+return {
+client: new PoolAdapter(config.pool),
+schemaName: config.schemaName,
+skipDefaultIndexes: config.skipDefaultIndexes,
+indexes: config.indexes
+};
+}
+let pool;
+if ("connectionString" in config) {
+pool = new Pool({
+connectionString: config.connectionString,
+ssl: config.ssl
+});
+} else {
+pool = new Pool({
+host: config.host,
+port: config.port,
+database: config.database,
+user: config.user,
+password: config.password,
+ssl: config.ssl
+});
+}
 return {
-client,
+client: new PoolAdapter(pool),
 schemaName: config.schemaName,
 skipDefaultIndexes: config.skipDefaultIndexes,
 indexes: config.indexes
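`resolvePgConfig` can now wrap an existing `pg` Pool in the new `PoolAdapter` (and otherwise builds its own `Pool`), replacing the removed pg-promise client. A sketch of sharing one pool between application code and Mastra storage; the `PostgresStore` export name and its acceptance of `{ id, pool }` are assumptions here, since only the config resolution itself appears in this diff:

```ts
import { Pool } from 'pg';
import { PostgresStore } from '@mastra/pg'; // assumed export name

const pool = new Pool({
  connectionString: process.env.DATABASE_URL,
  max: 10,
});

// The store reuses the application's connections instead of opening its own.
const storage = new PostgresStore({ id: 'pg-storage', pool });

// The same pool remains available for direct queries elsewhere in the app.
const { rows } = await pool.query('SELECT 1 AS ok');
```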
@@ -1688,6 +1931,91 @@ function getTableName({ indexName, schemaName }) {
 const quotedSchemaName = schemaName;
 return quotedSchemaName ? `${quotedSchemaName}.${quotedIndexName}` : quotedIndexName;
 }
+function mapToSqlType(type) {
+switch (type) {
+case "uuid":
+return "UUID";
+case "boolean":
+return "BOOLEAN";
+default:
+return getSqlType(type);
+}
+}
+function generateTableSQL({
+tableName,
+schema,
+schemaName,
+includeAllConstraints = false
+}) {
+const timeZColumns = Object.entries(schema).filter(([_, def]) => def.type === "timestamp").map(([name]) => {
+const parsedName = parseSqlIdentifier(name, "column name");
+return `"${parsedName}Z" TIMESTAMPTZ DEFAULT NOW()`;
+});
+const columns = Object.entries(schema).map(([name, def]) => {
+const parsedName = parseSqlIdentifier(name, "column name");
+const constraints = [];
+if (def.primaryKey) constraints.push("PRIMARY KEY");
+if (!def.nullable) constraints.push("NOT NULL");
+return `"${parsedName}" ${mapToSqlType(def.type)} ${constraints.join(" ")}`;
+});
+const finalColumns = [...columns, ...timeZColumns].join(",\n");
+const parsedSchemaName = schemaName ? parseSqlIdentifier(schemaName, "schema name") : "";
+const constraintPrefix = parsedSchemaName ? `${parsedSchemaName}_` : "";
+const quotedSchemaName = getSchemaName(schemaName);
+const sql = `
+CREATE TABLE IF NOT EXISTS ${getTableName({ indexName: tableName, schemaName: quotedSchemaName })} (
+${finalColumns}
+);
+${tableName === TABLE_WORKFLOW_SNAPSHOT ? `
+DO $$ BEGIN
+IF NOT EXISTS (
+SELECT 1 FROM pg_constraint WHERE conname = lower('${constraintPrefix}mastra_workflow_snapshot_workflow_name_run_id_key')
+) AND NOT EXISTS (
+SELECT 1 FROM pg_indexes WHERE indexname = lower('${constraintPrefix}mastra_workflow_snapshot_workflow_name_run_id_key')
+) THEN
+ALTER TABLE ${getTableName({ indexName: tableName, schemaName: quotedSchemaName })}
+ADD CONSTRAINT ${constraintPrefix}mastra_workflow_snapshot_workflow_name_run_id_key
+UNIQUE (workflow_name, run_id);
+END IF;
+END $$;
+` : ""}
+${// For spans table: Include PRIMARY KEY in exports, but not in runtime (handled after deduplication)
+tableName === TABLE_SPANS && includeAllConstraints ? `
+DO $$ BEGIN
+IF NOT EXISTS (
+SELECT 1 FROM pg_constraint WHERE conname = lower('${constraintPrefix}mastra_ai_spans_traceid_spanid_pk')
+) THEN
+ALTER TABLE ${getTableName({ indexName: tableName, schemaName: quotedSchemaName })}
+ADD CONSTRAINT ${constraintPrefix}mastra_ai_spans_traceid_spanid_pk
+PRIMARY KEY ("traceId", "spanId");
+END IF;
+END $$;
+` : ""}
+`;
+return sql;
+}
+function exportSchemas(schemaName) {
+const statements = [];
+if (schemaName) {
+const quotedSchemaName = getSchemaName(schemaName);
+statements.push(`-- Create schema if it doesn't exist`);
+statements.push(`CREATE SCHEMA IF NOT EXISTS ${quotedSchemaName};`);
+statements.push("");
+}
+for (const [tableName, schema] of Object.entries(TABLE_SCHEMAS)) {
+statements.push(`-- Table: ${tableName}`);
+const sql = generateTableSQL({
+tableName,
+schema,
+schemaName,
+includeAllConstraints: true
+// Include all constraints for exports/documentation
+});
+statements.push(sql.trim());
+statements.push("");
+}
+return statements.join("\n");
+}
 var schemaSetupRegistry = /* @__PURE__ */ new Map();
 var PgDB = class extends MastraBase {
 client;
@@ -1805,16 +2133,6 @@ var PgDB = class extends MastraBase {
 }
 await registryEntry.promise;
 }
-getSqlType(type) {
-switch (type) {
-case "uuid":
-return "UUID";
-case "boolean":
-return "BOOLEAN";
-default:
-return getSqlType(type);
-}
-}
 getDefaultValue(type) {
 switch (type) {
 case "timestamp":
@@ -1832,10 +2150,27 @@ var PgDB = class extends MastraBase {
 const columns = Object.keys(record).map((col) => parseSqlIdentifier(col, "column name"));
 const values = this.prepareValuesForInsert(record, tableName);
 const placeholders = values.map((_, i) => `$${i + 1}`).join(", ");
-
-
-
-
+const fullTableName = getTableName({ indexName: tableName, schemaName });
+const columnList = columns.map((c) => `"${c}"`).join(", ");
+if (tableName === TABLE_SPANS) {
+const updateColumns = columns.filter((c) => c !== "traceId" && c !== "spanId");
+if (updateColumns.length > 0) {
+const updateClause = updateColumns.map((c) => `"${c}" = EXCLUDED."${c}"`).join(", ");
+await this.client.none(
+`INSERT INTO ${fullTableName} (${columnList}) VALUES (${placeholders})
+ON CONFLICT ("traceId", "spanId") DO UPDATE SET ${updateClause}`,
+values
+);
+} else {
+await this.client.none(
+`INSERT INTO ${fullTableName} (${columnList}) VALUES (${placeholders})
+ON CONFLICT ("traceId", "spanId") DO NOTHING`,
+values
+);
+}
+} else {
+await this.client.none(`INSERT INTO ${fullTableName} (${columnList}) VALUES (${placeholders})`, values);
+}
 } catch (error) {
 throw new MastraError(
 {
@@ -1859,7 +2194,7 @@ var PgDB = class extends MastraBase {
 SELECT 1 FROM information_schema.tables
 WHERE table_schema = $1 AND table_name = $2
 )`,
-[this.schemaName || "
+[this.schemaName || "public", tableName]
 );
 if (tableExists?.exists) {
 await this.client.none(`TRUNCATE TABLE ${tableNameWithSchema} CASCADE`);
@@ -1884,52 +2219,10 @@ var PgDB = class extends MastraBase {
 }) {
 try {
 const timeZColumnNames = Object.entries(schema).filter(([_, def]) => def.type === "timestamp").map(([name]) => name);
-const timeZColumns = Object.entries(schema).filter(([_, def]) => def.type === "timestamp").map(([name]) => {
-const parsedName = parseSqlIdentifier(name, "column name");
-return `"${parsedName}Z" TIMESTAMPTZ DEFAULT NOW()`;
-});
-const columns = Object.entries(schema).map(([name, def]) => {
-const parsedName = parseSqlIdentifier(name, "column name");
-const constraints = [];
-if (def.primaryKey) constraints.push("PRIMARY KEY");
-if (!def.nullable) constraints.push("NOT NULL");
-return `"${parsedName}" ${this.getSqlType(def.type)} ${constraints.join(" ")}`;
-});
 if (this.schemaName) {
 await this.setupSchema();
 }
-const
-const constraintPrefix = this.schemaName ? `${this.schemaName}_` : "";
-const schemaName = getSchemaName(this.schemaName);
-const sql = `
-CREATE TABLE IF NOT EXISTS ${getTableName({ indexName: tableName, schemaName })} (
-${finalColumns}
-);
-${tableName === TABLE_WORKFLOW_SNAPSHOT ? `
-DO $$ BEGIN
-IF NOT EXISTS (
-SELECT 1 FROM pg_constraint WHERE conname = '${constraintPrefix}mastra_workflow_snapshot_workflow_name_run_id_key'
-) AND NOT EXISTS (
-SELECT 1 FROM pg_indexes WHERE indexname = '${constraintPrefix}mastra_workflow_snapshot_workflow_name_run_id_key'
-) THEN
-ALTER TABLE ${getTableName({ indexName: tableName, schemaName })}
-ADD CONSTRAINT ${constraintPrefix}mastra_workflow_snapshot_workflow_name_run_id_key
-UNIQUE (workflow_name, run_id);
-END IF;
-END $$;
-` : ""}
-${tableName === TABLE_SPANS ? `
-DO $$ BEGIN
-IF NOT EXISTS (
-SELECT 1 FROM pg_constraint WHERE conname = '${constraintPrefix}mastra_ai_spans_traceid_spanid_pk'
-) THEN
-ALTER TABLE ${getTableName({ indexName: tableName, schemaName: getSchemaName(this.schemaName) })}
-ADD CONSTRAINT ${constraintPrefix}mastra_ai_spans_traceid_spanid_pk
-PRIMARY KEY ("traceId", "spanId");
-END IF;
-END $$;
-` : ""}
-`;
+const sql = generateTableSQL({ tableName, schema, schemaName: this.schemaName });
 await this.client.none(sql);
 await this.alterTable({
 tableName,
@@ -1939,8 +2232,46 @@ var PgDB = class extends MastraBase {
 if (tableName === TABLE_SPANS) {
 await this.setupTimestampTriggers(tableName);
 await this.migrateSpansTable();
+const pkExists = await this.spansPrimaryKeyExists();
+if (!pkExists) {
+const duplicateInfo = await this.checkForDuplicateSpans();
+if (duplicateInfo.hasDuplicates) {
+const errorMessage = `
+===========================================================================
+MIGRATION REQUIRED: Duplicate spans detected in ${duplicateInfo.tableName}
+===========================================================================
+
+Found ${duplicateInfo.duplicateCount} duplicate (traceId, spanId) combinations.
+
+The spans table requires a unique constraint on (traceId, spanId), but your
+database contains duplicate entries that must be resolved first.
+
+To fix this, run the manual migration command:
+
+npx mastra migrate
+
+This command will:
+1. Remove duplicate spans (keeping the most complete/recent version)
+2. Add the required unique constraint
+
+Note: This migration may take some time for large tables.
+===========================================================================
+`;
+throw new MastraError({
+id: createStorageErrorId("PG", "MIGRATION_REQUIRED", "DUPLICATE_SPANS"),
+domain: ErrorDomain.STORAGE,
+category: ErrorCategory.USER,
+text: errorMessage
+});
+} else {
+await this.addSpansPrimaryKey();
+}
+}
 }
 } catch (error) {
+if (error instanceof MastraError) {
+throw error;
+}
 throw new MastraError(
 {
 id: createStorageErrorId("PG", "CREATE_TABLE", "FAILED"),
@@ -2003,12 +2334,29 @@ var PgDB = class extends MastraBase {
 const columnExists = await this.hasColumn(TABLE_SPANS, columnName);
 if (!columnExists) {
 const parsedColumnName = parseSqlIdentifier(columnName, "column name");
-const sqlType =
+const sqlType = mapToSqlType(columnDef.type);
 const nullable = columnDef.nullable ? "" : "NOT NULL";
 const defaultValue = !columnDef.nullable ? this.getDefaultValue(columnDef.type) : "";
 const alterSql = `ALTER TABLE ${fullTableName} ADD COLUMN IF NOT EXISTS "${parsedColumnName}" ${sqlType} ${nullable} ${defaultValue}`.trim();
 await this.client.none(alterSql);
 this.logger?.debug?.(`Added column '${columnName}' to ${fullTableName}`);
+if (sqlType === "TIMESTAMP") {
+const timestampZSql = `ALTER TABLE ${fullTableName} ADD COLUMN IF NOT EXISTS "${parsedColumnName}Z" TIMESTAMPTZ DEFAULT NOW()`.trim();
+await this.client.none(timestampZSql);
+this.logger?.debug?.(`Added timezone column '${columnName}Z' to ${fullTableName}`);
+}
+}
+}
+for (const [columnName, columnDef] of Object.entries(schema)) {
+if (columnDef.type === "timestamp") {
+const tzColumnName = `${columnName}Z`;
+const tzColumnExists = await this.hasColumn(TABLE_SPANS, tzColumnName);
+if (!tzColumnExists) {
+const parsedTzColumnName = parseSqlIdentifier(tzColumnName, "column name");
+const timestampZSql = `ALTER TABLE ${fullTableName} ADD COLUMN IF NOT EXISTS "${parsedTzColumnName}" TIMESTAMPTZ DEFAULT NOW()`.trim();
+await this.client.none(timestampZSql);
+this.logger?.debug?.(`Added timezone column '${tzColumnName}' to ${fullTableName}`);
+}
 }
 }
 this.logger?.info?.(`Migration completed for ${fullTableName}`);
@@ -2016,6 +2364,224 @@ var PgDB = class extends MastraBase {
 this.logger?.warn?.(`Failed to migrate spans table ${fullTableName}:`, error);
 }
 }
+/**
+* Deduplicates spans in the mastra_ai_spans table before adding the PRIMARY KEY constraint.
+* Keeps spans based on priority: completed (endedAt NOT NULL) > most recent updatedAt > most recent createdAt.
+*
+* Note: This prioritizes migration completion over perfect data preservation.
+* Old trace data may be lost, which is acceptable for this use case.
+*/
+async deduplicateSpans() {
+const fullTableName = getTableName({ indexName: TABLE_SPANS, schemaName: getSchemaName(this.schemaName) });
+try {
+const duplicateCheck = await this.client.oneOrNone(`
+SELECT EXISTS (
+SELECT 1
+FROM ${fullTableName}
+GROUP BY "traceId", "spanId"
+HAVING COUNT(*) > 1
+LIMIT 1
+) as has_duplicates
+`);
+if (!duplicateCheck?.has_duplicates) {
+this.logger?.debug?.(`No duplicate spans found in ${fullTableName}`);
+return;
+}
+this.logger?.info?.(`Duplicate spans detected in ${fullTableName}, starting deduplication...`);
+const result = await this.client.query(`
+DELETE FROM ${fullTableName} t1
+USING ${fullTableName} t2
+WHERE t1."traceId" = t2."traceId"
+AND t1."spanId" = t2."spanId"
+AND (
+-- Keep completed spans over incomplete
+(t1."endedAt" IS NULL AND t2."endedAt" IS NOT NULL)
+OR
+-- If both have same completion status, keep more recent updatedAt
+(
+(t1."endedAt" IS NULL) = (t2."endedAt" IS NULL)
+AND (
+(t1."updatedAt" < t2."updatedAt")
+OR (t1."updatedAt" IS NULL AND t2."updatedAt" IS NOT NULL)
+OR
+-- If updatedAt is the same, keep more recent createdAt
+(
+(t1."updatedAt" = t2."updatedAt" OR (t1."updatedAt" IS NULL AND t2."updatedAt" IS NULL))
+AND (
+(t1."createdAt" < t2."createdAt")
+OR (t1."createdAt" IS NULL AND t2."createdAt" IS NOT NULL)
+OR
+-- If all else equal, use ctid as tiebreaker
+(
+(t1."createdAt" = t2."createdAt" OR (t1."createdAt" IS NULL AND t2."createdAt" IS NULL))
+AND t1.ctid < t2.ctid
+)
+)
+)
+)
+)
+)
+`);
+this.logger?.info?.(
+`Deduplication complete: removed ${result.rowCount ?? 0} duplicate spans from ${fullTableName}`
+);
+} catch (error) {
+throw new MastraError(
+{
+id: createStorageErrorId("PG", "DEDUPLICATE_SPANS", "FAILED"),
+domain: ErrorDomain.STORAGE,
+category: ErrorCategory.THIRD_PARTY,
+details: {
+tableName: TABLE_SPANS
+}
+},
+error
+);
+}
+}
+/**
+* Checks for duplicate (traceId, spanId) combinations in the spans table.
+* Returns information about duplicates for logging/CLI purposes.
+*/
+async checkForDuplicateSpans() {
+const fullTableName = getTableName({ indexName: TABLE_SPANS, schemaName: getSchemaName(this.schemaName) });
+try {
+const result = await this.client.oneOrNone(`
+SELECT COUNT(*) as duplicate_count
+FROM (
+SELECT "traceId", "spanId"
+FROM ${fullTableName}
+GROUP BY "traceId", "spanId"
+HAVING COUNT(*) > 1
+) duplicates
+`);
+const duplicateCount = parseInt(result?.duplicate_count ?? "0", 10);
+return {
+hasDuplicates: duplicateCount > 0,
+duplicateCount,
+tableName: fullTableName
+};
+} catch (error) {
+this.logger?.debug?.(`Could not check for duplicates: ${error}`);
+return { hasDuplicates: false, duplicateCount: 0, tableName: fullTableName };
+}
+}
+/**
+* Checks if the PRIMARY KEY constraint on (traceId, spanId) already exists on the spans table.
+* Used to skip deduplication when the constraint already exists (migration already complete).
+*/
+async spansPrimaryKeyExists() {
+const parsedSchemaName = this.schemaName ? parseSqlIdentifier(this.schemaName, "schema name") : "";
+const constraintPrefix = parsedSchemaName ? `${parsedSchemaName}_` : "";
+const constraintName = `${constraintPrefix}mastra_ai_spans_traceid_spanid_pk`;
+const result = await this.client.oneOrNone(
+`SELECT EXISTS (SELECT 1 FROM pg_constraint WHERE conname = $1) as exists`,
+[constraintName]
+);
+return result?.exists ?? false;
+}
+/**
+* Adds the PRIMARY KEY constraint on (traceId, spanId) to the spans table.
+* Should be called AFTER deduplication to ensure no duplicate key violations.
+*/
+async addSpansPrimaryKey() {
+const fullTableName = getTableName({ indexName: TABLE_SPANS, schemaName: getSchemaName(this.schemaName) });
+const parsedSchemaName = this.schemaName ? parseSqlIdentifier(this.schemaName, "schema name") : "";
+const constraintPrefix = parsedSchemaName ? `${parsedSchemaName}_` : "";
+const constraintName = `${constraintPrefix}mastra_ai_spans_traceid_spanid_pk`;
+try {
+const constraintExists = await this.client.oneOrNone(
+`
+SELECT EXISTS (
+SELECT 1 FROM pg_constraint WHERE conname = $1
+) as exists
+`,
+[constraintName]
+);
+if (constraintExists?.exists) {
+this.logger?.debug?.(`PRIMARY KEY constraint ${constraintName} already exists on ${fullTableName}`);
+return;
+}
+await this.client.none(`
+ALTER TABLE ${fullTableName}
+ADD CONSTRAINT ${constraintName}
+PRIMARY KEY ("traceId", "spanId")
+`);
+this.logger?.info?.(`Added PRIMARY KEY constraint ${constraintName} to ${fullTableName}`);
+} catch (error) {
+throw new MastraError(
+{
+id: createStorageErrorId("PG", "ADD_SPANS_PRIMARY_KEY", "FAILED"),
+domain: ErrorDomain.STORAGE,
+category: ErrorCategory.THIRD_PARTY,
+details: {
+tableName: TABLE_SPANS,
+constraintName
+}
+},
+error
+);
+}
+}
+/**
+* Manually run the spans migration to deduplicate and add the unique constraint.
+* This is intended to be called from the CLI when duplicates are detected.
+*
+* @returns Migration result with status and details
+*/
+async migrateSpans() {
+const fullTableName = getTableName({ indexName: TABLE_SPANS, schemaName: getSchemaName(this.schemaName) });
+const pkExists = await this.spansPrimaryKeyExists();
+if (pkExists) {
+return {
+success: true,
+alreadyMigrated: true,
+duplicatesRemoved: 0,
+message: `Migration already complete. PRIMARY KEY constraint exists on ${fullTableName}.`
+};
+}
+const duplicateInfo = await this.checkForDuplicateSpans();
+if (duplicateInfo.hasDuplicates) {
+this.logger?.info?.(
+`Found ${duplicateInfo.duplicateCount} duplicate (traceId, spanId) combinations. Starting deduplication...`
+);
+await this.deduplicateSpans();
+} else {
+this.logger?.info?.(`No duplicate spans found.`);
+}
+await this.addSpansPrimaryKey();
+return {
+success: true,
+alreadyMigrated: false,
+duplicatesRemoved: duplicateInfo.duplicateCount,
+message: duplicateInfo.hasDuplicates ? `Migration complete. Removed duplicates and added PRIMARY KEY constraint to ${fullTableName}.` : `Migration complete. Added PRIMARY KEY constraint to ${fullTableName}.`
+};
+}
+/**
+* Check migration status for the spans table.
+* Returns information about whether migration is needed.
+*/
+async checkSpansMigrationStatus() {
+const fullTableName = getTableName({ indexName: TABLE_SPANS, schemaName: getSchemaName(this.schemaName) });
+const pkExists = await this.spansPrimaryKeyExists();
+if (pkExists) {
+return {
+needsMigration: false,
+hasDuplicates: false,
+duplicateCount: 0,
+constraintExists: true,
+tableName: fullTableName
+};
+}
+const duplicateInfo = await this.checkForDuplicateSpans();
+return {
+needsMigration: true,
+hasDuplicates: duplicateInfo.hasDuplicates,
+duplicateCount: duplicateInfo.duplicateCount,
+constraintExists: false,
+tableName: fullTableName
+};
+}
 /**
 * Alters table schema to add columns if they don't exist
 * @param tableName Name of the table
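Besides the `npx mastra migrate` path described in the error message above, the new span-migration helpers can be driven programmatically. A sketch, assuming `db` is the `PgDB` instance backing the store (how that reference is obtained is outside this diff):

```ts
// Does the (traceId, spanId) PRIMARY KEY still need to be added?
const status = await db.checkSpansMigrationStatus();

if (status.needsMigration) {
  // Deduplicates spans when necessary, then adds the constraint.
  const result = await db.migrateSpans();
  console.log(result.message, 'duplicates removed:', result.duplicatesRemoved);
}
```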
@@ -2033,7 +2599,7 @@ var PgDB = class extends MastraBase {
 if (schema[columnName]) {
 const columnDef = schema[columnName];
 const parsedColumnName = parseSqlIdentifier(columnName, "column name");
-const sqlType =
+const sqlType = mapToSqlType(columnDef.type);
 const nullable = columnDef.nullable ? "" : "NOT NULL";
 const defaultValue = !columnDef.nullable ? this.getDefaultValue(columnDef.type) : "";
 const alterSql = `ALTER TABLE ${fullTableName} ADD COLUMN IF NOT EXISTS "${parsedColumnName}" ${sqlType} ${nullable} ${defaultValue}`.trim();
@@ -2353,9 +2919,9 @@ var PgDB = class extends MastraBase {
 size: result.size || "0",
 definition: result.definition || "",
 method: result.method || "btree",
-scans: parseInt(result.scans) || 0,
-tuples_read: parseInt(result.tuples_read) || 0,
-tuples_fetched: parseInt(result.tuples_fetched) || 0
+scans: parseInt(String(result.scans)) || 0,
+tuples_read: parseInt(String(result.tuples_read)) || 0,
+tuples_fetched: parseInt(String(result.tuples_fetched)) || 0
 };
 } catch (error) {
 throw new MastraError(
@@ -2872,6 +3438,9 @@ function getTableName3({ indexName, schemaName }) {
 const quotedIndexName = `"${indexName}"`;
 return schemaName ? `${schemaName}.${quotedIndexName}` : quotedIndexName;
 }
+function inPlaceholders(count, startIndex = 1) {
+return Array.from({ length: count }, (_, i) => `$${i + startIndex}`).join(", ");
+}
 var MemoryPG = class _MemoryPG extends MemoryStorage {
 #db;
 #schema;
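`inPlaceholders` centralizes the `$n` placeholder lists that the memory queries previously built inline. It is module-internal, so its definition is repeated here only to show what it produces:

```ts
function inPlaceholders(count: number, startIndex = 1): string {
  return Array.from({ length: count }, (_, i) => `$${i + startIndex}`).join(', ');
}

inPlaceholders(3);    // "$1, $2, $3"
inPlaceholders(2, 4); // "$4, $5"
```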
@@ -2998,27 +3567,52 @@ var MemoryPG = class _MemoryPG extends MemoryStorage {
 );
 }
 }
-async
-const {
-
+async listThreads(args) {
+const { page = 0, perPage: perPageInput, orderBy, filter } = args;
+try {
+this.validatePaginationInput(page, perPageInput ?? 100);
+} catch (error) {
 throw new MastraError({
-id: createStorageErrorId("PG", "
+id: createStorageErrorId("PG", "LIST_THREADS", "INVALID_PAGE"),
 domain: ErrorDomain.STORAGE,
 category: ErrorCategory.USER,
-text:
-details: {
-resourceId,
-page
-}
+text: error instanceof Error ? error.message : "Invalid pagination parameters",
+details: { page, ...perPageInput !== void 0 && { perPage: perPageInput } }
 });
 }
-const { field, direction } = this.parseOrderBy(orderBy);
 const perPage = normalizePerPage(perPageInput, 100);
+try {
+this.validateMetadataKeys(filter?.metadata);
+} catch (error) {
+throw new MastraError({
+id: createStorageErrorId("PG", "LIST_THREADS", "INVALID_METADATA_KEY"),
+domain: ErrorDomain.STORAGE,
+category: ErrorCategory.USER,
+text: error instanceof Error ? error.message : "Invalid metadata key",
+details: { metadataKeys: filter?.metadata ? Object.keys(filter.metadata).join(", ") : "" }
+});
+}
+const { field, direction } = this.parseOrderBy(orderBy);
 const { offset, perPage: perPageForResponse } = calculatePagination(page, perPageInput, perPage);
 try {
 const tableName = getTableName3({ indexName: TABLE_THREADS, schemaName: getSchemaName3(this.#schema) });
-const
-const queryParams = [
+const whereClauses = [];
+const queryParams = [];
+let paramIndex = 1;
+if (filter?.resourceId) {
+whereClauses.push(`"resourceId" = $${paramIndex}`);
+queryParams.push(filter.resourceId);
+paramIndex++;
+}
+if (filter?.metadata && Object.keys(filter.metadata).length > 0) {
+for (const [key, value] of Object.entries(filter.metadata)) {
+whereClauses.push(`metadata::jsonb @> $${paramIndex}::jsonb`);
+queryParams.push(JSON.stringify({ [key]: value }));
+paramIndex++;
+}
+}
+const whereClause = whereClauses.length > 0 ? `WHERE ${whereClauses.join(" AND ")}` : "";
+const baseQuery = `FROM ${tableName} ${whereClause}`;
 const countQuery = `SELECT COUNT(*) ${baseQuery}`;
 const countResult = await this.#db.client.one(countQuery, queryParams);
 const total = parseInt(countResult.count, 10);
@@ -3032,13 +3626,19 @@ var MemoryPG = class _MemoryPG extends MemoryStorage {
 };
 }
 const limitValue = perPageInput === false ? total : perPage;
-const dataQuery = `SELECT id, "resourceId", title, metadata, "createdAt", "updatedAt" ${baseQuery} ORDER BY "${field}" ${direction} LIMIT
-const rows = await this.#db.client.manyOrNone(
+const dataQuery = `SELECT id, "resourceId", title, metadata, "createdAt", "createdAtZ", "updatedAt", "updatedAtZ" ${baseQuery} ORDER BY "${field}" ${direction} LIMIT $${paramIndex} OFFSET $${paramIndex + 1}`;
+const rows = await this.#db.client.manyOrNone(
+dataQuery,
+[...queryParams, limitValue, offset]
+);
 const threads = (rows || []).map((thread) => ({
-
+id: thread.id,
+resourceId: thread.resourceId,
+title: thread.title,
 metadata: typeof thread.metadata === "string" ? JSON.parse(thread.metadata) : thread.metadata,
-
-
+// Use timezone-aware columns (*Z) for correct UTC timestamps, with fallback for legacy data
+createdAt: thread.createdAtZ || thread.createdAt,
+updatedAt: thread.updatedAtZ || thread.updatedAt
 }));
 return {
 threads,
@@ -3050,11 +3650,12 @@ var MemoryPG = class _MemoryPG extends MemoryStorage {
 } catch (error) {
 const mastraError = new MastraError(
 {
-id: createStorageErrorId("PG", "
+id: createStorageErrorId("PG", "LIST_THREADS", "FAILED"),
 domain: ErrorDomain.STORAGE,
 category: ErrorCategory.THIRD_PARTY,
 details: {
-resourceId,
+...filter?.resourceId && { resourceId: filter.resourceId },
+hasMetadataFilter: !!filter?.metadata,
 page
 }
 },
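`listThreads` now paginates, optionally filters by `resourceId`, and matches metadata key/value pairs with the JSONB containment operator (`@>`). A usage sketch, assuming `memory` is the store's memory domain (the surrounding wiring is outside this diff):

```ts
const { threads } = await memory.listThreads({
  page: 0,
  perPage: 20,
  filter: {
    resourceId: 'user-123',
    // Each entry becomes: metadata::jsonb @> '{"projectId":"acme"}'
    metadata: { projectId: 'acme' },
  },
});
```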
@@ -3143,17 +3744,18 @@ var MemoryPG = class _MemoryPG extends MemoryStorage {
 ...metadata
 };
 try {
+const now = (/* @__PURE__ */ new Date()).toISOString();
 const thread = await this.#db.client.one(
 `UPDATE ${threadTableName}
 SET
 title = $1,
 metadata = $2,
 "updatedAt" = $3,
-"updatedAtZ" = $
-WHERE id = $
+"updatedAtZ" = $4
+WHERE id = $5
 RETURNING *
 `,
-[title, mergedMetadata,
+[title, mergedMetadata, now, now, id]
 );
 return {
 id: thread.id,
@@ -3296,7 +3898,7 @@ var MemoryPG = class _MemoryPG extends MemoryStorage {
 const tableName = getTableName3({ indexName: TABLE_MESSAGES, schemaName: getSchemaName3(this.#schema) });
 const query = `
 ${selectStatement} FROM ${tableName}
-WHERE id IN (${messageIds.
+WHERE id IN (${inPlaceholders(messageIds.length)})
 ORDER BY "createdAt" DESC
 `;
 const resultRows = await this.#db.client.manyOrNone(query, messageIds);
@@ -3357,8 +3959,7 @@ var MemoryPG = class _MemoryPG extends MemoryStorage {
 const orderByStatement = `ORDER BY "${field}" ${direction}`;
 const selectStatement = `SELECT id, content, role, type, "createdAt", "createdAtZ", thread_id AS "threadId", "resourceId"`;
 const tableName = getTableName3({ indexName: TABLE_MESSAGES, schemaName: getSchemaName3(this.#schema) });
-const
-const conditions = [`thread_id IN (${threadPlaceholders})`];
+const conditions = [`thread_id IN (${inPlaceholders(threadIds.length)})`];
 const queryParams = [...threadIds];
 let paramIndex = threadIds.length + 1;
 if (resourceId) {
@@ -3366,11 +3967,13 @@ var MemoryPG = class _MemoryPG extends MemoryStorage {
 queryParams.push(resourceId);
 }
 if (filter?.dateRange?.start) {
-
+const startOp = filter.dateRange.startExclusive ? ">" : ">=";
+conditions.push(`"createdAt" ${startOp} $${paramIndex++}`);
 queryParams.push(filter.dateRange.start);
 }
 if (filter?.dateRange?.end) {
-
+const endOp = filter.dateRange.endExclusive ? "<" : "<=";
+conditions.push(`"createdAt" ${endOp} $${paramIndex++}`);
 queryParams.push(filter.dateRange.end);
 }
 const whereClause = conditions.length > 0 ? `WHERE ${conditions.join(" AND ")}` : "";
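Date-range filters on messages now honor exclusive bounds: `startExclusive` switches `>=` to `>` and `endExclusive` switches `<=` to `<`. A filter sketch (the surrounding list-messages call is omitted):

```ts
const since = new Date('2025-01-01T00:00:00Z');
const until = new Date('2025-02-01T00:00:00Z');

const filter = {
  dateRange: {
    start: since,
    startExclusive: true, // "createdAt" > $n instead of >=
    end: until,           // inclusive by default; set endExclusive for "<"
  },
};
```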
@@ -3515,14 +4118,15 @@ var MemoryPG = class _MemoryPG extends MemoryStorage {
 );
 });
 const threadTableName = getTableName3({ indexName: TABLE_THREADS, schemaName: getSchemaName3(this.#schema) });
+const nowStr = (/* @__PURE__ */ new Date()).toISOString();
 const threadUpdate = t.none(
 `UPDATE ${threadTableName}
 SET
 "updatedAt" = $1,
-"updatedAtZ" = $
-WHERE id = $
+"updatedAtZ" = $2
+WHERE id = $3
 `,
-[
+[nowStr, nowStr, threadId]
 );
 await Promise.all([...messageInserts, threadUpdate]);
 });
@@ -3559,8 +4163,8 @@ var MemoryPG = class _MemoryPG extends MemoryStorage {
 return [];
 }
 const messageIds = messages.map((m) => m.id);
-const selectQuery = `SELECT id, content, role, type, "createdAt", "createdAtZ", thread_id AS "threadId", "resourceId" FROM ${getTableName3({ indexName: TABLE_MESSAGES, schemaName: getSchemaName3(this.#schema) })} WHERE id IN ($
-const existingMessagesDb = await this.#db.client.manyOrNone(selectQuery,
+const selectQuery = `SELECT id, content, role, type, "createdAt", "createdAtZ", thread_id AS "threadId", "resourceId" FROM ${getTableName3({ indexName: TABLE_MESSAGES, schemaName: getSchemaName3(this.#schema) })} WHERE id IN (${inPlaceholders(messageIds.length)})`;
+const existingMessagesDb = await this.#db.client.manyOrNone(selectQuery, messageIds);
 if (existingMessagesDb.length === 0) {
 return [];
 }
@@ -3621,10 +4225,11 @@ var MemoryPG = class _MemoryPG extends MemoryStorage {
 }
 }
 if (threadIdsToUpdate.size > 0) {
+const threadIds = Array.from(threadIdsToUpdate);
 queries.push(
 t.none(
-`UPDATE ${getTableName3({ indexName: TABLE_THREADS, schemaName: getSchemaName3(this.#schema) })} SET "updatedAt" = NOW(), "updatedAtZ" = NOW() WHERE id IN ($
-
+`UPDATE ${getTableName3({ indexName: TABLE_THREADS, schemaName: getSchemaName3(this.#schema) })} SET "updatedAt" = NOW(), "updatedAtZ" = NOW() WHERE id IN (${inPlaceholders(threadIds.length)})`,
+threadIds
 )
 );
 }
@@ -3632,7 +4237,7 @@ var MemoryPG = class _MemoryPG extends MemoryStorage {
|
|
|
3632
4237
|
await t.batch(queries);
|
|
3633
4238
|
}
|
|
3634
4239
|
});
|
|
3635
|
-
const updatedMessages = await this.#db.client.manyOrNone(selectQuery,
|
|
4240
|
+
const updatedMessages = await this.#db.client.manyOrNone(selectQuery, messageIds);
|
|
3636
4241
|
return (updatedMessages || []).map((row) => {
|
|
3637
4242
|
const message = this.normalizeMessageRow(row);
|
|
3638
4243
|
if (typeof message.content === "string") {
|
|
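The rewritten IN-clause queries above rely on an `inPlaceholders` helper that is not shown in this hunk. A minimal sketch of what such a helper presumably does (building `$1, $2, …` positional placeholders so an id array can be bound as parameters); the package's actual implementation may differ:

```typescript
// Hypothetical sketch of the helper assumed by the queries above.
function inPlaceholders(count: number, startAt = 1): string {
  return Array.from({ length: count }, (_, i) => `$${startAt + i}`).join(", ");
}

// e.g. `WHERE id IN (${inPlaceholders(3)})` bound with params ["a", "b", "c"]
```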
@@ -3744,15 +4349,159 @@ var MemoryPG = class _MemoryPG extends MemoryStorage {
       values.push(JSON.stringify(updatedResource.metadata));
       paramIndex++;
     }
-
-
+    const updatedAtStr = updatedResource.updatedAt.toISOString();
+    updates.push(`"updatedAt" = $${paramIndex++}`);
+    values.push(updatedAtStr);
     updates.push(`"updatedAtZ" = $${paramIndex++}`);
-    values.push(
-    paramIndex++;
+    values.push(updatedAtStr);
     values.push(resourceId);
     await this.#db.client.none(`UPDATE ${tableName} SET ${updates.join(", ")} WHERE id = $${paramIndex}`, values);
     return updatedResource;
   }
+  async cloneThread(args) {
+    const { sourceThreadId, newThreadId: providedThreadId, resourceId, title, metadata, options } = args;
+    const sourceThread = await this.getThreadById({ threadId: sourceThreadId });
+    if (!sourceThread) {
+      throw new MastraError({
+        id: createStorageErrorId("PG", "CLONE_THREAD", "SOURCE_NOT_FOUND"),
+        domain: ErrorDomain.STORAGE,
+        category: ErrorCategory.USER,
+        text: `Source thread with id ${sourceThreadId} not found`,
+        details: { sourceThreadId }
+      });
+    }
+    const newThreadId = providedThreadId || crypto.randomUUID();
+    const existingThread = await this.getThreadById({ threadId: newThreadId });
+    if (existingThread) {
+      throw new MastraError({
+        id: createStorageErrorId("PG", "CLONE_THREAD", "THREAD_EXISTS"),
+        domain: ErrorDomain.STORAGE,
+        category: ErrorCategory.USER,
+        text: `Thread with id ${newThreadId} already exists`,
+        details: { newThreadId }
+      });
+    }
+    const threadTableName = getTableName3({ indexName: TABLE_THREADS, schemaName: getSchemaName3(this.#schema) });
+    const messageTableName = getTableName3({ indexName: TABLE_MESSAGES, schemaName: getSchemaName3(this.#schema) });
+    try {
+      return await this.#db.client.tx(async (t) => {
+        let messageQuery = `SELECT id, content, role, type, "createdAt", "createdAtZ", thread_id AS "threadId", "resourceId"
+          FROM ${messageTableName} WHERE thread_id = $1`;
+        const messageParams = [sourceThreadId];
+        let paramIndex = 2;
+        if (options?.messageFilter?.startDate) {
+          messageQuery += ` AND "createdAt" >= $${paramIndex++}`;
+          messageParams.push(options.messageFilter.startDate);
+        }
+        if (options?.messageFilter?.endDate) {
+          messageQuery += ` AND "createdAt" <= $${paramIndex++}`;
+          messageParams.push(options.messageFilter.endDate);
+        }
+        if (options?.messageFilter?.messageIds && options.messageFilter.messageIds.length > 0) {
+          messageQuery += ` AND id IN (${options.messageFilter.messageIds.map(() => `$${paramIndex++}`).join(", ")})`;
+          messageParams.push(...options.messageFilter.messageIds);
+        }
+        messageQuery += ` ORDER BY "createdAt" ASC`;
+        if (options?.messageLimit && options.messageLimit > 0) {
+          const limitQuery = `SELECT * FROM (${messageQuery.replace('ORDER BY "createdAt" ASC', 'ORDER BY "createdAt" DESC')} LIMIT $${paramIndex}) AS limited ORDER BY "createdAt" ASC`;
+          messageParams.push(options.messageLimit);
+          messageQuery = limitQuery;
+        }
+        const sourceMessages = await t.manyOrNone(messageQuery, messageParams);
+        const now = /* @__PURE__ */ new Date();
+        const lastMessageId = sourceMessages.length > 0 ? sourceMessages[sourceMessages.length - 1].id : void 0;
+        const cloneMetadata = {
+          sourceThreadId,
+          clonedAt: now,
+          ...lastMessageId && { lastMessageId }
+        };
+        const newThread = {
+          id: newThreadId,
+          resourceId: resourceId || sourceThread.resourceId,
+          title: title || (sourceThread.title ? `Clone of ${sourceThread.title}` : void 0),
+          metadata: {
+            ...metadata,
+            clone: cloneMetadata
+          },
+          createdAt: now,
+          updatedAt: now
+        };
+        await t.none(
+          `INSERT INTO ${threadTableName} (
+            id,
+            "resourceId",
+            title,
+            metadata,
+            "createdAt",
+            "createdAtZ",
+            "updatedAt",
+            "updatedAtZ"
+          ) VALUES ($1, $2, $3, $4, $5, $6, $7, $8)`,
+          [
+            newThread.id,
+            newThread.resourceId,
+            newThread.title,
+            newThread.metadata ? JSON.stringify(newThread.metadata) : null,
+            now,
+            now,
+            now,
+            now
+          ]
+        );
+        const clonedMessages = [];
+        const targetResourceId = resourceId || sourceThread.resourceId;
+        for (const sourceMsg of sourceMessages) {
+          const newMessageId = crypto.randomUUID();
+          const normalizedMsg = this.normalizeMessageRow(sourceMsg);
+          let parsedContent = normalizedMsg.content;
+          try {
+            parsedContent = JSON.parse(normalizedMsg.content);
+          } catch {
+          }
+          await t.none(
+            `INSERT INTO ${messageTableName} (id, thread_id, content, "createdAt", "createdAtZ", role, type, "resourceId")
+              VALUES ($1, $2, $3, $4, $5, $6, $7, $8)`,
+            [
+              newMessageId,
+              newThreadId,
+              typeof normalizedMsg.content === "string" ? normalizedMsg.content : JSON.stringify(normalizedMsg.content),
+              normalizedMsg.createdAt,
+              normalizedMsg.createdAt,
+              normalizedMsg.role,
+              normalizedMsg.type || "v2",
+              targetResourceId
+            ]
+          );
+          clonedMessages.push({
+            id: newMessageId,
+            threadId: newThreadId,
+            content: parsedContent,
+            role: normalizedMsg.role,
+            type: normalizedMsg.type,
+            createdAt: new Date(normalizedMsg.createdAt),
+            resourceId: targetResourceId
+          });
+        }
+        return {
+          thread: newThread,
+          clonedMessages
+        };
+      });
+    } catch (error) {
+      if (error instanceof MastraError) {
+        throw error;
+      }
+      throw new MastraError(
+        {
+          id: createStorageErrorId("PG", "CLONE_THREAD", "FAILED"),
+          domain: ErrorDomain.STORAGE,
+          category: ErrorCategory.THIRD_PARTY,
+          details: { sourceThreadId, newThreadId }
+        },
+        error
+      );
+    }
+  }
 };
 var ObservabilityPG = class _ObservabilityPG extends ObservabilityStorage {
   #db;
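The new `cloneThread` method above takes a source thread id plus optional overrides and message filters. A hedged usage sketch; the option names mirror the destructuring in the hunk, while the surrounding `memory` store instance and the exact return typing (which lives in the shipped .d.ts files) are assumptions:

```typescript
// Assumes a MemoryPG instance named `memory` is already constructed
// (see the PostgresStore wiring later in this diff).
const { thread, clonedMessages } = await memory.cloneThread({
  sourceThreadId: "thread-123",
  // newThreadId is optional; a random UUID is generated when omitted
  title: "Support follow-up",
  options: {
    messageFilter: { startDate: new Date("2024-01-01") },
    messageLimit: 50, // keeps only the most recent 50 matching messages
  },
});
console.log(thread.metadata?.clone); // { sourceThreadId, clonedAt, lastMessageId? }
```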
@@ -3870,6 +4619,22 @@ var ObservabilityPG = class _ObservabilityPG extends ObservabilityStorage {
       }
     }
   }
+  /**
+   * Manually run the spans migration to deduplicate and add the unique constraint.
+   * This is intended to be called from the CLI when duplicates are detected.
+   *
+   * @returns Migration result with status and details
+   */
+  async migrateSpans() {
+    return this.#db.migrateSpans();
+  }
+  /**
+   * Check migration status for the spans table.
+   * Returns information about whether migration is needed.
+   */
+  async checkSpansMigrationStatus() {
+    return this.#db.checkSpansMigrationStatus();
+  }
   async dangerouslyClearAll() {
     await this.#db.clearTable({ tableName: TABLE_SPANS });
   }
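Both new methods delegate to the underlying db helper. A hedged sketch of the call pattern only; the field names on the returned status/result objects are not part of this diff and should be checked against the published typings:

```typescript
// Call pattern only; the shapes of `status` and `result` are assumptions.
const observability = store.stores.observability;
const status = await observability.checkSpansMigrationStatus();
if (status /* e.g. reports duplicates or a missing unique constraint */) {
  const result = await observability.migrateSpans();
  console.log("spans migration finished", result);
}
```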
@@ -4382,6 +5147,11 @@ var ScoresPG = class _ScoresPG extends ScoresStorage {
   }
   async init() {
     await this.#db.createTable({ tableName: TABLE_SCORERS, schema: TABLE_SCHEMAS[TABLE_SCORERS] });
+    await this.#db.alterTable({
+      tableName: TABLE_SCORERS,
+      schema: TABLE_SCHEMAS[TABLE_SCORERS],
+      ifNotExists: ["spanId", "requestContext"]
+    });
     await this.createDefaultIndexes();
     await this.createCustomIndexes();
   }
@@ -4733,23 +5503,8 @@ function getTableName5({ indexName, schemaName }) {
   const quotedIndexName = `"${indexName}"`;
   return schemaName ? `${schemaName}.${quotedIndexName}` : quotedIndexName;
 }
-function
-
-  if (typeof parsedSnapshot === "string") {
-    try {
-      parsedSnapshot = JSON.parse(row.snapshot);
-    } catch (e) {
-      console.warn(`Failed to parse snapshot for workflow ${row.workflow_name}: ${e}`);
-    }
-  }
-  return {
-    workflowName: row.workflow_name,
-    runId: row.run_id,
-    snapshot: parsedSnapshot,
-    resourceId: row.resourceId,
-    createdAt: new Date(row.createdAtZ || row.createdAt),
-    updatedAt: new Date(row.updatedAtZ || row.updatedAt)
-  };
+function sanitizeJsonForPg(jsonString) {
+  return jsonString.replace(/\\u(0000|[Dd][89A-Fa-f][0-9A-Fa-f]{2})/g, "");
 }
 var WorkflowsPG = class _WorkflowsPG extends WorkflowsStorage {
   #db;
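`sanitizeJsonForPg` strips escape sequences that PostgreSQL's `jsonb` type rejects: the NUL escape `\u0000` and lone UTF-16 surrogate escapes (`\uD800`–`\uDFFF`). A small illustration of the behavior, re-stating the same regex as above so the example is self-contained:

```typescript
// Mirrors the regex in the hunk above: it operates on the serialized JSON
// text, removing literal "\u0000" and surrogate escapes that Postgres
// jsonb columns refuse to store.
function sanitizeJsonForPg(jsonString: string): string {
  return jsonString.replace(/\\u(0000|[Dd][89A-Fa-f][0-9A-Fa-f]{2})/g, "");
}

const raw = JSON.stringify({ note: "bad\u0000value" }); // '{"note":"bad\\u0000value"}'
console.log(sanitizeJsonForPg(raw));                    // '{"note":"badvalue"}'
```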
@@ -4766,6 +5521,24 @@ var WorkflowsPG = class _WorkflowsPG extends WorkflowsStorage {
     this.#skipDefaultIndexes = skipDefaultIndexes;
     this.#indexes = indexes?.filter((idx) => _WorkflowsPG.MANAGED_TABLES.includes(idx.table));
   }
+  parseWorkflowRun(row) {
+    let parsedSnapshot = row.snapshot;
+    if (typeof parsedSnapshot === "string") {
+      try {
+        parsedSnapshot = JSON.parse(row.snapshot);
+      } catch (e) {
+        this.logger.warn(`Failed to parse snapshot for workflow ${row.workflow_name}: ${e}`);
+      }
+    }
+    return {
+      workflowName: row.workflow_name,
+      runId: row.run_id,
+      snapshot: parsedSnapshot,
+      resourceId: row.resourceId,
+      createdAt: new Date(row.createdAtZ || row.createdAt),
+      updatedAt: new Date(row.updatedAtZ || row.updatedAt)
+    };
+  }
   /**
    * Returns default index definitions for the workflows domain tables.
    * Currently no default indexes are defined for workflows.
@@ -4838,12 +5611,13 @@ var WorkflowsPG = class _WorkflowsPG extends WorkflowsStorage {
     const now = /* @__PURE__ */ new Date();
     const createdAtValue = createdAt ? createdAt : now;
     const updatedAtValue = updatedAt ? updatedAt : now;
+    const sanitizedSnapshot = sanitizeJsonForPg(JSON.stringify(snapshot));
     await this.#db.client.none(
       `INSERT INTO ${getTableName5({ indexName: TABLE_WORKFLOW_SNAPSHOT, schemaName: getSchemaName5(this.#schema) })} (workflow_name, run_id, "resourceId", snapshot, "createdAt", "updatedAt")
        VALUES ($1, $2, $3, $4, $5, $6)
        ON CONFLICT (workflow_name, run_id) DO UPDATE
        SET "resourceId" = $3, snapshot = $4, "updatedAt" = $6`,
-      [workflowName, runId, resourceId,
+      [workflowName, runId, resourceId, sanitizedSnapshot, createdAtValue, updatedAtValue]
     );
   } catch (error) {
     throw new MastraError(
@@ -4906,7 +5680,7 @@ var WorkflowsPG = class _WorkflowsPG extends WorkflowsStorage {
     if (!result) {
       return null;
     }
-    return parseWorkflowRun(result);
+    return this.parseWorkflowRun(result);
   } catch (error) {
     throw new MastraError(
       {
@@ -4962,7 +5736,9 @@ var WorkflowsPG = class _WorkflowsPG extends WorkflowsStorage {
       paramIndex++;
     }
     if (status) {
-      conditions.push(
+      conditions.push(
+        `regexp_replace(snapshot::text, '\\\\u(0000|[Dd][89A-Fa-f][0-9A-Fa-f]{2})', '', 'g')::jsonb ->> 'status' = $${paramIndex}`
+      );
       values.push(status);
       paramIndex++;
     }
@@ -5007,7 +5783,7 @@ var WorkflowsPG = class _WorkflowsPG extends WorkflowsStorage {
     const queryValues = usePagination ? [...values, normalizedPerPage, offset] : values;
     const result = await this.#db.client.manyOrNone(query, queryValues);
     const runs = (result || []).map((row) => {
-      return parseWorkflowRun(row);
+      return this.parseWorkflowRun(row);
     });
     return { runs, total: total || runs.length };
   } catch (error) {
@@ -5027,9 +5803,12 @@ var WorkflowsPG = class _WorkflowsPG extends WorkflowsStorage {
 };
 
 // src/storage/index.ts
-var
+var DEFAULT_MAX_CONNECTIONS = 20;
+var DEFAULT_IDLE_TIMEOUT_MS = 3e4;
+var PostgresStore = class extends MastraCompositeStore {
+  #pool;
   #db;
-  #
+  #ownsPool;
   schema;
   isInitialized = false;
   stores;
@@ -5037,60 +5816,27 @@ var PostgresStore = class extends MastraStorage {
     try {
       validateConfig("PostgresStore", config);
       super({ id: config.id, name: "PostgresStore", disableInit: config.disableInit });
-      this.schema = config.schemaName || "public";
-
-
-      this.#
+      this.schema = parseSqlIdentifier(config.schemaName || "public", "schema name");
+      if (isPoolConfig(config)) {
+        this.#pool = config.pool;
+        this.#ownsPool = false;
       } else {
-
-
-
-
-
-
-
-
-
-
-        pgConfig = {
-          ...config,
-          id: config.id,
-          max: config.max,
-          idleTimeoutMillis: config.idleTimeoutMillis
-        };
-      } else if (isHostConfig(config)) {
-        pgConfig = {
-          id: config.id,
-          host: config.host,
-          port: config.port,
-          database: config.database,
-          user: config.user,
-          password: config.password,
-          ssl: config.ssl,
-          max: config.max,
-          idleTimeoutMillis: config.idleTimeoutMillis
-        };
-      } else {
-        throw new Error(
-          "PostgresStore: invalid config. Provide either {client}, {connectionString}, {host,port,database,user,password}, or a pg ClientConfig (e.g., Cloud SQL connector with `stream`)."
-        );
-      }
-      this.#db = this.#pgp(pgConfig);
-      }
-      const skipDefaultIndexes = config.skipDefaultIndexes;
-      const indexes = config.indexes;
-      const domainConfig = { client: this.#db, schemaName: this.schema, skipDefaultIndexes, indexes };
-      const scores = new ScoresPG(domainConfig);
-      const workflows = new WorkflowsPG(domainConfig);
-      const memory = new MemoryPG(domainConfig);
-      const observability = new ObservabilityPG(domainConfig);
-      const agents = new AgentsPG(domainConfig);
+        this.#pool = this.createPool(config);
+        this.#ownsPool = true;
+      }
+      this.#db = new PoolAdapter(this.#pool);
+      const domainConfig = {
+        client: this.#db,
+        schemaName: this.schema,
+        skipDefaultIndexes: config.skipDefaultIndexes,
+        indexes: config.indexes
+      };
       this.stores = {
-        scores,
-        workflows,
-        memory,
-        observability,
-        agents
+        scores: new ScoresPG(domainConfig),
+        workflows: new WorkflowsPG(domainConfig),
+        memory: new MemoryPG(domainConfig),
+        observability: new ObservabilityPG(domainConfig),
+        agents: new AgentsPG(domainConfig)
       };
     } catch (e) {
       throw new MastraError(
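The constructor now either adopts a caller-supplied `pg` Pool (`{ pool }`, in which case the store never closes it) or builds and owns its own pool via `createPool`. A hedged construction sketch based on the config branches visible above; the environment variable name is a placeholder, and any additional required fields enforced by `validateConfig` (such as `id`) live in the shipped shared/config typings:

```typescript
import { Pool } from "pg";
import { PostgresStore } from "@mastra/pg";

// Option 1: let the store create and own its pool (connection-string branch).
const store = new PostgresStore({
  connectionString: process.env.DATABASE_URL!, // placeholder env var
  max: 10,                   // defaults to DEFAULT_MAX_CONNECTIONS (20) when omitted
  idleTimeoutMillis: 10_000, // defaults to DEFAULT_IDLE_TIMEOUT_MS (30s) when omitted
});

// Option 2: hand in an existing pool; the store will not close it for you.
const sharedPool = new Pool({ connectionString: process.env.DATABASE_URL! });
const storeWithSharedPool = new PostgresStore({ pool: sharedPool });
```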
@@ -5103,6 +5849,32 @@ var PostgresStore = class extends MastraStorage {
       );
     }
   }
+  createPool(config) {
+    if (isConnectionStringConfig(config)) {
+      return new Pool({
+        connectionString: config.connectionString,
+        ssl: config.ssl,
+        max: config.max ?? DEFAULT_MAX_CONNECTIONS,
+        idleTimeoutMillis: config.idleTimeoutMillis ?? DEFAULT_IDLE_TIMEOUT_MS
+      });
+    }
+    if (isHostConfig(config)) {
+      return new Pool({
+        host: config.host,
+        port: config.port,
+        database: config.database,
+        user: config.user,
+        password: config.password,
+        ssl: config.ssl,
+        max: config.max ?? DEFAULT_MAX_CONNECTIONS,
+        idleTimeoutMillis: config.idleTimeoutMillis ?? DEFAULT_IDLE_TIMEOUT_MS
+      });
+    }
+    if (isCloudSqlConfig(config)) {
+      return new Pool(config);
+    }
+    throw new Error("PostgresStore: invalid config");
+  }
   async init() {
     if (this.isInitialized) {
       return;
@@ -5112,6 +5884,9 @@ var PostgresStore = class extends MastraStorage {
       await super.init();
     } catch (error) {
       this.isInitialized = false;
+      if (error instanceof MastraError) {
+        throw error;
+      }
       throw new MastraError(
         {
           id: createStorageErrorId("PG", "INIT", "FAILED"),
@@ -5122,32 +5897,32 @@ var PostgresStore = class extends MastraStorage {
       );
     }
   }
+  /**
+   * Database client for executing queries.
+   *
+   * @example
+   * ```typescript
+   * const rows = await store.db.any('SELECT * FROM users WHERE active = $1', [true]);
+   * const user = await store.db.one('SELECT * FROM users WHERE id = $1', [userId]);
+   * ```
+   */
   get db() {
     return this.#db;
   }
-
-
-
-  get
-    return
-    selectByIncludeResourceScope: true,
-    resourceWorkingMemory: true,
-    hasColumn: true,
-    createTable: true,
-    deleteMessages: true,
-    observability: true,
-    indexManagement: true,
-    listScoresBySpan: true,
-    agents: true
-  };
+  /**
+   * The underlying pg.Pool for direct database access or ORM integration.
+   */
+  get pool() {
+    return this.#pool;
   }
   /**
-   * Closes the
-   *
-   * This will close ALL connections in the pool, including pre-configured clients.
+   * Closes the connection pool if it was created by this store.
+   * If a pool was passed in via config, it will not be closed.
    */
   async close() {
-    this
+    if (this.#ownsPool) {
+      await this.#pool.end();
+    }
   }
 };
 
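The beta-era `supports` getter is dropped in favor of exposing the raw pool, and lifecycle handling now follows `#ownsPool`. A hedged sketch of using the new accessors, assuming a `store` constructed as in the earlier example:

```typescript
// `store.pool` is a plain pg.Pool, so standard pg APIs apply.
const { rows } = await store.pool.query("SELECT 1 AS ok");
console.log(rows[0].ok); // 1

// Ends the pool only when the store created it; an injected pool is left open
// so its real owner decides when to call pool.end().
await store.close();
```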
@@ -5250,6 +6025,6 @@ Example Complex Query:
   ]
 }`;
 
-export { PGVECTOR_PROMPT, PgVector, PostgresStore };
+export { AgentsPG, MemoryPG, ObservabilityPG, PGVECTOR_PROMPT, PgVector, PoolAdapter, PostgresStore, ScoresPG, WorkflowsPG, exportSchemas };
 //# sourceMappingURL=index.js.map
 //# sourceMappingURL=index.js.map
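The widened export list makes the per-domain stores and the `PoolAdapter` importable directly. A hedged sketch of wiring a single domain store against your own pool, mirroring what `PostgresStore` does internally; the constructor argument shapes are inferred from the hunks above and may differ from the published typings:

```typescript
import { Pool } from "pg";
import { PoolAdapter, MemoryPG, WorkflowsPG } from "@mastra/pg";

// Argument shapes inferred from the domainConfig object in this diff.
const pool = new Pool({ connectionString: process.env.DATABASE_URL! }); // placeholder env var
const client = new PoolAdapter(pool);
const memory = new MemoryPG({ client, schemaName: "public" });
const workflows = new WorkflowsPG({ client, schemaName: "public" });
```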