@mastra/mongodb 1.0.0-beta.9 → 1.0.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/CHANGELOG.md +1018 -0
- package/README.md +18 -0
- package/dist/docs/README.md +34 -0
- package/dist/docs/SKILL.md +35 -0
- package/dist/docs/SOURCE_MAP.json +6 -0
- package/dist/docs/memory/01-working-memory.md +390 -0
- package/dist/docs/rag/01-vector-databases.md +643 -0
- package/dist/docs/rag/02-retrieval.md +548 -0
- package/dist/docs/storage/01-reference.md +243 -0
- package/dist/docs/vectors/01-reference.md +201 -0
- package/dist/index.cjs +305 -46
- package/dist/index.cjs.map +1 -1
- package/dist/index.js +307 -48
- package/dist/index.js.map +1 -1
- package/dist/storage/db/index.d.ts.map +1 -1
- package/dist/storage/domains/memory/index.d.ts +2 -2
- package/dist/storage/domains/memory/index.d.ts.map +1 -1
- package/dist/storage/domains/observability/index.d.ts +46 -0
- package/dist/storage/domains/observability/index.d.ts.map +1 -1
- package/dist/storage/index.d.ts +2 -2
- package/dist/storage/index.d.ts.map +1 -1
- package/dist/storage/types.d.ts +14 -2
- package/dist/storage/types.d.ts.map +1 -1
- package/dist/vector/index.d.ts +17 -6
- package/dist/vector/index.d.ts.map +1 -1
- package/package.json +11 -10
package/dist/index.cjs
CHANGED
@@ -13,7 +13,7 @@ var evals = require('@mastra/core/evals');
 
 // package.json
 var package_default = {
-  version: "1.0.0-beta.9"};
+  version: "1.0.0"};
 var MongoDBFilterTranslator = class extends filter.BaseFilterTranslator {
   getSupportedOperators() {
     return {
@@ -107,7 +107,7 @@ var MongoDBVector = class extends vector.MastraVector {
   client;
   db;
   collections;
-  embeddingFieldName = "embedding";
+  embeddingFieldName;
   metadataFieldName = "metadata";
   documentFieldName = "document";
   collectionForValidation = null;
@@ -116,8 +116,11 @@ var MongoDBVector = class extends vector.MastraVector {
     euclidean: "euclidean",
     dotproduct: "dotProduct"
   };
-  constructor({ id, uri, dbName, options }) {
+  constructor({ id, uri, dbName, options, embeddingFieldPath }) {
     super({ id });
+    if (!uri) {
+      throw new Error('MongoDBVector requires a connection string. Provide "uri" in the constructor options.');
+    }
     const client = new mongodb.MongoClient(uri, {
       ...options,
       driverInfo: {
@@ -128,6 +131,7 @@ var MongoDBVector = class extends vector.MastraVector {
     this.client = client;
     this.db = this.client.db(dbName);
     this.collections = /* @__PURE__ */ new Map();
+    this.embeddingFieldName = embeddingFieldPath ?? "embedding";
   }
   // Public methods
   async connect() {
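
Taken together, the constructor changes above add an optional `embeddingFieldPath` and fail fast when no connection string is given. A minimal consumer sketch (TypeScript), assuming the `MongoDBVector` export and your own connection details; `embeddingFieldPath` falls back to `"embedding"` when omitted:

```ts
import { MongoDBVector } from '@mastra/mongodb';

const vectorStore = new MongoDBVector({
  id: 'mongodb-vectors',                // passed through to super({ id })
  uri: process.env.MONGODB_URI!,        // omitting uri now throws at construction time
  dbName: 'mastra',
  embeddingFieldPath: 'vectors.openai', // optional; defaults to "embedding"
});

await vectorStore.connect();
```
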
@@ -276,6 +280,8 @@ var MongoDBVector = class extends vector.MastraVector {
       throw new Error(`Index "${indexNameInternal}" did not become ready within timeout`);
     }
   async upsert({ indexName, vectors, metadata, ids, documents }) {
+    vector.validateUpsertInput("MONGODB", vectors, metadata, ids);
+    vector.validateVectorValues("MONGODB", vectors);
     try {
       const collection = await this.getCollection(indexName);
       this.collectionForValidation = collection;
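
`upsert` now runs `validateUpsertInput` and `validateVectorValues` before any write, so mismatched arrays or invalid vector values are rejected up front. A hedged usage sketch, reusing the `vectorStore` instance from the previous example (data is illustrative):

```ts
// ids, vectors, and metadata are validated for consistent lengths and valid
// vector values before any document is written to the collection.
await vectorStore.upsert({
  indexName: 'documents',
  vectors: [
    [0.12, -0.03, 0.88], // real embeddings carry the model's full dimensionality
    [0.51, 0.22, -0.19],
  ],
  ids: ['doc-1', 'doc-2'],
  metadata: [{ source: 'faq' }, { source: 'blog' }],
});
```
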
@@ -351,8 +357,8 @@ var MongoDBVector = class extends vector.MastraVector {
         index: indexNameInternal,
         queryVector,
         path: this.embeddingFieldName,
-        numCandidates: 100,
-        limit: topK
+        numCandidates: Math.min(1e4, Math.max(100, topK)),
+        limit: Math.min(1e4, topK)
       };
       if (Object.keys(combinedFilter).length > 0) {
         const filterWithExclusion = {
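
The query path no longer hardcodes `numCandidates: 100`; both `numCandidates` and `limit` are now derived from `topK` and capped at 1e4. A plain-arithmetic illustration of the new bounds (not part of the package):

```ts
// numCandidates = min(10000, max(100, topK)); limit = min(10000, topK)
const bounds = (topK: number) => ({
  numCandidates: Math.min(10_000, Math.max(100, topK)),
  limit: Math.min(10_000, topK),
});

bounds(10);     // { numCandidates: 100,   limit: 10 }    small queries keep the previous candidate pool
bounds(500);    // { numCandidates: 500,   limit: 500 }   topK above 100 widens the candidate pool
bounds(50_000); // { numCandidates: 10000, limit: 10000 } both capped at the 1e4 ceiling used above
```
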
@@ -859,11 +865,21 @@ function resolveMongoDBConfig(config) {
       );
     }
   }
+  const connectionString = config.uri ?? config.url;
+  if (!connectionString) {
+    throw new error.MastraError({
+      id: storage.createStorageErrorId("MONGODB", "CONSTRUCTOR", "MISSING_URI"),
+      domain: error.ErrorDomain.STORAGE,
+      category: error.ErrorCategory.USER,
+      details: { dbName: config?.dbName },
+      text: 'MongoDBStore requires a connection string. Provide "uri" (recommended) or "url" in the constructor options.'
+    });
+  }
   try {
     return MongoDBConnector.fromDatabaseConfig({
       id: "id" in config ? config.id : "domain",
       options: config.options,
-      url:
+      url: connectionString,
       dbName: config.dbName
     });
   } catch (error$1) {
@@ -872,7 +888,7 @@ function resolveMongoDBConfig(config) {
         id: storage.createStorageErrorId("MONGODB", "CONSTRUCTOR", "FAILED"),
         domain: error.ErrorDomain.STORAGE,
         category: error.ErrorCategory.USER,
-        details: { url: config?.url, dbName: config?.dbName }
+        details: { uri: config?.uri ?? "", url: config?.url ?? "", dbName: config?.dbName ?? "" }
       },
       error$1
     );
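
`resolveMongoDBConfig` now accepts `uri` (recommended) alongside the legacy `url` and raises a `MISSING_URI` `MastraError` when neither is set. A hedged configuration sketch; the exact config type is not shown in this diff, so only keys visible above are used:

```ts
import { MongoDBStore } from '@mastra/mongodb';

const store = new MongoDBStore({
  uri: process.env.MONGODB_URI!, // "url" is still accepted, but "uri" is the recommended key
  dbName: 'mastra',
  // id and options are also accepted, per the connector config above
});
```
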
@@ -1337,10 +1353,12 @@ var MemoryStorageMongoDB = class _MemoryStorageMongoDB extends storage.MemorySto
       query.resourceId = resourceId;
     }
     if (filter?.dateRange?.start) {
-
+      const startOp = filter.dateRange.startExclusive ? "$gt" : "$gte";
+      query.createdAt = { ...query.createdAt, [startOp]: formatDateForMongoDB(filter.dateRange.start) };
     }
     if (filter?.dateRange?.end) {
-
+      const endOp = filter.dateRange.endExclusive ? "$lt" : "$lte";
+      query.createdAt = { ...query.createdAt, [endOp]: formatDateForMongoDB(filter.dateRange.end) };
     }
     const total = await collection.countDocuments(query);
     const messages = [];
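
Message date filtering now honors `startExclusive` and `endExclusive`, choosing `$gt`/`$gte` and `$lt`/`$lte` accordingly. A standalone sketch of the same mapping (illustrative only; the package additionally runs dates through its internal `formatDateForMongoDB` helper):

```ts
type DateRange = { start?: Date; end?: Date; startExclusive?: boolean; endExclusive?: boolean };

// Builds the createdAt sub-query the same way the diff above does.
function createdAtQuery(range: DateRange): Record<string, Date> {
  const query: Record<string, Date> = {};
  if (range.start) query[range.startExclusive ? '$gt' : '$gte'] = range.start;
  if (range.end) query[range.endExclusive ? '$lt' : '$lte'] = range.end;
  return query;
}

// => { $gt: 2025-01-01, $lte: 2025-02-01 }: strictly after start, up to and including end
createdAtQuery({
  start: new Date('2025-01-01'),
  end: new Date('2025-02-01'),
  startExclusive: true,
});
```
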
@@ -1671,25 +1689,48 @@ var MemoryStorageMongoDB = class _MemoryStorageMongoDB extends storage.MemorySto
       );
     }
   }
-  async listThreadsByResourceId(args) {
+  async listThreads(args) {
+    const { page = 0, perPage: perPageInput, orderBy, filter } = args;
+    try {
+      this.validatePaginationInput(page, perPageInput ?? 100);
+    } catch (error$1) {
+      throw new error.MastraError(
+        {
+          id: storage.createStorageErrorId("MONGODB", "LIST_THREADS", "INVALID_PAGE"),
+          domain: error.ErrorDomain.STORAGE,
+          category: error.ErrorCategory.USER,
+          details: { page, ...perPageInput !== void 0 && { perPage: perPageInput } }
+        },
+        error$1 instanceof Error ? error$1 : new Error("Invalid pagination parameters")
+      );
+    }
+    const perPage = storage.normalizePerPage(perPageInput, 100);
+    try {
+      this.validateMetadataKeys(filter?.metadata);
+    } catch (error$1) {
+      throw new error.MastraError(
+        {
+          id: storage.createStorageErrorId("MONGODB", "LIST_THREADS", "INVALID_METADATA_KEY"),
+          domain: error.ErrorDomain.STORAGE,
+          category: error.ErrorCategory.USER,
+          details: { metadataKeys: filter?.metadata ? Object.keys(filter.metadata).join(", ") : "" }
+        },
+        error$1 instanceof Error ? error$1 : new Error("Invalid metadata key")
+      );
+    }
     try {
-      const { resourceId, page = 0, perPage: perPageInput, orderBy } = args;
-      if (page < 0) {
-        throw new error.MastraError(
-          {
-            id: storage.createStorageErrorId("MONGODB", "LIST_THREADS_BY_RESOURCE_ID", "INVALID_PAGE"),
-            domain: error.ErrorDomain.STORAGE,
-            category: error.ErrorCategory.USER,
-            details: { page }
-          },
-          new Error("page must be >= 0")
-        );
-      }
-      const perPage = storage.normalizePerPage(perPageInput, 100);
       const { offset, perPage: perPageForResponse } = storage.calculatePagination(page, perPageInput, perPage);
       const { field, direction } = this.parseOrderBy(orderBy);
       const collection = await this.getCollection(storage.TABLE_THREADS);
-      const query = {
+      const query = {};
+      if (filter?.resourceId) {
+        query.resourceId = filter.resourceId;
+      }
+      if (filter?.metadata && Object.keys(filter.metadata).length > 0) {
+        for (const [key, value] of Object.entries(filter.metadata)) {
+          query[`metadata.${key}`] = value;
+        }
+      }
       const total = await collection.countDocuments(query);
       if (perPage === 0) {
         return {
@@ -1723,10 +1764,13 @@ var MemoryStorageMongoDB = class _MemoryStorageMongoDB extends storage.MemorySto
     } catch (error$1) {
       throw new error.MastraError(
         {
-          id: storage.createStorageErrorId("MONGODB", "
+          id: storage.createStorageErrorId("MONGODB", "LIST_THREADS", "FAILED"),
           domain: error.ErrorDomain.STORAGE,
           category: error.ErrorCategory.THIRD_PARTY,
-          details: {
+          details: {
+            ...filter?.resourceId && { resourceId: filter.resourceId },
+            hasMetadataFilter: !!filter?.metadata
+          }
         },
         error$1
       );
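
`listThreadsByResourceId` is replaced by a more general `listThreads` whose optional `filter` supports `resourceId` and `metadata` equality matches, with pagination and metadata keys validated up front. A hedged call sketch; the argument shape follows the diff, while the way you obtain the store instance and the exact return type are not shown here:

```ts
// Stand-in for the MemoryStorageMongoDB domain store from the diff above.
declare const memoryStore: {
  listThreads(args: {
    page?: number;
    perPage?: number;
    filter?: { resourceId?: string; metadata?: Record<string, unknown> };
  }): Promise<unknown>;
};

const result = await memoryStore.listThreads({
  page: 0,
  perPage: 25,
  filter: {
    resourceId: 'user-123',           // becomes query.resourceId
    metadata: { channel: 'support' }, // each key becomes a "metadata.<key>" equality match
  },
});
```
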
@@ -1908,8 +1952,220 @@ var ObservabilityMongoDB = class _ObservabilityMongoDB extends storage.Observabi
     }
   }
   async init() {
+    const uniqueIndexExists = await this.spansUniqueIndexExists();
+    if (!uniqueIndexExists) {
+      const duplicateInfo = await this.checkForDuplicateSpans();
+      if (duplicateInfo.hasDuplicates) {
+        const errorMessage = `
+===========================================================================
+MIGRATION REQUIRED: Duplicate spans detected in ${storage.TABLE_SPANS} collection
+===========================================================================
+
+Found ${duplicateInfo.duplicateCount} duplicate (traceId, spanId) combinations.
+
+The spans collection requires a unique index on (traceId, spanId), but your
+database contains duplicate entries that must be resolved first.
+
+To fix this, run the manual migration command:
+
+npx mastra migrate
+
+This command will:
+1. Remove duplicate spans (keeping the most complete/recent version)
+2. Add the required unique index
+
+Note: This migration may take some time for large collections.
+===========================================================================
+`;
+        throw new error.MastraError({
+          id: storage.createStorageErrorId("MONGODB", "MIGRATION_REQUIRED", "DUPLICATE_SPANS"),
+          domain: error.ErrorDomain.STORAGE,
+          category: error.ErrorCategory.USER,
+          text: errorMessage
+        });
+      }
+    }
+    await this.createDefaultIndexes();
+    await this.createCustomIndexes();
+  }
+  /**
+   * Checks if the unique index on (spanId, traceId) already exists on the spans collection.
+   * Used to skip deduplication when the index already exists (migration already complete).
+   */
+  async spansUniqueIndexExists() {
+    try {
+      const collection = await this.getCollection(storage.TABLE_SPANS);
+      const indexes = await collection.indexes();
+      return indexes.some((idx) => idx.unique === true && idx.key?.spanId === 1 && idx.key?.traceId === 1);
+    } catch {
+      return false;
+    }
+  }
+  /**
+   * Checks for duplicate (traceId, spanId) combinations in the spans collection.
+   * Returns information about duplicates for logging/CLI purposes.
+   */
+  async checkForDuplicateSpans() {
+    try {
+      const collection = await this.getCollection(storage.TABLE_SPANS);
+      const result = await collection.aggregate([
+        {
+          $group: {
+            _id: { traceId: "$traceId", spanId: "$spanId" },
+            count: { $sum: 1 }
+          }
+        },
+        { $match: { count: { $gt: 1 } } },
+        { $count: "duplicateCount" }
+      ]).toArray();
+      const duplicateCount = result[0]?.duplicateCount ?? 0;
+      return {
+        hasDuplicates: duplicateCount > 0,
+        duplicateCount
+      };
+    } catch (error) {
+      this.logger?.debug?.(`Could not check for duplicates: ${error}`);
+      return { hasDuplicates: false, duplicateCount: 0 };
+    }
+  }
+  /**
+   * Manually run the spans migration to deduplicate and add the unique index.
+   * This is intended to be called from the CLI when duplicates are detected.
+   *
+   * @returns Migration result with status and details
+   */
+  async migrateSpans() {
+    const indexExists = await this.spansUniqueIndexExists();
+    if (indexExists) {
+      return {
+        success: true,
+        alreadyMigrated: true,
+        duplicatesRemoved: 0,
+        message: `Migration already complete. Unique index exists on ${storage.TABLE_SPANS} collection.`
+      };
+    }
+    const duplicateInfo = await this.checkForDuplicateSpans();
+    if (duplicateInfo.hasDuplicates) {
+      this.logger?.info?.(
+        `Found ${duplicateInfo.duplicateCount} duplicate (traceId, spanId) combinations. Starting deduplication...`
+      );
+      await this.deduplicateSpans();
+    } else {
+      this.logger?.info?.(`No duplicate spans found.`);
+    }
     await this.createDefaultIndexes();
     await this.createCustomIndexes();
+    return {
+      success: true,
+      alreadyMigrated: false,
+      duplicatesRemoved: duplicateInfo.duplicateCount,
+      message: duplicateInfo.hasDuplicates ? `Migration complete. Removed duplicates and added unique index to ${storage.TABLE_SPANS} collection.` : `Migration complete. Added unique index to ${storage.TABLE_SPANS} collection.`
+    };
+  }
+  /**
+   * Check migration status for the spans collection.
+   * Returns information about whether migration is needed.
+   */
+  async checkSpansMigrationStatus() {
+    const indexExists = await this.spansUniqueIndexExists();
+    if (indexExists) {
+      return {
+        needsMigration: false,
+        hasDuplicates: false,
+        duplicateCount: 0,
+        constraintExists: true,
+        tableName: storage.TABLE_SPANS
+      };
+    }
+    const duplicateInfo = await this.checkForDuplicateSpans();
+    return {
+      needsMigration: true,
+      hasDuplicates: duplicateInfo.hasDuplicates,
+      duplicateCount: duplicateInfo.duplicateCount,
+      constraintExists: false,
+      tableName: storage.TABLE_SPANS
+    };
+  }
+  /**
+   * Deduplicates spans with the same (traceId, spanId) combination.
+   * This is needed for databases that existed before the unique constraint was added.
+   *
+   * Priority for keeping spans:
+   * 1. Completed spans (endedAt IS NOT NULL) over incomplete spans
+   * 2. Most recent updatedAt
+   * 3. Most recent createdAt (as tiebreaker)
+   *
+   * Note: This prioritizes migration completion over perfect data preservation.
+   * Old trace data may be lost, which is acceptable for this use case.
+   */
+  async deduplicateSpans() {
+    try {
+      const collection = await this.getCollection(storage.TABLE_SPANS);
+      const duplicateCheck = await collection.aggregate([
+        {
+          $group: {
+            _id: { traceId: "$traceId", spanId: "$spanId" },
+            count: { $sum: 1 }
+          }
+        },
+        { $match: { count: { $gt: 1 } } },
+        { $limit: 1 }
+      ]).toArray();
+      if (duplicateCheck.length === 0) {
+        this.logger?.debug?.("No duplicate spans found");
+        return;
+      }
+      this.logger?.info?.("Duplicate spans detected, starting deduplication...");
+      const idsToDelete = await collection.aggregate([
+        // Sort by priority (affects which document $first picks within each group)
+        {
+          $sort: {
+            // Completed spans first (endedAt exists and is not null)
+            endedAt: -1,
+            updatedAt: -1,
+            createdAt: -1
+          }
+        },
+        // Group by (traceId, spanId), keeping the first (best) _id and all _ids
+        {
+          $group: {
+            _id: { traceId: "$traceId", spanId: "$spanId" },
+            keepId: { $first: "$_id" },
+            // The best one to keep (after sort)
+            allIds: { $push: "$_id" },
+            // All ObjectIds (just 12 bytes each, not full docs)
+            count: { $sum: 1 }
+          }
+        },
+        // Only consider groups with duplicates
+        { $match: { count: { $gt: 1 } } },
+        // Get IDs to delete (allIds minus keepId)
+        {
+          $project: {
+            idsToDelete: {
+              $filter: {
+                input: "$allIds",
+                cond: { $ne: ["$$this", "$keepId"] }
+              }
+            }
+          }
+        },
+        // Unwind to get flat list of IDs
+        { $unwind: "$idsToDelete" },
+        // Just output the ID
+        { $project: { _id: "$idsToDelete" } }
+      ]).toArray();
+      if (idsToDelete.length === 0) {
+        this.logger?.debug?.("No duplicates to delete after aggregation");
+        return;
+      }
+      const deleteResult = await collection.deleteMany({
+        _id: { $in: idsToDelete.map((d) => d._id) }
+      });
+      this.logger?.info?.(`Deduplication complete: removed ${deleteResult.deletedCount} duplicate spans`);
+    } catch (error) {
+      this.logger?.warn?.("Failed to deduplicate spans:", error);
+    }
   }
   async dangerouslyClearAll() {
     const collection = await this.getCollection(storage.TABLE_SPANS);
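
`init()` now refuses to start when the spans collection contains duplicate `(traceId, spanId)` rows and no unique index, pointing users at `npx mastra migrate`; the new `checkSpansMigrationStatus()` and `migrateSpans()` methods back that flow. A hedged sketch of driving the migration programmatically; method names and result fields follow the diff, while the store instance and field types are assumptions:

```ts
// Stand-in for the ObservabilityMongoDB domain store from the diff above.
declare const observability: {
  checkSpansMigrationStatus(): Promise<{
    needsMigration: boolean;
    hasDuplicates: boolean;
    duplicateCount: number;
    constraintExists: boolean;
    tableName: string;
  }>;
  migrateSpans(): Promise<{
    success: boolean;
    alreadyMigrated: boolean;
    duplicatesRemoved: number;
    message: string;
  }>;
};

const status = await observability.checkSpansMigrationStatus();
if (status.needsMigration) {
  if (status.hasDuplicates) {
    console.log(`${status.duplicateCount} duplicate (traceId, spanId) pairs in ${status.tableName}`);
  }
  const result = await observability.migrateSpans(); // deduplicates, then recreates indexes
  console.log(result.message);
}
```
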
@@ -2191,7 +2447,7 @@ var ObservabilityMongoDB = class _ObservabilityMongoDB extends storage.Observabi
         // No children with errors
         }
       ];
-      const countResult = await collection.aggregate([...pipeline, { $count: "total" }]).toArray();
+      const countResult = await collection.aggregate([...pipeline, { $count: "total" }], { allowDiskUse: true }).toArray();
       const count2 = countResult[0]?.total || 0;
       if (count2 === 0) {
         return {
@@ -2228,7 +2484,7 @@ var ObservabilityMongoDB = class _ObservabilityMongoDB extends storage.Observabi
           { $project: { _errorSpans: 0 } }
         ];
       }
-      const spans2 = await collection.aggregate(aggregationPipeline).toArray();
+      const spans2 = await collection.aggregate(aggregationPipeline, { allowDiskUse: true }).toArray();
       return {
         pagination: {
           total: count2,
@@ -2254,18 +2510,21 @@ var ObservabilityMongoDB = class _ObservabilityMongoDB extends storage.Observabi
     let spans;
     if (sortField === "endedAt") {
       const nullSortValue = sortDirection === -1 ? 0 : 1;
-      spans = await collection.aggregate(
-
-
-
-
-
-
-
-
-
-
-
+      spans = await collection.aggregate(
+        [
+          { $match: mongoFilter },
+          {
+            $addFields: {
+              _endedAtNull: { $cond: [{ $eq: ["$endedAt", null] }, nullSortValue, sortDirection === -1 ? 1 : 0] }
+            }
+          },
+          { $sort: { _endedAtNull: 1, [sortField]: sortDirection } },
+          { $skip: page * perPage },
+          { $limit: perPage },
+          { $project: { _endedAtNull: 0 } }
+        ],
+        { allowDiskUse: true }
+      ).toArray();
     } else {
       spans = await collection.find(mongoFilter).sort({ [sortField]: sortDirection }).skip(page * perPage).limit(perPage).toArray();
     }
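
Span listing sorted by `endedAt` now goes through an aggregation that controls where null `endedAt` values land and passes `allowDiskUse: true` so large sorts can spill to disk. The same pattern in isolation against the raw Node.js driver; the connection, collection name, and filter are illustrative (the package uses its own `storage.TABLE_SPANS` constant):

```ts
import { MongoClient } from 'mongodb';

const client = new MongoClient(process.env.MONGODB_URI!);
await client.connect();
const spans = client.db('mastra').collection('spans'); // illustrative collection name

const page = 0;
const perPage = 50;

// Spans still in progress (endedAt = null) sort after completed ones here.
const results = await spans.aggregate(
  [
    { $match: { traceId: 'trace-123' } },
    { $addFields: { _endedAtNull: { $cond: [{ $eq: ['$endedAt', null] }, 1, 0] } } },
    { $sort: { _endedAtNull: 1, endedAt: -1 } },
    { $skip: page * perPage },
    { $limit: perPage },
    { $project: { _endedAtNull: 0 } },
  ],
  { allowDiskUse: true }, // lets the server use temporary files for large sorts
).toArray();

await client.close();
```
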
@@ -2581,7 +2840,7 @@ var ScoresStorageMongoDB = class _ScoresStorageMongoDB extends storage.ScoresSto
         };
       }
       const end = perPageInput === false ? total : start + perPage;
-      let cursor = collection.find(query).sort({ createdAt:
+      let cursor = collection.find(query).sort({ createdAt: -1 }).skip(start);
       if (perPageInput !== false) {
         cursor = cursor.limit(perPage);
       }
@@ -2630,7 +2889,7 @@ var ScoresStorageMongoDB = class _ScoresStorageMongoDB extends storage.ScoresSto
         };
       }
       const end = perPageInput === false ? total : start + perPage;
-      let cursor = collection.find({ runId }).sort({ createdAt:
+      let cursor = collection.find({ runId }).sort({ createdAt: -1 }).skip(start);
       if (perPageInput !== false) {
         cursor = cursor.limit(perPage);
       }
@@ -2680,7 +2939,7 @@ var ScoresStorageMongoDB = class _ScoresStorageMongoDB extends storage.ScoresSto
         };
       }
       const end = perPageInput === false ? total : start + perPage;
-      let cursor = collection.find({ entityId, entityType }).sort({ createdAt:
+      let cursor = collection.find({ entityId, entityType }).sort({ createdAt: -1 }).skip(start);
       if (perPageInput !== false) {
         cursor = cursor.limit(perPage);
       }
@@ -2731,7 +2990,7 @@ var ScoresStorageMongoDB = class _ScoresStorageMongoDB extends storage.ScoresSto
         };
       }
       const end = perPageInput === false ? total : start + perPage;
-      let cursor = collection.find(query).sort({ createdAt:
+      let cursor = collection.find(query).sort({ createdAt: -1 }).skip(start);
       if (perPageInput !== false) {
         cursor = cursor.limit(perPage);
       }
@@ -3024,7 +3283,7 @@ var WorkflowsStorageMongoDB = class _WorkflowsStorageMongoDB extends storage.Wor
       try {
         parsedSnapshot = typeof row.snapshot === "string" ? storage.safelyParseJSON(row.snapshot) : row.snapshot;
       } catch (e) {
-
+        this.logger.warn(`Failed to parse snapshot for workflow ${row.workflow_name}: ${e}`);
       }
     }
     return {
@@ -3039,7 +3298,7 @@ var WorkflowsStorageMongoDB = class _WorkflowsStorageMongoDB extends storage.Wor
 };
 
 // src/storage/index.ts
-var MongoDBStore = class extends storage.
+var MongoDBStore = class extends storage.MastraCompositeStore {
   #connector;
   stores;
   constructor(config) {