@mastra/mongodb 1.0.0-beta.9 → 1.0.0

This diff compares the contents of two publicly released versions of this package as published to a supported public registry. The information shown is provided for informational purposes only and reflects the changes between the package versions as they appear in that registry.
package/dist/index.js CHANGED
@@ -1,6 +1,6 @@
1
1
  import { MastraError, ErrorCategory, ErrorDomain } from '@mastra/core/error';
2
- import { createVectorErrorId, AgentsStorage, TABLE_AGENTS, createStorageErrorId, normalizePerPage, calculatePagination, MemoryStorage, TABLE_THREADS, TABLE_MESSAGES, TABLE_RESOURCES, safelyParseJSON, ObservabilityStorage, TABLE_SPANS, listTracesArgsSchema, ScoresStorage, TABLE_SCORERS, WorkflowsStorage, TABLE_WORKFLOW_SNAPSHOT, MastraStorage, TraceStatus, transformScoreRow as transformScoreRow$1 } from '@mastra/core/storage';
3
- import { MastraVector } from '@mastra/core/vector';
2
+ import { createVectorErrorId, AgentsStorage, TABLE_AGENTS, createStorageErrorId, normalizePerPage, calculatePagination, MemoryStorage, TABLE_THREADS, TABLE_MESSAGES, TABLE_RESOURCES, safelyParseJSON, ObservabilityStorage, TABLE_SPANS, listTracesArgsSchema, ScoresStorage, TABLE_SCORERS, WorkflowsStorage, TABLE_WORKFLOW_SNAPSHOT, MastraCompositeStore, TraceStatus, transformScoreRow as transformScoreRow$1 } from '@mastra/core/storage';
3
+ import { MastraVector, validateUpsertInput, validateVectorValues } from '@mastra/core/vector';
4
4
  import { MongoClient } from 'mongodb';
5
5
  import { v4 } from 'uuid';
6
6
  import { BaseFilterTranslator } from '@mastra/core/vector/filter';
@@ -11,7 +11,7 @@ import { saveScorePayloadSchema } from '@mastra/core/evals';
11
11
 
12
12
  // package.json
13
13
  var package_default = {
14
- version: "1.0.0-beta.9"};
14
+ version: "1.0.0"};
15
15
  var MongoDBFilterTranslator = class extends BaseFilterTranslator {
16
16
  getSupportedOperators() {
17
17
  return {
@@ -105,7 +105,7 @@ var MongoDBVector = class extends MastraVector {
105
105
  client;
106
106
  db;
107
107
  collections;
108
- embeddingFieldName = "embedding";
108
+ embeddingFieldName;
109
109
  metadataFieldName = "metadata";
110
110
  documentFieldName = "document";
111
111
  collectionForValidation = null;
@@ -114,8 +114,11 @@ var MongoDBVector = class extends MastraVector {
114
114
  euclidean: "euclidean",
115
115
  dotproduct: "dotProduct"
116
116
  };
117
- constructor({ id, uri, dbName, options }) {
117
+ constructor({ id, uri, dbName, options, embeddingFieldPath }) {
118
118
  super({ id });
119
+ if (!uri) {
120
+ throw new Error('MongoDBVector requires a connection string. Provide "uri" in the constructor options.');
121
+ }
119
122
  const client = new MongoClient(uri, {
120
123
  ...options,
121
124
  driverInfo: {
@@ -126,6 +129,7 @@ var MongoDBVector = class extends MastraVector {
126
129
  this.client = client;
127
130
  this.db = this.client.db(dbName);
128
131
  this.collections = /* @__PURE__ */ new Map();
132
+ this.embeddingFieldName = embeddingFieldPath ?? "embedding";
129
133
  }
130
134
  // Public methods
131
135
  async connect() {
@@ -274,6 +278,8 @@ var MongoDBVector = class extends MastraVector {
274
278
  throw new Error(`Index "${indexNameInternal}" did not become ready within timeout`);
275
279
  }
276
280
  async upsert({ indexName, vectors, metadata, ids, documents }) {
281
+ validateUpsertInput("MONGODB", vectors, metadata, ids);
282
+ validateVectorValues("MONGODB", vectors);
277
283
  try {
278
284
  const collection = await this.getCollection(indexName);
279
285
  this.collectionForValidation = collection;
@@ -349,8 +355,8 @@ var MongoDBVector = class extends MastraVector {
349
355
  index: indexNameInternal,
350
356
  queryVector,
351
357
  path: this.embeddingFieldName,
352
- numCandidates: 100,
353
- limit: topK
358
+ numCandidates: Math.min(1e4, Math.max(100, topK)),
359
+ limit: Math.min(1e4, topK)
354
360
  };
355
361
  if (Object.keys(combinedFilter).length > 0) {
356
362
  const filterWithExclusion = {
@@ -857,11 +863,21 @@ function resolveMongoDBConfig(config) {
857
863
  );
858
864
  }
859
865
  }
866
+ const connectionString = config.uri ?? config.url;
867
+ if (!connectionString) {
868
+ throw new MastraError({
869
+ id: createStorageErrorId("MONGODB", "CONSTRUCTOR", "MISSING_URI"),
870
+ domain: ErrorDomain.STORAGE,
871
+ category: ErrorCategory.USER,
872
+ details: { dbName: config?.dbName },
873
+ text: 'MongoDBStore requires a connection string. Provide "uri" (recommended) or "url" in the constructor options.'
874
+ });
875
+ }
860
876
  try {
861
877
  return MongoDBConnector.fromDatabaseConfig({
862
878
  id: "id" in config ? config.id : "domain",
863
879
  options: config.options,
864
- url: config.url,
880
+ url: connectionString,
865
881
  dbName: config.dbName
866
882
  });
867
883
  } catch (error) {
@@ -870,7 +886,7 @@ function resolveMongoDBConfig(config) {
870
886
  id: createStorageErrorId("MONGODB", "CONSTRUCTOR", "FAILED"),
871
887
  domain: ErrorDomain.STORAGE,
872
888
  category: ErrorCategory.USER,
873
- details: { url: config?.url, dbName: config?.dbName }
889
+ details: { uri: config?.uri ?? "", url: config?.url ?? "", dbName: config?.dbName ?? "" }
874
890
  },
875
891
  error
876
892
  );
@@ -1335,10 +1351,12 @@ var MemoryStorageMongoDB = class _MemoryStorageMongoDB extends MemoryStorage {
1335
1351
  query.resourceId = resourceId;
1336
1352
  }
1337
1353
  if (filter?.dateRange?.start) {
1338
- query.createdAt = { ...query.createdAt, $gte: filter.dateRange.start };
1354
+ const startOp = filter.dateRange.startExclusive ? "$gt" : "$gte";
1355
+ query.createdAt = { ...query.createdAt, [startOp]: formatDateForMongoDB(filter.dateRange.start) };
1339
1356
  }
1340
1357
  if (filter?.dateRange?.end) {
1341
- query.createdAt = { ...query.createdAt, $lte: filter.dateRange.end };
1358
+ const endOp = filter.dateRange.endExclusive ? "$lt" : "$lte";
1359
+ query.createdAt = { ...query.createdAt, [endOp]: formatDateForMongoDB(filter.dateRange.end) };
1342
1360
  }
1343
1361
  const total = await collection.countDocuments(query);
1344
1362
  const messages = [];
@@ -1669,25 +1687,48 @@ var MemoryStorageMongoDB = class _MemoryStorageMongoDB extends MemoryStorage {
1669
1687
  );
1670
1688
  }
1671
1689
  }
1672
- async listThreadsByResourceId(args) {
1690
+ async listThreads(args) {
1691
+ const { page = 0, perPage: perPageInput, orderBy, filter } = args;
1692
+ try {
1693
+ this.validatePaginationInput(page, perPageInput ?? 100);
1694
+ } catch (error) {
1695
+ throw new MastraError(
1696
+ {
1697
+ id: createStorageErrorId("MONGODB", "LIST_THREADS", "INVALID_PAGE"),
1698
+ domain: ErrorDomain.STORAGE,
1699
+ category: ErrorCategory.USER,
1700
+ details: { page, ...perPageInput !== void 0 && { perPage: perPageInput } }
1701
+ },
1702
+ error instanceof Error ? error : new Error("Invalid pagination parameters")
1703
+ );
1704
+ }
1705
+ const perPage = normalizePerPage(perPageInput, 100);
1706
+ try {
1707
+ this.validateMetadataKeys(filter?.metadata);
1708
+ } catch (error) {
1709
+ throw new MastraError(
1710
+ {
1711
+ id: createStorageErrorId("MONGODB", "LIST_THREADS", "INVALID_METADATA_KEY"),
1712
+ domain: ErrorDomain.STORAGE,
1713
+ category: ErrorCategory.USER,
1714
+ details: { metadataKeys: filter?.metadata ? Object.keys(filter.metadata).join(", ") : "" }
1715
+ },
1716
+ error instanceof Error ? error : new Error("Invalid metadata key")
1717
+ );
1718
+ }
1673
1719
  try {
1674
- const { resourceId, page = 0, perPage: perPageInput, orderBy } = args;
1675
- if (page < 0) {
1676
- throw new MastraError(
1677
- {
1678
- id: createStorageErrorId("MONGODB", "LIST_THREADS_BY_RESOURCE_ID", "INVALID_PAGE"),
1679
- domain: ErrorDomain.STORAGE,
1680
- category: ErrorCategory.USER,
1681
- details: { page }
1682
- },
1683
- new Error("page must be >= 0")
1684
- );
1685
- }
1686
- const perPage = normalizePerPage(perPageInput, 100);
1687
1720
  const { offset, perPage: perPageForResponse } = calculatePagination(page, perPageInput, perPage);
1688
1721
  const { field, direction } = this.parseOrderBy(orderBy);
1689
1722
  const collection = await this.getCollection(TABLE_THREADS);
1690
- const query = { resourceId };
1723
+ const query = {};
1724
+ if (filter?.resourceId) {
1725
+ query.resourceId = filter.resourceId;
1726
+ }
1727
+ if (filter?.metadata && Object.keys(filter.metadata).length > 0) {
1728
+ for (const [key, value] of Object.entries(filter.metadata)) {
1729
+ query[`metadata.${key}`] = value;
1730
+ }
1731
+ }
1691
1732
  const total = await collection.countDocuments(query);
1692
1733
  if (perPage === 0) {
1693
1734
  return {
@@ -1721,10 +1762,13 @@ var MemoryStorageMongoDB = class _MemoryStorageMongoDB extends MemoryStorage {
1721
1762
  } catch (error) {
1722
1763
  throw new MastraError(
1723
1764
  {
1724
- id: createStorageErrorId("MONGODB", "LIST_THREADS_BY_RESOURCE_ID", "FAILED"),
1765
+ id: createStorageErrorId("MONGODB", "LIST_THREADS", "FAILED"),
1725
1766
  domain: ErrorDomain.STORAGE,
1726
1767
  category: ErrorCategory.THIRD_PARTY,
1727
- details: { resourceId: args.resourceId }
1768
+ details: {
1769
+ ...filter?.resourceId && { resourceId: filter.resourceId },
1770
+ hasMetadataFilter: !!filter?.metadata
1771
+ }
1728
1772
  },
1729
1773
  error
1730
1774
  );
@@ -1906,8 +1950,220 @@ var ObservabilityMongoDB = class _ObservabilityMongoDB extends ObservabilityStor
1906
1950
  }
1907
1951
  }
1908
1952
  async init() {
1953
+ const uniqueIndexExists = await this.spansUniqueIndexExists();
1954
+ if (!uniqueIndexExists) {
1955
+ const duplicateInfo = await this.checkForDuplicateSpans();
1956
+ if (duplicateInfo.hasDuplicates) {
1957
+ const errorMessage = `
1958
+ ===========================================================================
1959
+ MIGRATION REQUIRED: Duplicate spans detected in ${TABLE_SPANS} collection
1960
+ ===========================================================================
1961
+
1962
+ Found ${duplicateInfo.duplicateCount} duplicate (traceId, spanId) combinations.
1963
+
1964
+ The spans collection requires a unique index on (traceId, spanId), but your
1965
+ database contains duplicate entries that must be resolved first.
1966
+
1967
+ To fix this, run the manual migration command:
1968
+
1969
+ npx mastra migrate
1970
+
1971
+ This command will:
1972
+ 1. Remove duplicate spans (keeping the most complete/recent version)
1973
+ 2. Add the required unique index
1974
+
1975
+ Note: This migration may take some time for large collections.
1976
+ ===========================================================================
1977
+ `;
1978
+ throw new MastraError({
1979
+ id: createStorageErrorId("MONGODB", "MIGRATION_REQUIRED", "DUPLICATE_SPANS"),
1980
+ domain: ErrorDomain.STORAGE,
1981
+ category: ErrorCategory.USER,
1982
+ text: errorMessage
1983
+ });
1984
+ }
1985
+ }
1986
+ await this.createDefaultIndexes();
1987
+ await this.createCustomIndexes();
1988
+ }
1989
+ /**
1990
+ * Checks if the unique index on (spanId, traceId) already exists on the spans collection.
1991
+ * Used to skip deduplication when the index already exists (migration already complete).
1992
+ */
1993
+ async spansUniqueIndexExists() {
1994
+ try {
1995
+ const collection = await this.getCollection(TABLE_SPANS);
1996
+ const indexes = await collection.indexes();
1997
+ return indexes.some((idx) => idx.unique === true && idx.key?.spanId === 1 && idx.key?.traceId === 1);
1998
+ } catch {
1999
+ return false;
2000
+ }
2001
+ }
2002
+ /**
2003
+ * Checks for duplicate (traceId, spanId) combinations in the spans collection.
2004
+ * Returns information about duplicates for logging/CLI purposes.
2005
+ */
2006
+ async checkForDuplicateSpans() {
2007
+ try {
2008
+ const collection = await this.getCollection(TABLE_SPANS);
2009
+ const result = await collection.aggregate([
2010
+ {
2011
+ $group: {
2012
+ _id: { traceId: "$traceId", spanId: "$spanId" },
2013
+ count: { $sum: 1 }
2014
+ }
2015
+ },
2016
+ { $match: { count: { $gt: 1 } } },
2017
+ { $count: "duplicateCount" }
2018
+ ]).toArray();
2019
+ const duplicateCount = result[0]?.duplicateCount ?? 0;
2020
+ return {
2021
+ hasDuplicates: duplicateCount > 0,
2022
+ duplicateCount
2023
+ };
2024
+ } catch (error) {
2025
+ this.logger?.debug?.(`Could not check for duplicates: ${error}`);
2026
+ return { hasDuplicates: false, duplicateCount: 0 };
2027
+ }
2028
+ }
2029
+ /**
2030
+ * Manually run the spans migration to deduplicate and add the unique index.
2031
+ * This is intended to be called from the CLI when duplicates are detected.
2032
+ *
2033
+ * @returns Migration result with status and details
2034
+ */
2035
+ async migrateSpans() {
2036
+ const indexExists = await this.spansUniqueIndexExists();
2037
+ if (indexExists) {
2038
+ return {
2039
+ success: true,
2040
+ alreadyMigrated: true,
2041
+ duplicatesRemoved: 0,
2042
+ message: `Migration already complete. Unique index exists on ${TABLE_SPANS} collection.`
2043
+ };
2044
+ }
2045
+ const duplicateInfo = await this.checkForDuplicateSpans();
2046
+ if (duplicateInfo.hasDuplicates) {
2047
+ this.logger?.info?.(
2048
+ `Found ${duplicateInfo.duplicateCount} duplicate (traceId, spanId) combinations. Starting deduplication...`
2049
+ );
2050
+ await this.deduplicateSpans();
2051
+ } else {
2052
+ this.logger?.info?.(`No duplicate spans found.`);
2053
+ }
1909
2054
  await this.createDefaultIndexes();
1910
2055
  await this.createCustomIndexes();
2056
+ return {
2057
+ success: true,
2058
+ alreadyMigrated: false,
2059
+ duplicatesRemoved: duplicateInfo.duplicateCount,
2060
+ message: duplicateInfo.hasDuplicates ? `Migration complete. Removed duplicates and added unique index to ${TABLE_SPANS} collection.` : `Migration complete. Added unique index to ${TABLE_SPANS} collection.`
2061
+ };
2062
+ }
2063
+ /**
2064
+ * Check migration status for the spans collection.
2065
+ * Returns information about whether migration is needed.
2066
+ */
2067
+ async checkSpansMigrationStatus() {
2068
+ const indexExists = await this.spansUniqueIndexExists();
2069
+ if (indexExists) {
2070
+ return {
2071
+ needsMigration: false,
2072
+ hasDuplicates: false,
2073
+ duplicateCount: 0,
2074
+ constraintExists: true,
2075
+ tableName: TABLE_SPANS
2076
+ };
2077
+ }
2078
+ const duplicateInfo = await this.checkForDuplicateSpans();
2079
+ return {
2080
+ needsMigration: true,
2081
+ hasDuplicates: duplicateInfo.hasDuplicates,
2082
+ duplicateCount: duplicateInfo.duplicateCount,
2083
+ constraintExists: false,
2084
+ tableName: TABLE_SPANS
2085
+ };
2086
+ }
2087
+ /**
2088
+ * Deduplicates spans with the same (traceId, spanId) combination.
2089
+ * This is needed for databases that existed before the unique constraint was added.
2090
+ *
2091
+ * Priority for keeping spans:
2092
+ * 1. Completed spans (endedAt IS NOT NULL) over incomplete spans
2093
+ * 2. Most recent updatedAt
2094
+ * 3. Most recent createdAt (as tiebreaker)
2095
+ *
2096
+ * Note: This prioritizes migration completion over perfect data preservation.
2097
+ * Old trace data may be lost, which is acceptable for this use case.
2098
+ */
2099
+ async deduplicateSpans() {
2100
+ try {
2101
+ const collection = await this.getCollection(TABLE_SPANS);
2102
+ const duplicateCheck = await collection.aggregate([
2103
+ {
2104
+ $group: {
2105
+ _id: { traceId: "$traceId", spanId: "$spanId" },
2106
+ count: { $sum: 1 }
2107
+ }
2108
+ },
2109
+ { $match: { count: { $gt: 1 } } },
2110
+ { $limit: 1 }
2111
+ ]).toArray();
2112
+ if (duplicateCheck.length === 0) {
2113
+ this.logger?.debug?.("No duplicate spans found");
2114
+ return;
2115
+ }
2116
+ this.logger?.info?.("Duplicate spans detected, starting deduplication...");
2117
+ const idsToDelete = await collection.aggregate([
2118
+ // Sort by priority (affects which document $first picks within each group)
2119
+ {
2120
+ $sort: {
2121
+ // Completed spans first (endedAt exists and is not null)
2122
+ endedAt: -1,
2123
+ updatedAt: -1,
2124
+ createdAt: -1
2125
+ }
2126
+ },
2127
+ // Group by (traceId, spanId), keeping the first (best) _id and all _ids
2128
+ {
2129
+ $group: {
2130
+ _id: { traceId: "$traceId", spanId: "$spanId" },
2131
+ keepId: { $first: "$_id" },
2132
+ // The best one to keep (after sort)
2133
+ allIds: { $push: "$_id" },
2134
+ // All ObjectIds (just 12 bytes each, not full docs)
2135
+ count: { $sum: 1 }
2136
+ }
2137
+ },
2138
+ // Only consider groups with duplicates
2139
+ { $match: { count: { $gt: 1 } } },
2140
+ // Get IDs to delete (allIds minus keepId)
2141
+ {
2142
+ $project: {
2143
+ idsToDelete: {
2144
+ $filter: {
2145
+ input: "$allIds",
2146
+ cond: { $ne: ["$$this", "$keepId"] }
2147
+ }
2148
+ }
2149
+ }
2150
+ },
2151
+ // Unwind to get flat list of IDs
2152
+ { $unwind: "$idsToDelete" },
2153
+ // Just output the ID
2154
+ { $project: { _id: "$idsToDelete" } }
2155
+ ]).toArray();
2156
+ if (idsToDelete.length === 0) {
2157
+ this.logger?.debug?.("No duplicates to delete after aggregation");
2158
+ return;
2159
+ }
2160
+ const deleteResult = await collection.deleteMany({
2161
+ _id: { $in: idsToDelete.map((d) => d._id) }
2162
+ });
2163
+ this.logger?.info?.(`Deduplication complete: removed ${deleteResult.deletedCount} duplicate spans`);
2164
+ } catch (error) {
2165
+ this.logger?.warn?.("Failed to deduplicate spans:", error);
2166
+ }
1911
2167
  }
1912
2168
  async dangerouslyClearAll() {
1913
2169
  const collection = await this.getCollection(TABLE_SPANS);
@@ -2189,7 +2445,7 @@ var ObservabilityMongoDB = class _ObservabilityMongoDB extends ObservabilityStor
2189
2445
  // No children with errors
2190
2446
  }
2191
2447
  ];
2192
- const countResult = await collection.aggregate([...pipeline, { $count: "total" }]).toArray();
2448
+ const countResult = await collection.aggregate([...pipeline, { $count: "total" }], { allowDiskUse: true }).toArray();
2193
2449
  const count2 = countResult[0]?.total || 0;
2194
2450
  if (count2 === 0) {
2195
2451
  return {
@@ -2226,7 +2482,7 @@ var ObservabilityMongoDB = class _ObservabilityMongoDB extends ObservabilityStor
2226
2482
  { $project: { _errorSpans: 0 } }
2227
2483
  ];
2228
2484
  }
2229
- const spans2 = await collection.aggregate(aggregationPipeline).toArray();
2485
+ const spans2 = await collection.aggregate(aggregationPipeline, { allowDiskUse: true }).toArray();
2230
2486
  return {
2231
2487
  pagination: {
2232
2488
  total: count2,
@@ -2252,18 +2508,21 @@ var ObservabilityMongoDB = class _ObservabilityMongoDB extends ObservabilityStor
2252
2508
  let spans;
2253
2509
  if (sortField === "endedAt") {
2254
2510
  const nullSortValue = sortDirection === -1 ? 0 : 1;
2255
- spans = await collection.aggregate([
2256
- { $match: mongoFilter },
2257
- {
2258
- $addFields: {
2259
- _endedAtNull: { $cond: [{ $eq: ["$endedAt", null] }, nullSortValue, sortDirection === -1 ? 1 : 0] }
2260
- }
2261
- },
2262
- { $sort: { _endedAtNull: 1, [sortField]: sortDirection } },
2263
- { $skip: page * perPage },
2264
- { $limit: perPage },
2265
- { $project: { _endedAtNull: 0 } }
2266
- ]).toArray();
2511
+ spans = await collection.aggregate(
2512
+ [
2513
+ { $match: mongoFilter },
2514
+ {
2515
+ $addFields: {
2516
+ _endedAtNull: { $cond: [{ $eq: ["$endedAt", null] }, nullSortValue, sortDirection === -1 ? 1 : 0] }
2517
+ }
2518
+ },
2519
+ { $sort: { _endedAtNull: 1, [sortField]: sortDirection } },
2520
+ { $skip: page * perPage },
2521
+ { $limit: perPage },
2522
+ { $project: { _endedAtNull: 0 } }
2523
+ ],
2524
+ { allowDiskUse: true }
2525
+ ).toArray();
2267
2526
  } else {
2268
2527
  spans = await collection.find(mongoFilter).sort({ [sortField]: sortDirection }).skip(page * perPage).limit(perPage).toArray();
2269
2528
  }
@@ -2579,7 +2838,7 @@ var ScoresStorageMongoDB = class _ScoresStorageMongoDB extends ScoresStorage {
2579
2838
  };
2580
2839
  }
2581
2840
  const end = perPageInput === false ? total : start + perPage;
2582
- let cursor = collection.find(query).sort({ createdAt: "desc" }).skip(start);
2841
+ let cursor = collection.find(query).sort({ createdAt: -1 }).skip(start);
2583
2842
  if (perPageInput !== false) {
2584
2843
  cursor = cursor.limit(perPage);
2585
2844
  }
@@ -2628,7 +2887,7 @@ var ScoresStorageMongoDB = class _ScoresStorageMongoDB extends ScoresStorage {
2628
2887
  };
2629
2888
  }
2630
2889
  const end = perPageInput === false ? total : start + perPage;
2631
- let cursor = collection.find({ runId }).sort({ createdAt: "desc" }).skip(start);
2890
+ let cursor = collection.find({ runId }).sort({ createdAt: -1 }).skip(start);
2632
2891
  if (perPageInput !== false) {
2633
2892
  cursor = cursor.limit(perPage);
2634
2893
  }
@@ -2678,7 +2937,7 @@ var ScoresStorageMongoDB = class _ScoresStorageMongoDB extends ScoresStorage {
2678
2937
  };
2679
2938
  }
2680
2939
  const end = perPageInput === false ? total : start + perPage;
2681
- let cursor = collection.find({ entityId, entityType }).sort({ createdAt: "desc" }).skip(start);
2940
+ let cursor = collection.find({ entityId, entityType }).sort({ createdAt: -1 }).skip(start);
2682
2941
  if (perPageInput !== false) {
2683
2942
  cursor = cursor.limit(perPage);
2684
2943
  }
@@ -2729,7 +2988,7 @@ var ScoresStorageMongoDB = class _ScoresStorageMongoDB extends ScoresStorage {
2729
2988
  };
2730
2989
  }
2731
2990
  const end = perPageInput === false ? total : start + perPage;
2732
- let cursor = collection.find(query).sort({ createdAt: "desc" }).skip(start);
2991
+ let cursor = collection.find(query).sort({ createdAt: -1 }).skip(start);
2733
2992
  if (perPageInput !== false) {
2734
2993
  cursor = cursor.limit(perPage);
2735
2994
  }
@@ -3022,7 +3281,7 @@ var WorkflowsStorageMongoDB = class _WorkflowsStorageMongoDB extends WorkflowsSt
3022
3281
  try {
3023
3282
  parsedSnapshot = typeof row.snapshot === "string" ? safelyParseJSON(row.snapshot) : row.snapshot;
3024
3283
  } catch (e) {
3025
- console.warn(`Failed to parse snapshot for workflow ${row.workflow_name}: ${e}`);
3284
+ this.logger.warn(`Failed to parse snapshot for workflow ${row.workflow_name}: ${e}`);
3026
3285
  }
3027
3286
  }
3028
3287
  return {
@@ -3037,7 +3296,7 @@ var WorkflowsStorageMongoDB = class _WorkflowsStorageMongoDB extends WorkflowsSt
3037
3296
  };
3038
3297
 
3039
3298
  // src/storage/index.ts
3040
- var MongoDBStore = class extends MastraStorage {
3299
+ var MongoDBStore = class extends MastraCompositeStore {
3041
3300
  #connector;
3042
3301
  stores;
3043
3302
  constructor(config) {