@pixagram/lacerta-db 0.13.1 → 0.13.3

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/index.js CHANGED
@@ -439,7 +439,15 @@ class LRUCache {
439
439
 
440
440
  delete(key) { return this._cache.delete(key); }
441
441
  clear() { this._cache.clear(); }
442
- has(key) { return this.get(key) !== null; }
442
+ has(key) {
443
+ const item = this._cache.get(key);
444
+ if (!item) return false;
445
+ if (this._ttl && (Date.now() - item.ts > this._ttl)) {
446
+ this._cache.delete(key);
447
+ return false;
448
+ }
449
+ return true;
450
+ }
443
451
  get size() { return this._cache.size; }
444
452
  }
445
453
 
@@ -755,18 +763,38 @@ class BrowserCompressionUtility {
755
763
  }
756
764
  }
757
765
 
766
+ /**
767
+ * Synchronous "compression" — real CompressionStream requires async,
768
+ * so we prepend the raw marker (0x00) for format compatibility with
769
+ * the async decompress path. Without this marker, a doc packed via
770
+ * packSync could be silently misread by the async unpack path.
771
+ */
758
772
  compressSync(input) {
759
773
  if (!(input instanceof Uint8Array)) {
760
774
  throw new TypeError('Input must be Uint8Array');
761
775
  }
762
- return input;
776
+ const result = new Uint8Array(input.byteLength + 1);
777
+ result[0] = 0x00; // Raw marker
778
+ result.set(input, 1);
779
+ return result;
763
780
  }
764
781
 
765
782
  decompressSync(input) {
766
783
  if (!(input instanceof Uint8Array)) {
767
784
  throw new TypeError('Input must be Uint8Array');
768
785
  }
769
- return input;
786
+ if (input.length === 0) return input;
787
+
788
+ const marker = input[0];
789
+ if (marker === 0x01) {
790
+ // Deflate-compressed data — can't decompress synchronously
791
+ throw new LacertaDBError(
792
+ 'Cannot synchronously decompress deflate data. Use async unpack() instead.',
793
+ 'SYNC_DECOMPRESS_NOT_SUPPORTED'
794
+ );
795
+ }
796
+ // 0x00 (raw) or legacy (no marker): strip marker if present
797
+ return marker === 0x00 ? input.slice(1) : input;
770
798
  }
771
799
  }
772
800
 
@@ -1263,6 +1291,36 @@ class QuadTree {
1263
1291
  }
1264
1292
  }
1265
1293
 
1294
+ /**
1295
+ * Targeted removal: navigate to the quad containing (x, y) and remove
1296
+ * the point with matching data. O(log n) instead of O(n) full scan.
1297
+ * @param {number} x - Longitude
1298
+ * @param {number} y - Latitude
1299
+ * @param {*} id - Document ID
1300
+ * @returns {boolean} true if removed
1301
+ */
1302
+ removeAt(x, y, id) {
1303
+ if (!this._contains(this.boundary, { x, y })) return false;
1304
+
1305
+ // Try to remove from this node's points
1306
+ for (let i = 0; i < this.points.length; i++) {
1307
+ if (this.points[i].data === id) {
1308
+ this.points.splice(i, 1);
1309
+ return true;
1310
+ }
1311
+ }
1312
+
1313
+ // Navigate into the correct child quad
1314
+ if (this.divided) {
1315
+ return this.northeast.removeAt(x, y, id) ||
1316
+ this.northwest.removeAt(x, y, id) ||
1317
+ this.southeast.removeAt(x, y, id) ||
1318
+ this.southwest.removeAt(x, y, id);
1319
+ }
1320
+
1321
+ return false;
1322
+ }
1323
+
1266
1324
  _subdivide() {
1267
1325
  const {x, y, w, h} = this.boundary;
1268
1326
  const mw = w/2;
@@ -1390,18 +1448,27 @@ class BTreeNode {
1390
1448
  let i = this.n - 1;
1391
1449
 
1392
1450
  if (this.leaf) {
1451
+ // Search first, shift only if the key is truly new.
1452
+ // The old code shifted while scanning, which corrupted the array
1453
+ // when the key already existed at a lower index: entries between
1454
+ // the key's position and n-1 were duplicated rightward, leaving
1455
+ // stale copies inside the valid range.
1393
1456
  while (i >= 0 && _btreeCmp(this.keys[i], key) > 0) {
1394
- this.keys[i + 1] = this.keys[i];
1395
- this.values[i + 1] = this.values[i];
1396
1457
  i--;
1397
1458
  }
1398
1459
 
1399
1460
  if (i >= 0 && _btreeCmp(this.keys[i], key) === 0) {
1461
+ // Key exists — merge value into existing Set (no shift needed)
1400
1462
  if (!this.values[i]) {
1401
1463
  this.values[i] = new Set();
1402
1464
  }
1403
1465
  this.values[i].add(value);
1404
1466
  } else {
1467
+ // Key is new — shift entries right to open a slot at i+1
1468
+ for (let j = this.n - 1; j > i; j--) {
1469
+ this.keys[j + 1] = this.keys[j];
1470
+ this.values[j + 1] = this.values[j];
1471
+ }
1405
1472
  this.keys[i + 1] = key;
1406
1473
  this.values[i + 1] = new Set([value]);
1407
1474
  this.n++;
@@ -1706,7 +1773,7 @@ class BTreeNode {
1706
1773
  return this._remove(key, null, true);
1707
1774
  }
1708
1775
 
1709
- verify() {
1776
+ verify(isRoot = true) {
1710
1777
  const issues = [];
1711
1778
  for (let i = 0; i < this.n; i++) {
1712
1779
  if (this.keys[i] === undefined || this.keys[i] === null) {
@@ -1720,8 +1787,10 @@ class BTreeNode {
1720
1787
  }
1721
1788
  if (!this.leaf) {
1722
1789
  for (let i = 0; i <= this.n; i++) {
1723
- if (this.children[i]) {
1724
- const childIssues = this.children[i].verify();
1790
+ if (!this.children[i]) {
1791
+ issues.push(`Missing child at index ${i} (node has ${this.n} keys)`);
1792
+ } else {
1793
+ const childIssues = this.children[i].verify(false);
1725
1794
  issues.push(...childIssues);
1726
1795
  }
1727
1796
  }
@@ -1872,21 +1941,119 @@ class BTreeIndex {
1872
1941
  }
1873
1942
 
1874
1943
  /**
1875
- * Restore a BTreeIndex from persisted sorted entries.
1876
- * Much faster than full document scan + unpack.
1877
- * @param {Array} entries - [[key, [docId1, ...]], ...]
1944
+ * Restore a BTreeIndex from persisted sorted entries via O(n) bottom-up bulk-load.
1945
+ * Builds leaf nodes directly from sorted data, then constructs internal levels
1946
+ * using pre-extracted separators — no individual insert() calls, no comparisons,
1947
+ * and no child mutation (which avoids orphaning subtrees at depth ≥ 3).
1948
+ * @param {Array} entries - [[key, [docId1, ...]], ...] — MUST be sorted by key
1878
1949
  * @param {number} [order=4]
1879
1950
  * @returns {BTreeIndex}
1880
1951
  */
1881
1952
  static fromSortedEntries(entries, order = 4) {
1882
1953
  const tree = new BTreeIndex(order);
1954
+ if (entries.length === 0) return tree;
1955
+
1956
+ const maxKeys = 2 * order - 1;
1957
+
1958
+ // Filter out null/undefined keys and count total values for _size
1959
+ const clean = [];
1960
+ let totalSize = 0;
1883
1961
  for (let i = 0; i < entries.length; i++) {
1884
- const [key, values] = entries[i];
1885
- if (key === undefined || key === null) continue;
1886
- for (let j = 0; j < values.length; j++) {
1887
- tree.insert(key, values[j]);
1962
+ if (entries[i][0] !== undefined && entries[i][0] !== null) {
1963
+ clean.push(entries[i]);
1964
+ totalSize += entries[i][1].length;
1965
+ }
1966
+ }
1967
+
1968
+ if (clean.length === 0) return tree;
1969
+
1970
+ // ---------------------------------------------------------------
1971
+ // Step 1: Build leaf nodes AND pre-extract inter-leaf separators.
1972
+ //
1973
+ // In a B-tree (not B+) some keys live at internal nodes. We decide
1974
+ // up-front which entries become leaf data and which become separator
1975
+ // keys for parent nodes. This avoids the old promote-and-shift
1976
+ // approach which orphaned subtrees when children were non-leaf.
1977
+ //
1978
+ // Layout: L0 S0 L1 S1 ... S(n-2) L(n-1)
1979
+ // ↑leaf ↑sep ↑leaf ↑last leaf (no trailing sep)
1980
+ // ---------------------------------------------------------------
1981
+
1982
+ // How many leaves do we need?
1983
+ // N entries = numLeaves * leafEntries + (numLeaves - 1) separators
1984
+ // N = numLeaves * fill + numLeaves - 1 = numLeaves * (fill + 1) - 1
1985
+ // numLeaves = ceil((N + 1) / (maxKeys + 1))
1986
+ const numLeaves = clean.length <= maxKeys
1987
+ ? 1
1988
+ : Math.ceil((clean.length + 1) / (maxKeys + 1));
1989
+
1990
+ // Distribute entries among leaves as evenly as possible
1991
+ const totalLeafEntries = clean.length - (numLeaves - 1); // subtract separator slots
1992
+ const basePerLeaf = Math.floor(totalLeafEntries / numLeaves);
1993
+ const extraLeaves = totalLeafEntries - basePerLeaf * numLeaves;
1994
+
1995
+ const leaves = [];
1996
+ const separators = [];
1997
+ let pos = 0;
1998
+
1999
+ for (let li = 0; li < numLeaves; li++) {
2000
+ const count = basePerLeaf + (li < extraLeaves ? 1 : 0);
2001
+ const node = new BTreeNode(order, true);
2002
+
2003
+ for (let j = 0; j < count; j++) {
2004
+ const [key, values] = clean[pos++];
2005
+ node.keys[j] = key;
2006
+ node.values[j] = new Set(values);
2007
+ node.n++;
2008
+ }
2009
+ leaves.push(node);
2010
+
2011
+ // Extract separator between this leaf and the next (not after the last)
2012
+ if (li < numLeaves - 1) {
2013
+ separators.push(clean[pos++]);
2014
+ }
2015
+ }
2016
+
2017
+ // ---------------------------------------------------------------
2018
+ // Step 2: Build internal levels bottom-up using pre-extracted
2019
+ // separators. Children are never mutated, so no subtrees are lost.
2020
+ // ---------------------------------------------------------------
2021
+ let level = leaves;
2022
+ let seps = separators;
2023
+
2024
+ while (level.length > 1) {
2025
+ const parents = [];
2026
+ const nextSeps = [];
2027
+ let ci = 0; // child index into level
2028
+ let si = 0; // separator index into seps
2029
+
2030
+ while (ci < level.length) {
2031
+ const parent = new BTreeNode(order, false);
2032
+ parent.children[0] = level[ci++];
2033
+
2034
+ // Attach children with their pre-extracted separators
2035
+ while (parent.n < maxKeys && si < seps.length && ci < level.length) {
2036
+ const [sepKey, sepValues] = seps[si++];
2037
+ parent.keys[parent.n] = sepKey;
2038
+ parent.values[parent.n] = new Set(sepValues);
2039
+ parent.children[parent.n + 1] = level[ci++];
2040
+ parent.n++;
2041
+ }
2042
+
2043
+ parents.push(parent);
2044
+
2045
+ // Extract separator between this parent and the next
2046
+ if (ci < level.length && si < seps.length) {
2047
+ nextSeps.push(seps[si++]);
2048
+ }
1888
2049
  }
2050
+
2051
+ level = parents;
2052
+ seps = nextSeps;
1889
2053
  }
2054
+
2055
+ tree._root = level[0];
2056
+ tree._size = totalSize;
1890
2057
  return tree;
1891
2058
  }
1892
2059
  }
@@ -1991,6 +2158,63 @@ class TextIndex {
1991
2158
  get size() {
1992
2159
  return this._docTokens.size;
1993
2160
  }
2161
+
2162
+ /**
2163
+ * Export index state for persistence.
2164
+ * Only the inverted index is stored — _docTokens is derived on restore.
2165
+ * Format: { invertedIndex: [[token, [docId, ...]], ...] }
2166
+ * @returns {Object}
2167
+ */
2168
+ toSerializable() {
2169
+ const invertedIndex = [];
2170
+ for (const [token, docIds] of this._invertedIndex) {
2171
+ invertedIndex.push([token, Array.from(docIds)]);
2172
+ }
2173
+ return { invertedIndex };
2174
+ }
2175
+
2176
+ /**
2177
+ * Restore a TextIndex from persisted data.
2178
+ * Rebuilds both _invertedIndex and _docTokens from the serialized inverted index.
2179
+ * @param {Object} data - { invertedIndex: [[token, [docId, ...]], ...] }
2180
+ * @returns {TextIndex}
2181
+ */
2182
+ static fromSerialized(data) {
2183
+ const idx = new TextIndex();
2184
+ if (!data || !Array.isArray(data.invertedIndex)) return idx;
2185
+
2186
+ for (const [token, docIds] of data.invertedIndex) {
2187
+ const docSet = new Set(docIds);
2188
+ idx._invertedIndex.set(token, docSet);
2189
+ // Derive _docTokens from inverted index
2190
+ for (const docId of docIds) {
2191
+ if (!idx._docTokens.has(docId)) idx._docTokens.set(docId, new Set());
2192
+ idx._docTokens.get(docId).add(token);
2193
+ }
2194
+ }
2195
+ return idx;
2196
+ }
2197
+
2198
+ /**
2199
+ * Verify index integrity by cross-checking _invertedIndex against _docTokens.
2200
+ * @returns {{ healthy: boolean, issues: Array, requiresRebuild: boolean }}
2201
+ */
2202
+ verify() {
2203
+ const issues = [];
2204
+ // Cross-check: every docId in _invertedIndex must appear in _docTokens
2205
+ for (const [token, docIds] of this._invertedIndex) {
2206
+ for (const docId of docIds) {
2207
+ if (!this._docTokens.has(docId)) {
2208
+ issues.push(`Token '${token}' references unknown docId '${docId}'`);
2209
+ }
2210
+ }
2211
+ }
2212
+ return {
2213
+ healthy: issues.length === 0,
2214
+ issues,
2215
+ requiresRebuild: issues.length > 0
2216
+ };
2217
+ }
1994
2218
  }
1995
2219
 
1996
2220
  // ========================
@@ -2001,6 +2225,8 @@ class GeoIndex {
2001
2225
  constructor() {
2002
2226
  this._tree = new QuadTree({x: 0, y: 0, w: 180, h: 90});
2003
2227
  this._size = 0;
2228
+ // Coordinate lookup: docId → {x, y} for O(log n) targeted removal
2229
+ this._pointLookup = new Map();
2004
2230
  }
2005
2231
 
2006
2232
  addPoint(coords, docId) {
@@ -2008,11 +2234,20 @@ class GeoIndex {
2008
2234
  return;
2009
2235
  }
2010
2236
  this._tree.insert({x: coords.lng, y: coords.lat, data: docId});
2237
+ this._pointLookup.set(docId, { x: coords.lng, y: coords.lat });
2011
2238
  this._size++;
2012
2239
  }
2013
2240
 
2014
2241
  removePoint(docId) {
2015
- this._tree.remove(docId);
2242
+ const coords = this._pointLookup.get(docId);
2243
+ if (coords) {
2244
+ // Targeted removal: navigate to the correct quad and remove there
2245
+ this._tree.removeAt(coords.x, coords.y, docId);
2246
+ this._pointLookup.delete(docId);
2247
+ } else {
2248
+ // Fallback: full-tree scan (shouldn't happen if data is consistent)
2249
+ this._tree.remove(docId);
2250
+ }
2016
2251
  if (this._size > 0) this._size--;
2017
2252
  }
2018
2253
 
@@ -2084,6 +2319,178 @@ class GeoIndex {
2084
2319
  get size() {
2085
2320
  return this._size;
2086
2321
  }
2322
+
2323
+ /**
2324
+ * Export all points for persistence.
2325
+ * Uses Float64Array for coordinates (TurboSerial handles TypedArrays natively).
2326
+ * Format: { coords: Float64Array([lng0, lat0, lng1, lat1, ...]), docIds: [id0, id1, ...] }
2327
+ * @returns {Object}
2328
+ */
2329
+ toSerializable() {
2330
+ const points = [];
2331
+ this._collectAllPoints(this._tree, points);
2332
+
2333
+ const coords = new Float64Array(points.length * 2);
2334
+ const docIds = new Array(points.length);
2335
+
2336
+ for (let i = 0; i < points.length; i++) {
2337
+ coords[i * 2] = points[i].x; // lng
2338
+ coords[i * 2 + 1] = points[i].y; // lat
2339
+ docIds[i] = points[i].data; // docId
2340
+ }
2341
+
2342
+ return { coords, docIds };
2343
+ }
2344
+
2345
+ /**
2346
+ * Recursively collect all points from the QuadTree.
2347
+ * @param {QuadTree} node
2348
+ * @param {Array} points
2349
+ * @private
2350
+ */
2351
+ _collectAllPoints(node, points) {
2352
+ for (const p of node.points) {
2353
+ points.push(p);
2354
+ }
2355
+ if (node.divided) {
2356
+ this._collectAllPoints(node.northeast, points);
2357
+ this._collectAllPoints(node.northwest, points);
2358
+ this._collectAllPoints(node.southeast, points);
2359
+ this._collectAllPoints(node.southwest, points);
2360
+ }
2361
+ }
2362
+
2363
+ /**
2364
+ * Restore a GeoIndex from persisted data.
2365
+ * @param {Object} data - { coords: Float64Array, docIds: Array }
2366
+ * @returns {GeoIndex}
2367
+ */
2368
+ static fromSerialized(data) {
2369
+ const idx = new GeoIndex();
2370
+ if (!data || !data.coords || !data.docIds) return idx;
2371
+
2372
+ const { coords, docIds } = data;
2373
+ for (let i = 0; i < docIds.length; i++) {
2374
+ idx.addPoint(
2375
+ { lng: coords[i * 2], lat: coords[i * 2 + 1] },
2376
+ docIds[i]
2377
+ );
2378
+ }
2379
+ return idx;
2380
+ }
2381
+
2382
+ /**
2383
+ * Verify index integrity.
2384
+ * @returns {{ healthy: boolean, issues: Array, requiresRebuild: boolean }}
2385
+ */
2386
+ verify() {
2387
+ // Verify size consistency: count all points vs _size
2388
+ const points = [];
2389
+ this._collectAllPoints(this._tree, points);
2390
+ const issues = [];
2391
+ if (points.length !== this._size) {
2392
+ issues.push(`Size mismatch: _size=${this._size}, actual=${points.length}`);
2393
+ }
2394
+ return {
2395
+ healthy: issues.length === 0,
2396
+ issues,
2397
+ requiresRebuild: issues.length > 0
2398
+ };
2399
+ }
2400
+ }
2401
+
2402
+ // ========================
2403
+ // Index Manager (Cursor Optimized)
2404
+ // ========================
2405
+
2406
+ // ========================
2407
+ // Hash Index (O(1) Lookup)
2408
+ // ========================
2409
+
2410
+ class HashIndex {
2411
+ constructor() {
2412
+ this._map = new Map(); // value → Set<docId>
2413
+ }
2414
+
2415
+ insert(value, docId) {
2416
+ let bucket = this._map.get(value);
2417
+ if (!bucket) {
2418
+ bucket = new Set();
2419
+ this._map.set(value, bucket);
2420
+ }
2421
+ bucket.add(docId);
2422
+ }
2423
+
2424
+ find(value) {
2425
+ const bucket = this._map.get(value);
2426
+ return bucket ? Array.from(bucket) : [];
2427
+ }
2428
+
2429
+ remove(value, docId) {
2430
+ const bucket = this._map.get(value);
2431
+ if (bucket) {
2432
+ bucket.delete(docId);
2433
+ if (bucket.size === 0) this._map.delete(value);
2434
+ }
2435
+ }
2436
+
2437
+ has(value) {
2438
+ return this._map.has(value);
2439
+ }
2440
+
2441
+ clear() {
2442
+ this._map.clear();
2443
+ }
2444
+
2445
+ get size() {
2446
+ let count = 0;
2447
+ for (const bucket of this._map.values()) count += bucket.size;
2448
+ return count;
2449
+ }
2450
+
2451
+ /**
2452
+ * Export for persistence. Format: [[value, [docId, ...]], ...]
2453
+ * @returns {Array}
2454
+ */
2455
+ toSerializable() {
2456
+ const entries = [];
2457
+ for (const [value, docIds] of this._map) {
2458
+ entries.push([value, Array.from(docIds)]);
2459
+ }
2460
+ return entries;
2461
+ }
2462
+
2463
+ /**
2464
+ * Restore from persisted data.
2465
+ * @param {Array} entries - [[value, [docId, ...]], ...]
2466
+ * @returns {HashIndex}
2467
+ */
2468
+ static fromSerialized(entries) {
2469
+ const idx = new HashIndex();
2470
+ if (!Array.isArray(entries)) return idx;
2471
+ for (const [value, docIds] of entries) {
2472
+ idx._map.set(value, new Set(docIds));
2473
+ }
2474
+ return idx;
2475
+ }
2476
+
2477
+ /**
2478
+ * Verify index integrity.
2479
+ * @returns {{ healthy: boolean, issues: Array, requiresRebuild: boolean }}
2480
+ */
2481
+ verify() {
2482
+ const issues = [];
2483
+ for (const [value, bucket] of this._map) {
2484
+ if (!(bucket instanceof Set)) {
2485
+ issues.push(`Value '${value}' has non-Set bucket`);
2486
+ }
2487
+ }
2488
+ return {
2489
+ healthy: issues.length === 0,
2490
+ issues,
2491
+ requiresRebuild: issues.length > 0
2492
+ };
2493
+ }
2087
2494
  }
2088
2495
 
2089
2496
  // ========================
@@ -2207,14 +2614,17 @@ class IndexManager {
2207
2614
  }
2208
2615
 
2209
2616
  /**
2210
- * FAST PATH: Restore a BTree index from persisted entries stored in IDB.
2617
+ * FAST PATH: Restore an index from persisted entries stored in IDB.
2211
2618
  * Returns true if successful, false if persisted data is missing/corrupt.
2619
+ *
2620
+ * Supported types: btree, text, geo, hash
2621
+ *
2212
2622
  * @param {string} indexName
2213
2623
  * @returns {Promise<boolean>}
2214
2624
  */
2215
2625
  async _restoreIndex(indexName) {
2216
2626
  const index = this._indexes.get(indexName);
2217
- if (!index || index.type !== 'btree') return false;
2627
+ if (!index) return false;
2218
2628
 
2219
2629
  try {
2220
2630
  const docId = `${IndexManager.IDX_PREFIX}${indexName}`;
@@ -2222,21 +2632,66 @@ class IndexManager {
2222
2632
  this._collection._db, this._collection._storeName, docId
2223
2633
  );
2224
2634
 
2225
- if (!stored || !stored._entries || !Array.isArray(stored._entries)) {
2635
+ if (!stored) return false;
2636
+
2637
+ // Type guard: reject if persisted type doesn't match current definition
2638
+ if (stored._type && stored._type !== index.type) {
2639
+ console.warn(`[IndexManager] Type mismatch for '${indexName}': stored=${stored._type}, expected=${index.type}`);
2226
2640
  return false;
2227
2641
  }
2228
2642
 
2229
- // Restore B-Tree from sorted entries — no document scanning needed
2230
- const btree = BTreeIndex.fromSortedEntries(stored._entries, 4);
2643
+ let restored = null;
2231
2644
 
2232
- // Quick sanity check
2233
- const v = btree.verify();
2234
- if (!v.healthy) {
2235
- console.warn(`[IndexManager] Persisted index '${indexName}' is corrupt, will rebuild`);
2236
- return false;
2645
+ switch (index.type) {
2646
+ case 'btree': {
2647
+ if (!stored._entries || !Array.isArray(stored._entries)) return false;
2648
+ restored = BTreeIndex.fromSortedEntries(stored._entries, 4);
2649
+ const v = restored.verify();
2650
+ if (!v.healthy) {
2651
+ console.warn(`[IndexManager] Persisted btree '${indexName}' is corrupt, will rebuild`);
2652
+ return false;
2653
+ }
2654
+ break;
2655
+ }
2656
+
2657
+ case 'text': {
2658
+ if (!stored._data || !stored._data.invertedIndex) return false;
2659
+ restored = TextIndex.fromSerialized(stored._data);
2660
+ const v = restored.verify();
2661
+ if (!v.healthy) {
2662
+ console.warn(`[IndexManager] Persisted text index '${indexName}' is corrupt, will rebuild`);
2663
+ return false;
2664
+ }
2665
+ break;
2666
+ }
2667
+
2668
+ case 'geo': {
2669
+ if (!stored._data || !stored._data.coords || !stored._data.docIds) return false;
2670
+ restored = GeoIndex.fromSerialized(stored._data);
2671
+ const v = restored.verify();
2672
+ if (!v.healthy) {
2673
+ console.warn(`[IndexManager] Persisted geo index '${indexName}' is corrupt, will rebuild`);
2674
+ return false;
2675
+ }
2676
+ break;
2677
+ }
2678
+
2679
+ case 'hash': {
2680
+ if (!stored._entries || !Array.isArray(stored._entries)) return false;
2681
+ restored = HashIndex.fromSerialized(stored._entries);
2682
+ const v = restored.verify();
2683
+ if (!v.healthy) {
2684
+ console.warn(`[IndexManager] Persisted hash index '${indexName}' is corrupt, will rebuild`);
2685
+ return false;
2686
+ }
2687
+ break;
2688
+ }
2689
+
2690
+ default:
2691
+ return false;
2237
2692
  }
2238
2693
 
2239
- this._indexData.set(indexName, btree);
2694
+ this._indexData.set(indexName, restored);
2240
2695
  return true;
2241
2696
  } catch (e) {
2242
2697
  return false;
@@ -2244,23 +2699,56 @@ class IndexManager {
2244
2699
  }
2245
2700
 
2246
2701
  /**
2247
- * Persist a single BTree index's entries to IDB.
2702
+ * Persist a single index's entries to IDB.
2248
2703
  * Stored as a document with reserved _id in the existing 'documents' store.
2704
+ *
2705
+ * Supported types:
2706
+ * btree → toSortedEntries()
2707
+ * text → TextIndex.toSerializable()
2708
+ * geo → GeoIndex.toSerializable() (Float64Array coords)
2709
+ * hash → [[value, [docId, ...]], ...]
2710
+ *
2249
2711
  * @param {string} indexName
2250
2712
  */
2251
2713
  async _persistIndex(indexName) {
2252
2714
  const indexData = this._indexData.get(indexName);
2253
- if (!indexData || !(indexData instanceof BTreeIndex)) return;
2715
+ const index = this._indexes.get(indexName);
2716
+ if (!indexData || !index) return;
2254
2717
 
2255
2718
  try {
2256
2719
  const docId = `${IndexManager.IDX_PREFIX}${indexName}`;
2257
2720
  const payload = {
2258
2721
  _id: docId,
2259
- _entries: indexData.toSortedEntries(),
2722
+ _type: index.type,
2260
2723
  _persisted_at: Date.now(),
2261
- _size: indexData.size
2724
+ _size: indexData.size || 0
2262
2725
  };
2263
2726
 
2727
+ switch (index.type) {
2728
+ case 'btree':
2729
+ if (!(indexData instanceof BTreeIndex)) return;
2730
+ payload._entries = indexData.toSortedEntries();
2731
+ break;
2732
+
2733
+ case 'text':
2734
+ if (!(indexData instanceof TextIndex)) return;
2735
+ payload._data = indexData.toSerializable();
2736
+ break;
2737
+
2738
+ case 'geo':
2739
+ if (!(indexData instanceof GeoIndex)) return;
2740
+ payload._data = indexData.toSerializable();
2741
+ break;
2742
+
2743
+ case 'hash':
2744
+ if (!(indexData instanceof HashIndex)) return;
2745
+ payload._entries = indexData.toSerializable();
2746
+ break;
2747
+
2748
+ default:
2749
+ return; // Unknown type, skip
2750
+ }
2751
+
2264
2752
  await this._collection._indexedDB.put(
2265
2753
  this._collection._db, this._collection._storeName, payload
2266
2754
  );
@@ -2308,13 +2796,13 @@ class IndexManager {
2308
2796
  case 'btree':
2309
2797
  return new BTreeIndex();
2310
2798
  case 'hash':
2311
- return new Map();
2799
+ return new HashIndex();
2312
2800
  case 'text':
2313
2801
  return new TextIndex();
2314
2802
  case 'geo':
2315
2803
  return new GeoIndex();
2316
2804
  default:
2317
- return new Map();
2805
+ return new HashIndex();
2318
2806
  }
2319
2807
  }
2320
2808
 
@@ -2324,10 +2812,7 @@ class IndexManager {
2324
2812
  indexData.insert(value, docId);
2325
2813
  break;
2326
2814
  case 'hash':
2327
- if (!indexData.has(value)) {
2328
- indexData.set(value, new Set());
2329
- }
2330
- indexData.get(value).add(docId);
2815
+ indexData.insert(value, docId);
2331
2816
  break;
2332
2817
  case 'text':
2333
2818
  indexData.addDocument(value, docId);
@@ -2365,17 +2850,8 @@ class IndexManager {
2365
2850
  if (newValue !== undefined) indexData.insert(newValue, docId);
2366
2851
  break;
2367
2852
  case 'hash':
2368
- if (oldValue !== undefined) {
2369
- const oldSet = indexData.get(oldValue);
2370
- if (oldSet) {
2371
- oldSet.delete(docId);
2372
- if (oldSet.size === 0) indexData.delete(oldValue);
2373
- }
2374
- }
2375
- if (newValue !== undefined) {
2376
- if (!indexData.has(newValue)) indexData.set(newValue, new Set());
2377
- indexData.get(newValue).add(docId);
2378
- }
2853
+ if (oldValue !== undefined) indexData.remove(oldValue, docId);
2854
+ if (newValue !== undefined) indexData.insert(newValue, docId);
2379
2855
  break;
2380
2856
  case 'text':
2381
2857
  if (oldValue || newValue) {
@@ -2388,10 +2864,8 @@ class IndexManager {
2388
2864
  break;
2389
2865
  }
2390
2866
 
2391
- // Schedule async persistence for modified btree indexes
2392
- if (index.type === 'btree') {
2393
- this._schedulePersist(indexName);
2394
- }
2867
+ // Schedule async persistence for modified indexes (all types)
2868
+ this._schedulePersist(indexName);
2395
2869
  }
2396
2870
  }
2397
2871
 
@@ -2457,17 +2931,14 @@ class IndexManager {
2457
2931
 
2458
2932
  _queryHash(indexData, options) {
2459
2933
  if (options.$eq !== undefined) {
2460
- const docs = indexData.get(options.$eq);
2461
- return docs ? Array.from(docs) : [];
2934
+ return indexData.find(options.$eq);
2462
2935
  }
2463
2936
 
2464
2937
  if (options.$in !== undefined) {
2465
2938
  const results = new Set();
2466
2939
  for (const value of options.$in) {
2467
- const docs = indexData.get(value);
2468
- if (docs) {
2469
- docs.forEach(doc => results.add(doc));
2470
- }
2940
+ const docs = indexData.find(value);
2941
+ for (let i = 0; i < docs.length; i++) results.add(docs[i]);
2471
2942
  }
2472
2943
  return Array.from(results);
2473
2944
  }
@@ -2583,13 +3054,8 @@ class IndexManager {
2583
3054
  const needsRebuild = [];
2584
3055
 
2585
3056
  for (const [indexName, index] of this._indexes) {
2586
- if (index.type === 'btree') {
2587
- const restored = await this._restoreIndex(indexName);
2588
- if (!restored) {
2589
- needsRebuild.push(indexName);
2590
- }
2591
- } else {
2592
- // Non-btree indexes (text, geo, hash) always need rebuild
3057
+ const restored = await this._restoreIndex(indexName);
3058
+ if (!restored) {
2593
3059
  needsRebuild.push(indexName);
2594
3060
  }
2595
3061
  }
@@ -2620,8 +3086,17 @@ class IndexManager {
2620
3086
 
2621
3087
  _estimateMemoryUsage(indexData) {
2622
3088
  if (!indexData) return 0;
2623
- if (indexData instanceof Map) return indexData.size * 100;
2624
3089
  if (indexData instanceof BTreeIndex) return indexData.size * 120;
3090
+ if (indexData instanceof TextIndex) {
3091
+ // Rough estimate: inverted index entries + docTokens forward map
3092
+ let bytes = 0;
3093
+ for (const [token, docIds] of indexData._invertedIndex) {
3094
+ bytes += token.length * 2 + docIds.size * 64;
3095
+ }
3096
+ return bytes;
3097
+ }
3098
+ if (indexData instanceof GeoIndex) return indexData.size * 80;
3099
+ if (indexData instanceof HashIndex) return indexData.size * 100;
2625
3100
  return 0;
2626
3101
  }
2627
3102
 
@@ -3158,11 +3633,31 @@ class CollectionMetadata {
3158
3633
  this.createdAt = data.createdAt || Date.now();
3159
3634
  this.modifiedAt = data.modifiedAt || Date.now();
3160
3635
 
3161
- // Per-document tracking (in-memory Maps for O(1) ops)
3162
- this._docSizes = new Map(data._docSizes || []); // docId -> sizeKB
3163
- this._docModified = new Map(data._docModified || []); // docId -> timestamp
3164
- this._docPermanent = new Map(data._docPermanent || []); // docId -> boolean
3165
- this._docAttachments = new Map(data._docAttachments || []); // docId -> count
3636
+ // Per-document tracking: single Map<docId, {size, modified, permanent, attachments}>
3637
+ // Cuts per-key Map overhead roughly 4× vs. four separate Maps with identical key sets.
3638
+ this._docMeta = new Map();
3639
+
3640
+ // Hydrate from persisted data (supports both old 4-map and new unified format)
3641
+ if (data._docMeta) {
3642
+ // New unified format
3643
+ for (const [docId, meta] of data._docMeta) {
3644
+ this._docMeta.set(docId, meta);
3645
+ }
3646
+ } else if (data._docSizes) {
3647
+ // Legacy 4-map format — migrate on load
3648
+ const sizes = new Map(data._docSizes);
3649
+ const modified = new Map(data._docModified || []);
3650
+ const permanent = new Map(data._docPermanent || []);
3651
+ const attachments = new Map(data._docAttachments || []);
3652
+ for (const [docId, size] of sizes) {
3653
+ this._docMeta.set(docId, {
3654
+ size,
3655
+ modified: modified.get(docId) || Date.now(),
3656
+ permanent: permanent.get(docId) || false,
3657
+ attachments: attachments.get(docId) || 0
3658
+ });
3659
+ }
3660
+ }
3166
3661
 
3167
3662
  // Debounced persistence
3168
3663
  this._dirty = false;
@@ -3177,10 +3672,12 @@ class CollectionMetadata {
3177
3672
  // ---- Mutations (in-memory only, schedule async save) ----
3178
3673
 
3179
3674
  addDocument(docId, sizeKB, isPermanent = false, attachmentCount = 0) {
3180
- this._docSizes.set(docId, sizeKB);
3181
- this._docModified.set(docId, Date.now());
3182
- this._docPermanent.set(docId, isPermanent);
3183
- this._docAttachments.set(docId, attachmentCount);
3675
+ this._docMeta.set(docId, {
3676
+ size: sizeKB,
3677
+ modified: Date.now(),
3678
+ permanent: isPermanent,
3679
+ attachments: attachmentCount
3680
+ });
3184
3681
 
3185
3682
  this.sizeKB += sizeKB;
3186
3683
  this.length++;
@@ -3189,27 +3686,28 @@ class CollectionMetadata {
3189
3686
  }
3190
3687
 
3191
3688
  updateDocument(docId, newSizeKB, isPermanent = false, attachmentCount = 0) {
3192
- const oldSize = this._docSizes.get(docId) || 0;
3689
+ const existing = this._docMeta.get(docId);
3690
+ const oldSize = existing ? existing.size : 0;
3193
3691
  this.sizeKB = this.sizeKB - oldSize + newSizeKB;
3194
3692
 
3195
- this._docSizes.set(docId, newSizeKB);
3196
- this._docModified.set(docId, Date.now());
3197
- this._docPermanent.set(docId, isPermanent);
3198
- this._docAttachments.set(docId, attachmentCount);
3693
+ this._docMeta.set(docId, {
3694
+ size: newSizeKB,
3695
+ modified: Date.now(),
3696
+ permanent: isPermanent,
3697
+ attachments: attachmentCount
3698
+ });
3199
3699
 
3200
3700
  this.modifiedAt = Date.now();
3201
3701
  this._scheduleSave();
3202
3702
  }
3203
3703
 
3204
3704
  removeDocument(docId) {
3205
- const sizeKB = this._docSizes.get(docId) || 0;
3705
+ const existing = this._docMeta.get(docId);
3706
+ const sizeKB = existing ? existing.size : 0;
3206
3707
  this.sizeKB -= sizeKB;
3207
3708
  this.length--;
3208
3709
 
3209
- this._docSizes.delete(docId);
3210
- this._docModified.delete(docId);
3211
- this._docPermanent.delete(docId);
3212
- this._docAttachments.delete(docId);
3710
+ this._docMeta.delete(docId);
3213
3711
 
3214
3712
  this.modifiedAt = Date.now();
3215
3713
  this._scheduleSave();
@@ -3219,9 +3717,9 @@ class CollectionMetadata {
3219
3717
 
3220
3718
  getOldestNonPermanentDocuments(count) {
3221
3719
  const candidates = [];
3222
- for (const [docId, modified] of this._docModified) {
3223
- if (!this._docPermanent.get(docId)) {
3224
- candidates.push({ id: docId, modified });
3720
+ for (const [docId, meta] of this._docMeta) {
3721
+ if (!meta.permanent) {
3722
+ candidates.push({ id: docId, modified: meta.modified });
3225
3723
  }
3226
3724
  }
3227
3725
  candidates.sort((a, b) => a.modified - b.modified);
@@ -3229,15 +3727,17 @@ class CollectionMetadata {
3229
3727
  }
3230
3728
 
3231
3729
  getDocumentSize(docId) {
3232
- return this._docSizes.get(docId) || 0;
3730
+ const meta = this._docMeta.get(docId);
3731
+ return meta ? meta.size : 0;
3233
3732
  }
3234
3733
 
3235
3734
  isDocumentPermanent(docId) {
3236
- return this._docPermanent.get(docId) || false;
3735
+ const meta = this._docMeta.get(docId);
3736
+ return meta ? meta.permanent : false;
3237
3737
  }
3238
3738
 
3239
3739
  hasDocument(docId) {
3240
- return this._docSizes.has(docId);
3740
+ return this._docMeta.has(docId);
3241
3741
  }
3242
3742
 
3243
3743
  // ---- Aggregate snapshot (for DatabaseMetadata) ----
@@ -3284,10 +3784,7 @@ class CollectionMetadata {
3284
3784
  length: this.length,
3285
3785
  createdAt: this.createdAt,
3286
3786
  modifiedAt: this.modifiedAt,
3287
- _docSizes: Array.from(this._docSizes.entries()),
3288
- _docModified: Array.from(this._docModified.entries()),
3289
- _docPermanent: Array.from(this._docPermanent.entries()),
3290
- _docAttachments: Array.from(this._docAttachments.entries())
3787
+ _docMeta: Array.from(this._docMeta.entries())
3291
3788
  };
3292
3789
 
3293
3790
  try {
@@ -3403,10 +3900,7 @@ class CollectionMetadata {
3403
3900
  this.sizeKB = 0;
3404
3901
  this.length = 0;
3405
3902
  this.modifiedAt = Date.now();
3406
- this._docSizes.clear();
3407
- this._docModified.clear();
3408
- this._docPermanent.clear();
3409
- this._docAttachments.clear();
3903
+ this._docMeta.clear();
3410
3904
  this._dirty = true;
3411
3905
  this._flushSync();
3412
3906
  }
@@ -3722,6 +4216,10 @@ class QueryEngine {
3722
4216
  // Path cache: avoids repeated path.split('.') allocations during scans
3723
4217
  this._pathCache = new Map();
3724
4218
 
4219
+ // Pre-compiled Set cache for $in/$nin/$all operators.
4220
+ // Avoids rebuilding on every per-document call during a query scan.
4221
+ this._setCache = new WeakMap();
4222
+
3725
4223
  this.operators = {
3726
4224
  '$eq': (a, b) => a === b,
3727
4225
  '$ne': (a, b) => a !== b,
@@ -3729,8 +4227,18 @@ class QueryEngine {
3729
4227
  '$gte': (a, b) => a >= b,
3730
4228
  '$lt': (a, b) => a < b,
3731
4229
  '$lte': (a, b) => a <= b,
3732
- '$in': (a, b) => Array.isArray(b) && b.includes(a),
3733
- '$nin': (a, b) => Array.isArray(b) && !b.includes(a),
4230
+ '$in': (a, b) => {
4231
+ if (!Array.isArray(b)) return false;
4232
+ let s = this._setCache.get(b);
4233
+ if (!s) { s = new Set(b); this._setCache.set(b, s); }
4234
+ return s.has(a);
4235
+ },
4236
+ '$nin': (a, b) => {
4237
+ if (!Array.isArray(b)) return false;
4238
+ let s = this._setCache.get(b);
4239
+ if (!s) { s = new Set(b); this._setCache.set(b, s); }
4240
+ return !s.has(a);
4241
+ },
3734
4242
 
3735
4243
  '$and': (doc, conditions) => conditions.every(cond => this.evaluate(doc, cond)),
3736
4244
  '$or': (doc, conditions) => conditions.some(cond => this.evaluate(doc, cond)),
@@ -3740,7 +4248,12 @@ class QueryEngine {
3740
4248
  '$exists': (value, exists) => (value !== undefined) === exists,
3741
4249
  '$type': (value, type) => typeof value === type,
3742
4250
 
3743
- '$all': (arr, values) => Array.isArray(arr) && values.every(v => arr.includes(v)),
4251
+ '$all': (arr, values) => {
4252
+ if (!Array.isArray(arr)) return false;
4253
+ let s = this._setCache.get(arr);
4254
+ if (!s) { s = new Set(arr); this._setCache.set(arr, s); }
4255
+ return values.every(v => s.has(v));
4256
+ },
3744
4257
  '$elemMatch': (arr, condition) => Array.isArray(arr) && arr.some(elem => this.evaluate({ value: elem }, { value: condition })),
3745
4258
  '$size': (arr, size) => Array.isArray(arr) && arr.length === size,
3746
4259
 
@@ -3790,13 +4303,11 @@ class QueryEngine {
3790
4303
  this._pathCache.set(path, parts);
3791
4304
  // Cap cache size to prevent unbounded growth
3792
4305
  if (this._pathCache.size > 2000) {
3793
- // Delete oldest entries (first 500)
4306
+ // Delete oldest 500 entries
3794
4307
  const iter = this._pathCache.keys();
3795
- for (let i = 0; i < 500; i++) iter.next();
3796
- // Rebuild with remaining
3797
- const newCache = new Map();
3798
- for (const [k, v] of this._pathCache) newCache.set(k, v);
3799
- this._pathCache = newCache;
4308
+ for (let i = 0; i < 500; i++) {
4309
+ this._pathCache.delete(iter.next().value);
4310
+ }
3800
4311
  }
3801
4312
  }
3802
4313
  return parts;
@@ -3935,10 +4446,16 @@ class AggregationPipeline {
3935
4446
  result[fieldKey] = group.docs.length;
3936
4447
  break;
3937
4448
  case '$max':
3938
- result[fieldKey] = Math.max(...group.docs.map(d => queryEngine.getFieldValue(d, field)));
4449
+ result[fieldKey] = group.docs.reduce((max, d) => {
4450
+ const v = queryEngine.getFieldValue(d, field);
4451
+ return v !== undefined && (max === undefined || v > max) ? v : max;
4452
+ }, undefined);
3939
4453
  break;
3940
4454
  case '$min':
3941
- result[fieldKey] = Math.min(...group.docs.map(d => queryEngine.getFieldValue(d, field)));
4455
+ result[fieldKey] = group.docs.reduce((min, d) => {
4456
+ const v = queryEngine.getFieldValue(d, field);
4457
+ return v !== undefined && (min === undefined || v < min) ? v : min;
4458
+ }, undefined);
3942
4459
  break;
3943
4460
  }
3944
4461
  }
@@ -4098,12 +4615,18 @@ class MigrationManager {
4098
4615
  for (const collectionName of collections) {
4099
4616
  const coll = await this.database.getCollection(collectionName);
4100
4617
  const docs = await coll.getAll();
4618
+
4619
+ // Collect all updates, then apply in a single batch transaction
4620
+ const updates = [];
4101
4621
  for (const doc of docs) {
4102
4622
  const updated = await migration[direction](doc);
4103
4623
  if (updated) {
4104
- await coll.update(doc._id, updated);
4624
+ updates.push({ id: doc._id, data: updated });
4105
4625
  }
4106
4626
  }
4627
+ if (updates.length > 0) {
4628
+ await coll.batchUpdate(updates);
4629
+ }
4107
4630
  }
4108
4631
  }
4109
4632
  }
@@ -4114,13 +4637,18 @@ class MigrationManager {
4114
4637
 
4115
4638
  class PerformanceMonitor {
4116
4639
  constructor() {
4117
- this._metrics = {
4118
- operations: [],
4119
- latencies: [],
4120
- cacheHits: 0,
4121
- cacheMisses: 0,
4122
- memoryUsage: []
4123
- };
4640
+ // Fixed-size ring buffers — O(1) insert, no shift() overhead
4641
+ this._ops = new Array(100);
4642
+ this._opsIdx = 0;
4643
+ this._opsLen = 0;
4644
+ this._lats = new Float64Array(100);
4645
+ this._latsIdx = 0;
4646
+ this._latsLen = 0;
4647
+ this._mem = new Array(60);
4648
+ this._memIdx = 0;
4649
+ this._memLen = 0;
4650
+ this._cacheHits = 0;
4651
+ this._cacheMisses = 0;
4124
4652
  this._monitoring = false;
4125
4653
  this._monitoringInterval = null;
4126
4654
  }
@@ -4140,35 +4668,56 @@ class PerformanceMonitor {
4140
4668
 
4141
4669
  recordOperation(type, duration) {
4142
4670
  if (!this._monitoring) return;
4143
- this._metrics.operations.push({ type, duration, timestamp: Date.now() });
4144
- this._metrics.latencies.push(duration);
4145
- if (this._metrics.operations.length > 100) this._metrics.operations.shift();
4146
- if (this._metrics.latencies.length > 100) this._metrics.latencies.shift();
4671
+ this._ops[this._opsIdx] = { type, duration, timestamp: Date.now() };
4672
+ this._opsIdx = (this._opsIdx + 1) % 100;
4673
+ if (this._opsLen < 100) this._opsLen++;
4674
+ this._lats[this._latsIdx] = duration;
4675
+ this._latsIdx = (this._latsIdx + 1) % 100;
4676
+ if (this._latsLen < 100) this._latsLen++;
4147
4677
  }
4148
4678
 
4149
- recordCacheHit() { this._metrics.cacheHits++; }
4150
- recordCacheMiss() { this._metrics.cacheMisses++; }
4679
+ recordCacheHit() { this._cacheHits++; }
4680
+ recordCacheMiss() { this._cacheMisses++; }
4151
4681
 
4152
4682
  _collectMetrics() {
4153
4683
  if (performance && performance.memory) {
4154
- this._metrics.memoryUsage.push({
4684
+ this._mem[this._memIdx] = {
4155
4685
  used: performance.memory.usedJSHeapSize,
4156
4686
  total: performance.memory.totalJSHeapSize,
4157
4687
  limit: performance.memory.jsHeapSizeLimit,
4158
4688
  timestamp: Date.now()
4159
- });
4160
- if (this._metrics.memoryUsage.length > 60) this._metrics.memoryUsage.shift();
4689
+ };
4690
+ this._memIdx = (this._memIdx + 1) % 60;
4691
+ if (this._memLen < 60) this._memLen++;
4161
4692
  }
4162
4693
  }
4163
4694
 
4695
+ /** Helper: iterate the ring buffer entries (newest to oldest) */
4696
+ _iterRing(buf, idx, len) {
4697
+ const results = [];
4698
+ for (let i = 0; i < len; i++) {
4699
+ const pos = (idx - 1 - i + buf.length) % buf.length;
4700
+ if (buf[pos] !== undefined) results.push(buf[pos]);
4701
+ }
4702
+ return results;
4703
+ }
4704
+
4164
4705
  getStats() {
4165
- const opsPerSec = this._metrics.operations.filter(op => Date.now() - op.timestamp < 1000).length;
4166
- const totalLatency = this._metrics.latencies.reduce((a, b) => a + b, 0);
4167
- const avgLatency = this._metrics.latencies.length > 0 ? totalLatency / this._metrics.latencies.length : 0;
4168
- const totalCacheOps = this._metrics.cacheHits + this._metrics.cacheMisses;
4169
- const cacheHitRate = totalCacheOps > 0 ? (this._metrics.cacheHits / totalCacheOps) * 100 : 0;
4706
+ const now = Date.now();
4707
+ const ops = this._iterRing(this._ops, this._opsIdx, this._opsLen);
4708
+ const opsPerSec = ops.filter(op => now - op.timestamp < 1000).length;
4170
4709
 
4171
- const latestMemory = this._metrics.memoryUsage.length > 0 ? this._metrics.memoryUsage[this._metrics.memoryUsage.length - 1] : null;
4710
+ let totalLatency = 0;
4711
+ for (let i = 0; i < this._latsLen; i++) {
4712
+ totalLatency += this._lats[i];
4713
+ }
4714
+ const avgLatency = this._latsLen > 0 ? totalLatency / this._latsLen : 0;
4715
+
4716
+ const totalCacheOps = this._cacheHits + this._cacheMisses;
4717
+ const cacheHitRate = totalCacheOps > 0 ? (this._cacheHits / totalCacheOps) * 100 : 0;
4718
+
4719
+ const memEntries = this._iterRing(this._mem, this._memIdx, this._memLen);
4720
+ const latestMemory = memEntries.length > 0 ? memEntries[0] : null;
4172
4721
  const memoryUsageMB = latestMemory ? latestMemory.used / (1024 * 1024) : 0;
4173
4722
 
4174
4723
  return {
@@ -4186,14 +4735,18 @@ class PerformanceMonitor {
4186
4735
  if (stats.avgLatency > 100) {
4187
4736
  tips.push('High average latency detected. Consider enabling compression and indexing frequently queried fields.');
4188
4737
  }
4189
- if (stats.cacheHitRate < 50 && (this._metrics.cacheHits + this._metrics.cacheMisses) > 20) {
4738
+ if (stats.cacheHitRate < 50 && (this._cacheHits + this._cacheMisses) > 20) {
4190
4739
  tips.push('Low cache hit rate. Consider increasing cache size or optimizing query patterns.');
4191
4740
  }
4192
- if (this._metrics.memoryUsage.length > 10) {
4193
- const recent = this._metrics.memoryUsage.slice(-10);
4194
- const trend = recent[recent.length - 1].used - recent[0].used;
4195
- if (trend > 10 * 1024 * 1024) {
4196
- tips.push('Memory usage is increasing rapidly. Check for memory leaks or consider batch processing.');
4741
+ if (this._memLen > 10) {
4742
+ const memEntries = this._iterRing(this._mem, this._memIdx, Math.min(this._memLen, 10));
4743
+ const oldest = memEntries[memEntries.length - 1];
4744
+ const newest = memEntries[0];
4745
+ if (oldest && newest) {
4746
+ const trend = newest.used - oldest.used;
4747
+ if (trend > 10 * 1024 * 1024) {
4748
+ tips.push('Memory usage is increasing rapidly. Check for memory leaks or consider batch processing.');
4749
+ }
4197
4750
  }
4198
4751
  }
4199
4752
  return tips.length > 0 ? tips : ['Performance is optimal. No issues detected.'];
@@ -4303,6 +4856,10 @@ class Collection {
4303
4856
  // Document-level cache: avoids IDB reads + deserialization for repeated get() calls
4304
4857
  this._docCache = new LRUCache(200);
4305
4858
 
4859
+ // Generation counter: bumped on every write, included in query cache keys.
4860
+ // Old cache entries die naturally via LRU eviction — no nuclear clear() needed.
4861
+ this._cacheGeneration = 0;
4862
+
4306
4863
  // Pending indexes: definitions registered before init() — applied during init
4307
4864
  this._pendingIndexes = [];
4308
4865
 
@@ -4426,7 +4983,7 @@ class Collection {
4426
4983
 
4427
4984
  await this._checkSpaceLimit();
4428
4985
  await this._trigger('afterAdd', doc);
4429
- this._cacheStrategy.clear();
4986
+ this._cacheGeneration++;
4430
4987
  this._docCache.set(doc._id, fullDoc);
4431
4988
  return doc._id;
4432
4989
  }
@@ -4541,7 +5098,7 @@ class Collection {
4541
5098
  this.database.metadata.setCollection(this._metadata);
4542
5099
 
4543
5100
  await this._trigger('afterUpdate', doc);
4544
- this._cacheStrategy.clear();
5101
+ this._cacheGeneration++;
4545
5102
  this._docCache.set(doc._id, newDocOutput);
4546
5103
  return doc._id;
4547
5104
  }
@@ -4569,28 +5126,36 @@ class Collection {
4569
5126
 
4570
5127
  await this._trigger('beforeDelete', docId);
4571
5128
 
4572
- const doc = await this._indexedDB.get(this._db, this._storeName, docId);
4573
- if (!doc) {
5129
+ const stored = await this._indexedDB.get(this._db, this._storeName, docId);
5130
+ if (!stored) {
4574
5131
  throw new LacertaDBError('Document not found for deletion', 'DOCUMENT_NOT_FOUND');
4575
5132
  }
4576
5133
 
4577
- if (doc._permanent && !options.force) {
5134
+ if (stored._permanent && !options.force) {
4578
5135
  throw new LacertaDBError(
4579
5136
  'Cannot delete a permanent document. Use options.force = true to force deletion.',
4580
5137
  'PERMANENT_DOCUMENT_PROTECTION'
4581
5138
  );
4582
5139
  }
4583
5140
 
4584
- if (doc._permanent && options.force) {
5141
+ if (stored._permanent && options.force) {
4585
5142
  console.warn(`Force deleting permanent document: ${docId}`);
4586
5143
  }
4587
5144
 
4588
- const fullDoc = await this.get(docId);
5145
+ // Unpack the doc we already fetched — no second IDB read
5146
+ const existingDoc = new Document(stored, {
5147
+ encrypted: stored._encrypted,
5148
+ compressed: stored._compressed
5149
+ }, this._serializer);
5150
+ if (stored.packedData) {
5151
+ await existingDoc.unpack(this.database.encryption);
5152
+ }
5153
+ const fullDoc = existingDoc.objectOutput();
4589
5154
 
4590
5155
  await this._indexManager.updateIndexForDocument(docId, fullDoc, null);
4591
5156
 
4592
5157
  await this._indexedDB.delete(this._db, this._storeName, docId);
4593
- const attachments = doc._attachments;
5158
+ const attachments = stored._attachments;
4594
5159
  if (attachments && attachments.length > 0) {
4595
5160
  await this._opfs.deleteAttachments(this.database.name, this.name, docId);
4596
5161
  }
@@ -4599,14 +5164,14 @@ class Collection {
4599
5164
  this.database.metadata.setCollection(this._metadata);
4600
5165
 
4601
5166
  await this._trigger('afterDelete', docId);
4602
- this._cacheStrategy.clear();
5167
+ this._cacheGeneration++;
4603
5168
  this._docCache.delete(docId);
4604
5169
  } async query(filter = {}, options = {}) {
4605
5170
  if (!this._initialized) await this.init();
4606
5171
 
4607
5172
  const startTime = performance.now();
4608
5173
 
4609
- const cacheKey = _stableCacheKey(filter, options);
5174
+ const cacheKey = _stableCacheKey(filter, options) ^ (this._cacheGeneration * 2654435761);
4610
5175
  const cached = this._cacheStrategy.get(cacheKey);
4611
5176
 
4612
5177
  if (cached) {
@@ -4618,24 +5183,51 @@ class Collection {
4618
5183
  let results;
4619
5184
  let usedIndex = false;
4620
5185
 
5186
+ // --- Index selection: pick the most selective matching index ---
5187
+ let bestIndex = null;
5188
+ let bestSize = Infinity;
5189
+
4621
5190
  for (const [indexName, index] of this._indexManager.indexes) {
4622
5191
  const fieldValue = filter[index.fieldPath];
4623
5192
  if (fieldValue !== undefined) {
4624
- const docIds = await this._indexManager.query(indexName, fieldValue);
4625
- results = await Promise.all(
4626
- docIds.map(id => this.get(id).catch(() => null))
4627
- );
4628
- results = results.filter(Boolean);
4629
- usedIndex = true;
4630
- break;
5193
+ const indexData = this._indexManager._indexData.get(indexName);
5194
+ const size = indexData ? (indexData.size || 0) : Infinity;
5195
+ if (size < bestSize) {
5196
+ bestSize = size;
5197
+ bestIndex = { indexName, fieldValue };
5198
+ }
4631
5199
  }
4632
5200
  }
4633
5201
 
4634
- if (!usedIndex) {
4635
- results = await this.getAll(options);
4636
- if (Object.keys(filter).length > 0) {
4637
- results = results.filter(doc => queryEngine.evaluate(doc, filter));
5202
+ if (bestIndex) {
5203
+ const docIds = await this._indexManager.query(bestIndex.indexName, bestIndex.fieldValue);
5204
+ results = await Promise.all(
5205
+ docIds.map(id => this.get(id).catch(() => null))
5206
+ );
5207
+ results = results.filter(Boolean);
5208
+
5209
+ // Apply remaining filter fields the index didn't cover
5210
+ const remainingFilter = {};
5211
+ const indexedField = this._indexManager.indexes.get(bestIndex.indexName).fieldPath;
5212
+ for (const key in filter) {
5213
+ if (key !== indexedField) remainingFilter[key] = filter[key];
5214
+ }
5215
+ if (Object.keys(remainingFilter).length > 0) {
5216
+ results = results.filter(doc => queryEngine.evaluate(doc, remainingFilter));
4638
5217
  }
5218
+ usedIndex = true;
5219
+ }
5220
+
5221
+ if (!usedIndex) {
5222
+ const hasFilter = Object.keys(filter).length > 0;
5223
+ // Can we short-circuit? Only if no sort is needed.
5224
+ const canShortCircuit = !options.sort && hasFilter;
5225
+ const target = canShortCircuit ? (options.skip || 0) + (options.limit || Infinity) : Infinity;
5226
+
5227
+ results = await this._scanWithFilter(
5228
+ hasFilter ? filter : null,
5229
+ target
5230
+ );
4639
5231
  }
4640
5232
 
4641
5233
  if (options.sort) results = aggregationPipeline.stages.$sort(results, options.sort);
@@ -4655,6 +5247,64 @@ class Collection {
4655
5247
  return results;
4656
5248
  }
4657
5249
 
5250
+ /**
5251
+ * Cursor-based scan: fetches documents in batches, deserializes and
5252
+ * evaluates filter per-batch, stops early when target count is reached.
5253
+ * Avoids loading + deserializing the entire collection for selective queries.
5254
+ *
5255
+ * @param {object|null} filter - Query filter, or null for all docs
5256
+ * @param {number} target - Stop after collecting this many matches (Infinity = no limit)
5257
+ * @returns {Promise<Array>}
5258
+ */
5259
+ async _scanWithFilter(filter, target) {
5260
+ const results = [];
5261
+ let lastKey = null;
5262
+ const batchSize = 200;
5263
+
5264
+ while (true) {
5265
+ const batch = await this._indexedDB.getBatch(
5266
+ this._db, this._storeName, lastKey, batchSize
5267
+ );
5268
+
5269
+ if (batch.length === 0) break;
5270
+
5271
+ for (const docData of batch) {
5272
+ lastKey = docData._id;
5273
+
5274
+ // Skip persisted index entries
5275
+ if (typeof docData._id === 'string' && docData._id.startsWith(IndexManager.IDX_PREFIX)) {
5276
+ continue;
5277
+ }
5278
+
5279
+ try {
5280
+ const doc = new Document(docData, {
5281
+ encrypted: docData._encrypted,
5282
+ compressed: docData._compressed
5283
+ }, this._serializer);
5284
+
5285
+ if (docData.packedData) {
5286
+ await doc.unpack(this.database.encryption);
5287
+ }
5288
+
5289
+ const output = doc.objectOutput();
5290
+
5291
+ if (!filter || queryEngine.evaluate(output, filter)) {
5292
+ results.push(output);
5293
+ this._docCache.set(docData._id, output);
5294
+
5295
+ if (results.length >= target) return results;
5296
+ }
5297
+ } catch (error) {
5298
+ console.error(`Failed to unpack document ${docData._id}:`, error);
5299
+ }
5300
+ }
5301
+
5302
+ if (batch.length < batchSize) break;
5303
+ }
5304
+
5305
+ return results;
5306
+ }
5307
+
4658
5308
  async aggregate(pipeline) {
4659
5309
  if (!this._initialized) await this.init();
4660
5310
 
@@ -4736,15 +5386,27 @@ class Collection {
4736
5386
  const skipped = [];
4737
5387
  const useSync = !this.database.encryption && !(options.compressed);
4738
5388
 
4739
- // Phase 1: Bulk-fetch all existing docs in a single IDB read transaction
4740
- const updateIds = updates.map(u => u.id);
5389
+ // Phase 1: Fetch only the documents we need (not the entire collection)
4741
5390
  const storedMap = new Map();
4742
5391
 
4743
- // Fetch all at once via getAll, then build a Map for O(1) lookup
4744
- const allStored = await this._indexedDB.getAll(this._db, this._storeName);
4745
- for (const doc of allStored) {
4746
- if (doc._id && updateIds.includes(doc._id)) {
4747
- storedMap.set(doc._id, doc);
5392
+ // Single read transaction: fetch all target docs at once via IDB getAll
5393
+ // with a bounded key set, falling back to individual gets for small batches
5394
+ if (updates.length <= 20) {
5395
+ // Small batch: individual gets (avoids loading/filtering entire store)
5396
+ const fetches = updates.map(u =>
5397
+ this._indexedDB.get(this._db, this._storeName, u.id)
5398
+ .then(doc => doc && storedMap.set(u.id, doc))
5399
+ .catch(() => {})
5400
+ );
5401
+ await Promise.all(fetches);
5402
+ } else {
5403
+ // Larger batch: use getAll + Set-based filter (still cheaper than N transactions)
5404
+ const updateIdSet = new Set(updates.map(u => u.id));
5405
+ const allStored = await this._indexedDB.getAll(this._db, this._storeName);
5406
+ for (const doc of allStored) {
5407
+ if (doc._id && updateIdSet.has(doc._id)) {
5408
+ storedMap.set(doc._id, doc);
5409
+ }
4748
5410
  }
4749
5411
  }
4750
5412
 
@@ -4804,7 +5466,7 @@ class Collection {
4804
5466
  }
4805
5467
 
4806
5468
  this.database.metadata.setCollection(this._metadata);
4807
- this._cacheStrategy.clear();
5469
+ this._cacheGeneration++;
4808
5470
 
4809
5471
  if (this._performanceMonitor) {
4810
5472
  this._performanceMonitor.recordOperation('batchUpdate', performance.now() - startTime);
@@ -4831,21 +5493,51 @@ class Collection {
4831
5493
  const docsToRemove = [];
4832
5494
  const skipped = [];
4833
5495
 
4834
- // Phase 1: Validate all documents and prepare delete operations
5496
+ // Phase 1: Bulk-fetch all target docs, validate, and unpack in-place
5497
+ const storedMap = new Map();
5498
+
5499
+ if (normalizedItems.length <= 20) {
5500
+ // Small batch: parallel individual gets
5501
+ const fetches = normalizedItems.map(({ id }) =>
5502
+ this._indexedDB.get(this._db, this._storeName, id)
5503
+ .then(doc => doc && storedMap.set(id, doc))
5504
+ .catch(() => {})
5505
+ );
5506
+ await Promise.all(fetches);
5507
+ } else {
5508
+ // Large batch: single getAll + Set-filter
5509
+ const idSet = new Set(normalizedItems.map(({ id }) => id));
5510
+ const allStored = await this._indexedDB.getAll(this._db, this._storeName);
5511
+ for (const doc of allStored) {
5512
+ if (doc._id && idSet.has(doc._id)) {
5513
+ storedMap.set(doc._id, doc);
5514
+ }
5515
+ }
5516
+ }
5517
+
4835
5518
  for (const { id, options } of normalizedItems) {
4836
- const doc = await this._indexedDB.get(this._db, this._storeName, id);
4837
- if (!doc) {
5519
+ const stored = storedMap.get(id);
5520
+ if (!stored) {
4838
5521
  skipped.push({ success: false, id, error: 'Document not found' });
4839
5522
  continue;
4840
5523
  }
4841
5524
 
4842
- if (doc._permanent && !options.force) {
5525
+ if (stored._permanent && !options.force) {
4843
5526
  skipped.push({ success: false, id, error: 'Cannot delete permanent document without force flag' });
4844
5527
  continue;
4845
5528
  }
4846
5529
 
4847
- const fullDoc = await this.get(id);
4848
- docsToRemove.push({ id, fullDoc, stored: doc });
5530
+ // Unpack directly from the raw doc — no second IDB fetch
5531
+ const existingDoc = new Document(stored, {
5532
+ encrypted: stored._encrypted,
5533
+ compressed: stored._compressed
5534
+ }, this._serializer);
5535
+ if (stored.packedData) {
5536
+ await existingDoc.unpack(this.database.encryption);
5537
+ }
5538
+ const fullDoc = existingDoc.objectOutput();
5539
+
5540
+ docsToRemove.push({ id, fullDoc, stored });
4849
5541
 
4850
5542
  operations.push({
4851
5543
  type: 'delete',
@@ -4874,7 +5566,7 @@ class Collection {
4874
5566
  }
4875
5567
 
4876
5568
  this.database.metadata.setCollection(this._metadata);
4877
- this._cacheStrategy.clear();
5569
+ this._cacheGeneration++;
4878
5570
 
4879
5571
  if (this._performanceMonitor) {
4880
5572
  this._performanceMonitor.recordOperation('batchDelete', performance.now() - startTime);
@@ -5836,6 +6528,7 @@ export {
5836
6528
  BTreeIndex,
5837
6529
  TextIndex,
5838
6530
  GeoIndex,
6531
+ HashIndex,
5839
6532
  SecureDatabaseEncryption,
5840
6533
  QuickStore,
5841
6534
  AsyncMutex,