@git-stunts/git-warp 12.2.0 → 12.3.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (59):
  1. package/README.md +9 -6
  2. package/bin/cli/commands/trust.js +37 -1
  3. package/bin/cli/infrastructure.js +14 -1
  4. package/bin/cli/schemas.js +4 -4
  5. package/bin/presenters/text.js +10 -3
  6. package/bin/warp-graph.js +4 -1
  7. package/index.d.ts +17 -1
  8. package/package.json +1 -1
  9. package/src/domain/WarpGraph.js +1 -1
  10. package/src/domain/crdt/Dot.js +5 -0
  11. package/src/domain/crdt/LWW.js +3 -1
  12. package/src/domain/crdt/ORSet.js +33 -23
  13. package/src/domain/crdt/VersionVector.js +12 -0
  14. package/src/domain/errors/PatchError.js +27 -0
  15. package/src/domain/errors/StorageError.js +8 -0
  16. package/src/domain/errors/WriterError.js +5 -0
  17. package/src/domain/errors/index.js +1 -0
  18. package/src/domain/services/AuditReceiptService.js +2 -1
  19. package/src/domain/services/AuditVerifierService.js +33 -2
  20. package/src/domain/services/BitmapIndexBuilder.js +14 -9
  21. package/src/domain/services/BoundaryTransitionRecord.js +1 -0
  22. package/src/domain/services/CheckpointMessageCodec.js +5 -0
  23. package/src/domain/services/CheckpointService.js +29 -2
  24. package/src/domain/services/GCPolicy.js +25 -4
  25. package/src/domain/services/GraphTraversal.js +3 -1
  26. package/src/domain/services/IncrementalIndexUpdater.js +179 -36
  27. package/src/domain/services/JoinReducer.js +311 -75
  28. package/src/domain/services/KeyCodec.js +48 -0
  29. package/src/domain/services/MaterializedViewService.js +14 -3
  30. package/src/domain/services/MessageSchemaDetector.js +35 -5
  31. package/src/domain/services/OpNormalizer.js +79 -0
  32. package/src/domain/services/PatchBuilderV2.js +240 -160
  33. package/src/domain/services/QueryBuilder.js +4 -0
  34. package/src/domain/services/SyncAuthService.js +3 -0
  35. package/src/domain/services/SyncController.js +12 -31
  36. package/src/domain/services/SyncProtocol.js +76 -32
  37. package/src/domain/services/WarpMessageCodec.js +2 -0
  38. package/src/domain/trust/TrustCrypto.js +8 -5
  39. package/src/domain/trust/TrustRecordService.js +50 -36
  40. package/src/domain/types/TickReceipt.js +6 -4
  41. package/src/domain/types/WarpTypesV2.js +77 -5
  42. package/src/domain/utils/CachedValue.js +34 -5
  43. package/src/domain/utils/EventId.js +4 -1
  44. package/src/domain/utils/LRUCache.js +3 -1
  45. package/src/domain/utils/RefLayout.js +4 -0
  46. package/src/domain/utils/canonicalStringify.js +48 -18
  47. package/src/domain/utils/defaultClock.js +1 -0
  48. package/src/domain/utils/matchGlob.js +7 -0
  49. package/src/domain/warp/PatchSession.js +30 -24
  50. package/src/domain/warp/Writer.js +12 -1
  51. package/src/domain/warp/_wiredMethods.d.ts +1 -1
  52. package/src/domain/warp/checkpoint.methods.js +36 -7
  53. package/src/domain/warp/fork.methods.js +1 -1
  54. package/src/domain/warp/materialize.methods.js +44 -5
  55. package/src/domain/warp/materializeAdvanced.methods.js +50 -10
  56. package/src/domain/warp/patch.methods.js +21 -11
  57. package/src/infrastructure/adapters/GitGraphAdapter.js +55 -52
  58. package/src/infrastructure/codecs/CborCodec.js +2 -0
  59. package/src/domain/utils/fnv1a.js +0 -20
@@ -28,6 +28,24 @@ import { cloneStateV5, reduceV5 } from './JoinReducer.js';
28
28
  import { encodeEdgeKey, encodePropKey, CONTENT_PROPERTY_KEY, decodePropKey, isEdgePropKey, decodeEdgePropKey } from './KeyCodec.js';
29
29
  import { ProvenanceIndex } from './ProvenanceIndex.js';
30
30
 
31
+ // ============================================================================
32
+ // Checkpoint Schema Constants
33
+ // ============================================================================
34
+
35
+ /**
36
+ * Standard checkpoint schema — full V5 state without index tree.
37
+ * Distinct from the patch schema namespace (PATCH_SCHEMA_V2/V3).
38
+ * @type {number}
39
+ */
40
+ export const CHECKPOINT_SCHEMA_STANDARD = 2;
41
+
42
+ /**
43
+ * Index-tree checkpoint schema — full V5 state with bitmap index tree.
44
+ * Distinct from the patch schema namespace (PATCH_SCHEMA_V2/V3).
45
+ * @type {number}
46
+ */
47
+ export const CHECKPOINT_SCHEMA_INDEX_TREE = 4;
48
+
31
49
  // ============================================================================
32
50
  // Internal Helpers
33
51
  // ============================================================================
@@ -148,7 +166,9 @@ export async function createV5({
148
166
  // 1. Compute appliedVV from actual state dots
149
167
  const appliedVV = computeAppliedVV(state);
150
168
 
151
- // 2. Optionally compact (only tombstoned dots <= appliedVV)
169
+ // 2. Optionally compact (only tombstoned dots <= appliedVV).
170
+ // When compact=false, checkpointState aliases the caller's state but the
171
+ // remaining path is read-only (serialize + hash), so no clone is needed.
152
172
  let checkpointState = state;
153
173
  if (compact) {
154
174
  checkpointState = cloneStateV5(state);
@@ -188,6 +208,11 @@ export async function createV5({
188
208
  // If patch commits are ever pruned, content blobs remain reachable via
189
209
  // the checkpoint tree. Without this, git gc would nuke content blobs
190
210
  // whose only anchor was the (now-pruned) patch commit tree.
211
+ //
212
+ // O(P) scan over all properties — acceptable because checkpoint creation
213
+ // is infrequent. The property key format is deterministic (encodePropKey /
214
+ // encodeEdgePropKey), but content keys are interleaved with regular keys
215
+ // so no prefix filter can skip non-content entries without decoding.
191
216
  const contentOids = new Set();
192
217
  for (const [propKey, register] of checkpointState.prop) {
193
218
  const { propKey: decodedKey } = isEdgePropKey(propKey)
@@ -235,7 +260,9 @@ export async function createV5({
235
260
  stateHash,
236
261
  frontierOid: frontierBlobOid,
237
262
  indexOid: treeOid,
238
- schema: indexTree ? 4 : 2,
263
+ // Schema 3 was used for edge-property-aware patches but is never emitted
264
+ // by checkpoint creation. Schema 4 indicates an index tree is present.
265
+ schema: indexTree ? CHECKPOINT_SCHEMA_INDEX_TREE : CHECKPOINT_SCHEMA_STANDARD,
239
266
  });
240
267
 
241
268
  // 9. Create the checkpoint commit
@@ -4,6 +4,7 @@
4
4
 
5
5
  import { orsetCompact } from '../crdt/ORSet.js';
6
6
  import { collectGCMetrics } from './GCMetrics.js';
7
+ import WarpError from '../errors/WarpError.js';
7
8
 
8
9
  /**
9
10
  * @typedef {Object} GCPolicy
@@ -92,21 +93,41 @@ export function shouldRunGC(metrics, policy) {
92
93
 
93
94
  /**
94
95
  * Executes GC on state. Only compacts tombstoned dots <= appliedVV.
95
- * Mutates state in place.
96
+ * Mutates state **in place** — callers must clone-then-swap to preserve
97
+ * a rollback copy (see CheckpointService for the canonical pattern).
96
98
  *
97
99
  * @param {import('./JoinReducer.js').WarpStateV5} state - State to compact (mutated!)
98
100
  * @param {import('../crdt/VersionVector.js').VersionVector} appliedVV - Version vector cutoff
99
101
  * @returns {GCExecuteResult}
102
+ * @throws {WarpError} E_GC_INVALID_VV if appliedVV is not a Map
103
+ * @throws {WarpError} E_GC_COMPACT_FAILED if orsetCompact throws
100
104
  */
101
105
  export function executeGC(state, appliedVV) {
106
+ if (!(appliedVV instanceof Map)) {
107
+ throw new WarpError(
108
+ 'executeGC requires appliedVV to be a Map (VersionVector)',
109
+ 'E_GC_INVALID_VV',
110
+ );
111
+ }
112
+
102
113
  const startTime = performance.now();
103
114
 
104
115
  // Collect metrics before compaction
105
116
  const beforeMetrics = collectGCMetrics(state);
106
117
 
107
- // Compact both ORSets
108
- orsetCompact(state.nodeAlive, appliedVV);
109
- orsetCompact(state.edgeAlive, appliedVV);
118
+ // Compact both ORSets — wrap each phase so partial failure is diagnosable
119
+ let nodesDone = false;
120
+ try {
121
+ orsetCompact(state.nodeAlive, appliedVV);
122
+ nodesDone = true;
123
+ orsetCompact(state.edgeAlive, appliedVV);
124
+ } catch {
125
+ throw new WarpError(
126
+ `GC compaction failed during ${nodesDone ? 'edgeAlive' : 'nodeAlive'} phase`,
127
+ 'E_GC_COMPACT_FAILED',
128
+ { context: { phase: nodesDone ? 'edgeAlive' : 'nodeAlive', partialCompaction: nodesDone } },
129
+ );
130
+ }
110
131
 
111
132
  // Collect metrics after compaction
112
133
  const afterMetrics = collectGCMetrics(state);
@@ -165,7 +165,9 @@ export default class GraphTraversal {
165
165
  return await this._provider.getNeighbors(nodeId, direction, options);
166
166
  }
167
167
 
168
- const labelsKey = options?.labels ? JSON.stringify([...options.labels].sort()) : '*';
168
+ const labelsKey = options?.labels
169
+ ? [...options.labels].sort().join('\0')
170
+ : '*';
169
171
  const key = `${nodeId}\0${direction}\0${labelsKey}`;
170
172
  const cached = cache.get(key);
171
173
  if (cached !== undefined) {
@@ -1,10 +1,17 @@
1
1
  /**
2
- * Stateless service that computes dirty shard buffers from a PatchDiff.
2
+ * Stateful service that computes dirty shard buffers from a PatchDiff.
3
3
  *
4
4
  * Given a diff of alive-ness transitions + a shard loader for the existing
5
5
  * index tree, produces only the shard buffers that changed. The caller
6
6
  * merges them back into the tree via `{ ...existingTree, ...dirtyShards }`.
7
7
  *
8
+ * Instance state:
9
+ * - `_edgeAdjacencyCache` stores a WeakMap keyed by `state.edgeAlive` ORSet
10
+ * identity, mapping nodeId -> incident alive edge keys.
11
+ * - Cache lifetime is tied to the updater instance and is reconciled per diff
12
+ * once initialized. Reuse one instance for a single linear state stream;
13
+ * create a new instance to reset cache state across independent streams.
14
+ *
8
15
  * @module domain/services/IncrementalIndexUpdater
9
16
  */
10
17
 
@@ -40,6 +47,15 @@ export default class IncrementalIndexUpdater {
40
47
  */
41
48
  constructor({ codec } = {}) {
42
49
  this._codec = codec || defaultCodec;
50
+ /** @type {WeakMap<import('../crdt/ORSet.js').ORSet, Map<string, Set<string>>>} */
51
+ this._edgeAdjacencyCache = new WeakMap();
52
+ /**
53
+ * Cached next label ID — avoids O(L) max-scan per new label.
54
+ * Initialized lazily from existing labels on first _ensureLabel call.
55
+ * @type {number|null}
56
+ * @private
57
+ */
58
+ this._nextLabelId = null;
43
59
  }
44
60
 
45
61
  /**
@@ -63,8 +79,22 @@ export default class IncrementalIndexUpdater {
63
79
  const out = {};
64
80
 
65
81
  const labels = this._loadLabels(loadShard);
82
+ // Reset cached next label ID so _ensureLabel re-scans the fresh labels
83
+ // object loaded above. Without this, a stale _nextLabelId from a prior
84
+ // applyDiff call could collide with IDs already present in the new labels.
85
+ this._nextLabelId = null;
66
86
  let labelsDirty = false;
67
87
 
88
+ // Determine which added nodes are true re-adds (already have global IDs).
89
+ // Brand-new nodes cannot have pre-existing indexed edges to restore.
90
+ const readdedNodes = new Set();
91
+ for (const nodeId of diff.nodesAdded) {
92
+ const meta = this._getOrLoadMeta(computeShardKey(nodeId), metaCache, loadShard);
93
+ if (this._findGlobalId(meta, nodeId) !== undefined) {
94
+ readdedNodes.add(nodeId);
95
+ }
96
+ }
97
+
68
98
  for (const nodeId of diff.nodesAdded) {
69
99
  this._handleNodeAdd(nodeId, metaCache, loadShard);
70
100
  }
@@ -96,25 +126,22 @@ export default class IncrementalIndexUpdater {
96
126
  this._handleEdgeRemove(edge, labels, metaCache, fwdCache, revCache, loadShard);
97
127
  }
98
128
 
99
- // Restore edges for re-added nodes. When a node transitions
100
- // not-alive -> alive, edges touching it that are alive in the ORSet
101
- // become visible again. The diff only tracks explicit EdgeAdd ops,
102
- // not these implicit visibility transitions.
103
- //
104
- // Known O(E) worst-case: scans all alive edges. For genuinely new nodes
105
- // (not re-adds), this scan is unnecessary since they can't have pre-existing
106
- // edges. _findGlobalId returns undefined for new nodes, so this could be
107
- // short-circuited deferred for a future optimization pass.
108
- if (diff.nodesAdded.length > 0) {
109
- const addedSet = new Set(diff.nodesAdded);
129
+ // Keep adjacency cache in sync for every diff once initialized, so later
130
+ // re-add restores never consult stale edge membership.
131
+ let readdAdjacency = null;
132
+ if (readdedNodes.size > 0 || this._edgeAdjacencyCache.has(state.edgeAlive)) {
133
+ readdAdjacency = this._getOrBuildAliveEdgeAdjacency(state, diff);
134
+ }
135
+
136
+ // Restore edges for re-added nodes only. When a node transitions
137
+ // not-alive -> alive, alive OR-Set edges touching it become visible again.
138
+ // Brand-new nodes are skipped because they have no prior global ID.
139
+ if (readdedNodes.size > 0 && readdAdjacency) {
110
140
  const diffEdgeSet = new Set(
111
141
  diff.edgesAdded.map((e) => `${e.from}\0${e.to}\0${e.label}`),
112
142
  );
113
- for (const edgeKey of orsetElements(state.edgeAlive)) {
143
+ for (const edgeKey of this._collectReaddedEdgeKeys(readdAdjacency, readdedNodes)) {
114
144
  const { from, to, label } = decodeEdgeKey(edgeKey);
115
- if (!addedSet.has(from) && !addedSet.has(to)) {
116
- continue;
117
- }
118
145
  if (!orsetContains(state.nodeAlive, from) || !orsetContains(state.nodeAlive, to)) {
119
146
  continue;
120
147
  }
@@ -255,15 +282,16 @@ export default class IncrementalIndexUpdater {
255
282
  for (const bucket of Object.keys(fwdData)) {
256
283
  const gidStr = String(deadGid);
257
284
  if (fwdData[bucket] && fwdData[bucket][gidStr]) {
258
- // Before clearing, find the targets so we can clean reverse bitmaps
259
- const targets = RoaringBitmap32.deserialize(
285
+ // Deserialize once, collect targets, then clear+serialize in place.
286
+ const bm = RoaringBitmap32.deserialize(
260
287
  toBytes(fwdData[bucket][gidStr]),
261
288
  true,
262
- ).toArray();
289
+ );
290
+ const targets = bm.toArray();
263
291
 
264
292
  // Clear this node's outgoing bitmap
265
- const empty = new RoaringBitmap32();
266
- fwdData[bucket][gidStr] = empty.serialize(true);
293
+ bm.clear();
294
+ fwdData[bucket][gidStr] = bm.serialize(true);
267
295
 
268
296
  // Remove deadGid from each target's reverse bitmap
269
297
  for (const targetGid of targets) {
@@ -273,12 +301,12 @@ export default class IncrementalIndexUpdater {
273
301
  const revData = this._getOrLoadEdgeShard(revCache, 'rev', targetShard, loadShard);
274
302
  const targetGidStr = String(targetGid);
275
303
  if (revData[bucket] && revData[bucket][targetGidStr]) {
276
- const bm = RoaringBitmap32.deserialize(
304
+ const targetBm = RoaringBitmap32.deserialize(
277
305
  toBytes(revData[bucket][targetGidStr]),
278
306
  true,
279
307
  );
280
- bm.remove(deadGid);
281
- revData[bucket][targetGidStr] = bm.serialize(true);
308
+ targetBm.remove(deadGid);
309
+ revData[bucket][targetGidStr] = targetBm.serialize(true);
282
310
  }
283
311
  }
284
312
  }
@@ -290,13 +318,14 @@ export default class IncrementalIndexUpdater {
290
318
  for (const bucket of Object.keys(revData)) {
291
319
  const gidStr = String(deadGid);
292
320
  if (revData[bucket] && revData[bucket][gidStr]) {
293
- const sources = RoaringBitmap32.deserialize(
321
+ const bm = RoaringBitmap32.deserialize(
294
322
  toBytes(revData[bucket][gidStr]),
295
323
  true,
296
- ).toArray();
324
+ );
325
+ const sources = bm.toArray();
297
326
 
298
- const empty = new RoaringBitmap32();
299
- revData[bucket][gidStr] = empty.serialize(true);
327
+ bm.clear();
328
+ revData[bucket][gidStr] = bm.serialize(true);
300
329
 
301
330
  // Remove deadGid from each source's forward bitmap
302
331
  for (const sourceGid of sources) {
@@ -306,12 +335,12 @@ export default class IncrementalIndexUpdater {
306
335
  const fwdDataPeer = this._getOrLoadEdgeShard(fwdCache, 'fwd', sourceShard, loadShard);
307
336
  const sourceGidStr = String(sourceGid);
308
337
  if (fwdDataPeer[bucket] && fwdDataPeer[bucket][sourceGidStr]) {
309
- const bm = RoaringBitmap32.deserialize(
338
+ const sourceBm = RoaringBitmap32.deserialize(
310
339
  toBytes(fwdDataPeer[bucket][sourceGidStr]),
311
340
  true,
312
341
  );
313
- bm.remove(deadGid);
314
- fwdDataPeer[bucket][sourceGidStr] = bm.serialize(true);
342
+ sourceBm.remove(deadGid);
343
+ fwdDataPeer[bucket][sourceGidStr] = sourceBm.serialize(true);
315
344
  }
316
345
  }
317
346
  }
@@ -350,13 +379,19 @@ export default class IncrementalIndexUpdater {
350
379
  if (Object.prototype.hasOwnProperty.call(labels, label)) {
351
380
  return false;
352
381
  }
353
- let maxId = -1;
354
- for (const id of Object.values(labels)) {
355
- if (id > maxId) {
356
- maxId = id;
382
+ // Lazily initialize _nextLabelId from existing labels (O(L) once),
383
+ // then O(1) per subsequent new label.
384
+ if (this._nextLabelId === null) {
385
+ let maxId = -1;
386
+ for (const id of Object.values(labels)) {
387
+ if (id > maxId) {
388
+ maxId = id;
389
+ }
357
390
  }
391
+ this._nextLabelId = maxId + 1;
358
392
  }
359
- labels[label] = maxId + 1;
393
+ labels[label] = this._nextLabelId;
394
+ this._nextLabelId++;
360
395
  return true;
361
396
  }
362
397
 
@@ -743,6 +778,111 @@ export default class IncrementalIndexUpdater {
743
778
  return meta.nodeToGlobalMap.get(nodeId);
744
779
  }
745
780
 
781
+ /**
782
+ * Collects alive edge keys incident to re-added nodes.
783
+ *
784
+ * Uses an ORSet-keyed adjacency cache so repeated updates can enumerate
785
+ * candidates by degree rather than scanning all alive edges each time.
786
+ *
787
+ * @param {Map<string, Set<string>>} adjacency
788
+ * @param {Set<string>} readdedNodes
789
+ * @returns {Set<string>}
790
+ * @private
791
+ */
792
+ _collectReaddedEdgeKeys(adjacency, readdedNodes) {
793
+ const keys = new Set();
794
+ for (const nodeId of readdedNodes) {
795
+ const incident = adjacency.get(nodeId);
796
+ if (!incident) {
797
+ continue;
798
+ }
799
+ for (const edgeKey of incident) {
800
+ keys.add(edgeKey);
801
+ }
802
+ }
803
+ return keys;
804
+ }
805
+
806
+ /**
807
+ * Gets or builds a node -> alive edgeKey adjacency map for state.edgeAlive.
808
+ *
809
+ * For cached maps, applies diff edge transitions to keep membership current.
810
+ *
811
+ * @param {import('./JoinReducer.js').WarpStateV5} state
812
+ * @param {import('../types/PatchDiff.js').PatchDiff} diff
813
+ * @returns {Map<string, Set<string>>}
814
+ * @private
815
+ */
816
+ _getOrBuildAliveEdgeAdjacency(state, diff) {
817
+ const { edgeAlive } = state;
818
+ let adjacency = this._edgeAdjacencyCache.get(edgeAlive);
819
+ if (!adjacency) {
820
+ adjacency = new Map();
821
+ for (const edgeKey of orsetElements(edgeAlive)) {
822
+ const { from, to } = decodeEdgeKey(edgeKey);
823
+ this._addEdgeKeyToAdjacency(adjacency, from, edgeKey);
824
+ this._addEdgeKeyToAdjacency(adjacency, to, edgeKey);
825
+ }
826
+ this._edgeAdjacencyCache.set(edgeAlive, adjacency);
827
+ return adjacency;
828
+ }
829
+
830
+ for (const edge of diff.edgesAdded) {
831
+ const edgeKey = `${edge.from}\0${edge.to}\0${edge.label}`;
832
+ if (!orsetContains(edgeAlive, edgeKey)) {
833
+ continue;
834
+ }
835
+ this._addEdgeKeyToAdjacency(adjacency, edge.from, edgeKey);
836
+ this._addEdgeKeyToAdjacency(adjacency, edge.to, edgeKey);
837
+ }
838
+ for (const edge of diff.edgesRemoved) {
839
+ const edgeKey = `${edge.from}\0${edge.to}\0${edge.label}`;
840
+ if (orsetContains(edgeAlive, edgeKey)) {
841
+ continue;
842
+ }
843
+ this._removeEdgeKeyFromAdjacency(adjacency, edge.from, edgeKey);
844
+ this._removeEdgeKeyFromAdjacency(adjacency, edge.to, edgeKey);
845
+ }
846
+
847
+ return adjacency;
848
+ }
849
+
850
+ /**
851
+ * Adds an edge key to one endpoint's adjacency set.
852
+ *
853
+ * @param {Map<string, Set<string>>} adjacency
854
+ * @param {string} nodeId
855
+ * @param {string} edgeKey
856
+ * @private
857
+ */
858
+ _addEdgeKeyToAdjacency(adjacency, nodeId, edgeKey) {
859
+ let set = adjacency.get(nodeId);
860
+ if (!set) {
861
+ set = new Set();
862
+ adjacency.set(nodeId, set);
863
+ }
864
+ set.add(edgeKey);
865
+ }
866
+
867
+ /**
868
+ * Removes an edge key from one endpoint's adjacency set.
869
+ *
870
+ * @param {Map<string, Set<string>>} adjacency
871
+ * @param {string} nodeId
872
+ * @param {string} edgeKey
873
+ * @private
874
+ */
875
+ _removeEdgeKeyFromAdjacency(adjacency, nodeId, edgeKey) {
876
+ const set = adjacency.get(nodeId);
877
+ if (!set) {
878
+ return;
879
+ }
880
+ set.delete(edgeKey);
881
+ if (set.size === 0) {
882
+ adjacency.delete(nodeId);
883
+ }
884
+ }
885
+
746
886
  /**
747
887
  * Deserializes a bitmap from edge shard data, or creates a new one.
748
888
  *
@@ -753,6 +893,9 @@ export default class IncrementalIndexUpdater {
753
893
  * @private
754
894
  */
755
895
  _deserializeBitmap(data, bucket, ownerStr) {
896
+ // getRoaringBitmap32() is internally memoized (returns cached constructor
897
+ // after first resolution). The repeated calls are cheap but the pattern
898
+ // is noisy. A future cleanup could cache the constructor at instance level.
756
899
  const RoaringBitmap32 = getRoaringBitmap32();
757
900
  if (data[bucket] && data[bucket][ownerStr]) {
758
901
  return RoaringBitmap32.deserialize(