@brightchain/db 0.20.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +81 -0
- package/package.json +18 -0
- package/src/__tests__/helpers/mockBlockStore.d.ts +113 -0
- package/src/__tests__/helpers/mockBlockStore.js +380 -0
- package/src/__tests__/helpers/mockBlockStore.js.map +1 -0
- package/src/index.d.ts +31 -0
- package/src/index.js +78 -0
- package/src/index.js.map +1 -0
- package/src/lib/aggregation.d.ts +18 -0
- package/src/lib/aggregation.js +407 -0
- package/src/lib/aggregation.js.map +1 -0
- package/src/lib/cblIndex.d.ts +268 -0
- package/src/lib/cblIndex.js +856 -0
- package/src/lib/cblIndex.js.map +1 -0
- package/src/lib/collection.d.ts +305 -0
- package/src/lib/collection.js +991 -0
- package/src/lib/collection.js.map +1 -0
- package/src/lib/cursor.d.ts +8 -0
- package/src/lib/cursor.js +13 -0
- package/src/lib/cursor.js.map +1 -0
- package/src/lib/database.d.ts +158 -0
- package/src/lib/database.js +332 -0
- package/src/lib/database.js.map +1 -0
- package/src/lib/errors.d.ts +85 -0
- package/src/lib/errors.js +103 -0
- package/src/lib/errors.js.map +1 -0
- package/src/lib/expressMiddleware.d.ts +57 -0
- package/src/lib/expressMiddleware.js +488 -0
- package/src/lib/expressMiddleware.js.map +1 -0
- package/src/lib/headRegistry.d.ts +60 -0
- package/src/lib/headRegistry.js +216 -0
- package/src/lib/headRegistry.js.map +1 -0
- package/src/lib/indexing.d.ts +7 -0
- package/src/lib/indexing.js +14 -0
- package/src/lib/indexing.js.map +1 -0
- package/src/lib/model.d.ts +162 -0
- package/src/lib/model.js +260 -0
- package/src/lib/model.js.map +1 -0
- package/src/lib/pooledStoreAdapter.d.ts +44 -0
- package/src/lib/pooledStoreAdapter.js +109 -0
- package/src/lib/pooledStoreAdapter.js.map +1 -0
- package/src/lib/queryEngine.d.ts +48 -0
- package/src/lib/queryEngine.js +461 -0
- package/src/lib/queryEngine.js.map +1 -0
- package/src/lib/schemaValidation.d.ts +80 -0
- package/src/lib/schemaValidation.js +353 -0
- package/src/lib/schemaValidation.js.map +1 -0
- package/src/lib/transaction.d.ts +7 -0
- package/src/lib/transaction.js +12 -0
- package/src/lib/transaction.js.map +1 -0
- package/src/lib/types.d.ts +360 -0
- package/src/lib/types.js +6 -0
- package/src/lib/types.js.map +1 -0
- package/src/lib/updateEngine.d.ts +7 -0
- package/src/lib/updateEngine.js +13 -0
- package/src/lib/updateEngine.js.map +1 -0
|
@@ -0,0 +1,856 @@
|
|
|
1
|
+
"use strict";
/**
 * CBLIndex – higher-level CBL index built on top of a brightchain-db Collection.
 *
 * Tracks whitened CBL storage results with metadata, pool scoping,
 * user-level organization, and file version history.
 *
 * Backed by a `Collection<ICBLIndexEntry>` named `__cbl_index__`.
 */
Object.defineProperty(exports, "__esModule", { value: true });
exports.CBLIndex = void 0;
// Project library: provides isPooledBlockStore() and the CBLVisibility enum.
const brightchain_lib_1 = require("@brightchain/brightchain-lib");
// Node stdlib: randomUUID() is used to mint entry _id values.
const crypto_1 = require("crypto");
/** The well-known collection name for the CBL index. */
const CBL_INDEX_COLLECTION_NAME = '__cbl_index__';
/** Head registry key for the latest snapshot magnet URL. */
const CBL_INDEX_SNAPSHOT_KEY = '__cbl_index_snapshot__';
|
|
18
|
+
/**
|
|
19
|
+
* Higher-level CBL index built on top of a brightchain-db Collection.
|
|
20
|
+
* Tracks whitened CBL storage results with metadata, pool scoping,
|
|
21
|
+
* and user-level organization.
|
|
22
|
+
*/
|
|
23
|
+
class CBLIndex {
|
|
24
|
+
constructor(db, blockStore, options) {
|
|
25
|
+
this.sequenceCounter = 0;
|
|
26
|
+
this.mutationsSinceSnapshot = 0;
|
|
27
|
+
this.collection = db.collection(CBL_INDEX_COLLECTION_NAME);
|
|
28
|
+
this.blockStore = blockStore;
|
|
29
|
+
this.parityCount = options?.parityCount ?? 0;
|
|
30
|
+
this.snapshotInterval = options?.snapshotInterval ?? 100;
|
|
31
|
+
this.headRegistry = db.getHeadRegistry();
|
|
32
|
+
this.dbName = db.name;
|
|
33
|
+
this.enableRecovery = options?.enableRecovery ?? true;
|
|
34
|
+
this.gossipService = options?.gossipService;
|
|
35
|
+
}
|
|
36
|
+
/**
|
|
37
|
+
* Generate FEC parity blocks for the collection's current head (metadata) block.
|
|
38
|
+
* This is a best-effort operation — failures are logged but do not propagate.
|
|
39
|
+
*/
|
|
40
|
+
async generateParityForHead() {
|
|
41
|
+
if (this.parityCount <= 0)
|
|
42
|
+
return;
|
|
43
|
+
const headBlockId = this.headRegistry.getHead(this.dbName, CBL_INDEX_COLLECTION_NAME);
|
|
44
|
+
if (!headBlockId)
|
|
45
|
+
return;
|
|
46
|
+
try {
|
|
47
|
+
await this.blockStore.generateParityBlocks(headBlockId, this.parityCount);
|
|
48
|
+
}
|
|
49
|
+
catch (err) {
|
|
50
|
+
// Best-effort: log warning but don't fail the mutation
|
|
51
|
+
console.warn(`[CBLIndex] FEC parity generation failed for head block ${headBlockId}:`, err instanceof Error ? err.message : err);
|
|
52
|
+
}
|
|
53
|
+
}
|
|
54
|
+
/**
|
|
55
|
+
* Track a mutation and trigger an auto-snapshot if the threshold is reached.
|
|
56
|
+
* Auto-snapshots are best-effort — failures are logged but don't break mutations.
|
|
57
|
+
*/
|
|
58
|
+
async trackMutationForAutoSnapshot() {
|
|
59
|
+
this.mutationsSinceSnapshot++;
|
|
60
|
+
if (this.snapshotInterval > 0 &&
|
|
61
|
+
this.mutationsSinceSnapshot >= this.snapshotInterval) {
|
|
62
|
+
try {
|
|
63
|
+
await this.snapshot();
|
|
64
|
+
// snapshot() resets mutationsSinceSnapshot on success
|
|
65
|
+
}
|
|
66
|
+
catch (err) {
|
|
67
|
+
// Best-effort: log warning but don't fail the mutation
|
|
68
|
+
console.warn('[CBLIndex] Auto-snapshot failed:', err instanceof Error ? err.message : err);
|
|
69
|
+
}
|
|
70
|
+
}
|
|
71
|
+
}
|
|
72
|
+
/**
|
|
73
|
+
* Create a snapshot of the current index state.
|
|
74
|
+
* Serializes all entries (including soft-deleted) and the sequence counter,
|
|
75
|
+
* stores the data as a CBL via the block store, and returns the magnet URL.
|
|
76
|
+
*
|
|
77
|
+
* Persists the snapshot magnet URL in the head registry so recovery can find it.
|
|
78
|
+
* Resets the mutation counter on success so auto-snapshot tracking restarts.
|
|
79
|
+
*/
|
|
80
|
+
async snapshot() {
|
|
81
|
+
// 1. Get ALL entries including soft-deleted for full state
|
|
82
|
+
const entries = await this.collection.find({}).toArray();
|
|
83
|
+
// 2. Serialize to JSON
|
|
84
|
+
const snapshotData = {
|
|
85
|
+
entries,
|
|
86
|
+
sequenceCounter: this.sequenceCounter,
|
|
87
|
+
};
|
|
88
|
+
const json = JSON.stringify(snapshotData);
|
|
89
|
+
// 3. Convert to Uint8Array
|
|
90
|
+
const encoder = new TextEncoder();
|
|
91
|
+
const data = encoder.encode(json);
|
|
92
|
+
// 4. Store as CBL via blockStore.storeCBLWithWhitening
|
|
93
|
+
const result = await this.blockStore.storeCBLWithWhitening(data);
|
|
94
|
+
// 5. Persist the snapshot magnet URL in the head registry for recovery
|
|
95
|
+
await this.headRegistry.setHead(this.dbName, CBL_INDEX_SNAPSHOT_KEY, result.magnetUrl);
|
|
96
|
+
// 6. Reset mutation counter on success
|
|
97
|
+
this.mutationsSinceSnapshot = 0;
|
|
98
|
+
// 7. Return the magnet URL
|
|
99
|
+
return result.magnetUrl;
|
|
100
|
+
}
|
|
101
|
+
/**
|
|
102
|
+
* Initialize the sequence counter from existing entries.
|
|
103
|
+
* Call this after construction to resume from the highest existing sequence number.
|
|
104
|
+
*
|
|
105
|
+
* If the collection is empty and recovery is enabled, attempts recovery in order:
|
|
106
|
+
* (1) latest snapshot, (2) FEC parity rebuild, (3) block store scan.
|
|
107
|
+
*/
|
|
108
|
+
async initialize() {
|
|
109
|
+
const entries = await this.collection
|
|
110
|
+
.find({}, {
|
|
111
|
+
sort: { sequenceNumber: -1 },
|
|
112
|
+
limit: 1,
|
|
113
|
+
})
|
|
114
|
+
.toArray();
|
|
115
|
+
if (entries.length > 0) {
|
|
116
|
+
this.sequenceCounter = entries[0].sequenceNumber;
|
|
117
|
+
return;
|
|
118
|
+
}
|
|
119
|
+
// Collection is empty — attempt recovery if enabled
|
|
120
|
+
if (this.enableRecovery) {
|
|
121
|
+
const recovered = await this.recover();
|
|
122
|
+
if (recovered) {
|
|
123
|
+
// Re-read the sequence counter after recovery
|
|
124
|
+
const restored = await this.collection
|
|
125
|
+
.find({}, {
|
|
126
|
+
sort: { sequenceNumber: -1 },
|
|
127
|
+
limit: 1,
|
|
128
|
+
})
|
|
129
|
+
.toArray();
|
|
130
|
+
if (restored.length > 0) {
|
|
131
|
+
this.sequenceCounter = restored[0].sequenceNumber;
|
|
132
|
+
}
|
|
133
|
+
}
|
|
134
|
+
}
|
|
135
|
+
}
|
|
136
|
+
/**
|
|
137
|
+
* Attempt to recover the CBL index from available sources.
|
|
138
|
+
* Recovery order:
|
|
139
|
+
* 1. Latest snapshot (magnet URL stored in head registry)
|
|
140
|
+
* 2. FEC parity rebuild of the collection's head block
|
|
141
|
+
* 3. Block store scan for CBL blocks (partial rebuild — metadata lost)
|
|
142
|
+
*
|
|
143
|
+
* @returns true if any recovery method succeeded, false if all failed
|
|
144
|
+
*/
|
|
145
|
+
async recover() {
|
|
146
|
+
// Strategy 1: Restore from latest snapshot
|
|
147
|
+
if (await this.recoverFromSnapshot()) {
|
|
148
|
+
console.info('[CBLIndex] Recovery: restored from snapshot.');
|
|
149
|
+
return true;
|
|
150
|
+
}
|
|
151
|
+
// Strategy 2: FEC parity rebuild of the collection head block
|
|
152
|
+
if (await this.recoverFromFEC()) {
|
|
153
|
+
console.info('[CBLIndex] Recovery: rebuilt from FEC parity blocks.');
|
|
154
|
+
return true;
|
|
155
|
+
}
|
|
156
|
+
// Strategy 3: Partial rebuild by scanning the block store
|
|
157
|
+
if (await this.recoverFromBlockScan()) {
|
|
158
|
+
console.warn('[CBLIndex] Recovery: partial rebuild from block store scan. ' +
|
|
159
|
+
'User metadata (file names, tags, collections) has been lost — ' +
|
|
160
|
+
'only structural data (block IDs, magnet URLs) was recovered.');
|
|
161
|
+
return true;
|
|
162
|
+
}
|
|
163
|
+
console.error('[CBLIndex] Recovery: all recovery methods failed. Starting with empty index.');
|
|
164
|
+
return false;
|
|
165
|
+
}
|
|
166
|
+
/**
|
|
167
|
+
* Strategy 1: Restore from the latest snapshot whose magnet URL is
|
|
168
|
+
* persisted in the head registry.
|
|
169
|
+
*/
|
|
170
|
+
async recoverFromSnapshot() {
|
|
171
|
+
try {
|
|
172
|
+
const magnetUrl = this.headRegistry.getHead(this.dbName, CBL_INDEX_SNAPSHOT_KEY);
|
|
173
|
+
if (!magnetUrl)
|
|
174
|
+
return false;
|
|
175
|
+
await this.restoreFromSnapshot(magnetUrl);
|
|
176
|
+
return true;
|
|
177
|
+
}
|
|
178
|
+
catch (err) {
|
|
179
|
+
console.warn('[CBLIndex] Snapshot recovery failed:', err instanceof Error ? err.message : err);
|
|
180
|
+
return false;
|
|
181
|
+
}
|
|
182
|
+
}
|
|
183
|
+
/**
|
|
184
|
+
* Strategy 2: Recover the collection's head metadata block via FEC parity,
|
|
185
|
+
* then let the Collection re-load its documents from the recovered block.
|
|
186
|
+
*/
|
|
187
|
+
async recoverFromFEC() {
|
|
188
|
+
try {
|
|
189
|
+
const headBlockId = this.headRegistry.getHead(this.dbName, CBL_INDEX_COLLECTION_NAME);
|
|
190
|
+
if (!headBlockId)
|
|
191
|
+
return false;
|
|
192
|
+
// Check if the head block is already accessible
|
|
193
|
+
const exists = await this.blockStore.has(headBlockId);
|
|
194
|
+
if (exists) {
|
|
195
|
+
// Head block exists — the Collection should load normally.
|
|
196
|
+
// This means the collection wasn't truly empty; nothing to recover.
|
|
197
|
+
return false;
|
|
198
|
+
}
|
|
199
|
+
// Attempt FEC recovery of the head block
|
|
200
|
+
const result = await this.blockStore.recoverBlock(headBlockId);
|
|
201
|
+
if (!result.success)
|
|
202
|
+
return false;
|
|
203
|
+
// Head block recovered — re-initialize the collection by reading entries
|
|
204
|
+
// The Collection infrastructure will pick up the recovered head block
|
|
205
|
+
// on next access. Verify we now have entries.
|
|
206
|
+
const entries = await this.collection
|
|
207
|
+
.find({}, { limit: 1 })
|
|
208
|
+
.toArray();
|
|
209
|
+
return entries.length > 0;
|
|
210
|
+
}
|
|
211
|
+
catch (err) {
|
|
212
|
+
console.warn('[CBLIndex] FEC recovery failed:', err instanceof Error ? err.message : err);
|
|
213
|
+
return false;
|
|
214
|
+
}
|
|
215
|
+
}
|
|
216
|
+
/**
 * Strategy 3: Scan the block store for CBL blocks and rebuild a minimal
 * index from structural data only. This is a last-resort recovery that
 * loses all user metadata (file names, tags, collections, visibility, etc.).
 *
 * Iterates all blocks in all pools, checks for the BrightChain CBL magic
 * prefix (0xBC), and creates minimal index entries for discovered CBL blocks.
 *
 * @returns true if at least one entry was rebuilt, false otherwise
 *          (including when the store is not pooled or has no pools).
 */
async recoverFromBlockScan() {
    try {
        // Only pooled stores can be enumerated pool-by-pool.
        if (!(0, brightchain_lib_1.isPooledBlockStore)(this.blockStore))
            return false;
        const pooledStore = this.blockStore;
        const pools = await pooledStore.listPools();
        if (pools.length === 0)
            return false;
        let recoveredCount = 0;
        // Dedupe across pools: a block seen in an earlier pool is credited
        // to that pool and skipped in later ones.
        const seenHashes = new Set();
        for (const pool of pools) {
            for await (const hash of pooledStore.listBlocksInPool(pool)) {
                if (seenHashes.has(hash))
                    continue;
                seenHashes.add(hash);
                try {
                    const data = await pooledStore.getFromPool(pool, hash);
                    if (!this.looksLikeCblBlock(data))
                        continue;
                    // This block has the CBL magic prefix. Create a minimal
                    // index entry with structural data only.
                    const magnetUrl = this.blockStore.generateCBLMagnetUrl(hash, hash, // placeholder — the XOR pair is unknown
                    data.length);
                    this.sequenceCounter++;
                    const entry = {
                        _id: (0, crypto_1.randomUUID)(),
                        magnetUrl,
                        blockId1: hash,
                        blockId2: hash, // placeholder
                        blockSize: data.length,
                        poolId: pool,
                        createdAt: new Date(),
                        // Recovered entries default to Private since the
                        // original visibility metadata is unrecoverable.
                        visibility: brightchain_lib_1.CBLVisibility.Private,
                        sequenceNumber: this.sequenceCounter,
                    };
                    await this.collection.insertOne(entry);
                    recoveredCount++;
                }
                catch {
                    // Skip blocks that can't be read
                    continue;
                }
            }
        }
        return recoveredCount > 0;
    }
    catch (err) {
        console.warn('[CBLIndex] Block scan recovery failed:', err instanceof Error ? err.message : err);
        return false;
    }
}
|
|
275
|
+
/**
|
|
276
|
+
* Heuristic check for whether block data looks like a CBL block.
|
|
277
|
+
* CBL blocks in BrightChain start with the magic prefix 0xBC.
|
|
278
|
+
*/
|
|
279
|
+
looksLikeCblBlock(data) {
|
|
280
|
+
if (data.length < 4)
|
|
281
|
+
return false;
|
|
282
|
+
return data[0] === 0xbc;
|
|
283
|
+
}
|
|
284
|
+
/**
|
|
285
|
+
* Add a new CBL index entry.
|
|
286
|
+
* Validates that both referenced block IDs exist in the block store,
|
|
287
|
+
* assigns a monotonically increasing sequence number, and inserts.
|
|
288
|
+
*/
|
|
289
|
+
async addEntry(entry) {
|
|
290
|
+
// Validate block existence
|
|
291
|
+
const [block1Exists, block2Exists] = await Promise.all([
|
|
292
|
+
this.blockStore.has(entry.blockId1),
|
|
293
|
+
this.blockStore.has(entry.blockId2),
|
|
294
|
+
]);
|
|
295
|
+
const missingBlocks = [];
|
|
296
|
+
if (!block1Exists)
|
|
297
|
+
missingBlocks.push(entry.blockId1);
|
|
298
|
+
if (!block2Exists)
|
|
299
|
+
missingBlocks.push(entry.blockId2);
|
|
300
|
+
if (missingBlocks.length > 0) {
|
|
301
|
+
throw new Error(`Block validation failed: blocks not found in store: ${missingBlocks.join(', ')}`);
|
|
302
|
+
}
|
|
303
|
+
this.sequenceCounter++;
|
|
304
|
+
const fullEntry = {
|
|
305
|
+
...entry,
|
|
306
|
+
_id: (0, crypto_1.randomUUID)(),
|
|
307
|
+
sequenceNumber: this.sequenceCounter,
|
|
308
|
+
};
|
|
309
|
+
await this.collection.insertOne(fullEntry);
|
|
310
|
+
await this.generateParityForHead();
|
|
311
|
+
await this.trackMutationForAutoSnapshot();
|
|
312
|
+
// Announce new entry to peers in the same pool (Req 8.1)
|
|
313
|
+
if (this.gossipService) {
|
|
314
|
+
try {
|
|
315
|
+
await this.gossipService.announceCBLIndexUpdate(fullEntry);
|
|
316
|
+
}
|
|
317
|
+
catch {
|
|
318
|
+
// Best-effort: gossip failures should not break index mutations
|
|
319
|
+
}
|
|
320
|
+
}
|
|
321
|
+
return fullEntry;
|
|
322
|
+
}
|
|
323
|
+
/**
|
|
324
|
+
* Look up a single entry by its magnet URL.
|
|
325
|
+
*/
|
|
326
|
+
async getByMagnetUrl(magnetUrl) {
|
|
327
|
+
return this.collection.findOne({
|
|
328
|
+
magnetUrl,
|
|
329
|
+
deletedAt: { $exists: false },
|
|
330
|
+
});
|
|
331
|
+
}
|
|
332
|
+
/**
|
|
333
|
+
* Look up entries by block ID (matches either blockId1 or blockId2).
|
|
334
|
+
*/
|
|
335
|
+
async getByBlockId(blockId) {
|
|
336
|
+
return this.collection
|
|
337
|
+
.find({
|
|
338
|
+
$or: [{ blockId1: blockId }, { blockId2: blockId }],
|
|
339
|
+
deletedAt: { $exists: false },
|
|
340
|
+
})
|
|
341
|
+
.toArray();
|
|
342
|
+
}
|
|
343
|
+
/**
|
|
344
|
+
* Query entries with multi-attribute filtering, pagination, and sort.
|
|
345
|
+
*/
|
|
346
|
+
async query(options) {
|
|
347
|
+
const filter = this.buildFilter(options);
|
|
348
|
+
const sort = this.buildSort(options);
|
|
349
|
+
const cursor = this.collection.find(filter, { sort });
|
|
350
|
+
if (options.offset !== undefined && options.offset > 0) {
|
|
351
|
+
cursor.skip(options.offset);
|
|
352
|
+
}
|
|
353
|
+
if (options.limit !== undefined && options.limit >= 0) {
|
|
354
|
+
cursor.limit(options.limit);
|
|
355
|
+
}
|
|
356
|
+
return cursor.toArray();
|
|
357
|
+
}
|
|
358
|
+
/**
|
|
359
|
+
* Soft-delete an entry by magnet URL (sets deletedAt timestamp).
|
|
360
|
+
*/
|
|
361
|
+
async softDelete(magnetUrl) {
|
|
362
|
+
await this.collection.updateOne({ magnetUrl }, {
|
|
363
|
+
$set: { deletedAt: new Date() },
|
|
364
|
+
});
|
|
365
|
+
await this.generateParityForHead();
|
|
366
|
+
await this.trackMutationForAutoSnapshot();
|
|
367
|
+
// Announce soft-deletion to peers in the same pool (Req 8.6)
|
|
368
|
+
if (this.gossipService) {
|
|
369
|
+
try {
|
|
370
|
+
const deletedEntry = await this.collection.findOne({
|
|
371
|
+
magnetUrl,
|
|
372
|
+
});
|
|
373
|
+
if (deletedEntry) {
|
|
374
|
+
await this.gossipService.announceCBLIndexDelete(deletedEntry);
|
|
375
|
+
}
|
|
376
|
+
}
|
|
377
|
+
catch {
|
|
378
|
+
// Best-effort: gossip failures should not break index mutations
|
|
379
|
+
}
|
|
380
|
+
}
|
|
381
|
+
}
|
|
382
|
+
/**
|
|
383
|
+
* Aggregate CBL counts per pool.
|
|
384
|
+
* Returns a Map of poolId → count for all non-deleted entries.
|
|
385
|
+
*/
|
|
386
|
+
async getPoolCBLCounts() {
|
|
387
|
+
const entries = await this.collection
|
|
388
|
+
.find({
|
|
389
|
+
deletedAt: { $exists: false },
|
|
390
|
+
poolId: { $exists: true },
|
|
391
|
+
})
|
|
392
|
+
.toArray();
|
|
393
|
+
const counts = new Map();
|
|
394
|
+
for (const entry of entries) {
|
|
395
|
+
if (entry.poolId) {
|
|
396
|
+
counts.set(entry.poolId, (counts.get(entry.poolId) ?? 0) + 1);
|
|
397
|
+
}
|
|
398
|
+
}
|
|
399
|
+
return counts;
|
|
400
|
+
}
|
|
401
|
+
/**
|
|
402
|
+
* Find entries whose XOR component blocks exist in pools other than the given poolId.
|
|
403
|
+
* Returns block IDs and the set of pools they appear in.
|
|
404
|
+
*/
|
|
405
|
+
async getCrossPoolDependencies(poolId) {
|
|
406
|
+
// Get all entries in the target pool
|
|
407
|
+
const poolEntries = await this.collection
|
|
408
|
+
.find({
|
|
409
|
+
poolId,
|
|
410
|
+
deletedAt: { $exists: false },
|
|
411
|
+
})
|
|
412
|
+
.toArray();
|
|
413
|
+
// Collect all block IDs from this pool's entries
|
|
414
|
+
const blockIds = new Set();
|
|
415
|
+
for (const entry of poolEntries) {
|
|
416
|
+
blockIds.add(entry.blockId1);
|
|
417
|
+
blockIds.add(entry.blockId2);
|
|
418
|
+
}
|
|
419
|
+
// Get all non-deleted entries across all pools
|
|
420
|
+
const allEntries = await this.collection
|
|
421
|
+
.find({
|
|
422
|
+
deletedAt: { $exists: false },
|
|
423
|
+
})
|
|
424
|
+
.toArray();
|
|
425
|
+
// For each block ID from the target pool, find which other pools reference it
|
|
426
|
+
const blockPoolMap = new Map();
|
|
427
|
+
for (const entry of allEntries) {
|
|
428
|
+
const entryPool = entry.poolId ?? '__default__';
|
|
429
|
+
if (blockIds.has(entry.blockId1)) {
|
|
430
|
+
if (!blockPoolMap.has(entry.blockId1)) {
|
|
431
|
+
blockPoolMap.set(entry.blockId1, new Set());
|
|
432
|
+
}
|
|
433
|
+
blockPoolMap.get(entry.blockId1).add(entryPool);
|
|
434
|
+
}
|
|
435
|
+
if (blockIds.has(entry.blockId2)) {
|
|
436
|
+
if (!blockPoolMap.has(entry.blockId2)) {
|
|
437
|
+
blockPoolMap.set(entry.blockId2, new Set());
|
|
438
|
+
}
|
|
439
|
+
blockPoolMap.get(entry.blockId2).add(entryPool);
|
|
440
|
+
}
|
|
441
|
+
}
|
|
442
|
+
// Return only blocks that appear in multiple pools
|
|
443
|
+
const dependencies = [];
|
|
444
|
+
for (const [blockId, pools] of blockPoolMap) {
|
|
445
|
+
if (pools.size > 1) {
|
|
446
|
+
dependencies.push({ blockId, pools: Array.from(pools) });
|
|
447
|
+
}
|
|
448
|
+
}
|
|
449
|
+
return dependencies;
|
|
450
|
+
}
|
|
451
|
+
/**
|
|
452
|
+
* Get all CBL entries belonging to a specific pool (for pool deletion validation).
|
|
453
|
+
* Returns all non-deleted entries in the pool so the caller can assess
|
|
454
|
+
* whether the pool is safe to delete.
|
|
455
|
+
*
|
|
456
|
+
* Requirement 5.3: report all CBL entries in a pool as part of deletion validation.
|
|
457
|
+
*/
|
|
458
|
+
async getPoolEntries(poolId) {
|
|
459
|
+
return this.collection
|
|
460
|
+
.find({
|
|
461
|
+
poolId,
|
|
462
|
+
deletedAt: { $exists: false },
|
|
463
|
+
})
|
|
464
|
+
.toArray();
|
|
465
|
+
}
|
|
466
|
+
/**
|
|
467
|
+
* Share a CBL entry with another user.
|
|
468
|
+
* Adds the userId to the sharedWith array and sets visibility to Shared.
|
|
469
|
+
*/
|
|
470
|
+
async shareWith(magnetUrl, userId) {
|
|
471
|
+
const entry = await this.collection.findOne({
|
|
472
|
+
magnetUrl,
|
|
473
|
+
});
|
|
474
|
+
if (!entry) {
|
|
475
|
+
throw new Error(`CBL index entry not found for magnet URL: ${magnetUrl}`);
|
|
476
|
+
}
|
|
477
|
+
const sharedWith = entry.sharedWith ?? [];
|
|
478
|
+
if (!sharedWith.includes(userId)) {
|
|
479
|
+
sharedWith.push(userId);
|
|
480
|
+
}
|
|
481
|
+
await this.collection.updateOne({ magnetUrl }, {
|
|
482
|
+
$set: {
|
|
483
|
+
sharedWith,
|
|
484
|
+
visibility: brightchain_lib_1.CBLVisibility.Shared,
|
|
485
|
+
},
|
|
486
|
+
});
|
|
487
|
+
await this.generateParityForHead();
|
|
488
|
+
await this.trackMutationForAutoSnapshot();
|
|
489
|
+
}
|
|
490
|
+
/**
|
|
491
|
+
* Restore the index from a snapshot.
|
|
492
|
+
* Retrieves the CBL by magnet URL, deserializes the JSON payload,
|
|
493
|
+
* clears the current collection, and re-inserts all entries from the snapshot.
|
|
494
|
+
* Also restores the sequence counter.
|
|
495
|
+
*/
|
|
496
|
+
async restoreFromSnapshot(magnetUrl) {
|
|
497
|
+
// 1. Parse the magnet URL to get block IDs
|
|
498
|
+
const components = this.blockStore.parseCBLMagnetUrl(magnetUrl);
|
|
499
|
+
// 2. Retrieve the CBL data
|
|
500
|
+
const data = await this.blockStore.retrieveCBL(components.blockId1, components.blockId2, components.block1ParityIds, components.block2ParityIds);
|
|
501
|
+
// 3. Deserialize JSON from the Uint8Array
|
|
502
|
+
const decoder = new TextDecoder();
|
|
503
|
+
const json = decoder.decode(data);
|
|
504
|
+
const snapshotData = JSON.parse(json);
|
|
505
|
+
// 4. Clear the current collection
|
|
506
|
+
await this.collection.deleteMany({});
|
|
507
|
+
// 5. Insert all entries from the snapshot
|
|
508
|
+
if (snapshotData.entries.length > 0) {
|
|
509
|
+
// Restore Date objects from JSON strings
|
|
510
|
+
const restoredEntries = snapshotData.entries.map((entry) => ({
|
|
511
|
+
...entry,
|
|
512
|
+
createdAt: new Date(entry.createdAt),
|
|
513
|
+
deletedAt: entry.deletedAt ? new Date(entry.deletedAt) : undefined,
|
|
514
|
+
}));
|
|
515
|
+
await this.collection.insertMany(restoredEntries);
|
|
516
|
+
}
|
|
517
|
+
// 6. Restore the sequence counter and reset mutation tracking
|
|
518
|
+
this.sequenceCounter = snapshotData.sequenceCounter;
|
|
519
|
+
this.mutationsSinceSnapshot = 0;
|
|
520
|
+
}
|
|
521
|
+
/**
|
|
522
|
+
* Add a new version of a file.
|
|
523
|
+
* Auto-assigns versionNumber (previous max + 1) and sets previousVersion
|
|
524
|
+
* to the current latest version's magnet URL.
|
|
525
|
+
*
|
|
526
|
+
* IMPORTANT: version numbering considers ALL versions including soft-deleted
|
|
527
|
+
* ones so that the version chain remains intact (Requirement 27.8).
|
|
528
|
+
*/
|
|
529
|
+
async addVersion(fileId, entry) {
|
|
530
|
+
// Look at ALL versions (including soft-deleted) to determine the true
|
|
531
|
+
// latest version number and previous version pointer. This ensures the
|
|
532
|
+
// chain stays intact even when intermediate versions are soft-deleted.
|
|
533
|
+
const latest = await this.getLatestVersion(fileId, true);
|
|
534
|
+
const versionNumber = latest ? (latest.versionNumber ?? 0) + 1 : 1;
|
|
535
|
+
const previousVersion = latest ? latest.magnetUrl : undefined;
|
|
536
|
+
return this.addEntry({
|
|
537
|
+
...entry,
|
|
538
|
+
fileId,
|
|
539
|
+
versionNumber,
|
|
540
|
+
previousVersion,
|
|
541
|
+
});
|
|
542
|
+
}
|
|
543
|
+
/**
|
|
544
|
+
* Get all versions of a file, ordered by versionNumber ascending.
|
|
545
|
+
*
|
|
546
|
+
* @param fileId - The stable file identifier grouping all versions.
|
|
547
|
+
* @param includeDeleted - When true, includes soft-deleted versions in the
|
|
548
|
+
* result. Useful for verifying chain integrity (Requirement 27.8).
|
|
549
|
+
* Defaults to false.
|
|
550
|
+
*/
|
|
551
|
+
async getVersionHistory(fileId, includeDeleted = false) {
|
|
552
|
+
const filter = { fileId };
|
|
553
|
+
if (!includeDeleted) {
|
|
554
|
+
filter['deletedAt'] = { $exists: false };
|
|
555
|
+
}
|
|
556
|
+
return this.collection
|
|
557
|
+
.find(filter, {
|
|
558
|
+
sort: { versionNumber: 1 },
|
|
559
|
+
})
|
|
560
|
+
.toArray();
|
|
561
|
+
}
|
|
562
|
+
/**
|
|
563
|
+
* Get the latest version of a file (O(1) via sort + limit 1).
|
|
564
|
+
*
|
|
565
|
+
* @param fileId - The stable file identifier grouping all versions.
|
|
566
|
+
* @param includeDeleted - When true, considers soft-deleted versions.
|
|
567
|
+
* Used internally by addVersion to maintain chain integrity.
|
|
568
|
+
* Defaults to false.
|
|
569
|
+
*/
|
|
570
|
+
async getLatestVersion(fileId, includeDeleted = false) {
|
|
571
|
+
const filter = { fileId };
|
|
572
|
+
if (!includeDeleted) {
|
|
573
|
+
filter['deletedAt'] = { $exists: false };
|
|
574
|
+
}
|
|
575
|
+
const results = await this.collection
|
|
576
|
+
.find(filter, {
|
|
577
|
+
sort: { versionNumber: -1 },
|
|
578
|
+
limit: 1,
|
|
579
|
+
})
|
|
580
|
+
.toArray();
|
|
581
|
+
return results.length > 0 ? results[0] : null;
|
|
582
|
+
}
|
|
583
|
+
/**
 * Merge an incoming CBL index entry from a remote peer (via gossip).
 *
 * Idempotent: if an entry with the same magnet URL already exists and has
 * identical content (blockId1, blockId2, blockSize), the merge is a no-op.
 *
 * Conflict: if an entry with the same magnet URL exists but has different
 * content, both entries are preserved and flagged with `hasConflict: true`
 * and cross-referenced via `conflictsWith`.
 *
 * New entry: if no entry with the magnet URL exists, the entry is inserted
 * with a new local sequence number.
 *
 * @param entry - The incoming entry from a remote peer.
 * @returns The merged entry (existing, new, or conflict-flagged).
 *
 * @see Requirements 8.2, 8.3
 */
async mergeEntry(entry) {
    // Look for an existing entry with the same magnet URL (including soft-deleted)
    const existing = await this.collection.findOne({
        magnetUrl: entry.magnetUrl,
    });
    if (existing) {
        // Check if content is identical — idempotent merge
        const sameContent = existing.blockId1 === entry.blockId1 &&
            existing.blockId2 === entry.blockId2 &&
            existing.blockSize === entry.blockSize;
        if (sameContent) {
            // Idempotent: same magnet URL, same content — skip
            return existing;
        }
        // Conflict: same magnet URL, different content — preserve both, flag conflict
        // 1. Flag the existing entry
        const existingConflicts = existing.conflictsWith ?? [];
        // Reuse the incoming entry's _id when present so both sides of the
        // cross-reference use the same identifier; otherwise mint one.
        const newId = entry._id || (0, crypto_1.randomUUID)();
        if (!existingConflicts.includes(newId)) {
            existingConflicts.push(newId);
        }
        await this.collection.updateOne({ _id: existing._id }, {
            $set: {
                hasConflict: true,
                conflictsWith: existingConflicts,
            },
        });
        // 2. Insert the incoming entry with conflict flag
        this.sequenceCounter++;
        const conflictEntry = {
            ...entry,
            _id: newId,
            sequenceNumber: this.sequenceCounter,
            hasConflict: true,
            conflictsWith: [existing._id],
        };
        await this.collection.insertOne(conflictEntry);
        await this.generateParityForHead();
        await this.trackMutationForAutoSnapshot();
        return conflictEntry;
    }
    // No existing entry — insert as new with a local sequence number
    this.sequenceCounter++;
    const newEntry = {
        ...entry,
        _id: entry._id || (0, crypto_1.randomUUID)(),
        sequenceNumber: this.sequenceCounter,
    };
    await this.collection.insertOne(newEntry);
    await this.generateParityForHead();
    await this.trackMutationForAutoSnapshot();
    return newEntry;
}
|
|
655
|
+
/**
|
|
656
|
+
* Apply a soft-delete from a remote peer (via gossip).
|
|
657
|
+
*
|
|
658
|
+
* If the entry exists locally and is not already soft-deleted, marks it
|
|
659
|
+
* as deleted with the provided timestamp. If the entry doesn't exist
|
|
660
|
+
* locally or is already deleted, this is a no-op.
|
|
661
|
+
*
|
|
662
|
+
* @param magnetUrl - The magnet URL of the entry to soft-delete.
|
|
663
|
+
* @param deletedAt - The deletion timestamp from the remote peer.
|
|
664
|
+
*
|
|
665
|
+
* @see Requirements 8.6
|
|
666
|
+
*/
|
|
667
|
+
async mergeSoftDelete(magnetUrl, deletedAt) {
|
|
668
|
+
const existing = await this.collection.findOne({
|
|
669
|
+
magnetUrl,
|
|
670
|
+
});
|
|
671
|
+
if (!existing) {
|
|
672
|
+
// Entry doesn't exist locally — nothing to delete
|
|
673
|
+
return;
|
|
674
|
+
}
|
|
675
|
+
if (existing.deletedAt) {
|
|
676
|
+
// Already soft-deleted — no-op
|
|
677
|
+
return;
|
|
678
|
+
}
|
|
679
|
+
await this.collection.updateOne({ magnetUrl }, {
|
|
680
|
+
$set: { deletedAt },
|
|
681
|
+
});
|
|
682
|
+
await this.generateParityForHead();
|
|
683
|
+
await this.trackMutationForAutoSnapshot();
|
|
684
|
+
}
|
|
685
|
+
/**
|
|
686
|
+
* Get a CBL index manifest for a specific pool.
|
|
687
|
+
* Returns a list of (magnetUrl, sequenceNumber) pairs for all non-deleted
|
|
688
|
+
* entries in the given pool. Used during pool-scoped reconciliation to
|
|
689
|
+
* compare CBL index state between nodes.
|
|
690
|
+
*
|
|
691
|
+
* @param poolId - The pool to generate a manifest for
|
|
692
|
+
* @param nodeId - The local node ID to include in the manifest
|
|
693
|
+
* @returns A CBLIndexManifest for the specified pool
|
|
694
|
+
* @see Requirements 8.4
|
|
695
|
+
*/
|
|
696
|
+
async getCBLIndexManifest(poolId, nodeId) {
|
|
697
|
+
const entries = await this.collection
|
|
698
|
+
.find({
|
|
699
|
+
poolId,
|
|
700
|
+
deletedAt: { $exists: false },
|
|
701
|
+
})
|
|
702
|
+
.toArray();
|
|
703
|
+
const manifestEntries = entries.map((e) => ({
|
|
704
|
+
magnetUrl: e.magnetUrl,
|
|
705
|
+
sequenceNumber: e.sequenceNumber,
|
|
706
|
+
}));
|
|
707
|
+
return {
|
|
708
|
+
poolId,
|
|
709
|
+
nodeId,
|
|
710
|
+
entries: manifestEntries,
|
|
711
|
+
generatedAt: new Date(),
|
|
712
|
+
};
|
|
713
|
+
}
|
|
714
|
+
/**
|
|
715
|
+
* Reconcile the local CBL index for a pool against a remote manifest.
|
|
716
|
+
* Identifies entries present in the remote manifest but missing locally,
|
|
717
|
+
* and merges them using the provided entry fetcher.
|
|
718
|
+
*
|
|
719
|
+
* @param poolId - The pool being reconciled
|
|
720
|
+
* @param remoteManifest - The remote node's CBL index manifest
|
|
721
|
+
* @param fetchEntry - Callback to fetch a full CBL index entry from the remote peer by magnet URL
|
|
722
|
+
* @returns The number of entries merged
|
|
723
|
+
* @see Requirements 8.4
|
|
724
|
+
*/
|
|
725
|
+
async reconcileCBLIndex(poolId, remoteManifest, fetchEntry) {
|
|
726
|
+
// Build a set of local magnet URLs for this pool (including soft-deleted)
|
|
727
|
+
const localEntries = await this.collection
|
|
728
|
+
.find({
|
|
729
|
+
poolId,
|
|
730
|
+
})
|
|
731
|
+
.toArray();
|
|
732
|
+
const localMagnetUrls = new Set(localEntries.map((e) => e.magnetUrl));
|
|
733
|
+
// Find entries in the remote manifest that are missing locally
|
|
734
|
+
const missingEntries = remoteManifest.entries.filter((re) => !localMagnetUrls.has(re.magnetUrl));
|
|
735
|
+
let merged = 0;
|
|
736
|
+
for (const missing of missingEntries) {
|
|
737
|
+
const fullEntry = await fetchEntry(missing.magnetUrl);
|
|
738
|
+
if (fullEntry) {
|
|
739
|
+
await this.mergeEntry(fullEntry);
|
|
740
|
+
merged++;
|
|
741
|
+
}
|
|
742
|
+
}
|
|
743
|
+
return merged;
|
|
744
|
+
}
|
|
745
|
+
/**
|
|
746
|
+
* Build a FilterQuery from ICBLIndexQueryOptions.
|
|
747
|
+
*/
|
|
748
|
+
buildFilter(options) {
|
|
749
|
+
const conditions = [];
|
|
750
|
+
// Exclude soft-deleted entries unless explicitly requested
|
|
751
|
+
if (!options.includeDeleted) {
|
|
752
|
+
conditions.push({
|
|
753
|
+
deletedAt: { $exists: false },
|
|
754
|
+
});
|
|
755
|
+
}
|
|
756
|
+
if (options.poolId !== undefined) {
|
|
757
|
+
conditions.push({ poolId: options.poolId });
|
|
758
|
+
}
|
|
759
|
+
if (options.createdBy !== undefined) {
|
|
760
|
+
conditions.push({ createdBy: options.createdBy });
|
|
761
|
+
}
|
|
762
|
+
if (options.visibility !== undefined) {
|
|
763
|
+
conditions.push({ visibility: options.visibility });
|
|
764
|
+
}
|
|
765
|
+
if (options.userCollection !== undefined) {
|
|
766
|
+
conditions.push({
|
|
767
|
+
userCollection: options.userCollection,
|
|
768
|
+
});
|
|
769
|
+
}
|
|
770
|
+
if (options.fileId !== undefined) {
|
|
771
|
+
conditions.push({ fileId: options.fileId });
|
|
772
|
+
}
|
|
773
|
+
if (options.fileName !== undefined) {
|
|
774
|
+
conditions.push({
|
|
775
|
+
'metadata.fileName': options.fileName,
|
|
776
|
+
});
|
|
777
|
+
}
|
|
778
|
+
if (options.mimeType !== undefined) {
|
|
779
|
+
conditions.push({
|
|
780
|
+
'metadata.mimeType': options.mimeType,
|
|
781
|
+
});
|
|
782
|
+
}
|
|
783
|
+
if (options.tags !== undefined && options.tags.length > 0) {
|
|
784
|
+
// Entries must have ALL specified tags
|
|
785
|
+
for (const tag of options.tags) {
|
|
786
|
+
conditions.push({
|
|
787
|
+
'metadata.tags': { $elemMatch: { $eq: tag } },
|
|
788
|
+
});
|
|
789
|
+
}
|
|
790
|
+
}
|
|
791
|
+
// Visibility-based access control: when requestingUserId is set,
|
|
792
|
+
// only return entries the user is allowed to see.
|
|
793
|
+
// - Public: visible to everyone
|
|
794
|
+
// - Private: visible only to the creator
|
|
795
|
+
// - Shared: visible to the creator and users in sharedWith
|
|
796
|
+
if (options.requestingUserId !== undefined) {
|
|
797
|
+
conditions.push({
|
|
798
|
+
$or: [
|
|
799
|
+
// Public entries are always visible
|
|
800
|
+
{ visibility: brightchain_lib_1.CBLVisibility.Public },
|
|
801
|
+
// Private entries are visible only to the creator
|
|
802
|
+
{
|
|
803
|
+
$and: [
|
|
804
|
+
{ visibility: brightchain_lib_1.CBLVisibility.Private },
|
|
805
|
+
{ createdBy: options.requestingUserId },
|
|
806
|
+
],
|
|
807
|
+
},
|
|
808
|
+
// Shared entries are visible to the creator or sharedWith users
|
|
809
|
+
{
|
|
810
|
+
$and: [
|
|
811
|
+
{ visibility: brightchain_lib_1.CBLVisibility.Shared },
|
|
812
|
+
{
|
|
813
|
+
$or: [
|
|
814
|
+
{ createdBy: options.requestingUserId },
|
|
815
|
+
// MongoDB-style array element match: if sharedWith is an
|
|
816
|
+
// array of strings, matching a plain string value checks
|
|
817
|
+
// whether the string is an element of the array.
|
|
818
|
+
{ sharedWith: options.requestingUserId },
|
|
819
|
+
],
|
|
820
|
+
},
|
|
821
|
+
],
|
|
822
|
+
},
|
|
823
|
+
],
|
|
824
|
+
});
|
|
825
|
+
}
|
|
826
|
+
if (conditions.length === 0) {
|
|
827
|
+
return {};
|
|
828
|
+
}
|
|
829
|
+
if (conditions.length === 1) {
|
|
830
|
+
return conditions[0];
|
|
831
|
+
}
|
|
832
|
+
return { $and: conditions };
|
|
833
|
+
}
|
|
834
|
+
/**
|
|
835
|
+
* Build a SortSpec from ICBLIndexQueryOptions.
|
|
836
|
+
*/
|
|
837
|
+
buildSort(options) {
|
|
838
|
+
if (!options.sortBy)
|
|
839
|
+
return undefined;
|
|
840
|
+
const direction = options.sortOrder === 'desc' ? -1 : 1;
|
|
841
|
+
switch (options.sortBy) {
|
|
842
|
+
case 'createdAt':
|
|
843
|
+
return { createdAt: direction };
|
|
844
|
+
case 'fileName':
|
|
845
|
+
return { 'metadata.fileName': direction };
|
|
846
|
+
case 'originalSize':
|
|
847
|
+
return { 'metadata.originalSize': direction };
|
|
848
|
+
case 'versionNumber':
|
|
849
|
+
return { versionNumber: direction };
|
|
850
|
+
default:
|
|
851
|
+
return undefined;
|
|
852
|
+
}
|
|
853
|
+
}
|
|
854
|
+
}
|
|
855
|
+
// CommonJS named export of the CBL index class (compiled TypeScript output).
exports.CBLIndex = CBLIndex;
//# sourceMappingURL=cblIndex.js.map
|