hypercore 10.38.2 → 11.0.1
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +13 -30
- package/index.js +388 -444
- package/lib/audit.js +33 -41
- package/lib/bit-interlude.js +174 -0
- package/lib/bitfield.js +79 -87
- package/lib/block-store.js +12 -50
- package/lib/copy-prologue.js +236 -0
- package/lib/core.js +414 -746
- package/lib/download.js +42 -4
- package/lib/merkle-tree.js +263 -406
- package/lib/multisig.js +9 -6
- package/lib/mutex.js +4 -0
- package/lib/remote-bitfield.js +9 -9
- package/lib/replicator.js +247 -177
- package/lib/session-state.js +949 -0
- package/lib/verifier.js +20 -13
- package/package.json +2 -2
- package/lib/batch.js +0 -431
- package/lib/big-header.js +0 -55
- package/lib/oplog.js +0 -228
package/lib/merkle-tree.js
CHANGED
@@ -1,16 +1,10 @@
 const flat = require('flat-tree')
 const crypto = require('hypercore-crypto')
-const c = require('compact-encoding')
-const Xache = require('xache')
 const b4a = require('b4a')
 const unslab = require('unslab')
 const caps = require('./caps')
 const { INVALID_PROOF, INVALID_CHECKSUM, INVALID_OPERATION, BAD_ARGUMENT, ASSERTION } = require('hypercore-errors')

-const BLANK_HASH = b4a.alloc(32)
-const OLD_TREE = b4a.from([5, 2, 87, 2, 0, 0, 40, 7, 66, 76, 65, 75, 69, 50, 98])
-const TREE_CACHE = 128 // speeds up linear scans by A LOT
-
 class NodeQueue {
   constructor (nodes, extra = null) {
     this.i = 0
@@ -42,18 +36,22 @@ class NodeQueue {
 }

 class MerkleTreeBatch {
-  constructor (tree) {
-    this.fork =
-    this.roots = [...
-    this.length =
-    this.
-    this.
-    this.
+  constructor (tree, session) {
+    this.fork = session.fork
+    this.roots = [...session.roots]
+    this.length = session.length
+    this.signature = session.signature
+    this.ancestors = session.length
+    this.byteLength = session.byteLength
     this.hashCached = null

-    this.
-    this.
+    this.committed = false
+    this.truncated = false
+    this.treeLength = session.length
+    this.treeFork = session.fork
     this.tree = tree
+    this.storage = tree.storage
+    this.session = session
     this.nodes = []
     this.upgraded = false
   }
@@ -121,7 +119,7 @@ class MerkleTreeBatch {
   }

   clone () {
-    const b = new MerkleTreeBatch(this.tree)
+    const b = new MerkleTreeBatch(this.tree, this.session)

     b.fork = this.fork
     b.roots = [...this.roots]
@@ -138,7 +136,7 @@ class MerkleTreeBatch {
   }

   hash () {
-    if (this.hashCached === null) this.hashCached = unslab(
+    if (this.hashCached === null) this.hashCached = unslab(crypto.tree(this.roots))
     return this.hashCached
   }

@@ -162,22 +160,28 @@ class MerkleTreeBatch {
     return this.tree.get(index, error)
   }

-  proof ({ block, hash, seek, upgrade }) {
-    return generateProof(this, block, hash, seek, upgrade)
+  proof (batch, { block, hash, seek, upgrade }) {
+    return generateProof(batch, this, block, hash, seek, upgrade)
   }

   verifyUpgrade (proof) {
-    const unverified = verifyTree(proof, this.
+    const unverified = verifyTree(proof, this.nodes)

     if (!proof.upgrade) throw INVALID_OPERATION('Expected upgrade proof')

     return verifyUpgrade(proof, unverified, this)
   }

+  addNodesUnsafe (nodes) {
+    for (let i = 0; i < nodes.length; i++) {
+      this.nodes.push(nodes[i])
+    }
+  }
+
   append (buf) {
     const head = this.length * 2
     const ite = flat.iterator(head)
-    const node = blockNode(
+    const node = blockNode(head, buf)

     this.appendRoot(node, ite)
   }
@@ -201,7 +205,7 @@ class MerkleTreeBatch {
       break
     }

-    const node = unslabNode(parentNode(
+    const node = unslabNode(parentNode(ite.parent(), a, b))
     this.nodes.push(node)
     this.roots.pop()
     this.roots.pop()
@@ -210,76 +214,95 @@ class MerkleTreeBatch {
   }

   commitable () {
-    return this.treeFork === this.
+    return this.treeFork === this.session.fork && (
       this.upgraded
-        ? this.treeLength === this.
-        : this.treeLength <= this.
+        ? this.treeLength === this.session.length
+        : this.treeLength <= this.session.length
     )
   }

-  commit () {
+  commit (tx) {
+    if (tx === undefined) throw INVALID_OPERATION('No database batch was passed')
     if (!this.commitable()) throw INVALID_OPERATION('Tree was modified during batch, refusing to commit')

-    if (this.upgraded) this._commitUpgrade()
+    if (this.upgraded) this._commitUpgrade(tx)

     for (let i = 0; i < this.nodes.length; i++) {
       const node = this.nodes[i]
-
+      tx.putTreeNode(node)
     }
+
+    this.committed = true
+
+    return this
   }

-  _commitUpgrade () {
+  _commitUpgrade (tx) {
     // TODO: If easy to detect, we should refuse an trunc+append here without a fork id
     // change. Will only happen on user error so mostly to prevent that.

     if (this.ancestors < this.treeLength) {
+      tx.deleteTreeNodeRange(this.ancestors * 2, this.treeLength * 2)
+
       if (this.ancestors > 0) {
-        const head =
+        const head = this.ancestors * 2
         const ite = flat.iterator(head - 2)

         while (true) {
           if (ite.contains(head) && ite.index < head) {
-
+            tx.deleteTreeNode(ite.index)
           }
           if (ite.offset === 0) break
           ite.parent()
         }
-      }

-
-
-        : this.ancestors
-
-      this.tree.truncated = true
-      this.tree.cache = new Xache({ maxSize: this.tree.cache.maxSize })
-      truncateMap(this.tree.unflushed, this.ancestors)
-      if (this.tree.flushing !== null) truncateMap(this.tree.flushing, this.ancestors)
+        this.truncated = true
+      }
     }
-
-    this.tree.roots = this.roots
-    this.tree.length = this.length
-    this.tree.byteLength = this.byteLength
-    this.tree.fork = this.fork
-    this.tree.signature = this.signature
   }

   seek (bytes, padding) {
-    return new ByteSeeker(this, bytes, padding)
+    return new ByteSeeker(this, this, bytes, padding)
   }

   byteRange (index) {
-
+    const readBatch = this.storage.read()
+    const range = getByteRange(this, index, readBatch)
+    readBatch.tryFlush()
+
+    return range
   }

   byteOffset (index) {
-    if (index === 2 * this.
-
+    if (index === 2 * this.length) return this.byteLength
+
+    const readBatch = this.storage.read()
+    const offset = getByteOffset(this, index, readBatch)
+    readBatch.tryFlush()
+
+    return offset
+  }
+
+  async restore (length) {
+    if (length === this.length) return this
+
+    const roots = unslabNodes(await this.tree.getRoots(length))
+
+    this.roots = roots
+    this.length = length
+    this.byteLength = totalSize(roots)
+    this.ancestors = length
+
+    for (const node of roots) this.byteLength += node.size
+
+    return this
   }
 }

 class ReorgBatch extends MerkleTreeBatch {
-  constructor (tree) {
-    super(tree)
+  constructor (tree, session) {
+    super(tree, session)
+
     this.roots = []
     this.length = 0
     this.byteLength = 0
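Illustrative sketch (not part of the published diff): tree mutations are now persisted through an explicit storage transaction rather than by writing back into an in-memory tree. The sketch below only uses calls visible in this file (MerkleTreeBatch, append, commitable, commit with its putTreeNode writes); the storage.write()/tx.flush() calls and the session object are assumptions about the surrounding storage/session-state layer, not verified API.

const b4a = require('b4a')
const { MerkleTreeBatch } = require('./merkle-tree') // this file's exports, per the new module.exports further down (path illustrative)

// Assumed inputs: `tree` is a MerkleTree bound to the new storage object and
// `session` carries the current fork/roots/length/byteLength snapshot.
async function appendOne (tree, session, storage, data) {
  const batch = new MerkleTreeBatch(tree, session) // snapshots the session state
  batch.append(b4a.from(data))                     // queues the new tree nodes in batch.nodes

  if (!batch.commitable()) throw new Error('session changed underneath the batch')

  const tx = storage.write() // assumed: a write transaction from the storage layer
  batch.commit(tx)           // queues putTreeNode() for every new node
  await tx.flush()           // assumed: persists the queued writes atomically
}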
@@ -303,7 +326,7 @@ class ReorgBatch extends MerkleTreeBatch {
     if (this.want === null) return true

     const nodes = []
-    const root = verifyTree(proof,
+    const root = verifyTree(proof, nodes)

     if (root === null || !b4a.equals(root.hash, this.diff.hash)) return false

@@ -363,8 +386,9 @@ class ReorgBatch extends MerkleTreeBatch {
 }

 class ByteSeeker {
-  constructor (tree, bytes, padding = 0) {
+  constructor (tree, session, bytes, padding = 0) {
     this.tree = tree
+    this.session = session
     this.bytes = bytes
     this.padding = padding

@@ -377,7 +401,7 @@ class ByteSeeker {
   async _seek (bytes) {
     if (!bytes) return [0, 0]

-    for (const node of this.
+    for (const node of this.session.roots) { // all async ticks happen once we find the root so safe
       const size = getUnpaddedSize(node, this.padding, null)

       if (bytes === size) return [flat.rightSpan(node.index) + 2, 0]
@@ -423,81 +447,102 @@ class ByteSeeker {
   }
 }

-
-  constructor (
-    this.
-    this.
-
-    this.
-    this.
-    this.
-    this.
-
-    this.
-
-
-
-
-
+class TreeProof {
+  constructor (tree, block, hash, seek, upgrade) {
+    this.fork = tree.fork
+    this.signature = tree.signature
+
+    this.block = block
+    this.hash = hash
+    this.seek = seek
+    this.upgrade = upgrade
+
+    this.pending = {
+      node: null,
+      seek: null,
+      upgrade: null,
+      additionalUpgrade: null
+    }
   }

-
-
-    this.unflushed.set(node.index, node)
-  }
+  async settle () {
+    const result = { fork: this.fork, block: null, hash: null, seek: null, upgrade: null, manifest: null }

-
-    return new MerkleTreeBatch(this)
-  }
+    const [pNode, pSeek, pUpgrade, pAdditional] = await settleProof(this.pending)

-
-
-
+    if (this.block) {
+      if (pNode === null) throw INVALID_OPERATION('Invalid block request')
+      result.block = {
+        index: this.block.index,
+        value: null, // populated upstream, alloc it here for simplicity
+        nodes: pNode
+      }
+    } else if (this.hash) {
+      if (pNode === null) throw INVALID_OPERATION('Invalid block request')
+      result.hash = {
+        index: this.hash.index,
+        nodes: pNode
+      }
+    }

-
+    if (this.seek && pSeek !== null) {
+      result.seek = {
+        bytes: this.seek.bytes,
+        nodes: pSeek
+      }
+    }

-
-
-
-
+    if (this.upgrade) {
+      result.upgrade = {
+        start: this.upgrade.start,
+        length: this.upgrade.length,
+        nodes: pUpgrade,
+        additionalNodes: pAdditional || [],
+        signature: this.signature
+      }
+    }

-
+    return result
+  }
+}

-
+class MerkleTree {
+  constructor (storage) {
+    this.storage = storage
   }

-
-    return
+  static hash (s) {
+    return unslab(crypto.tree(s.roots))
   }

-
-    return
+  static signable (s, namespace) {
+    return caps.treeSignable(namespace, MerkleTree.hash(s), s.length, s.fork)
+  }
+
+  static size (roots) {
+    return totalSize(roots)
+  }
+
+  static span (roots) {
+    return totalSpan(roots)
   }

-
-    return
+  clone (storage) {
+    return new MerkleTree(storage)
   }

   getRoots (length) {
     const indexes = flat.fullRoots(2 * length)
     const roots = new Array(indexes.length)
+    const readBatch = this.storage.read()

     for (let i = 0; i < indexes.length; i++) {
-      roots[i] =
+      roots[i] = readBatch.getTreeNode(indexes[i], true)
     }

-
-    }
+    readBatch.tryFlush()

-
-    this.prologue = { hash, length }
-  }
-
-  addNodes (nodes) {
-    for (let i = 0; i < nodes.length; i++) {
-      const node = nodes[i]
-      this.unflushed.set(node.index, node)
-    }
+    return Promise.all(roots)
   }

   getNeededNodes (length, start, end) {
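Illustrative sketch (not part of the published diff): proof generation is now two-step. generateProof() only queues tree-node reads and returns a TreeProof whose pending arrays hold unresolved promises; settle() awaits them and shapes the wire-format result. How a caller might drive it, with the read-batch handling assumed from the pattern used elsewhere in this file:

// Assumed: `batch` is a MerkleTreeBatch for the current session and `storage`
// is the new storage object. batch.proof() forwards to generateProof().
async function makeUpgradeProof (batch, storage, start, length) {
  const rx = storage.read()                                       // read batch that the queued getTreeNode calls go to
  const pending = await batch.proof(rx, { upgrade: { start, length } })
  rx.tryFlush()                                                   // dispatch the queued reads
  return pending.settle()                                         // -> { fork, block, hash, seek, upgrade, manifest }
}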
@@ -527,11 +572,14 @@ module.exports = class MerkleTree {
   async upgradeable (length) {
     const indexes = flat.fullRoots(2 * length)
     const roots = new Array(indexes.length)
+    const readBatch = this.storage.read()

     for (let i = 0; i < indexes.length; i++) {
-      roots[i] =
+      roots[i] = readBatch.getTreeNode(indexes[i], false)
     }

+    readBatch.tryFlush()
+
     for (const node of await Promise.all(roots)) {
       if (node === null) return false
     }
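Illustrative sketch (not part of the published diff): getRoots() and upgradeable() above both follow the same read-batch pattern — open a batch, queue getTreeNode() promises, tryFlush() to dispatch them, then await. The same pattern as a standalone helper; the storage object itself is an assumption:

const flat = require('flat-tree')

// Load the full roots for a given tree length in one dispatched read batch.
async function loadRoots (storage, length) {
  const readBatch = storage.read()
  const pending = flat.fullRoots(2 * length).map(index => readBatch.getTreeNode(index, true))
  readBatch.tryFlush() // dispatch all queued reads at once
  return Promise.all(pending)
}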
@@ -539,131 +587,25 @@ module.exports = class MerkleTree {
     return true
   }

-
-    return
-  }
-
-  get (index, error = true) {
-    const c = this.cache.get(index)
-    if (c) return c
-
-    let node = this.unflushed.get(index)
-
-    if (this.flushing !== null && node === undefined) {
-      node = this.flushing.get(index)
-    }
-
-    // TODO: test this
-    if (this.truncated && node !== undefined && node.index >= 2 * this.truncateTo) {
-      node = blankNode(index)
-    }
-
-    if (node !== undefined) {
-      if (node.hash === BLANK_HASH) {
-        if (error) throw INVALID_OPERATION('Could not load node: ' + index)
-        return Promise.resolve(null)
-      }
-      return Promise.resolve(node)
-    }
-
-    return getStoredNode(this.storage, index, this.cache, error)
-  }
-
-  async flush () {
-    this.flushing = this.unflushed
-    this.unflushed = new Map()
-
-    try {
-      if (this.truncated) await this._flushTruncation()
-      await this._flushNodes()
-    } catch (err) {
-      for (const node of this.flushing.values()) {
-        if (!this.unflushed.has(node.index)) this.unflushed.set(node.index, node)
-      }
-      throw err
-    } finally {
-      this.flushing = null
-    }
-  }
-
-  _flushTruncation () {
-    return new Promise((resolve, reject) => {
-      const t = this.truncateTo
-      const offset = t === 0 ? 0 : (t - 1) * 80 + 40
-
-      this.storage.truncate(offset, (err) => {
-        if (err) return reject(err)
-
-        if (this.truncateTo === t) {
-          this.truncateTo = 0
-          this.truncated = false
-        }
-
-        resolve()
-      })
-    })
+  seek (session, bytes, padding) {
+    return new ByteSeeker(this, session, bytes, padding)
   }

-
-
-    // TODO: bench loading a full disk page and copy to that instead
-    return new Promise((resolve, reject) => {
-      const slab = b4a.allocUnsafe(40 * this.flushing.size)
+  get (index, error = true, readBatch = null) {
+    if (readBatch) return readBatch.getTreeNode(index, error)

-
-      let missing = this.flushing.size + 1
-      let offset = 0
-
-      for (const node of this.flushing.values()) {
-        const state = {
-          start: 0,
-          end: 40,
-          buffer: slab.subarray(offset, offset += 40)
-        }
-
-        c.uint64.encode(state, node.size)
-        c.raw.encode(state, node.hash)
-
-        this.storage.write(node.index * 40, state.buffer, done)
-      }
-
-      done(null)
-
-      function done (err) {
-        if (err) error = err
-        if (--missing > 0) return
-        if (error) reject(error)
-        else resolve()
-      }
-    })
+    return getTreeNode(this.storage, index, error)
   }

-  clear () {
-    this.cache = new Xache({ maxSize: this.cache.maxSize })
+  clear (tx) {
     this.truncated = true
     this.truncateTo = 0
-    this.roots = []
-    this.length = 0
-    this.byteLength = 0
-    this.fork = 0
-    this.signature = null
-    if (this.flushing !== null) this.flushing.clear()
-    this.unflushed.clear()
-    return this.flush()
-  }

-
-    return new Promise((resolve, reject) => {
-      this.storage.close(err => {
-        if (err) reject(err)
-        else resolve()
-      })
-    })
+    return tx.deleteTreeNodeRange(0, -1)
   }

-  async truncate (length, fork =
+  async truncate (length, batch, fork = batch.fork) {
     const head = length * 2
-    const batch = new MerkleTreeBatch(this)
     const fullRoots = flat.fullRoots(head)

     for (let i = 0; i < fullRoots.length; i++) {
@@ -687,13 +629,11 @@ module.exports = class MerkleTree {
     return batch
   }

-  async reorg (proof) {
-    const batch = new ReorgBatch(this)
-
+  async reorg (proof, batch) {
     let unverified = null

     if (proof.block || proof.hash || proof.seek) {
-      unverified = verifyTree(proof,
+      unverified = verifyTree(proof, batch.nodes)
     }

     if (!verifyUpgrade(proof, unverified, batch)) {
@@ -717,9 +657,9 @@ module.exports = class MerkleTree {
     return batch
   }

-  verifyFullyRemote (proof) {
+  verifyFullyRemote (proof, session) {
     // TODO: impl this less hackishly
-    const batch = new MerkleTreeBatch(this)
+    const batch = new MerkleTreeBatch(this, session)

     batch.fork = proof.fork
     batch.roots = []
@@ -727,7 +667,7 @@ module.exports = class MerkleTree {
     batch.ancestors = 0
     batch.byteLength = 0

-    let unverified = verifyTree(proof,
+    let unverified = verifyTree(proof, batch.nodes)

     if (proof.upgrade) {
       if (verifyUpgrade(proof, unverified, batch)) {
@@ -738,10 +678,10 @@ module.exports = class MerkleTree {
     return batch
   }

-  async verify (proof) {
-    const batch = new MerkleTreeBatch(this)
+  async verify (proof, session) {
+    const batch = new MerkleTreeBatch(this, session)

-    let unverified = verifyTree(proof,
+    let unverified = verifyTree(proof, batch.nodes)

     if (proof.upgrade) {
       if (verifyUpgrade(proof, unverified, batch)) {
@@ -759,13 +699,13 @@ module.exports = class MerkleTree {
     return batch
   }

-  proof ({ block, hash, seek, upgrade }) {
-    return generateProof(
+  proof (rx, batch, { block, hash, seek, upgrade }) {
+    return generateProof(rx, batch, block, hash, seek, upgrade)
   }

   // Successor to .nodes()
-  async missingNodes (index) {
-    const head = 2 *
+  async missingNodes (index, length) {
+    const head = 2 * length
     const ite = flat.iterator(index)

     // See iterator.rightSpan()
@@ -774,7 +714,8 @@ module.exports = class MerkleTree {
     if (iteRightSpan >= head) return 0

     let cnt = 0
-
+    // TODO: we could prop use a read batch here and do this in blocks of X for perf
+    while (!ite.contains(head) && !(await hasTreeNode(this.storage, ite.index))) {
       cnt++
       ite.parent()
     }
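Illustrative sketch (not part of the published diff): missingNodes() now takes the tree length as an explicit argument instead of reading it off the tree, so callers pass the length tracked by the session state. Names here are assumptions, and the tree index for a leaf is assumed to be 2 * blockIndex:

// Counts how many ancestors of the block are not yet stored locally;
// 0 means every hash needed to verify the block is already on disk.
const missing = await tree.missingNodes(2 * blockIndex, session.length)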
@@ -796,56 +737,38 @@ module.exports = class MerkleTree {
     return cnt
   }

-
-    return getByteRange(this, index)
-  }
-
-  byteOffset (index) {
-    return getByteOffset(this, index)
-  }
-
-  static async open (storage, opts = {}) {
-    await new Promise((resolve, reject) => {
-      storage.read(0, OLD_TREE.length, (err, buf) => {
-        if (err) return resolve()
-        if (b4a.equals(buf, OLD_TREE)) return reject(new Error('Storage contains an incompatible merkle tree'))
-        resolve()
-      })
-    })
-
-    const length = typeof opts.length === 'number'
-      ? opts.length
-      : await autoLength(storage)
-
+  static async open (storage, length, opts = {}) {
     const roots = []
     for (const index of flat.fullRoots(2 * length)) {
-      roots.push(unslabNode(await
+      roots.push(unslabNode(await getTreeNode(storage, index, true)))
     }

     return new MerkleTree(storage, roots, opts.fork || 0, opts.signature || null, opts.prologue || null)
   }
 }

-
-
-
-
-  }
-  return [await tree.byteOffset(index), (await tree.get(index)).size]
+module.exports = {
+  MerkleTreeBatch,
+  ReorgBatch,
+  MerkleTree
 }

-async function
-
+async function getNodeSize (index, readBatch) {
+  return (await readBatch.getTreeNode(index, true)).size
+}
+
+function getByteOffsetTree (tree, index, readBatch) {
   if ((index & 1) === 1) index = flat.leftSpan(index)

   let head = 0
-
+
+  const promises = []

   for (const node of tree.roots) { // all async ticks happen once we find the root so safe
     head += 2 * ((node.index - head) + 1)

     if (index >= head) {
-
+      promises.push(node.size)
       continue
     }

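Illustrative sketch (not part of the published diff): opening no longer probes the storage to infer the tree length (autoLength and the OLD_TREE check are gone); the caller supplies the length, which in v11 appears to be tracked by the new session/oplog state. The storage and session objects below are assumptions:

// Assumed: `storage` is the new storage object and `session` tracks length/fork/signature.
const tree = await MerkleTree.open(storage, session.length, {
  fork: session.fork,          // illustrative options, mirroring the constructor call shown above
  signature: session.signature
})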
@@ -855,20 +778,43 @@ async function getByteOffset (tree, index) {
       if (index < ite.index) {
         ite.leftChild()
       } else {
-
+        promises.push(tree.get(ite.leftChild(), true, readBatch))
         ite.sibling()
       }
     }

-    return
+    return Promise.all(promises)
   }

   throw ASSERTION('Failed to find offset')
 }

+async function getByteOffset (tree, index, readBatch) {
+  if (index === 2 * tree.length) return tree.byteLength
+
+  const treeNodes = await getByteOffsetTree(tree, index, readBatch)
+
+  let offset = 0
+  for (const node of treeNodes) offset += node.size
+
+  return offset
+}
+
+function getByteRange (tree, index, readBatch) {
+  const head = 2 * tree.length
+  if (((index & 1) === 0 ? index : flat.rightSpan(index)) >= head) {
+    throw BAD_ARGUMENT('Index is out of bounds')
+  }
+
+  const offset = getByteOffset(tree, index, readBatch)
+  const size = getNodeSize(index, readBatch)
+
+  return Promise.all([offset, size])
+}
+
 // All the methods needed for proof verification

-function verifyTree ({ block, hash, seek },
+function verifyTree ({ block, hash, seek }, nodes) {
   const untrustedNode = block
     ? { index: 2 * block.index, value: block.value, nodes: block.nodes }
     : hash
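Illustrative sketch (not part of the published diff): the byte helpers above resolve their tree-node reads through a read batch and return plain numbers, so from a batch a block lookup reduces to the following (the batch object is an assumption, and leaf indexes are 2 * blockIndex):

// Assumed: `batch` is a MerkleTreeBatch for the current session.
const blockIndex = 3
const [offset, size] = await batch.byteRange(2 * blockIndex)
// the block's bytes span [offset, offset + size) within the data store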
@@ -889,7 +835,7 @@ function verifyTree ({ block, hash, seek }, crypto, nodes) {
   while (q.length > 0) {
     const node = q.shift(ite.sibling())

-    root = parentNode(
+    root = parentNode(ite.parent(), root, node)
     nodes.push(node)
     nodes.push(root)
   }
@@ -898,7 +844,7 @@ function verifyTree ({ block, hash, seek }, crypto, nodes) {
   if (untrustedNode === null) return root

   const ite = flat.iterator(untrustedNode.index)
-  const blockHash = untrustedNode.value && blockNode(
+  const blockHash = untrustedNode.value && blockNode(ite.index, untrustedNode.value)

   const q = new NodeQueue(untrustedNode.nodes, root)

@@ -908,7 +854,7 @@ function verifyTree ({ block, hash, seek }, crypto, nodes) {
   while (q.length > 0) {
     const node = q.shift(ite.sibling())

-    root = parentNode(
+    root = parentNode(ite.parent(), root, node)
     nodes.push(node)
     nodes.push(root)
   }
@@ -917,7 +863,7 @@ function verifyTree ({ block, hash, seek }, crypto, nodes) {
 }

 function verifyUpgrade ({ fork, upgrade }, blockRoot, batch) {
-  const prologue = batch.
+  const prologue = batch.prologue

   if (prologue) {
     const { start, length } = upgrade
@@ -993,7 +939,7 @@ async function seekFromHead (tree, head, bytes, padding) {

   for (let i = 0; i < roots.length; i++) {
     const root = roots[i]
-    const node = await tree.get(root)
+    const node = await tree.get(root, true)
     const size = getUnpaddedSize(node, padding, null)

     if (bytes === size) return root
@@ -1017,6 +963,7 @@ async function seekTrustedTree (tree, root, bytes, padding) {

   while ((ite.index & 1) !== 0) {
     const l = await tree.get(ite.leftChild(), false)
+
     if (l) {
       const size = getUnpaddedSize(l, padding, ite)
       if (size === bytes) return ite.index
@@ -1042,7 +989,7 @@ async function seekUntrustedTree (tree, root, bytes, padding) {

   bytes -= offset

-  const node = await tree.get(root)
+  const node = await tree.get(root, true)

   if (getUnpaddedSize(node, padding, null) <= bytes) throw INVALID_OPERATION('Invalid seek')

@@ -1053,41 +1000,41 @@
 // Note, that all these methods are sync as we can statically infer which nodes
 // are needed for the remote to verify given they arguments they passed us

-function seekProof (tree, seekRoot, root, p) {
+function seekProof (tree, batch, seekRoot, root, p) {
   const ite = flat.iterator(seekRoot)

   p.seek = []
-  p.seek.push(tree.get(ite.index))
+  p.seek.push(tree.get(ite.index, true, batch))

   while (ite.index !== root) {
     ite.sibling()
-    p.seek.push(tree.get(ite.index))
+    p.seek.push(tree.get(ite.index, true, batch))
     ite.parent()
   }
 }

-function blockAndSeekProof (tree, node, seek, seekRoot, root, p) {
-  if (!node) return seekProof(tree, seekRoot, root, p)
+function blockAndSeekProof (tree, batch, node, seek, seekRoot, root, p) {
+  if (!node) return seekProof(tree, batch, seekRoot, root, p)

   const ite = flat.iterator(node.index)

   p.node = []
-  if (!node.value) p.node.push(tree.get(ite.index))
+  if (!node.value) p.node.push(tree.get(ite.index, true, batch))

   while (ite.index !== root) {
     ite.sibling()

     if (seek && ite.contains(seekRoot) && ite.index !== seekRoot) {
-      seekProof(tree, seekRoot, ite.index, p)
+      seekProof(tree, batch, seekRoot, ite.index, p)
     } else {
-      p.node.push(tree.get(ite.index))
+      p.node.push(tree.get(ite.index, true, batch))
     }

     ite.parent()
   }
 }

-function upgradeProof (tree, node, seek, from, to, subTree, p) {
+function upgradeProof (tree, batch, node, seek, from, to, subTree, p) {
   if (from === 0) p.upgrade = []

   for (const ite = flat.iterator(0); ite.fullRoot(to); ite.nextTree()) {
@@ -1107,9 +1054,9 @@ function upgradeProof (tree, node, seek, from, to, subTree, p) {
       ite.sibling()
       if (ite.index > target) {
         if (p.node === null && p.seek === null && ite.contains(subTree)) {
-          blockAndSeekProof(tree, node, seek, subTree, ite.index, p)
+          blockAndSeekProof(tree, batch, node, seek, subTree, ite.index, p)
         } else {
-          p.upgrade.push(
+          p.upgrade.push(batch.getTreeNode(ite.index, true))
         }
       }
       ite.parent()
@@ -1125,16 +1072,16 @@ function upgradeProof (tree, node, seek, from, to, subTree, p) {
     // if the subtree included is a child of this tree, include that one
     // instead of a dup node
     if (p.node === null && p.seek === null && ite.contains(subTree)) {
-      blockAndSeekProof(tree, node, seek, subTree, ite.index, p)
+      blockAndSeekProof(tree, batch, node, seek, subTree, ite.index, p)
       continue
     }

     // add root (can be optimised since the root might be in tree.roots)
-    p.upgrade.push(tree.get(ite.index))
+    p.upgrade.push(tree.get(ite.index, true, batch))
   }
 }

-function additionalUpgradeProof (tree, from, to, p) {
+function additionalUpgradeProof (tree, batch, from, to, p) {
   if (from === 0) p.additionalUpgrade = []

   for (const ite = flat.iterator(0); ite.fullRoot(to); ite.nextTree()) {
@@ -1153,7 +1100,7 @@ function additionalUpgradeProof (tree, from, to, p) {
     while (ite.index !== root) {
       ite.sibling()
       if (ite.index > target) {
-        p.additionalUpgrade.push(tree.get(ite.index))
+        p.additionalUpgrade.push(tree.get(ite.index, true, batch))
       }
       ite.parent()
     }
@@ -1166,7 +1113,7 @@ function additionalUpgradeProof (tree, from, to, p) {
     }

     // add root (can be optimised since the root is in tree.roots)
-    p.additionalUpgrade.push(tree.get(ite.index))
+    p.additionalUpgrade.push(tree.get(ite.index, true, batch))
   }
 }

@@ -1193,75 +1140,14 @@ function totalSpan (nodes) {
   return s
 }

-function blockNode (
+function blockNode (index, value) {
   return { index, size: value.byteLength, hash: crypto.data(value) }
 }

-function parentNode (
+function parentNode (index, a, b) {
   return { index, size: a.size + b.size, hash: crypto.parent(a, b) }
 }

-function blankNode (index) {
-  return { index, size: 0, hash: BLANK_HASH }
-}
-
-// Storage methods
-
-function getStoredNode (storage, index, cache, error) {
-  return new Promise((resolve, reject) => {
-    storage.read(40 * index, 40, (err, data) => {
-      if (err) {
-        if (error) return reject(err)
-        else resolve(null)
-        return
-      }
-
-      const hash = data.subarray(8)
-      const size = c.decode(c.uint64, data)
-
-      if (size === 0 && b4a.compare(hash, BLANK_HASH) === 0) {
-        if (error) reject(new Error('Could not load node: ' + index))
-        else resolve(null)
-        return
-      }
-
-      const node = { index, size, hash }
-
-      if (cache !== null) {
-        // Copy hash to a new buffer to avoid blocking gc of its original slab
-        node.hash = unslab(hash)
-        cache.set(index, node)
-      }
-
-      resolve(node)
-    })
-  })
-}
-
-function storedNodes (storage) {
-  return new Promise((resolve) => {
-    storage.stat((_, st) => {
-      if (!st) return resolve(0)
-      resolve((st.size - (st.size % 40)) / 40)
-    })
-  })
-}
-
-async function autoLength (storage) {
-  const nodes = await storedNodes(storage)
-  if (!nodes) return 0
-  const ite = flat.iterator(nodes - 1)
-  let index = nodes - 1
-  while (await getStoredNode(storage, ite.parent(), null, false)) index = ite.index
-  return flat.rightSpan(index) / 2 + 1
-}
-
-function truncateMap (map, len) {
-  for (const node of map.values()) {
-    if (node.index >= 2 * len) map.delete(node.index)
-  }
-}
-
 function log2 (n) {
   let res = 1

@@ -1279,6 +1165,20 @@ function normalizeIndexed (block, hash) {
   return null
 }

+function getTreeNode (storage, index, error) {
+  const batch = storage.read()
+  const node = batch.getTreeNode(index, error)
+  batch.tryFlush()
+  return node
+}
+
+function hasTreeNode (storage, index) {
+  const batch = storage.read()
+  const has = batch.hasTreeNode(index)
+  batch.tryFlush()
+  return has
+}
+
 async function settleProof (p) {
   const result = [
     p.node && Promise.all(p.node),
@@ -1299,7 +1199,7 @@ async function settleProof (p) {
 }

 // tree can be either the merkle tree or a merkle tree batch
-async function generateProof (tree, block, hash, seek, upgrade) {
+async function generateProof (readBatch, tree, block, hash, seek, upgrade) {
   // Important that this does not throw inbetween making the promise arrays
   // and finalise being called, otherwise there will be lingering promises in the background

@@ -1308,17 +1208,13 @@ async function generateProof (tree, block, hash, seek, upgrade) {
     upgrade.length = upgrade.start < tree.prologue.length ? tree.prologue.length : upgrade.length
   }

-  const fork = tree.fork
-  const signature = tree.signature
   const head = 2 * tree.length
   const from = upgrade ? upgrade.start * 2 : 0
   const to = upgrade ? from + upgrade.length * 2 : head
   const node = normalizeIndexed(block, hash)

-  const result = { fork, block: null, hash: null, seek: null, upgrade: null, manifest: null }
-
   // can't do anything as we have no data...
-  if (head === 0) return
+  if (head === 0) return new TreeProof(tree, null, null, null, null)

   if (from >= to || to > head) {
     throw INVALID_OPERATION('Invalid upgrade')
@@ -1329,61 +1225,22 @@ async function generateProof (tree, block, hash, seek, upgrade) {

   let subTree = head

-  const p =
-    node: null,
-    seek: null,
-    upgrade: null,
-    additionalUpgrade: null
-  }
+  const p = new TreeProof(tree, block, hash, seek, upgrade)

   if (node !== null && (!upgrade || node.lastIndex < upgrade.start)) {
     subTree = nodesToRoot(node.index, node.nodes, to)
     const seekRoot = seek ? await seekUntrustedTree(tree, subTree, seek.bytes, seek.padding) : head
-    blockAndSeekProof(tree, node, seek, seekRoot, subTree, p)
+    blockAndSeekProof(tree, readBatch, node, seek, seekRoot, subTree, p.pending)
   } else if ((node || seek) && upgrade) {
     subTree = seek ? await seekFromHead(tree, to, seek.bytes, seek.padding) : node.index
   }

   if (upgrade) {
-    upgradeProof(tree, node, seek, from, to, subTree, p)
-    if (head > to) additionalUpgradeProof(tree, to, head, p)
-  }
-
-  const [pNode, pSeek, pUpgrade, pAdditional] = await settleProof(p)
-
-  if (block) {
-    if (pNode === null) throw INVALID_OPERATION('Invalid block request')
-    result.block = {
-      index: block.index,
-      value: null, // populated upstream, alloc it here for simplicity
-      nodes: pNode
-    }
-  } else if (hash) {
-    if (pNode === null) throw INVALID_OPERATION('Invalid hash request')
-    result.hash = {
-      index: hash.index,
-      nodes: pNode
-    }
-  }
-
-  if (seek && pSeek !== null) {
-    result.seek = {
-      bytes: seek.bytes,
-      nodes: pSeek
-    }
-  }
-
-  if (upgrade) {
-    result.upgrade = {
-      start: upgrade.start,
-      length: upgrade.length,
-      nodes: pUpgrade,
-      additionalNodes: pAdditional || [],
-      signature
-    }
+    upgradeProof(tree, readBatch, node, seek, from, to, subTree, p.pending)
+    if (head > to) additionalUpgradeProof(tree, readBatch, to, head, p.pending)
   }

-  return
+  return p
 }

 function getUnpaddedSize (node, padding, ite) {
|