hypercore 10.38.2 → 11.0.1
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +13 -30
- package/index.js +388 -444
- package/lib/audit.js +33 -41
- package/lib/bit-interlude.js +174 -0
- package/lib/bitfield.js +79 -87
- package/lib/block-store.js +12 -50
- package/lib/copy-prologue.js +236 -0
- package/lib/core.js +414 -746
- package/lib/download.js +42 -4
- package/lib/merkle-tree.js +263 -406
- package/lib/multisig.js +9 -6
- package/lib/mutex.js +4 -0
- package/lib/remote-bitfield.js +9 -9
- package/lib/replicator.js +247 -177
- package/lib/session-state.js +949 -0
- package/lib/verifier.js +20 -13
- package/package.json +2 -2
- package/lib/batch.js +0 -431
- package/lib/big-header.js +0 -55
- package/lib/oplog.js +0 -228
package/lib/core.js
CHANGED
@@ -1,91 +1,154 @@
-const …
+const crypto = require('hypercore-crypto')
 const b4a = require('b4a')
 const unslab = require('unslab')
-const …
-const BigHeader = require('./big-header')
+const z32 = require('z32')
 const Mutex = require('./mutex')
-const MerkleTree = require('./merkle-tree')
+const { MerkleTree, ReorgBatch } = require('./merkle-tree')
 const BlockStore = require('./block-store')
+const BitInterlude = require('./bit-interlude')
 const Bitfield = require('./bitfield')
 const RemoteBitfield = require('./remote-bitfield')
-const Info = require('./info')
 const { BAD_ARGUMENT, STORAGE_EMPTY, STORAGE_CONFLICT, INVALID_OPERATION, INVALID_SIGNATURE, INVALID_CHECKSUM } = require('hypercore-errors')
-const m = require('./messages')
 const Verifier = require('./verifier')
 const audit = require('./audit')
+const copyPrologue = require('./copy-prologue')
+const SessionState = require('./session-state')
+const Replicator = require('./replicator')

 module.exports = class Core {
-  constructor (…
-    this.…
-    this.…
+  constructor (db, opts = {}) {
+    this.db = db
+    this.storage = null
+    this.replicator = new Replicator(this, opts)
+    this.sessionStates = []
+    this.monitors = []
+    this.activeSessions = 0
+
+    this.id = opts.key ? z32.encode(opts.key) : null
+    this.key = opts.key || null
+    this.discoveryKey = opts.discoveryKey || (opts.key && crypto.discoveryKey(opts.key)) || null
+    this.manifest = null
+    this.opening = null
+    this.closing = null
+    this.exclusive = null
+
     this.preupdate = null
-    this.header = …
-    this.compat = …
-    this.…
-    this.blocks = blocks
-    this.bitfield = bitfield
-    this.verifier = verifier
+    this.header = null
+    this.compat = false
+    this.tree = null
+    this.blocks = null
+    this.bitfield = null
+    this.verifier = null
     this.truncating = 0
     this.updating = false
-    this.closed = false
     this.skipBitfield = null
-    this.…
+    this.globalCache = opts.globalCache || null
+    this.autoClose = opts.autoClose !== false
+    this.encryption = null
+    this.onidle = noop
+
+    this.state = null
+    this.opened = false
+    this.destroyed = false
+    this.closed = false

-    this._manifestFlushed = …
-    this.…
+    this._manifestFlushed = false
+    this._onflush = null
+    this._flushing = null
+    this._activeBatch = null
+    this._bitfield = null
     this._verifies = null
     this._verifiesFlushed = null
-    this.…
+    this._legacy = !!opts.legacy
+
+    this.opening = this._open(opts)
+    this.opening.catch(noop)
+  }
+
+  ready () {
+    return this.opening
+  }
+
+  addMonitor (s) {
+    if (s._monitorIndex >= 0) return
+    s._monitorIndex = this.monitors.push(s) - 1
+  }
+
+  removeMonitor (s) {
+    if (s._monitorIndex < 0) return
+    const head = this.monitors.pop()
+    if (head !== s) this.monitors[(head._monitorIndex = s._monitorIndex)] = head
+    s._monitorIndex = -1
+  }
+
+  emitManifest () {
+    for (let i = this.monitors.length - 1; i >= 0; i--) {
+      this.monitors[i].emit('manifest')
+    }
   }

-  …
+  createUserDataStream (opts, session = this.state) {
+    return session.storage.createUserDataStream(opts)
+  }
+
+  allSessions () {
+    const sessions = []
+    for (const state of this.sessionStates) {
+      if (state.sessions.length) sessions.push(...state.sessions)
+    }
+    return sessions
+  }
+
+  hasSession () {
+    return this.activeSessions !== 0
+  }

+  checkIfIdle () {
+    if (this.destroyed === true || this.hasSession() === true) return
+    if (this.replicator.idle() === false) return
+    if (this.state === null || this.state.mutex.idle() === false) return
+    this.onidle()
+  }
+
+  async lockExclusive () {
+    if (this.exclusive === null) this.exclusive = new Mutex()
+    await this.exclusive.lock()
+  }
+
+  unlockExclusive () {
+    if (this.exclusive !== null) this.exclusive.unlock()
+  }
+
+  async _open (opts) {
     try {
-      …
+      await this._tryOpen(opts)
     } catch (err) {
-      …
+      this.onidle()
       throw err
     }
+
+    this.opened = true
   }

-  …
+  async _tryOpen (opts) {
+    let storage = await this.db.resume(this.discoveryKey)
+
     let overwrite = opts.overwrite === true

     const force = opts.force === true
     const createIfMissing = opts.createIfMissing !== false
-    const crypto = opts.crypto || hypercoreCrypto
     // kill this flag soon
     const legacy = !!opts.legacy

-    const oplog = new Oplog(oplogFile, {
-      headerEncoding: m.oplog.header,
-      entryEncoding: m.oplog.entry,
-      readonly: opts.readonly
-    })
-
     // default to true for now if no manifest is provided
     let compat = opts.compat === true || (opts.compat !== false && !opts.manifest)

-    let …
+    let header = storage ? parseHeader(await getCoreInfo(storage)) : null

     if (force && opts.key && header && !b4a.equals(header.key, opts.key)) {
       overwrite = true
     }

-    const bigHeader = new BigHeader(headerFile)
-
     if (!header || overwrite) {
       if (!createIfMissing) {
         throw STORAGE_EMPTY('No Hypercore is stored here')
@@ -98,11 +161,11 @@ module.exports = class Core {
       }

       const keyPair = opts.keyPair || (opts.key ? null : crypto.keyPair())
+
       const defaultManifest = !opts.manifest && (!!opts.compat || !opts.key || !!(keyPair && b4a.equals(opts.key, keyPair.publicKey)))
       const manifest = defaultManifest ? Verifier.defaultSignerManifest(opts.key || keyPair.publicKey) : Verifier.createManifest(opts.manifest)

       header = {
-        external: null,
         key: opts.key || (compat ? manifest.signers[0].publicKey : Verifier.manifestHash(manifest)),
         manifest,
         keyPair: keyPair ? { publicKey: keyPair.publicKey, secretKey: keyPair.secretKey || null } : null,
@@ -119,15 +182,30 @@ module.exports = class Core {
       }
     }

-      …
+      const discoveryKey = opts.discoveryKey || crypto.discoveryKey(header.key)
+
+      storage = await this.db.create({
+        key: header.key,
+        manifest,
+        keyPair,
+        discoveryKey,
+        userData: opts.userData || [],
+        alias: opts.alias || null
+      })
     }

     // unslab the long lived buffers to avoid keeping the slab alive
     header.key = unslab(header.key)
-    …
+
+    if (header.tree) {
+      header.tree.rootHash = unslab(header.tree.rootHash)
+      header.tree.signature = unslab(header.tree.signature)
+    }
+
+    if (header.keyPair) {
+      header.keyPair.publicKey = unslab(header.keyPair.publicKey)
+      header.keyPair.secretKey = unslab(header.keyPair.secretKey)
+    }

     if (header.keyPair) {
       header.keyPair.publicKey = unslab(header.keyPair.publicKey)
@@ -156,551 +234,160 @@ module.exports = class Core {

     const prologue = header.manifest ? header.manifest.prologue : null

-    const tree = await MerkleTree.open(…
-    const bitfield = await Bitfield.open(…
-    const blocks = new BlockStore(…
+    const tree = await MerkleTree.open(storage)
+    const bitfield = await Bitfield.open(storage, header.tree.length)
+    const blocks = new BlockStore(storage)
+
+    const treeInfo = {
+      fork: header.tree.fork,
+      signature: header.tree.signature,
+      roots: header.tree.length ? await tree.getRoots(header.tree.length) : [],
+      prologue
+    }

     if (overwrite) {
-      …
+      const tx = storage.write()
+      tree.clear(tx)
+      blocks.clear(tx)
+      bitfield.clear(tx)
+      await tx.flush()
     }

-    …
-    while (bitfield.get(header.hints.contiguousLength)) header.hints.contiguousLength++
+    for await (const { key, value } of storage.createUserDataStream()) {
+      header.userData.push({ key, value: unslab(value) })
     }

+    // compat from earlier version that do not store contig length
+    // if (header.hints.contiguousLength === 0) {
+    //   while (bitfield.get(header.hints.contiguousLength)) header.hints.contiguousLength++
+    // }
+
     // to unslab
     if (header.manifest) header.manifest = Verifier.createManifest(header.manifest)

     const verifier = header.manifest ? new Verifier(header.key, header.manifest, { crypto, legacy }) : null

-    …
-    if (e.bitfield) {
-      bitfield.setRange(e.bitfield.start, e.bitfield.length, !e.bitfield.drop)
-      updateContig(header, e.bitfield, bitfield)
-    }
-
-    if (e.treeUpgrade) {
-      const batch = await tree.truncate(e.treeUpgrade.length, e.treeUpgrade.fork)
-      batch.ancestors = e.treeUpgrade.ancestors
-      batch.signature = unslab(e.treeUpgrade.signature)
-      addReorgHint(header.hints.reorgs, tree, batch)
-      batch.commit()
-
-      header.tree.length = tree.length
-      header.tree.fork = tree.fork
-      header.tree.rootHash = tree.hash()
-      header.tree.signature = tree.signature
-    }
-    …
+    this.storage = storage
+    this.header = header
+    this.compat = compat
+    this.tree = tree
+    this.blocks = blocks
+    this.bitfield = bitfield
+    this.verifier = verifier
+    this.state = new SessionState(this, null, storage, this.blocks, tree, treeInfo, null)

+    if (this.key === null) this.key = this.header.key
+    if (this.discoveryKey === null) this.discoveryKey = crypto.discoveryKey(this.key)
+    if (this.id === null) this.id = z32.encode(this.key)
+    if (this.manifest === null) this.manifest = this.header.manifest

-    …
+    this._manifestFlushed = !!header.manifest
   }

   async audit () {
-    await this.…
+    await this.state.mutex.lock()

     try {
-      …
+      const tx = this.state.createWriteBatch()
+
+      // TODO: refactor audit
+      const corrections = await audit(this, tx)
+      if (corrections.blocks || corrections.tree) {
+        await this.state.flushUpdate(tx)
+      }
+
       return corrections
     } finally {
-      …
+      this.state._unlock()
     }
   }

   async setManifest (manifest) {
-    await this.…
+    await this.state.mutex.lock()

     try {
       if (manifest && this.header.manifest === null) {
         if (!Verifier.isValidManifest(this.header.key, manifest)) throw INVALID_CHECKSUM('Manifest hash does not match')
-        …
+
+        const tx = this.state.createWriteBatch()
+        this._setManifest(tx, Verifier.createManifest(manifest), null)
+
+        if (await this.state.flush(tx)) this.replicator.onupgrade()
       }
     } finally {
-      this.…
+      this.state._unlock()
     }
   }

-  _setManifest (manifest, keyPair) {
+  _setManifest (tx, manifest, keyPair) {
     if (!manifest && b4a.equals(keyPair.publicKey, this.header.key)) manifest = Verifier.defaultSignerManifest(this.header.key)
     if (!manifest) return

-    const verifier = new Verifier(this.header.key, manifest, {…
+    const verifier = new Verifier(this.header.key, manifest, { legacy: this._legacy })
+
+    if (verifier.prologue) this.state.prologue = Object.assign({}, verifier.prologue)

-    this.header.manifest = manifest
+    this.manifest = this.header.manifest = manifest
+
+    tx.setAuth({
+      key: this.header.key,
+      discoveryKey: this.discoveryKey,
+      manifest,
+      keyPair: this.header.keyPair
+      // TODO: encryptionKey?
+    })
+
     this.compat = verifier.compat
     this.verifier = verifier
     this._manifestFlushed = false

-    this.…
+    this.replicator.onupgrade()
+    this.emitManifest()
   }

-  _shouldFlush () {
-    if (--this._autoFlush <= 0 || this.oplog.byteLength >= this._maxOplogSize) {
-      this._autoFlush = 4
-      return true
-    }
-
-    if (!this._manifestFlushed && this.header.manifest) {
-      this._manifestFlushed = true
-      return true
-    }
-
-    return false
-  }
-
-  async copyPrologue (src, { additional = [] } = {}) {
-    await this._mutex.lock()
+  async copyPrologue (src) {
+    await this.state.mutex.lock()

     try {
-      await src.…
+      await src.mutex.lock()
     } catch (err) {
-      this.…
+      this.state.mutex.unlock()
       throw err
     }

     try {
-      if (!prologue) throw INVALID_OPERATION('No prologue present')
-      …
+      await copyPrologue(src, this)
     } finally {
-      src._mutex.unlock()
-      this._mutex.unlock()
+      src.mutex.unlock()
+      this.state.mutex.unlock()
+      this.checkIfIdle()
     }
   }

-  async flush () {
-    await this._mutex.lock()
-    try {
-      this._manifestFlushed = true
-      this._autoFlush = 4
-      await this._flushOplog()
-    …
-  }
-
-  _appendBlocks (values) {
-    return this.blocks.putBatch(this.tree.length, values, this.tree.byteLength)
-  }
-
-  async … {
-    await this.blocks.put(index, value, byteOffset)
-  }
-
-  async … {
-    …
-    for (const u of this.header.userData) {
-      if (u.key !== key) continue
-      …
-    }
-    …
-    updateUserData(this.header.userData, key, value)
-    …
-  }
-
-  async truncate (length, fork, { signature, keyPair = this.header.keyPair } = {}) {
-    if (this.tree.prologue && length < this.tree.prologue.length) {
-      throw INVALID_OPERATION('Truncation breaks prologue')
-    }
-    …
-  }
-
-  async clearBatch () {
-    …
-  }
-
-  async clear (start, end, cleared) {
-    …
-  }
-
-  async purge () {
-    …
-  }
-
-  async insertBatch (batch, values, { signature, keyPair = this.header.keyPair, pending = false, treeLength = batch.treeLength } = {}) {
-    …
-  }
-
-  async append (values, { signature, keyPair = this.header.keyPair, preappend } = {}) {
-    …
-  }
+  get isFlushing () {
+    return !!(this._flushing || this.state._activeBatch)
+  }
+
+  flushed () {
+    return this.state.flushed()
+  }
+
+  async _validateCommit (state, treeLength) {
+    if (this.state.length > state.length) {
+      throw new Error('Invalid commit: partial commit') // TODO: partial commit in the future if possible
+    }
+
+    if (this.state.length > treeLength) {
+      for (const root of this.state.roots) {
+        const batchRoot = await state.tree.get(root.index)
+        if (batchRoot.size !== root.size || !b4a.equals(batchRoot.hash, root.hash)) {
+          throw new Error('Invalid commit: tree conflict')
+        }
+      }
+    }
+
+    if (this.verifier === null) {
+      throw INVALID_OPERATION('Cannot commit without manifest') // easier to assert than upsert
+    }
   }

@@ -715,61 +402,23 @@ module.exports = class Core {

     manifest = Verifier.createManifest(manifest) // To unslab

-    const verifier = this.verifier || new Verifier(this.header.key, manifest, {…
+    const verifier = this.verifier || new Verifier(this.header.key, manifest, { legacy: this._legacy })

     if (!verifier.verify(batch, batch.signature)) {
       throw INVALID_SIGNATURE('Proof contains an invalid signature')
     }
-
-    if (!this.header.manifest) {
-      this.header.manifest = manifest
-      this.compat = verifier.compat
-      this.verifier = verifier
-      this.onupdate(0b10000, null, null, null)
-    }
   }

-  async _verifyExclusive ({ batch, bitfield, value, manifest…
+  async _verifyExclusive ({ batch, bitfield, value, manifest }) {
     this._verifyBatchUpgrade(batch, manifest)
+    if (!batch.commitable()) return false

-    await this.…
-    try {
-      if (!batch.commitable()) return false
-      this.updating = true
-
-      const entry = {
-        userData: null,
-        treeNodes: batch.nodes,
-        treeUpgrade: batch,
-        bitfield
-      }
-
-      if (this.preupdate !== null) await this.preupdate(batch, this.header.key)
-      if (bitfield) await this._writeBlock(batch, bitfield.start, value)
-
-      await this.oplog.append([entry], false)
-
-      let status = 0b0001
-
-      if (bitfield) {
-        this._setBitfield(bitfield.start, true)
-        status |= updateContig(this.header, bitfield, this.bitfield)
-      }
-
-      batch.commit()
-
-      this.header.tree.fork = batch.fork
-      this.header.tree.length = batch.length
-      this.header.tree.rootHash = batch.hash()
-      this.header.tree.signature = batch.signature
+    if (this.preupdate !== null) await this.preupdate(batch, this.header.key)

-      …
+    await this.state._verifyBlock(batch, bitfield, value, this.header.manifest ? null : manifest)

-      this.updating = false
-      this._mutex.unlock()
+    if (!batch.upgraded && bitfield) {
+      this.replicator.onhave(bitfield.start, bitfield.length, bitfield.drop)
     }

     return true
@@ -778,73 +427,76 @@ module.exports = class Core {
   async _verifyShared () {
     if (!this._verifies.length) return false

-    await this.…
+    await this.state.mutex.lock()
+
+    const tx = this.state.createWriteBatch()

     const verifies = this._verifies
     this._verifies = null
     this._verified = null

     try {
-      const entries = []
-
       for (const { batch, bitfield, value } of verifies) {
         if (!batch.commitable()) continue

         if (bitfield) {
-          …
+          tx.putBlock(bitfield.start, value)
         }
-
-        entries.push({
-          userData: null,
-          treeNodes: batch.nodes,
-          treeUpgrade: null,
-          bitfield
-        })
       }

-      …
+      const bits = new BitInterlude()

       for (let i = 0; i < verifies.length; i++) {
-        const { batch, bitfield, …
+        const { batch, bitfield, manifest } = verifies[i]

         if (!batch.commitable()) {
           verifies[i] = null // signal that we cannot commit this one
           continue
         }

-        let status = 0
-
         if (bitfield) {
-          …
-          status = updateContig(this.header, bitfield, this.bitfield)
+          bits.setRange(bitfield.start, bitfield.start + 1, true)
         }

         // if we got a manifest AND its strictly a non compat one, lets store it
         if (manifest && this.header.manifest === null) {
           if (!Verifier.isValidManifest(this.header.key, manifest)) throw INVALID_CHECKSUM('Manifest hash does not match')
-          this._setManifest(manifest, null)
+          this._setManifest(tx, manifest, null)
         }

-        batch.commit()
+        if (batch.commitable()) batch.commit(tx)
+      }
+
+      const ranges = bits.flush(tx, this.bitfield)
+
+      await this.state.flush(tx)

-        …
+      for (const { start, end, value } of ranges) {
+        this._setBitfieldRanges(start, end, value)
       }

-      …
+      for (let i = 0; i < verifies.length; i++) {
+        const bitfield = verifies[i] && verifies[i].bitfield
+        if (bitfield) {
+          this.replicator.onhave(bitfield.start, bitfield.length, bitfield.drop)
+          this.updateContiguousLength(bitfield)
+        }
+      }
     } finally {
-      this.…
+      this.state._clearActiveBatch()
+      this.state.mutex.unlock()
     }

     return verifies[0] !== null
   }

   async checkConflict (proof, from) {
-    if (this.…
+    if (this.state.length < proof.upgrade.length || proof.fork !== this.state.fork) {
       // out of date this proof - ignore for now
       return false
     }

-    const batch = this.tree.verifyFullyRemote(proof)
+    const batch = this.tree.verifyFullyRemote(proof, this.state)

     try {
       this._verifyBatchUpgrade(batch, proof.manifest)
@@ -852,20 +504,32 @@ module.exports = class Core {
       return true
     }

-    …
+    await this.state.mutex.lock()
+
+    try {
+      const tx = this.state.createWriteBatch()
+      if (this.header.manifest === null && proof.manifest) {
+        this._setManifest(tx, proof.manifest, null)
+      }
+
+      await this.state.flush(tx)
+    } finally {
+      this.state.mutex.unlock()
+    }
+
+    const remoteTreeHash = crypto.tree(proof.upgrade.nodes)
+    const localTreeHash = crypto.tree(await this.tree.getRoots(proof.upgrade.length))

     if (b4a.equals(localTreeHash, remoteTreeHash)) return false

-    await this.…
+    await this._onconflict(proof)
     return true
   }

   async verifyReorg (proof) {
-    const batch = …
-
+    const batch = new ReorgBatch(this.tree, this.state)
+    await this.tree.reorg(proof, batch)
     this._verifyBatchUpgrade(batch, proof.manifest)
     return batch
   }

@@ -873,9 +537,9 @@ module.exports = class Core {
     // We cannot apply "other forks" atm.
     // We should probably still try and they are likely super similar for non upgrades
     // but this is easy atm (and the above layer will just retry)
-    if (proof.fork !== this.…
+    if (proof.fork !== this.state.fork) return false

-    const batch = await this.tree.verify(proof)
+    const batch = await this.tree.verify(proof, this.state)
     if (!batch.commitable()) return false

     const value = (proof.block && proof.block.value) || null
@@ -887,7 +551,9 @@ module.exports = class Core {
       from
     }

-    if (batch.upgraded) …
+    if (batch.upgraded) {
+      return this._verifyExclusive(op)
+    }

     if (this._verifies !== null) {
       const verifies = this._verifies
@@ -898,102 +564,147 @@ module.exports = class Core {

     this._verifies = [op]
     this._verified = this._verifyShared()
+
     return this._verified
   }

-  async reorg (batch…
+  async reorg (batch) {
     if (!batch.commitable()) return false

     this.truncating++
-    await this._mutex.lock()

     try {
-      …
-      await this._truncate(batch, from)
+      await this.state.reorg(batch)
     } finally {
       this.truncating--
-      this._mutex.unlock()
     }

     return true
   }

-  …
-    const status = ((batch.length > batch.ancestors) ? 0b0011 : 0b0010) | contigStatus
-
-    this.header.tree.length = batch.length
-    this.header.tree.rootHash = batch.hash()
-    this.header.tree.signature = batch.signature
-
-    this.…
-
-    // the later flush here as well
-    // if (this._shouldFlush()) await this._flushOplog()
-    await this._flushOplog()
-  }
-
-  …
+  openSkipBitfield () {
+    if (this.skipBitfield !== null) return this.skipBitfield
+    this.skipBitfield = new RemoteBitfield()
+    const buf = this.bitfield.toBuffer(this.state.length)
+    const bitfield = new Uint32Array(buf.buffer, buf.byteOffset, buf.byteLength / 4)
+    this.skipBitfield.insert(0, bitfield)
+    return this.skipBitfield
+  }
+
+  _setBitfieldRanges (start, end, value) {
+    this.bitfield.setRange(start, end, value)
+    if (this.skipBitfield !== null) this.skipBitfield.setRange(start, end, value)
+  }
+
+  close () {
+    if (!this.closing) this.closing = this._close()
+    return this.closing
+  }
+
+  updateContiguousLength (bitfield) {
+    const contig = updateContigBatch(this.header.hints.contiguousLength, bitfield, this.bitfield)
+
+    if (contig.length !== -1 && contig.length !== this.header.hints.contiguousLength) {
+      this.header.hints.contiguousLength = contig.length
+    }
+  }
+
+  onappend (tree, bitfield) {
+    this.header.tree = tree
+
+    if (!bitfield) {
+      this.replicator.onupgrade()
+      return
+    }
+
+    this.replicator.cork()
+
+    const { start, length, drop } = bitfield
+
+    this._setBitfieldRanges(start, start + length, true)
+    this.updateContiguousLength({ start, length, drop: false })
+
+    this.replicator.onupgrade()
+    this.replicator.onhave(start, length, drop)
+    this.replicator.uncork()
+  }
+
+  ontruncate (tree, { start, length }) {
+    if (tree) this.header.tree = tree
+
+    this.replicator.cork()
+
+    this.replicator.ontruncate(start, length)
+    this.replicator.onhave(start, length, true)
+    this.replicator.onupgrade()
+    this.replicator.uncork()
+
+    for (const sessionState of this.sessionStates) {
+      if (start < sessionState.snapshotCompatLength) sessionState.snapshotCompatLength = start
+    }
+
+    this._setBitfieldRanges(start, start + length, false)
+    this.updateContiguousLength({ start, length, drop: true })
+  }
+
+  async _onconflict (proof, from) {
+    await this.replicator.onconflict(from)
+
+    for (let i = this.monitors.length - 1; i >= 0; i--) {
+      const s = this.monitors[i]
+      s.emit('conflict', proof.upgrade.length, proof.fork, proof)
+    }
+
+    const err = new Error('Two conflicting signatures exist for length ' + proof.upgrade.length)
+    await this.closeAllSessions(err)
+  }
+
+  async closeAllSessions (err) {
+    // this.sessions modifies itself when a session closes
+    // This way we ensure we indeed iterate over all sessions
+    const sessions = this.allSessions()
+
+    const all = []
+    for (const s of sessions) all.push(s.close({ error: err, force: false })) // force false or else infinite recursion
+    await Promise.allSettled(all)
+  }
+
+  async destroy () {
+    if (this.destroyed === true) return
+    this.destroyed = true
+
+    if (this.hasSession() === true) throw new Error('Cannot destroy while sessions are open')
+
+    const weakSessions = this.allSessions()
+
+    if (this.replicator) this.replicator.destroy()
+    if (this.state) await this.state.close()
+
+    // close all pending weak sessions...
+    for (const s of weakSessions) s.close().catch(noop)
   }

-  async … {
+  async _close () {
+    if (this.opened === false) await this.opening
+    if (this.hasSession() === true) throw new Error('Cannot close while sessions are open')
+
+    if (this.replicator) await this.replicator.close()
+
+    await this.destroy()
+    if (this.autoClose) await this.storage.store.close()
+
     this.closed = true
-    await this._mutex.destroy()
-    await Promise.allSettled([
-      this.oplog.close(),
-      this.bitfield.close(),
-      this.tree.close(),
-      this.blocks.close(),
-      this.bigHeader.close()
-    ])
   }
 }

-function updateContig (header, upd, bitfield) {
+function updateContigBatch (start, upd, bitfield) {
   const end = upd.start + upd.length

-  let c = …
+  let c = start

   if (upd.drop) {
     // If we dropped a block in the current contig range, "downgrade" it
-    if (c …
+    if (c > upd.start) {
       c = upd.start
     }
   } else {
@@ -1003,104 +714,61 @@ function updateContig (header, upd, bitfield) {
     }
   }

-  if (c === …
-    return …
+  if (c === start) {
+    return {
+      length: -1
+    }
   }

-  if (c > …
-    …
-  }
-
-  …
-}
-
-function addReorgHint (list, tree, batch) {
-  if (tree.length === 0 || tree.fork === batch.fork) return
-
-  while (list.length >= 4) list.shift() // 4 here is arbitrary, just want it to be small (hints only)
-  while (list.length > 0) {
-    if (list[list.length - 1].ancestors > batch.ancestors) list.pop()
-    else break
+  if (c > start) {
+    return {
+      length: c
+    }
   }
-
-  list.push({ from: tree.fork, to: batch.fork, ancestors: batch.ancestors })
-}

-function updateUserData (list, key, value) {
-  …
-    else list.splice(i, 1)
-    return
+  return {
+    length: c
   }
-  if (value) list.push({ key, value })
 }

-function … {
-  let error = null
-
-  return new Promise((resolve, reject) => {
-    for (const s of storages) {
-      missing++
-      s.close(done)
-    }
-
-    done(null)
-    …
-  })
-}
-
-async function flushHeader (oplog, bigHeader, header) {
-  if (header.external) {
-    await bigHeader.flush(header)
-  }
-
-  try {
-    await oplog.flush(header)
-  } catch (err) {
-    if (err.code !== 'OPLOG_HEADER_OVERFLOW') throw err
-    await bigHeader.flush(header)
-    await oplog.flush(header)
+function getDefaultTree () {
+  return {
+    fork: 0,
+    length: 0,
+    rootHash: null,
+    signature: null
   }
 }

+function parseHeader (info) {
+  if (!info) return null
+
+  return {
+    key: info.key,
+    manifest: info.manifest,
+    external: null,
+    keyPair: info.keyPair,
+    userData: [],
+    tree: info.head || getDefaultTree(),
+    hints: {
+      reorgs: [],
+      contiguousLength: 0
+    }
+  }
+}
+
 function noop () {}

-function … {
-  const a = src.firstSet(start)
-  const b = dst.firstUnset(start)
-
-  …
-  if (a < b) {
-    start = b
-    continue
-  }
-  …
-}
-
-function minimumSegmentEnd (start, src, dst) {
-  const a = src.firstUnset(start)
-  const b = dst.firstSet(start)
-
-  if (a === -1) return -1
-  if (b === -1) return a
-  return a < b ? a : b
-}
+async function getCoreInfo (storage) {
+  const r = storage.read()
+
+  const auth = r.getAuth()
+  const head = r.getHead()
+
+  r.tryFlush()
+
+  return {
+    ...await auth,
+    head: await head
+  }
+}