hyperbee2 2.7.0 → 2.8.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/README.md CHANGED
@@ -7,16 +7,525 @@ npm install hyperbee2
7
7
  Next major version for [hyperbee](https://github.com/holepunchto/hyperbee).
8
8
  Will be merged in there and released a new major when fully done.
9
9
 
10
+ An append-only B-tree on top of a [Hypercore][hypercore].
11
+
10
12
  ## Usage
11
13
 
12
14
  ```js
13
- const Hyperbee2 = require('hyperbee2')
15
+ import Hyperbee2 from 'hyperbee2'
16
+ import Corestore from 'corestore'
17
+
18
+ // Create/open tree
19
+ const tree = new Hyperbee2(new Corestore('./mystore'))
20
+
21
+ // Wait for tree to become available
22
+ await tree.ready()
14
23
 
15
- const db = new Hyperbee2(store)
24
+ // Write some values to tree
25
+ const batch = tree.write()
26
+ batch.tryPut(Buffer.from('name'), Buffer.from('example'))
27
+ batch.tryPut(Buffer.from('email'), Buffer.from('example@example.com'))
28
+ await batch.flush()
16
29
 
17
- // see tests for more
30
+ // Read values from tree
31
+ const name = await tree.get(Buffer.from('name'))
32
+ const email = await tree.get(Buffer.from('email'))
33
+
34
+ // Print values
35
+ console.log(name.value.toString()) // example
36
+ console.log(email.value.toString()) // example@example.com
18
37
  ```
19
38
 
20
39
  ## License
21
40
 
22
41
  Apache-2.0
42
+
43
+ ## API
44
+
45
+ ### Hyperbee
46
+
47
+ #### `const db = new Hyperbee(store, [options])`
48
+
49
+ Make a new Hyperbee2 instance. The `store` argument expects a
50
+ [Corestore][corestore].
51
+
52
+ Options include:
53
+
54
+ ```js
55
+ {
56
+ key: null, // Buffer or String. Key of Hypercore to load via Corestore
57
+ maxCacheSize: 4096, // Max number of nodes to keep in NodeCache
58
+ core: Hypercore(...), // Hypercore within the Corestore to use (defaults to loading key, or using name='bee')
59
+ view: false, // Is this a view of an open Hyperbee? (i.e. do not close underlying store)
60
+ writable: true, // Is append / truncate allowed on the underlying Hypercore?
61
+ unbatch: 0, // Number of write batches to rollback during bootstrap
62
+ autoUpdate: false, // Reload root node when underlying Hypercore is appended to?
63
+ preload: null, // A function called by ready() after the Hypercore is ready. Can be async.
64
+ wait: true, // Wait for Hypercore to download blocks
65
+ }
66
+ ```
67
+
68
+ #### `await db.ready()`
69
+
70
+ Ensures the underlying [Hypercore][hypercore] is ready and prepares the Hyperbee
71
+ for use. If `autoUpdate` was set to true in the constructor's options,
72
+ this will start watching the [Hypercore][hypercore] for new writes.
73
+
74
+ Calling `get()` will call this automatically for you.
75
+
76
+ #### `await db.close()`
77
+
78
+ Fully close the Hyperbee. If it is not a view on another Hyperbee,
79
+ this will also close its [Hypercore][hypercore].
80
+
81
+ #### `db.head()`
82
+
83
+ Returns an object with the following properties:
84
+
85
+ ```
86
+ {
87
+ length, // Number of blocks from the start of Hypercore that apply to this tree.
88
+ key, // Buffer or null. The key of the underlying Hypercore.
89
+ }
90
+ ```
91
+
92
+ If the Hyperbee is not ready, this will return null.
93
+
94
+ #### `db.cache`
95
+
96
+ Read only. The NodeCache used by this Hyperbee.
97
+
98
+ #### `db.core`
99
+
100
+ Read only. The [Hypercore][hypercore] used by this Hyperbee.
101
+
102
+ #### `db.opening`
103
+
104
+ Read only. A Promise that resolves to `undefined` once the [Corestore][corestore]
105
+ is ready.
106
+
107
+ #### `db.closing`
108
+
109
+ Read only. Initially null. When `db.close()` is called, this is set to
110
+ a Promise that resolves to `undefined` when the close completes.
111
+
112
+ #### `db.opened`
113
+
114
+ Read only. Boolean indicating whether the [Corestore][corestore] has opened.
115
+
116
+ #### `db.closed`
117
+
118
+ Read only. Boolean indicating whether the [Corestore][corestore] has closed.
119
+
120
+ #### `db.replicate()`
121
+
122
+ Calls `replicate` on underlying [Corestore][corestore].
123
+
124
+ #### `db.checkout([options])`
125
+
126
+ Returns a new Hyperbee as a view on the underlying [Hypercore][hypercore].
127
+
128
+ Options:
129
+
130
+ ```js
131
+ {
132
+ writable: false, // Boolean. Will the new tree be writable?
133
+ length: this.core.length, // Number. Length of blocks used from the Hypercore
134
+ key: null, // Buffer or null. Key of the Hypercore
135
+ }
136
+ ```
137
+
138
+ #### `db.move([options])`
139
+
140
+ Replaces the root of the tree with a new entry, potentially using a new
141
+ [Hypercore][hypercore].
142
+
143
+ Options:
144
+
145
+ ```js
146
+ {
147
+ writable: this.writable, // Boolean. Is this tree writable after the move?
148
+ length: this.core.length, // Number. Length of blocks used from the Hypercore
149
+ key: null, // Buffer or null. Key of the Hypercore
150
+ }
151
+ ```
152
+
153
+ #### `db.snapshot()`
154
+
155
+ Returns a new Hyperbee that is a read only view of the current tree.
156
+
157
+ #### `db.undo(n)`
158
+
159
+ Returns a new Hyperbee that is a writable view of the current tree,
160
+ with the last `n` write batches ignored.
161
+
162
+ #### `db.write([options])`
163
+
164
+ Returns a [WriteBatch](#writebatch) object through which the tree can be updated.
165
+
166
+ Options:
167
+
168
+ ```js
169
+ {
170
+ length: -1, // Length of blocks used from the Hypercore
171
+ // (i.e. what point in the hypercore is the write
172
+ // going to extend when flush() is called?).
173
+ // If -1, the length will be calculated on flush()
174
+ // using root.seq + 1.
175
+ key: null, // Buffer or null. Key of the Hypercore.
176
+ autoUpdate: true, // Boolean. Should Hyperbee automatically reflect updates
177
+ // after each flush()?
178
+ compat: false, // Boolean. Write blocks compatible with Hyperbee 1?
179
+ type: encoding.TYPE_LATEST, // Number. Block format to use.
180
+ inlineValueSize: 1024, // Values smaller than this byte length are
181
+ // written inline in the node. Larger values
182
+ // are referenced via a pointer into the block.
183
+ preferredBlockSize: 4096, // Try to write blocks of approximately this size
184
+ // when flushing updates.
185
+ }
186
+ ```
187
+
188
+ Errors:
189
+
190
+ - If the tree is not writable this will throw an Error
191
+
192
+ #### `db.createReadStream([options])`
193
+
194
+ Returns a [streamx][streamx] Readable Stream. This is async iterable.
195
+
196
+ Options:
197
+
198
+ ```js
199
+ {
200
+ prefetch: true, // Prefetch future blocks after yielding an entry?
201
+ reverse: false, // Iterate backwards over keys?
202
+ limit: -1, // Max number of entries to yield (-1 means no limit)
203
+ gte: undefined, // Buffer. Key lower bound (inclusive)
204
+ gt: undefined, // Buffer. Key lower bound (exclusive)
205
+ lte: undefined, // Buffer. Key upper bound (inclusive)
206
+ lt: undefined, // Buffer. Key upper bound (exclusive)
207
+ highWaterMark: 16384, // Size of read ahead buffer calculated
208
+ // as: number of entries * 1024
209
+ }
210
+ ```
211
+
212
+ Iterating over the stream will yield the following properties:
213
+
214
+ ```js
215
+ {
216
+ core, // Hypercore: the hypercore the entry is stored in
217
+ offset, // Number: the index of the entry in the block
218
+ seq, // Number: the sequence number of the block in the hypercore
219
+ key, // Buffer: the key of the entry
220
+ value, // Buffer: the value of the entry
221
+ }
222
+ ```
223
+
224
+ Example:
225
+
226
+ ```js
227
+ for await (const data of b.createReadStream()) {
228
+ console.log(data.key, '-->', data.value)
229
+ }
230
+ ```
231
+
232
+ #### `db.createDiffStream(right, [options])`
233
+
234
+ Returns a [streamx][streamx] Readable Stream that provides
235
+ synchronized iteration over two trees. This is async iterable.
236
+
237
+ Options: Same as options for `createReadStream()`.
238
+
239
+ Iterating over the stream will yield the same properties as
240
+ `createReadStream()` split into left (`db` in this case) and right
241
+ (`right` parameter).
242
+
243
+ ```js
244
+ {
245
+ left: {core, offset, seq, key, value}, // see createReadStream()
246
+ right: {core, offset, seq, key, value}, // see createReadStream()
247
+ }
248
+ ```
249
+
250
+ Example:
251
+
252
+ ```js
253
+ import Hyperbee from './index.js'
254
+ import Corestore from 'corestore'
255
+
256
+ const b1 = new Hyperbee(new Corestore('./store1'))
257
+ const b2 = new Hyperbee(new Corestore('./store2'))
258
+
259
+ await b1.ready()
260
+ await b2.ready()
261
+
262
+ const w1 = b1.write()
263
+ w1.tryPut(Buffer.from('A'), Buffer.from('A'))
264
+ w1.tryPut(Buffer.from('B'), Buffer.from('B'))
265
+ w1.tryPut(Buffer.from('C'), Buffer.from('C'))
266
+ await w1.flush()
267
+
268
+ const w2 = b2.write()
269
+ w2.tryPut(Buffer.from('A'), Buffer.from('A'))
270
+ w2.tryPut(Buffer.from('C'), Buffer.from('C'))
271
+ w2.tryPut(Buffer.from('E'), Buffer.from('E'))
272
+ await w2.flush()
273
+
274
+ for await (const data of b1.createDiffStream(b2)) {
275
+ console.log(data.left?.key.toString(), data.right?.key.toString())
276
+ }
277
+ ```
278
+
279
+ Example output:
280
+
281
+ ```js
282
+ A A
283
+ B undefined
284
+ C C
285
+ undefined E
286
+ ```
287
+
288
+ #### `db.createChangesStream([options])`
289
+
290
+ Returns a [streamx][streamx] Readable Stream for iterating over all
291
+ batches previously written to the tree. This is async iterable.
292
+
293
+ Options:
294
+
295
+ ```js
296
+ {
297
+ head: null, // null means use this.tree.head()
298
+ highWaterMark: 16384, // Size of read ahead buffer calculated
299
+ // as: number of entries * 1024
300
+ timeout: 0, // Wait at most this many milliseconds (0 means no timeout).
301
+ // Defaults to the value of the Hyperbee's timeout option.
302
+ wait: true, // Wait for Hypercore to download blocks
303
+ // Defaults to the value of the Hyperbee's wait option.
304
+ }
305
+ ```
306
+
307
+ Iterating over the stream will yield:
308
+
309
+ ```js
310
+ {
311
+ head: {
312
+ length, // Number: number of blocks from the start of Hypercore
313
+ // that apply to this version of the tree
314
+ key, // Buffer or null: the key of the Hypercore for this version
315
+ },
316
+ tail: {
317
+ length, // Number: number of blocks from the start of Hypercore
318
+ // that apply to the previous version of the tree
319
+ key, // Buffer or null: the key of the Hypercore for the previous
320
+ // version
321
+ },
322
+ batch: [ // Blocks written in this batch
323
+ {tree, keys, values, cohorts, metadata, ...},
324
+ ...
325
+ ],
326
+ }
327
+ ```
328
+
329
+ #### `await db.peek([range])`
330
+
331
+ Attempts to get the first entry within the given range.
332
+
333
+ Returns `null` if no entry exists in the range, or an object with
334
+ the following properties on success:
335
+
336
+ ```js
337
+ {
338
+ core, // Hypercore: the hypercore the entry is stored in
339
+ offset, // Number: the index of the entry in the block
340
+ seq, // Number: the sequence number of the block in the hypercore
341
+ key, // Buffer: the key of the entry
342
+ value, // Buffer: the value of the entry
343
+ }
344
+ ```
345
+
346
+ The `range` argument accepts the same properties as the options for
347
+ [`createReadStream()`](#dbcreatereadstreamoptions).
348
+
349
+ #### `await db.download([range])`
350
+
351
+ Fetches all entries in the given range. The promise resolves once
352
+ all matching entries have been fetched.
353
+
354
+ The `range` argument accepts the same properties as the options for
355
+ [`createReadStream()`](#dbcreatereadstreamoptions).
356
+
357
+ #### `await db.get(key, [options])`
358
+
359
+ Attempt to find an entry by its key.
360
+
361
+ Returns `null` if no entry exists for the given key, or an object with
362
+ the following properties on success:
363
+
364
+ ```js
365
+ {
366
+ core, // Hypercore: the hypercore the entry is stored in
367
+ offset, // Number: the index of the entry in the block
368
+ seq, // Number: the sequence number of the block in the hypercore
369
+ key, // Buffer: the key of the entry
370
+ value, // Buffer: the value of the entry
371
+ }
372
+ ```
373
+
374
+ Options:
375
+
376
+ ```js
377
+ {
378
+ timeout, // Number: wait at most this many milliseconds (0 means no timeout)
379
+ wait, // Boolean: wait for Hypercore to download blocks
380
+ }
381
+ ```
382
+
383
+ #### `b.on('ready', listener)`
384
+
385
+ Emitted once the Hyperbee is ready for use.
386
+
387
+ ##### `b.on('update', listener)`
388
+
389
+ Emitted in the following scenarios:
390
+
391
+ - When a `WriteBatch` is flushed and its `autoUpdate` option is `true`
392
+ (the default).
393
+ - When the underlying core is appended to (locally or remotely) and the
394
+ `Hyperbee`'s `autoUpdate` option is `true`.
395
+ - After a `move()` call on the `Hyperbee`.
396
+ - After a rollback completes because the `unbatch` option to the
397
+ `Hyperbee` constructor was greater than `0`.
398
+
399
+ Warning: this event may be emitted multiple times for the same
400
+ update if `autoUpdate` is set on the `Hyperbee` and on `WriteBatch`:
401
+
402
+ ```js
403
+ const b = new Hyperbee(store, { autoUpdate: true })
404
+ b.on('update', () => console.log('New update'))
405
+
406
+ const w = b.write(/* autoUpdate is true by default */)
407
+ w.tryPut(Buffer.from('key'), Buffer.from('value'))
408
+ await w.flush()
409
+
410
+ // The following will be printed:
411
+ // New update
412
+ // New update
413
+ ```
414
+
415
+ ##### `b.on('close', listener)`
416
+
417
+ Emitted once the `close()` method completes.
418
+
419
+ ### WriteBatch
420
+
421
+ A WriteBatch can be constructed via Hyperbee's [write()](#dbwriteoptions)
422
+ method. Using a batch, multiple updates can be queued then applied together.
423
+
424
+ ```js
425
+ const batch = b.write();
426
+ batch.tryPut(Buffer.from('key'), Buffer.from('value'));
427
+ await batch.flush();
428
+ ```
429
+
430
+ **Warning:** WriteBatch does not hold an exclusive lock on the database
431
+ while queuing operations unless you call `lock()`. By default, the
432
+ lock is only acquired when the operations are flushed. So be careful
433
+ about building concurrent batches:
434
+
435
+ ```js
436
+ import Hyperbee from './index.js'
437
+ import Corestore from 'corestore'
438
+
439
+ const b = new Hyperbee(new Corestore('./store'))
440
+ await b.ready()
441
+
442
+ const w1 = b.write()
443
+ w1.tryPut(Buffer.from('name'), Buffer.from('Sneezy'))
444
+
445
+ const w2 = b.write()
446
+ w2.tryPut(Buffer.from('name'), Buffer.from('Sleepy'))
447
+ w2.tryPut(Buffer.from('email'), Buffer.from('sleepy@example.com'))
448
+
449
+ // Be careful about application order vs. order the operations
450
+ // were queued when building batches concurrently.
451
+ await w2.flush()
452
+ await w1.flush()
453
+
454
+ for await (const data of b.createReadStream()) {
455
+ console.log(data.key.toString(), '-->', data.value.toString())
456
+ }
457
+
458
+ // Output: (email and name mismatch)
459
+ //
460
+ // email --> sleepy@example.com
461
+ // name --> Sneezy
462
+ ```
463
+
464
+ ### `await batch.lock()`
465
+
466
+ Acquires an exclusive write lock now instead of waiting for `flush()`
467
+ to be called. No writes will occur until this batch is closed allowing
468
+ you to keep a consistent view of the database while building the batch.
469
+
470
+ #### `batch.tryPut(key, value)`
471
+
472
+ Queues an operation to associate `key` with `value`. Any existing entry
473
+ for `key` will be overwritten.
474
+
475
+ #### `batch.tryDelete(key)`
476
+
477
+ Queues an operation to remove the entry with `key`, if it exists. If it
478
+ does not exist, this method does nothing (and will not throw).
479
+
480
+ #### `batch.tryClear()`
481
+
482
+ Queues an operation to clear all entries from the tree.
483
+
484
+ #### `await batch.flush()`
485
+
486
+ Acquires an exclusive write lock and applies the operations queued in this
487
+ batch to the tree, clearing the queue.
488
+
489
+ **Warning:** continuing to use the batch after flushing can cause unpredictable
490
+ behavior. Batches applied after the first flush will be 'unapplied' if you
491
+ flush again later. This can lead to accidentally removing data from the tree.
492
+
493
+ ```js
494
+ import Hyperbee from './index.js'
495
+ import Corestore from 'corestore'
496
+
497
+ const b = new Hyperbee(new Corestore('./store'))
498
+ await b.ready()
499
+
500
+ const w1 = b.write()
501
+ w1.tryPut(Buffer.from('name'), Buffer.from('Sneezy'))
502
+
503
+ const w2 = b.write()
504
+ w2.tryPut(Buffer.from('name'), Buffer.from('Sleepy'))
505
+ w2.tryPut(Buffer.from('email'), Buffer.from('sleepy@example.com'))
506
+
507
+ await w1.flush()
508
+ await w2.flush()
509
+
510
+ // Warning: flushing w1 again will unapply w2!
511
+ w1.tryPut(Buffer.from('active'), Buffer.from('false'))
512
+ await w1.flush()
513
+
514
+ for await (const data of b.createReadStream()) {
515
+ console.log(data.key.toString(), '-->', data.value.toString())
516
+ }
517
+
518
+ // Output: (name has reverted to 'Sneezy', email is missing)
519
+ //
520
+ // active --> false
521
+ // name --> Sneezy
522
+ ```
523
+
524
+ #### `batch.close()`
525
+
526
+ Closes the batch without flushing operations. Subsequent attempts
527
+ to flush the batch will result in an error.
528
+
529
+ [hypercore]: https://github.com/holepunchto/hypercore
530
+ [corestore]: https://github.com/holepunchto/corestore
531
+ [streamx]: https://github.com/mafintosh/streamx
package/index.js CHANGED
@@ -12,27 +12,37 @@ const { Pointer, KeyPointer, ValuePointer, TreeNode, EMPTY } = require('./lib/tr
12
12
  const { DeltaOp, DeltaCohort, OP_COHORT } = require('./lib/compression.js')
13
13
 
14
14
  class Hyperbee extends EventEmitter {
15
- constructor(store, options = {}) {
15
+ constructor(store, opts = {}) {
16
16
  super()
17
17
 
18
18
  const {
19
19
  t = 128, // legacy number for now, should be 128 now
20
20
  key = null,
21
21
  encryption = null,
22
+ getEncryptionProvider = toEncryptionProvider(encryption),
22
23
  maxCacheSize = 4096,
23
24
  config = new SessionConfig([], 0, true),
24
25
  activeRequests = config.activeRequests,
25
26
  timeout = config.timeout,
26
27
  wait = config.wait,
27
- core = key ? store.get(key) : store.get({ key, name: 'bee', encryption }),
28
- context = new CoreContext(store, core, new NodeCache(maxCacheSize), core, encryption, t),
28
+ core = key
29
+ ? store.get({ key, encryption: getEncryptionProvider(key) })
30
+ : store.get({ key, name: 'bee', encryption: getEncryptionProvider(key) }),
31
+ context = new CoreContext(
32
+ store,
33
+ core,
34
+ new NodeCache(maxCacheSize),
35
+ core,
36
+ getEncryptionProvider,
37
+ t
38
+ ),
29
39
  root = null,
30
40
  view = false,
31
41
  writable = true,
32
42
  unbatch = 0,
33
43
  autoUpdate = false,
34
44
  preload = null
35
- } = options
45
+ } = opts
36
46
 
37
47
  this.store = store
38
48
  this.root = root
@@ -94,7 +104,9 @@ class Hyperbee extends EventEmitter {
94
104
  root,
95
105
  view: true,
96
106
  writable,
97
- unbatch
107
+ unbatch,
108
+ autoUpdate: false,
109
+ preload: this.preload
98
110
  })
99
111
  }
100
112
 
@@ -423,3 +435,8 @@ async function inflateChildCohort(context, d, ptr, block, config) {
423
435
  const p = new Pointer(context, co.core, co.seq, co.offset)
424
436
  return new DeltaCohort(false, p, deltas)
425
437
  }
438
+
439
+ function toEncryptionProvider(encryption) {
440
+ if (encryption) return (key) => encryption
441
+ return () => null
442
+ }
package/lib/context.js CHANGED
@@ -5,7 +5,16 @@ const { TreeNodePointer } = require('./tree.js')
5
5
  const { decodeBlock } = require('./encoding.js')
6
6
 
7
7
  class CoreContext {
8
- constructor(store, local, cache, core, encryption, t, lock = new ScopeLock(), other = new Map()) {
8
+ constructor(
9
+ store,
10
+ local,
11
+ cache,
12
+ core,
13
+ getEncryptionProvider,
14
+ t,
15
+ lock = new ScopeLock(),
16
+ other = new Map()
17
+ ) {
9
18
  this.store = store
10
19
  this.local = local
11
20
  this.cache = cache
@@ -13,7 +22,7 @@ class CoreContext {
13
22
  this.t = t
14
23
  this.minKeys = t - 1
15
24
  this.maxKeys = 2 * t - 1
16
- this.encryption = encryption
25
+ this.getEncryptionProvider = getEncryptionProvider
17
26
  this.lock = lock
18
27
  this.other = other
19
28
  this.length = 0
@@ -132,7 +141,7 @@ class CoreContext {
132
141
  if (index > this.cores.length) throw new Error('Bad core index: ' + index)
133
142
  if (this.opened[index - 1] === null) {
134
143
  const key = this.cores[index - 1].key
135
- this.opened[index - 1] = this.store.get({ key, encryption: this.encryption })
144
+ this.opened[index - 1] = this.store.get({ key, encryption: this.getEncryptionProvider(key) })
136
145
  }
137
146
  return this.opened[index - 1]
138
147
  }
@@ -166,7 +175,9 @@ class CoreContext {
166
175
  const hex = b4a.toString(key, 'hex')
167
176
  if (this.other.has(hex)) return this.other.get(hex)
168
177
 
169
- const ctx = this._createContext(this.store.get({ key, encryption: this.encryption }))
178
+ const ctx = this._createContext(
179
+ this.store.get({ key, encryption: this.getEncryptionProvider(key) })
180
+ )
170
181
  this.other.set(hex, ctx)
171
182
  return ctx
172
183
  }
@@ -195,11 +206,11 @@ class CoreContext {
195
206
  const store = this.store
196
207
  const local = this.local
197
208
  const cache = this.cache
198
- const encryption = this.encryption
209
+ const getEncryptionProvider = this.getEncryptionProvider
199
210
  const t = this.t
200
211
  const lock = this.lock
201
212
  const other = this.other
202
- return new CoreContext(store, local, cache, core, encryption, t, lock, other)
213
+ return new CoreContext(store, local, cache, core, getEncryptionProvider, t, lock, other)
203
214
  }
204
215
  }
205
216
 
package/lib/ranges.js CHANGED
@@ -119,6 +119,7 @@ class RangeIterator {
119
119
  // TODO: dbl check this for off-by-ones with the offset and keys and children
120
120
  let limit = this.limit
121
121
 
122
+ // TODO: if limit === -1, don't return early here
122
123
  if (limit < this.tree.context.minKeys) return
123
124
 
124
125
  const parent = this.stack[this.stack.length - 1]
@@ -128,17 +129,21 @@ class RangeIterator {
128
129
  this.prefetching = pv
129
130
 
130
131
  for (let i = parent.offset >> 1; i < pv.children.length; i++) {
131
- const k = pv.keys.get(i)
132
+ // If the preceding key in parent is beyond upper bound,
133
+ // stop fetching child nodes.
134
+ if (i > 0) {
135
+ const k = pv.keys.get(i - 1)
132
136
 
133
- const cmp = this.reverse
134
- ? this.start
135
- ? b4a.compare(this.start, k.key)
136
- : -1
137
- : this.end
138
- ? b4a.compare(k.key, this.end)
139
- : -1
137
+ const cmp = this.reverse
138
+ ? this.start
139
+ ? b4a.compare(this.start, k.key)
140
+ : -1
141
+ : this.end
142
+ ? b4a.compare(k.key, this.end)
143
+ : -1
140
144
 
141
- if (cmp > this.compare) break
145
+ if (cmp > this.compare) return
146
+ }
142
147
 
143
148
  const c = pv.children.get(i)
144
149
  if (!c.value) this.tree.inflate(c, this.config).catch(noop)
package/lib/write.js CHANGED
@@ -33,6 +33,7 @@ module.exports = class WriteBatch {
33
33
  this.length = length
34
34
  this.key = key
35
35
  this.type = type
36
+ this.hasLock = false
36
37
  this.closed = false
37
38
  this.applied = 0
38
39
  this.root = null
@@ -62,9 +63,20 @@ module.exports = class WriteBatch {
62
63
  return root ? root.seq + 1 : 0
63
64
  }
64
65
 
66
+ async lock() {
67
+ if (this.hasLock) return
68
+ await this.tree.context.lock.lock()
69
+ this.hasLock = true
70
+ }
71
+
72
+ _unlock() {
73
+ if (!this.hasLock) return
74
+ this.tree.context.lock.unlock()
75
+ this.hasLock = false
76
+ }
77
+
65
78
  async flush() {
66
- const lock = this.tree.context.lock
67
- await lock.lock()
79
+ await this.lock()
68
80
 
69
81
  try {
70
82
  const ops = this.ops
@@ -94,7 +106,7 @@ module.exports = class WriteBatch {
94
106
  this.tree.update(this.root)
95
107
  }
96
108
  } finally {
97
- lock.unlock()
109
+ this._unlock()
98
110
  }
99
111
  }
100
112
 
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "hyperbee2",
3
- "version": "2.7.0",
3
+ "version": "2.8.0",
4
4
  "description": "Scalable P2P BTree",
5
5
  "main": "index.js",
6
6
  "files": [