@nxtedition/rocksdb 7.0.3 → 7.0.6

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (128)
  1. package/binding.cc +320 -324
  2. package/chained-batch.js +6 -1
  3. package/deps/rocksdb/rocksdb/CMakeLists.txt +8 -3
  4. package/deps/rocksdb/rocksdb/Makefile +10 -4
  5. package/deps/rocksdb/rocksdb/TARGETS +6 -4
  6. package/deps/rocksdb/rocksdb/cache/cache_bench_tool.cc +9 -0
  7. package/deps/rocksdb/rocksdb/cache/cache_test.cc +14 -0
  8. package/deps/rocksdb/rocksdb/cache/clock_cache.cc +8 -8
  9. package/deps/rocksdb/rocksdb/cache/fast_lru_cache.cc +272 -174
  10. package/deps/rocksdb/rocksdb/cache/fast_lru_cache.h +201 -57
  11. package/deps/rocksdb/rocksdb/cache/lru_cache.cc +19 -19
  12. package/deps/rocksdb/rocksdb/cache/lru_cache.h +2 -1
  13. package/deps/rocksdb/rocksdb/db/blob/blob_source.cc +170 -0
  14. package/deps/rocksdb/rocksdb/db/blob/blob_source.h +95 -0
  15. package/deps/rocksdb/rocksdb/db/blob/blob_source_test.cc +298 -0
  16. package/deps/rocksdb/rocksdb/db/blob/db_blob_basic_test.cc +172 -0
  17. package/deps/rocksdb/rocksdb/db/column_family.cc +8 -3
  18. package/deps/rocksdb/rocksdb/db/column_family.h +6 -3
  19. package/deps/rocksdb/rocksdb/db/compaction/compaction_job.cc +10 -0
  20. package/deps/rocksdb/rocksdb/db/compaction/compaction_job_test.cc +6 -6
  21. package/deps/rocksdb/rocksdb/db/compaction/compaction_picker_level.cc +22 -2
  22. package/deps/rocksdb/rocksdb/db/compaction/compaction_picker_test.cc +38 -0
  23. package/deps/rocksdb/rocksdb/db/db_basic_test.cc +17 -5
  24. package/deps/rocksdb/rocksdb/db/db_block_cache_test.cc +4 -7
  25. package/deps/rocksdb/rocksdb/db/db_bloom_filter_test.cc +74 -71
  26. package/deps/rocksdb/rocksdb/db/db_compaction_test.cc +70 -1
  27. package/deps/rocksdb/rocksdb/db/db_impl/db_impl.cc +13 -12
  28. package/deps/rocksdb/rocksdb/db/db_impl/db_impl.h +36 -0
  29. package/deps/rocksdb/rocksdb/db/db_impl/db_impl_compaction_flush.cc +11 -4
  30. package/deps/rocksdb/rocksdb/db/db_impl/db_impl_files.cc +1 -1
  31. package/deps/rocksdb/rocksdb/db/db_impl/db_impl_open.cc +139 -91
  32. package/deps/rocksdb/rocksdb/db/db_impl/db_impl_write.cc +48 -14
  33. package/deps/rocksdb/rocksdb/db/db_kv_checksum_test.cc +90 -55
  34. package/deps/rocksdb/rocksdb/db/db_rate_limiter_test.cc +9 -4
  35. package/deps/rocksdb/rocksdb/db/db_test.cc +3 -1
  36. package/deps/rocksdb/rocksdb/db/db_wal_test.cc +12 -7
  37. package/deps/rocksdb/rocksdb/db/db_write_test.cc +35 -0
  38. package/deps/rocksdb/rocksdb/db/dbformat.cc +3 -1
  39. package/deps/rocksdb/rocksdb/db/dbformat.h +5 -3
  40. package/deps/rocksdb/rocksdb/db/flush_job_test.cc +1 -1
  41. package/deps/rocksdb/rocksdb/db/memtable.cc +1 -0
  42. package/deps/rocksdb/rocksdb/db/memtable_list_test.cc +4 -2
  43. package/deps/rocksdb/rocksdb/db/repair.cc +1 -1
  44. package/deps/rocksdb/rocksdb/db/version_builder.cc +43 -1
  45. package/deps/rocksdb/rocksdb/db/version_edit.cc +13 -5
  46. package/deps/rocksdb/rocksdb/db/version_edit.h +22 -1
  47. package/deps/rocksdb/rocksdb/db/version_edit_handler.cc +4 -5
  48. package/deps/rocksdb/rocksdb/db/version_set.cc +109 -41
  49. package/deps/rocksdb/rocksdb/db/version_set.h +36 -3
  50. package/deps/rocksdb/rocksdb/db/version_set_sync_and_async.h +1 -4
  51. package/deps/rocksdb/rocksdb/db/version_set_test.cc +10 -10
  52. package/deps/rocksdb/rocksdb/db/version_util.h +1 -1
  53. package/deps/rocksdb/rocksdb/db/wal_manager_test.cc +1 -1
  54. package/deps/rocksdb/rocksdb/db/write_batch.cc +34 -10
  55. package/deps/rocksdb/rocksdb/db/write_batch_internal.h +2 -0
  56. package/deps/rocksdb/rocksdb/db/write_callback_test.cc +4 -0
  57. package/deps/rocksdb/rocksdb/db_stress_tool/batched_ops_stress.cc +2 -0
  58. package/deps/rocksdb/rocksdb/db_stress_tool/cf_consistency_stress.cc +4 -1
  59. package/deps/rocksdb/rocksdb/db_stress_tool/db_stress_common.h +1 -1
  60. package/deps/rocksdb/rocksdb/db_stress_tool/db_stress_gflags.cc +7 -5
  61. package/deps/rocksdb/rocksdb/db_stress_tool/db_stress_test_base.cc +5 -10
  62. package/deps/rocksdb/rocksdb/db_stress_tool/db_stress_tool.cc +0 -7
  63. package/deps/rocksdb/rocksdb/db_stress_tool/no_batched_ops_stress.cc +2 -0
  64. package/deps/rocksdb/rocksdb/file/random_access_file_reader.cc +24 -3
  65. package/deps/rocksdb/rocksdb/file/writable_file_writer.cc +8 -0
  66. package/deps/rocksdb/rocksdb/file/writable_file_writer.h +10 -0
  67. package/deps/rocksdb/rocksdb/include/rocksdb/advanced_options.h +5 -0
  68. package/deps/rocksdb/rocksdb/include/rocksdb/cache.h +4 -4
  69. package/deps/rocksdb/rocksdb/include/rocksdb/options.h +9 -5
  70. package/deps/rocksdb/rocksdb/include/rocksdb/statistics.h +5 -0
  71. package/deps/rocksdb/rocksdb/include/rocksdb/types.h +1 -0
  72. package/deps/rocksdb/rocksdb/include/rocksdb/utilities/write_batch_with_index.h +1 -1
  73. package/deps/rocksdb/rocksdb/include/rocksdb/version.h +1 -1
  74. package/deps/rocksdb/rocksdb/include/rocksdb/write_batch.h +0 -3
  75. package/deps/rocksdb/rocksdb/microbench/ribbon_bench.cc +8 -6
  76. package/deps/rocksdb/rocksdb/monitoring/statistics.cc +3 -1
  77. package/deps/rocksdb/rocksdb/options/options_helper.cc +4 -2
  78. package/deps/rocksdb/rocksdb/options/options_test.cc +1 -11
  79. package/deps/rocksdb/rocksdb/port/port_posix.h +7 -0
  80. package/deps/rocksdb/rocksdb/port/win/port_win.h +11 -3
  81. package/deps/rocksdb/rocksdb/src.mk +6 -2
  82. package/deps/rocksdb/rocksdb/table/block_based/block_based_table_builder.cc +4 -33
  83. package/deps/rocksdb/rocksdb/table/block_based/block_based_table_iterator.h +3 -3
  84. package/deps/rocksdb/rocksdb/table/block_based/block_based_table_reader.cc +38 -118
  85. package/deps/rocksdb/rocksdb/table/block_based/block_based_table_reader.h +6 -8
  86. package/deps/rocksdb/rocksdb/table/block_based/block_based_table_reader_sync_and_async.h +10 -13
  87. package/deps/rocksdb/rocksdb/table/block_based/block_like_traits.h +4 -9
  88. package/deps/rocksdb/rocksdb/table/block_based/block_type.h +0 -1
  89. package/deps/rocksdb/rocksdb/table/block_based/filter_block.h +10 -28
  90. package/deps/rocksdb/rocksdb/table/block_based/filter_block_reader_common.cc +2 -3
  91. package/deps/rocksdb/rocksdb/table/block_based/filter_policy.cc +0 -91
  92. package/deps/rocksdb/rocksdb/table/block_based/filter_policy_internal.h +2 -30
  93. package/deps/rocksdb/rocksdb/table/block_based/full_filter_block.cc +6 -27
  94. package/deps/rocksdb/rocksdb/table/block_based/full_filter_block.h +11 -13
  95. package/deps/rocksdb/rocksdb/table/block_based/full_filter_block_test.cc +28 -40
  96. package/deps/rocksdb/rocksdb/table/block_based/mock_block_based_table.h +0 -1
  97. package/deps/rocksdb/rocksdb/table/block_based/partitioned_filter_block.cc +22 -43
  98. package/deps/rocksdb/rocksdb/table/block_based/partitioned_filter_block.h +11 -22
  99. package/deps/rocksdb/rocksdb/table/block_based/partitioned_filter_block_test.cc +24 -25
  100. package/deps/rocksdb/rocksdb/table/block_fetcher.cc +0 -1
  101. package/deps/rocksdb/rocksdb/table/get_context.h +0 -1
  102. package/deps/rocksdb/rocksdb/table/table_test.cc +3 -18
  103. package/deps/rocksdb/rocksdb/tools/db_bench_tool.cc +3 -16
  104. package/deps/rocksdb/rocksdb/tools/ldb_cmd.cc +3 -3
  105. package/deps/rocksdb/rocksdb/tools/ldb_cmd_test.cc +1 -1
  106. package/deps/rocksdb/rocksdb/util/bloom_test.cc +0 -201
  107. package/deps/rocksdb/rocksdb/util/distributed_mutex.h +48 -0
  108. package/deps/rocksdb/rocksdb/util/filter_bench.cc +5 -11
  109. package/deps/rocksdb/rocksdb/utilities/backup/backup_engine.cc +3 -0
  110. package/deps/rocksdb/rocksdb/utilities/cache_dump_load_impl.cc +7 -21
  111. package/deps/rocksdb/rocksdb/utilities/cache_dump_load_impl.h +1 -1
  112. package/deps/rocksdb/rocksdb/utilities/checkpoint/checkpoint_test.cc +45 -0
  113. package/deps/rocksdb/rocksdb/utilities/transactions/pessimistic_transaction_db.h +21 -14
  114. package/deps/rocksdb/rocksdb/utilities/transactions/transaction_base.cc +10 -1
  115. package/deps/rocksdb/rocksdb/utilities/transactions/write_prepared_txn.cc +3 -1
  116. package/deps/rocksdb/rocksdb/utilities/transactions/write_prepared_txn_db.cc +9 -0
  117. package/deps/rocksdb/rocksdb/utilities/transactions/write_unprepared_txn.cc +3 -2
  118. package/deps/rocksdb/rocksdb/utilities/transactions/write_unprepared_txn_db.cc +3 -1
  119. package/deps/rocksdb/rocksdb/utilities/write_batch_with_index/write_batch_with_index.cc +5 -4
  120. package/deps/rocksdb/rocksdb.gyp +1 -1
  121. package/index.js +36 -14
  122. package/package-lock.json +2 -2
  123. package/package.json +1 -1
  124. package/prebuilds/darwin-arm64/node.napi.node +0 -0
  125. package/prebuilds/linux-x64/node.napi.node +0 -0
  126. package/deps/rocksdb/rocksdb/table/block_based/block_based_filter_block.cc +0 -358
  127. package/deps/rocksdb/rocksdb/table/block_based/block_based_filter_block.h +0 -127
  128. package/deps/rocksdb/rocksdb/table/block_based/block_based_filter_block_test.cc +0 -219
@@ -71,20 +71,27 @@ class PessimisticTransactionDB : public TransactionDB {
71
71
  virtual Status Write(const WriteOptions& opts, WriteBatch* updates) override;
72
72
  inline Status WriteWithConcurrencyControl(const WriteOptions& opts,
73
73
  WriteBatch* updates) {
74
- // Need to lock all keys in this batch to prevent write conflicts with
75
- // concurrent transactions.
76
- Transaction* txn = BeginInternalTransaction(opts);
77
- txn->DisableIndexing();
78
-
79
- auto txn_impl = static_cast_with_check<PessimisticTransaction>(txn);
80
-
81
- // Since commitBatch sorts the keys before locking, concurrent Write()
82
- // operations will not cause a deadlock.
83
- // In order to avoid a deadlock with a concurrent Transaction, Transactions
84
- // should use a lock timeout.
85
- Status s = txn_impl->CommitBatch(updates);
86
-
87
- delete txn;
74
+ Status s;
75
+ if (opts.protection_bytes_per_key > 0) {
76
+ s = WriteBatchInternal::UpdateProtectionInfo(
77
+ updates, opts.protection_bytes_per_key);
78
+ }
79
+ if (s.ok()) {
80
+ // Need to lock all keys in this batch to prevent write conflicts with
81
+ // concurrent transactions.
82
+ Transaction* txn = BeginInternalTransaction(opts);
83
+ txn->DisableIndexing();
84
+
85
+ auto txn_impl = static_cast_with_check<PessimisticTransaction>(txn);
86
+
87
+ // Since commitBatch sorts the keys before locking, concurrent Write()
88
+ // operations will not cause a deadlock.
89
+ // In order to avoid a deadlock with a concurrent Transaction,
90
+ // Transactions should use a lock timeout.
91
+ s = txn_impl->CommitBatch(updates);
92
+
93
+ delete txn;
94
+ }
88
95
 
89
96
  return s;
90
97
  }
@@ -67,8 +67,11 @@ TransactionBaseImpl::TransactionBaseImpl(
67
67
  cmp_(GetColumnFamilyUserComparator(db->DefaultColumnFamily())),
68
68
  lock_tracker_factory_(lock_tracker_factory),
69
69
  start_time_(dbimpl_->GetSystemClock()->NowMicros()),
70
- write_batch_(cmp_, 0, true, 0),
70
+ write_batch_(cmp_, 0, true, 0, write_options.protection_bytes_per_key),
71
71
  tracked_locks_(lock_tracker_factory_.Create()),
72
+ commit_time_batch_(0 /* reserved_bytes */, 0 /* max_bytes */,
73
+ write_options.protection_bytes_per_key,
74
+ 0 /* default_cf_ts_sz */),
72
75
  indexing_enabled_(true) {
73
76
  assert(dynamic_cast<DBImpl*>(db_) != nullptr);
74
77
  log_number_ = 0;
@@ -108,6 +111,12 @@ void TransactionBaseImpl::Reinitialize(DB* db,
108
111
  start_time_ = dbimpl_->GetSystemClock()->NowMicros();
109
112
  indexing_enabled_ = true;
110
113
  cmp_ = GetColumnFamilyUserComparator(db_->DefaultColumnFamily());
114
+ WriteBatchInternal::UpdateProtectionInfo(
115
+ write_batch_.GetWriteBatch(), write_options_.protection_bytes_per_key)
116
+ .PermitUncheckedError();
117
+ WriteBatchInternal::UpdateProtectionInfo(
118
+ &commit_time_batch_, write_options_.protection_bytes_per_key)
119
+ .PermitUncheckedError();
111
120
  }
112
121
 
113
122
  void TransactionBaseImpl::SetSnapshot() {
@@ -267,7 +267,9 @@ Status WritePreparedTxn::RollbackInternal() {
267
267
  assert(db_impl_);
268
268
  assert(wpt_db_);
269
269
 
270
- WriteBatch rollback_batch;
270
+ WriteBatch rollback_batch(0 /* reserved_bytes */, 0 /* max_bytes */,
271
+ write_options_.protection_bytes_per_key,
272
+ 0 /* default_cf_ts_sz */);
271
273
  assert(GetId() != kMaxSequenceNumber);
272
274
  assert(GetId() > 0);
273
275
  auto cf_map_shared_ptr = wpt_db_->GetCFHandleMap();
@@ -166,6 +166,15 @@ Status WritePreparedTxnDB::WriteInternal(const WriteOptions& write_options_orig,
166
166
  // increased for this batch.
167
167
  return Status::OK();
168
168
  }
169
+
170
+ if (write_options_orig.protection_bytes_per_key > 0) {
171
+ auto s = WriteBatchInternal::UpdateProtectionInfo(
172
+ batch, write_options_orig.protection_bytes_per_key);
173
+ if (!s.ok()) {
174
+ return s;
175
+ }
176
+ }
177
+
169
178
  if (batch_cnt == 0) { // not provided, then compute it
170
179
  // TODO(myabandeh): add an option to allow user skipping this cost
171
180
  SubBatchCounter counter(*GetCFComparatorMap());
@@ -464,7 +464,7 @@ Status WriteUnpreparedTxn::FlushWriteBatchWithSavePointToDB() {
464
464
  // only used if the write batch encounters an invalid cf id, and falls back to
465
465
  // this comparator.
466
466
  WriteBatchWithIndex wb(wpt_db_->DefaultColumnFamily()->GetComparator(), 0,
467
- true, 0);
467
+ true, 0, write_options_.protection_bytes_per_key);
468
468
  // Swap with write_batch_ so that wb contains the complete write batch. The
469
469
  // actual write batch that will be flushed to DB will be built in
470
470
  // write_batch_, and will be read by FlushWriteBatchToDBInternal.
@@ -722,7 +722,8 @@ Status WriteUnpreparedTxn::WriteRollbackKeys(
722
722
  Status WriteUnpreparedTxn::RollbackInternal() {
723
723
  // TODO(lth): Reduce duplicate code with WritePrepared rollback logic.
724
724
  WriteBatchWithIndex rollback_batch(
725
- wpt_db_->DefaultColumnFamily()->GetComparator(), 0, true, 0);
725
+ wpt_db_->DefaultColumnFamily()->GetComparator(), 0, true, 0,
726
+ write_options_.protection_bytes_per_key);
726
727
  assert(GetId() != kMaxSequenceNumber);
727
728
  assert(GetId() > 0);
728
729
  Status s;
@@ -59,7 +59,9 @@ Status WriteUnpreparedTxnDB::RollbackRecoveredTransaction(
59
59
  for (auto it = rtxn->batches_.rbegin(); it != rtxn->batches_.rend(); ++it) {
60
60
  auto last_visible_txn = it->first - 1;
61
61
  const auto& batch = it->second.batch_;
62
- WriteBatch rollback_batch;
62
+ WriteBatch rollback_batch(0 /* reserved_bytes */, 0 /* max_bytes */,
63
+ w_options.protection_bytes_per_key,
64
+ 0 /* default_cf_ts_sz */);
63
65
 
64
66
  struct RollbackWriteBatchBuilder : public WriteBatch::Handler {
65
67
  DBImpl* db_;
@@ -25,8 +25,9 @@
25
25
  namespace ROCKSDB_NAMESPACE {
26
26
  struct WriteBatchWithIndex::Rep {
27
27
  explicit Rep(const Comparator* index_comparator, size_t reserved_bytes = 0,
28
- size_t max_bytes = 0, bool _overwrite_key = false)
29
- : write_batch(reserved_bytes, max_bytes, /*protection_bytes_per_key=*/0,
28
+ size_t max_bytes = 0, bool _overwrite_key = false,
29
+ size_t protection_bytes_per_key = 0)
30
+ : write_batch(reserved_bytes, max_bytes, protection_bytes_per_key,
30
31
  index_comparator ? index_comparator->timestamp_size() : 0),
31
32
  comparator(index_comparator, &write_batch),
32
33
  skip_list(comparator, &arena),
@@ -262,9 +263,9 @@ Status WriteBatchWithIndex::Rep::ReBuildIndex() {
262
263
 
263
264
  WriteBatchWithIndex::WriteBatchWithIndex(
264
265
  const Comparator* default_index_comparator, size_t reserved_bytes,
265
- bool overwrite_key, size_t max_bytes)
266
+ bool overwrite_key, size_t max_bytes, size_t protection_bytes_per_key)
266
267
  : rep(new Rep(default_index_comparator, reserved_bytes, max_bytes,
267
- overwrite_key)) {}
268
+ overwrite_key, protection_bytes_per_key)) {}
268
269
 
269
270
  WriteBatchWithIndex::~WriteBatchWithIndex() {}
270
271
 
@@ -182,6 +182,7 @@
182
182
  "rocksdb/db/blob/blob_log_format.cc",
183
183
  "rocksdb/db/blob/blob_log_sequential_reader.cc",
184
184
  "rocksdb/db/blob/blob_log_writer.cc",
185
+ "rocksdb/db/blob/blob_source.cc",
185
186
  "rocksdb/db/blob/prefetch_buffer_collection.cc",
186
187
  "rocksdb/db/builder.cc",
187
188
  "rocksdb/db/c.cc",
@@ -306,7 +307,6 @@
306
307
  "rocksdb/table/adaptive/adaptive_table_factory.cc",
307
308
  "rocksdb/table/block_based/binary_search_index_reader.cc",
308
309
  "rocksdb/table/block_based/block.cc",
309
- "rocksdb/table/block_based/block_based_filter_block.cc",
310
310
  "rocksdb/table/block_based/block_based_table_builder.cc",
311
311
  "rocksdb/table/block_based/block_based_table_factory.cc",
312
312
  "rocksdb/table/block_based/block_based_table_iterator.cc",
package/index.js CHANGED
@@ -75,7 +75,12 @@ class RocksLevel extends AbstractLevel {
75
75
  }
76
76
 
77
77
  _put (key, value, options, callback) {
78
- process.nextTick(callback, binding.db_put(this[kContext], key, value, options))
78
+ try {
79
+ binding.db_put(this[kContext], key, value, options)
80
+ process.nextTick(callback, null)
81
+ } catch (err) {
82
+ process.nextTick(callback, err)
83
+ }
79
84
  }
80
85
 
81
86
  _get (key, options, callback) {
@@ -87,11 +92,21 @@ class RocksLevel extends AbstractLevel {
87
92
  }
88
93
 
89
94
  _del (key, options, callback) {
90
- process.nextTick(callback, binding.db_del(this[kContext], key, options))
95
+ try {
96
+ binding.db_del(this[kContext], key, options)
97
+ process.nextTick(callback, null)
98
+ } catch (err) {
99
+ process.nextTick(callback, err)
100
+ }
91
101
  }
92
102
 
93
103
  _clear (options, callback) {
94
- process.nextTick(callback, binding.db_clear(this[kContext], options))
104
+ try {
105
+ binding.db_clear(this[kContext], options)
106
+ process.nextTick(callback, null)
107
+ } catch (err) {
108
+ process.nextTick(callback, err)
109
+ }
95
110
  }
96
111
 
97
112
  _chainedBatch () {
@@ -99,7 +114,12 @@ class RocksLevel extends AbstractLevel {
99
114
  }
100
115
 
101
116
  _batch (operations, options, callback) {
102
- process.nextTick(callback, binding.batch_do(this[kContext], operations, options))
117
+ try {
118
+ binding.batch_do(this[kContext], operations, options)
119
+ process.nextTick(callback, null)
120
+ } catch (err) {
121
+ process.nextTick(callback, err)
122
+ }
103
123
  }
104
124
 
105
125
  _iterator (options) {
@@ -128,10 +148,7 @@ class RocksLevel extends AbstractLevel {
128
148
  })
129
149
  }
130
150
 
131
- const context = binding.iterator_init(this[kContext], {
132
- highWaterMarkBytes: 1024 * 1024 * 1024, // TODO (fix): Replace with -1.
133
- ...options
134
- })
151
+ const context = binding.iterator_init(this[kContext], options)
135
152
  const resource = {
136
153
  callback: null,
137
154
  close (callback) {
@@ -141,12 +158,17 @@ class RocksLevel extends AbstractLevel {
141
158
 
142
159
  try {
143
160
  this.attachResource(resource)
144
- return await new Promise((resolve, reject) => binding.iterator_nextv(context, options.limit || 1000, (err, rows, finished) => {
161
+
162
+ const limit = options.limit ?? 1000
163
+ await new Promise((resolve, reject) => binding.iterator_nextv(context, limit, (err, rows, finished) => {
145
164
  if (err) {
146
165
  reject(err)
147
166
  } else {
148
- const sequence = binding.iterator_get_sequence(context)
149
- resolve({ rows, sequence, finished })
167
+ resolve({
168
+ rows,
169
+ sequence: binding.iterator_get_sequence(context),
170
+ finished
171
+ })
150
172
  }
151
173
  }))
152
174
  } finally {
@@ -222,11 +244,11 @@ class RocksLevel extends AbstractLevel {
222
244
  const updates = new Updates(this, options)
223
245
  try {
224
246
  while (true) {
225
- const { rows, sequence } = await updates.next()
226
- if (!rows) {
247
+ const entry = await updates.next()
248
+ if (!entry.rows) {
227
249
  return
228
250
  }
229
- yield { rows, sequence }
251
+ yield entry
230
252
  }
231
253
  } finally {
232
254
  await updates.close()
package/package-lock.json CHANGED
@@ -1,12 +1,12 @@
1
1
  {
2
2
  "name": "@nxtedition/rocksdb",
3
- "version": "6.0.2",
3
+ "version": "7.0.5",
4
4
  "lockfileVersion": 2,
5
5
  "requires": true,
6
6
  "packages": {
7
7
  "": {
8
8
  "name": "@nxtedition/rocksdb",
9
- "version": "6.0.2",
9
+ "version": "7.0.5",
10
10
  "hasInstallScript": true,
11
11
  "license": "MIT",
12
12
  "dependencies": {
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "@nxtedition/rocksdb",
3
- "version": "7.0.3",
3
+ "version": "7.0.6",
4
4
  "description": "A low-level Node.js RocksDB binding",
5
5
  "license": "MIT",
6
6
  "main": "index.js",
Binary file
@@ -1,358 +0,0 @@
1
- // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
2
- // This source code is licensed under both the GPLv2 (found in the
3
- // COPYING file in the root directory) and Apache 2.0 License
4
- // (found in the LICENSE.Apache file in the root directory).
5
- //
6
- // Copyright (c) 2012 The LevelDB Authors. All rights reserved.
7
- // Use of this source code is governed by a BSD-style license that can be
8
- // found in the LICENSE file. See the AUTHORS file for names of contributors.
9
-
10
- #include "table/block_based/block_based_filter_block.h"
11
-
12
- #include <algorithm>
13
-
14
- #include "db/dbformat.h"
15
- #include "monitoring/perf_context_imp.h"
16
- #include "rocksdb/filter_policy.h"
17
- #include "table/block_based/block_based_table_reader.h"
18
- #include "util/cast_util.h"
19
- #include "util/coding.h"
20
- #include "util/string_util.h"
21
-
22
- namespace ROCKSDB_NAMESPACE {
23
-
24
- namespace {
25
-
26
- void AppendItem(std::string* props, const std::string& key,
27
- const std::string& value) {
28
- char cspace = ' ';
29
- std::string value_str("");
30
- size_t i = 0;
31
- const size_t dataLength = 64;
32
- const size_t tabLength = 2;
33
- const size_t offLength = 16;
34
-
35
- value_str.append(&value[i], std::min(size_t(dataLength), value.size()));
36
- i += dataLength;
37
- while (i < value.size()) {
38
- value_str.append("\n");
39
- value_str.append(offLength, cspace);
40
- value_str.append(&value[i], std::min(size_t(dataLength), value.size() - i));
41
- i += dataLength;
42
- }
43
-
44
- std::string result("");
45
- if (key.size() < (offLength - tabLength))
46
- result.append(size_t((offLength - tabLength)) - key.size(), cspace);
47
- result.append(key);
48
-
49
- props->append(result + ": " + value_str + "\n");
50
- }
51
-
52
- template <class TKey>
53
- void AppendItem(std::string* props, const TKey& key, const std::string& value) {
54
- std::string key_str = std::to_string(key);
55
- AppendItem(props, key_str, value);
56
- }
57
- } // namespace
58
-
59
- // See doc/table_format.txt for an explanation of the filter block format.
60
-
61
- // Generate new filter every 2KB of data
62
- static const size_t kFilterBaseLg = 11;
63
- static const size_t kFilterBase = 1 << kFilterBaseLg;
64
-
65
- BlockBasedFilterBlockBuilder::BlockBasedFilterBlockBuilder(
66
- const SliceTransform* prefix_extractor,
67
- const BlockBasedTableOptions& table_opt, int bits_per_key)
68
- : prefix_extractor_(prefix_extractor),
69
- whole_key_filtering_(table_opt.whole_key_filtering),
70
- bits_per_key_(bits_per_key),
71
- prev_prefix_start_(0),
72
- prev_prefix_size_(0),
73
- total_added_in_built_(0) {}
74
-
75
- void BlockBasedFilterBlockBuilder::StartBlock(uint64_t block_offset) {
76
- uint64_t filter_index = (block_offset / kFilterBase);
77
- assert(filter_index >= filter_offsets_.size());
78
- while (filter_index > filter_offsets_.size()) {
79
- GenerateFilter();
80
- }
81
- }
82
-
83
- size_t BlockBasedFilterBlockBuilder::EstimateEntriesAdded() {
84
- return total_added_in_built_ + start_.size();
85
- }
86
-
87
- void BlockBasedFilterBlockBuilder::Add(const Slice& key_without_ts) {
88
- if (prefix_extractor_ && prefix_extractor_->InDomain(key_without_ts)) {
89
- AddPrefix(key_without_ts);
90
- }
91
-
92
- if (whole_key_filtering_) {
93
- AddKey(key_without_ts);
94
- }
95
- }
96
-
97
- // Add key to filter if needed
98
- inline void BlockBasedFilterBlockBuilder::AddKey(const Slice& key) {
99
- start_.push_back(entries_.size());
100
- entries_.append(key.data(), key.size());
101
- }
102
-
103
- // Add prefix to filter if needed
104
- inline void BlockBasedFilterBlockBuilder::AddPrefix(const Slice& key) {
105
- // get slice for most recently added entry
106
- Slice prev;
107
- if (prev_prefix_size_ > 0) {
108
- prev = Slice(entries_.data() + prev_prefix_start_, prev_prefix_size_);
109
- }
110
-
111
- Slice prefix = prefix_extractor_->Transform(key);
112
- // insert prefix only when it's different from the previous prefix.
113
- if (prev.size() == 0 || prefix != prev) {
114
- prev_prefix_start_ = entries_.size();
115
- prev_prefix_size_ = prefix.size();
116
- AddKey(prefix);
117
- }
118
- }
119
-
120
- Slice BlockBasedFilterBlockBuilder::Finish(
121
- const BlockHandle& /*tmp*/, Status* status,
122
- std::unique_ptr<const char[]>* /* filter_data */) {
123
- // In this impl we ignore BlockHandle and filter_data
124
- *status = Status::OK();
125
-
126
- if (!start_.empty()) {
127
- GenerateFilter();
128
- }
129
-
130
- // Append array of per-filter offsets
131
- const uint32_t array_offset = static_cast<uint32_t>(result_.size());
132
- for (size_t i = 0; i < filter_offsets_.size(); i++) {
133
- PutFixed32(&result_, filter_offsets_[i]);
134
- }
135
-
136
- PutFixed32(&result_, array_offset);
137
- result_.push_back(kFilterBaseLg); // Save encoding parameter in result
138
- return Slice(result_);
139
- }
140
-
141
- void BlockBasedFilterBlockBuilder::GenerateFilter() {
142
- const size_t num_entries = start_.size();
143
- if (num_entries == 0) {
144
- // Fast path if there are no keys for this filter
145
- filter_offsets_.push_back(static_cast<uint32_t>(result_.size()));
146
- return;
147
- }
148
- total_added_in_built_ += num_entries;
149
-
150
- // Make list of keys from flattened key structure
151
- start_.push_back(entries_.size()); // Simplify length computation
152
- tmp_entries_.resize(num_entries);
153
- for (size_t i = 0; i < num_entries; i++) {
154
- const char* base = entries_.data() + start_[i];
155
- size_t length = start_[i + 1] - start_[i];
156
- tmp_entries_[i] = Slice(base, length);
157
- }
158
-
159
- // Generate filter for current set of keys and append to result_.
160
- filter_offsets_.push_back(static_cast<uint32_t>(result_.size()));
161
- DeprecatedBlockBasedBloomFilterPolicy::CreateFilter(
162
- tmp_entries_.data(), static_cast<int>(num_entries), bits_per_key_,
163
- &result_);
164
-
165
- tmp_entries_.clear();
166
- entries_.clear();
167
- start_.clear();
168
- prev_prefix_start_ = 0;
169
- prev_prefix_size_ = 0;
170
- }
171
-
172
- BlockBasedFilterBlockReader::BlockBasedFilterBlockReader(
173
- const BlockBasedTable* t, CachableEntry<BlockContents>&& filter_block)
174
- : FilterBlockReaderCommon(t, std::move(filter_block)) {
175
- assert(table());
176
- assert(table()->get_rep());
177
- assert(table()->get_rep()->filter_policy);
178
- }
179
-
180
- std::unique_ptr<FilterBlockReader> BlockBasedFilterBlockReader::Create(
181
- const BlockBasedTable* table, const ReadOptions& ro,
182
- FilePrefetchBuffer* prefetch_buffer, bool use_cache, bool prefetch,
183
- bool pin, BlockCacheLookupContext* lookup_context) {
184
- assert(table);
185
- assert(table->get_rep());
186
- assert(!pin || prefetch);
187
-
188
- CachableEntry<BlockContents> filter_block;
189
- if (prefetch || !use_cache) {
190
- const Status s = ReadFilterBlock(
191
- table, prefetch_buffer, ro, use_cache, nullptr /* get_context */,
192
- lookup_context, &filter_block, BlockType::kDeprecatedFilter);
193
- if (!s.ok()) {
194
- IGNORE_STATUS_IF_ERROR(s);
195
- return std::unique_ptr<FilterBlockReader>();
196
- }
197
-
198
- if (use_cache && !pin) {
199
- filter_block.Reset();
200
- }
201
- }
202
-
203
- return std::unique_ptr<FilterBlockReader>(
204
- new BlockBasedFilterBlockReader(table, std::move(filter_block)));
205
- }
206
-
207
- bool BlockBasedFilterBlockReader::KeyMayMatch(
208
- const Slice& key, const SliceTransform* /* prefix_extractor */,
209
- uint64_t block_offset, const bool no_io,
210
- const Slice* const /*const_ikey_ptr*/, GetContext* get_context,
211
- BlockCacheLookupContext* lookup_context) {
212
- assert(block_offset != kNotValid);
213
- if (!whole_key_filtering()) {
214
- return true;
215
- }
216
- return MayMatch(key, block_offset, no_io, get_context, lookup_context);
217
- }
218
-
219
- bool BlockBasedFilterBlockReader::PrefixMayMatch(
220
- const Slice& prefix, const SliceTransform* /* prefix_extractor */,
221
- uint64_t block_offset, const bool no_io,
222
- const Slice* const /*const_ikey_ptr*/, GetContext* get_context,
223
- BlockCacheLookupContext* lookup_context) {
224
- assert(block_offset != kNotValid);
225
- return MayMatch(prefix, block_offset, no_io, get_context, lookup_context);
226
- }
227
-
228
- bool BlockBasedFilterBlockReader::ParseFieldsFromBlock(
229
- const BlockContents& contents, const char** data, const char** offset,
230
- size_t* num, size_t* base_lg) {
231
- assert(data);
232
- assert(offset);
233
- assert(num);
234
- assert(base_lg);
235
-
236
- const size_t n = contents.data.size();
237
- if (n < 5) { // 1 byte for base_lg and 4 for start of offset array
238
- return false;
239
- }
240
-
241
- const uint32_t last_word = DecodeFixed32(contents.data.data() + n - 5);
242
- if (last_word > n - 5) {
243
- return false;
244
- }
245
-
246
- *data = contents.data.data();
247
- *offset = (*data) + last_word;
248
- *num = (n - 5 - last_word) / 4;
249
- *base_lg = contents.data[n - 1];
250
-
251
- return true;
252
- }
253
-
254
- bool BlockBasedFilterBlockReader::MayMatch(
255
- const Slice& entry, uint64_t block_offset, bool no_io,
256
- GetContext* get_context, BlockCacheLookupContext* lookup_context) const {
257
- CachableEntry<BlockContents> filter_block;
258
-
259
- const Status s =
260
- GetOrReadFilterBlock(no_io, get_context, lookup_context, &filter_block,
261
- BlockType::kDeprecatedFilter);
262
- if (!s.ok()) {
263
- IGNORE_STATUS_IF_ERROR(s);
264
- return true;
265
- }
266
-
267
- assert(filter_block.GetValue());
268
-
269
- const char* data = nullptr;
270
- const char* offset = nullptr;
271
- size_t num = 0;
272
- size_t base_lg = 0;
273
- if (!ParseFieldsFromBlock(*filter_block.GetValue(), &data, &offset, &num,
274
- &base_lg)) {
275
- return true; // Errors are treated as potential matches
276
- }
277
-
278
- const uint64_t index = block_offset >> base_lg;
279
- if (index < num) {
280
- const uint32_t start = DecodeFixed32(offset + index * 4);
281
- const uint32_t limit = DecodeFixed32(offset + index * 4 + 4);
282
- if (start <= limit && limit <= (uint32_t)(offset - data)) {
283
- const Slice filter = Slice(data + start, limit - start);
284
-
285
- assert(table());
286
- assert(table()->get_rep());
287
-
288
- const bool may_match =
289
- DeprecatedBlockBasedBloomFilterPolicy::KeyMayMatch(entry, filter);
290
- if (may_match) {
291
- PERF_COUNTER_ADD(bloom_sst_hit_count, 1);
292
- return true;
293
- } else {
294
- PERF_COUNTER_ADD(bloom_sst_miss_count, 1);
295
- return false;
296
- }
297
- } else if (start == limit) {
298
- // Empty filters do not match any entries
299
- return false;
300
- }
301
- }
302
- return true; // Errors are treated as potential matches
303
- }
304
-
305
- size_t BlockBasedFilterBlockReader::ApproximateMemoryUsage() const {
306
- size_t usage = ApproximateFilterBlockMemoryUsage();
307
- #ifdef ROCKSDB_MALLOC_USABLE_SIZE
308
- usage += malloc_usable_size(const_cast<BlockBasedFilterBlockReader*>(this));
309
- #else
310
- usage += sizeof(*this);
311
- #endif // ROCKSDB_MALLOC_USABLE_SIZE
312
- return usage;
313
- }
314
-
315
- std::string BlockBasedFilterBlockReader::ToString() const {
316
- CachableEntry<BlockContents> filter_block;
317
-
318
- const Status s =
319
- GetOrReadFilterBlock(false /* no_io */, nullptr /* get_context */,
320
- nullptr /* lookup_context */, &filter_block,
321
- BlockType::kDeprecatedFilter);
322
- if (!s.ok()) {
323
- IGNORE_STATUS_IF_ERROR(s);
324
- return std::string("Unable to retrieve filter block");
325
- }
326
-
327
- assert(filter_block.GetValue());
328
-
329
- const char* data = nullptr;
330
- const char* offset = nullptr;
331
- size_t num = 0;
332
- size_t base_lg = 0;
333
- if (!ParseFieldsFromBlock(*filter_block.GetValue(), &data, &offset, &num,
334
- &base_lg)) {
335
- return std::string("Error parsing filter block");
336
- }
337
-
338
- std::string result;
339
- result.reserve(1024);
340
-
341
- std::string s_bo("Block offset"), s_hd("Hex dump"), s_fb("# filter blocks");
342
- AppendItem(&result, s_fb, std::to_string(num));
343
- AppendItem(&result, s_bo, s_hd);
344
-
345
- for (size_t index = 0; index < num; index++) {
346
- uint32_t start = DecodeFixed32(offset + index * 4);
347
- uint32_t limit = DecodeFixed32(offset + index * 4 + 4);
348
-
349
- if (start != limit) {
350
- result.append(" filter block # " + std::to_string(index + 1) + "\n");
351
- Slice filter = Slice(data + start, limit - start);
352
- AppendItem(&result, start, filter.ToString(true));
353
- }
354
- }
355
- return result;
356
- }
357
-
358
- } // namespace ROCKSDB_NAMESPACE