@nxtedition/rocksdb 7.1.10 → 7.1.13

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (51) hide show
  1. package/binding.cc +50 -33
  2. package/deps/rocksdb/rocksdb/CMakeLists.txt +2 -1
  3. package/deps/rocksdb/rocksdb/TARGETS +2 -0
  4. package/deps/rocksdb/rocksdb/db/db_basic_test.cc +60 -17
  5. package/deps/rocksdb/rocksdb/db/db_impl/compacted_db_impl.cc +4 -4
  6. package/deps/rocksdb/rocksdb/db/db_impl/db_impl.cc +81 -37
  7. package/deps/rocksdb/rocksdb/db/db_impl/db_impl.h +6 -0
  8. package/deps/rocksdb/rocksdb/db/db_impl/db_impl_readonly.cc +6 -6
  9. package/deps/rocksdb/rocksdb/db/db_impl/db_impl_secondary.cc +10 -8
  10. package/deps/rocksdb/rocksdb/db/db_impl/db_impl_write.cc +14 -9
  11. package/deps/rocksdb/rocksdb/db/db_memtable_test.cc +3 -3
  12. package/deps/rocksdb/rocksdb/db/db_write_buffer_manager_test.cc +69 -0
  13. package/deps/rocksdb/rocksdb/db/flush_job.cc +6 -6
  14. package/deps/rocksdb/rocksdb/db/memtable.cc +19 -7
  15. package/deps/rocksdb/rocksdb/db/memtable.h +8 -16
  16. package/deps/rocksdb/rocksdb/db/memtable_list.cc +27 -16
  17. package/deps/rocksdb/rocksdb/db/memtable_list.h +18 -11
  18. package/deps/rocksdb/rocksdb/db/memtable_list_test.cc +70 -55
  19. package/deps/rocksdb/rocksdb/db/table_cache.cc +9 -11
  20. package/deps/rocksdb/rocksdb/db/table_cache.h +2 -1
  21. package/deps/rocksdb/rocksdb/db/table_cache_sync_and_async.h +3 -3
  22. package/deps/rocksdb/rocksdb/db/version_set.cc +530 -257
  23. package/deps/rocksdb/rocksdb/db/version_set.h +32 -2
  24. package/deps/rocksdb/rocksdb/db/version_set_sync_and_async.h +2 -2
  25. package/deps/rocksdb/rocksdb/db/wide/db_wide_basic_test.cc +64 -12
  26. package/deps/rocksdb/rocksdb/db/wide/wide_columns.cc +18 -0
  27. package/deps/rocksdb/rocksdb/include/rocksdb/db.h +8 -0
  28. package/deps/rocksdb/rocksdb/include/rocksdb/options.h +13 -1
  29. package/deps/rocksdb/rocksdb/include/rocksdb/utilities/stackable_db.h +7 -0
  30. package/deps/rocksdb/rocksdb/include/rocksdb/wide_columns.h +83 -0
  31. package/deps/rocksdb/rocksdb/options/options.cc +4 -2
  32. package/deps/rocksdb/rocksdb/src.mk +1 -0
  33. package/deps/rocksdb/rocksdb/table/block_based/block_based_table_reader.cc +3 -10
  34. package/deps/rocksdb/rocksdb/table/block_based/block_based_table_reader_test.cc +5 -4
  35. package/deps/rocksdb/rocksdb/table/block_based/block_like_traits.h +10 -28
  36. package/deps/rocksdb/rocksdb/table/block_based/data_block_hash_index_test.cc +4 -4
  37. package/deps/rocksdb/rocksdb/table/cuckoo/cuckoo_table_reader_test.cc +11 -9
  38. package/deps/rocksdb/rocksdb/table/get_context.cc +34 -22
  39. package/deps/rocksdb/rocksdb/table/get_context.h +6 -3
  40. package/deps/rocksdb/rocksdb/table/multiget_context.h +69 -5
  41. package/deps/rocksdb/rocksdb/table/table_reader_bench.cc +2 -2
  42. package/deps/rocksdb/rocksdb/table/table_test.cc +8 -8
  43. package/deps/rocksdb/rocksdb/tools/db_bench_tool.cc +23 -0
  44. package/deps/rocksdb/rocksdb/utilities/fault_injection_secondary_cache.cc +27 -7
  45. package/deps/rocksdb/rocksdb/utilities/fault_injection_secondary_cache.h +8 -4
  46. package/deps/rocksdb/rocksdb.gyp +1 -0
  47. package/index.js +19 -12
  48. package/package.json +1 -1
  49. package/prebuilds/darwin-arm64/node.napi.node +0 -0
  50. package/prebuilds/darwin-x64/node.napi.node +0 -0
  51. package/prebuilds/linux-x64/node.napi.node +0 -0
@@ -41,17 +41,15 @@ void appendToReplayLog(std::string* replay_log, ValueType type, Slice value) {
41
41
 
42
42
  } // namespace
43
43
 
44
- GetContext::GetContext(const Comparator* ucmp,
45
- const MergeOperator* merge_operator, Logger* logger,
46
- Statistics* statistics, GetState init_state,
47
- const Slice& user_key, PinnableSlice* pinnable_val,
48
- std::string* timestamp, bool* value_found,
49
- MergeContext* merge_context, bool do_merge,
50
- SequenceNumber* _max_covering_tombstone_seq,
51
- SystemClock* clock, SequenceNumber* seq,
52
- PinnedIteratorsManager* _pinned_iters_mgr,
53
- ReadCallback* callback, bool* is_blob_index,
54
- uint64_t tracing_get_id, BlobFetcher* blob_fetcher)
44
+ GetContext::GetContext(
45
+ const Comparator* ucmp, const MergeOperator* merge_operator, Logger* logger,
46
+ Statistics* statistics, GetState init_state, const Slice& user_key,
47
+ PinnableSlice* pinnable_val, PinnableWideColumns* columns,
48
+ std::string* timestamp, bool* value_found, MergeContext* merge_context,
49
+ bool do_merge, SequenceNumber* _max_covering_tombstone_seq,
50
+ SystemClock* clock, SequenceNumber* seq,
51
+ PinnedIteratorsManager* _pinned_iters_mgr, ReadCallback* callback,
52
+ bool* is_blob_index, uint64_t tracing_get_id, BlobFetcher* blob_fetcher)
55
53
  : ucmp_(ucmp),
56
54
  merge_operator_(merge_operator),
57
55
  logger_(logger),
@@ -59,6 +57,7 @@ GetContext::GetContext(const Comparator* ucmp,
59
57
  state_(init_state),
60
58
  user_key_(user_key),
61
59
  pinnable_val_(pinnable_val),
60
+ columns_(columns),
62
61
  timestamp_(timestamp),
63
62
  value_found_(value_found),
64
63
  merge_context_(merge_context),
@@ -78,18 +77,22 @@ GetContext::GetContext(const Comparator* ucmp,
78
77
  sample_ = should_sample_file_read();
79
78
  }
80
79
 
81
- GetContext::GetContext(
82
- const Comparator* ucmp, const MergeOperator* merge_operator, Logger* logger,
83
- Statistics* statistics, GetState init_state, const Slice& user_key,
84
- PinnableSlice* pinnable_val, bool* value_found, MergeContext* merge_context,
85
- bool do_merge, SequenceNumber* _max_covering_tombstone_seq,
86
- SystemClock* clock, SequenceNumber* seq,
87
- PinnedIteratorsManager* _pinned_iters_mgr, ReadCallback* callback,
88
- bool* is_blob_index, uint64_t tracing_get_id, BlobFetcher* blob_fetcher)
80
+ GetContext::GetContext(const Comparator* ucmp,
81
+ const MergeOperator* merge_operator, Logger* logger,
82
+ Statistics* statistics, GetState init_state,
83
+ const Slice& user_key, PinnableSlice* pinnable_val,
84
+ PinnableWideColumns* columns, bool* value_found,
85
+ MergeContext* merge_context, bool do_merge,
86
+ SequenceNumber* _max_covering_tombstone_seq,
87
+ SystemClock* clock, SequenceNumber* seq,
88
+ PinnedIteratorsManager* _pinned_iters_mgr,
89
+ ReadCallback* callback, bool* is_blob_index,
90
+ uint64_t tracing_get_id, BlobFetcher* blob_fetcher)
89
91
  : GetContext(ucmp, merge_operator, logger, statistics, init_state, user_key,
90
- pinnable_val, nullptr, value_found, merge_context, do_merge,
91
- _max_covering_tombstone_seq, clock, seq, _pinned_iters_mgr,
92
- callback, is_blob_index, tracing_get_id, blob_fetcher) {}
92
+ pinnable_val, columns, /*timestamp=*/nullptr, value_found,
93
+ merge_context, do_merge, _max_covering_tombstone_seq, clock,
94
+ seq, _pinned_iters_mgr, callback, is_blob_index,
95
+ tracing_get_id, blob_fetcher) {}
93
96
 
94
97
  // Called from TableCache::Get and Table::Get when file/block in which
95
98
  // key may exist are not there in TableCache/BlockCache respectively. In this
@@ -291,6 +294,15 @@ bool GetContext::SaveValue(const ParsedInternalKey& parsed_key,
291
294
  // Otherwise copy the value
292
295
  pinnable_val_->PinSelf(value_to_use);
293
296
  }
297
+ } else if (columns_ != nullptr) {
298
+ if (type == kTypeWideColumnEntity) {
299
+ if (!columns_->SetWideColumnValue(value, value_pinner).ok()) {
300
+ state_ = kCorrupt;
301
+ return false;
302
+ }
303
+ } else {
304
+ columns_->SetPlainValue(value, value_pinner);
305
+ }
294
306
  }
295
307
  } else {
296
308
  // It means this function is called as part of DB GetMergeOperands
@@ -15,6 +15,7 @@ class Comparator;
15
15
  class Logger;
16
16
  class MergeContext;
17
17
  class MergeOperator;
18
+ class PinnableWideColumns;
18
19
  class PinnedIteratorsManager;
19
20
  class Statistics;
20
21
  class SystemClock;
@@ -101,7 +102,8 @@ class GetContext {
101
102
  // merge_context and they are never merged. The value pointer is untouched.
102
103
  GetContext(const Comparator* ucmp, const MergeOperator* merge_operator,
103
104
  Logger* logger, Statistics* statistics, GetState init_state,
104
- const Slice& user_key, PinnableSlice* value, bool* value_found,
105
+ const Slice& user_key, PinnableSlice* value,
106
+ PinnableWideColumns* columns, bool* value_found,
105
107
  MergeContext* merge_context, bool do_merge,
106
108
  SequenceNumber* max_covering_tombstone_seq, SystemClock* clock,
107
109
  SequenceNumber* seq = nullptr,
@@ -111,8 +113,8 @@ class GetContext {
111
113
  GetContext(const Comparator* ucmp, const MergeOperator* merge_operator,
112
114
  Logger* logger, Statistics* statistics, GetState init_state,
113
115
  const Slice& user_key, PinnableSlice* value,
114
- std::string* timestamp, bool* value_found,
115
- MergeContext* merge_context, bool do_merge,
116
+ PinnableWideColumns* columns, std::string* timestamp,
117
+ bool* value_found, MergeContext* merge_context, bool do_merge,
116
118
  SequenceNumber* max_covering_tombstone_seq, SystemClock* clock,
117
119
  SequenceNumber* seq = nullptr,
118
120
  PinnedIteratorsManager* _pinned_iters_mgr = nullptr,
@@ -186,6 +188,7 @@ class GetContext {
186
188
  GetState state_;
187
189
  Slice user_key_;
188
190
  PinnableSlice* pinnable_val_;
191
+ PinnableWideColumns* columns_;
189
192
  std::string* timestamp_;
190
193
  bool* value_found_; // Is value set correctly? Used by KeyMayExist
191
194
  MergeContext* merge_context_;
@@ -199,17 +199,24 @@ class MultiGetContext {
199
199
  : range_(range), ctx_(range->ctx_), index_(idx) {
200
200
  while (index_ < range_->end_ &&
201
201
  (Mask{1} << index_) &
202
- (range_->ctx_->value_mask_ | range_->skip_mask_))
202
+ (range_->ctx_->value_mask_ | range_->skip_mask_ |
203
+ range_->invalid_mask_))
203
204
  index_++;
204
205
  }
205
206
 
206
207
  Iterator(const Iterator&) = default;
208
+
209
+ Iterator(const Iterator& other, const Range* range)
210
+ : range_(range), ctx_(other.ctx_), index_(other.index_) {
211
+ assert(range->ctx_ == other.ctx_);
212
+ }
207
213
  Iterator& operator=(const Iterator&) = default;
208
214
 
209
215
  Iterator& operator++() {
210
216
  while (++index_ < range_->end_ &&
211
217
  (Mask{1} << index_) &
212
- (range_->ctx_->value_mask_ | range_->skip_mask_))
218
+ (range_->ctx_->value_mask_ | range_->skip_mask_ |
219
+ range_->invalid_mask_))
213
220
  ;
214
221
  return *this;
215
222
  }
@@ -247,9 +254,17 @@ class MultiGetContext {
247
254
  const Iterator& first,
248
255
  const Iterator& last) {
249
256
  ctx_ = mget_range.ctx_;
250
- start_ = first.index_;
251
- end_ = last.index_;
257
+ if (first == last) {
258
+ // This means create an empty range based on mget_range. So just
259
+ // set start_ and end_ to the same value
260
+ start_ = mget_range.start_;
261
+ end_ = start_;
262
+ } else {
263
+ start_ = first.index_;
264
+ end_ = last.index_;
265
+ }
252
266
  skip_mask_ = mget_range.skip_mask_;
267
+ invalid_mask_ = mget_range.invalid_mask_;
253
268
  assert(start_ < 64);
254
269
  assert(end_ < 64);
255
270
  }
@@ -305,18 +320,67 @@ class MultiGetContext {
305
320
  }
306
321
  }
307
322
 
323
+ // The += operator expands the number of keys in this range. The expansion
324
+ // is always to the right, i.e start of the additional range >= end of
325
+ // current range. There should be no overlap. Any skipped keys in rhs are
326
+ // marked as invalid in the invalid_mask_.
327
+ Range& operator+=(const Range& rhs) {
328
+ assert(rhs.start_ >= end_);
329
+ // Check for non-overlapping ranges and adjust invalid_mask_ accordingly
330
+ if (end_ < rhs.start_) {
331
+ invalid_mask_ |= RangeMask(end_, rhs.start_);
332
+ skip_mask_ |= RangeMask(end_, rhs.start_);
333
+ }
334
+ start_ = std::min<size_t>(start_, rhs.start_);
335
+ end_ = std::max<size_t>(end_, rhs.end_);
336
+ skip_mask_ |= rhs.skip_mask_ & RangeMask(rhs.start_, rhs.end_);
337
+ invalid_mask_ |= (rhs.invalid_mask_ | rhs.skip_mask_) &
338
+ RangeMask(rhs.start_, rhs.end_);
339
+ assert(start_ < 64);
340
+ assert(end_ < 64);
341
+ return *this;
342
+ }
343
+
344
+ // The -= operator removes keys from this range. The removed keys should
345
+ // come from a range completely overlapping the current range. The removed
346
+ // keys are marked invalid in the invalid_mask_.
347
+ Range& operator-=(const Range& rhs) {
348
+ assert(start_ <= rhs.start_ && end_ >= rhs.end_);
349
+ skip_mask_ |= (~rhs.skip_mask_ | rhs.invalid_mask_) &
350
+ RangeMask(rhs.start_, rhs.end_);
351
+ invalid_mask_ |= (~rhs.skip_mask_ | rhs.invalid_mask_) &
352
+ RangeMask(rhs.start_, rhs.end_);
353
+ return *this;
354
+ }
355
+
356
+ // Return a complement of the current range
357
+ Range operator~() {
358
+ Range res = *this;
359
+ res.skip_mask_ = ~skip_mask_ & RangeMask(start_, end_);
360
+ return res;
361
+ }
362
+
308
363
  private:
309
364
  friend MultiGetContext;
310
365
  MultiGetContext* ctx_;
311
366
  size_t start_;
312
367
  size_t end_;
313
368
  Mask skip_mask_;
369
+ Mask invalid_mask_;
314
370
 
315
371
  Range(MultiGetContext* ctx, size_t num_keys)
316
- : ctx_(ctx), start_(0), end_(num_keys), skip_mask_(0) {
372
+ : ctx_(ctx),
373
+ start_(0),
374
+ end_(num_keys),
375
+ skip_mask_(0),
376
+ invalid_mask_(0) {
317
377
  assert(num_keys < 64);
318
378
  }
319
379
 
380
+ static Mask RangeMask(size_t start, size_t end) {
381
+ return (((Mask{1} << (end - start)) - 1) << start);
382
+ }
383
+
320
384
  Mask RemainingMask() const {
321
385
  return (((Mask{1} << end_) - 1) & ~((Mask{1} << start_) - 1) &
322
386
  ~(ctx_->value_mask_ | skip_mask_));
@@ -177,8 +177,8 @@ void TableReaderBenchmark(Options& opts, EnvOptions& env_options,
177
177
  GetContext get_context(
178
178
  ioptions.user_comparator, ioptions.merge_operator.get(),
179
179
  ioptions.logger, ioptions.stats, GetContext::kNotFound,
180
- Slice(key), &value, nullptr, &merge_context, true,
181
- &max_covering_tombstone_seq, clock);
180
+ Slice(key), &value, /*columns=*/nullptr, /*timestamp=*/nullptr,
181
+ &merge_context, true, &max_covering_tombstone_seq, clock);
182
182
  s = table_reader->Get(read_options, key, &get_context, nullptr);
183
183
  } else {
184
184
  s = db->Get(read_options, key, &result);
@@ -3036,8 +3036,8 @@ TEST_P(BlockBasedTableTest, TracingGetTest) {
3036
3036
  PinnableSlice value;
3037
3037
  GetContext get_context(options.comparator, nullptr, nullptr, nullptr,
3038
3038
  GetContext::kNotFound, user_key, &value, nullptr,
3039
- nullptr, true, nullptr, nullptr, nullptr, nullptr,
3040
- nullptr, nullptr, /*tracing_get_id=*/i);
3039
+ nullptr, nullptr, true, nullptr, nullptr, nullptr,
3040
+ nullptr, nullptr, nullptr, /*tracing_get_id=*/i);
3041
3041
  get_perf_context()->Reset();
3042
3042
  ASSERT_OK(c.GetTableReader()->Get(ReadOptions(), encoded_key, &get_context,
3043
3043
  moptions.prefix_extractor.get()));
@@ -3293,7 +3293,7 @@ TEST_P(BlockBasedTableTest, BlockCacheDisabledTest) {
3293
3293
  {
3294
3294
  GetContext get_context(options.comparator, nullptr, nullptr, nullptr,
3295
3295
  GetContext::kNotFound, Slice(), nullptr, nullptr,
3296
- nullptr, true, nullptr, nullptr);
3296
+ nullptr, nullptr, true, nullptr, nullptr);
3297
3297
  // a hack that just to trigger BlockBasedTable::GetFilter.
3298
3298
  ASSERT_OK(reader->Get(ReadOptions(), "non-exist-key", &get_context,
3299
3299
  moptions.prefix_extractor.get()));
@@ -3471,7 +3471,7 @@ TEST_P(BlockBasedTableTest, FilterBlockInBlockCache) {
3471
3471
  PinnableSlice value;
3472
3472
  GetContext get_context(options.comparator, nullptr, nullptr, nullptr,
3473
3473
  GetContext::kNotFound, user_key, &value, nullptr,
3474
- nullptr, true, nullptr, nullptr);
3474
+ nullptr, nullptr, true, nullptr, nullptr);
3475
3475
  ASSERT_OK(reader->Get(ReadOptions(), internal_key.Encode(), &get_context,
3476
3476
  moptions4.prefix_extractor.get()));
3477
3477
  ASSERT_STREQ(value.data(), "hello");
@@ -3558,7 +3558,7 @@ TEST_P(BlockBasedTableTest, BlockReadCountTest) {
3558
3558
  {
3559
3559
  GetContext get_context(options.comparator, nullptr, nullptr, nullptr,
3560
3560
  GetContext::kNotFound, user_key, &value, nullptr,
3561
- nullptr, true, nullptr, nullptr);
3561
+ nullptr, nullptr, true, nullptr, nullptr);
3562
3562
  get_perf_context()->Reset();
3563
3563
  ASSERT_OK(reader->Get(ReadOptions(), encoded_key, &get_context,
3564
3564
  moptions.prefix_extractor.get()));
@@ -3584,7 +3584,7 @@ TEST_P(BlockBasedTableTest, BlockReadCountTest) {
3584
3584
  {
3585
3585
  GetContext get_context(options.comparator, nullptr, nullptr, nullptr,
3586
3586
  GetContext::kNotFound, user_key, &value, nullptr,
3587
- nullptr, true, nullptr, nullptr);
3587
+ nullptr, nullptr, true, nullptr, nullptr);
3588
3588
  get_perf_context()->Reset();
3589
3589
  ASSERT_OK(reader->Get(ReadOptions(), encoded_key, &get_context,
3590
3590
  moptions.prefix_extractor.get()));
@@ -5149,7 +5149,7 @@ TEST_P(BlockBasedTableTest, DataBlockHashIndex) {
5149
5149
  std::string user_key = ExtractUserKey(kv.first).ToString();
5150
5150
  GetContext get_context(options.comparator, nullptr, nullptr, nullptr,
5151
5151
  GetContext::kNotFound, user_key, &value, nullptr,
5152
- nullptr, true, nullptr, nullptr);
5152
+ nullptr, nullptr, true, nullptr, nullptr);
5153
5153
  ASSERT_OK(reader->Get(ro, kv.first, &get_context,
5154
5154
  moptions.prefix_extractor.get()));
5155
5155
  ASSERT_EQ(get_context.State(), GetContext::kFound);
@@ -5175,7 +5175,7 @@ TEST_P(BlockBasedTableTest, DataBlockHashIndex) {
5175
5175
  PinnableSlice value;
5176
5176
  GetContext get_context(options.comparator, nullptr, nullptr, nullptr,
5177
5177
  GetContext::kNotFound, user_key, &value, nullptr,
5178
- nullptr, true, nullptr, nullptr);
5178
+ nullptr, nullptr, true, nullptr, nullptr);
5179
5179
  ASSERT_OK(reader->Get(ro, encoded_key, &get_context,
5180
5180
  moptions.prefix_extractor.get()));
5181
5181
  ASSERT_EQ(get_context.State(), GetContext::kNotFound);
@@ -1177,6 +1177,10 @@ DEFINE_bool(async_io, false,
1177
1177
  "When set true, RocksDB does asynchronous reads for internal auto "
1178
1178
  "readahead prefetching.");
1179
1179
 
1180
+ DEFINE_bool(optimize_multiget_for_io, true,
1181
+ "When set true, RocksDB does asynchronous reads for SST files in "
1182
+ "multiple levels for MultiGet.");
1183
+
1180
1184
  DEFINE_bool(charge_compression_dictionary_building_buffer, false,
1181
1185
  "Setting for "
1182
1186
  "CacheEntryRoleOptions::charged of "
@@ -1218,6 +1222,20 @@ DEFINE_string(backup_dir, "",
1218
1222
  DEFINE_string(restore_dir, "",
1219
1223
  "If not empty string, use the given dir for restore.");
1220
1224
 
1225
+ DEFINE_uint64(
1226
+ initial_auto_readahead_size,
1227
+ ROCKSDB_NAMESPACE::BlockBasedTableOptions().initial_auto_readahead_size,
1228
+ "RocksDB does auto-readahead for iterators on noticing more than two reads "
1229
+ "for a table file if user doesn't provide readahead_size. The readahead "
1230
+ "size starts at initial_auto_readahead_size");
1231
+
1232
+ DEFINE_uint64(
1233
+ max_auto_readahead_size,
1234
+ ROCKSDB_NAMESPACE::BlockBasedTableOptions().max_auto_readahead_size,
1235
+ "Rocksdb implicit readahead starts at "
1236
+ "BlockBasedTableOptions.initial_auto_readahead_size and doubles on every "
1237
+ "additional read upto max_auto_readahead_size");
1238
+
1221
1239
  static enum ROCKSDB_NAMESPACE::CompressionType StringToCompressionType(
1222
1240
  const char* ctype) {
1223
1241
  assert(ctype);
@@ -3350,6 +3368,7 @@ class Benchmark {
3350
3368
  read_options_.readahead_size = FLAGS_readahead_size;
3351
3369
  read_options_.adaptive_readahead = FLAGS_adaptive_readahead;
3352
3370
  read_options_.async_io = FLAGS_async_io;
3371
+ read_options_.optimize_multiget_for_io = FLAGS_optimize_multiget_for_io;
3353
3372
 
3354
3373
  void (Benchmark::*method)(ThreadState*) = nullptr;
3355
3374
  void (Benchmark::*post_process_method)() = nullptr;
@@ -4349,6 +4368,10 @@ class Benchmark {
4349
4368
  FLAGS_enable_index_compression;
4350
4369
  block_based_options.block_align = FLAGS_block_align;
4351
4370
  block_based_options.whole_key_filtering = FLAGS_whole_key_filtering;
4371
+ block_based_options.max_auto_readahead_size =
4372
+ FLAGS_max_auto_readahead_size;
4373
+ block_based_options.initial_auto_readahead_size =
4374
+ FLAGS_initial_auto_readahead_size;
4352
4375
  BlockBasedTableOptions::PrepopulateBlockCache prepopulate_block_cache =
4353
4376
  block_based_options.prepopulate_block_cache;
4354
4377
  switch (FLAGS_prepopulate_block_cache) {
@@ -88,14 +88,22 @@ std::unique_ptr<SecondaryCacheResultHandle>
88
88
  FaultInjectionSecondaryCache::Lookup(const Slice& key,
89
89
  const Cache::CreateCallback& create_cb,
90
90
  bool wait, bool& is_in_sec_cache) {
91
- std::unique_ptr<SecondaryCacheResultHandle> hdl =
92
- base_->Lookup(key, create_cb, wait, is_in_sec_cache);
93
91
  ErrorContext* ctx = GetErrorContext();
94
- if (wait && ctx->rand.OneIn(prob_)) {
95
- hdl.reset();
92
+ if (base_is_compressed_sec_cache_) {
93
+ if (ctx->rand.OneIn(prob_)) {
94
+ return nullptr;
95
+ } else {
96
+ return base_->Lookup(key, create_cb, wait, is_in_sec_cache);
97
+ }
98
+ } else {
99
+ std::unique_ptr<SecondaryCacheResultHandle> hdl =
100
+ base_->Lookup(key, create_cb, wait, is_in_sec_cache);
101
+ if (wait && ctx->rand.OneIn(prob_)) {
102
+ hdl.reset();
103
+ }
104
+ return std::unique_ptr<FaultInjectionSecondaryCache::ResultHandle>(
105
+ new FaultInjectionSecondaryCache::ResultHandle(this, std::move(hdl)));
96
106
  }
97
- return std::unique_ptr<FaultInjectionSecondaryCache::ResultHandle>(
98
- new FaultInjectionSecondaryCache::ResultHandle(this, std::move(hdl)));
99
107
  }
100
108
 
101
109
  void FaultInjectionSecondaryCache::Erase(const Slice& key) {
@@ -104,7 +112,19 @@ void FaultInjectionSecondaryCache::Erase(const Slice& key) {
104
112
 
105
113
  void FaultInjectionSecondaryCache::WaitAll(
106
114
  std::vector<SecondaryCacheResultHandle*> handles) {
107
- FaultInjectionSecondaryCache::ResultHandle::WaitAll(this, handles);
115
+ if (base_is_compressed_sec_cache_) {
116
+ ErrorContext* ctx = GetErrorContext();
117
+ std::vector<SecondaryCacheResultHandle*> base_handles;
118
+ for (SecondaryCacheResultHandle* hdl : handles) {
119
+ if (ctx->rand.OneIn(prob_)) {
120
+ continue;
121
+ }
122
+ base_handles.push_back(hdl);
123
+ }
124
+ base_->WaitAll(base_handles);
125
+ } else {
126
+ FaultInjectionSecondaryCache::ResultHandle::WaitAll(this, handles);
127
+ }
108
128
  }
109
129
 
110
130
  } // namespace ROCKSDB_NAMESPACE
@@ -22,6 +22,9 @@ class FaultInjectionSecondaryCache : public SecondaryCache {
22
22
  seed_(seed),
23
23
  prob_(prob),
24
24
  thread_local_error_(new ThreadLocalPtr(DeleteThreadLocalErrorContext)) {
25
+ if (std::strcmp(base_->Name(), "CompressedSecondaryCache") == 0) {
26
+ base_is_compressed_sec_cache_ = true;
27
+ }
25
28
  }
26
29
 
27
30
  virtual ~FaultInjectionSecondaryCache() override {}
@@ -35,13 +38,13 @@ class FaultInjectionSecondaryCache : public SecondaryCache {
35
38
  const Slice& key, const Cache::CreateCallback& create_cb, bool wait,
36
39
  bool& is_in_sec_cache) override;
37
40
 
38
- void Erase(const Slice& /*key*/) override;
41
+ void Erase(const Slice& key) override;
39
42
 
40
43
  void WaitAll(std::vector<SecondaryCacheResultHandle*> handles) override;
41
44
 
42
- std::string GetPrintableOptions() const override { return ""; }
43
-
44
- void EnableErrorInjection(uint64_t prob);
45
+ std::string GetPrintableOptions() const override {
46
+ return base_->GetPrintableOptions();
47
+ }
45
48
 
46
49
  private:
47
50
  class ResultHandle : public SecondaryCacheResultHandle {
@@ -80,6 +83,7 @@ class FaultInjectionSecondaryCache : public SecondaryCache {
80
83
  const std::shared_ptr<SecondaryCache> base_;
81
84
  uint32_t seed_;
82
85
  int prob_;
86
+ bool base_is_compressed_sec_cache_{false};
83
87
 
84
88
  struct ErrorContext {
85
89
  Random rand;
@@ -251,6 +251,7 @@
251
251
  "rocksdb/db/wal_edit.cc",
252
252
  "rocksdb/db/wal_manager.cc",
253
253
  "rocksdb/db/wide/wide_column_serialization.cc",
254
+ "rocksdb/db/wide/wide_columns.cc",
254
255
  "rocksdb/db/write_batch.cc",
255
256
  "rocksdb/db/write_batch_base.cc",
256
257
  "rocksdb/db/write_controller.cc",
package/index.js CHANGED
@@ -203,26 +203,20 @@ class RocksLevel extends AbstractLevel {
203
203
  _chainedBatch () {
204
204
  return new ChainedBatch(this, this[kContext], (batch, context, options, callback) => {
205
205
  try {
206
- const seq = this.sequence
207
- let sync = true
208
206
  this[kRef]()
209
- binding.batch_write(this[kContext], context, options, (err) => {
207
+ binding.batch_write(this[kContext], context, options, (err, sequence) => {
210
208
  this[kUnref]()
211
209
 
212
210
  if (!err) {
213
211
  this.emit('update', {
214
212
  rows: batch.toArray(),
215
213
  count: batch.length,
216
- sequence: seq + 1
214
+ sequence: sequence
217
215
  })
218
216
  }
219
- if (sync) {
220
- process.nextTick(callback, err)
221
- } else {
222
- callback(err)
223
- }
217
+
218
+ callback(err)
224
219
  })
225
- sync = false
226
220
  } catch (err) {
227
221
  process.nextTick(callback, err)
228
222
  }
@@ -233,8 +227,21 @@ class RocksLevel extends AbstractLevel {
233
227
  callback = fromCallback(callback, kPromise)
234
228
 
235
229
  try {
236
- binding.batch_do(this[kContext], operations, options ?? EMPTY)
237
- process.nextTick(callback, null)
230
+ this[kRef]()
231
+ binding.batch_do(this[kContext], operations, options ?? EMPTY, (err, sequence) => {
232
+ this[kUnref]()
233
+
234
+ // TODO (fix)
235
+ // if (!err) {
236
+ // this.emit('update', {
237
+ // rows: batch.toArray(),
238
+ // count: batch.length,
239
+ // sequence: sequence
240
+ // })
241
+ // }
242
+
243
+ callback(err)
244
+ })
238
245
  } catch (err) {
239
246
  process.nextTick(callback, err)
240
247
  }
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "@nxtedition/rocksdb",
3
- "version": "7.1.10",
3
+ "version": "7.1.13",
4
4
  "description": "A low-level Node.js RocksDB binding",
5
5
  "license": "MIT",
6
6
  "main": "index.js",
Binary file
Binary file