@nxtedition/rocksdb 7.0.0 → 7.0.3

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (84)
  1. package/binding.cc +38 -40
  2. package/deps/rocksdb/rocksdb/CMakeLists.txt +1 -1
  3. package/deps/rocksdb/rocksdb/cache/cache_bench_tool.cc +3 -1
  4. package/deps/rocksdb/rocksdb/cache/cache_entry_roles.cc +2 -0
  5. package/deps/rocksdb/rocksdb/cache/cache_reservation_manager.cc +1 -0
  6. package/deps/rocksdb/rocksdb/cache/cache_reservation_manager.h +28 -0
  7. package/deps/rocksdb/rocksdb/cache/cache_test.cc +5 -2
  8. package/deps/rocksdb/rocksdb/cache/fast_lru_cache.cc +48 -60
  9. package/deps/rocksdb/rocksdb/cache/fast_lru_cache.h +18 -20
  10. package/deps/rocksdb/rocksdb/cache/lru_cache_test.cc +2 -2
  11. package/deps/rocksdb/rocksdb/db/c.cc +5 -0
  12. package/deps/rocksdb/rocksdb/db/column_family.cc +20 -0
  13. package/deps/rocksdb/rocksdb/db/column_family.h +9 -0
  14. package/deps/rocksdb/rocksdb/db/compaction/compaction_iterator.cc +44 -26
  15. package/deps/rocksdb/rocksdb/db/comparator_db_test.cc +32 -14
  16. package/deps/rocksdb/rocksdb/db/db_basic_test.cc +73 -44
  17. package/deps/rocksdb/rocksdb/db/db_block_cache_test.cc +3 -1
  18. package/deps/rocksdb/rocksdb/db/db_impl/db_impl.cc +6 -1
  19. package/deps/rocksdb/rocksdb/db/db_impl/db_impl.h +10 -5
  20. package/deps/rocksdb/rocksdb/db/db_impl/db_impl_files.cc +47 -35
  21. package/deps/rocksdb/rocksdb/db/db_impl/db_impl_open.cc +2 -1
  22. package/deps/rocksdb/rocksdb/db/db_impl/db_impl_write.cc +54 -32
  23. package/deps/rocksdb/rocksdb/db/db_kv_checksum_test.cc +426 -61
  24. package/deps/rocksdb/rocksdb/db/db_options_test.cc +1 -0
  25. package/deps/rocksdb/rocksdb/db/db_test.cc +102 -24
  26. package/deps/rocksdb/rocksdb/db/db_test2.cc +159 -30
  27. package/deps/rocksdb/rocksdb/db/db_test_util.cc +1 -0
  28. package/deps/rocksdb/rocksdb/db/dbformat.h +1 -1
  29. package/deps/rocksdb/rocksdb/db/version_builder.cc +39 -10
  30. package/deps/rocksdb/rocksdb/db/version_builder.h +4 -1
  31. package/deps/rocksdb/rocksdb/db/version_edit.h +20 -0
  32. package/deps/rocksdb/rocksdb/db/version_set.cc +2 -1
  33. package/deps/rocksdb/rocksdb/db/version_set.h +17 -2
  34. package/deps/rocksdb/rocksdb/db/version_set_test.cc +119 -0
  35. package/deps/rocksdb/rocksdb/db/write_batch.cc +96 -0
  36. package/deps/rocksdb/rocksdb/db/write_batch_internal.h +4 -0
  37. package/deps/rocksdb/rocksdb/db/write_thread.cc +1 -0
  38. package/deps/rocksdb/rocksdb/db_stress_tool/db_stress_common.h +3 -0
  39. package/deps/rocksdb/rocksdb/db_stress_tool/db_stress_gflags.cc +9 -0
  40. package/deps/rocksdb/rocksdb/db_stress_tool/db_stress_test_base.cc +18 -2
  41. package/deps/rocksdb/rocksdb/db_stress_tool/db_stress_test_base.h +4 -0
  42. package/deps/rocksdb/rocksdb/db_stress_tool/multi_ops_txns_stress.cc +12 -0
  43. package/deps/rocksdb/rocksdb/db_stress_tool/no_batched_ops_stress.cc +1 -1
  44. package/deps/rocksdb/rocksdb/env/fs_posix.cc +96 -6
  45. package/deps/rocksdb/rocksdb/env/io_posix.cc +51 -18
  46. package/deps/rocksdb/rocksdb/env/io_posix.h +2 -0
  47. package/deps/rocksdb/rocksdb/file/file_prefetch_buffer.cc +12 -5
  48. package/deps/rocksdb/rocksdb/file/file_prefetch_buffer.h +22 -6
  49. package/deps/rocksdb/rocksdb/file/prefetch_test.cc +99 -8
  50. package/deps/rocksdb/rocksdb/include/rocksdb/advanced_options.h +9 -1
  51. package/deps/rocksdb/rocksdb/include/rocksdb/c.h +3 -0
  52. package/deps/rocksdb/rocksdb/include/rocksdb/cache.h +3 -0
  53. package/deps/rocksdb/rocksdb/include/rocksdb/comparator.h +4 -0
  54. package/deps/rocksdb/rocksdb/include/rocksdb/file_system.h +1 -1
  55. package/deps/rocksdb/rocksdb/include/rocksdb/io_status.h +7 -0
  56. package/deps/rocksdb/rocksdb/include/rocksdb/options.h +11 -1
  57. package/deps/rocksdb/rocksdb/include/rocksdb/slice_transform.h +4 -1
  58. package/deps/rocksdb/rocksdb/include/rocksdb/table.h +14 -1
  59. package/deps/rocksdb/rocksdb/include/rocksdb/write_batch.h +6 -0
  60. package/deps/rocksdb/rocksdb/options/cf_options.cc +12 -1
  61. package/deps/rocksdb/rocksdb/options/cf_options.h +2 -0
  62. package/deps/rocksdb/rocksdb/options/options.cc +8 -1
  63. package/deps/rocksdb/rocksdb/options/options_helper.cc +1 -0
  64. package/deps/rocksdb/rocksdb/options/options_parser.cc +2 -1
  65. package/deps/rocksdb/rocksdb/options/options_settable_test.cc +7 -2
  66. package/deps/rocksdb/rocksdb/options/options_test.cc +52 -0
  67. package/deps/rocksdb/rocksdb/port/port_posix.h +10 -1
  68. package/deps/rocksdb/rocksdb/table/block_based/block_based_table_factory.cc +1 -1
  69. package/deps/rocksdb/rocksdb/table/block_based/block_based_table_iterator.cc +1 -1
  70. package/deps/rocksdb/rocksdb/table/block_based/block_based_table_reader.cc +1 -1
  71. package/deps/rocksdb/rocksdb/table/block_based/block_based_table_reader.h +5 -5
  72. package/deps/rocksdb/rocksdb/table/block_based/block_prefetcher.cc +16 -10
  73. package/deps/rocksdb/rocksdb/table/block_based/block_prefetcher.h +1 -1
  74. package/deps/rocksdb/rocksdb/table/block_based/partitioned_filter_block.cc +1 -1
  75. package/deps/rocksdb/rocksdb/table/block_based/partitioned_index_iterator.cc +4 -4
  76. package/deps/rocksdb/rocksdb/table/block_based/partitioned_index_reader.cc +1 -1
  77. package/deps/rocksdb/rocksdb/tools/db_bench_tool.cc +39 -12
  78. package/deps/rocksdb/rocksdb/util/comparator.cc +10 -0
  79. package/deps/rocksdb/rocksdb/util/ribbon_alg.h +1 -1
  80. package/deps/rocksdb/rocksdb/util/xxhash.h +2 -1
  81. package/index.js +2 -2
  82. package/package.json +1 -1
  83. package/prebuilds/darwin-arm64/node.napi.node +0 -0
  84. package/prebuilds/linux-x64/node.napi.node +0 -0
@@ -158,7 +158,7 @@ static inline void AsmVolatilePause() {
158
158
  #if defined(__i386__) || defined(__x86_64__)
159
159
  asm volatile("pause");
160
160
  #elif defined(__aarch64__)
161
- asm volatile("yield");
161
+ asm volatile("isb");
162
162
  #elif defined(__powerpc64__)
163
163
  asm volatile("or 27,27,27");
164
164
  #endif
@@ -202,7 +202,16 @@ extern void *cacheline_aligned_alloc(size_t size);
202
202
 
203
203
  extern void cacheline_aligned_free(void *memblock);
204
204
 
205
+ #if defined(__aarch64__)
206
+ // __builtin_prefetch(..., 1) turns into a prefetch into prfm pldl3keep. On
207
+ // arm64 we want this as close to the core as possible to turn it into a
208
+ // L1 prefetech unless locality == 0 in which case it will be turned into a
209
+ // non-temporal prefetch
210
+ #define PREFETCH(addr, rw, locality) \
211
+ __builtin_prefetch(addr, rw, locality >= 1 ? 3 : locality)
212
+ #else
205
213
  #define PREFETCH(addr, rw, locality) __builtin_prefetch(addr, rw, locality)
214
+ #endif
206
215
 
207
216
  extern void Crash(const std::string& srcfile, int srcline);
208
217
 
@@ -695,7 +695,7 @@ Status BlockBasedTableFactory::ValidateOptions(
695
695
  static const std::set<CacheEntryRole> kMemoryChargingSupported = {
696
696
  CacheEntryRole::kCompressionDictionaryBuildingBuffer,
697
697
  CacheEntryRole::kFilterConstruction,
698
- CacheEntryRole::kBlockBasedTableReader};
698
+ CacheEntryRole::kBlockBasedTableReader, CacheEntryRole::kFileMetadata};
699
699
  if (options.charged != CacheEntryRoleOptions::Decision::kFallback &&
700
700
  kMemoryChargingSupported.count(role) == 0) {
701
701
  return Status::NotSupported(
@@ -257,7 +257,7 @@ void BlockBasedTableIterator::InitDataBlock() {
257
257
  // Enabled from the very first IO when ReadOptions.readahead_size is set.
258
258
  block_prefetcher_.PrefetchIfNeeded(
259
259
  rep, data_block_handle, read_options_.readahead_size, is_for_compaction,
260
- read_options_.async_io, read_options_.rate_limiter_priority);
260
+ /*async_io=*/false, read_options_.rate_limiter_priority);
261
261
  Status s;
262
262
  table_->NewDataBlockIterator<DataBlockIter>(
263
263
  read_options_, data_block_handle, &block_iter_, BlockType::kData,
@@ -563,7 +563,7 @@ void BlockBasedTable::SetupBaseCacheKey(const TableProperties* properties,
563
563
  // assert(!db_id.empty());
564
564
 
565
565
  // Minimum block size is 5 bytes; therefore we can trim off two lower bits
566
- // from offets. See GetCacheKey.
566
+ // from offsets. See GetCacheKey.
567
567
  *out_base_cache_key = OffsetableCacheKey(db_id, db_session_id, file_num,
568
568
  /*max_offset*/ file_size >> 2);
569
569
  }
@@ -662,21 +662,21 @@ struct BlockBasedTable::Rep {
662
662
  size_t max_readahead_size,
663
663
  std::unique_ptr<FilePrefetchBuffer>* fpb,
664
664
  bool implicit_auto_readahead,
665
- bool async_io) const {
665
+ uint64_t num_file_reads) const {
666
666
  fpb->reset(new FilePrefetchBuffer(
667
667
  readahead_size, max_readahead_size,
668
668
  !ioptions.allow_mmap_reads /* enable */, false /* track_min_offset */,
669
- implicit_auto_readahead, async_io, ioptions.fs.get(), ioptions.clock,
670
- ioptions.stats));
669
+ implicit_auto_readahead, num_file_reads, ioptions.fs.get(),
670
+ ioptions.clock, ioptions.stats));
671
671
  }
672
672
 
673
673
  void CreateFilePrefetchBufferIfNotExists(
674
674
  size_t readahead_size, size_t max_readahead_size,
675
675
  std::unique_ptr<FilePrefetchBuffer>* fpb, bool implicit_auto_readahead,
676
- bool async_io) const {
676
+ uint64_t num_file_reads) const {
677
677
  if (!(*fpb)) {
678
678
  CreateFilePrefetchBuffer(readahead_size, max_readahead_size, fpb,
679
- implicit_auto_readahead, async_io);
679
+ implicit_auto_readahead, num_file_reads);
680
680
  }
681
681
  }
682
682
 
@@ -16,17 +16,21 @@ void BlockPrefetcher::PrefetchIfNeeded(
16
16
  const BlockBasedTable::Rep* rep, const BlockHandle& handle,
17
17
  const size_t readahead_size, bool is_for_compaction, const bool async_io,
18
18
  const Env::IOPriority rate_limiter_priority) {
19
+ // num_file_reads is used by FilePrefetchBuffer only when
20
+ // implicit_auto_readahead is set.
19
21
  if (is_for_compaction) {
20
22
  rep->CreateFilePrefetchBufferIfNotExists(
21
23
  compaction_readahead_size_, compaction_readahead_size_,
22
- &prefetch_buffer_, false, async_io);
24
+ &prefetch_buffer_, /*implicit_auto_readahead=*/false,
25
+ /*num_file_reads=*/0);
23
26
  return;
24
27
  }
25
28
 
26
29
  // Explicit user requested readahead.
27
30
  if (readahead_size > 0) {
28
31
  rep->CreateFilePrefetchBufferIfNotExists(
29
- readahead_size, readahead_size, &prefetch_buffer_, false, async_io);
32
+ readahead_size, readahead_size, &prefetch_buffer_,
33
+ /*implicit_auto_readahead=*/false, /*num_file_reads=*/0);
30
34
  return;
31
35
  }
32
36
 
@@ -39,11 +43,13 @@ void BlockPrefetcher::PrefetchIfNeeded(
39
43
  return;
40
44
  }
41
45
 
42
- // In case of async_io, it always creates the PrefetchBuffer.
46
+ // In case of async_io, always creates the PrefetchBuffer irrespective of
47
+ // num_file_reads_.
43
48
  if (async_io) {
44
49
  rep->CreateFilePrefetchBufferIfNotExists(
45
50
  initial_auto_readahead_size_, max_auto_readahead_size,
46
- &prefetch_buffer_, /*implicit_auto_readahead=*/true, async_io);
51
+ &prefetch_buffer_, /*implicit_auto_readahead=*/true,
52
+ /*num_file_reads=*/0);
47
53
  return;
48
54
  }
49
55
 
@@ -78,9 +84,9 @@ void BlockPrefetcher::PrefetchIfNeeded(
78
84
  }
79
85
 
80
86
  if (rep->file->use_direct_io()) {
81
- rep->CreateFilePrefetchBufferIfNotExists(initial_auto_readahead_size_,
82
- max_auto_readahead_size,
83
- &prefetch_buffer_, true, async_io);
87
+ rep->CreateFilePrefetchBufferIfNotExists(
88
+ initial_auto_readahead_size_, max_auto_readahead_size,
89
+ &prefetch_buffer_, /*implicit_auto_readahead=*/true, num_file_reads_);
84
90
  return;
85
91
  }
86
92
 
@@ -96,9 +102,9 @@ void BlockPrefetcher::PrefetchIfNeeded(
96
102
  BlockBasedTable::BlockSizeWithTrailer(handle) + readahead_size_,
97
103
  rate_limiter_priority);
98
104
  if (s.IsNotSupported()) {
99
- rep->CreateFilePrefetchBufferIfNotExists(initial_auto_readahead_size_,
100
- max_auto_readahead_size,
101
- &prefetch_buffer_, true, async_io);
105
+ rep->CreateFilePrefetchBufferIfNotExists(
106
+ initial_auto_readahead_size_, max_auto_readahead_size,
107
+ &prefetch_buffer_, /*implicit_auto_readahead=*/true, num_file_reads_);
102
108
  return;
103
109
  }
104
110
 
@@ -63,7 +63,7 @@ class BlockPrefetcher {
63
63
  // initial_auto_readahead_size_ is used if RocksDB uses internal prefetch
64
64
  // buffer.
65
65
  uint64_t initial_auto_readahead_size_;
66
- int64_t num_file_reads_ = 0;
66
+ uint64_t num_file_reads_ = 0;
67
67
  uint64_t prev_offset_ = 0;
68
68
  size_t prev_len_ = 0;
69
69
  std::unique_ptr<FilePrefetchBuffer> prefetch_buffer_;
@@ -503,7 +503,7 @@ Status PartitionedFilterBlockReader::CacheDependencies(const ReadOptions& ro,
503
503
  std::unique_ptr<FilePrefetchBuffer> prefetch_buffer;
504
504
  rep->CreateFilePrefetchBuffer(0, 0, &prefetch_buffer,
505
505
  false /* Implicit autoreadahead */,
506
- false /*async_io*/);
506
+ 0 /*num_reads_*/);
507
507
 
508
508
  IOOptions opts;
509
509
  s = rep->file->PrepareIOOptions(ro, opts);
@@ -89,10 +89,10 @@ void PartitionedIndexIterator::InitPartitionedIndexBlock() {
89
89
  // Enabled after 2 sequential IOs when ReadOptions.readahead_size == 0.
90
90
  // Explicit user requested readahead:
91
91
  // Enabled from the very first IO when ReadOptions.readahead_size is set.
92
- block_prefetcher_.PrefetchIfNeeded(
93
- rep, partitioned_index_handle, read_options_.readahead_size,
94
- is_for_compaction, read_options_.async_io,
95
- read_options_.rate_limiter_priority);
92
+ block_prefetcher_.PrefetchIfNeeded(rep, partitioned_index_handle,
93
+ read_options_.readahead_size,
94
+ is_for_compaction, /*async_io=*/false,
95
+ read_options_.rate_limiter_priority);
96
96
  Status s;
97
97
  table_->NewDataBlockIterator<IndexBlockIter>(
98
98
  read_options_, partitioned_index_handle, &block_iter_,
@@ -158,7 +158,7 @@ Status PartitionIndexReader::CacheDependencies(const ReadOptions& ro,
158
158
  std::unique_ptr<FilePrefetchBuffer> prefetch_buffer;
159
159
  rep->CreateFilePrefetchBuffer(0, 0, &prefetch_buffer,
160
160
  false /*Implicit auto readahead*/,
161
- false /*async_io*/);
161
+ 0 /*num_reads_*/);
162
162
  IOOptions opts;
163
163
  {
164
164
  Status s = rep->file->PrepareIOOptions(ro, opts);
@@ -1157,6 +1157,11 @@ DEFINE_bool(charge_table_reader, false,
1157
1157
  "CacheEntryRoleOptions::charged of"
1158
1158
  "CacheEntryRole::kBlockBasedTableReader");
1159
1159
 
1160
+ DEFINE_bool(charge_file_metadata, false,
1161
+ "Setting for "
1162
+ "CacheEntryRoleOptions::charged of"
1163
+ "CacheEntryRole::kFileMetadata");
1164
+
1160
1165
  DEFINE_uint64(backup_rate_limit, 0ull,
1161
1166
  "If non-zero, db_bench will rate limit reads and writes for DB "
1162
1167
  "backup. This "
@@ -1641,12 +1646,20 @@ static const bool FLAGS_readwritepercent_dummy __attribute__((__unused__)) =
1641
1646
  DEFINE_int32(disable_seek_compaction, false,
1642
1647
  "Not used, left here for backwards compatibility");
1643
1648
 
1649
+ DEFINE_bool(allow_data_in_errors,
1650
+ ROCKSDB_NAMESPACE::Options().allow_data_in_errors,
1651
+ "If true, allow logging data, e.g. key, value in LOG files.");
1652
+
1644
1653
  static const bool FLAGS_deletepercent_dummy __attribute__((__unused__)) =
1645
1654
  RegisterFlagValidator(&FLAGS_deletepercent, &ValidateInt32Percent);
1646
1655
  static const bool FLAGS_table_cache_numshardbits_dummy __attribute__((__unused__)) =
1647
1656
  RegisterFlagValidator(&FLAGS_table_cache_numshardbits,
1648
1657
  &ValidateTableCacheNumshardbits);
1649
1658
 
1659
+ DEFINE_uint32(write_batch_protection_bytes_per_key, 0,
1660
+ "Size of per-key-value checksum in each write batch. Currently "
1661
+ "only value 0 and 8 are supported.");
1662
+
1650
1663
  namespace ROCKSDB_NAMESPACE {
1651
1664
  namespace {
1652
1665
  static Status CreateMemTableRepFactory(
@@ -2375,13 +2388,14 @@ class CombinedStats {
2375
2388
 
2376
2389
  if (throughput_mbs_.size() == throughput_ops_.size()) {
2377
2390
  fprintf(stdout,
2378
- "%s [AVG %d runs] : %d (± %d) ops/sec; %6.1f (± %.1f) MB/sec\n",
2391
+ "%s [AVG %d runs] : %d (\xC2\xB1 %d) ops/sec; %6.1f (\xC2\xB1 "
2392
+ "%.1f) MB/sec\n",
2379
2393
  name, num_runs, static_cast<int>(CalcAvg(throughput_ops_)),
2380
2394
  static_cast<int>(CalcConfidence95(throughput_ops_)),
2381
2395
  CalcAvg(throughput_mbs_), CalcConfidence95(throughput_mbs_));
2382
2396
  } else {
2383
- fprintf(stdout, "%s [AVG %d runs] : %d (± %d) ops/sec\n", name, num_runs,
2384
- static_cast<int>(CalcAvg(throughput_ops_)),
2397
+ fprintf(stdout, "%s [AVG %d runs] : %d (\xC2\xB1 %d) ops/sec\n", name,
2398
+ num_runs, static_cast<int>(CalcAvg(throughput_ops_)),
2385
2399
  static_cast<int>(CalcConfidence95(throughput_ops_)));
2386
2400
  }
2387
2401
  }
@@ -2422,8 +2436,10 @@ class CombinedStats {
2422
2436
  int num_runs = static_cast<int>(throughput_ops_.size());
2423
2437
 
2424
2438
  if (throughput_mbs_.size() == throughput_ops_.size()) {
2439
+ // \xC2\xB1 is +/- character in UTF-8
2425
2440
  fprintf(stdout,
2426
- "%s [AVG %d runs] : %d (± %d) ops/sec; %6.1f (± %.1f) MB/sec\n"
2441
+ "%s [AVG %d runs] : %d (\xC2\xB1 %d) ops/sec; %6.1f (\xC2\xB1 "
2442
+ "%.1f) MB/sec\n"
2427
2443
  "%s [MEDIAN %d runs] : %d ops/sec; %6.1f MB/sec\n",
2428
2444
  name, num_runs, static_cast<int>(CalcAvg(throughput_ops_)),
2429
2445
  static_cast<int>(CalcConfidence95(throughput_ops_)),
@@ -2432,7 +2448,7 @@ class CombinedStats {
2432
2448
  CalcMedian(throughput_mbs_));
2433
2449
  } else {
2434
2450
  fprintf(stdout,
2435
- "%s [AVG %d runs] : %d (± %d) ops/sec\n"
2451
+ "%s [AVG %d runs] : %d (\xC2\xB1 %d) ops/sec\n"
2436
2452
  "%s [MEDIAN %d runs] : %d ops/sec\n",
2437
2453
  name, num_runs, static_cast<int>(CalcAvg(throughput_ops_)),
2438
2454
  static_cast<int>(CalcConfidence95(throughput_ops_)), name,
@@ -2945,8 +2961,10 @@ class Benchmark {
2945
2961
  }
2946
2962
  return cache;
2947
2963
  } else if (FLAGS_cache_type == "fast_lru_cache") {
2948
- return NewFastLRUCache(static_cast<size_t>(capacity),
2949
- FLAGS_cache_numshardbits);
2964
+ return NewFastLRUCache(static_cast<size_t>(capacity), FLAGS_block_size,
2965
+ FLAGS_cache_numshardbits,
2966
+ false /*strict_capacity_limit*/,
2967
+ kDefaultCacheMetadataChargePolicy);
2950
2968
  } else if (FLAGS_cache_type == "lru_cache") {
2951
2969
  LRUCacheOptions opts(
2952
2970
  static_cast<size_t>(capacity), FLAGS_cache_numshardbits,
@@ -4240,6 +4258,11 @@ class Benchmark {
4240
4258
  {/*.charged = */ FLAGS_charge_table_reader
4241
4259
  ? CacheEntryRoleOptions::Decision::kEnabled
4242
4260
  : CacheEntryRoleOptions::Decision::kDisabled}});
4261
+ block_based_options.cache_usage_options.options_overrides.insert(
4262
+ {CacheEntryRole::kFileMetadata,
4263
+ {/*.charged = */ FLAGS_charge_file_metadata
4264
+ ? CacheEntryRoleOptions::Decision::kEnabled
4265
+ : CacheEntryRoleOptions::Decision::kDisabled}});
4243
4266
  block_based_options.block_cache_compressed = compressed_cache_;
4244
4267
  block_based_options.block_size = FLAGS_block_size;
4245
4268
  block_based_options.block_restart_interval = FLAGS_block_restart_interval;
@@ -4438,6 +4461,8 @@ class Benchmark {
4438
4461
  options.comparator = test::BytewiseComparatorWithU64TsWrapper();
4439
4462
  }
4440
4463
 
4464
+ options.allow_data_in_errors = FLAGS_allow_data_in_errors;
4465
+
4441
4466
  // Integrated BlobDB
4442
4467
  options.enable_blob_files = FLAGS_enable_blob_files;
4443
4468
  options.min_blob_size = FLAGS_min_blob_size;
@@ -4465,7 +4490,6 @@ class Benchmark {
4465
4490
  exit(1);
4466
4491
  }
4467
4492
  #endif // ROCKSDB_LITE
4468
-
4469
4493
  }
4470
4494
 
4471
4495
  void InitializeOptionsGeneral(Options* opts) {
@@ -4893,7 +4917,8 @@ class Benchmark {
4893
4917
 
4894
4918
  RandomGenerator gen;
4895
4919
  WriteBatch batch(/*reserved_bytes=*/0, /*max_bytes=*/0,
4896
- /*protection_bytes_per_key=*/0, user_timestamp_size_);
4920
+ FLAGS_write_batch_protection_bytes_per_key,
4921
+ user_timestamp_size_);
4897
4922
  Status s;
4898
4923
  int64_t bytes = 0;
4899
4924
 
@@ -6682,7 +6707,8 @@ class Benchmark {
6682
6707
 
6683
6708
  void DoDelete(ThreadState* thread, bool seq) {
6684
6709
  WriteBatch batch(/*reserved_bytes=*/0, /*max_bytes=*/0,
6685
- /*protection_bytes_per_key=*/0, user_timestamp_size_);
6710
+ FLAGS_write_batch_protection_bytes_per_key,
6711
+ user_timestamp_size_);
6686
6712
  Duration duration(seq ? 0 : FLAGS_duration, deletes_);
6687
6713
  int64_t i = 0;
6688
6714
  std::unique_ptr<const char[]> key_guard;
@@ -6882,7 +6908,8 @@ class Benchmark {
6882
6908
  std::string keys[3];
6883
6909
 
6884
6910
  WriteBatch batch(/*reserved_bytes=*/0, /*max_bytes=*/0,
6885
- /*protection_bytes_per_key=*/0, user_timestamp_size_);
6911
+ FLAGS_write_batch_protection_bytes_per_key,
6912
+ user_timestamp_size_);
6886
6913
  Status s;
6887
6914
  for (int i = 0; i < 3; i++) {
6888
6915
  keys[i] = key.ToString() + suffixes[i];
@@ -6914,7 +6941,7 @@ class Benchmark {
6914
6941
  std::string suffixes[3] = {"1", "2", "0"};
6915
6942
  std::string keys[3];
6916
6943
 
6917
- WriteBatch batch(0, 0, /*protection_bytes_per_key=*/0,
6944
+ WriteBatch batch(0, 0, FLAGS_write_batch_protection_bytes_per_key,
6918
6945
  user_timestamp_size_);
6919
6946
  Status s;
6920
6947
  for (int i = 0; i < 3; i++) {
@@ -209,6 +209,16 @@ class ReverseBytewiseComparatorImpl : public BytewiseComparatorImpl {
209
209
  // Don't do anything for simplicity.
210
210
  }
211
211
 
212
+ bool IsSameLengthImmediateSuccessor(const Slice& s,
213
+ const Slice& t) const override {
214
+ // Always returning false to prevent surfacing design flaws in
215
+ // auto_prefix_mode
216
+ (void)s, (void)t;
217
+ return false;
218
+ // "Correct" implementation:
219
+ // return BytewiseComparatorImpl::IsSameLengthImmediateSuccessor(t, s);
220
+ }
221
+
212
222
  bool CanKeysWithDifferentByteContentsBeEqual() const override {
213
223
  return false;
214
224
  }
@@ -150,7 +150,7 @@ namespace ribbon {
150
150
  // (m/n) than is required with Gaussian elimination.
151
151
  //
152
152
  // Recommended reading:
153
- // "Peeling Close to the Orientability Threshold Spatial Coupling in
153
+ // "Peeling Close to the Orientability Threshold - Spatial Coupling in
154
154
  // Hashing-Based Data Structures" by Stefan Walzer
155
155
  //
156
156
  // ######################################################################
@@ -13,6 +13,7 @@
13
13
  #include "port/lang.h" // for FALLTHROUGH_INTENDED, inserted as appropriate
14
14
  /* END RocksDB customizations */
15
15
 
16
+ // clang-format off
16
17
  /*
17
18
  * xxHash - Extremely Fast Hash algorithm
18
19
  * Header File
@@ -3673,7 +3674,7 @@ XXH3_initCustomSecret_avx512(void* XXH_RESTRICT customSecret, xxh_u64 seed64)
3673
3674
  int i;
3674
3675
  for (i=0; i < nbRounds; ++i) {
3675
3676
  /* GCC has a bug, _mm512_stream_load_si512 accepts 'void*', not 'void const*',
3676
- * this will warn "discards const qualifier". */
3677
+ * this will warn "discards 'const' qualifier". */
3677
3678
  union {
3678
3679
  XXH_ALIGN(64) const __m512i* cp;
3679
3680
  XXH_ALIGN(64) void* p;
package/index.js CHANGED
@@ -128,9 +128,9 @@ class RocksLevel extends AbstractLevel {
128
128
  })
129
129
  }
130
130
 
131
- const context = binding.iterator_init(this[kContext], {
131
+ const context = binding.iterator_init(this[kContext], {
132
132
  highWaterMarkBytes: 1024 * 1024 * 1024, // TODO (fix): Replace with -1.
133
- ...options
133
+ ...options
134
134
  })
135
135
  const resource = {
136
136
  callback: null,
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "@nxtedition/rocksdb",
3
- "version": "7.0.0",
3
+ "version": "7.0.3",
4
4
  "description": "A low-level Node.js RocksDB binding",
5
5
  "license": "MIT",
6
6
  "main": "index.js",
Binary file