@nxtedition/rocksdb 7.1.5 → 7.1.8

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (67)
  1. package/binding.cc +32 -14
  2. package/deps/rocksdb/rocksdb/cache/cache.cc +4 -0
  3. package/deps/rocksdb/rocksdb/cache/cache_bench_tool.cc +6 -8
  4. package/deps/rocksdb/rocksdb/cache/cache_key.cc +184 -164
  5. package/deps/rocksdb/rocksdb/cache/cache_key.h +38 -29
  6. package/deps/rocksdb/rocksdb/cache/cache_reservation_manager_test.cc +4 -4
  7. package/deps/rocksdb/rocksdb/cache/clock_cache.cc +4 -2
  8. package/deps/rocksdb/rocksdb/cache/compressed_secondary_cache.cc +11 -9
  9. package/deps/rocksdb/rocksdb/cache/compressed_secondary_cache.h +1 -1
  10. package/deps/rocksdb/rocksdb/cache/compressed_secondary_cache_test.cc +28 -18
  11. package/deps/rocksdb/rocksdb/cache/lru_cache.cc +86 -17
  12. package/deps/rocksdb/rocksdb/cache/lru_cache.h +48 -8
  13. package/deps/rocksdb/rocksdb/cache/lru_cache_test.cc +356 -153
  14. package/deps/rocksdb/rocksdb/db/blob/blob_file_builder.cc +3 -7
  15. package/deps/rocksdb/rocksdb/db/blob/blob_source.cc +4 -5
  16. package/deps/rocksdb/rocksdb/db/blob/blob_source.h +2 -3
  17. package/deps/rocksdb/rocksdb/db/blob/blob_source_test.cc +12 -4
  18. package/deps/rocksdb/rocksdb/db/blob/db_blob_compaction_test.cc +69 -0
  19. package/deps/rocksdb/rocksdb/db/compaction/compaction_iterator.cc +6 -1
  20. package/deps/rocksdb/rocksdb/db/compaction/compaction_job.cc +4 -1
  21. package/deps/rocksdb/rocksdb/db/db_block_cache_test.cc +222 -182
  22. package/deps/rocksdb/rocksdb/db/db_kv_checksum_test.cc +239 -23
  23. package/deps/rocksdb/rocksdb/db/db_test2.cc +6 -2
  24. package/deps/rocksdb/rocksdb/db/event_helpers.cc +2 -1
  25. package/deps/rocksdb/rocksdb/db/import_column_family_job.cc +6 -0
  26. package/deps/rocksdb/rocksdb/db/import_column_family_job.h +6 -0
  27. package/deps/rocksdb/rocksdb/db/import_column_family_test.cc +6 -0
  28. package/deps/rocksdb/rocksdb/db/kv_checksum.h +8 -4
  29. package/deps/rocksdb/rocksdb/db/memtable.cc +173 -33
  30. package/deps/rocksdb/rocksdb/db/memtable.h +10 -0
  31. package/deps/rocksdb/rocksdb/db/table_cache_sync_and_async.h +2 -1
  32. package/deps/rocksdb/rocksdb/db/version_set.cc +37 -18
  33. package/deps/rocksdb/rocksdb/db/version_set_sync_and_async.h +2 -1
  34. package/deps/rocksdb/rocksdb/db_stress_tool/db_stress_common.h +1 -0
  35. package/deps/rocksdb/rocksdb/db_stress_tool/db_stress_gflags.cc +6 -0
  36. package/deps/rocksdb/rocksdb/db_stress_tool/db_stress_test_base.cc +2 -0
  37. package/deps/rocksdb/rocksdb/include/rocksdb/advanced_options.h +15 -0
  38. package/deps/rocksdb/rocksdb/include/rocksdb/cache.h +31 -6
  39. package/deps/rocksdb/rocksdb/memory/memory_allocator_test.cc +1 -1
  40. package/deps/rocksdb/rocksdb/options/cf_options.cc +4 -0
  41. package/deps/rocksdb/rocksdb/options/cf_options.h +4 -0
  42. package/deps/rocksdb/rocksdb/options/options_helper.cc +2 -0
  43. package/deps/rocksdb/rocksdb/options/options_settable_test.cc +2 -1
  44. package/deps/rocksdb/rocksdb/table/block_based/block_based_table_builder.cc +2 -6
  45. package/deps/rocksdb/rocksdb/table/block_based/block_based_table_factory.cc +1 -0
  46. package/deps/rocksdb/rocksdb/table/block_based/block_based_table_reader.cc +2 -4
  47. package/deps/rocksdb/rocksdb/table/block_based/block_based_table_reader.h +1 -7
  48. package/deps/rocksdb/rocksdb/table/block_based/block_based_table_reader_sync_and_async.h +2 -1
  49. package/deps/rocksdb/rocksdb/table/unique_id.cc +22 -24
  50. package/deps/rocksdb/rocksdb/table/unique_id_impl.h +2 -1
  51. package/deps/rocksdb/rocksdb/tools/block_cache_analyzer/block_cache_trace_analyzer_plot.py +7 -0
  52. package/deps/rocksdb/rocksdb/tools/db_bench_tool.cc +27 -3
  53. package/deps/rocksdb/rocksdb/util/async_file_reader.cc +2 -1
  54. package/deps/rocksdb/rocksdb/util/async_file_reader.h +3 -3
  55. package/deps/rocksdb/rocksdb/util/coro_utils.h +2 -1
  56. package/deps/rocksdb/rocksdb/util/hash_test.cc +67 -0
  57. package/deps/rocksdb/rocksdb/util/math.h +41 -0
  58. package/deps/rocksdb/rocksdb/util/math128.h +6 -0
  59. package/deps/rocksdb/rocksdb/util/single_thread_executor.h +2 -1
  60. package/deps/rocksdb/rocksdb/utilities/cache_dump_load_impl.cc +3 -6
  61. package/deps/rocksdb/rocksdb/utilities/transactions/lock/point/point_lock_manager_test.h +5 -0
  62. package/deps/rocksdb/rocksdb/utilities/transactions/lock/range/range_lock_manager.h +6 -0
  63. package/index.js +15 -6
  64. package/package.json +1 -1
  65. package/prebuilds/darwin-arm64/node.napi.node +0 -0
  66. package/prebuilds/darwin-x64/node.napi.node +0 -0
  67. package/prebuilds/linux-x64/node.napi.node +0 -0
@@ -9,6 +9,7 @@
9
9
 
10
10
  #include "rocksdb/rocksdb_namespace.h"
11
11
  #include "rocksdb/slice.h"
12
+ #include "table/unique_id_impl.h"
12
13
 
13
14
  namespace ROCKSDB_NAMESPACE {
14
15
 
@@ -33,10 +34,10 @@ class CacheKey {
33
34
  public:
34
35
  // For convenience, constructs an "empty" cache key that is never returned
35
36
  // by other means.
36
- inline CacheKey() : session_etc64_(), offset_etc64_() {}
37
+ inline CacheKey() : file_num_etc64_(), offset_etc64_() {}
37
38
 
38
39
  inline bool IsEmpty() const {
39
- return (session_etc64_ == 0) & (offset_etc64_ == 0);
40
+ return (file_num_etc64_ == 0) & (offset_etc64_ == 0);
40
41
  }
41
42
 
42
43
  // Use this cache key as a Slice (byte order is endianness-dependent)
@@ -59,9 +60,9 @@ class CacheKey {
59
60
 
60
61
  protected:
61
62
  friend class OffsetableCacheKey;
62
- CacheKey(uint64_t session_etc64, uint64_t offset_etc64)
63
- : session_etc64_(session_etc64), offset_etc64_(offset_etc64) {}
64
- uint64_t session_etc64_;
63
+ CacheKey(uint64_t file_num_etc64, uint64_t offset_etc64)
64
+ : file_num_etc64_(file_num_etc64), offset_etc64_(offset_etc64) {}
65
+ uint64_t file_num_etc64_;
65
66
  uint64_t offset_etc64_;
66
67
  };
67
68
 
@@ -85,50 +86,58 @@ class OffsetableCacheKey : private CacheKey {
85
86
  inline OffsetableCacheKey() : CacheKey() {}
86
87
 
87
88
  // Constructs an OffsetableCacheKey with the given information about a file.
88
- // max_offset is based on file size (see WithOffset) and is required here to
89
- // choose an appropriate (sub-)encoding. This constructor never generates an
90
- // "empty" base key.
89
+ // This constructor never generates an "empty" base key.
91
90
  OffsetableCacheKey(const std::string &db_id, const std::string &db_session_id,
92
- uint64_t file_number, uint64_t max_offset);
91
+ uint64_t file_number);
92
+
93
+ // Creates an OffsetableCacheKey from an SST unique ID, so that cache keys
94
+ // can be derived from DB manifest data before reading the file from
95
+ // storage--so that every part of the file can potentially go in a persistent
96
+ // cache.
97
+ //
98
+ // Calling GetSstInternalUniqueId() on a db_id, db_session_id, and
99
+ // file_number and passing the result to this function produces the same
100
+ // base cache key as feeding those inputs directly to the constructor.
101
+ //
102
+ // This is a bijective transformation assuming either id is empty or
103
+ // lower 64 bits is non-zero:
104
+ // * Empty (all zeros) input -> empty (all zeros) output
105
+ // * Lower 64 input is non-zero -> lower 64 output (file_num_etc64_) is
106
+ // non-zero
107
+ static OffsetableCacheKey FromInternalUniqueId(UniqueIdPtr id);
108
+
109
+ // This is the inverse transformation to the above, assuming either empty
110
+ // or lower 64 bits (file_num_etc64_) is non-zero. Perhaps only useful for
111
+ // testing.
112
+ UniqueId64x2 ToInternalUniqueId();
93
113
 
94
114
  inline bool IsEmpty() const {
95
- bool result = session_etc64_ == 0;
115
+ bool result = file_num_etc64_ == 0;
96
116
  assert(!(offset_etc64_ > 0 && result));
97
117
  return result;
98
118
  }
99
119
 
100
- // Construct a CacheKey for an offset within a file, which must be
101
- // <= max_offset provided in constructor. An offset is not necessarily a
102
- // byte offset if a smaller unique identifier of keyable offsets is used.
120
+ // Construct a CacheKey for an offset within a file. An offset is not
121
+ // necessarily a byte offset if a smaller unique identifier of keyable
122
+ // offsets is used.
103
123
  //
104
124
  // This class was designed to make this hot code extremely fast.
105
125
  inline CacheKey WithOffset(uint64_t offset) const {
106
126
  assert(!IsEmpty());
107
- assert(offset <= max_offset_);
108
- return CacheKey(session_etc64_, offset_etc64_ ^ offset);
127
+ return CacheKey(file_num_etc64_, offset_etc64_ ^ offset);
109
128
  }
110
129
 
111
- // The "common prefix" is a shared prefix for all the returned CacheKeys,
112
- // that also happens to usually be the same among many files in the same DB,
113
- // so is efficient and highly accurate (not perfectly) for DB-specific cache
114
- // dump selection (but not file-specific).
130
+ // The "common prefix" is a shared prefix for all the returned CacheKeys.
131
+ // It is specific to the file but the same for all offsets within the file.
115
132
  static constexpr size_t kCommonPrefixSize = 8;
116
133
  inline Slice CommonPrefixSlice() const {
117
- static_assert(sizeof(session_etc64_) == kCommonPrefixSize,
134
+ static_assert(sizeof(file_num_etc64_) == kCommonPrefixSize,
118
135
  "8 byte common prefix expected");
119
136
  assert(!IsEmpty());
120
- assert(&this->session_etc64_ == static_cast<const void *>(this));
137
+ assert(&this->file_num_etc64_ == static_cast<const void *>(this));
121
138
 
122
139
  return Slice(reinterpret_cast<const char *>(this), kCommonPrefixSize);
123
140
  }
124
-
125
- // For any max_offset <= this value, the same encoding scheme is guaranteed.
126
- static constexpr uint64_t kMaxOffsetStandardEncoding = 0xffffffffffU;
127
-
128
- private:
129
- #ifndef NDEBUG
130
- uint64_t max_offset_ = 0;
131
- #endif
132
141
  };
133
142
 
134
143
  } // namespace ROCKSDB_NAMESPACE
@@ -48,13 +48,13 @@ TEST_F(CacheReservationManagerTest, GenerateCacheKey) {
48
48
  // Next unique Cache key
49
49
  CacheKey ckey = CacheKey::CreateUniqueForCacheLifetime(cache.get());
50
50
  // Get to the underlying values
51
- using PairU64 = std::array<uint64_t, 2>;
52
- auto& ckey_pair = *reinterpret_cast<PairU64*>(&ckey);
51
+ uint64_t* ckey_data = reinterpret_cast<uint64_t*>(&ckey);
53
52
  // Back it up to the one used by CRM (using CacheKey implementation details)
54
- ckey_pair[1]--;
53
+ ckey_data[1]--;
55
54
 
56
55
  // Specific key (subject to implementation details)
57
- EXPECT_EQ(ckey_pair, PairU64({0, 2}));
56
+ EXPECT_EQ(ckey_data[0], 0);
57
+ EXPECT_EQ(ckey_data[1], 2);
58
58
 
59
59
  Cache::Handle* handle = cache->Lookup(ckey.AsSlice());
60
60
  EXPECT_NE(handle, nullptr)
@@ -697,8 +697,10 @@ void ClockCache::DisownData() {
697
697
  std::shared_ptr<Cache> NewClockCache(
698
698
  size_t capacity, int num_shard_bits, bool strict_capacity_limit,
699
699
  CacheMetadataChargePolicy metadata_charge_policy) {
700
- return NewLRUCache(capacity, num_shard_bits, strict_capacity_limit, 0.5,
701
- nullptr, kDefaultToAdaptiveMutex, metadata_charge_policy);
700
+ return NewLRUCache(capacity, num_shard_bits, strict_capacity_limit,
701
+ /* high_pri_pool_ratio */ 0.5, nullptr,
702
+ kDefaultToAdaptiveMutex, metadata_charge_policy,
703
+ /* low_pri_pool_ratio */ 0.0);
702
704
  }
703
705
 
704
706
  std::shared_ptr<Cache> ExperimentalNewClockCache(
@@ -17,17 +17,18 @@ namespace ROCKSDB_NAMESPACE {
17
17
 
18
18
  CompressedSecondaryCache::CompressedSecondaryCache(
19
19
  size_t capacity, int num_shard_bits, bool strict_capacity_limit,
20
- double high_pri_pool_ratio,
20
+ double high_pri_pool_ratio, double low_pri_pool_ratio,
21
21
  std::shared_ptr<MemoryAllocator> memory_allocator, bool use_adaptive_mutex,
22
22
  CacheMetadataChargePolicy metadata_charge_policy,
23
23
  CompressionType compression_type, uint32_t compress_format_version)
24
24
  : cache_options_(capacity, num_shard_bits, strict_capacity_limit,
25
25
  high_pri_pool_ratio, memory_allocator, use_adaptive_mutex,
26
26
  metadata_charge_policy, compression_type,
27
- compress_format_version) {
28
- cache_ = NewLRUCache(capacity, num_shard_bits, strict_capacity_limit,
29
- high_pri_pool_ratio, memory_allocator,
30
- use_adaptive_mutex, metadata_charge_policy);
27
+ compress_format_version, low_pri_pool_ratio) {
28
+ cache_ =
29
+ NewLRUCache(capacity, num_shard_bits, strict_capacity_limit,
30
+ high_pri_pool_ratio, memory_allocator, use_adaptive_mutex,
31
+ metadata_charge_policy, low_pri_pool_ratio);
31
32
  }
32
33
 
33
34
  CompressedSecondaryCache::~CompressedSecondaryCache() { cache_.reset(); }
@@ -225,11 +226,12 @@ std::shared_ptr<SecondaryCache> NewCompressedSecondaryCache(
225
226
  double high_pri_pool_ratio,
226
227
  std::shared_ptr<MemoryAllocator> memory_allocator, bool use_adaptive_mutex,
227
228
  CacheMetadataChargePolicy metadata_charge_policy,
228
- CompressionType compression_type, uint32_t compress_format_version) {
229
+ CompressionType compression_type, uint32_t compress_format_version,
230
+ double low_pri_pool_ratio) {
229
231
  return std::make_shared<CompressedSecondaryCache>(
230
232
  capacity, num_shard_bits, strict_capacity_limit, high_pri_pool_ratio,
231
- memory_allocator, use_adaptive_mutex, metadata_charge_policy,
232
- compression_type, compress_format_version);
233
+ low_pri_pool_ratio, memory_allocator, use_adaptive_mutex,
234
+ metadata_charge_policy, compression_type, compress_format_version);
233
235
  }
234
236
 
235
237
  std::shared_ptr<SecondaryCache> NewCompressedSecondaryCache(
@@ -240,7 +242,7 @@ std::shared_ptr<SecondaryCache> NewCompressedSecondaryCache(
240
242
  opts.capacity, opts.num_shard_bits, opts.strict_capacity_limit,
241
243
  opts.high_pri_pool_ratio, opts.memory_allocator, opts.use_adaptive_mutex,
242
244
  opts.metadata_charge_policy, opts.compression_type,
243
- opts.compress_format_version);
245
+ opts.compress_format_version, opts.low_pri_pool_ratio);
244
246
  }
245
247
 
246
248
  } // namespace ROCKSDB_NAMESPACE
@@ -56,7 +56,7 @@ class CompressedSecondaryCache : public SecondaryCache {
56
56
  public:
57
57
  CompressedSecondaryCache(
58
58
  size_t capacity, int num_shard_bits, bool strict_capacity_limit,
59
- double high_pri_pool_ratio,
59
+ double high_pri_pool_ratio, double low_pri_pool_ratio,
60
60
  std::shared_ptr<MemoryAllocator> memory_allocator = nullptr,
61
61
  bool use_adaptive_mutex = kDefaultToAdaptiveMutex,
62
62
  CacheMetadataChargePolicy metadata_charge_policy =
@@ -240,9 +240,11 @@ class CompressedSecondaryCacheTest : public testing::Test {
240
240
  secondary_cache_opts.num_shard_bits = 0;
241
241
  std::shared_ptr<SecondaryCache> secondary_cache =
242
242
  NewCompressedSecondaryCache(secondary_cache_opts);
243
- LRUCacheOptions lru_cache_opts(1300, 0, /*_strict_capacity_limit=*/false,
244
- 0.5, nullptr, kDefaultToAdaptiveMutex,
245
- kDefaultCacheMetadataChargePolicy);
243
+ LRUCacheOptions lru_cache_opts(
244
+ 1300 /* capacity */, 0 /* num_shard_bits */,
245
+ false /* strict_capacity_limit */, 0.5 /* high_pri_pool_ratio */,
246
+ nullptr /* memory_allocator */, kDefaultToAdaptiveMutex,
247
+ kDefaultCacheMetadataChargePolicy);
246
248
  lru_cache_opts.secondary_cache = secondary_cache;
247
249
  std::shared_ptr<Cache> cache = NewLRUCache(lru_cache_opts);
248
250
  std::shared_ptr<Statistics> stats = CreateDBStatistics();
@@ -324,9 +326,11 @@ class CompressedSecondaryCacheTest : public testing::Test {
324
326
  std::shared_ptr<SecondaryCache> secondary_cache =
325
327
  NewCompressedSecondaryCache(secondary_cache_opts);
326
328
 
327
- LRUCacheOptions opts(1024, 0, /*_strict_capacity_limit=*/false, 0.5,
328
- nullptr, kDefaultToAdaptiveMutex,
329
- kDefaultCacheMetadataChargePolicy);
329
+ LRUCacheOptions opts(
330
+ 1024 /* capacity */, 0 /* num_shard_bits */,
331
+ false /* strict_capacity_limit */, 0.5 /* high_pri_pool_ratio */,
332
+ nullptr /* memory_allocator */, kDefaultToAdaptiveMutex,
333
+ kDefaultCacheMetadataChargePolicy);
330
334
  opts.secondary_cache = secondary_cache;
331
335
  std::shared_ptr<Cache> cache = NewLRUCache(opts);
332
336
 
@@ -371,9 +375,11 @@ class CompressedSecondaryCacheTest : public testing::Test {
371
375
  std::shared_ptr<SecondaryCache> secondary_cache =
372
376
  NewCompressedSecondaryCache(secondary_cache_opts);
373
377
 
374
- LRUCacheOptions opts(1200, 0, /*_strict_capacity_limit=*/false, 0.5,
375
- nullptr, kDefaultToAdaptiveMutex,
376
- kDefaultCacheMetadataChargePolicy);
378
+ LRUCacheOptions opts(
379
+ 1200 /* capacity */, 0 /* num_shard_bits */,
380
+ false /* strict_capacity_limit */, 0.5 /* high_pri_pool_ratio */,
381
+ nullptr /* memory_allocator */, kDefaultToAdaptiveMutex,
382
+ kDefaultCacheMetadataChargePolicy);
377
383
  opts.secondary_cache = secondary_cache;
378
384
  std::shared_ptr<Cache> cache = NewLRUCache(opts);
379
385
 
@@ -430,9 +436,11 @@ class CompressedSecondaryCacheTest : public testing::Test {
430
436
  std::shared_ptr<SecondaryCache> secondary_cache =
431
437
  NewCompressedSecondaryCache(secondary_cache_opts);
432
438
 
433
- LRUCacheOptions opts(1200, 0, /*_strict_capacity_limit=*/false, 0.5,
434
- nullptr, kDefaultToAdaptiveMutex,
435
- kDefaultCacheMetadataChargePolicy);
439
+ LRUCacheOptions opts(
440
+ 1200 /* capacity */, 0 /* num_shard_bits */,
441
+ false /* strict_capacity_limit */, 0.5 /* high_pri_pool_ratio */,
442
+ nullptr /* memory_allocator */, kDefaultToAdaptiveMutex,
443
+ kDefaultCacheMetadataChargePolicy);
436
444
  opts.secondary_cache = secondary_cache;
437
445
  std::shared_ptr<Cache> cache = NewLRUCache(opts);
438
446
 
@@ -488,9 +496,11 @@ class CompressedSecondaryCacheTest : public testing::Test {
488
496
  std::shared_ptr<SecondaryCache> secondary_cache =
489
497
  NewCompressedSecondaryCache(secondary_cache_opts);
490
498
 
491
- LRUCacheOptions opts(1200, 0, /*_strict_capacity_limit=*/true, 0.5, nullptr,
492
- kDefaultToAdaptiveMutex,
493
- kDefaultCacheMetadataChargePolicy);
499
+ LRUCacheOptions opts(
500
+ 1200 /* capacity */, 0 /* num_shard_bits */,
501
+ true /* strict_capacity_limit */, 0.5 /* high_pri_pool_ratio */,
502
+ nullptr /* memory_allocator */, kDefaultToAdaptiveMutex,
503
+ kDefaultCacheMetadataChargePolicy);
494
504
  opts.secondary_cache = secondary_cache;
495
505
  std::shared_ptr<Cache> cache = NewLRUCache(opts);
496
506
 
@@ -548,7 +558,7 @@ class CompressedSecondaryCacheTest : public testing::Test {
548
558
 
549
559
  using CacheValueChunk = CompressedSecondaryCache::CacheValueChunk;
550
560
  std::unique_ptr<CompressedSecondaryCache> sec_cache =
551
- std::make_unique<CompressedSecondaryCache>(1000, 0, true, 0.5,
561
+ std::make_unique<CompressedSecondaryCache>(1000, 0, true, 0.5, 0.0,
552
562
  allocator);
553
563
  Random rnd(301);
554
564
  // 10000 = 8169 + 1769 + 62 , so there should be 3 chunks after split.
@@ -600,7 +610,7 @@ class CompressedSecondaryCacheTest : public testing::Test {
600
610
  std::string str = str1 + str2 + str3;
601
611
 
602
612
  std::unique_ptr<CompressedSecondaryCache> sec_cache =
603
- std::make_unique<CompressedSecondaryCache>(1000, 0, true, 0.5);
613
+ std::make_unique<CompressedSecondaryCache>(1000, 0, true, 0.5, 0.0);
604
614
  size_t charge{0};
605
615
  CacheAllocationPtr value =
606
616
  sec_cache->MergeChunksIntoValue(chunks_head, charge);
@@ -626,7 +636,7 @@ class CompressedSecondaryCacheTest : public testing::Test {
626
636
 
627
637
  using CacheValueChunk = CompressedSecondaryCache::CacheValueChunk;
628
638
  std::unique_ptr<CompressedSecondaryCache> sec_cache =
629
- std::make_unique<CompressedSecondaryCache>(1000, 0, true, 0.5,
639
+ std::make_unique<CompressedSecondaryCache>(1000, 0, true, 0.5, 0.0,
630
640
  allocator);
631
641
  Random rnd(301);
632
642
  // 10000 = 8169 + 1769 + 62 , so there should be 3 chunks after split.
@@ -111,14 +111,17 @@ void LRUHandleTable::Resize() {
111
111
 
112
112
  LRUCacheShard::LRUCacheShard(
113
113
  size_t capacity, bool strict_capacity_limit, double high_pri_pool_ratio,
114
- bool use_adaptive_mutex, CacheMetadataChargePolicy metadata_charge_policy,
115
- int max_upper_hash_bits,
114
+ double low_pri_pool_ratio, bool use_adaptive_mutex,
115
+ CacheMetadataChargePolicy metadata_charge_policy, int max_upper_hash_bits,
116
116
  const std::shared_ptr<SecondaryCache>& secondary_cache)
117
117
  : capacity_(0),
118
118
  high_pri_pool_usage_(0),
119
+ low_pri_pool_usage_(0),
119
120
  strict_capacity_limit_(strict_capacity_limit),
120
121
  high_pri_pool_ratio_(high_pri_pool_ratio),
121
122
  high_pri_pool_capacity_(0),
123
+ low_pri_pool_ratio_(low_pri_pool_ratio),
124
+ low_pri_pool_capacity_(0),
122
125
  table_(max_upper_hash_bits),
123
126
  usage_(0),
124
127
  lru_usage_(0),
@@ -129,6 +132,7 @@ LRUCacheShard::LRUCacheShard(
129
132
  lru_.next = &lru_;
130
133
  lru_.prev = &lru_;
131
134
  lru_low_pri_ = &lru_;
135
+ lru_bottom_pri_ = &lru_;
132
136
  SetCapacity(capacity);
133
137
  }
134
138
 
@@ -192,10 +196,12 @@ void LRUCacheShard::ApplyToSomeEntries(
192
196
  index_begin, index_end);
193
197
  }
194
198
 
195
- void LRUCacheShard::TEST_GetLRUList(LRUHandle** lru, LRUHandle** lru_low_pri) {
199
+ void LRUCacheShard::TEST_GetLRUList(LRUHandle** lru, LRUHandle** lru_low_pri,
200
+ LRUHandle** lru_bottom_pri) {
196
201
  DMutexLock l(mutex_);
197
202
  *lru = &lru_;
198
203
  *lru_low_pri = lru_low_pri_;
204
+ *lru_bottom_pri = lru_bottom_pri_;
199
205
  }
200
206
 
201
207
  size_t LRUCacheShard::TEST_GetLRUSize() {
@@ -214,20 +220,32 @@ double LRUCacheShard::GetHighPriPoolRatio() {
214
220
  return high_pri_pool_ratio_;
215
221
  }
216
222
 
223
+ double LRUCacheShard::GetLowPriPoolRatio() {
224
+ DMutexLock l(mutex_);
225
+ return low_pri_pool_ratio_;
226
+ }
227
+
217
228
  void LRUCacheShard::LRU_Remove(LRUHandle* e) {
218
229
  assert(e->next != nullptr);
219
230
  assert(e->prev != nullptr);
220
231
  if (lru_low_pri_ == e) {
221
232
  lru_low_pri_ = e->prev;
222
233
  }
234
+ if (lru_bottom_pri_ == e) {
235
+ lru_bottom_pri_ = e->prev;
236
+ }
223
237
  e->next->prev = e->prev;
224
238
  e->prev->next = e->next;
225
239
  e->prev = e->next = nullptr;
226
240
  assert(lru_usage_ >= e->total_charge);
227
241
  lru_usage_ -= e->total_charge;
242
+ assert(!e->InHighPriPool() || !e->InLowPriPool());
228
243
  if (e->InHighPriPool()) {
229
244
  assert(high_pri_pool_usage_ >= e->total_charge);
230
245
  high_pri_pool_usage_ -= e->total_charge;
246
+ } else if (e->InLowPriPool()) {
247
+ assert(low_pri_pool_usage_ >= e->total_charge);
248
+ low_pri_pool_usage_ -= e->total_charge;
231
249
  }
232
250
  }
233
251
 
@@ -241,17 +259,34 @@ void LRUCacheShard::LRU_Insert(LRUHandle* e) {
241
259
  e->prev->next = e;
242
260
  e->next->prev = e;
243
261
  e->SetInHighPriPool(true);
262
+ e->SetInLowPriPool(false);
244
263
  high_pri_pool_usage_ += e->total_charge;
245
264
  MaintainPoolSize();
246
- } else {
247
- // Insert "e" to the head of low-pri pool. Note that when
248
- // high_pri_pool_ratio is 0, head of low-pri pool is also head of LRU list.
265
+ } else if (low_pri_pool_ratio_ > 0 &&
266
+ (e->IsHighPri() || e->IsLowPri() || e->HasHit())) {
267
+ // Insert "e" to the head of low-pri pool.
249
268
  e->next = lru_low_pri_->next;
250
269
  e->prev = lru_low_pri_;
251
270
  e->prev->next = e;
252
271
  e->next->prev = e;
253
272
  e->SetInHighPriPool(false);
273
+ e->SetInLowPriPool(true);
274
+ low_pri_pool_usage_ += e->total_charge;
275
+ MaintainPoolSize();
254
276
  lru_low_pri_ = e;
277
+ } else {
278
+ // Insert "e" to the head of bottom-pri pool.
279
+ e->next = lru_bottom_pri_->next;
280
+ e->prev = lru_bottom_pri_;
281
+ e->prev->next = e;
282
+ e->next->prev = e;
283
+ e->SetInHighPriPool(false);
284
+ e->SetInLowPriPool(false);
285
+ // if the low-pri pool is empty, lru_low_pri_ also needs to be updated.
286
+ if (lru_bottom_pri_ == lru_low_pri_) {
287
+ lru_low_pri_ = e;
288
+ }
289
+ lru_bottom_pri_ = e;
255
290
  }
256
291
  lru_usage_ += e->total_charge;
257
292
  }
@@ -262,8 +297,20 @@ void LRUCacheShard::MaintainPoolSize() {
262
297
  lru_low_pri_ = lru_low_pri_->next;
263
298
  assert(lru_low_pri_ != &lru_);
264
299
  lru_low_pri_->SetInHighPriPool(false);
300
+ lru_low_pri_->SetInLowPriPool(true);
265
301
  assert(high_pri_pool_usage_ >= lru_low_pri_->total_charge);
266
302
  high_pri_pool_usage_ -= lru_low_pri_->total_charge;
303
+ low_pri_pool_usage_ += lru_low_pri_->total_charge;
304
+ }
305
+
306
+ while (low_pri_pool_usage_ > low_pri_pool_capacity_) {
307
+ // Overflow last entry in low-pri pool to bottom-pri pool.
308
+ lru_bottom_pri_ = lru_bottom_pri_->next;
309
+ assert(lru_bottom_pri_ != &lru_);
310
+ lru_bottom_pri_->SetInHighPriPool(false);
311
+ lru_bottom_pri_->SetInLowPriPool(false);
312
+ assert(low_pri_pool_usage_ >= lru_bottom_pri_->total_charge);
313
+ low_pri_pool_usage_ -= lru_bottom_pri_->total_charge;
267
314
  }
268
315
  }
269
316
 
@@ -288,6 +335,7 @@ void LRUCacheShard::SetCapacity(size_t capacity) {
288
335
  DMutexLock l(mutex_);
289
336
  capacity_ = capacity;
290
337
  high_pri_pool_capacity_ = capacity_ * high_pri_pool_ratio_;
338
+ low_pri_pool_capacity_ = capacity_ * low_pri_pool_ratio_;
291
339
  EvictFromLRU(0, &last_reference_list);
292
340
  }
293
341
 
@@ -503,6 +551,13 @@ void LRUCacheShard::SetHighPriorityPoolRatio(double high_pri_pool_ratio) {
503
551
  MaintainPoolSize();
504
552
  }
505
553
 
554
+ void LRUCacheShard::SetLowPriorityPoolRatio(double low_pri_pool_ratio) {
555
+ DMutexLock l(mutex_);
556
+ low_pri_pool_ratio_ = low_pri_pool_ratio;
557
+ low_pri_pool_capacity_ = capacity_ * low_pri_pool_ratio_;
558
+ MaintainPoolSize();
559
+ }
560
+
506
561
  bool LRUCacheShard::Release(Cache::Handle* handle, bool erase_if_last_ref) {
507
562
  if (handle == nullptr) {
508
563
  return false;
@@ -634,12 +689,15 @@ std::string LRUCacheShard::GetPrintableOptions() const {
634
689
  DMutexLock l(mutex_);
635
690
  snprintf(buffer, kBufferSize, " high_pri_pool_ratio: %.3lf\n",
636
691
  high_pri_pool_ratio_);
692
+ snprintf(buffer + strlen(buffer), kBufferSize - strlen(buffer),
693
+ " low_pri_pool_ratio: %.3lf\n", low_pri_pool_ratio_);
637
694
  }
638
695
  return std::string(buffer);
639
696
  }
640
697
 
641
698
  LRUCache::LRUCache(size_t capacity, int num_shard_bits,
642
699
  bool strict_capacity_limit, double high_pri_pool_ratio,
700
+ double low_pri_pool_ratio,
643
701
  std::shared_ptr<MemoryAllocator> allocator,
644
702
  bool use_adaptive_mutex,
645
703
  CacheMetadataChargePolicy metadata_charge_policy,
@@ -653,7 +711,7 @@ LRUCache::LRUCache(size_t capacity, int num_shard_bits,
653
711
  for (int i = 0; i < num_shards_; i++) {
654
712
  new (&shards_[i]) LRUCacheShard(
655
713
  per_shard, strict_capacity_limit, high_pri_pool_ratio,
656
- use_adaptive_mutex, metadata_charge_policy,
714
+ low_pri_pool_ratio, use_adaptive_mutex, metadata_charge_policy,
657
715
  /* max_upper_hash_bits */ 32 - num_shard_bits, secondary_cache);
658
716
  }
659
717
  secondary_cache_ = secondary_cache;
@@ -775,7 +833,8 @@ std::shared_ptr<Cache> NewLRUCache(
775
833
  double high_pri_pool_ratio,
776
834
  std::shared_ptr<MemoryAllocator> memory_allocator, bool use_adaptive_mutex,
777
835
  CacheMetadataChargePolicy metadata_charge_policy,
778
- const std::shared_ptr<SecondaryCache>& secondary_cache) {
836
+ const std::shared_ptr<SecondaryCache>& secondary_cache,
837
+ double low_pri_pool_ratio) {
779
838
  if (num_shard_bits >= 20) {
780
839
  return nullptr; // The cache cannot be sharded into too many fine pieces.
781
840
  }
@@ -783,30 +842,40 @@ std::shared_ptr<Cache> NewLRUCache(
783
842
  // Invalid high_pri_pool_ratio
784
843
  return nullptr;
785
844
  }
845
+ if (low_pri_pool_ratio < 0.0 || low_pri_pool_ratio > 1.0) {
846
+ // Invalid high_pri_pool_ratio
847
+ return nullptr;
848
+ }
849
+ if (low_pri_pool_ratio + high_pri_pool_ratio > 1.0) {
850
+ // Invalid high_pri_pool_ratio and low_pri_pool_ratio combination
851
+ return nullptr;
852
+ }
786
853
  if (num_shard_bits < 0) {
787
854
  num_shard_bits = GetDefaultCacheShardBits(capacity);
788
855
  }
789
856
  return std::make_shared<LRUCache>(
790
857
  capacity, num_shard_bits, strict_capacity_limit, high_pri_pool_ratio,
791
- std::move(memory_allocator), use_adaptive_mutex, metadata_charge_policy,
792
- secondary_cache);
858
+ low_pri_pool_ratio, std::move(memory_allocator), use_adaptive_mutex,
859
+ metadata_charge_policy, secondary_cache);
793
860
  }
794
861
 
795
862
  std::shared_ptr<Cache> NewLRUCache(const LRUCacheOptions& cache_opts) {
796
- return NewLRUCache(
797
- cache_opts.capacity, cache_opts.num_shard_bits,
798
- cache_opts.strict_capacity_limit, cache_opts.high_pri_pool_ratio,
799
- cache_opts.memory_allocator, cache_opts.use_adaptive_mutex,
800
- cache_opts.metadata_charge_policy, cache_opts.secondary_cache);
863
+ return NewLRUCache(cache_opts.capacity, cache_opts.num_shard_bits,
864
+ cache_opts.strict_capacity_limit,
865
+ cache_opts.high_pri_pool_ratio,
866
+ cache_opts.memory_allocator, cache_opts.use_adaptive_mutex,
867
+ cache_opts.metadata_charge_policy,
868
+ cache_opts.secondary_cache, cache_opts.low_pri_pool_ratio);
801
869
  }
802
870
 
803
871
  std::shared_ptr<Cache> NewLRUCache(
804
872
  size_t capacity, int num_shard_bits, bool strict_capacity_limit,
805
873
  double high_pri_pool_ratio,
806
874
  std::shared_ptr<MemoryAllocator> memory_allocator, bool use_adaptive_mutex,
807
- CacheMetadataChargePolicy metadata_charge_policy) {
875
+ CacheMetadataChargePolicy metadata_charge_policy,
876
+ double low_pri_pool_ratio) {
808
877
  return NewLRUCache(capacity, num_shard_bits, strict_capacity_limit,
809
878
  high_pri_pool_ratio, memory_allocator, use_adaptive_mutex,
810
- metadata_charge_policy, nullptr);
879
+ metadata_charge_policy, nullptr, low_pri_pool_ratio);
811
880
  }
812
881
  } // namespace ROCKSDB_NAMESPACE