@nxtedition/rocksdb 7.0.25 → 7.0.28

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (120)
  1. package/binding.cc +9 -2
  2. package/chained-batch.js +1 -1
  3. package/deps/rocksdb/rocksdb/CMakeLists.txt +3 -0
  4. package/deps/rocksdb/rocksdb/Makefile +3 -0
  5. package/deps/rocksdb/rocksdb/TARGETS +10 -0
  6. package/deps/rocksdb/rocksdb/cache/cache_bench_tool.cc +17 -7
  7. package/deps/rocksdb/rocksdb/cache/cache_entry_roles.cc +2 -0
  8. package/deps/rocksdb/rocksdb/cache/cache_reservation_manager.cc +1 -0
  9. package/deps/rocksdb/rocksdb/cache/charged_cache.cc +117 -0
  10. package/deps/rocksdb/rocksdb/cache/charged_cache.h +121 -0
  11. package/deps/rocksdb/rocksdb/cache/clock_cache.cc +270 -180
  12. package/deps/rocksdb/rocksdb/cache/clock_cache.h +412 -124
  13. package/deps/rocksdb/rocksdb/cache/fast_lru_cache.cc +1 -0
  14. package/deps/rocksdb/rocksdb/cache/lru_cache.cc +1 -1
  15. package/deps/rocksdb/rocksdb/cache/lru_cache.h +2 -2
  16. package/deps/rocksdb/rocksdb/cache/lru_cache_test.cc +2 -2
  17. package/deps/rocksdb/rocksdb/cache/sharded_cache.h +1 -1
  18. package/deps/rocksdb/rocksdb/db/blob/blob_file_builder.cc +71 -9
  19. package/deps/rocksdb/rocksdb/db/blob/blob_file_builder.h +11 -2
  20. package/deps/rocksdb/rocksdb/db/blob/blob_file_builder_test.cc +21 -14
  21. package/deps/rocksdb/rocksdb/db/blob/blob_source.cc +68 -7
  22. package/deps/rocksdb/rocksdb/db/blob/blob_source.h +16 -0
  23. package/deps/rocksdb/rocksdb/db/blob/blob_source_test.cc +519 -12
  24. package/deps/rocksdb/rocksdb/db/blob/db_blob_basic_test.cc +120 -0
  25. package/deps/rocksdb/rocksdb/db/builder.cc +15 -5
  26. package/deps/rocksdb/rocksdb/db/builder.h +3 -0
  27. package/deps/rocksdb/rocksdb/db/c.cc +18 -0
  28. package/deps/rocksdb/rocksdb/db/c_test.c +18 -0
  29. package/deps/rocksdb/rocksdb/db/column_family.h +2 -0
  30. package/deps/rocksdb/rocksdb/db/compaction/clipping_iterator.h +3 -2
  31. package/deps/rocksdb/rocksdb/db/compaction/compaction.cc +9 -4
  32. package/deps/rocksdb/rocksdb/db/compaction/compaction_iterator.cc +15 -10
  33. package/deps/rocksdb/rocksdb/db/compaction/compaction_iterator.h +36 -34
  34. package/deps/rocksdb/rocksdb/db/compaction/compaction_job.cc +50 -13
  35. package/deps/rocksdb/rocksdb/db/compaction/compaction_job.h +12 -0
  36. package/deps/rocksdb/rocksdb/db/compaction/compaction_outputs.cc +8 -1
  37. package/deps/rocksdb/rocksdb/db/compaction/compaction_outputs.h +2 -1
  38. package/deps/rocksdb/rocksdb/db/compaction/tiered_compaction_test.cc +13 -17
  39. package/deps/rocksdb/rocksdb/db/db_basic_test.cc +26 -9
  40. package/deps/rocksdb/rocksdb/db/db_compaction_test.cc +0 -11
  41. package/deps/rocksdb/rocksdb/db/db_impl/db_impl.cc +93 -0
  42. package/deps/rocksdb/rocksdb/db/db_impl/db_impl.h +16 -1
  43. package/deps/rocksdb/rocksdb/db/db_impl/db_impl_compaction_flush.cc +3 -8
  44. package/deps/rocksdb/rocksdb/db/db_impl/db_impl_debug.cc +8 -1
  45. package/deps/rocksdb/rocksdb/db/db_impl/db_impl_open.cc +17 -5
  46. package/deps/rocksdb/rocksdb/db/db_test.cc +0 -3
  47. package/deps/rocksdb/rocksdb/db/db_test2.cc +39 -12
  48. package/deps/rocksdb/rocksdb/db/db_test_util.cc +9 -0
  49. package/deps/rocksdb/rocksdb/db/db_test_util.h +2 -0
  50. package/deps/rocksdb/rocksdb/db/dbformat.cc +0 -38
  51. package/deps/rocksdb/rocksdb/db/dbformat.h +14 -13
  52. package/deps/rocksdb/rocksdb/db/dbformat_test.cc +5 -2
  53. package/deps/rocksdb/rocksdb/db/event_helpers.cc +13 -1
  54. package/deps/rocksdb/rocksdb/db/external_sst_file_basic_test.cc +0 -10
  55. package/deps/rocksdb/rocksdb/db/flush_job.cc +19 -15
  56. package/deps/rocksdb/rocksdb/db/flush_job.h +7 -0
  57. package/deps/rocksdb/rocksdb/db/flush_job_test.cc +21 -15
  58. package/deps/rocksdb/rocksdb/db/forward_iterator.h +4 -3
  59. package/deps/rocksdb/rocksdb/db/memtable_list.cc +9 -0
  60. package/deps/rocksdb/rocksdb/db/memtable_list.h +5 -0
  61. package/deps/rocksdb/rocksdb/db/periodic_work_scheduler.cc +53 -12
  62. package/deps/rocksdb/rocksdb/db/periodic_work_scheduler.h +14 -2
  63. package/deps/rocksdb/rocksdb/db/periodic_work_scheduler_test.cc +10 -10
  64. package/deps/rocksdb/rocksdb/db/repair.cc +8 -6
  65. package/deps/rocksdb/rocksdb/db/seqno_time_test.cc +890 -0
  66. package/deps/rocksdb/rocksdb/db/seqno_to_time_mapping.cc +324 -0
  67. package/deps/rocksdb/rocksdb/db/seqno_to_time_mapping.h +186 -0
  68. package/deps/rocksdb/rocksdb/db_stress_tool/db_stress_common.h +2 -0
  69. package/deps/rocksdb/rocksdb/db_stress_tool/db_stress_gflags.cc +13 -4
  70. package/deps/rocksdb/rocksdb/db_stress_tool/db_stress_test_base.cc +23 -2
  71. package/deps/rocksdb/rocksdb/env/env_test.cc +74 -1
  72. package/deps/rocksdb/rocksdb/env/io_posix.cc +11 -8
  73. package/deps/rocksdb/rocksdb/include/rocksdb/advanced_options.h +28 -0
  74. package/deps/rocksdb/rocksdb/include/rocksdb/c.h +14 -1
  75. package/deps/rocksdb/rocksdb/include/rocksdb/cache.h +4 -4
  76. package/deps/rocksdb/rocksdb/include/rocksdb/comparator.h +30 -23
  77. package/deps/rocksdb/rocksdb/include/rocksdb/db.h +1 -1
  78. package/deps/rocksdb/rocksdb/include/rocksdb/rate_limiter.h +3 -13
  79. package/deps/rocksdb/rocksdb/include/rocksdb/table_properties.h +5 -0
  80. package/deps/rocksdb/rocksdb/include/rocksdb/utilities/debug.h +1 -2
  81. package/deps/rocksdb/rocksdb/include/rocksdb/utilities/ldb_cmd.h +1 -0
  82. package/deps/rocksdb/rocksdb/include/rocksdb/version.h +1 -1
  83. package/deps/rocksdb/rocksdb/monitoring/stats_history_test.cc +26 -26
  84. package/deps/rocksdb/rocksdb/options/cf_options.cc +14 -1
  85. package/deps/rocksdb/rocksdb/options/cf_options.h +5 -0
  86. package/deps/rocksdb/rocksdb/options/customizable_test.cc +0 -56
  87. package/deps/rocksdb/rocksdb/options/db_options.cc +4 -5
  88. package/deps/rocksdb/rocksdb/options/options.cc +11 -1
  89. package/deps/rocksdb/rocksdb/options/options_helper.cc +8 -0
  90. package/deps/rocksdb/rocksdb/options/options_helper.h +4 -0
  91. package/deps/rocksdb/rocksdb/options/options_settable_test.cc +4 -0
  92. package/deps/rocksdb/rocksdb/options/options_test.cc +4 -0
  93. package/deps/rocksdb/rocksdb/src.mk +3 -0
  94. package/deps/rocksdb/rocksdb/table/block_based/block_based_table_builder.cc +6 -1
  95. package/deps/rocksdb/rocksdb/table/block_based/block_based_table_builder.h +4 -0
  96. package/deps/rocksdb/rocksdb/table/block_based/block_based_table_factory.cc +36 -3
  97. package/deps/rocksdb/rocksdb/table/block_based/index_builder.cc +36 -1
  98. package/deps/rocksdb/rocksdb/table/block_based/index_builder.h +14 -3
  99. package/deps/rocksdb/rocksdb/table/internal_iterator.h +1 -1
  100. package/deps/rocksdb/rocksdb/table/meta_blocks.cc +6 -0
  101. package/deps/rocksdb/rocksdb/table/plain/plain_table_builder.cc +5 -0
  102. package/deps/rocksdb/rocksdb/table/plain/plain_table_builder.h +3 -0
  103. package/deps/rocksdb/rocksdb/table/sst_file_writer.cc +10 -7
  104. package/deps/rocksdb/rocksdb/table/table_builder.h +7 -3
  105. package/deps/rocksdb/rocksdb/table/table_properties.cc +9 -0
  106. package/deps/rocksdb/rocksdb/test_util/mock_time_env.h +3 -2
  107. package/deps/rocksdb/rocksdb/tools/db_bench_tool.cc +58 -30
  108. package/deps/rocksdb/rocksdb/tools/db_bench_tool_test.cc +1 -0
  109. package/deps/rocksdb/rocksdb/tools/ldb_cmd.cc +20 -0
  110. package/deps/rocksdb/rocksdb/util/rate_limiter.cc +29 -154
  111. package/deps/rocksdb/rocksdb/util/rate_limiter.h +16 -34
  112. package/deps/rocksdb/rocksdb/util/rate_limiter_test.cc +0 -92
  113. package/deps/rocksdb/rocksdb/util/timer.h +6 -0
  114. package/deps/rocksdb/rocksdb/util/vector_iterator.h +4 -3
  115. package/deps/rocksdb/rocksdb/utilities/backup/backup_engine.cc +4 -45
  116. package/deps/rocksdb/rocksdb/utilities/debug.cc +40 -0
  117. package/deps/rocksdb/rocksdb.gyp +2 -0
  118. package/package.json +1 -1
  119. package/prebuilds/darwin-arm64/node.napi.node +0 -0
  120. package/{deps/rocksdb/rocksdb/prebuilds → prebuilds}/linux-x64/node.napi.node +0 -0
@@ -11,6 +11,8 @@
11
11
  #include <memory>
12
12
  #include <string>
13
13
 
14
+ #include "cache/charged_cache.h"
15
+ #include "cache/compressed_secondary_cache.h"
14
16
  #include "db/blob/blob_file_cache.h"
15
17
  #include "db/blob/blob_file_reader.h"
16
18
  #include "db/blob/blob_log_format.h"
@@ -21,6 +23,7 @@
21
23
  #include "options/cf_options.h"
22
24
  #include "rocksdb/options.h"
23
25
  #include "util/compression.h"
26
+ #include "util/random.h"
24
27
 
25
28
  namespace ROCKSDB_NAMESPACE {
26
29
 
@@ -181,9 +184,10 @@ TEST_F(BlobSourceTest, GetBlobsFromCache) {
181
184
  FileOptions file_options;
182
185
  constexpr HistogramImpl* blob_file_read_hist = nullptr;
183
186
 
184
- std::unique_ptr<BlobFileCache> blob_file_cache(new BlobFileCache(
185
- backing_cache.get(), &immutable_options, &file_options, column_family_id,
186
- blob_file_read_hist, nullptr /*IOTracer*/));
187
+ std::unique_ptr<BlobFileCache> blob_file_cache =
188
+ std::make_unique<BlobFileCache>(
189
+ backing_cache.get(), &immutable_options, &file_options,
190
+ column_family_id, blob_file_read_hist, nullptr /*IOTracer*/);
187
191
 
188
192
  BlobSource blob_source(&immutable_options, db_id_, db_session_id_,
189
193
  blob_file_cache.get());
@@ -479,9 +483,10 @@ TEST_F(BlobSourceTest, GetCompressedBlobs) {
479
483
  auto backing_cache = NewLRUCache(capacity); // Blob file cache
480
484
 
481
485
  FileOptions file_options;
482
- std::unique_ptr<BlobFileCache> blob_file_cache(new BlobFileCache(
483
- backing_cache.get(), &immutable_options, &file_options, column_family_id,
484
- nullptr /*HistogramImpl*/, nullptr /*IOTracer*/));
486
+ std::unique_ptr<BlobFileCache> blob_file_cache =
487
+ std::make_unique<BlobFileCache>(
488
+ backing_cache.get(), &immutable_options, &file_options,
489
+ column_family_id, nullptr /*HistogramImpl*/, nullptr /*IOTracer*/);
485
490
 
486
491
  BlobSource blob_source(&immutable_options, db_id_, db_session_id_,
487
492
  blob_file_cache.get());
@@ -623,9 +628,10 @@ TEST_F(BlobSourceTest, MultiGetBlobsFromMultiFiles) {
623
628
  FileOptions file_options;
624
629
  constexpr HistogramImpl* blob_file_read_hist = nullptr;
625
630
 
626
- std::unique_ptr<BlobFileCache> blob_file_cache(new BlobFileCache(
627
- backing_cache.get(), &immutable_options, &file_options, column_family_id,
628
- blob_file_read_hist, nullptr /*IOTracer*/));
631
+ std::unique_ptr<BlobFileCache> blob_file_cache =
632
+ std::make_unique<BlobFileCache>(
633
+ backing_cache.get(), &immutable_options, &file_options,
634
+ column_family_id, blob_file_read_hist, nullptr /*IOTracer*/);
629
635
 
630
636
  BlobSource blob_source(&immutable_options, db_id_, db_session_id_,
631
637
  blob_file_cache.get());
@@ -805,9 +811,10 @@ TEST_F(BlobSourceTest, MultiGetBlobsFromCache) {
805
811
  FileOptions file_options;
806
812
  constexpr HistogramImpl* blob_file_read_hist = nullptr;
807
813
 
808
- std::unique_ptr<BlobFileCache> blob_file_cache(new BlobFileCache(
809
- backing_cache.get(), &immutable_options, &file_options, column_family_id,
810
- blob_file_read_hist, nullptr /*IOTracer*/));
814
+ std::unique_ptr<BlobFileCache> blob_file_cache =
815
+ std::make_unique<BlobFileCache>(
816
+ backing_cache.get(), &immutable_options, &file_options,
817
+ column_family_id, blob_file_read_hist, nullptr /*IOTracer*/);
811
818
 
812
819
  BlobSource blob_source(&immutable_options, db_id_, db_session_id_,
813
820
  blob_file_cache.get());
@@ -1020,6 +1027,506 @@ TEST_F(BlobSourceTest, MultiGetBlobsFromCache) {
1020
1027
  }
1021
1028
  }
1022
1029
 
1030
+ class BlobSecondaryCacheTest : public DBTestBase {
1031
+ protected:
1032
+ public:
1033
+ explicit BlobSecondaryCacheTest()
1034
+ : DBTestBase("blob_secondary_cache_test", /*env_do_fsync=*/true) {
1035
+ options_.env = env_;
1036
+ options_.enable_blob_files = true;
1037
+ options_.create_if_missing = true;
1038
+
1039
+ // Set a small cache capacity to evict entries from the cache, and to test
1040
+ // that secondary cache is used properly.
1041
+ lru_cache_ops_.capacity = 1024;
1042
+ lru_cache_ops_.num_shard_bits = 0;
1043
+ lru_cache_ops_.strict_capacity_limit = true;
1044
+ lru_cache_ops_.metadata_charge_policy = kDontChargeCacheMetadata;
1045
+
1046
+ secondary_cache_opts_.capacity = 8 << 20; // 8 MB
1047
+ secondary_cache_opts_.num_shard_bits = 0;
1048
+ secondary_cache_opts_.metadata_charge_policy = kDontChargeCacheMetadata;
1049
+
1050
+ // Read blobs from the secondary cache if they are not in the primary cache
1051
+ options_.lowest_used_cache_tier = CacheTier::kNonVolatileBlockTier;
1052
+
1053
+ assert(db_->GetDbIdentity(db_id_).ok());
1054
+ assert(db_->GetDbSessionId(db_session_id_).ok());
1055
+ }
1056
+
1057
+ Options options_;
1058
+
1059
+ LRUCacheOptions lru_cache_ops_;
1060
+ CompressedSecondaryCacheOptions secondary_cache_opts_;
1061
+
1062
+ std::string db_id_;
1063
+ std::string db_session_id_;
1064
+ };
1065
+
1066
+ TEST_F(BlobSecondaryCacheTest, GetBlobsFromSecondaryCache) {
1067
+ if (!Snappy_Supported()) {
1068
+ return;
1069
+ }
1070
+
1071
+ secondary_cache_opts_.compression_type = kSnappyCompression;
1072
+ lru_cache_ops_.secondary_cache =
1073
+ NewCompressedSecondaryCache(secondary_cache_opts_);
1074
+ options_.blob_cache = NewLRUCache(lru_cache_ops_);
1075
+
1076
+ options_.cf_paths.emplace_back(
1077
+ test::PerThreadDBPath(
1078
+ env_, "BlobSecondaryCacheTest_GetBlobsFromSecondaryCache"),
1079
+ 0);
1080
+
1081
+ options_.statistics = CreateDBStatistics();
1082
+ Statistics* statistics = options_.statistics.get();
1083
+ assert(statistics);
1084
+
1085
+ DestroyAndReopen(options_);
1086
+
1087
+ ImmutableOptions immutable_options(options_);
1088
+
1089
+ constexpr uint32_t column_family_id = 1;
1090
+ constexpr bool has_ttl = false;
1091
+ constexpr ExpirationRange expiration_range;
1092
+ constexpr uint64_t file_number = 1;
1093
+
1094
+ Random rnd(301);
1095
+
1096
+ std::vector<std::string> key_strs{"key0", "key1"};
1097
+ std::vector<std::string> blob_strs{rnd.RandomString(1010),
1098
+ rnd.RandomString(1020)};
1099
+
1100
+ std::vector<Slice> keys{key_strs[0], key_strs[1]};
1101
+ std::vector<Slice> blobs{blob_strs[0], blob_strs[1]};
1102
+
1103
+ std::vector<uint64_t> blob_offsets(keys.size());
1104
+ std::vector<uint64_t> blob_sizes(keys.size());
1105
+
1106
+ WriteBlobFile(immutable_options, column_family_id, has_ttl, expiration_range,
1107
+ expiration_range, file_number, keys, blobs, kNoCompression,
1108
+ blob_offsets, blob_sizes);
1109
+
1110
+ constexpr size_t capacity = 1024;
1111
+ std::shared_ptr<Cache> backing_cache = NewLRUCache(capacity);
1112
+
1113
+ FileOptions file_options;
1114
+ constexpr HistogramImpl* blob_file_read_hist = nullptr;
1115
+
1116
+ std::unique_ptr<BlobFileCache> blob_file_cache(new BlobFileCache(
1117
+ backing_cache.get(), &immutable_options, &file_options, column_family_id,
1118
+ blob_file_read_hist, nullptr /*IOTracer*/));
1119
+
1120
+ BlobSource blob_source(&immutable_options, db_id_, db_session_id_,
1121
+ blob_file_cache.get());
1122
+
1123
+ CacheHandleGuard<BlobFileReader> file_reader;
1124
+ ASSERT_OK(blob_source.GetBlobFileReader(file_number, &file_reader));
1125
+ ASSERT_NE(file_reader.GetValue(), nullptr);
1126
+ const uint64_t file_size = file_reader.GetValue()->GetFileSize();
1127
+ ASSERT_EQ(file_reader.GetValue()->GetCompressionType(), kNoCompression);
1128
+
1129
+ ReadOptions read_options;
1130
+ read_options.verify_checksums = true;
1131
+
1132
+ auto blob_cache = options_.blob_cache;
1133
+ auto secondary_cache = lru_cache_ops_.secondary_cache;
1134
+
1135
+ Cache::CreateCallback create_cb = [&](const void* buf, size_t size,
1136
+ void** out_obj,
1137
+ size_t* charge) -> Status {
1138
+ std::string* blob = new std::string();
1139
+ blob->assign(static_cast<const char*>(buf), size);
1140
+ *out_obj = blob;
1141
+ *charge = size;
1142
+ return Status::OK();
1143
+ };
1144
+
1145
+ {
1146
+ // GetBlob
1147
+ std::vector<PinnableSlice> values(keys.size());
1148
+
1149
+ read_options.fill_cache = true;
1150
+ get_perf_context()->Reset();
1151
+
1152
+ // key0 should be filled to the primary cache from the blob file.
1153
+ ASSERT_OK(blob_source.GetBlob(read_options, keys[0], file_number,
1154
+ blob_offsets[0], file_size, blob_sizes[0],
1155
+ kNoCompression, nullptr /* prefetch_buffer */,
1156
+ &values[0], nullptr /* bytes_read */));
1157
+ ASSERT_EQ(values[0], blobs[0]);
1158
+ ASSERT_TRUE(
1159
+ blob_source.TEST_BlobInCache(file_number, file_size, blob_offsets[0]));
1160
+
1161
+ // key0 should be demoted to the secondary cache, and key1 should be filled
1162
+ // to the primary cache from the blob file.
1163
+ ASSERT_OK(blob_source.GetBlob(read_options, keys[1], file_number,
1164
+ blob_offsets[1], file_size, blob_sizes[1],
1165
+ kNoCompression, nullptr /* prefetch_buffer */,
1166
+ &values[1], nullptr /* bytes_read */));
1167
+ ASSERT_EQ(values[1], blobs[1]);
1168
+ ASSERT_TRUE(
1169
+ blob_source.TEST_BlobInCache(file_number, file_size, blob_offsets[1]));
1170
+
1171
+ OffsetableCacheKey base_cache_key(db_id_, db_session_id_, file_number,
1172
+ file_size);
1173
+
1174
+ // blob_cache here only looks at the primary cache since we didn't provide
1175
+ // the cache item helper for the secondary cache. However, since key0 is
1176
+ // demoted to the secondary cache, we shouldn't be able to find it in the
1177
+ // primary cache.
1178
+ {
1179
+ CacheKey cache_key = base_cache_key.WithOffset(blob_offsets[0]);
1180
+ const Slice key0 = cache_key.AsSlice();
1181
+ auto handle0 = blob_cache->Lookup(key0, statistics);
1182
+ ASSERT_EQ(handle0, nullptr);
1183
+
1184
+ // key0 should be in the secondary cache. After looking up key0 in the
1185
+ // secondary cache, it will be erased from the secondary cache.
1186
+ bool is_in_sec_cache = false;
1187
+ auto sec_handle0 =
1188
+ secondary_cache->Lookup(key0, create_cb, true, is_in_sec_cache);
1189
+ ASSERT_FALSE(is_in_sec_cache);
1190
+ ASSERT_NE(sec_handle0, nullptr);
1191
+ ASSERT_TRUE(sec_handle0->IsReady());
1192
+ auto value = static_cast<std::string*>(sec_handle0->Value());
1193
+ ASSERT_EQ(*value, blobs[0]);
1194
+ delete value;
1195
+
1196
+ // key0 doesn't exist in the blob cache
1197
+ ASSERT_FALSE(blob_source.TEST_BlobInCache(file_number, file_size,
1198
+ blob_offsets[0]));
1199
+ }
1200
+
1201
+ // key1 should exist in the primary cache.
1202
+ {
1203
+ CacheKey cache_key = base_cache_key.WithOffset(blob_offsets[1]);
1204
+ const Slice key1 = cache_key.AsSlice();
1205
+ auto handle1 = blob_cache->Lookup(key1, statistics);
1206
+ ASSERT_NE(handle1, nullptr);
1207
+ blob_cache->Release(handle1);
1208
+
1209
+ bool is_in_sec_cache = false;
1210
+ auto sec_handle1 =
1211
+ secondary_cache->Lookup(key1, create_cb, true, is_in_sec_cache);
1212
+ ASSERT_FALSE(is_in_sec_cache);
1213
+ ASSERT_EQ(sec_handle1, nullptr);
1214
+
1215
+ ASSERT_TRUE(blob_source.TEST_BlobInCache(file_number, file_size,
1216
+ blob_offsets[1]));
1217
+ }
1218
+
1219
+ {
1220
+ // fetch key0 from the blob file to the primary cache.
1221
+ ASSERT_OK(blob_source.GetBlob(
1222
+ read_options, keys[0], file_number, blob_offsets[0], file_size,
1223
+ blob_sizes[0], kNoCompression, nullptr /* prefetch_buffer */,
1224
+ &values[0], nullptr /* bytes_read */));
1225
+ ASSERT_EQ(values[0], blobs[0]);
1226
+
1227
+ // key0 should be in the primary cache.
1228
+ CacheKey cache_key0 = base_cache_key.WithOffset(blob_offsets[0]);
1229
+ const Slice key0 = cache_key0.AsSlice();
1230
+ auto handle0 = blob_cache->Lookup(key0, statistics);
1231
+ ASSERT_NE(handle0, nullptr);
1232
+ auto value = static_cast<std::string*>(blob_cache->Value(handle0));
1233
+ ASSERT_EQ(*value, blobs[0]);
1234
+ blob_cache->Release(handle0);
1235
+
1236
+ // key1 is not in the primary cache, and it should be demoted to the
1237
+ // secondary cache.
1238
+ CacheKey cache_key1 = base_cache_key.WithOffset(blob_offsets[1]);
1239
+ const Slice key1 = cache_key1.AsSlice();
1240
+ auto handle1 = blob_cache->Lookup(key1, statistics);
1241
+ ASSERT_EQ(handle1, nullptr);
1242
+
1243
+ // erase key0 from the primary cache.
1244
+ blob_cache->Erase(key0);
1245
+ handle0 = blob_cache->Lookup(key0, statistics);
1246
+ ASSERT_EQ(handle0, nullptr);
1247
+
1248
+ // key1 promotion should succeed due to the primary cache being empty. we
1249
+ // didn't call secondary cache's Lookup() here, because it will remove the
1250
+ // key but it won't be able to promote the key to the primary cache.
1251
+ // Instead we use the end-to-end blob source API to promote the key to
1252
+ // the primary cache.
1253
+ ASSERT_TRUE(blob_source.TEST_BlobInCache(file_number, file_size,
1254
+ blob_offsets[1]));
1255
+
1256
+ // key1 should be in the primary cache.
1257
+ handle1 = blob_cache->Lookup(key1, statistics);
1258
+ ASSERT_NE(handle1, nullptr);
1259
+ value = static_cast<std::string*>(blob_cache->Value(handle1));
1260
+ ASSERT_EQ(*value, blobs[1]);
1261
+ blob_cache->Release(handle1);
1262
+ }
1263
+ }
1264
+ }
1265
+
1266
+ class BlobSourceCacheReservationTest : public DBTestBase {
1267
+ public:
1268
+ explicit BlobSourceCacheReservationTest()
1269
+ : DBTestBase("blob_source_cache_reservation_test",
1270
+ /*env_do_fsync=*/true) {
1271
+ options_.env = env_;
1272
+ options_.enable_blob_files = true;
1273
+ options_.create_if_missing = true;
1274
+
1275
+ LRUCacheOptions co;
1276
+ co.capacity = kCacheCapacity;
1277
+ co.num_shard_bits = kNumShardBits;
1278
+ co.metadata_charge_policy = kDontChargeCacheMetadata;
1279
+ std::shared_ptr<Cache> blob_cache = NewLRUCache(co);
1280
+ std::shared_ptr<Cache> block_cache = NewLRUCache(co);
1281
+
1282
+ options_.blob_cache = blob_cache;
1283
+ options_.lowest_used_cache_tier = CacheTier::kVolatileTier;
1284
+
1285
+ BlockBasedTableOptions block_based_options;
1286
+ block_based_options.no_block_cache = false;
1287
+ block_based_options.block_cache = block_cache;
1288
+ block_based_options.cache_usage_options.options_overrides.insert(
1289
+ {CacheEntryRole::kBlobCache,
1290
+ {/* charged = */ CacheEntryRoleOptions::Decision::kEnabled}});
1291
+ options_.table_factory.reset(
1292
+ NewBlockBasedTableFactory(block_based_options));
1293
+
1294
+ assert(db_->GetDbIdentity(db_id_).ok());
1295
+ assert(db_->GetDbSessionId(db_session_id_).ok());
1296
+ }
1297
+
1298
+ void GenerateKeysAndBlobs() {
1299
+ for (size_t i = 0; i < kNumBlobs; ++i) {
1300
+ key_strs_.push_back("key" + std::to_string(i));
1301
+ blob_strs_.push_back("blob" + std::to_string(i));
1302
+ }
1303
+
1304
+ blob_file_size_ = BlobLogHeader::kSize;
1305
+ for (size_t i = 0; i < kNumBlobs; ++i) {
1306
+ keys_.push_back({key_strs_[i]});
1307
+ blobs_.push_back({blob_strs_[i]});
1308
+ blob_file_size_ +=
1309
+ BlobLogRecord::kHeaderSize + keys_[i].size() + blobs_[i].size();
1310
+ }
1311
+ blob_file_size_ += BlobLogFooter::kSize;
1312
+ }
1313
+
1314
+ static constexpr std::size_t kSizeDummyEntry = CacheReservationManagerImpl<
1315
+ CacheEntryRole::kBlobCache>::GetDummyEntrySize();
1316
+ static constexpr std::size_t kCacheCapacity = 1 * kSizeDummyEntry;
1317
+ static constexpr int kNumShardBits = 0; // 2^0 shard
1318
+
1319
+ static constexpr uint32_t kColumnFamilyId = 1;
1320
+ static constexpr bool kHasTTL = false;
1321
+ static constexpr uint64_t kBlobFileNumber = 1;
1322
+ static constexpr size_t kNumBlobs = 16;
1323
+
1324
+ std::vector<Slice> keys_;
1325
+ std::vector<Slice> blobs_;
1326
+ std::vector<std::string> key_strs_;
1327
+ std::vector<std::string> blob_strs_;
1328
+ uint64_t blob_file_size_;
1329
+
1330
+ Options options_;
1331
+ std::string db_id_;
1332
+ std::string db_session_id_;
1333
+ };
1334
+
1335
+ #ifndef ROCKSDB_LITE
1336
+ TEST_F(BlobSourceCacheReservationTest, SimpleCacheReservation) {
1337
+ options_.cf_paths.emplace_back(
1338
+ test::PerThreadDBPath(
1339
+ env_, "BlobSourceCacheReservationTest_SimpleCacheReservation"),
1340
+ 0);
1341
+
1342
+ GenerateKeysAndBlobs();
1343
+
1344
+ DestroyAndReopen(options_);
1345
+
1346
+ ImmutableOptions immutable_options(options_);
1347
+
1348
+ constexpr ExpirationRange expiration_range;
1349
+
1350
+ std::vector<uint64_t> blob_offsets(keys_.size());
1351
+ std::vector<uint64_t> blob_sizes(keys_.size());
1352
+
1353
+ WriteBlobFile(immutable_options, kColumnFamilyId, kHasTTL, expiration_range,
1354
+ expiration_range, kBlobFileNumber, keys_, blobs_,
1355
+ kNoCompression, blob_offsets, blob_sizes);
1356
+
1357
+ constexpr size_t capacity = 10;
1358
+ std::shared_ptr<Cache> backing_cache = NewLRUCache(capacity);
1359
+
1360
+ FileOptions file_options;
1361
+ constexpr HistogramImpl* blob_file_read_hist = nullptr;
1362
+
1363
+ std::unique_ptr<BlobFileCache> blob_file_cache =
1364
+ std::make_unique<BlobFileCache>(
1365
+ backing_cache.get(), &immutable_options, &file_options,
1366
+ kColumnFamilyId, blob_file_read_hist, nullptr /*IOTracer*/);
1367
+
1368
+ BlobSource blob_source(&immutable_options, db_id_, db_session_id_,
1369
+ blob_file_cache.get());
1370
+
1371
+ ConcurrentCacheReservationManager* cache_res_mgr =
1372
+ static_cast<ChargedCache*>(blob_source.GetBlobCache())
1373
+ ->TEST_GetCacheReservationManager();
1374
+ ASSERT_NE(cache_res_mgr, nullptr);
1375
+
1376
+ ReadOptions read_options;
1377
+ read_options.verify_checksums = true;
1378
+
1379
+ std::vector<PinnableSlice> values(keys_.size());
1380
+
1381
+ {
1382
+ read_options.fill_cache = false;
1383
+
1384
+ for (size_t i = 0; i < kNumBlobs; ++i) {
1385
+ ASSERT_OK(blob_source.GetBlob(
1386
+ read_options, keys_[i], kBlobFileNumber, blob_offsets[i],
1387
+ blob_file_size_, blob_sizes[i], kNoCompression,
1388
+ nullptr /* prefetch_buffer */, &values[i], nullptr /* bytes_read */));
1389
+ ASSERT_EQ(cache_res_mgr->GetTotalReservedCacheSize(), 0);
1390
+ ASSERT_EQ(cache_res_mgr->GetTotalMemoryUsed(), 0);
1391
+ }
1392
+ }
1393
+
1394
+ {
1395
+ read_options.fill_cache = true;
1396
+
1397
+ // num_blobs is 16, so the total blob cache usage is less than a single
1398
+ // dummy entry. Therefore, cache reservation manager only reserves one dummy
1399
+ // entry here.
1400
+ uint64_t blob_bytes = 0;
1401
+ for (size_t i = 0; i < kNumBlobs; ++i) {
1402
+ ASSERT_OK(blob_source.GetBlob(
1403
+ read_options, keys_[i], kBlobFileNumber, blob_offsets[i],
1404
+ blob_file_size_, blob_sizes[i], kNoCompression,
1405
+ nullptr /* prefetch_buffer */, &values[i], nullptr /* bytes_read */));
1406
+ blob_bytes += blob_sizes[i];
1407
+ ASSERT_EQ(cache_res_mgr->GetTotalReservedCacheSize(), kSizeDummyEntry);
1408
+ ASSERT_EQ(cache_res_mgr->GetTotalMemoryUsed(), blob_bytes);
1409
+ ASSERT_EQ(cache_res_mgr->GetTotalMemoryUsed(),
1410
+ options_.blob_cache->GetUsage());
1411
+ }
1412
+ }
1413
+
1414
+ {
1415
+ OffsetableCacheKey base_cache_key(db_id_, db_session_id_, kBlobFileNumber,
1416
+ blob_file_size_);
1417
+ size_t blob_bytes = options_.blob_cache->GetUsage();
1418
+
1419
+ for (size_t i = 0; i < kNumBlobs; ++i) {
1420
+ CacheKey cache_key = base_cache_key.WithOffset(blob_offsets[i]);
1421
+ // We didn't call options_.blob_cache->Erase() here, this is because
1422
+ // the cache wrapper's Erase() method must be called to update the
1423
+ // cache usage after erasing the cache entry.
1424
+ blob_source.GetBlobCache()->Erase(cache_key.AsSlice());
1425
+ if (i == kNumBlobs - 1) {
1426
+ // The last blob is not in the cache. cache_res_mgr should not reserve
1427
+ // any space for it.
1428
+ ASSERT_EQ(cache_res_mgr->GetTotalReservedCacheSize(), 0);
1429
+ } else {
1430
+ ASSERT_EQ(cache_res_mgr->GetTotalReservedCacheSize(), kSizeDummyEntry);
1431
+ }
1432
+ blob_bytes -= blob_sizes[i];
1433
+ ASSERT_EQ(cache_res_mgr->GetTotalMemoryUsed(), blob_bytes);
1434
+ ASSERT_EQ(cache_res_mgr->GetTotalMemoryUsed(),
1435
+ options_.blob_cache->GetUsage());
1436
+ }
1437
+ }
1438
+ }
1439
+
1440
+ TEST_F(BlobSourceCacheReservationTest, IncreaseCacheReservationOnFullCache) {
1441
+ options_.cf_paths.emplace_back(
1442
+ test::PerThreadDBPath(
1443
+ env_,
1444
+ "BlobSourceCacheReservationTest_IncreaseCacheReservationOnFullCache"),
1445
+ 0);
1446
+
1447
+ GenerateKeysAndBlobs();
1448
+
1449
+ DestroyAndReopen(options_);
1450
+
1451
+ ImmutableOptions immutable_options(options_);
1452
+ constexpr size_t blob_size = kSizeDummyEntry / (kNumBlobs / 2);
1453
+ for (size_t i = 0; i < kNumBlobs; ++i) {
1454
+ blob_file_size_ -= blobs_[i].size(); // old blob size
1455
+ blob_strs_[i].resize(blob_size, '@');
1456
+ blobs_[i] = Slice(blob_strs_[i]);
1457
+ blob_file_size_ += blobs_[i].size(); // new blob size
1458
+ }
1459
+
1460
+ std::vector<uint64_t> blob_offsets(keys_.size());
1461
+ std::vector<uint64_t> blob_sizes(keys_.size());
1462
+
1463
+ constexpr ExpirationRange expiration_range;
1464
+ WriteBlobFile(immutable_options, kColumnFamilyId, kHasTTL, expiration_range,
1465
+ expiration_range, kBlobFileNumber, keys_, blobs_,
1466
+ kNoCompression, blob_offsets, blob_sizes);
1467
+
1468
+ constexpr size_t capacity = 10;
1469
+ std::shared_ptr<Cache> backing_cache = NewLRUCache(capacity);
1470
+
1471
+ FileOptions file_options;
1472
+ constexpr HistogramImpl* blob_file_read_hist = nullptr;
1473
+
1474
+ std::unique_ptr<BlobFileCache> blob_file_cache =
1475
+ std::make_unique<BlobFileCache>(
1476
+ backing_cache.get(), &immutable_options, &file_options,
1477
+ kColumnFamilyId, blob_file_read_hist, nullptr /*IOTracer*/);
1478
+
1479
+ BlobSource blob_source(&immutable_options, db_id_, db_session_id_,
1480
+ blob_file_cache.get());
1481
+
1482
+ ConcurrentCacheReservationManager* cache_res_mgr =
1483
+ static_cast<ChargedCache*>(blob_source.GetBlobCache())
1484
+ ->TEST_GetCacheReservationManager();
1485
+ ASSERT_NE(cache_res_mgr, nullptr);
1486
+
1487
+ ReadOptions read_options;
1488
+ read_options.verify_checksums = true;
1489
+
1490
+ std::vector<PinnableSlice> values(keys_.size());
1491
+
1492
+ {
1493
+ read_options.fill_cache = false;
1494
+
1495
+ for (size_t i = 0; i < kNumBlobs; ++i) {
1496
+ ASSERT_OK(blob_source.GetBlob(
1497
+ read_options, keys_[i], kBlobFileNumber, blob_offsets[i],
1498
+ blob_file_size_, blob_sizes[i], kNoCompression,
1499
+ nullptr /* prefetch_buffer */, &values[i], nullptr /* bytes_read */));
1500
+ ASSERT_EQ(cache_res_mgr->GetTotalReservedCacheSize(), 0);
1501
+ ASSERT_EQ(cache_res_mgr->GetTotalMemoryUsed(), 0);
1502
+ }
1503
+ }
1504
+
1505
+ {
1506
+ read_options.fill_cache = true;
1507
+
1508
+ // Since we resized each blob to be kSizeDummyEntry / (num_blobs/ 2), we
1509
+ // should observe cache eviction for the second half blobs.
1510
+ uint64_t blob_bytes = 0;
1511
+ for (size_t i = 0; i < kNumBlobs; ++i) {
1512
+ ASSERT_OK(blob_source.GetBlob(
1513
+ read_options, keys_[i], kBlobFileNumber, blob_offsets[i],
1514
+ blob_file_size_, blob_sizes[i], kNoCompression,
1515
+ nullptr /* prefetch_buffer */, &values[i], nullptr /* bytes_read */));
1516
+ blob_bytes += blob_sizes[i];
1517
+ ASSERT_EQ(cache_res_mgr->GetTotalReservedCacheSize(), kSizeDummyEntry);
1518
+ if (i >= kNumBlobs / 2) {
1519
+ ASSERT_EQ(cache_res_mgr->GetTotalMemoryUsed(), kSizeDummyEntry);
1520
+ } else {
1521
+ ASSERT_EQ(cache_res_mgr->GetTotalMemoryUsed(), blob_bytes);
1522
+ }
1523
+ ASSERT_EQ(cache_res_mgr->GetTotalMemoryUsed(),
1524
+ options_.blob_cache->GetUsage());
1525
+ }
1526
+ }
1527
+ }
1528
+ #endif // ROCKSDB_LITE
1529
+
1023
1530
  } // namespace ROCKSDB_NAMESPACE
1024
1531
 
1025
1532
  int main(int argc, char** argv) {