@nxtedition/rocksdb 8.1.4 → 8.1.6

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (223)
  1. package/deps/rocksdb/rocksdb/CMakeLists.txt +21 -0
  2. package/deps/rocksdb/rocksdb/Makefile +15 -3
  3. package/deps/rocksdb/rocksdb/TARGETS +6 -0
  4. package/deps/rocksdb/rocksdb/cache/cache_bench_tool.cc +32 -35
  5. package/deps/rocksdb/rocksdb/cache/cache_entry_roles.cc +0 -30
  6. package/deps/rocksdb/rocksdb/cache/cache_entry_roles.h +0 -83
  7. package/deps/rocksdb/rocksdb/cache/cache_entry_stats.h +13 -14
  8. package/deps/rocksdb/rocksdb/cache/cache_helpers.cc +40 -0
  9. package/deps/rocksdb/rocksdb/cache/cache_helpers.h +14 -20
  10. package/deps/rocksdb/rocksdb/cache/cache_reservation_manager.cc +8 -9
  11. package/deps/rocksdb/rocksdb/cache/cache_reservation_manager.h +5 -4
  12. package/deps/rocksdb/rocksdb/cache/cache_test.cc +124 -156
  13. package/deps/rocksdb/rocksdb/cache/charged_cache.cc +10 -26
  14. package/deps/rocksdb/rocksdb/cache/charged_cache.h +11 -16
  15. package/deps/rocksdb/rocksdb/cache/clock_cache.cc +35 -32
  16. package/deps/rocksdb/rocksdb/cache/clock_cache.h +19 -21
  17. package/deps/rocksdb/rocksdb/cache/compressed_secondary_cache.cc +42 -30
  18. package/deps/rocksdb/rocksdb/cache/compressed_secondary_cache.h +9 -8
  19. package/deps/rocksdb/rocksdb/cache/compressed_secondary_cache_test.cc +91 -143
  20. package/deps/rocksdb/rocksdb/cache/lru_cache.cc +54 -60
  21. package/deps/rocksdb/rocksdb/cache/lru_cache.h +37 -63
  22. package/deps/rocksdb/rocksdb/cache/lru_cache_test.cc +120 -106
  23. package/deps/rocksdb/rocksdb/cache/secondary_cache.cc +14 -5
  24. package/deps/rocksdb/rocksdb/cache/sharded_cache.h +16 -31
  25. package/deps/rocksdb/rocksdb/cache/typed_cache.h +339 -0
  26. package/deps/rocksdb/rocksdb/db/blob/blob_contents.cc +0 -48
  27. package/deps/rocksdb/rocksdb/db/blob/blob_contents.h +18 -15
  28. package/deps/rocksdb/rocksdb/db/blob/blob_file_builder.cc +5 -26
  29. package/deps/rocksdb/rocksdb/db/blob/blob_file_cache.cc +7 -8
  30. package/deps/rocksdb/rocksdb/db/blob/blob_file_cache.h +6 -3
  31. package/deps/rocksdb/rocksdb/db/blob/blob_file_reader.cc +2 -7
  32. package/deps/rocksdb/rocksdb/db/blob/blob_source.cc +19 -47
  33. package/deps/rocksdb/rocksdb/db/blob/blob_source.h +13 -5
  34. package/deps/rocksdb/rocksdb/db/blob/blob_source_test.cc +15 -22
  35. package/deps/rocksdb/rocksdb/db/builder.cc +24 -10
  36. package/deps/rocksdb/rocksdb/db/builder.h +2 -1
  37. package/deps/rocksdb/rocksdb/db/c.cc +15 -0
  38. package/deps/rocksdb/rocksdb/db/c_test.c +3 -0
  39. package/deps/rocksdb/rocksdb/db/column_family.cc +11 -6
  40. package/deps/rocksdb/rocksdb/db/column_family.h +20 -6
  41. package/deps/rocksdb/rocksdb/db/compaction/compaction.cc +31 -34
  42. package/deps/rocksdb/rocksdb/db/compaction/compaction.h +3 -0
  43. package/deps/rocksdb/rocksdb/db/compaction/compaction_iterator.cc +21 -3
  44. package/deps/rocksdb/rocksdb/db/compaction/compaction_iterator.h +1 -0
  45. package/deps/rocksdb/rocksdb/db/compaction/compaction_job.cc +4 -0
  46. package/deps/rocksdb/rocksdb/db/compaction/compaction_job.h +4 -2
  47. package/deps/rocksdb/rocksdb/db/compaction/compaction_job_test.cc +9 -6
  48. package/deps/rocksdb/rocksdb/db/compaction/compaction_outputs.cc +275 -82
  49. package/deps/rocksdb/rocksdb/db/compaction/compaction_outputs.h +7 -0
  50. package/deps/rocksdb/rocksdb/db/compaction/compaction_picker.cc +11 -18
  51. package/deps/rocksdb/rocksdb/db/compaction/compaction_picker.h +17 -16
  52. package/deps/rocksdb/rocksdb/db/compaction/compaction_picker_fifo.cc +19 -6
  53. package/deps/rocksdb/rocksdb/db/compaction/compaction_picker_fifo.h +5 -5
  54. package/deps/rocksdb/rocksdb/db/compaction/compaction_picker_level.cc +22 -22
  55. package/deps/rocksdb/rocksdb/db/compaction/compaction_picker_level.h +5 -5
  56. package/deps/rocksdb/rocksdb/db/compaction/compaction_picker_test.cc +81 -52
  57. package/deps/rocksdb/rocksdb/db/compaction/compaction_picker_universal.cc +5 -1
  58. package/deps/rocksdb/rocksdb/db/compaction/compaction_picker_universal.h +5 -5
  59. package/deps/rocksdb/rocksdb/db/compaction/compaction_service_job.cc +8 -2
  60. package/deps/rocksdb/rocksdb/db/compaction/subcompaction_state.h +3 -0
  61. package/deps/rocksdb/rocksdb/db/compaction/tiered_compaction_test.cc +266 -138
  62. package/deps/rocksdb/rocksdb/db/corruption_test.cc +86 -1
  63. package/deps/rocksdb/rocksdb/db/db_basic_test.cc +98 -9
  64. package/deps/rocksdb/rocksdb/db/db_block_cache_test.cc +28 -28
  65. package/deps/rocksdb/rocksdb/db/db_bloom_filter_test.cc +2 -3
  66. package/deps/rocksdb/rocksdb/db/db_compaction_test.cc +1022 -123
  67. package/deps/rocksdb/rocksdb/db/db_flush_test.cc +65 -4
  68. package/deps/rocksdb/rocksdb/db/db_impl/db_impl.cc +32 -21
  69. package/deps/rocksdb/rocksdb/db/db_impl/db_impl.h +32 -24
  70. package/deps/rocksdb/rocksdb/db/db_impl/db_impl_compaction_flush.cc +199 -77
  71. package/deps/rocksdb/rocksdb/db/db_impl/db_impl_debug.cc +1 -1
  72. package/deps/rocksdb/rocksdb/db/db_impl/db_impl_experimental.cc +3 -2
  73. package/deps/rocksdb/rocksdb/db/db_impl/db_impl_files.cc +3 -0
  74. package/deps/rocksdb/rocksdb/db/db_impl/db_impl_open.cc +8 -4
  75. package/deps/rocksdb/rocksdb/db/db_impl/db_impl_write.cc +43 -23
  76. package/deps/rocksdb/rocksdb/db/db_iter.cc +8 -2
  77. package/deps/rocksdb/rocksdb/db/db_merge_operand_test.cc +42 -0
  78. package/deps/rocksdb/rocksdb/db/db_merge_operator_test.cc +155 -0
  79. package/deps/rocksdb/rocksdb/db/db_properties_test.cc +12 -12
  80. package/deps/rocksdb/rocksdb/db/db_range_del_test.cc +230 -2
  81. package/deps/rocksdb/rocksdb/db/db_test.cc +3 -0
  82. package/deps/rocksdb/rocksdb/db/db_test2.cc +233 -8
  83. package/deps/rocksdb/rocksdb/db/db_test_util.cc +11 -10
  84. package/deps/rocksdb/rocksdb/db/db_test_util.h +39 -24
  85. package/deps/rocksdb/rocksdb/db/db_wal_test.cc +129 -0
  86. package/deps/rocksdb/rocksdb/db/db_with_timestamp_basic_test.cc +28 -0
  87. package/deps/rocksdb/rocksdb/db/db_with_timestamp_compaction_test.cc +21 -0
  88. package/deps/rocksdb/rocksdb/db/dbformat.cc +25 -0
  89. package/deps/rocksdb/rocksdb/db/dbformat.h +2 -0
  90. package/deps/rocksdb/rocksdb/db/experimental.cc +3 -2
  91. package/deps/rocksdb/rocksdb/db/external_sst_file_basic_test.cc +3 -0
  92. package/deps/rocksdb/rocksdb/db/external_sst_file_ingestion_job.cc +92 -13
  93. package/deps/rocksdb/rocksdb/db/external_sst_file_ingestion_job.h +38 -1
  94. package/deps/rocksdb/rocksdb/db/external_sst_file_test.cc +14 -110
  95. package/deps/rocksdb/rocksdb/db/flush_job.cc +12 -10
  96. package/deps/rocksdb/rocksdb/db/flush_job.h +3 -2
  97. package/deps/rocksdb/rocksdb/db/flush_job_test.cc +29 -29
  98. package/deps/rocksdb/rocksdb/db/import_column_family_job.cc +56 -53
  99. package/deps/rocksdb/rocksdb/db/import_column_family_test.cc +3 -4
  100. package/deps/rocksdb/rocksdb/db/internal_stats.cc +11 -11
  101. package/deps/rocksdb/rocksdb/db/internal_stats.h +2 -2
  102. package/deps/rocksdb/rocksdb/db/log_reader.cc +8 -6
  103. package/deps/rocksdb/rocksdb/db/log_test.cc +35 -2
  104. package/deps/rocksdb/rocksdb/db/memtable.cc +31 -6
  105. package/deps/rocksdb/rocksdb/db/merge_helper.cc +47 -29
  106. package/deps/rocksdb/rocksdb/db/merge_helper.h +14 -6
  107. package/deps/rocksdb/rocksdb/db/periodic_task_scheduler_test.cc +10 -10
  108. package/deps/rocksdb/rocksdb/db/range_tombstone_fragmenter_test.cc +1 -1
  109. package/deps/rocksdb/rocksdb/db/repair.cc +65 -22
  110. package/deps/rocksdb/rocksdb/db/repair_test.cc +54 -0
  111. package/deps/rocksdb/rocksdb/db/seqno_time_test.cc +26 -26
  112. package/deps/rocksdb/rocksdb/db/table_cache.cc +41 -91
  113. package/deps/rocksdb/rocksdb/db/table_cache.h +17 -19
  114. package/deps/rocksdb/rocksdb/db/table_cache_sync_and_async.h +7 -9
  115. package/deps/rocksdb/rocksdb/db/table_properties_collector.h +3 -1
  116. package/deps/rocksdb/rocksdb/db/version_builder.cc +102 -52
  117. package/deps/rocksdb/rocksdb/db/version_builder.h +20 -0
  118. package/deps/rocksdb/rocksdb/db/version_builder_test.cc +218 -93
  119. package/deps/rocksdb/rocksdb/db/version_edit.cc +27 -1
  120. package/deps/rocksdb/rocksdb/db/version_edit.h +34 -9
  121. package/deps/rocksdb/rocksdb/db/version_edit_handler.cc +13 -6
  122. package/deps/rocksdb/rocksdb/db/version_edit_handler.h +17 -6
  123. package/deps/rocksdb/rocksdb/db/version_edit_test.cc +19 -17
  124. package/deps/rocksdb/rocksdb/db/version_set.cc +160 -28
  125. package/deps/rocksdb/rocksdb/db/version_set.h +34 -4
  126. package/deps/rocksdb/rocksdb/db/version_set_sync_and_async.h +1 -1
  127. package/deps/rocksdb/rocksdb/db/version_set_test.cc +65 -31
  128. package/deps/rocksdb/rocksdb/db/write_batch.cc +4 -1
  129. package/deps/rocksdb/rocksdb/db/write_thread.cc +5 -2
  130. package/deps/rocksdb/rocksdb/db_stress_tool/db_stress_common.h +1 -0
  131. package/deps/rocksdb/rocksdb/db_stress_tool/db_stress_driver.cc +31 -32
  132. package/deps/rocksdb/rocksdb/db_stress_tool/db_stress_driver.h +2 -1
  133. package/deps/rocksdb/rocksdb/db_stress_tool/db_stress_env_wrapper.h +8 -6
  134. package/deps/rocksdb/rocksdb/db_stress_tool/db_stress_gflags.cc +4 -0
  135. package/deps/rocksdb/rocksdb/db_stress_tool/db_stress_test_base.cc +11 -4
  136. package/deps/rocksdb/rocksdb/db_stress_tool/db_stress_tool.cc +16 -15
  137. package/deps/rocksdb/rocksdb/db_stress_tool/no_batched_ops_stress.cc +13 -1
  138. package/deps/rocksdb/rocksdb/file/file_prefetch_buffer.cc +1 -0
  139. package/deps/rocksdb/rocksdb/file/prefetch_test.cc +286 -217
  140. package/deps/rocksdb/rocksdb/include/rocksdb/c.h +8 -0
  141. package/deps/rocksdb/rocksdb/include/rocksdb/cache.h +137 -135
  142. package/deps/rocksdb/rocksdb/include/rocksdb/db.h +6 -0
  143. package/deps/rocksdb/rocksdb/include/rocksdb/listener.h +7 -1
  144. package/deps/rocksdb/rocksdb/include/rocksdb/merge_operator.h +21 -0
  145. package/deps/rocksdb/rocksdb/include/rocksdb/metadata.h +9 -3
  146. package/deps/rocksdb/rocksdb/include/rocksdb/options.h +2 -1
  147. package/deps/rocksdb/rocksdb/include/rocksdb/secondary_cache.h +8 -6
  148. package/deps/rocksdb/rocksdb/include/rocksdb/status.h +3 -0
  149. package/deps/rocksdb/rocksdb/include/rocksdb/utilities/backup_engine.h +69 -9
  150. package/deps/rocksdb/rocksdb/include/rocksdb/version.h +1 -1
  151. package/deps/rocksdb/rocksdb/memory/arena.cc +23 -87
  152. package/deps/rocksdb/rocksdb/memory/arena.h +25 -31
  153. package/deps/rocksdb/rocksdb/memory/arena_test.cc +90 -0
  154. package/deps/rocksdb/rocksdb/memory/memory_allocator.h +9 -0
  155. package/deps/rocksdb/rocksdb/monitoring/stats_history_test.cc +26 -26
  156. package/deps/rocksdb/rocksdb/options/customizable_test.cc +4 -3
  157. package/deps/rocksdb/rocksdb/port/mmap.cc +98 -0
  158. package/deps/rocksdb/rocksdb/port/mmap.h +70 -0
  159. package/deps/rocksdb/rocksdb/port/port_posix.h +2 -0
  160. package/{prebuilds → deps/rocksdb/rocksdb/prebuilds}/linux-x64/node.napi.node +0 -0
  161. package/deps/rocksdb/rocksdb/src.mk +3 -0
  162. package/deps/rocksdb/rocksdb/table/adaptive/adaptive_table_factory.cc +3 -2
  163. package/deps/rocksdb/rocksdb/table/block_based/block.h +3 -0
  164. package/deps/rocksdb/rocksdb/table/block_based/block_based_table_builder.cc +25 -67
  165. package/deps/rocksdb/rocksdb/table/block_based/block_based_table_builder.h +3 -3
  166. package/deps/rocksdb/rocksdb/table/block_based/block_based_table_factory.cc +18 -13
  167. package/deps/rocksdb/rocksdb/table/block_based/block_based_table_reader.cc +159 -225
  168. package/deps/rocksdb/rocksdb/table/block_based/block_based_table_reader.h +31 -50
  169. package/deps/rocksdb/rocksdb/table/block_based/block_based_table_reader_impl.h +52 -20
  170. package/deps/rocksdb/rocksdb/table/block_based/block_based_table_reader_sync_and_async.h +3 -3
  171. package/deps/rocksdb/rocksdb/table/block_based/block_based_table_reader_test.cc +1 -1
  172. package/deps/rocksdb/rocksdb/table/block_based/block_cache.cc +96 -0
  173. package/deps/rocksdb/rocksdb/table/block_based/block_cache.h +132 -0
  174. package/deps/rocksdb/rocksdb/table/block_based/cachable_entry.h +28 -0
  175. package/deps/rocksdb/rocksdb/table/block_based/filter_block_reader_common.cc +6 -5
  176. package/deps/rocksdb/rocksdb/table/block_based/filter_block_reader_common.h +1 -4
  177. package/deps/rocksdb/rocksdb/table/block_based/full_filter_block.cc +6 -7
  178. package/deps/rocksdb/rocksdb/table/block_based/index_reader_common.cc +3 -1
  179. package/deps/rocksdb/rocksdb/table/block_based/parsed_full_filter_block.h +6 -1
  180. package/deps/rocksdb/rocksdb/table/block_based/partitioned_filter_block.cc +19 -18
  181. package/deps/rocksdb/rocksdb/table/block_based/partitioned_filter_block.h +9 -5
  182. package/deps/rocksdb/rocksdb/table/block_based/partitioned_filter_block_test.cc +3 -1
  183. package/deps/rocksdb/rocksdb/table/block_based/partitioned_index_reader.cc +2 -1
  184. package/deps/rocksdb/rocksdb/table/block_based/uncompression_dict_reader.cc +2 -2
  185. package/deps/rocksdb/rocksdb/table/block_fetcher_test.cc +3 -3
  186. package/deps/rocksdb/rocksdb/table/format.cc +24 -20
  187. package/deps/rocksdb/rocksdb/table/format.h +6 -3
  188. package/deps/rocksdb/rocksdb/table/get_context.cc +12 -3
  189. package/deps/rocksdb/rocksdb/table/internal_iterator.h +0 -2
  190. package/deps/rocksdb/rocksdb/table/merging_iterator.cc +69 -35
  191. package/deps/rocksdb/rocksdb/table/meta_blocks.cc +2 -2
  192. package/deps/rocksdb/rocksdb/table/sst_file_dumper.cc +1 -1
  193. package/deps/rocksdb/rocksdb/table/table_test.cc +7 -6
  194. package/deps/rocksdb/rocksdb/test_util/testutil.h +10 -0
  195. package/deps/rocksdb/rocksdb/tools/block_cache_analyzer/block_cache_trace_analyzer_test.cc +66 -1
  196. package/deps/rocksdb/rocksdb/tools/db_bench_tool.cc +9 -2
  197. package/deps/rocksdb/rocksdb/trace_replay/block_cache_tracer.cc +5 -0
  198. package/deps/rocksdb/rocksdb/trace_replay/block_cache_tracer.h +2 -2
  199. package/deps/rocksdb/rocksdb/trace_replay/trace_replay.cc +1 -1
  200. package/deps/rocksdb/rocksdb/util/async_file_reader.cc +20 -12
  201. package/deps/rocksdb/rocksdb/util/bloom_test.cc +1 -1
  202. package/deps/rocksdb/rocksdb/util/compression.cc +2 -2
  203. package/deps/rocksdb/rocksdb/util/compression.h +11 -2
  204. package/deps/rocksdb/rocksdb/util/status.cc +7 -0
  205. package/deps/rocksdb/rocksdb/util/xxhash.h +1901 -887
  206. package/deps/rocksdb/rocksdb/utilities/backup/backup_engine.cc +250 -74
  207. package/deps/rocksdb/rocksdb/utilities/backup/backup_engine_test.cc +199 -4
  208. package/deps/rocksdb/rocksdb/utilities/cache_dump_load_impl.cc +35 -57
  209. package/deps/rocksdb/rocksdb/utilities/cache_dump_load_impl.h +4 -5
  210. package/deps/rocksdb/rocksdb/utilities/checkpoint/checkpoint_impl.cc +1 -0
  211. package/deps/rocksdb/rocksdb/utilities/fault_injection_fs.cc +39 -0
  212. package/deps/rocksdb/rocksdb/utilities/fault_injection_fs.h +9 -0
  213. package/deps/rocksdb/rocksdb/utilities/fault_injection_secondary_cache.cc +11 -6
  214. package/deps/rocksdb/rocksdb/utilities/fault_injection_secondary_cache.h +6 -5
  215. package/deps/rocksdb/rocksdb/utilities/memory_allocators.h +0 -1
  216. package/deps/rocksdb/rocksdb/utilities/simulator_cache/cache_simulator.cc +10 -11
  217. package/deps/rocksdb/rocksdb/utilities/simulator_cache/sim_cache.cc +31 -31
  218. package/deps/rocksdb/rocksdb/utilities/transactions/lock/range/range_tree/lib/portability/toku_time.h +4 -0
  219. package/deps/rocksdb/rocksdb/utilities/transactions/transaction_test.cc +111 -0
  220. package/deps/rocksdb/rocksdb/utilities/ttl/db_ttl_impl.cc +1 -0
  221. package/deps/rocksdb/rocksdb/utilities/write_batch_with_index/write_batch_with_index_internal.cc +12 -3
  222. package/package.json +1 -1
  223. package/deps/rocksdb/rocksdb/table/block_based/block_like_traits.h +0 -182
@@ -17,25 +17,10 @@ ChargedCache::ChargedCache(std::shared_ptr<Cache> cache,
17
17
  CacheReservationManagerImpl<CacheEntryRole::kBlobCache>>(
18
18
  block_cache))) {}
19
19
 
20
- Status ChargedCache::Insert(const Slice& key, void* value, size_t charge,
21
- DeleterFn deleter, Handle** handle,
22
- Priority priority) {
23
- Status s = cache_->Insert(key, value, charge, deleter, handle, priority);
24
- if (s.ok()) {
25
- // Insert may cause the cache entry eviction if the cache is full. So we
26
- // directly call the reservation manager to update the total memory used
27
- // in the cache.
28
- assert(cache_res_mgr_);
29
- cache_res_mgr_->UpdateCacheReservation(cache_->GetUsage())
30
- .PermitUncheckedError();
31
- }
32
- return s;
33
- }
34
-
35
- Status ChargedCache::Insert(const Slice& key, void* value,
20
+ Status ChargedCache::Insert(const Slice& key, ObjectPtr obj,
36
21
  const CacheItemHelper* helper, size_t charge,
37
22
  Handle** handle, Priority priority) {
38
- Status s = cache_->Insert(key, value, helper, charge, handle, priority);
23
+ Status s = cache_->Insert(key, obj, helper, charge, handle, priority);
39
24
  if (s.ok()) {
40
25
  // Insert may cause the cache entry eviction if the cache is full. So we
41
26
  // directly call the reservation manager to update the total memory used
@@ -47,22 +32,21 @@ Status ChargedCache::Insert(const Slice& key, void* value,
47
32
  return s;
48
33
  }
49
34
 
50
- Cache::Handle* ChargedCache::Lookup(const Slice& key, Statistics* stats) {
51
- return cache_->Lookup(key, stats);
52
- }
53
-
54
35
  Cache::Handle* ChargedCache::Lookup(const Slice& key,
55
36
  const CacheItemHelper* helper,
56
- const CreateCallback& create_cb,
37
+ CreateContext* create_context,
57
38
  Priority priority, bool wait,
58
39
  Statistics* stats) {
59
- auto handle = cache_->Lookup(key, helper, create_cb, priority, wait, stats);
40
+ auto handle =
41
+ cache_->Lookup(key, helper, create_context, priority, wait, stats);
60
42
  // Lookup may promote the KV pair from the secondary cache to the primary
61
43
  // cache. So we directly call the reservation manager to update the total
62
44
  // memory used in the cache.
63
- assert(cache_res_mgr_);
64
- cache_res_mgr_->UpdateCacheReservation(cache_->GetUsage())
65
- .PermitUncheckedError();
45
+ if (helper && helper->create_cb) {
46
+ assert(cache_res_mgr_);
47
+ cache_res_mgr_->UpdateCacheReservation(cache_->GetUsage())
48
+ .PermitUncheckedError();
49
+ }
66
50
  return handle;
67
51
  }
68
52
 
@@ -23,16 +23,14 @@ class ChargedCache : public Cache {
23
23
  std::shared_ptr<Cache> block_cache);
24
24
  ~ChargedCache() override = default;
25
25
 
26
- Status Insert(const Slice& key, void* value, size_t charge, DeleterFn deleter,
27
- Handle** handle, Priority priority) override;
28
- Status Insert(const Slice& key, void* value, const CacheItemHelper* helper,
26
+ Status Insert(const Slice& key, ObjectPtr obj, const CacheItemHelper* helper,
29
27
  size_t charge, Handle** handle = nullptr,
30
28
  Priority priority = Priority::LOW) override;
31
29
 
32
- Cache::Handle* Lookup(const Slice& key, Statistics* stats) override;
33
30
  Cache::Handle* Lookup(const Slice& key, const CacheItemHelper* helper,
34
- const CreateCallback& create_cb, Priority priority,
35
- bool wait, Statistics* stats = nullptr) override;
31
+ CreateContext* create_context,
32
+ Priority priority = Priority::LOW, bool wait = true,
33
+ Statistics* stats = nullptr) override;
36
34
 
37
35
  bool Release(Cache::Handle* handle, bool useful,
38
36
  bool erase_if_last_ref = false) override;
@@ -56,7 +54,9 @@ class ChargedCache : public Cache {
56
54
  return cache_->HasStrictCapacityLimit();
57
55
  }
58
56
 
59
- void* Value(Cache::Handle* handle) override { return cache_->Value(handle); }
57
+ ObjectPtr Value(Cache::Handle* handle) override {
58
+ return cache_->Value(handle);
59
+ }
60
60
 
61
61
  bool IsReady(Cache::Handle* handle) override {
62
62
  return cache_->IsReady(handle);
@@ -84,22 +84,17 @@ class ChargedCache : public Cache {
84
84
  return cache_->GetCharge(handle);
85
85
  }
86
86
 
87
- Cache::DeleterFn GetDeleter(Cache::Handle* handle) const override {
88
- return cache_->GetDeleter(handle);
87
+ const CacheItemHelper* GetCacheItemHelper(Handle* handle) const override {
88
+ return cache_->GetCacheItemHelper(handle);
89
89
  }
90
90
 
91
91
  void ApplyToAllEntries(
92
- const std::function<void(const Slice& key, void* value, size_t charge,
93
- Cache::DeleterFn deleter)>& callback,
92
+ const std::function<void(const Slice& key, ObjectPtr value, size_t charge,
93
+ const CacheItemHelper* helper)>& callback,
94
94
  const Cache::ApplyToAllEntriesOptions& opts) override {
95
95
  cache_->ApplyToAllEntries(callback, opts);
96
96
  }
97
97
 
98
- void ApplyToAllCacheEntries(void (*callback)(void* value, size_t charge),
99
- bool thread_safe) override {
100
- cache_->ApplyToAllCacheEntries(callback, thread_safe);
101
- }
102
-
103
98
  std::string GetPrintableOptions() const override {
104
99
  return cache_->GetPrintableOptions();
105
100
  }
@@ -50,12 +50,12 @@ inline uint64_t GetInitialCountdown(Cache::Priority priority) {
50
50
  }
51
51
  }
52
52
 
53
- inline void FreeDataMarkEmpty(ClockHandle& h) {
53
+ inline void FreeDataMarkEmpty(ClockHandle& h, MemoryAllocator* allocator) {
54
54
  // NOTE: in theory there's more room for parallelism if we copy the handle
55
55
  // data and delay actions like this until after marking the entry as empty,
56
56
  // but performance tests only show a regression by copying the few words
57
57
  // of data.
58
- h.FreeData();
58
+ h.FreeData(allocator);
59
59
 
60
60
  #ifndef NDEBUG
61
61
  // Mark slot as empty, with assertion
@@ -115,24 +115,23 @@ inline bool ClockUpdate(ClockHandle& h) {
115
115
 
116
116
  } // namespace
117
117
 
118
- void ClockHandleBasicData::FreeData() const {
119
- if (deleter) {
120
- UniqueId64x2 unhashed;
121
- (*deleter)(
122
- ClockCacheShard<HyperClockTable>::ReverseHash(hashed_key, &unhashed),
123
- value);
118
+ void ClockHandleBasicData::FreeData(MemoryAllocator* allocator) const {
119
+ if (helper->del_cb) {
120
+ helper->del_cb(value, allocator);
124
121
  }
125
122
  }
126
123
 
127
124
  HyperClockTable::HyperClockTable(
128
125
  size_t capacity, bool /*strict_capacity_limit*/,
129
- CacheMetadataChargePolicy metadata_charge_policy, const Opts& opts)
126
+ CacheMetadataChargePolicy metadata_charge_policy,
127
+ MemoryAllocator* allocator, const Opts& opts)
130
128
  : length_bits_(CalcHashBits(capacity, opts.estimated_value_size,
131
129
  metadata_charge_policy)),
132
130
  length_bits_mask_((size_t{1} << length_bits_) - 1),
133
131
  occupancy_limit_(static_cast<size_t>((uint64_t{1} << length_bits_) *
134
132
  kStrictLoadFactor)),
135
- array_(new HandleImpl[size_t{1} << length_bits_]) {
133
+ array_(new HandleImpl[size_t{1} << length_bits_]),
134
+ allocator_(allocator) {
136
135
  if (metadata_charge_policy ==
137
136
  CacheMetadataChargePolicy::kFullChargeCacheMetadata) {
138
137
  usage_ += size_t{GetTableSize()} * sizeof(HandleImpl);
@@ -154,7 +153,7 @@ HyperClockTable::~HyperClockTable() {
154
153
  case ClockHandle::kStateInvisible: // rare but possible
155
154
  case ClockHandle::kStateVisible:
156
155
  assert(GetRefcount(h.meta) == 0);
157
- h.FreeData();
156
+ h.FreeData(allocator_);
158
157
  #ifndef NDEBUG
159
158
  Rollback(h.hashed_key, &h);
160
159
  ReclaimEntryUsage(h.GetTotalCharge());
@@ -415,7 +414,7 @@ Status HyperClockTable::Insert(const ClockHandleBasicData& proto,
415
414
  if (handle == nullptr) {
416
415
  // Don't insert the entry but still return ok, as if the entry
417
416
  // inserted into cache and evicted immediately.
418
- proto.FreeData();
417
+ proto.FreeData(allocator_);
419
418
  return Status::OK();
420
419
  } else {
421
420
  // Need to track usage of fallback detached insert
@@ -556,7 +555,7 @@ Status HyperClockTable::Insert(const ClockHandleBasicData& proto,
556
555
  if (handle == nullptr) {
557
556
  revert_usage_fn();
558
557
  // As if unrefed entry immdiately evicted
559
- proto.FreeData();
558
+ proto.FreeData(allocator_);
560
559
  return Status::OK();
561
560
  }
562
561
  }
@@ -698,14 +697,14 @@ bool HyperClockTable::Release(HandleImpl* h, bool useful,
698
697
  // Took ownership
699
698
  size_t total_charge = h->GetTotalCharge();
700
699
  if (UNLIKELY(h->IsDetached())) {
701
- h->FreeData();
700
+ h->FreeData(allocator_);
702
701
  // Delete detached handle
703
702
  delete h;
704
703
  detached_usage_.fetch_sub(total_charge, std::memory_order_relaxed);
705
704
  usage_.fetch_sub(total_charge, std::memory_order_relaxed);
706
705
  } else {
707
706
  Rollback(h->hashed_key, h);
708
- FreeDataMarkEmpty(*h);
707
+ FreeDataMarkEmpty(*h, allocator_);
709
708
  ReclaimEntryUsage(total_charge);
710
709
  }
711
710
  return true;
@@ -790,7 +789,7 @@ void HyperClockTable::Erase(const UniqueId64x2& hashed_key) {
790
789
  // Took ownership
791
790
  assert(hashed_key == h->hashed_key);
792
791
  size_t total_charge = h->GetTotalCharge();
793
- FreeDataMarkEmpty(*h);
792
+ FreeDataMarkEmpty(*h, allocator_);
794
793
  ReclaimEntryUsage(total_charge);
795
794
  // We already have a copy of hashed_key in this case, so OK to
796
795
  // delay Rollback until after releasing the entry
@@ -878,7 +877,7 @@ void HyperClockTable::EraseUnRefEntries() {
878
877
  // Took ownership
879
878
  size_t total_charge = h.GetTotalCharge();
880
879
  Rollback(h.hashed_key, &h);
881
- FreeDataMarkEmpty(h);
880
+ FreeDataMarkEmpty(h, allocator_);
882
881
  ReclaimEntryUsage(total_charge);
883
882
  }
884
883
  }
@@ -968,7 +967,7 @@ inline void HyperClockTable::Evict(size_t requested_charge,
968
967
  Rollback(h.hashed_key, &h);
969
968
  *freed_charge += h.GetTotalCharge();
970
969
  *freed_count += 1;
971
- FreeDataMarkEmpty(h);
970
+ FreeDataMarkEmpty(h, allocator_);
972
971
  }
973
972
  }
974
973
 
@@ -990,9 +989,10 @@ template <class Table>
990
989
  ClockCacheShard<Table>::ClockCacheShard(
991
990
  size_t capacity, bool strict_capacity_limit,
992
991
  CacheMetadataChargePolicy metadata_charge_policy,
993
- const typename Table::Opts& opts)
992
+ MemoryAllocator* allocator, const typename Table::Opts& opts)
994
993
  : CacheShardBase(metadata_charge_policy),
995
- table_(capacity, strict_capacity_limit, metadata_charge_policy, opts),
994
+ table_(capacity, strict_capacity_limit, metadata_charge_policy, allocator,
995
+ opts),
996
996
  capacity_(capacity),
997
997
  strict_capacity_limit_(strict_capacity_limit) {
998
998
  // Initial charge metadata should not exceed capacity
@@ -1006,8 +1006,9 @@ void ClockCacheShard<Table>::EraseUnRefEntries() {
1006
1006
 
1007
1007
  template <class Table>
1008
1008
  void ClockCacheShard<Table>::ApplyToSomeEntries(
1009
- const std::function<void(const Slice& key, void* value, size_t charge,
1010
- DeleterFn deleter)>& callback,
1009
+ const std::function<void(const Slice& key, Cache::ObjectPtr value,
1010
+ size_t charge,
1011
+ const Cache::CacheItemHelper* helper)>& callback,
1011
1012
  size_t average_entries_per_lock, size_t* state) {
1012
1013
  // The state is essentially going to be the starting hash, which works
1013
1014
  // nicely even if we resize between calls because we use upper-most
@@ -1034,7 +1035,7 @@ void ClockCacheShard<Table>::ApplyToSomeEntries(
1034
1035
  [callback](const HandleImpl& h) {
1035
1036
  UniqueId64x2 unhashed;
1036
1037
  callback(ReverseHash(h.hashed_key, &unhashed), h.value,
1037
- h.GetTotalCharge(), h.deleter);
1038
+ h.GetTotalCharge(), h.helper);
1038
1039
  },
1039
1040
  index_begin, index_end, false);
1040
1041
  }
@@ -1078,9 +1079,9 @@ void ClockCacheShard<Table>::SetStrictCapacityLimit(
1078
1079
  template <class Table>
1079
1080
  Status ClockCacheShard<Table>::Insert(const Slice& key,
1080
1081
  const UniqueId64x2& hashed_key,
1081
- void* value, size_t charge,
1082
- Cache::DeleterFn deleter,
1083
- HandleImpl** handle,
1082
+ Cache::ObjectPtr value,
1083
+ const Cache::CacheItemHelper* helper,
1084
+ size_t charge, HandleImpl** handle,
1084
1085
  Cache::Priority priority) {
1085
1086
  if (UNLIKELY(key.size() != kCacheKeySize)) {
1086
1087
  return Status::NotSupported("ClockCache only supports key size " +
@@ -1089,7 +1090,7 @@ Status ClockCacheShard<Table>::Insert(const Slice& key,
1089
1090
  ClockHandleBasicData proto;
1090
1091
  proto.hashed_key = hashed_key;
1091
1092
  proto.value = value;
1092
- proto.deleter = deleter;
1093
+ proto.helper = helper;
1093
1094
  proto.total_charge = charge;
1094
1095
  Status s = table_.Insert(
1095
1096
  proto, handle, priority, capacity_.load(std::memory_order_relaxed),
@@ -1223,15 +1224,16 @@ HyperClockCache::HyperClockCache(
1223
1224
  // TODO: should not need to go through two levels of pointer indirection to
1224
1225
  // get to table entries
1225
1226
  size_t per_shard = GetPerShardCapacity();
1227
+ MemoryAllocator* alloc = this->memory_allocator();
1226
1228
  InitShards([=](Shard* cs) {
1227
1229
  HyperClockTable::Opts opts;
1228
1230
  opts.estimated_value_size = estimated_value_size;
1229
- new (cs)
1230
- Shard(per_shard, strict_capacity_limit, metadata_charge_policy, opts);
1231
+ new (cs) Shard(per_shard, strict_capacity_limit, metadata_charge_policy,
1232
+ alloc, opts);
1231
1233
  });
1232
1234
  }
1233
1235
 
1234
- void* HyperClockCache::Value(Handle* handle) {
1236
+ Cache::ObjectPtr HyperClockCache::Value(Handle* handle) {
1235
1237
  return reinterpret_cast<const HandleImpl*>(handle)->value;
1236
1238
  }
1237
1239
 
@@ -1239,9 +1241,10 @@ size_t HyperClockCache::GetCharge(Handle* handle) const {
1239
1241
  return reinterpret_cast<const HandleImpl*>(handle)->GetTotalCharge();
1240
1242
  }
1241
1243
 
1242
- Cache::DeleterFn HyperClockCache::GetDeleter(Handle* handle) const {
1244
+ const Cache::CacheItemHelper* HyperClockCache::GetCacheItemHelper(
1245
+ Handle* handle) const {
1243
1246
  auto h = reinterpret_cast<const HandleImpl*>(handle);
1244
- return h->deleter;
1247
+ return h->helper;
1245
1248
  }
1246
1249
 
1247
1250
  namespace {
@@ -305,8 +305,8 @@ constexpr double kLoadFactor = 0.7;
305
305
  constexpr double kStrictLoadFactor = 0.84;
306
306
 
307
307
  struct ClockHandleBasicData {
308
- void* value = nullptr;
309
- Cache::DeleterFn deleter = nullptr;
308
+ Cache::ObjectPtr value = nullptr;
309
+ const Cache::CacheItemHelper* helper = nullptr;
310
310
  // A lossless, reversible hash of the fixed-size (16 byte) cache key. This
311
311
  // eliminates the need to store a hash separately.
312
312
  UniqueId64x2 hashed_key = kNullUniqueId64x2;
@@ -321,7 +321,7 @@ struct ClockHandleBasicData {
321
321
  inline size_t GetTotalCharge() const { return total_charge; }
322
322
 
323
323
  // Calls deleter (if non-null) on cache key and value
324
- void FreeData() const;
324
+ void FreeData(MemoryAllocator* allocator) const;
325
325
 
326
326
  // Required by concept HandleImpl
327
327
  const UniqueId64x2& GetHash() const { return hashed_key; }
@@ -411,7 +411,7 @@ class HyperClockTable {
411
411
 
412
412
  HyperClockTable(size_t capacity, bool strict_capacity_limit,
413
413
  CacheMetadataChargePolicy metadata_charge_policy,
414
- const Opts& opts);
414
+ MemoryAllocator* allocator, const Opts& opts);
415
415
  ~HyperClockTable();
416
416
 
417
417
  Status Insert(const ClockHandleBasicData& proto, HandleImpl** handle,
@@ -519,6 +519,8 @@ class HyperClockTable {
519
519
  // Updates `detached_usage_` but not `usage_` nor `occupancy_`.
520
520
  inline HandleImpl* DetachedInsert(const ClockHandleBasicData& proto);
521
521
 
522
+ MemoryAllocator* GetAllocator() const { return allocator_; }
523
+
522
524
  // Returns the number of bits used to hash an element in the hash
523
525
  // table.
524
526
  static int CalcHashBits(size_t capacity, size_t estimated_value_size,
@@ -538,6 +540,9 @@ class HyperClockTable {
538
540
  // Array of slots comprising the hash table.
539
541
  const std::unique_ptr<HandleImpl[]> array_;
540
542
 
543
+ // From Cache, for deleter
544
+ MemoryAllocator* const allocator_;
545
+
541
546
  // We partition the following members into different cache lines
542
547
  // to avoid false sharing among Lookup, Release, Erase and Insert
543
548
  // operations in ClockCacheShard.
@@ -563,7 +568,7 @@ class ALIGN_AS(CACHE_LINE_SIZE) ClockCacheShard final : public CacheShardBase {
563
568
  public:
564
569
  ClockCacheShard(size_t capacity, bool strict_capacity_limit,
565
570
  CacheMetadataChargePolicy metadata_charge_policy,
566
- const typename Table::Opts& opts);
571
+ MemoryAllocator* allocator, const typename Table::Opts& opts);
567
572
 
568
573
  // For CacheShard concept
569
574
  using HandleImpl = typename Table::HandleImpl;
@@ -600,9 +605,9 @@ class ALIGN_AS(CACHE_LINE_SIZE) ClockCacheShard final : public CacheShardBase {
600
605
 
601
606
  void SetStrictCapacityLimit(bool strict_capacity_limit);
602
607
 
603
- Status Insert(const Slice& key, const UniqueId64x2& hashed_key, void* value,
604
- size_t charge, Cache::DeleterFn deleter, HandleImpl** handle,
605
- Cache::Priority priority);
608
+ Status Insert(const Slice& key, const UniqueId64x2& hashed_key,
609
+ Cache::ObjectPtr value, const Cache::CacheItemHelper* helper,
610
+ size_t charge, HandleImpl** handle, Cache::Priority priority);
606
611
 
607
612
  HandleImpl* Lookup(const Slice& key, const UniqueId64x2& hashed_key);
608
613
 
@@ -629,25 +634,18 @@ class ALIGN_AS(CACHE_LINE_SIZE) ClockCacheShard final : public CacheShardBase {
629
634
  size_t GetTableAddressCount() const;
630
635
 
631
636
  void ApplyToSomeEntries(
632
- const std::function<void(const Slice& key, void* value, size_t charge,
633
- DeleterFn deleter)>& callback,
637
+ const std::function<void(const Slice& key, Cache::ObjectPtr obj,
638
+ size_t charge,
639
+ const Cache::CacheItemHelper* helper)>& callback,
634
640
  size_t average_entries_per_lock, size_t* state);
635
641
 
636
642
  void EraseUnRefEntries();
637
643
 
638
644
  std::string GetPrintableOptions() const { return std::string{}; }
639
645
 
640
- // SecondaryCache not yet supported
641
- Status Insert(const Slice& key, const UniqueId64x2& hashed_key, void* value,
642
- const Cache::CacheItemHelper* helper, size_t charge,
643
- HandleImpl** handle, Cache::Priority priority) {
644
- return Insert(key, hashed_key, value, charge, helper->del_cb, handle,
645
- priority);
646
- }
647
-
648
646
  HandleImpl* Lookup(const Slice& key, const UniqueId64x2& hashed_key,
649
647
  const Cache::CacheItemHelper* /*helper*/,
650
- const Cache::CreateCallback& /*create_cb*/,
648
+ Cache::CreateContext* /*create_context*/,
651
649
  Cache::Priority /*priority*/, bool /*wait*/,
652
650
  Statistics* /*stats*/) {
653
651
  return Lookup(key, hashed_key);
@@ -686,11 +684,11 @@ class HyperClockCache
686
684
 
687
685
  const char* Name() const override { return "HyperClockCache"; }
688
686
 
689
- void* Value(Handle* handle) override;
687
+ Cache::ObjectPtr Value(Handle* handle) override;
690
688
 
691
689
  size_t GetCharge(Handle* handle) const override;
692
690
 
693
- DeleterFn GetDeleter(Handle* handle) const override;
691
+ const CacheItemHelper* GetCacheItemHelper(Handle* handle) const override;
694
692
 
695
693
  void ReportProblems(
696
694
  const std::shared_ptr<Logger>& /*info_log*/) const override;
@@ -37,8 +37,10 @@ CompressedSecondaryCache::CompressedSecondaryCache(
37
37
  CompressedSecondaryCache::~CompressedSecondaryCache() { cache_.reset(); }
38
38
 
39
39
  std::unique_ptr<SecondaryCacheResultHandle> CompressedSecondaryCache::Lookup(
40
- const Slice& key, const Cache::CreateCallback& create_cb, bool /*wait*/,
41
- bool advise_erase, bool& is_in_sec_cache) {
40
+ const Slice& key, const Cache::CacheItemHelper* helper,
41
+ Cache::CreateContext* create_context, bool /*wait*/, bool advise_erase,
42
+ bool& is_in_sec_cache) {
43
+ assert(helper);
42
44
  std::unique_ptr<SecondaryCacheResultHandle> handle;
43
45
  is_in_sec_cache = false;
44
46
  Cache::Handle* lru_handle = cache_->Lookup(key);
@@ -64,12 +66,14 @@ std::unique_ptr<SecondaryCacheResultHandle> CompressedSecondaryCache::Lookup(
64
66
  ptr = reinterpret_cast<CacheAllocationPtr*>(handle_value);
65
67
  handle_value_charge = cache_->GetCharge(lru_handle);
66
68
  }
69
+ MemoryAllocator* allocator = cache_options_.memory_allocator.get();
67
70
 
68
71
  Status s;
69
- void* value{nullptr};
72
+ Cache::ObjectPtr value{nullptr};
70
73
  size_t charge{0};
71
74
  if (cache_options_.compression_type == kNoCompression) {
72
- s = create_cb(ptr->get(), handle_value_charge, &value, &charge);
75
+ s = helper->create_cb(Slice(ptr->get(), handle_value_charge),
76
+ create_context, allocator, &value, &charge);
73
77
  } else {
74
78
  UncompressionContext uncompression_context(cache_options_.compression_type);
75
79
  UncompressionInfo uncompression_info(uncompression_context,
@@ -79,14 +83,14 @@ std::unique_ptr<SecondaryCacheResultHandle> CompressedSecondaryCache::Lookup(
79
83
  size_t uncompressed_size{0};
80
84
  CacheAllocationPtr uncompressed = UncompressData(
81
85
  uncompression_info, (char*)ptr->get(), handle_value_charge,
82
- &uncompressed_size, cache_options_.compress_format_version,
83
- cache_options_.memory_allocator.get());
86
+ &uncompressed_size, cache_options_.compress_format_version, allocator);
84
87
 
85
88
  if (!uncompressed) {
86
89
  cache_->Release(lru_handle, /*erase_if_last_ref=*/true);
87
90
  return nullptr;
88
91
  }
89
- s = create_cb(uncompressed.get(), uncompressed_size, &value, &charge);
92
+ s = helper->create_cb(Slice(uncompressed.get(), uncompressed_size),
93
+ create_context, allocator, &value, &charge);
90
94
  }
91
95
 
92
96
  if (!s.ok()) {
@@ -98,8 +102,9 @@ std::unique_ptr<SecondaryCacheResultHandle> CompressedSecondaryCache::Lookup(
98
102
  cache_->Release(lru_handle, /*erase_if_last_ref=*/true);
99
103
  // Insert a dummy handle.
100
104
  cache_
101
- ->Insert(key, /*value=*/nullptr, /*charge=*/0,
102
- GetDeletionCallback(cache_options_.enable_custom_split_merge))
105
+ ->Insert(key, /*obj=*/nullptr,
106
+ GetHelper(cache_options_.enable_custom_split_merge),
107
+ /*charge=*/0)
103
108
  .PermitUncheckedError();
104
109
  } else {
105
110
  is_in_sec_cache = true;
@@ -109,19 +114,20 @@ std::unique_ptr<SecondaryCacheResultHandle> CompressedSecondaryCache::Lookup(
109
114
  return handle;
110
115
  }
111
116
 
112
- Status CompressedSecondaryCache::Insert(const Slice& key, void* value,
117
+ Status CompressedSecondaryCache::Insert(const Slice& key,
118
+ Cache::ObjectPtr value,
113
119
  const Cache::CacheItemHelper* helper) {
114
120
  if (value == nullptr) {
115
121
  return Status::InvalidArgument();
116
122
  }
117
123
 
118
124
  Cache::Handle* lru_handle = cache_->Lookup(key);
119
- Cache::DeleterFn del_cb =
120
- GetDeletionCallback(cache_options_.enable_custom_split_merge);
125
+ auto internal_helper = GetHelper(cache_options_.enable_custom_split_merge);
121
126
  if (lru_handle == nullptr) {
122
127
  PERF_COUNTER_ADD(compressed_sec_cache_insert_dummy_count, 1);
123
128
  // Insert a dummy handle if the handle is evicted for the first time.
124
- return cache_->Insert(key, /*value=*/nullptr, /*charge=*/0, del_cb);
129
+ return cache_->Insert(key, /*obj=*/nullptr, internal_helper,
130
+ /*charge=*/0);
125
131
  } else {
126
132
  cache_->Release(lru_handle, /*erase_if_last_ref=*/false);
127
133
  }
@@ -169,10 +175,10 @@ Status CompressedSecondaryCache::Insert(const Slice& key, void* value,
169
175
  size_t charge{0};
170
176
  CacheValueChunk* value_chunks_head =
171
177
  SplitValueIntoChunks(val, cache_options_.compression_type, charge);
172
- return cache_->Insert(key, value_chunks_head, charge, del_cb);
178
+ return cache_->Insert(key, value_chunks_head, internal_helper, charge);
173
179
  } else {
174
180
  CacheAllocationPtr* buf = new CacheAllocationPtr(std::move(ptr));
175
- return cache_->Insert(key, buf, size, del_cb);
181
+ return cache_->Insert(key, buf, internal_helper, size);
176
182
  }
177
183
  }
178
184
 
@@ -276,23 +282,29 @@ CacheAllocationPtr CompressedSecondaryCache::MergeChunksIntoValue(
276
282
  return ptr;
277
283
  }
278
284
 
279
- Cache::DeleterFn CompressedSecondaryCache::GetDeletionCallback(
280
- bool enable_custom_split_merge) {
285
+ const Cache::CacheItemHelper* CompressedSecondaryCache::GetHelper(
286
+ bool enable_custom_split_merge) const {
281
287
  if (enable_custom_split_merge) {
282
- return [](const Slice& /*key*/, void* obj) {
283
- CacheValueChunk* chunks_head = reinterpret_cast<CacheValueChunk*>(obj);
284
- while (chunks_head != nullptr) {
285
- CacheValueChunk* tmp_chunk = chunks_head;
286
- chunks_head = chunks_head->next;
287
- tmp_chunk->Free();
288
- obj = nullptr;
289
- };
290
- };
288
+ static const Cache::CacheItemHelper kHelper{
289
+ CacheEntryRole::kMisc,
290
+ [](Cache::ObjectPtr obj, MemoryAllocator* /*alloc*/) {
291
+ CacheValueChunk* chunks_head = static_cast<CacheValueChunk*>(obj);
292
+ while (chunks_head != nullptr) {
293
+ CacheValueChunk* tmp_chunk = chunks_head;
294
+ chunks_head = chunks_head->next;
295
+ tmp_chunk->Free();
296
+ obj = nullptr;
297
+ };
298
+ }};
299
+ return &kHelper;
291
300
  } else {
292
- return [](const Slice& /*key*/, void* obj) {
293
- delete reinterpret_cast<CacheAllocationPtr*>(obj);
294
- obj = nullptr;
295
- };
301
+ static const Cache::CacheItemHelper kHelper{
302
+ CacheEntryRole::kMisc,
303
+ [](Cache::ObjectPtr obj, MemoryAllocator* /*alloc*/) {
304
+ delete static_cast<CacheAllocationPtr*>(obj);
305
+ obj = nullptr;
306
+ }};
307
+ return &kHelper;
296
308
  }
297
309
  }
298
310
 
@@ -21,7 +21,7 @@ namespace ROCKSDB_NAMESPACE {
21
21
 
22
22
  class CompressedSecondaryCacheResultHandle : public SecondaryCacheResultHandle {
23
23
  public:
24
- CompressedSecondaryCacheResultHandle(void* value, size_t size)
24
+ CompressedSecondaryCacheResultHandle(Cache::ObjectPtr value, size_t size)
25
25
  : value_(value), size_(size) {}
26
26
  ~CompressedSecondaryCacheResultHandle() override = default;
27
27
 
@@ -34,12 +34,12 @@ class CompressedSecondaryCacheResultHandle : public SecondaryCacheResultHandle {
34
34
 
35
35
  void Wait() override {}
36
36
 
37
- void* Value() override { return value_; }
37
+ Cache::ObjectPtr Value() override { return value_; }
38
38
 
39
39
  size_t Size() override { return size_; }
40
40
 
41
41
  private:
42
- void* value_;
42
+ Cache::ObjectPtr value_;
43
43
  size_t size_;
44
44
  };
45
45
 
@@ -83,12 +83,13 @@ class CompressedSecondaryCache : public SecondaryCache {
83
83
 
84
84
  const char* Name() const override { return "CompressedSecondaryCache"; }
85
85
 
86
- Status Insert(const Slice& key, void* value,
86
+ Status Insert(const Slice& key, Cache::ObjectPtr value,
87
87
  const Cache::CacheItemHelper* helper) override;
88
88
 
89
89
  std::unique_ptr<SecondaryCacheResultHandle> Lookup(
90
- const Slice& key, const Cache::CreateCallback& create_cb, bool /*wait*/,
91
- bool advise_erase, bool& is_in_sec_cache) override;
90
+ const Slice& key, const Cache::CacheItemHelper* helper,
91
+ Cache::CreateContext* create_context, bool /*wait*/, bool advise_erase,
92
+ bool& is_in_sec_cache) override;
92
93
 
93
94
  bool SupportForceErase() const override { return true; }
94
95
 
@@ -129,8 +130,8 @@ class CompressedSecondaryCache : public SecondaryCache {
129
130
  CacheAllocationPtr MergeChunksIntoValue(const void* chunks_head,
130
131
  size_t& charge);
131
132
 
132
- // An implementation of Cache::DeleterFn.
133
- static Cache::DeleterFn GetDeletionCallback(bool enable_custom_split_merge);
133
+ // TODO: clean up to use cleaner interfaces in typed_cache.h
134
+ const Cache::CacheItemHelper* GetHelper(bool enable_custom_split_merge) const;
134
135
  std::shared_ptr<Cache> cache_;
135
136
  CompressedSecondaryCacheOptions cache_options_;
136
137
  mutable port::Mutex capacity_mutex_;