@nxtedition/rocksdb 7.0.23 → 7.0.26
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/binding.cc +3 -1
- package/deps/rocksdb/rocksdb/CMakeLists.txt +5 -0
- package/deps/rocksdb/rocksdb/Makefile +6 -2
- package/deps/rocksdb/rocksdb/TARGETS +14 -0
- package/deps/rocksdb/rocksdb/cache/cache_bench_tool.cc +4 -1
- package/deps/rocksdb/rocksdb/cache/cache_helpers.h +20 -0
- package/deps/rocksdb/rocksdb/cache/cache_reservation_manager_test.cc +2 -2
- package/deps/rocksdb/rocksdb/cache/cache_test.cc +44 -31
- package/deps/rocksdb/rocksdb/cache/clock_cache.cc +491 -722
- package/deps/rocksdb/rocksdb/cache/clock_cache.h +468 -2
- package/deps/rocksdb/rocksdb/cache/compressed_secondary_cache.cc +1 -1
- package/deps/rocksdb/rocksdb/cache/fast_lru_cache.cc +51 -52
- package/deps/rocksdb/rocksdb/cache/fast_lru_cache.h +28 -16
- package/deps/rocksdb/rocksdb/cache/lru_cache.cc +12 -1
- package/deps/rocksdb/rocksdb/cache/lru_cache.h +1 -0
- package/deps/rocksdb/rocksdb/cache/lru_cache_test.cc +170 -36
- package/deps/rocksdb/rocksdb/db/blob/blob_file_cache_test.cc +1 -1
- package/deps/rocksdb/rocksdb/db/blob/blob_file_reader.cc +63 -36
- package/deps/rocksdb/rocksdb/db/blob/blob_file_reader.h +4 -6
- package/deps/rocksdb/rocksdb/db/blob/blob_file_reader_test.cc +57 -38
- package/deps/rocksdb/rocksdb/db/blob/blob_read_request.h +58 -0
- package/deps/rocksdb/rocksdb/db/blob/blob_source.cc +164 -74
- package/deps/rocksdb/rocksdb/db/blob/blob_source.h +42 -29
- package/deps/rocksdb/rocksdb/db/blob/blob_source_test.cc +419 -62
- package/deps/rocksdb/rocksdb/db/blob/db_blob_basic_test.cc +208 -8
- package/deps/rocksdb/rocksdb/db/c.cc +68 -0
- package/deps/rocksdb/rocksdb/db/c_test.c +95 -2
- package/deps/rocksdb/rocksdb/db/column_family.cc +12 -3
- package/deps/rocksdb/rocksdb/db/compaction/compaction.cc +92 -15
- package/deps/rocksdb/rocksdb/db/compaction/compaction.h +76 -4
- package/deps/rocksdb/rocksdb/db/compaction/compaction_iterator.cc +52 -1
- package/deps/rocksdb/rocksdb/db/compaction/compaction_iterator.h +30 -1
- package/deps/rocksdb/rocksdb/db/compaction/compaction_iterator_test.cc +126 -0
- package/deps/rocksdb/rocksdb/db/compaction/compaction_job.cc +203 -1584
- package/deps/rocksdb/rocksdb/db/compaction/compaction_job.h +93 -26
- package/deps/rocksdb/rocksdb/db/compaction/compaction_job_test.cc +87 -1
- package/deps/rocksdb/rocksdb/db/compaction/compaction_outputs.cc +314 -0
- package/deps/rocksdb/rocksdb/db/compaction/compaction_outputs.h +328 -0
- package/deps/rocksdb/rocksdb/db/compaction/compaction_picker.cc +32 -6
- package/deps/rocksdb/rocksdb/db/compaction/compaction_picker.h +4 -1
- package/deps/rocksdb/rocksdb/db/compaction/compaction_picker_fifo.cc +7 -3
- package/deps/rocksdb/rocksdb/db/compaction/compaction_picker_level.cc +174 -33
- package/deps/rocksdb/rocksdb/db/compaction/compaction_picker_test.cc +474 -7
- package/deps/rocksdb/rocksdb/db/compaction/compaction_picker_universal.cc +5 -2
- package/deps/rocksdb/rocksdb/db/compaction/compaction_service_job.cc +825 -0
- package/deps/rocksdb/rocksdb/db/compaction/compaction_state.cc +46 -0
- package/deps/rocksdb/rocksdb/db/compaction/compaction_state.h +42 -0
- package/deps/rocksdb/rocksdb/db/compaction/subcompaction_state.cc +223 -0
- package/deps/rocksdb/rocksdb/db/compaction/subcompaction_state.h +255 -0
- package/deps/rocksdb/rocksdb/db/compaction/tiered_compaction_test.cc +1253 -0
- package/deps/rocksdb/rocksdb/db/corruption_test.cc +32 -8
- package/deps/rocksdb/rocksdb/db/db_basic_test.cc +3 -1
- package/deps/rocksdb/rocksdb/db/db_block_cache_test.cc +13 -8
- package/deps/rocksdb/rocksdb/db/db_bloom_filter_test.cc +376 -0
- package/deps/rocksdb/rocksdb/db/db_compaction_test.cc +103 -78
- package/deps/rocksdb/rocksdb/db/db_impl/db_impl.cc +4 -6
- package/deps/rocksdb/rocksdb/db/db_impl/db_impl.h +0 -8
- package/deps/rocksdb/rocksdb/db/db_impl/db_impl_open.cc +10 -3
- package/deps/rocksdb/rocksdb/db/db_impl/db_impl_secondary.cc +21 -6
- package/deps/rocksdb/rocksdb/db/db_impl/db_impl_secondary.h +19 -1
- package/deps/rocksdb/rocksdb/db/db_iter.cc +91 -14
- package/deps/rocksdb/rocksdb/db/db_iter.h +5 -0
- package/deps/rocksdb/rocksdb/db/db_kv_checksum_test.cc +33 -0
- package/deps/rocksdb/rocksdb/db/db_properties_test.cc +79 -0
- package/deps/rocksdb/rocksdb/db/db_range_del_test.cc +2 -0
- package/deps/rocksdb/rocksdb/db/db_test2.cc +1 -1
- package/deps/rocksdb/rocksdb/db/db_wal_test.cc +5 -2
- package/deps/rocksdb/rocksdb/db/db_with_timestamp_basic_test.cc +185 -0
- package/deps/rocksdb/rocksdb/db/dbformat.cc +1 -4
- package/deps/rocksdb/rocksdb/db/dbformat.h +2 -8
- package/deps/rocksdb/rocksdb/db/internal_stats.cc +71 -29
- package/deps/rocksdb/rocksdb/db/internal_stats.h +160 -5
- package/deps/rocksdb/rocksdb/db/log_reader.cc +29 -3
- package/deps/rocksdb/rocksdb/db/log_reader.h +12 -3
- package/deps/rocksdb/rocksdb/db/repair_test.cc +1 -3
- package/deps/rocksdb/rocksdb/db/version_edit.cc +6 -0
- package/deps/rocksdb/rocksdb/db/version_set.cc +93 -129
- package/deps/rocksdb/rocksdb/db/version_set.h +4 -4
- package/deps/rocksdb/rocksdb/db/version_set_sync_and_async.h +2 -2
- package/deps/rocksdb/rocksdb/db/version_set_test.cc +42 -35
- package/deps/rocksdb/rocksdb/db/write_batch.cc +10 -2
- package/deps/rocksdb/rocksdb/db/write_batch_internal.h +4 -1
- package/deps/rocksdb/rocksdb/db_stress_tool/db_stress_common.cc +10 -4
- package/deps/rocksdb/rocksdb/db_stress_tool/db_stress_common.h +3 -3
- package/deps/rocksdb/rocksdb/db_stress_tool/db_stress_driver.cc +3 -2
- package/deps/rocksdb/rocksdb/db_stress_tool/db_stress_gflags.cc +4 -0
- package/deps/rocksdb/rocksdb/db_stress_tool/db_stress_shared_state.h +5 -1
- package/deps/rocksdb/rocksdb/db_stress_tool/db_stress_test_base.cc +140 -8
- package/deps/rocksdb/rocksdb/db_stress_tool/db_stress_test_base.h +12 -0
- package/deps/rocksdb/rocksdb/db_stress_tool/multi_ops_txns_stress.cc +46 -7
- package/deps/rocksdb/rocksdb/db_stress_tool/multi_ops_txns_stress.h +7 -0
- package/deps/rocksdb/rocksdb/db_stress_tool/no_batched_ops_stress.cc +27 -7
- package/deps/rocksdb/rocksdb/env/composite_env_wrapper.h +8 -0
- package/deps/rocksdb/rocksdb/env/env_posix.cc +14 -0
- package/deps/rocksdb/rocksdb/env/env_test.cc +130 -1
- package/deps/rocksdb/rocksdb/env/fs_posix.cc +7 -1
- package/deps/rocksdb/rocksdb/env/io_posix.cc +18 -50
- package/deps/rocksdb/rocksdb/env/io_posix.h +53 -6
- package/deps/rocksdb/rocksdb/file/file_prefetch_buffer.cc +8 -10
- package/deps/rocksdb/rocksdb/file/file_prefetch_buffer.h +3 -7
- package/deps/rocksdb/rocksdb/file/prefetch_test.cc +239 -259
- package/deps/rocksdb/rocksdb/file/random_access_file_reader.cc +84 -19
- package/deps/rocksdb/rocksdb/file/random_access_file_reader.h +24 -4
- package/deps/rocksdb/rocksdb/include/rocksdb/advanced_options.h +1 -1
- package/deps/rocksdb/rocksdb/include/rocksdb/c.h +31 -1
- package/deps/rocksdb/rocksdb/include/rocksdb/cache.h +11 -7
- package/deps/rocksdb/rocksdb/include/rocksdb/compaction_job_stats.h +2 -0
- package/deps/rocksdb/rocksdb/include/rocksdb/db.h +14 -0
- package/deps/rocksdb/rocksdb/include/rocksdb/env.h +20 -0
- package/deps/rocksdb/rocksdb/include/rocksdb/options.h +37 -13
- package/deps/rocksdb/rocksdb/include/rocksdb/perf_context.h +7 -0
- package/deps/rocksdb/rocksdb/include/rocksdb/statistics.h +14 -0
- package/deps/rocksdb/rocksdb/include/rocksdb/threadpool.h +9 -0
- package/deps/rocksdb/rocksdb/include/rocksdb/write_batch.h +13 -13
- package/deps/rocksdb/rocksdb/logging/auto_roll_logger.cc +12 -2
- package/deps/rocksdb/rocksdb/monitoring/perf_context.cc +38 -0
- package/deps/rocksdb/rocksdb/monitoring/statistics.cc +7 -1
- package/deps/rocksdb/rocksdb/port/win/env_win.cc +17 -0
- package/deps/rocksdb/rocksdb/port/win/env_win.h +8 -0
- package/deps/rocksdb/rocksdb/port/win/io_win.cc +6 -3
- package/deps/rocksdb/rocksdb/src.mk +5 -0
- package/deps/rocksdb/rocksdb/table/block_based/block.h +1 -2
- package/deps/rocksdb/rocksdb/table/block_based/block_based_table_builder.cc +1 -1
- package/deps/rocksdb/rocksdb/table/block_based/block_based_table_iterator.cc +5 -2
- package/deps/rocksdb/rocksdb/table/block_based/block_based_table_reader.cc +1 -1
- package/deps/rocksdb/rocksdb/table/block_based/block_based_table_reader_impl.h +15 -12
- package/deps/rocksdb/rocksdb/table/block_based/block_prefetcher.cc +5 -4
- package/deps/rocksdb/rocksdb/table/block_based/block_prefetcher.h +2 -1
- package/deps/rocksdb/rocksdb/table/block_based/filter_policy.cc +1 -1
- package/deps/rocksdb/rocksdb/table/block_based/partitioned_index_iterator.cc +4 -4
- package/deps/rocksdb/rocksdb/table/block_fetcher.cc +1 -2
- package/deps/rocksdb/rocksdb/table/get_context.cc +1 -0
- package/deps/rocksdb/rocksdb/table/sst_file_dumper.cc +1 -2
- package/deps/rocksdb/rocksdb/tools/db_bench_tool.cc +24 -4
- package/deps/rocksdb/rocksdb/util/async_file_reader.cc +1 -1
- package/deps/rocksdb/rocksdb/util/compression.h +2 -0
- package/deps/rocksdb/rocksdb/util/thread_list_test.cc +18 -1
- package/deps/rocksdb/rocksdb/util/threadpool_imp.cc +67 -4
- package/deps/rocksdb/rocksdb/util/threadpool_imp.h +8 -0
- package/deps/rocksdb/rocksdb/utilities/backup/backup_engine.cc +15 -12
- package/deps/rocksdb/rocksdb/utilities/backup/backup_engine_test.cc +4 -2
- package/deps/rocksdb/rocksdb/utilities/simulator_cache/sim_cache_test.cc +1 -1
- package/deps/rocksdb/rocksdb.gyp +5 -1
- package/package.json +1 -1
- package/prebuilds/darwin-arm64/node.napi.node +0 -0
- package/prebuilds/linux-x64/node.napi.node +0 -0
|
@@ -0,0 +1,1253 @@
|
|
|
1
|
+
// Copyright (c) Meta Platforms, Inc. and affiliates.
|
|
2
|
+
//
|
|
3
|
+
// This source code is licensed under both the GPLv2 (found in the
|
|
4
|
+
// COPYING file in the root directory) and Apache 2.0 License
|
|
5
|
+
// (found in the LICENSE.Apache file in the root directory).
|
|
6
|
+
//
|
|
7
|
+
// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
|
|
8
|
+
// Use of this source code is governed by a BSD-style license that can be
|
|
9
|
+
// found in the LICENSE file. See the AUTHORS file for names of contributors.
|
|
10
|
+
|
|
11
|
+
#include "db/db_test_util.h"
|
|
12
|
+
#include "port/stack_trace.h"
|
|
13
|
+
#include "rocksdb/listener.h"
|
|
14
|
+
|
|
15
|
+
namespace ROCKSDB_NAMESPACE {
|
|
16
|
+
|
|
17
|
+
#if !defined(ROCKSDB_LITE)
|
|
18
|
+
|
|
19
|
+
class TieredCompactionTest : public DBTestBase {
|
|
20
|
+
public:
|
|
21
|
+
TieredCompactionTest()
|
|
22
|
+
: DBTestBase("tiered_compaction_test", /*env_do_fsync=*/true),
|
|
23
|
+
kBasicCompStats(CompactionReason::kUniversalSizeAmplification, 1),
|
|
24
|
+
kBasicPerKeyPlacementCompStats(
|
|
25
|
+
CompactionReason::kUniversalSizeAmplification, 1),
|
|
26
|
+
kBasicFlushStats(CompactionReason::kFlush, 1) {
|
|
27
|
+
kBasicCompStats.micros = kHasValue;
|
|
28
|
+
kBasicCompStats.cpu_micros = kHasValue;
|
|
29
|
+
kBasicCompStats.bytes_read_non_output_levels = kHasValue;
|
|
30
|
+
kBasicCompStats.num_input_files_in_non_output_levels = kHasValue;
|
|
31
|
+
kBasicCompStats.num_input_records = kHasValue;
|
|
32
|
+
kBasicCompStats.num_dropped_records = kHasValue;
|
|
33
|
+
|
|
34
|
+
kBasicPerLevelStats.num_output_records = kHasValue;
|
|
35
|
+
kBasicPerLevelStats.bytes_written = kHasValue;
|
|
36
|
+
kBasicPerLevelStats.num_output_files = kHasValue;
|
|
37
|
+
|
|
38
|
+
kBasicPerKeyPlacementCompStats.micros = kHasValue;
|
|
39
|
+
kBasicPerKeyPlacementCompStats.cpu_micros = kHasValue;
|
|
40
|
+
kBasicPerKeyPlacementCompStats.Add(kBasicPerLevelStats);
|
|
41
|
+
|
|
42
|
+
kBasicFlushStats.micros = kHasValue;
|
|
43
|
+
kBasicFlushStats.cpu_micros = kHasValue;
|
|
44
|
+
kBasicFlushStats.bytes_written = kHasValue;
|
|
45
|
+
kBasicFlushStats.num_output_files = kHasValue;
|
|
46
|
+
}
|
|
47
|
+
|
|
48
|
+
protected:
|
|
49
|
+
static constexpr uint8_t kHasValue = 1;
|
|
50
|
+
|
|
51
|
+
InternalStats::CompactionStats kBasicCompStats;
|
|
52
|
+
InternalStats::CompactionStats kBasicPerKeyPlacementCompStats;
|
|
53
|
+
InternalStats::CompactionOutputsStats kBasicPerLevelStats;
|
|
54
|
+
InternalStats::CompactionStats kBasicFlushStats;
|
|
55
|
+
|
|
56
|
+
void SetUp() override {
|
|
57
|
+
SyncPoint::GetInstance()->SetCallBack(
|
|
58
|
+
"Compaction::SupportsPerKeyPlacement:Enabled", [&](void* arg) {
|
|
59
|
+
auto supports_per_key_placement = static_cast<bool*>(arg);
|
|
60
|
+
*supports_per_key_placement = true;
|
|
61
|
+
});
|
|
62
|
+
SyncPoint::GetInstance()->EnableProcessing();
|
|
63
|
+
}
|
|
64
|
+
|
|
65
|
+
#ifndef ROCKSDB_LITE
|
|
66
|
+
uint64_t GetSstSizeHelper(Temperature temperature) {
|
|
67
|
+
std::string prop;
|
|
68
|
+
EXPECT_TRUE(dbfull()->GetProperty(
|
|
69
|
+
DB::Properties::kLiveSstFilesSizeAtTemperature +
|
|
70
|
+
std::to_string(static_cast<uint8_t>(temperature)),
|
|
71
|
+
&prop));
|
|
72
|
+
return static_cast<uint64_t>(std::atoi(prop.c_str()));
|
|
73
|
+
}
|
|
74
|
+
#endif // ROCKSDB_LITE
|
|
75
|
+
|
|
76
|
+
const std::vector<InternalStats::CompactionStats>& GetCompactionStats() {
|
|
77
|
+
VersionSet* const versions = dbfull()->GetVersionSet();
|
|
78
|
+
assert(versions);
|
|
79
|
+
assert(versions->GetColumnFamilySet());
|
|
80
|
+
|
|
81
|
+
ColumnFamilyData* const cfd = versions->GetColumnFamilySet()->GetDefault();
|
|
82
|
+
assert(cfd);
|
|
83
|
+
|
|
84
|
+
const InternalStats* const internal_stats = cfd->internal_stats();
|
|
85
|
+
assert(internal_stats);
|
|
86
|
+
|
|
87
|
+
return internal_stats->TEST_GetCompactionStats();
|
|
88
|
+
}
|
|
89
|
+
|
|
90
|
+
const InternalStats::CompactionStats& GetPerKeyPlacementCompactionStats() {
|
|
91
|
+
VersionSet* const versions = dbfull()->GetVersionSet();
|
|
92
|
+
assert(versions);
|
|
93
|
+
assert(versions->GetColumnFamilySet());
|
|
94
|
+
|
|
95
|
+
ColumnFamilyData* const cfd = versions->GetColumnFamilySet()->GetDefault();
|
|
96
|
+
assert(cfd);
|
|
97
|
+
|
|
98
|
+
const InternalStats* const internal_stats = cfd->internal_stats();
|
|
99
|
+
assert(internal_stats);
|
|
100
|
+
|
|
101
|
+
return internal_stats->TEST_GetPerKeyPlacementCompactionStats();
|
|
102
|
+
}
|
|
103
|
+
|
|
104
|
+
// Verify the compaction stats, the stats are roughly compared
|
|
105
|
+
void VerifyCompactionStats(
|
|
106
|
+
const std::vector<InternalStats::CompactionStats>& expect_stats,
|
|
107
|
+
const InternalStats::CompactionStats& expect_pl_stats) {
|
|
108
|
+
const std::vector<InternalStats::CompactionStats>& stats =
|
|
109
|
+
GetCompactionStats();
|
|
110
|
+
const size_t kLevels = expect_stats.size();
|
|
111
|
+
ASSERT_EQ(kLevels, stats.size());
|
|
112
|
+
|
|
113
|
+
for (auto it = stats.begin(), expect = expect_stats.begin();
|
|
114
|
+
it != stats.end(); it++, expect++) {
|
|
115
|
+
VerifyCompactionStats(*it, *expect);
|
|
116
|
+
}
|
|
117
|
+
|
|
118
|
+
const InternalStats::CompactionStats& pl_stats =
|
|
119
|
+
GetPerKeyPlacementCompactionStats();
|
|
120
|
+
VerifyCompactionStats(pl_stats, expect_pl_stats);
|
|
121
|
+
}
|
|
122
|
+
|
|
123
|
+
void ResetAllStats(std::vector<InternalStats::CompactionStats>& stats,
|
|
124
|
+
InternalStats::CompactionStats& pl_stats) {
|
|
125
|
+
ASSERT_OK(dbfull()->ResetStats());
|
|
126
|
+
for (auto& level_stats : stats) {
|
|
127
|
+
level_stats.Clear();
|
|
128
|
+
}
|
|
129
|
+
pl_stats.Clear();
|
|
130
|
+
}
|
|
131
|
+
|
|
132
|
+
private:
|
|
133
|
+
void CompareStats(uint64_t val, uint64_t expect) {
|
|
134
|
+
if (expect > 0) {
|
|
135
|
+
ASSERT_TRUE(val > 0);
|
|
136
|
+
} else {
|
|
137
|
+
ASSERT_EQ(val, 0);
|
|
138
|
+
}
|
|
139
|
+
}
|
|
140
|
+
|
|
141
|
+
void VerifyCompactionStats(
|
|
142
|
+
const InternalStats::CompactionStats& stats,
|
|
143
|
+
const InternalStats::CompactionStats& expect_stats) {
|
|
144
|
+
CompareStats(stats.micros, expect_stats.micros);
|
|
145
|
+
CompareStats(stats.cpu_micros, expect_stats.cpu_micros);
|
|
146
|
+
CompareStats(stats.bytes_read_non_output_levels,
|
|
147
|
+
expect_stats.bytes_read_non_output_levels);
|
|
148
|
+
CompareStats(stats.bytes_read_output_level,
|
|
149
|
+
expect_stats.bytes_read_output_level);
|
|
150
|
+
CompareStats(stats.bytes_read_blob, expect_stats.bytes_read_blob);
|
|
151
|
+
CompareStats(stats.bytes_written, expect_stats.bytes_written);
|
|
152
|
+
CompareStats(stats.bytes_moved, expect_stats.bytes_moved);
|
|
153
|
+
CompareStats(stats.num_input_files_in_non_output_levels,
|
|
154
|
+
expect_stats.num_input_files_in_non_output_levels);
|
|
155
|
+
CompareStats(stats.num_input_files_in_output_level,
|
|
156
|
+
expect_stats.num_input_files_in_output_level);
|
|
157
|
+
CompareStats(stats.num_output_files, expect_stats.num_output_files);
|
|
158
|
+
CompareStats(stats.num_output_files_blob,
|
|
159
|
+
expect_stats.num_output_files_blob);
|
|
160
|
+
CompareStats(stats.num_input_records, expect_stats.num_input_records);
|
|
161
|
+
CompareStats(stats.num_dropped_records, expect_stats.num_dropped_records);
|
|
162
|
+
CompareStats(stats.num_output_records, expect_stats.num_output_records);
|
|
163
|
+
ASSERT_EQ(stats.count, expect_stats.count);
|
|
164
|
+
for (int i = 0; i < static_cast<int>(CompactionReason::kNumOfReasons);
|
|
165
|
+
i++) {
|
|
166
|
+
ASSERT_EQ(stats.counts[i], expect_stats.counts[i]);
|
|
167
|
+
}
|
|
168
|
+
}
|
|
169
|
+
};
|
|
170
|
+
|
|
171
|
+
TEST_F(TieredCompactionTest, SequenceBasedTieredStorageUniversal) {
|
|
172
|
+
const int kNumTrigger = 4;
|
|
173
|
+
const int kNumLevels = 7;
|
|
174
|
+
const int kNumKeys = 100;
|
|
175
|
+
const int kLastLevel = kNumLevels - 1;
|
|
176
|
+
|
|
177
|
+
auto options = CurrentOptions();
|
|
178
|
+
options.compaction_style = kCompactionStyleUniversal;
|
|
179
|
+
options.bottommost_temperature = Temperature::kCold;
|
|
180
|
+
options.level0_file_num_compaction_trigger = kNumTrigger;
|
|
181
|
+
options.statistics = CreateDBStatistics();
|
|
182
|
+
options.max_subcompactions = 10;
|
|
183
|
+
DestroyAndReopen(options);
|
|
184
|
+
|
|
185
|
+
std::atomic_uint64_t latest_cold_seq = 0;
|
|
186
|
+
std::vector<SequenceNumber> seq_history;
|
|
187
|
+
|
|
188
|
+
SyncPoint::GetInstance()->SetCallBack(
|
|
189
|
+
"CompactionIterator::PrepareOutput.context", [&](void* arg) {
|
|
190
|
+
auto context = static_cast<PerKeyPlacementContext*>(arg);
|
|
191
|
+
context->output_to_penultimate_level =
|
|
192
|
+
context->seq_num > latest_cold_seq;
|
|
193
|
+
});
|
|
194
|
+
SyncPoint::GetInstance()->EnableProcessing();
|
|
195
|
+
|
|
196
|
+
std::vector<InternalStats::CompactionStats> expect_stats(kNumLevels);
|
|
197
|
+
InternalStats::CompactionStats& last_stats = expect_stats[kLastLevel];
|
|
198
|
+
InternalStats::CompactionStats expect_pl_stats;
|
|
199
|
+
|
|
200
|
+
for (int i = 0; i < kNumTrigger; i++) {
|
|
201
|
+
for (int j = 0; j < kNumKeys; j++) {
|
|
202
|
+
ASSERT_OK(Put(Key(i * 10 + j), "value" + std::to_string(i)));
|
|
203
|
+
}
|
|
204
|
+
ASSERT_OK(Flush());
|
|
205
|
+
seq_history.emplace_back(dbfull()->GetLatestSequenceNumber());
|
|
206
|
+
expect_stats[0].Add(kBasicFlushStats);
|
|
207
|
+
}
|
|
208
|
+
ASSERT_OK(dbfull()->WaitForCompact(true));
|
|
209
|
+
|
|
210
|
+
// the penultimate level file temperature is not cold, all data are output to
|
|
211
|
+
// the penultimate level.
|
|
212
|
+
ASSERT_EQ("0,0,0,0,0,1", FilesPerLevel());
|
|
213
|
+
ASSERT_GT(GetSstSizeHelper(Temperature::kUnknown), 0);
|
|
214
|
+
ASSERT_EQ(GetSstSizeHelper(Temperature::kCold), 0);
|
|
215
|
+
|
|
216
|
+
// basic compaction stats are still counted to the last level
|
|
217
|
+
expect_stats[kLastLevel].Add(kBasicCompStats);
|
|
218
|
+
expect_pl_stats.Add(kBasicPerKeyPlacementCompStats);
|
|
219
|
+
|
|
220
|
+
VerifyCompactionStats(expect_stats, expect_pl_stats);
|
|
221
|
+
|
|
222
|
+
ResetAllStats(expect_stats, expect_pl_stats);
|
|
223
|
+
|
|
224
|
+
// move forward the cold_seq to split the file into 2 levels, so should have
|
|
225
|
+
// both the last level stats and the output_to_penultimate_level stats
|
|
226
|
+
latest_cold_seq = seq_history[0];
|
|
227
|
+
ASSERT_OK(db_->CompactRange(CompactRangeOptions(), nullptr, nullptr));
|
|
228
|
+
ASSERT_EQ("0,0,0,0,0,1,1", FilesPerLevel());
|
|
229
|
+
|
|
230
|
+
ASSERT_GT(GetSstSizeHelper(Temperature::kUnknown), 0);
|
|
231
|
+
ASSERT_GT(GetSstSizeHelper(Temperature::kCold), 0);
|
|
232
|
+
|
|
233
|
+
last_stats.Add(kBasicCompStats);
|
|
234
|
+
last_stats.ResetCompactionReason(CompactionReason::kManualCompaction);
|
|
235
|
+
last_stats.Add(kBasicPerLevelStats);
|
|
236
|
+
last_stats.num_dropped_records = 0;
|
|
237
|
+
expect_pl_stats.Add(kBasicPerKeyPlacementCompStats);
|
|
238
|
+
expect_pl_stats.ResetCompactionReason(CompactionReason::kManualCompaction);
|
|
239
|
+
VerifyCompactionStats(expect_stats, expect_pl_stats);
|
|
240
|
+
|
|
241
|
+
// delete all cold data, so all data will be on penultimate level
|
|
242
|
+
for (int i = 0; i < 10; i++) {
|
|
243
|
+
ASSERT_OK(Delete(Key(i)));
|
|
244
|
+
}
|
|
245
|
+
ASSERT_OK(Flush());
|
|
246
|
+
|
|
247
|
+
ResetAllStats(expect_stats, expect_pl_stats);
|
|
248
|
+
|
|
249
|
+
ASSERT_OK(db_->CompactRange(CompactRangeOptions(), nullptr, nullptr));
|
|
250
|
+
ASSERT_EQ("0,0,0,0,0,1", FilesPerLevel());
|
|
251
|
+
ASSERT_GT(GetSstSizeHelper(Temperature::kUnknown), 0);
|
|
252
|
+
ASSERT_EQ(GetSstSizeHelper(Temperature::kCold), 0);
|
|
253
|
+
|
|
254
|
+
last_stats.Add(kBasicCompStats);
|
|
255
|
+
last_stats.ResetCompactionReason(CompactionReason::kManualCompaction);
|
|
256
|
+
last_stats.bytes_read_output_level = kHasValue;
|
|
257
|
+
last_stats.num_input_files_in_output_level = kHasValue;
|
|
258
|
+
expect_pl_stats.Add(kBasicPerKeyPlacementCompStats);
|
|
259
|
+
expect_pl_stats.ResetCompactionReason(CompactionReason::kManualCompaction);
|
|
260
|
+
VerifyCompactionStats(expect_stats, expect_pl_stats);
|
|
261
|
+
|
|
262
|
+
// move forward the cold_seq again with range delete, take a snapshot to keep
|
|
263
|
+
// the range dels in both cold and hot SSTs
|
|
264
|
+
auto snap = db_->GetSnapshot();
|
|
265
|
+
latest_cold_seq = seq_history[2];
|
|
266
|
+
std::string start = Key(25), end = Key(35);
|
|
267
|
+
ASSERT_OK(
|
|
268
|
+
db_->DeleteRange(WriteOptions(), db_->DefaultColumnFamily(), start, end));
|
|
269
|
+
ASSERT_OK(Flush());
|
|
270
|
+
|
|
271
|
+
ResetAllStats(expect_stats, expect_pl_stats);
|
|
272
|
+
|
|
273
|
+
ASSERT_OK(db_->CompactRange(CompactRangeOptions(), nullptr, nullptr));
|
|
274
|
+
ASSERT_EQ("0,0,0,0,0,1,1", FilesPerLevel());
|
|
275
|
+
ASSERT_GT(GetSstSizeHelper(Temperature::kUnknown), 0);
|
|
276
|
+
ASSERT_GT(GetSstSizeHelper(Temperature::kCold), 0);
|
|
277
|
+
|
|
278
|
+
last_stats.Add(kBasicCompStats);
|
|
279
|
+
last_stats.Add(kBasicPerLevelStats);
|
|
280
|
+
last_stats.ResetCompactionReason(CompactionReason::kManualCompaction);
|
|
281
|
+
expect_pl_stats.Add(kBasicPerKeyPlacementCompStats);
|
|
282
|
+
expect_pl_stats.ResetCompactionReason(CompactionReason::kManualCompaction);
|
|
283
|
+
VerifyCompactionStats(expect_stats, expect_pl_stats);
|
|
284
|
+
|
|
285
|
+
// verify data
|
|
286
|
+
std::string value;
|
|
287
|
+
for (int i = 0; i < kNumKeys; i++) {
|
|
288
|
+
if (i < 10 || (i >= 25 && i < 35)) {
|
|
289
|
+
ASSERT_TRUE(db_->Get(ReadOptions(), Key(i), &value).IsNotFound());
|
|
290
|
+
} else {
|
|
291
|
+
ASSERT_OK(db_->Get(ReadOptions(), Key(i), &value));
|
|
292
|
+
}
|
|
293
|
+
}
|
|
294
|
+
|
|
295
|
+
// range delete all hot data
|
|
296
|
+
start = Key(30);
|
|
297
|
+
end = Key(130);
|
|
298
|
+
ASSERT_OK(
|
|
299
|
+
db_->DeleteRange(WriteOptions(), db_->DefaultColumnFamily(), start, end));
|
|
300
|
+
ASSERT_OK(Flush());
|
|
301
|
+
ASSERT_OK(db_->CompactRange(CompactRangeOptions(), nullptr, nullptr));
|
|
302
|
+
ASSERT_EQ("0,0,0,0,0,1,1", FilesPerLevel());
|
|
303
|
+
ASSERT_GT(GetSstSizeHelper(Temperature::kUnknown), 0);
|
|
304
|
+
ASSERT_GT(GetSstSizeHelper(Temperature::kCold), 0);
|
|
305
|
+
|
|
306
|
+
// no range del is dropped because of snapshot
|
|
307
|
+
ASSERT_EQ(
|
|
308
|
+
options.statistics->getTickerCount(COMPACTION_RANGE_DEL_DROP_OBSOLETE),
|
|
309
|
+
0);
|
|
310
|
+
|
|
311
|
+
// release the snapshot and do compaction again should remove all hot data
|
|
312
|
+
db_->ReleaseSnapshot(snap);
|
|
313
|
+
ASSERT_OK(db_->CompactRange(CompactRangeOptions(), nullptr, nullptr));
|
|
314
|
+
ASSERT_EQ("0,0,0,0,0,0,1", FilesPerLevel());
|
|
315
|
+
ASSERT_EQ(GetSstSizeHelper(Temperature::kUnknown), 0);
|
|
316
|
+
ASSERT_GT(GetSstSizeHelper(Temperature::kCold), 0);
|
|
317
|
+
|
|
318
|
+
// 2 range dels are dropped
|
|
319
|
+
ASSERT_EQ(
|
|
320
|
+
options.statistics->getTickerCount(COMPACTION_RANGE_DEL_DROP_OBSOLETE),
|
|
321
|
+
3);
|
|
322
|
+
|
|
323
|
+
// move backward the cold_seq, for example the user may change the setting of
|
|
324
|
+
// hot/cold data, but it won't impact the existing cold data, as the sequence
|
|
325
|
+
// number is zeroed out.
|
|
326
|
+
latest_cold_seq = seq_history[1];
|
|
327
|
+
ASSERT_OK(db_->CompactRange(CompactRangeOptions(), nullptr, nullptr));
|
|
328
|
+
ASSERT_EQ("0,0,0,0,0,0,1", FilesPerLevel());
|
|
329
|
+
ASSERT_EQ(GetSstSizeHelper(Temperature::kUnknown), 0);
|
|
330
|
+
ASSERT_GT(GetSstSizeHelper(Temperature::kCold), 0);
|
|
331
|
+
}
|
|
332
|
+
|
|
333
|
+
TEST_F(TieredCompactionTest, RangeBasedTieredStorageUniversal) {
|
|
334
|
+
const int kNumTrigger = 4;
|
|
335
|
+
const int kNumLevels = 7;
|
|
336
|
+
const int kNumKeys = 100;
|
|
337
|
+
const int kLastLevel = kNumLevels - 1;
|
|
338
|
+
|
|
339
|
+
auto options = CurrentOptions();
|
|
340
|
+
options.compaction_style = kCompactionStyleUniversal;
|
|
341
|
+
options.bottommost_temperature = Temperature::kCold;
|
|
342
|
+
options.level0_file_num_compaction_trigger = kNumTrigger;
|
|
343
|
+
options.statistics = CreateDBStatistics();
|
|
344
|
+
options.max_subcompactions = 10;
|
|
345
|
+
DestroyAndReopen(options);
|
|
346
|
+
auto cmp = options.comparator;
|
|
347
|
+
|
|
348
|
+
port::Mutex mutex;
|
|
349
|
+
std::string hot_start = Key(10);
|
|
350
|
+
std::string hot_end = Key(50);
|
|
351
|
+
|
|
352
|
+
SyncPoint::GetInstance()->SetCallBack(
|
|
353
|
+
"CompactionIterator::PrepareOutput.context", [&](void* arg) {
|
|
354
|
+
auto context = static_cast<PerKeyPlacementContext*>(arg);
|
|
355
|
+
MutexLock l(&mutex);
|
|
356
|
+
context->output_to_penultimate_level =
|
|
357
|
+
cmp->Compare(context->key, hot_start) >= 0 &&
|
|
358
|
+
cmp->Compare(context->key, hot_end) < 0;
|
|
359
|
+
});
|
|
360
|
+
SyncPoint::GetInstance()->EnableProcessing();
|
|
361
|
+
|
|
362
|
+
std::vector<InternalStats::CompactionStats> expect_stats(kNumLevels);
|
|
363
|
+
InternalStats::CompactionStats& last_stats = expect_stats[kLastLevel];
|
|
364
|
+
InternalStats::CompactionStats expect_pl_stats;
|
|
365
|
+
|
|
366
|
+
for (int i = 0; i < kNumTrigger; i++) {
|
|
367
|
+
for (int j = 0; j < kNumKeys; j++) {
|
|
368
|
+
ASSERT_OK(Put(Key(j), "value" + std::to_string(j)));
|
|
369
|
+
}
|
|
370
|
+
ASSERT_OK(Flush());
|
|
371
|
+
expect_stats[0].Add(kBasicFlushStats);
|
|
372
|
+
}
|
|
373
|
+
ASSERT_OK(dbfull()->WaitForCompact(true));
|
|
374
|
+
ASSERT_EQ("0,0,0,0,0,1,1", FilesPerLevel());
|
|
375
|
+
ASSERT_GT(GetSstSizeHelper(Temperature::kUnknown), 0);
|
|
376
|
+
ASSERT_GT(GetSstSizeHelper(Temperature::kCold), 0);
|
|
377
|
+
|
|
378
|
+
last_stats.Add(kBasicCompStats);
|
|
379
|
+
last_stats.Add(kBasicPerLevelStats);
|
|
380
|
+
expect_pl_stats.Add(kBasicPerKeyPlacementCompStats);
|
|
381
|
+
VerifyCompactionStats(expect_stats, expect_pl_stats);
|
|
382
|
+
|
|
383
|
+
ResetAllStats(expect_stats, expect_pl_stats);
|
|
384
|
+
|
|
385
|
+
// change to all cold, no output_to_penultimate_level output
|
|
386
|
+
{
|
|
387
|
+
MutexLock l(&mutex);
|
|
388
|
+
hot_start = Key(100);
|
|
389
|
+
hot_end = Key(200);
|
|
390
|
+
}
|
|
391
|
+
ASSERT_OK(db_->CompactRange(CompactRangeOptions(), nullptr, nullptr));
|
|
392
|
+
ASSERT_EQ("0,0,0,0,0,0,1", FilesPerLevel());
|
|
393
|
+
ASSERT_EQ(GetSstSizeHelper(Temperature::kUnknown), 0);
|
|
394
|
+
ASSERT_GT(GetSstSizeHelper(Temperature::kCold), 0);
|
|
395
|
+
|
|
396
|
+
last_stats.Add(kBasicCompStats);
|
|
397
|
+
last_stats.ResetCompactionReason(CompactionReason::kManualCompaction);
|
|
398
|
+
last_stats.Add(kBasicPerLevelStats);
|
|
399
|
+
last_stats.num_dropped_records = 0;
|
|
400
|
+
last_stats.bytes_read_output_level = kHasValue;
|
|
401
|
+
last_stats.num_input_files_in_output_level = kHasValue;
|
|
402
|
+
VerifyCompactionStats(expect_stats, expect_pl_stats);
|
|
403
|
+
|
|
404
|
+
// change to all hot, universal compaction support moving data to up level if
|
|
405
|
+
// it's within compaction level range.
|
|
406
|
+
{
|
|
407
|
+
MutexLock l(&mutex);
|
|
408
|
+
hot_start = Key(0);
|
|
409
|
+
hot_end = Key(100);
|
|
410
|
+
}
|
|
411
|
+
|
|
412
|
+
// No data is moved from cold tier to hot tier because no input files from L5
|
|
413
|
+
// or higher, it's not safe to move data to output_to_penultimate_level level.
|
|
414
|
+
ASSERT_OK(db_->CompactRange(CompactRangeOptions(), nullptr, nullptr));
|
|
415
|
+
ASSERT_EQ("0,0,0,0,0,0,1", FilesPerLevel());
|
|
416
|
+
|
|
417
|
+
// Add 2 keys in higher level, but in separated files, the keys within that
|
|
418
|
+
// range should be moved up to the penultimate level
|
|
419
|
+
ASSERT_OK(Put(Key(0), "value" + std::to_string(0)));
|
|
420
|
+
ASSERT_OK(Flush());
|
|
421
|
+
ASSERT_OK(Put(Key(50), "value" + std::to_string(0)));
|
|
422
|
+
ASSERT_OK(Flush());
|
|
423
|
+
ASSERT_OK(db_->CompactRange(CompactRangeOptions(), nullptr, nullptr));
|
|
424
|
+
ASSERT_EQ("0,0,0,0,0,1,1", FilesPerLevel());
|
|
425
|
+
|
|
426
|
+
// Add an SST with a key range cover all the data we want to move from the
|
|
427
|
+
// last level to the penultimate level
|
|
428
|
+
ASSERT_OK(Put(Key(0), "value" + std::to_string(0)));
|
|
429
|
+
ASSERT_OK(Put(Key(99), "value" + std::to_string(0)));
|
|
430
|
+
ASSERT_OK(Flush());
|
|
431
|
+
|
|
432
|
+
ResetAllStats(expect_stats, expect_pl_stats);
|
|
433
|
+
|
|
434
|
+
ASSERT_OK(db_->CompactRange(CompactRangeOptions(), nullptr, nullptr));
|
|
435
|
+
ASSERT_EQ("0,0,0,0,0,1", FilesPerLevel());
|
|
436
|
+
ASSERT_GT(GetSstSizeHelper(Temperature::kUnknown), 0);
|
|
437
|
+
ASSERT_EQ(GetSstSizeHelper(Temperature::kCold), 0);
|
|
438
|
+
|
|
439
|
+
last_stats.Add(kBasicCompStats);
|
|
440
|
+
last_stats.ResetCompactionReason(CompactionReason::kManualCompaction);
|
|
441
|
+
last_stats.bytes_read_output_level = kHasValue;
|
|
442
|
+
last_stats.num_input_files_in_output_level = kHasValue;
|
|
443
|
+
expect_pl_stats.Add(kBasicPerKeyPlacementCompStats);
|
|
444
|
+
expect_pl_stats.ResetCompactionReason(CompactionReason::kManualCompaction);
|
|
445
|
+
VerifyCompactionStats(expect_stats, expect_pl_stats);
|
|
446
|
+
|
|
447
|
+
// change to only 1 key cold, to test compaction could stop even it matches
|
|
448
|
+
// size amp compaction threshold
|
|
449
|
+
{
|
|
450
|
+
MutexLock l(&mutex);
|
|
451
|
+
hot_start = Key(1);
|
|
452
|
+
hot_end = Key(1000);
|
|
453
|
+
}
|
|
454
|
+
|
|
455
|
+
// generate files just enough to trigger compaction
|
|
456
|
+
for (int i = 0; i < kNumTrigger - 1; i++) {
|
|
457
|
+
for (int j = 0; j < 1000; j++) {
|
|
458
|
+
ASSERT_OK(Put(Key(j), "value" + std::to_string(j)));
|
|
459
|
+
}
|
|
460
|
+
ASSERT_OK(Flush());
|
|
461
|
+
}
|
|
462
|
+
ASSERT_OK(dbfull()->WaitForCompact(
|
|
463
|
+
true)); // make sure the compaction is able to finish
|
|
464
|
+
ASSERT_EQ("0,0,0,0,0,1,1", FilesPerLevel());
|
|
465
|
+
ASSERT_GT(GetSstSizeHelper(Temperature::kUnknown), 0);
|
|
466
|
+
ASSERT_GT(GetSstSizeHelper(Temperature::kCold), 0);
|
|
467
|
+
auto opts = db_->GetOptions();
|
|
468
|
+
auto max_size_amp =
|
|
469
|
+
opts.compaction_options_universal.max_size_amplification_percent / 100;
|
|
470
|
+
ASSERT_GT(GetSstSizeHelper(Temperature::kUnknown),
|
|
471
|
+
GetSstSizeHelper(Temperature::kCold) * max_size_amp);
|
|
472
|
+
|
|
473
|
+
// delete all cold data
|
|
474
|
+
ASSERT_OK(Delete(Key(0)));
|
|
475
|
+
ASSERT_OK(Flush());
|
|
476
|
+
ASSERT_OK(db_->CompactRange(CompactRangeOptions(), nullptr, nullptr));
|
|
477
|
+
ASSERT_EQ("0,0,0,0,0,1", FilesPerLevel());
|
|
478
|
+
ASSERT_GT(GetSstSizeHelper(Temperature::kUnknown), 0);
|
|
479
|
+
ASSERT_EQ(GetSstSizeHelper(Temperature::kCold), 0);
|
|
480
|
+
|
|
481
|
+
// range delete overlap with both hot/cold data, with a snapshot to make sure
|
|
482
|
+
// the range del is saved
|
|
483
|
+
auto snap = db_->GetSnapshot();
|
|
484
|
+
{
|
|
485
|
+
MutexLock l(&mutex);
|
|
486
|
+
hot_start = Key(50);
|
|
487
|
+
hot_end = Key(100);
|
|
488
|
+
}
|
|
489
|
+
std::string start = Key(1), end = Key(70);
|
|
490
|
+
ASSERT_OK(
|
|
491
|
+
db_->DeleteRange(WriteOptions(), db_->DefaultColumnFamily(), start, end));
|
|
492
|
+
ASSERT_OK(Flush());
|
|
493
|
+
ASSERT_OK(db_->CompactRange(CompactRangeOptions(), nullptr, nullptr));
|
|
494
|
+
ASSERT_EQ("0,0,0,0,0,1,1", FilesPerLevel());
|
|
495
|
+
ASSERT_GT(GetSstSizeHelper(Temperature::kUnknown), 0);
|
|
496
|
+
ASSERT_GT(GetSstSizeHelper(Temperature::kCold), 0);
|
|
497
|
+
|
|
498
|
+
// no range del is dropped until snapshot is released
|
|
499
|
+
ASSERT_EQ(
|
|
500
|
+
options.statistics->getTickerCount(COMPACTION_RANGE_DEL_DROP_OBSOLETE),
|
|
501
|
+
0);
|
|
502
|
+
|
|
503
|
+
// verify data
|
|
504
|
+
std::string value;
|
|
505
|
+
for (int i = 0; i < kNumKeys; i++) {
|
|
506
|
+
if (i < 70) {
|
|
507
|
+
ASSERT_TRUE(db_->Get(ReadOptions(), Key(i), &value).IsNotFound());
|
|
508
|
+
} else {
|
|
509
|
+
ASSERT_OK(db_->Get(ReadOptions(), Key(i), &value));
|
|
510
|
+
}
|
|
511
|
+
}
|
|
512
|
+
|
|
513
|
+
db_->ReleaseSnapshot(snap);
|
|
514
|
+
ASSERT_OK(db_->CompactRange(CompactRangeOptions(), nullptr, nullptr));
|
|
515
|
+
ASSERT_EQ("0,0,0,0,0,1,1", FilesPerLevel());
|
|
516
|
+
ASSERT_GT(GetSstSizeHelper(Temperature::kUnknown), 0);
|
|
517
|
+
ASSERT_GT(GetSstSizeHelper(Temperature::kCold), 0);
|
|
518
|
+
|
|
519
|
+
// range del is dropped
|
|
520
|
+
ASSERT_EQ(
|
|
521
|
+
options.statistics->getTickerCount(COMPACTION_RANGE_DEL_DROP_OBSOLETE),
|
|
522
|
+
1);
|
|
523
|
+
}
|
|
524
|
+
|
|
525
|
+
TEST_F(TieredCompactionTest, LevelColdRangeDelete) {
|
|
526
|
+
const int kNumTrigger = 4;
|
|
527
|
+
const int kNumLevels = 7;
|
|
528
|
+
const int kNumKeys = 100;
|
|
529
|
+
|
|
530
|
+
auto options = CurrentOptions();
|
|
531
|
+
options.bottommost_temperature = Temperature::kCold;
|
|
532
|
+
options.level0_file_num_compaction_trigger = kNumTrigger;
|
|
533
|
+
options.num_levels = kNumLevels;
|
|
534
|
+
options.statistics = CreateDBStatistics();
|
|
535
|
+
options.max_subcompactions = 10;
|
|
536
|
+
DestroyAndReopen(options);
|
|
537
|
+
|
|
538
|
+
std::atomic_uint64_t latest_cold_seq = 0;
|
|
539
|
+
|
|
540
|
+
SyncPoint::GetInstance()->SetCallBack(
|
|
541
|
+
"CompactionIterator::PrepareOutput.context", [&](void* arg) {
|
|
542
|
+
auto context = static_cast<PerKeyPlacementContext*>(arg);
|
|
543
|
+
context->output_to_penultimate_level =
|
|
544
|
+
context->seq_num > latest_cold_seq;
|
|
545
|
+
});
|
|
546
|
+
SyncPoint::GetInstance()->EnableProcessing();
|
|
547
|
+
|
|
548
|
+
for (int i = 0; i < kNumKeys; i++) {
|
|
549
|
+
ASSERT_OK(Put(Key(i), "value" + std::to_string(i)));
|
|
550
|
+
}
|
|
551
|
+
ASSERT_OK(Flush());
|
|
552
|
+
|
|
553
|
+
CompactRangeOptions cro;
|
|
554
|
+
cro.bottommost_level_compaction = BottommostLevelCompaction::kForce;
|
|
555
|
+
ASSERT_OK(db_->CompactRange(cro, nullptr, nullptr));
|
|
556
|
+
ASSERT_EQ("0,1", FilesPerLevel());
|
|
557
|
+
ASSERT_EQ(GetSstSizeHelper(Temperature::kUnknown), 0);
|
|
558
|
+
ASSERT_GT(GetSstSizeHelper(Temperature::kCold), 0);
|
|
559
|
+
|
|
560
|
+
MoveFilesToLevel(kNumLevels - 1);
|
|
561
|
+
|
|
562
|
+
ASSERT_EQ("0,0,0,0,0,0,1", FilesPerLevel());
|
|
563
|
+
|
|
564
|
+
auto snap = db_->GetSnapshot();
|
|
565
|
+
|
|
566
|
+
std::string start = Key(10);
|
|
567
|
+
std::string end = Key(50);
|
|
568
|
+
ASSERT_OK(
|
|
569
|
+
db_->DeleteRange(WriteOptions(), db_->DefaultColumnFamily(), start, end));
|
|
570
|
+
|
|
571
|
+
// 20->30 will be marked as cold data, but it cannot be placed to cold tier
|
|
572
|
+
// (bottommost) otherwise, it will be "deleted" by the range del in
|
|
573
|
+
// output_to_penultimate_level level verify that these data will be able to
|
|
574
|
+
// queried
|
|
575
|
+
for (int i = 20; i < 30; i++) {
|
|
576
|
+
ASSERT_OK(Put(Key(i), "value" + std::to_string(i)));
|
|
577
|
+
}
|
|
578
|
+
// make the range tombstone and data after that cold
|
|
579
|
+
latest_cold_seq = dbfull()->GetLatestSequenceNumber();
|
|
580
|
+
|
|
581
|
+
// add home hot data, just for test
|
|
582
|
+
for (int i = 30; i < 40; i++) {
|
|
583
|
+
ASSERT_OK(Put(Key(i), "value" + std::to_string(i)));
|
|
584
|
+
}
|
|
585
|
+
|
|
586
|
+
ASSERT_OK(db_->CompactRange(cro, nullptr, nullptr));
|
|
587
|
+
|
|
588
|
+
std::string value;
|
|
589
|
+
for (int i = 0; i < kNumKeys; i++) {
|
|
590
|
+
auto s = db_->Get(ReadOptions(), Key(i), &value);
|
|
591
|
+
if ((i >= 10 && i < 20) || (i >= 40 && i < 50)) {
|
|
592
|
+
ASSERT_TRUE(s.IsNotFound());
|
|
593
|
+
} else {
|
|
594
|
+
ASSERT_OK(s);
|
|
595
|
+
}
|
|
596
|
+
}
|
|
597
|
+
|
|
598
|
+
db_->ReleaseSnapshot(snap);
|
|
599
|
+
}
|
|
600
|
+
|
|
601
|
+
// Test SST partitioner cut after every single key
|
|
602
|
+
class SingleKeySstPartitioner : public SstPartitioner {
|
|
603
|
+
public:
|
|
604
|
+
const char* Name() const override { return "SingleKeySstPartitioner"; }
|
|
605
|
+
|
|
606
|
+
PartitionerResult ShouldPartition(
|
|
607
|
+
const PartitionerRequest& /*request*/) override {
|
|
608
|
+
return kRequired;
|
|
609
|
+
}
|
|
610
|
+
|
|
611
|
+
bool CanDoTrivialMove(const Slice& /*smallest_user_key*/,
|
|
612
|
+
const Slice& /*largest_user_key*/) override {
|
|
613
|
+
return false;
|
|
614
|
+
}
|
|
615
|
+
};
|
|
616
|
+
|
|
617
|
+
class SingleKeySstPartitionerFactory : public SstPartitionerFactory {
|
|
618
|
+
public:
|
|
619
|
+
static const char* kClassName() { return "SingleKeySstPartitionerFactory"; }
|
|
620
|
+
const char* Name() const override { return kClassName(); }
|
|
621
|
+
|
|
622
|
+
std::unique_ptr<SstPartitioner> CreatePartitioner(
|
|
623
|
+
const SstPartitioner::Context& /* context */) const override {
|
|
624
|
+
return std::unique_ptr<SstPartitioner>(new SingleKeySstPartitioner());
|
|
625
|
+
}
|
|
626
|
+
};
|
|
627
|
+
|
|
628
|
+
TEST_F(TieredCompactionTest, LevelOutofBoundaryRangeDelete) {
|
|
629
|
+
const int kNumTrigger = 4;
|
|
630
|
+
const int kNumLevels = 3;
|
|
631
|
+
const int kNumKeys = 10;
|
|
632
|
+
|
|
633
|
+
auto factory = std::make_shared<SingleKeySstPartitionerFactory>();
|
|
634
|
+
auto options = CurrentOptions();
|
|
635
|
+
options.bottommost_temperature = Temperature::kCold;
|
|
636
|
+
options.level0_file_num_compaction_trigger = kNumTrigger;
|
|
637
|
+
options.num_levels = kNumLevels;
|
|
638
|
+
options.statistics = CreateDBStatistics();
|
|
639
|
+
options.sst_partitioner_factory = factory;
|
|
640
|
+
options.max_subcompactions = 10;
|
|
641
|
+
DestroyAndReopen(options);
|
|
642
|
+
|
|
643
|
+
std::atomic_uint64_t latest_cold_seq = 0;
|
|
644
|
+
|
|
645
|
+
SyncPoint::GetInstance()->SetCallBack(
|
|
646
|
+
"CompactionIterator::PrepareOutput.context", [&](void* arg) {
|
|
647
|
+
auto context = static_cast<PerKeyPlacementContext*>(arg);
|
|
648
|
+
context->output_to_penultimate_level =
|
|
649
|
+
context->seq_num > latest_cold_seq;
|
|
650
|
+
});
|
|
651
|
+
SyncPoint::GetInstance()->EnableProcessing();
|
|
652
|
+
|
|
653
|
+
for (int i = 0; i < kNumKeys; i++) {
|
|
654
|
+
ASSERT_OK(Put(Key(i), "value" + std::to_string(i)));
|
|
655
|
+
}
|
|
656
|
+
ASSERT_OK(Flush());
|
|
657
|
+
|
|
658
|
+
MoveFilesToLevel(kNumLevels - 1);
|
|
659
|
+
ASSERT_EQ(GetSstSizeHelper(Temperature::kUnknown), 0);
|
|
660
|
+
ASSERT_GT(GetSstSizeHelper(Temperature::kCold), 0);
|
|
661
|
+
ASSERT_EQ("0,0,10", FilesPerLevel());
|
|
662
|
+
|
|
663
|
+
auto snap = db_->GetSnapshot();
|
|
664
|
+
|
|
665
|
+
// only range delete
|
|
666
|
+
std::string start = Key(3);
|
|
667
|
+
std::string end = Key(5);
|
|
668
|
+
ASSERT_OK(
|
|
669
|
+
db_->DeleteRange(WriteOptions(), db_->DefaultColumnFamily(), start, end));
|
|
670
|
+
ASSERT_OK(Flush());
|
|
671
|
+
|
|
672
|
+
CompactRangeOptions cro;
|
|
673
|
+
cro.bottommost_level_compaction = BottommostLevelCompaction::kForce;
|
|
674
|
+
ASSERT_OK(db_->CompactRange(cro, nullptr, nullptr));
|
|
675
|
+
|
|
676
|
+
ASSERT_EQ(GetSstSizeHelper(Temperature::kUnknown),
|
|
677
|
+
0); // tombstone has no size, even it's in hot tier
|
|
678
|
+
ASSERT_GT(GetSstSizeHelper(Temperature::kCold), 0);
|
|
679
|
+
ASSERT_EQ("0,1,10",
|
|
680
|
+
FilesPerLevel()); // one file is at the penultimate level which
|
|
681
|
+
// only contains a range delete
|
|
682
|
+
|
|
683
|
+
// Add 2 hot keys, each is a new SST, they will be placed in the same level as
|
|
684
|
+
// range del, but they don't have overlap with range del, make sure the range
|
|
685
|
+
// del will still be placed there
|
|
686
|
+
latest_cold_seq = dbfull()->GetLatestSequenceNumber();
|
|
687
|
+
ASSERT_OK(Put(Key(0), "new value" + std::to_string(0)));
|
|
688
|
+
auto snap2 = db_->GetSnapshot();
|
|
689
|
+
ASSERT_OK(Put(Key(6), "new value" + std::to_string(6)));
|
|
690
|
+
ASSERT_OK(Flush());
|
|
691
|
+
|
|
692
|
+
ASSERT_OK(db_->CompactRange(cro, nullptr, nullptr));
|
|
693
|
+
ASSERT_EQ("0,2,10",
|
|
694
|
+
FilesPerLevel()); // one file is at the penultimate level
|
|
695
|
+
// which only contains a range delete
|
|
696
|
+
std::vector<LiveFileMetaData> live_file_meta;
|
|
697
|
+
db_->GetLiveFilesMetaData(&live_file_meta);
|
|
698
|
+
bool found_sst_with_del = false;
|
|
699
|
+
uint64_t sst_with_del_num = 0;
|
|
700
|
+
for (const auto& meta : live_file_meta) {
|
|
701
|
+
if (meta.num_deletions > 0) {
|
|
702
|
+
// found SST with del, which has 2 entries, one for data one for range del
|
|
703
|
+
ASSERT_EQ(meta.level,
|
|
704
|
+
kNumLevels - 2); // output to penultimate level
|
|
705
|
+
ASSERT_EQ(meta.num_entries, 2);
|
|
706
|
+
ASSERT_EQ(meta.num_deletions, 1);
|
|
707
|
+
found_sst_with_del = true;
|
|
708
|
+
sst_with_del_num = meta.file_number;
|
|
709
|
+
}
|
|
710
|
+
}
|
|
711
|
+
ASSERT_TRUE(found_sst_with_del);
|
|
712
|
+
|
|
713
|
+
// release the first snapshot and compact, which should compact the range del
|
|
714
|
+
// but new inserted key `0` and `6` are still hot data which will be placed on
|
|
715
|
+
// the penultimate level
|
|
716
|
+
db_->ReleaseSnapshot(snap);
|
|
717
|
+
ASSERT_OK(db_->CompactRange(cro, nullptr, nullptr));
|
|
718
|
+
ASSERT_EQ("0,2,7", FilesPerLevel());
|
|
719
|
+
db_->GetLiveFilesMetaData(&live_file_meta);
|
|
720
|
+
found_sst_with_del = false;
|
|
721
|
+
for (const auto& meta : live_file_meta) {
|
|
722
|
+
// check new SST with del (the old one may not yet be deleted after
|
|
723
|
+
// compaction)
|
|
724
|
+
if (meta.num_deletions > 0 && meta.file_number != sst_with_del_num) {
|
|
725
|
+
found_sst_with_del = true;
|
|
726
|
+
}
|
|
727
|
+
}
|
|
728
|
+
ASSERT_FALSE(found_sst_with_del);
|
|
729
|
+
|
|
730
|
+
// Now make all data cold, key 0 will be moved to the last level, but key 6 is
|
|
731
|
+
// still in snap2, so it will be kept at the penultimate level
|
|
732
|
+
latest_cold_seq = dbfull()->GetLatestSequenceNumber();
|
|
733
|
+
ASSERT_OK(db_->CompactRange(cro, nullptr, nullptr));
|
|
734
|
+
ASSERT_EQ("0,1,8", FilesPerLevel());
|
|
735
|
+
ASSERT_GT(GetSstSizeHelper(Temperature::kUnknown), 0);
|
|
736
|
+
ASSERT_GT(GetSstSizeHelper(Temperature::kCold), 0);
|
|
737
|
+
|
|
738
|
+
db_->ReleaseSnapshot(snap2);
|
|
739
|
+
|
|
740
|
+
ASSERT_OK(db_->CompactRange(cro, nullptr, nullptr));
|
|
741
|
+
ASSERT_EQ("0,0,8", FilesPerLevel());
|
|
742
|
+
ASSERT_EQ(GetSstSizeHelper(Temperature::kUnknown), 0);
|
|
743
|
+
ASSERT_GT(GetSstSizeHelper(Temperature::kCold), 0);
|
|
744
|
+
}
|
|
745
|
+
|
|
746
|
+
TEST_F(TieredCompactionTest, UniversalRangeDelete) {
|
|
747
|
+
const int kNumTrigger = 4;
|
|
748
|
+
const int kNumLevels = 7;
|
|
749
|
+
const int kNumKeys = 10;
|
|
750
|
+
|
|
751
|
+
auto factory = std::make_shared<SingleKeySstPartitionerFactory>();
|
|
752
|
+
|
|
753
|
+
auto options = CurrentOptions();
|
|
754
|
+
options.compaction_style = kCompactionStyleUniversal;
|
|
755
|
+
options.bottommost_temperature = Temperature::kCold;
|
|
756
|
+
options.level0_file_num_compaction_trigger = kNumTrigger;
|
|
757
|
+
options.statistics = CreateDBStatistics();
|
|
758
|
+
options.sst_partitioner_factory = factory;
|
|
759
|
+
options.max_subcompactions = 10;
|
|
760
|
+
DestroyAndReopen(options);
|
|
761
|
+
|
|
762
|
+
std::atomic_uint64_t latest_cold_seq = 0;
|
|
763
|
+
|
|
764
|
+
SyncPoint::GetInstance()->SetCallBack(
|
|
765
|
+
"CompactionIterator::PrepareOutput.context", [&](void* arg) {
|
|
766
|
+
auto context = static_cast<PerKeyPlacementContext*>(arg);
|
|
767
|
+
context->output_to_penultimate_level =
|
|
768
|
+
context->seq_num > latest_cold_seq;
|
|
769
|
+
});
|
|
770
|
+
SyncPoint::GetInstance()->EnableProcessing();
|
|
771
|
+
|
|
772
|
+
for (int i = 0; i < kNumKeys; i++) {
|
|
773
|
+
ASSERT_OK(Put(Key(i), "value" + std::to_string(i)));
|
|
774
|
+
}
|
|
775
|
+
ASSERT_OK(Flush());
|
|
776
|
+
|
|
777
|
+
// compact to the penultimate level with 10 files
|
|
778
|
+
CompactRangeOptions cro;
|
|
779
|
+
cro.bottommost_level_compaction = BottommostLevelCompaction::kForce;
|
|
780
|
+
ASSERT_OK(db_->CompactRange(cro, nullptr, nullptr));
|
|
781
|
+
|
|
782
|
+
ASSERT_EQ("0,0,0,0,0,10", FilesPerLevel());
|
|
783
|
+
ASSERT_GT(GetSstSizeHelper(Temperature::kUnknown), 0);
|
|
784
|
+
ASSERT_EQ(GetSstSizeHelper(Temperature::kCold), 0);
|
|
785
|
+
|
|
786
|
+
// make all data cold
|
|
787
|
+
latest_cold_seq = dbfull()->GetLatestSequenceNumber();
|
|
788
|
+
ASSERT_OK(db_->CompactRange(cro, nullptr, nullptr));
|
|
789
|
+
ASSERT_EQ("0,0,0,0,0,0,10", FilesPerLevel());
|
|
790
|
+
ASSERT_EQ(GetSstSizeHelper(Temperature::kUnknown), 0);
|
|
791
|
+
ASSERT_GT(GetSstSizeHelper(Temperature::kCold), 0);
|
|
792
|
+
|
|
793
|
+
// range del which considered as hot data, but it will be merged and deleted
|
|
794
|
+
// with the last level data
|
|
795
|
+
std::string start = Key(3);
|
|
796
|
+
std::string end = Key(5);
|
|
797
|
+
ASSERT_OK(
|
|
798
|
+
db_->DeleteRange(WriteOptions(), db_->DefaultColumnFamily(), start, end));
|
|
799
|
+
ASSERT_OK(Flush());
|
|
800
|
+
ASSERT_OK(db_->CompactRange(cro, nullptr, nullptr));
|
|
801
|
+
|
|
802
|
+
ASSERT_EQ("0,0,0,0,0,0,8", FilesPerLevel());
|
|
803
|
+
|
|
804
|
+
// range del with snapshot should be preserved in the penultimate level
|
|
805
|
+
auto snap = db_->GetSnapshot();
|
|
806
|
+
|
|
807
|
+
start = Key(6);
|
|
808
|
+
end = Key(8);
|
|
809
|
+
ASSERT_OK(
|
|
810
|
+
db_->DeleteRange(WriteOptions(), db_->DefaultColumnFamily(), start, end));
|
|
811
|
+
ASSERT_OK(Flush());
|
|
812
|
+
ASSERT_OK(db_->CompactRange(cro, nullptr, nullptr));
|
|
813
|
+
ASSERT_EQ("0,0,0,0,0,1,8", FilesPerLevel());
|
|
814
|
+
|
|
815
|
+
// Add 2 hot keys, each is a new SST, they will be placed in the same level as
|
|
816
|
+
// range del, but no overlap with range del.
|
|
817
|
+
latest_cold_seq = dbfull()->GetLatestSequenceNumber();
|
|
818
|
+
ASSERT_OK(Put(Key(4), "new value" + std::to_string(0)));
|
|
819
|
+
auto snap2 = db_->GetSnapshot();
|
|
820
|
+
ASSERT_OK(Put(Key(9), "new value" + std::to_string(6)));
|
|
821
|
+
|
|
822
|
+
ASSERT_OK(Flush());
|
|
823
|
+
|
|
824
|
+
ASSERT_OK(db_->CompactRange(cro, nullptr, nullptr));
|
|
825
|
+
ASSERT_EQ("0,0,0,0,0,2,8", FilesPerLevel());
|
|
826
|
+
// find the SST with range del
|
|
827
|
+
std::vector<LiveFileMetaData> live_file_meta;
|
|
828
|
+
db_->GetLiveFilesMetaData(&live_file_meta);
|
|
829
|
+
bool found_sst_with_del = false;
|
|
830
|
+
uint64_t sst_with_del_num = 0;
|
|
831
|
+
for (const auto& meta : live_file_meta) {
|
|
832
|
+
if (meta.num_deletions > 0) {
|
|
833
|
+
// found SST with del, which has 2 entries, one for data one for range del
|
|
834
|
+
ASSERT_EQ(meta.level,
|
|
835
|
+
kNumLevels - 2); // output_to_penultimate_level level
|
|
836
|
+
ASSERT_EQ(meta.num_entries, 2);
|
|
837
|
+
ASSERT_EQ(meta.num_deletions, 1);
|
|
838
|
+
found_sst_with_del = true;
|
|
839
|
+
sst_with_del_num = meta.file_number;
|
|
840
|
+
}
|
|
841
|
+
}
|
|
842
|
+
ASSERT_TRUE(found_sst_with_del);
|
|
843
|
+
|
|
844
|
+
// release the first snapshot which should compact the range del, but data on
|
|
845
|
+
// the same level is still hot
|
|
846
|
+
db_->ReleaseSnapshot(snap);
|
|
847
|
+
|
|
848
|
+
ASSERT_OK(db_->CompactRange(cro, nullptr, nullptr));
|
|
849
|
+
ASSERT_EQ("0,0,0,0,0,2,6", FilesPerLevel());
|
|
850
|
+
db_->GetLiveFilesMetaData(&live_file_meta);
|
|
851
|
+
// no range del should be found in SST
|
|
852
|
+
found_sst_with_del = false;
|
|
853
|
+
for (const auto& meta : live_file_meta) {
|
|
854
|
+
// check new SST with del (the old one may not yet be deleted after
|
|
855
|
+
// compaction)
|
|
856
|
+
if (meta.num_deletions > 0 && meta.file_number != sst_with_del_num) {
|
|
857
|
+
found_sst_with_del = true;
|
|
858
|
+
}
|
|
859
|
+
}
|
|
860
|
+
ASSERT_FALSE(found_sst_with_del);
|
|
861
|
+
|
|
862
|
+
// make all data to cold, but key 6 is still protected by snap2
|
|
863
|
+
latest_cold_seq = dbfull()->GetLatestSequenceNumber();
|
|
864
|
+
ASSERT_OK(db_->CompactRange(cro, nullptr, nullptr));
|
|
865
|
+
ASSERT_EQ("0,0,0,0,0,1,7", FilesPerLevel());
|
|
866
|
+
ASSERT_GT(GetSstSizeHelper(Temperature::kUnknown), 0);
|
|
867
|
+
ASSERT_GT(GetSstSizeHelper(Temperature::kCold), 0);
|
|
868
|
+
|
|
869
|
+
db_->ReleaseSnapshot(snap2);
|
|
870
|
+
|
|
871
|
+
// release snapshot, everything go to bottommost
|
|
872
|
+
ASSERT_OK(db_->CompactRange(cro, nullptr, nullptr));
|
|
873
|
+
ASSERT_EQ("0,0,0,0,0,0,7", FilesPerLevel());
|
|
874
|
+
ASSERT_EQ(GetSstSizeHelper(Temperature::kUnknown), 0);
|
|
875
|
+
ASSERT_GT(GetSstSizeHelper(Temperature::kCold), 0);
|
|
876
|
+
}
|
|
877
|
+
|
|
878
|
+
TEST_F(TieredCompactionTest, SequenceBasedTieredStorageLevel) {
|
|
879
|
+
const int kNumTrigger = 4;
|
|
880
|
+
const int kNumLevels = 7;
|
|
881
|
+
const int kNumKeys = 100;
|
|
882
|
+
const int kLastLevel = kNumLevels - 1;
|
|
883
|
+
|
|
884
|
+
auto options = CurrentOptions();
|
|
885
|
+
options.bottommost_temperature = Temperature::kCold;
|
|
886
|
+
options.level0_file_num_compaction_trigger = kNumTrigger;
|
|
887
|
+
options.num_levels = kNumLevels;
|
|
888
|
+
options.statistics = CreateDBStatistics();
|
|
889
|
+
options.max_subcompactions = 10;
|
|
890
|
+
DestroyAndReopen(options);
|
|
891
|
+
|
|
892
|
+
std::atomic_uint64_t latest_cold_seq = 0;
|
|
893
|
+
std::vector<SequenceNumber> seq_history;
|
|
894
|
+
|
|
895
|
+
SyncPoint::GetInstance()->SetCallBack(
|
|
896
|
+
"CompactionIterator::PrepareOutput.context", [&](void* arg) {
|
|
897
|
+
auto context = static_cast<PerKeyPlacementContext*>(arg);
|
|
898
|
+
context->output_to_penultimate_level =
|
|
899
|
+
context->seq_num > latest_cold_seq;
|
|
900
|
+
});
|
|
901
|
+
SyncPoint::GetInstance()->EnableProcessing();
|
|
902
|
+
|
|
903
|
+
std::vector<InternalStats::CompactionStats> expect_stats(kNumLevels);
|
|
904
|
+
InternalStats::CompactionStats& last_stats = expect_stats[kLastLevel];
|
|
905
|
+
InternalStats::CompactionStats expect_pl_stats;
|
|
906
|
+
|
|
907
|
+
for (int i = 0; i < kNumTrigger; i++) {
|
|
908
|
+
for (int j = 0; j < kNumKeys; j++) {
|
|
909
|
+
ASSERT_OK(Put(Key(i * 10 + j), "value" + std::to_string(i)));
|
|
910
|
+
}
|
|
911
|
+
ASSERT_OK(Flush());
|
|
912
|
+
expect_stats[0].Add(kBasicFlushStats);
|
|
913
|
+
}
|
|
914
|
+
ASSERT_OK(dbfull()->WaitForCompact(true));
|
|
915
|
+
|
|
916
|
+
// non-last-level compaction doesn't support per_key_placement
|
|
917
|
+
ASSERT_EQ("0,1", FilesPerLevel());
|
|
918
|
+
ASSERT_EQ(GetSstSizeHelper(Temperature::kUnknown), 0);
|
|
919
|
+
ASSERT_GT(GetSstSizeHelper(Temperature::kCold), 0);
|
|
920
|
+
|
|
921
|
+
expect_stats[1].Add(kBasicCompStats);
|
|
922
|
+
expect_stats[1].Add(kBasicPerLevelStats);
|
|
923
|
+
expect_stats[1].ResetCompactionReason(CompactionReason::kLevelL0FilesNum);
|
|
924
|
+
VerifyCompactionStats(expect_stats, expect_pl_stats);
|
|
925
|
+
|
|
926
|
+
MoveFilesToLevel(kLastLevel);
|
|
927
|
+
|
|
928
|
+
ResetAllStats(expect_stats, expect_pl_stats);
|
|
929
|
+
|
|
930
|
+
// the data should be all hot, and it's a last level compaction, but all
|
|
931
|
+
// sequence numbers have been zeroed out, so they're still treated as old
|
|
932
|
+
// data.
|
|
933
|
+
CompactRangeOptions cro;
|
|
934
|
+
cro.bottommost_level_compaction = BottommostLevelCompaction::kForce;
|
|
935
|
+
ASSERT_OK(db_->CompactRange(cro, nullptr, nullptr));
|
|
936
|
+
ASSERT_EQ("0,0,0,0,0,0,1", FilesPerLevel());
|
|
937
|
+
ASSERT_EQ(GetSstSizeHelper(Temperature::kUnknown), 0);
|
|
938
|
+
ASSERT_GT(GetSstSizeHelper(Temperature::kCold), 0);
|
|
939
|
+
|
|
940
|
+
last_stats.Add(kBasicCompStats);
|
|
941
|
+
last_stats.Add(kBasicPerLevelStats);
|
|
942
|
+
last_stats.num_dropped_records = 0;
|
|
943
|
+
last_stats.bytes_read_non_output_levels = 0;
|
|
944
|
+
last_stats.num_input_files_in_non_output_levels = 0;
|
|
945
|
+
last_stats.bytes_read_output_level = kHasValue;
|
|
946
|
+
last_stats.num_input_files_in_output_level = kHasValue;
|
|
947
|
+
last_stats.ResetCompactionReason(CompactionReason::kManualCompaction);
|
|
948
|
+
VerifyCompactionStats(expect_stats, expect_pl_stats);
|
|
949
|
+
|
|
950
|
+
// Add new data, which is all hot and overriding all existing data
|
|
951
|
+
for (int i = 0; i < kNumTrigger; i++) {
|
|
952
|
+
for (int j = 0; j < kNumKeys; j++) {
|
|
953
|
+
ASSERT_OK(Put(Key(i * 10 + j), "value" + std::to_string(i)));
|
|
954
|
+
}
|
|
955
|
+
ASSERT_OK(Flush());
|
|
956
|
+
seq_history.emplace_back(dbfull()->GetLatestSequenceNumber());
|
|
957
|
+
}
|
|
958
|
+
ASSERT_OK(dbfull()->WaitForCompact(true));
|
|
959
|
+
ASSERT_EQ("0,1,0,0,0,0,1", FilesPerLevel());
|
|
960
|
+
ASSERT_GT(GetSstSizeHelper(Temperature::kUnknown), 0);
|
|
961
|
+
ASSERT_GT(GetSstSizeHelper(Temperature::kCold), 0);
|
|
962
|
+
|
|
963
|
+
ResetAllStats(expect_stats, expect_pl_stats);
|
|
964
|
+
|
|
965
|
+
// after compaction, all data are hot
|
|
966
|
+
ASSERT_OK(db_->CompactRange(cro, nullptr, nullptr));
|
|
967
|
+
ASSERT_EQ("0,0,0,0,0,1", FilesPerLevel());
|
|
968
|
+
ASSERT_GT(GetSstSizeHelper(Temperature::kUnknown), 0);
|
|
969
|
+
ASSERT_EQ(GetSstSizeHelper(Temperature::kCold), 0);
|
|
970
|
+
|
|
971
|
+
for (int level = 2; level < kNumLevels - 1; level++) {
|
|
972
|
+
expect_stats[level].bytes_moved = kHasValue;
|
|
973
|
+
}
|
|
974
|
+
|
|
975
|
+
last_stats.Add(kBasicCompStats);
|
|
976
|
+
last_stats.bytes_read_output_level = kHasValue;
|
|
977
|
+
last_stats.num_input_files_in_output_level = kHasValue;
|
|
978
|
+
last_stats.ResetCompactionReason(CompactionReason::kManualCompaction);
|
|
979
|
+
expect_pl_stats.Add(kBasicPerKeyPlacementCompStats);
|
|
980
|
+
expect_pl_stats.ResetCompactionReason(CompactionReason::kManualCompaction);
|
|
981
|
+
VerifyCompactionStats(expect_stats, expect_pl_stats);
|
|
982
|
+
|
|
983
|
+
// move forward the cold_seq, try to split the data into cold and hot, but in
|
|
984
|
+
// this case it's unsafe to split the data
|
|
985
|
+
latest_cold_seq = seq_history[1];
|
|
986
|
+
ASSERT_OK(db_->CompactRange(cro, nullptr, nullptr));
|
|
987
|
+
ASSERT_EQ("0,0,0,0,0,1", FilesPerLevel());
|
|
988
|
+
ASSERT_EQ(GetSstSizeHelper(Temperature::kUnknown), 0);
|
|
989
|
+
ASSERT_GT(GetSstSizeHelper(Temperature::kCold), 0);
|
|
990
|
+
|
|
991
|
+
seq_history.clear();
|
|
992
|
+
|
|
993
|
+
// Add new data again
|
|
994
|
+
for (int i = 0; i < kNumTrigger; i++) {
|
|
995
|
+
for (int j = 0; j < kNumKeys; j++) {
|
|
996
|
+
ASSERT_OK(Put(Key(i * 10 + j), "value" + std::to_string(i)));
|
|
997
|
+
}
|
|
998
|
+
ASSERT_OK(Flush());
|
|
999
|
+
seq_history.emplace_back(dbfull()->GetLatestSequenceNumber());
|
|
1000
|
+
}
|
|
1001
|
+
ASSERT_OK(dbfull()->WaitForCompact(true));
|
|
1002
|
+
|
|
1003
|
+
ResetAllStats(expect_stats, expect_pl_stats);
|
|
1004
|
+
|
|
1005
|
+
// Try to split the last level cold data into hot and cold, which
|
|
1006
|
+
// is not supported
|
|
1007
|
+
latest_cold_seq = seq_history[0];
|
|
1008
|
+
ASSERT_OK(db_->CompactRange(cro, nullptr, nullptr));
|
|
1009
|
+
ASSERT_EQ("0,0,0,0,0,1", FilesPerLevel());
|
|
1010
|
+
ASSERT_EQ(GetSstSizeHelper(Temperature::kUnknown), 0);
|
|
1011
|
+
ASSERT_GT(GetSstSizeHelper(Temperature::kCold), 0);
|
|
1012
|
+
|
|
1013
|
+
auto comp_stats = kBasicCompStats;
|
|
1014
|
+
comp_stats.ResetCompactionReason(CompactionReason::kManualCompaction);
|
|
1015
|
+
const int bottommost_level = 5;
|
|
1016
|
+
expect_stats[bottommost_level].Add(comp_stats);
|
|
1017
|
+
expect_stats[bottommost_level].Add(
|
|
1018
|
+
comp_stats); // bottommost level has 2 compactions
|
|
1019
|
+
expect_stats[bottommost_level].Add(kBasicPerLevelStats);
|
|
1020
|
+
expect_stats[bottommost_level].bytes_read_output_level = kHasValue;
|
|
1021
|
+
expect_stats[bottommost_level].num_input_files_in_output_level = kHasValue;
|
|
1022
|
+
|
|
1023
|
+
for (int level = 2; level < bottommost_level; level++) {
|
|
1024
|
+
expect_stats[level].bytes_moved = kHasValue;
|
|
1025
|
+
}
|
|
1026
|
+
VerifyCompactionStats(expect_stats, expect_pl_stats);
|
|
1027
|
+
|
|
1028
|
+
// manually move all data (cold) to last level
|
|
1029
|
+
MoveFilesToLevel(kLastLevel);
|
|
1030
|
+
seq_history.clear();
|
|
1031
|
+
// Add new data once again
|
|
1032
|
+
for (int i = 0; i < kNumTrigger; i++) {
|
|
1033
|
+
for (int j = 0; j < kNumKeys; j++) {
|
|
1034
|
+
ASSERT_OK(Put(Key(i * 10 + j), "value" + std::to_string(i)));
|
|
1035
|
+
}
|
|
1036
|
+
ASSERT_OK(Flush());
|
|
1037
|
+
seq_history.emplace_back(dbfull()->GetLatestSequenceNumber());
|
|
1038
|
+
}
|
|
1039
|
+
ASSERT_OK(dbfull()->WaitForCompact(true));
|
|
1040
|
+
|
|
1041
|
+
latest_cold_seq = seq_history[0];
|
|
1042
|
+
ASSERT_OK(db_->CompactRange(cro, nullptr, nullptr));
|
|
1043
|
+
ASSERT_EQ("0,0,0,0,0,1,1", FilesPerLevel());
|
|
1044
|
+
ASSERT_GT(GetSstSizeHelper(Temperature::kUnknown), 0);
|
|
1045
|
+
ASSERT_GT(GetSstSizeHelper(Temperature::kCold), 0);
|
|
1046
|
+
|
|
1047
|
+
// delete all cold data
|
|
1048
|
+
for (int i = 0; i < 10; i++) {
|
|
1049
|
+
ASSERT_OK(Delete(Key(i)));
|
|
1050
|
+
}
|
|
1051
|
+
ASSERT_OK(Flush());
|
|
1052
|
+
ASSERT_OK(db_->CompactRange(cro, nullptr, nullptr));
|
|
1053
|
+
ASSERT_EQ("0,0,0,0,0,1", FilesPerLevel());
|
|
1054
|
+
ASSERT_GT(GetSstSizeHelper(Temperature::kUnknown), 0);
|
|
1055
|
+
ASSERT_EQ(GetSstSizeHelper(Temperature::kCold), 0);
|
|
1056
|
+
|
|
1057
|
+
MoveFilesToLevel(kLastLevel);
|
|
1058
|
+
|
|
1059
|
+
// move forward the cold_seq again with range delete, take a snapshot to keep
|
|
1060
|
+
// the range dels in bottommost
|
|
1061
|
+
auto snap = db_->GetSnapshot();
|
|
1062
|
+
latest_cold_seq = seq_history[2];
|
|
1063
|
+
std::string start = Key(25), end = Key(35);
|
|
1064
|
+
ASSERT_OK(
|
|
1065
|
+
db_->DeleteRange(WriteOptions(), db_->DefaultColumnFamily(), start, end));
|
|
1066
|
+
// add one small key and large key in the input level, to make sure it's able
|
|
1067
|
+
// to move hot data to input level within that range
|
|
1068
|
+
ASSERT_OK(Put(Key(0), "value" + std::to_string(0)));
|
|
1069
|
+
ASSERT_OK(Put(Key(100), "value" + std::to_string(0)));
|
|
1070
|
+
|
|
1071
|
+
ASSERT_OK(Flush());
|
|
1072
|
+
ASSERT_OK(db_->CompactRange(cro, nullptr, nullptr));
|
|
1073
|
+
ASSERT_EQ("0,0,0,0,0,1,1", FilesPerLevel());
|
|
1074
|
+
ASSERT_GT(GetSstSizeHelper(Temperature::kUnknown), 0);
|
|
1075
|
+
ASSERT_GT(GetSstSizeHelper(Temperature::kCold), 0);
|
|
1076
|
+
|
|
1077
|
+
// verify data
|
|
1078
|
+
std::string value;
|
|
1079
|
+
for (int i = 1; i < 130; i++) {
|
|
1080
|
+
if (i < 10 || (i >= 25 && i < 35)) {
|
|
1081
|
+
ASSERT_TRUE(db_->Get(ReadOptions(), Key(i), &value).IsNotFound());
|
|
1082
|
+
} else {
|
|
1083
|
+
ASSERT_OK(db_->Get(ReadOptions(), Key(i), &value));
|
|
1084
|
+
}
|
|
1085
|
+
}
|
|
1086
|
+
|
|
1087
|
+
// delete all hot data
|
|
1088
|
+
ASSERT_OK(Delete(Key(0)));
|
|
1089
|
+
start = Key(30);
|
|
1090
|
+
end = Key(101); // range [101, 130] is cold, because it's not in input range
|
|
1091
|
+
// in previous compaction
|
|
1092
|
+
ASSERT_OK(
|
|
1093
|
+
db_->DeleteRange(WriteOptions(), db_->DefaultColumnFamily(), start, end));
|
|
1094
|
+
ASSERT_OK(Flush());
|
|
1095
|
+
ASSERT_OK(db_->CompactRange(cro, nullptr, nullptr));
|
|
1096
|
+
ASSERT_EQ("0,0,0,0,0,1,1", FilesPerLevel());
|
|
1097
|
+
ASSERT_GT(GetSstSizeHelper(Temperature::kUnknown), 0);
|
|
1098
|
+
ASSERT_GT(GetSstSizeHelper(Temperature::kCold), 0);
|
|
1099
|
+
|
|
1100
|
+
// no range del is dropped because of snapshot
|
|
1101
|
+
ASSERT_EQ(
|
|
1102
|
+
options.statistics->getTickerCount(COMPACTION_RANGE_DEL_DROP_OBSOLETE),
|
|
1103
|
+
0);
|
|
1104
|
+
|
|
1105
|
+
db_->ReleaseSnapshot(snap);
|
|
1106
|
+
|
|
1107
|
+
ASSERT_OK(db_->CompactRange(cro, nullptr, nullptr));
|
|
1108
|
+
ASSERT_EQ("0,0,0,0,0,0,1", FilesPerLevel());
|
|
1109
|
+
ASSERT_EQ(GetSstSizeHelper(Temperature::kUnknown), 0);
|
|
1110
|
+
ASSERT_GT(GetSstSizeHelper(Temperature::kCold), 0);
|
|
1111
|
+
|
|
1112
|
+
// 3 range dels dropped, the first one is double counted as expected, which is
|
|
1113
|
+
// spread into 2 SST files
|
|
1114
|
+
ASSERT_EQ(
|
|
1115
|
+
options.statistics->getTickerCount(COMPACTION_RANGE_DEL_DROP_OBSOLETE),
|
|
1116
|
+
3);
|
|
1117
|
+
|
|
1118
|
+
// move backward of cold_seq, which might happen when the user change the
|
|
1119
|
+
// setting. the hot data won't move up, just to make sure it still runs
|
|
1120
|
+
// fine, which is because:
|
|
1121
|
+
// 1. sequence number is zeroed out, so no time information
|
|
1122
|
+
// 2. leveled compaction only support move data up within the higher level
|
|
1123
|
+
// input range
|
|
1124
|
+
latest_cold_seq = seq_history[1];
|
|
1125
|
+
ASSERT_OK(db_->CompactRange(CompactRangeOptions(), nullptr, nullptr));
|
|
1126
|
+
ASSERT_EQ("0,0,0,0,0,0,1", FilesPerLevel());
|
|
1127
|
+
ASSERT_EQ(GetSstSizeHelper(Temperature::kUnknown), 0);
|
|
1128
|
+
ASSERT_GT(GetSstSizeHelper(Temperature::kCold), 0);
|
|
1129
|
+
}
|
|
1130
|
+
|
|
1131
|
+
// Verifies per-key placement for leveled compaction when hot/cold is decided
// by a key range (via the PrepareOutput sync point) rather than by sequence
// number: hot keys stay on the penultimate level, everything else sinks to
// the cold bottommost level.
TEST_F(TieredCompactionTest, RangeBasedTieredStorageLevel) {
  const int kNumTrigger = 4;
  const int kNumLevels = 7;
  const int kNumKeys = 100;

  auto options = CurrentOptions();
  options.bottommost_temperature = Temperature::kCold;
  options.level0_file_num_compaction_trigger = kNumTrigger;
  options.level_compaction_dynamic_level_bytes = true;
  options.num_levels = kNumLevels;
  options.statistics = CreateDBStatistics();
  options.max_subcompactions = 10;
  DestroyAndReopen(options);
  auto ucmp = options.comparator;

  // The hot range is mutated by the test body while compactions read it from
  // the callback, so guard it with a mutex.
  port::Mutex range_mutex;
  std::string hot_start = Key(10);
  std::string hot_end = Key(50);

  // Route any key in [hot_start, hot_end) to the penultimate (hot) level.
  SyncPoint::GetInstance()->SetCallBack(
      "CompactionIterator::PrepareOutput.context", [&](void* raw_arg) {
        auto ctx = static_cast<PerKeyPlacementContext*>(raw_arg);
        MutexLock lock(&range_mutex);
        ctx->output_to_penultimate_level =
            ucmp->Compare(ctx->key, hot_start) >= 0 &&
            ucmp->Compare(ctx->key, hot_end) < 0;
      });
  SyncPoint::GetInstance()->EnableProcessing();

  // Fill enough L0 files to trigger an automatic compaction.
  for (int file = 0; file < kNumTrigger; file++) {
    for (int key = 0; key < kNumKeys; key++) {
      ASSERT_OK(Put(Key(key), "value" + std::to_string(key)));
    }
    ASSERT_OK(Flush());
  }
  ASSERT_OK(dbfull()->WaitForCompact(true));
  // One hot (penultimate) file and one cold (bottommost) file.
  ASSERT_EQ("0,0,0,0,0,1,1", FilesPerLevel());
  ASSERT_GT(GetSstSizeHelper(Temperature::kUnknown), 0);
  ASSERT_GT(GetSstSizeHelper(Temperature::kCold), 0);

  // Move the hot range out of the written keys: everything becomes cold.
  {
    MutexLock lock(&range_mutex);
    hot_start = Key(100);
    hot_end = Key(200);
  }
  CompactRangeOptions compact_opts;
  compact_opts.bottommost_level_compaction = BottommostLevelCompaction::kForce;
  ASSERT_OK(db_->CompactRange(compact_opts, nullptr, nullptr));
  ASSERT_EQ("0,0,0,0,0,0,1", FilesPerLevel());
  ASSERT_EQ(GetSstSizeHelper(Temperature::kUnknown), 0);
  ASSERT_GT(GetSstSizeHelper(Temperature::kCold), 0);

  // Flip everything to hot. Leveled compaction can only promote cold data
  // that falls inside the higher-level input range, so nothing moves up.
  {
    MutexLock lock(&range_mutex);
    hot_start = Key(0);
    hot_end = Key(100);
  }
  ASSERT_OK(db_->CompactRange(compact_opts, nullptr, nullptr));
  ASSERT_EQ("0,0,0,0,0,0,1", FilesPerLevel());
  ASSERT_EQ(GetSstSizeHelper(Temperature::kUnknown), 0);
  ASSERT_GT(GetSstSizeHelper(Temperature::kCold), 0);

  // Mixed hot/cold: overlap the next compaction with a sub-range of the keys.
  {
    MutexLock lock(&range_mutex);
    hot_start = Key(50);
    hot_end = Key(100);
  }
  ASSERT_OK(Put(Key(0), "value" + std::to_string(0)));
  ASSERT_OK(Put(Key(100), "value" + std::to_string(100)));
  ASSERT_OK(Flush());
  ASSERT_OK(db_->CompactRange(compact_opts, nullptr, nullptr));
  ASSERT_EQ("0,0,0,0,0,1,1", FilesPerLevel());
  ASSERT_GT(GetSstSizeHelper(Temperature::kUnknown), 0);
  ASSERT_GT(GetSstSizeHelper(Temperature::kCold), 0);

  // Range-delete the hot data, but hold a snapshot so the tombstone (and the
  // hot file) must be retained across the next compaction.
  auto snapshot = db_->GetSnapshot();
  std::string del_start = Key(50);
  std::string del_end = Key(100);
  ASSERT_OK(db_->DeleteRange(WriteOptions(), db_->DefaultColumnFamily(),
                             del_start, del_end));
  ASSERT_OK(Flush());
  ASSERT_OK(db_->CompactRange(compact_opts, nullptr, nullptr));
  ASSERT_EQ("0,0,0,0,0,1,1", FilesPerLevel());
  ASSERT_GT(GetSstSizeHelper(Temperature::kUnknown), 0);
  ASSERT_GT(GetSstSizeHelper(Temperature::kCold), 0);

  // The snapshot pins the range tombstone, so none are dropped yet.
  ASSERT_EQ(
      options.statistics->getTickerCount(COMPACTION_RANGE_DEL_DROP_OBSOLETE),
      0);

  // Once the snapshot is released, recompacting removes the hot data and the
  // tombstone together.
  db_->ReleaseSnapshot(snapshot);
  ASSERT_OK(db_->CompactRange(compact_opts, nullptr, nullptr));
  ASSERT_EQ("0,0,0,0,0,0,1", FilesPerLevel());
  ASSERT_EQ(GetSstSizeHelper(Temperature::kUnknown), 0);
  ASSERT_GT(GetSstSizeHelper(Temperature::kCold), 0);

  ASSERT_EQ(
      options.statistics->getTickerCount(COMPACTION_RANGE_DEL_DROP_OBSOLETE),
      1);
}
|
|
1238
|
+
|
|
1239
|
+
#endif // !defined(ROCKSDB_LITE)
|
|
1240
|
+
|
|
1241
|
+
} // namespace ROCKSDB_NAMESPACE
|
|
1242
|
+
|
|
1243
|
+
int main(int argc, char** argv) {
|
|
1244
|
+
#if !defined(ROCKSDB_LITE)
|
|
1245
|
+
ROCKSDB_NAMESPACE::port::InstallStackTraceHandler();
|
|
1246
|
+
::testing::InitGoogleTest(&argc, argv);
|
|
1247
|
+
return RUN_ALL_TESTS();
|
|
1248
|
+
#else
|
|
1249
|
+
(void)argc;
|
|
1250
|
+
(void)argv;
|
|
1251
|
+
return 0;
|
|
1252
|
+
#endif
|
|
1253
|
+
}
|