@nxtedition/rocksdb 13.5.13 → 14.0.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/binding.cc +33 -2
- package/binding.gyp +2 -2
- package/chained-batch.js +9 -16
- package/deps/rocksdb/rocksdb/BUCK +18 -1
- package/deps/rocksdb/rocksdb/CMakeLists.txt +10 -3
- package/deps/rocksdb/rocksdb/Makefile +20 -9
- package/deps/rocksdb/rocksdb/cache/cache_bench_tool.cc +90 -13
- package/deps/rocksdb/rocksdb/cache/clock_cache.cc +88 -75
- package/deps/rocksdb/rocksdb/cache/clock_cache.h +44 -36
- package/deps/rocksdb/rocksdb/cache/compressed_secondary_cache.cc +184 -148
- package/deps/rocksdb/rocksdb/cache/compressed_secondary_cache.h +5 -11
- package/deps/rocksdb/rocksdb/cache/compressed_secondary_cache_test.cc +116 -47
- package/deps/rocksdb/rocksdb/cache/lru_cache_test.cc +1 -1
- package/deps/rocksdb/rocksdb/cache/secondary_cache_adapter.cc +3 -6
- package/deps/rocksdb/rocksdb/db/arena_wrapped_db_iter.h +1 -1
- package/deps/rocksdb/rocksdb/db/builder.cc +4 -2
- package/deps/rocksdb/rocksdb/db/c.cc +207 -0
- package/deps/rocksdb/rocksdb/db/c_test.c +72 -0
- package/deps/rocksdb/rocksdb/db/column_family.cc +3 -2
- package/deps/rocksdb/rocksdb/db/column_family.h +5 -0
- package/deps/rocksdb/rocksdb/db/compact_files_test.cc +4 -0
- package/deps/rocksdb/rocksdb/db/compaction/compaction.cc +2 -0
- package/deps/rocksdb/rocksdb/db/compaction/compaction_iterator.cc +51 -38
- package/deps/rocksdb/rocksdb/db/compaction/compaction_iterator.h +29 -12
- package/deps/rocksdb/rocksdb/db/compaction/compaction_iterator_test.cc +5 -10
- package/deps/rocksdb/rocksdb/db/compaction/compaction_job.cc +566 -366
- package/deps/rocksdb/rocksdb/db/compaction/compaction_job.h +131 -4
- package/deps/rocksdb/rocksdb/db/compaction/compaction_outputs.cc +1 -0
- package/deps/rocksdb/rocksdb/db/compaction/compaction_outputs.h +7 -0
- package/deps/rocksdb/rocksdb/db/compaction/compaction_picker.cc +4 -4
- package/deps/rocksdb/rocksdb/db/compaction/compaction_picker.h +13 -14
- package/deps/rocksdb/rocksdb/db/compaction/compaction_picker_fifo.cc +12 -7
- package/deps/rocksdb/rocksdb/db/compaction/compaction_picker_fifo.h +8 -10
- package/deps/rocksdb/rocksdb/db/compaction/compaction_picker_test.cc +97 -76
- package/deps/rocksdb/rocksdb/db/compaction/compaction_picker_universal.cc +11 -14
- package/deps/rocksdb/rocksdb/db/compaction/compaction_service_job.cc +1 -1
- package/deps/rocksdb/rocksdb/db/compaction/subcompaction_state.h +8 -0
- package/deps/rocksdb/rocksdb/db/compaction/tiered_compaction_test.cc +16 -3
- package/deps/rocksdb/rocksdb/db/db_basic_test.cc +1 -0
- package/deps/rocksdb/rocksdb/db/db_compaction_test.cc +448 -1
- package/deps/rocksdb/rocksdb/db/db_impl/db_impl.cc +22 -20
- package/deps/rocksdb/rocksdb/db/db_impl/db_impl.h +4 -1
- package/deps/rocksdb/rocksdb/db/db_impl/db_impl_compaction_flush.cc +5 -5
- package/deps/rocksdb/rocksdb/db/db_impl/db_impl_open.cc +7 -3
- package/deps/rocksdb/rocksdb/db/db_impl/db_impl_secondary.cc +1 -1
- package/deps/rocksdb/rocksdb/db/db_iter.cc +104 -0
- package/deps/rocksdb/rocksdb/db/db_iter.h +4 -11
- package/deps/rocksdb/rocksdb/db/db_iterator_test.cc +331 -58
- package/deps/rocksdb/rocksdb/db/db_memtable_test.cc +129 -0
- package/deps/rocksdb/rocksdb/db/db_sst_test.cc +64 -0
- package/deps/rocksdb/rocksdb/db/db_table_properties_test.cc +40 -0
- package/deps/rocksdb/rocksdb/db/db_test2.cc +25 -15
- package/deps/rocksdb/rocksdb/db/db_test_util.cc +42 -24
- package/deps/rocksdb/rocksdb/db/db_test_util.h +29 -14
- package/deps/rocksdb/rocksdb/db/db_universal_compaction_test.cc +69 -36
- package/deps/rocksdb/rocksdb/db/db_with_timestamp_basic_test.cc +0 -1
- package/deps/rocksdb/rocksdb/db/event_helpers.cc +1 -0
- package/deps/rocksdb/rocksdb/db/experimental.cc +5 -4
- package/deps/rocksdb/rocksdb/db/external_sst_file_basic_test.cc +8 -1
- package/deps/rocksdb/rocksdb/db/external_sst_file_ingestion_job.cc +275 -79
- package/deps/rocksdb/rocksdb/db/external_sst_file_ingestion_job.h +23 -5
- package/deps/rocksdb/rocksdb/db/external_sst_file_test.cc +591 -175
- package/deps/rocksdb/rocksdb/db/flush_job.cc +3 -4
- package/deps/rocksdb/rocksdb/db/log_reader.cc +5 -2
- package/deps/rocksdb/rocksdb/db/memtable.cc +84 -35
- package/deps/rocksdb/rocksdb/db/memtable.h +39 -34
- package/deps/rocksdb/rocksdb/db/merge_helper.cc +1 -0
- package/deps/rocksdb/rocksdb/db/merge_operator.cc +1 -1
- package/deps/rocksdb/rocksdb/db/multi_scan.cc +11 -5
- package/deps/rocksdb/rocksdb/db/version_edit.cc +1 -1
- package/deps/rocksdb/rocksdb/db/version_edit.h +1 -1
- package/deps/rocksdb/rocksdb/db/version_edit_handler.cc +34 -14
- package/deps/rocksdb/rocksdb/db/version_edit_handler.h +28 -5
- package/deps/rocksdb/rocksdb/db/version_set.cc +159 -14
- package/deps/rocksdb/rocksdb/db/version_set.h +2 -0
- package/deps/rocksdb/rocksdb/db_stress_tool/CMakeLists.txt +1 -1
- package/deps/rocksdb/rocksdb/db_stress_tool/db_stress_common.cc +60 -0
- package/deps/rocksdb/rocksdb/db_stress_tool/db_stress_common.h +16 -1
- package/deps/rocksdb/rocksdb/db_stress_tool/db_stress_compaction_service.h +75 -10
- package/deps/rocksdb/rocksdb/db_stress_tool/db_stress_compression_manager.cc +28 -0
- package/deps/rocksdb/rocksdb/db_stress_tool/db_stress_compression_manager.h +2 -0
- package/deps/rocksdb/rocksdb/db_stress_tool/db_stress_driver.cc +31 -1
- package/deps/rocksdb/rocksdb/db_stress_tool/db_stress_gflags.cc +50 -2
- package/deps/rocksdb/rocksdb/db_stress_tool/db_stress_shared_state.h +57 -0
- package/deps/rocksdb/rocksdb/db_stress_tool/db_stress_stat.h +0 -4
- package/deps/rocksdb/rocksdb/db_stress_tool/db_stress_test_base.cc +266 -35
- package/deps/rocksdb/rocksdb/db_stress_tool/db_stress_test_base.h +5 -0
- package/deps/rocksdb/rocksdb/db_stress_tool/db_stress_tool.cc +0 -6
- package/deps/rocksdb/rocksdb/db_stress_tool/no_batched_ops_stress.cc +18 -2
- package/deps/rocksdb/rocksdb/env/env.cc +12 -0
- package/deps/rocksdb/rocksdb/env/env_test.cc +18 -0
- package/deps/rocksdb/rocksdb/env/file_system_tracer.cc +2 -0
- package/deps/rocksdb/rocksdb/env/fs_posix.cc +9 -5
- package/deps/rocksdb/rocksdb/env/io_posix.cc +4 -2
- package/deps/rocksdb/rocksdb/file/random_access_file_reader.cc +19 -0
- package/deps/rocksdb/rocksdb/include/rocksdb/advanced_compression.h +33 -31
- package/deps/rocksdb/rocksdb/include/rocksdb/advanced_options.h +42 -9
- package/deps/rocksdb/rocksdb/include/rocksdb/c.h +93 -0
- package/deps/rocksdb/rocksdb/include/rocksdb/cache.h +43 -49
- package/deps/rocksdb/rocksdb/include/rocksdb/compaction_job_stats.h +4 -3
- package/deps/rocksdb/rocksdb/include/rocksdb/compression_type.h +8 -6
- package/deps/rocksdb/rocksdb/include/rocksdb/data_structure.h +487 -0
- package/deps/rocksdb/rocksdb/include/rocksdb/db.h +11 -12
- package/deps/rocksdb/rocksdb/include/rocksdb/env.h +135 -1
- package/deps/rocksdb/rocksdb/include/rocksdb/file_system.h +5 -0
- package/deps/rocksdb/rocksdb/include/rocksdb/iostats_context.h +12 -0
- package/deps/rocksdb/rocksdb/include/rocksdb/iterator.h +1 -1
- package/deps/rocksdb/rocksdb/include/rocksdb/ldb_tool.h +8 -0
- package/deps/rocksdb/rocksdb/include/rocksdb/memtablerep.h +12 -8
- package/deps/rocksdb/rocksdb/include/rocksdb/metadata.h +3 -0
- package/deps/rocksdb/rocksdb/include/rocksdb/multi_scan.h +19 -9
- package/deps/rocksdb/rocksdb/include/rocksdb/options.h +219 -24
- package/deps/rocksdb/rocksdb/include/rocksdb/point_lock_bench_tool.h +14 -0
- package/deps/rocksdb/rocksdb/include/rocksdb/secondary_cache.h +2 -2
- package/deps/rocksdb/rocksdb/include/rocksdb/slice.h +1 -1
- package/deps/rocksdb/rocksdb/include/rocksdb/statistics.h +7 -0
- package/deps/rocksdb/rocksdb/include/rocksdb/status.h +16 -0
- package/deps/rocksdb/rocksdb/include/rocksdb/table.h +16 -4
- package/deps/rocksdb/rocksdb/include/rocksdb/table_properties.h +13 -0
- package/deps/rocksdb/rocksdb/include/rocksdb/types.h +4 -0
- package/deps/rocksdb/rocksdb/include/rocksdb/universal_compaction.h +0 -2
- package/deps/rocksdb/rocksdb/include/rocksdb/user_defined_index.h +45 -0
- package/deps/rocksdb/rocksdb/include/rocksdb/utilities/cache_dump_load.h +1 -1
- package/deps/rocksdb/rocksdb/include/rocksdb/utilities/stackable_db.h +1 -1
- package/deps/rocksdb/rocksdb/include/rocksdb/utilities/transaction.h +6 -1
- package/deps/rocksdb/rocksdb/include/rocksdb/utilities/transaction_db.h +21 -0
- package/deps/rocksdb/rocksdb/include/rocksdb/version.h +2 -2
- package/deps/rocksdb/rocksdb/memory/memory_allocator_impl.h +3 -3
- package/deps/rocksdb/rocksdb/memtable/inlineskiplist.h +77 -51
- package/deps/rocksdb/rocksdb/memtable/skiplist.h +10 -13
- package/deps/rocksdb/rocksdb/memtable/skiplistrep.cc +16 -7
- package/deps/rocksdb/rocksdb/memtable/vectorrep.cc +9 -4
- package/deps/rocksdb/rocksdb/monitoring/iostats_context.cc +2 -0
- package/deps/rocksdb/rocksdb/monitoring/statistics.cc +6 -0
- package/deps/rocksdb/rocksdb/options/cf_options.cc +13 -1
- package/deps/rocksdb/rocksdb/options/cf_options.h +6 -2
- package/deps/rocksdb/rocksdb/options/options.cc +2 -0
- package/deps/rocksdb/rocksdb/options/options_helper.cc +9 -8
- package/deps/rocksdb/rocksdb/options/options_settable_test.cc +9 -5
- package/deps/rocksdb/rocksdb/port/mmap.cc +1 -1
- package/deps/rocksdb/rocksdb/port/win/xpress_win.cc +51 -0
- package/deps/rocksdb/rocksdb/port/win/xpress_win.h +4 -0
- package/deps/rocksdb/rocksdb/src.mk +8 -2
- package/deps/rocksdb/rocksdb/table/block_based/block_based_table_builder.cc +1125 -765
- package/deps/rocksdb/rocksdb/table/block_based/block_based_table_builder.h +35 -24
- package/deps/rocksdb/rocksdb/table/block_based/block_based_table_factory.cc +29 -4
- package/deps/rocksdb/rocksdb/table/block_based/block_based_table_iterator.cc +732 -256
- package/deps/rocksdb/rocksdb/table/block_based/block_based_table_iterator.h +225 -16
- package/deps/rocksdb/rocksdb/table/block_based/block_based_table_reader.cc +102 -26
- package/deps/rocksdb/rocksdb/table/block_based/block_based_table_reader.h +1 -1
- package/deps/rocksdb/rocksdb/table/block_based/block_based_table_reader_sync_and_async.h +2 -75
- package/deps/rocksdb/rocksdb/table/block_based/block_based_table_reader_test.cc +433 -141
- package/deps/rocksdb/rocksdb/table/block_based/block_builder.h +2 -0
- package/deps/rocksdb/rocksdb/table/block_based/flush_block_policy.cc +17 -10
- package/deps/rocksdb/rocksdb/table/block_based/flush_block_policy_impl.h +20 -0
- package/deps/rocksdb/rocksdb/table/block_based/index_builder.cc +112 -85
- package/deps/rocksdb/rocksdb/table/block_based/index_builder.h +191 -36
- package/deps/rocksdb/rocksdb/table/block_based/partitioned_filter_block.cc +2 -2
- package/deps/rocksdb/rocksdb/table/block_based/partitioned_filter_block_test.cc +1 -1
- package/deps/rocksdb/rocksdb/table/block_based/user_defined_index_wrapper.h +108 -31
- package/deps/rocksdb/rocksdb/table/external_table.cc +7 -3
- package/deps/rocksdb/rocksdb/table/format.cc +6 -12
- package/deps/rocksdb/rocksdb/table/format.h +10 -0
- package/deps/rocksdb/rocksdb/table/internal_iterator.h +1 -1
- package/deps/rocksdb/rocksdb/table/iterator_wrapper.h +1 -1
- package/deps/rocksdb/rocksdb/table/merging_iterator.cc +1 -1
- package/deps/rocksdb/rocksdb/table/meta_blocks.cc +5 -0
- package/deps/rocksdb/rocksdb/table/multiget_context.h +3 -1
- package/deps/rocksdb/rocksdb/table/sst_file_dumper.cc +118 -46
- package/deps/rocksdb/rocksdb/table/sst_file_dumper.h +9 -8
- package/deps/rocksdb/rocksdb/table/table_builder.h +5 -0
- package/deps/rocksdb/rocksdb/table/table_properties.cc +16 -0
- package/deps/rocksdb/rocksdb/table/table_test.cc +1540 -155
- package/deps/rocksdb/rocksdb/test_util/testutil.h +21 -5
- package/deps/rocksdb/rocksdb/tools/db_bench_tool.cc +26 -5
- package/deps/rocksdb/rocksdb/tools/ldb.cc +1 -2
- package/deps/rocksdb/rocksdb/tools/ldb_cmd.cc +2 -0
- package/deps/rocksdb/rocksdb/tools/ldb_tool.cc +9 -3
- package/deps/rocksdb/rocksdb/tools/sst_dump_test.cc +133 -165
- package/deps/rocksdb/rocksdb/tools/sst_dump_tool.cc +173 -64
- package/deps/rocksdb/rocksdb/util/aligned_buffer.h +69 -0
- package/deps/rocksdb/rocksdb/util/atomic.h +6 -0
- package/deps/rocksdb/rocksdb/util/auto_tune_compressor.cc +29 -20
- package/deps/rocksdb/rocksdb/util/auto_tune_compressor.h +10 -6
- package/deps/rocksdb/rocksdb/util/bit_fields.h +338 -0
- package/deps/rocksdb/rocksdb/util/coding.h +3 -3
- package/deps/rocksdb/rocksdb/util/compaction_job_stats_impl.cc +2 -2
- package/deps/rocksdb/rocksdb/util/compression.cc +777 -82
- package/deps/rocksdb/rocksdb/util/compression.h +5 -0
- package/deps/rocksdb/rocksdb/util/compression_test.cc +5 -3
- package/deps/rocksdb/rocksdb/util/dynamic_bloom.cc +2 -2
- package/deps/rocksdb/rocksdb/util/dynamic_bloom.h +15 -14
- package/deps/rocksdb/rocksdb/util/interval_test.cc +102 -0
- package/deps/rocksdb/rocksdb/util/semaphore.h +164 -0
- package/deps/rocksdb/rocksdb/util/simple_mixed_compressor.cc +10 -6
- package/deps/rocksdb/rocksdb/util/simple_mixed_compressor.h +4 -2
- package/deps/rocksdb/rocksdb/util/slice_test.cc +136 -0
- package/deps/rocksdb/rocksdb/util/status.cc +1 -0
- package/deps/rocksdb/rocksdb/util/string_util.cc +2 -16
- package/deps/rocksdb/rocksdb/utilities/cache_dump_load_impl.cc +1 -1
- package/deps/rocksdb/rocksdb/utilities/cache_dump_load_impl.h +1 -1
- package/deps/rocksdb/rocksdb/utilities/fault_injection_fs.cc +7 -4
- package/deps/rocksdb/rocksdb/utilities/fault_injection_fs.h +35 -14
- package/deps/rocksdb/rocksdb/utilities/persistent_cache/hash_table_test.cc +2 -0
- package/deps/rocksdb/rocksdb/utilities/transactions/lock/lock_manager.cc +5 -2
- package/deps/rocksdb/rocksdb/utilities/transactions/lock/point/any_lock_manager_test.h +244 -0
- package/deps/rocksdb/rocksdb/utilities/transactions/lock/point/point_lock_bench.cc +18 -0
- package/deps/rocksdb/rocksdb/utilities/transactions/lock/point/point_lock_bench_tool.cc +159 -0
- package/deps/rocksdb/rocksdb/utilities/transactions/lock/point/point_lock_manager.cc +1244 -161
- package/deps/rocksdb/rocksdb/utilities/transactions/lock/point/point_lock_manager.h +66 -12
- package/deps/rocksdb/rocksdb/utilities/transactions/lock/point/point_lock_manager_stress_test.cc +103 -0
- package/deps/rocksdb/rocksdb/utilities/transactions/lock/point/point_lock_manager_test.cc +1275 -8
- package/deps/rocksdb/rocksdb/utilities/transactions/lock/point/point_lock_manager_test.h +40 -262
- package/deps/rocksdb/rocksdb/utilities/transactions/lock/point/point_lock_manager_test_common.h +78 -0
- package/deps/rocksdb/rocksdb/utilities/transactions/lock/point/point_lock_validation_test_runner.h +469 -0
- package/deps/rocksdb/rocksdb/utilities/transactions/lock/range/range_locking_test.cc +2 -6
- package/deps/rocksdb/rocksdb/utilities/transactions/pessimistic_transaction.cc +4 -0
- package/deps/rocksdb/rocksdb/utilities/transactions/pessimistic_transaction.h +9 -1
- package/deps/rocksdb/rocksdb/utilities/transactions/timestamped_snapshot_test.cc +18 -9
- package/deps/rocksdb/rocksdb/utilities/transactions/transaction_base.h +2 -0
- package/deps/rocksdb/rocksdb/utilities/transactions/transaction_db_mutex_impl.cc +2 -1
- package/deps/rocksdb/rocksdb/utilities/transactions/transaction_test.cc +72 -44
- package/deps/rocksdb/rocksdb/utilities/transactions/transaction_test.h +92 -15
- package/deps/rocksdb/rocksdb/utilities/transactions/write_committed_transaction_ts_test.cc +6 -20
- package/deps/rocksdb/rocksdb/utilities/transactions/write_prepared_transaction_test.cc +143 -112
- package/deps/rocksdb/rocksdb/utilities/transactions/write_unprepared_transaction_test.cc +23 -16
- package/index.js +3 -3
- package/package.json +1 -1
- package/prebuilds/darwin-arm64/@nxtedition+rocksdb.node +0 -0
- package/prebuilds/linux-x64/@nxtedition+rocksdb.node +0 -0
- package/util.h +38 -12
- package/deps/rocksdb/rocksdb/db_stress_tool/db_stress_stat.cc +0 -17
|
@@ -5,11 +5,49 @@
|
|
|
5
5
|
|
|
6
6
|
#include "utilities/transactions/lock/point/point_lock_manager_test.h"
|
|
7
7
|
|
|
8
|
+
#include "utilities/transactions/lock/point/any_lock_manager_test.h"
|
|
9
|
+
|
|
8
10
|
namespace ROCKSDB_NAMESPACE {
|
|
9
11
|
|
|
12
|
+
struct SpotLockManagerTestParam {
|
|
13
|
+
bool use_per_key_point_lock_manager;
|
|
14
|
+
int deadlock_timeout_us;
|
|
15
|
+
};
|
|
16
|
+
|
|
17
|
+
// Define operator<< for SpotLockManagerTestParam to stop valgrind from
|
|
18
|
+
// complaining uinitialized value when printing SpotLockManagerTestParam.
|
|
19
|
+
std::ostream& operator<<(std::ostream& os,
|
|
20
|
+
const SpotLockManagerTestParam& param) {
|
|
21
|
+
os << "use_per_key_point_lock_manager: "
|
|
22
|
+
<< param.use_per_key_point_lock_manager
|
|
23
|
+
<< ", deadlock_timeout_us: " << param.deadlock_timeout_us;
|
|
24
|
+
return os;
|
|
25
|
+
}
|
|
26
|
+
|
|
27
|
+
// including test for both PointLockManager and PerKeyPointLockManager
|
|
28
|
+
class SpotLockManagerTest
|
|
29
|
+
: public PointLockManagerTest,
|
|
30
|
+
public testing::WithParamInterface<SpotLockManagerTestParam> {
|
|
31
|
+
public:
|
|
32
|
+
void SetUp() override {
|
|
33
|
+
init();
|
|
34
|
+
// If a custom setup function was provided, use it. Otherwise, use what we
|
|
35
|
+
// have inherited.
|
|
36
|
+
auto param = GetParam();
|
|
37
|
+
if (param.use_per_key_point_lock_manager) {
|
|
38
|
+
locker_.reset(new PerKeyPointLockManager(
|
|
39
|
+
static_cast<PessimisticTransactionDB*>(db_), txndb_opt_));
|
|
40
|
+
} else {
|
|
41
|
+
locker_.reset(new PointLockManager(
|
|
42
|
+
static_cast<PessimisticTransactionDB*>(db_), txndb_opt_));
|
|
43
|
+
}
|
|
44
|
+
deadlock_timeout_us = param.deadlock_timeout_us;
|
|
45
|
+
}
|
|
46
|
+
};
|
|
47
|
+
|
|
10
48
|
// This test is not applicable for Range Lock manager as Range Lock Manager
|
|
11
49
|
// operates on Column Families, not their ids.
|
|
12
|
-
|
|
50
|
+
TEST_P(SpotLockManagerTest, LockNonExistingColumnFamily) {
|
|
13
51
|
MockColumnFamilyHandle cf(1024);
|
|
14
52
|
locker_->RemoveColumnFamily(&cf);
|
|
15
53
|
auto txn = NewTxn();
|
|
@@ -19,7 +57,7 @@ TEST_F(PointLockManagerTest, LockNonExistingColumnFamily) {
|
|
|
19
57
|
delete txn;
|
|
20
58
|
}
|
|
21
59
|
|
|
22
|
-
|
|
60
|
+
TEST_P(SpotLockManagerTest, LockStatus) {
|
|
23
61
|
MockColumnFamilyHandle cf1(1024), cf2(2048);
|
|
24
62
|
locker_->AddColumnFamily(&cf1);
|
|
25
63
|
locker_->AddColumnFamily(&cf2);
|
|
@@ -61,7 +99,7 @@ TEST_F(PointLockManagerTest, LockStatus) {
|
|
|
61
99
|
delete txn2;
|
|
62
100
|
}
|
|
63
101
|
|
|
64
|
-
|
|
102
|
+
TEST_P(SpotLockManagerTest, UnlockExclusive) {
|
|
65
103
|
MockColumnFamilyHandle cf(1);
|
|
66
104
|
locker_->AddColumnFamily(&cf);
|
|
67
105
|
|
|
@@ -79,7 +117,7 @@ TEST_F(PointLockManagerTest, UnlockExclusive) {
|
|
|
79
117
|
delete txn2;
|
|
80
118
|
}
|
|
81
119
|
|
|
82
|
-
|
|
120
|
+
TEST_P(SpotLockManagerTest, UnlockShared) {
|
|
83
121
|
MockColumnFamilyHandle cf(1);
|
|
84
122
|
locker_->AddColumnFamily(&cf);
|
|
85
123
|
|
|
@@ -100,7 +138,7 @@ TEST_F(PointLockManagerTest, UnlockShared) {
|
|
|
100
138
|
// This test doesn't work with Range Lock Manager, because Range Lock Manager
|
|
101
139
|
// doesn't support deadlock_detect_depth.
|
|
102
140
|
|
|
103
|
-
|
|
141
|
+
TEST_P(SpotLockManagerTest, DeadlockDepthExceeded) {
|
|
104
142
|
// Tests that when detecting deadlock, if the detection depth is exceeded,
|
|
105
143
|
// it's also viewed as deadlock.
|
|
106
144
|
MockColumnFamilyHandle cf(1);
|
|
@@ -108,7 +146,7 @@ TEST_F(PointLockManagerTest, DeadlockDepthExceeded) {
|
|
|
108
146
|
TransactionOptions txn_opt;
|
|
109
147
|
txn_opt.deadlock_detect = true;
|
|
110
148
|
txn_opt.deadlock_detect_depth = 1;
|
|
111
|
-
txn_opt.lock_timeout =
|
|
149
|
+
txn_opt.lock_timeout = kLongTxnTimeoutMs;
|
|
112
150
|
auto txn1 = NewTxn(txn_opt);
|
|
113
151
|
auto txn2 = NewTxn(txn_opt);
|
|
114
152
|
auto txn3 = NewTxn(txn_opt);
|
|
@@ -124,7 +162,8 @@ TEST_F(PointLockManagerTest, DeadlockDepthExceeded) {
|
|
|
124
162
|
// it must have another txn waiting on it, which is txn4 in this case.
|
|
125
163
|
ASSERT_OK(locker_->TryLock(txn1, 1, "k1", env_, true));
|
|
126
164
|
|
|
127
|
-
port::Thread t1
|
|
165
|
+
port::Thread t1;
|
|
166
|
+
BlockUntilWaitingTxn(wait_sync_point_name_, t1, [&]() {
|
|
128
167
|
ASSERT_OK(locker_->TryLock(txn2, 1, "k2", env_, true));
|
|
129
168
|
// block because txn1 is holding a lock on k1.
|
|
130
169
|
ASSERT_OK(locker_->TryLock(txn2, 1, "k1", env_, true));
|
|
@@ -132,7 +171,8 @@ TEST_F(PointLockManagerTest, DeadlockDepthExceeded) {
|
|
|
132
171
|
|
|
133
172
|
ASSERT_OK(locker_->TryLock(txn3, 1, "k3", env_, true));
|
|
134
173
|
|
|
135
|
-
port::Thread t2
|
|
174
|
+
port::Thread t2;
|
|
175
|
+
BlockUntilWaitingTxn(wait_sync_point_name_, t2, [&]() {
|
|
136
176
|
// block because txn3 is holding a lock on k1.
|
|
137
177
|
ASSERT_OK(locker_->TryLock(txn4, 1, "k3", env_, true));
|
|
138
178
|
});
|
|
@@ -150,15 +190,1242 @@ TEST_F(PointLockManagerTest, DeadlockDepthExceeded) {
|
|
|
150
190
|
t1.join();
|
|
151
191
|
t2.join();
|
|
152
192
|
|
|
193
|
+
locker_->UnLock(txn2, 1, "k2", env_);
|
|
194
|
+
locker_->UnLock(txn2, 1, "k1", env_);
|
|
195
|
+
locker_->UnLock(txn4, 1, "k3", env_);
|
|
196
|
+
|
|
153
197
|
delete txn4;
|
|
154
198
|
delete txn3;
|
|
155
199
|
delete txn2;
|
|
156
200
|
delete txn1;
|
|
157
201
|
}
|
|
158
202
|
|
|
203
|
+
TEST_P(SpotLockManagerTest, PrioritizedLockUpgradeWithExclusiveLock) {
|
|
204
|
+
// Tests that a lock upgrade request is prioritized over other lock requests.
|
|
205
|
+
|
|
206
|
+
// txn1 acquires shared lock on k1.
|
|
207
|
+
// txn2 acquires exclusive lock on k1.
|
|
208
|
+
// txn1 acquires exclusive locks k1 successfully
|
|
209
|
+
|
|
210
|
+
MockColumnFamilyHandle cf(1);
|
|
211
|
+
locker_->AddColumnFamily(&cf);
|
|
212
|
+
TransactionOptions txn_opt;
|
|
213
|
+
txn_opt.deadlock_detect = true;
|
|
214
|
+
txn_opt.lock_timeout = kLongTxnTimeoutMs;
|
|
215
|
+
auto txn1 = NewTxn(txn_opt);
|
|
216
|
+
auto txn2 = NewTxn(txn_opt);
|
|
217
|
+
|
|
218
|
+
ASSERT_OK(locker_->TryLock(txn1, 1, "k1", env_, false));
|
|
219
|
+
|
|
220
|
+
// txn2 tries to lock k1 exclusively, will be blocked.
|
|
221
|
+
port::Thread t;
|
|
222
|
+
BlockUntilWaitingTxn(wait_sync_point_name_, t, [this, &txn2]() {
|
|
223
|
+
// block because txn1 is holding a shared lock on k1.
|
|
224
|
+
ASSERT_OK(locker_->TryLock(txn2, 1, "k1", env_, true));
|
|
225
|
+
});
|
|
226
|
+
|
|
227
|
+
// verify lock upgrade successfully
|
|
228
|
+
ASSERT_OK(locker_->TryLock(txn1, 1, "k1", env_, true));
|
|
229
|
+
|
|
230
|
+
// unlock txn1, so txn2 could proceed
|
|
231
|
+
locker_->UnLock(txn1, 1, "k1", env_);
|
|
232
|
+
|
|
233
|
+
// Cleanup
|
|
234
|
+
t.join();
|
|
235
|
+
|
|
236
|
+
// Cleanup
|
|
237
|
+
locker_->UnLock(txn2, 1, "k1", env_);
|
|
238
|
+
delete txn2;
|
|
239
|
+
delete txn1;
|
|
240
|
+
}
|
|
241
|
+
|
|
242
|
+
TEST_P(SpotLockManagerTest,
|
|
243
|
+
PrioritizedLockUpgradeWithExclusiveLockAndSharedLock) {
|
|
244
|
+
// Tests that lock upgrade is prioritized when mixed with shared and exclusive
|
|
245
|
+
// locks requests
|
|
246
|
+
|
|
247
|
+
// txn1 acquires shared lock on k1.
|
|
248
|
+
// txn2 acquires shared lock on k1.
|
|
249
|
+
// txn3 acquires exclusive lock on k1.
|
|
250
|
+
// txn1 acquires exclusive locks k1 <- request granted after txn2 release the
|
|
251
|
+
// lock
|
|
252
|
+
|
|
253
|
+
MockColumnFamilyHandle cf(1);
|
|
254
|
+
locker_->AddColumnFamily(&cf);
|
|
255
|
+
TransactionOptions txn_opt;
|
|
256
|
+
txn_opt.deadlock_detect = true;
|
|
257
|
+
txn_opt.lock_timeout = kLongTxnTimeoutMs;
|
|
258
|
+
auto txn1 = NewTxn(txn_opt);
|
|
259
|
+
auto txn2 = NewTxn(txn_opt);
|
|
260
|
+
auto txn3 = NewTxn(txn_opt);
|
|
261
|
+
|
|
262
|
+
ASSERT_OK(locker_->TryLock(txn1, 1, "k1", env_, false));
|
|
263
|
+
ASSERT_OK(locker_->TryLock(txn2, 1, "k1", env_, false));
|
|
264
|
+
|
|
265
|
+
// txn3 tries to lock k1 exclusively, will be blocked.
|
|
266
|
+
port::Thread txn3_thread;
|
|
267
|
+
BlockUntilWaitingTxn(wait_sync_point_name_, txn3_thread, [this, &txn3]() {
|
|
268
|
+
// block because txn1 and txn2 are holding a shared lock on k1.
|
|
269
|
+
ASSERT_OK(locker_->TryLock(txn3, 1, "k1", env_, true));
|
|
270
|
+
});
|
|
271
|
+
// Verify txn3 is blocked
|
|
272
|
+
ASSERT_TRUE(txn3_thread.joinable());
|
|
273
|
+
|
|
274
|
+
// txn1 tries to lock k1 exclusively, will be blocked.
|
|
275
|
+
port::Thread txn1_thread;
|
|
276
|
+
BlockUntilWaitingTxn(wait_sync_point_name_, txn1_thread, [this, &txn1]() {
|
|
277
|
+
// block because txn1 and txn2 are holding a shared lock on k1.
|
|
278
|
+
ASSERT_OK(locker_->TryLock(txn1, 1, "k1", env_, true));
|
|
279
|
+
});
|
|
280
|
+
// Verify txn1 is blocked
|
|
281
|
+
ASSERT_TRUE(txn1_thread.joinable());
|
|
282
|
+
|
|
283
|
+
// Unlock txn2, so txn1 could proceed
|
|
284
|
+
locker_->UnLock(txn2, 1, "k1", env_);
|
|
285
|
+
txn1_thread.join();
|
|
286
|
+
|
|
287
|
+
// Unlock txn1, so txn3 could proceed
|
|
288
|
+
locker_->UnLock(txn1, 1, "k1", env_);
|
|
289
|
+
txn3_thread.join();
|
|
290
|
+
|
|
291
|
+
// Cleanup
|
|
292
|
+
locker_->UnLock(txn3, 1, "k1", env_);
|
|
293
|
+
delete txn3;
|
|
294
|
+
delete txn2;
|
|
295
|
+
delete txn1;
|
|
296
|
+
}
|
|
297
|
+
|
|
298
|
+
TEST_P(SpotLockManagerTest, Deadlock_MultipleUpgrade) {
|
|
299
|
+
// Tests that deadlock can be detected for shared locks and exclusive locks
|
|
300
|
+
// mixed Deadlock scenario:
|
|
301
|
+
|
|
302
|
+
// txn1 acquires shared lock on k1.
|
|
303
|
+
// txn2 acquires shared lock on k1.
|
|
304
|
+
// txn1 acquires exclusive locks k1
|
|
305
|
+
// txn2 acquires exclusive locks k1 <- dead lock detected
|
|
306
|
+
|
|
307
|
+
MockColumnFamilyHandle cf(1);
|
|
308
|
+
locker_->AddColumnFamily(&cf);
|
|
309
|
+
TransactionOptions txn_opt;
|
|
310
|
+
txn_opt.deadlock_detect = true;
|
|
311
|
+
txn_opt.lock_timeout = kLongTxnTimeoutMs;
|
|
312
|
+
auto txn1 = NewTxn(txn_opt);
|
|
313
|
+
auto txn2 = NewTxn(txn_opt);
|
|
314
|
+
|
|
315
|
+
ASSERT_OK(locker_->TryLock(txn1, 1, "k1", env_, false));
|
|
316
|
+
ASSERT_OK(locker_->TryLock(txn2, 1, "k1", env_, false));
|
|
317
|
+
|
|
318
|
+
// txn1 tries to lock k1 exclusively, will be blocked.
|
|
319
|
+
port::Thread t;
|
|
320
|
+
BlockUntilWaitingTxn(wait_sync_point_name_, t, [this, &txn1]() {
|
|
321
|
+
// block because txn2 is holding a shared lock on k1.
|
|
322
|
+
ASSERT_OK(locker_->TryLock(txn1, 1, "k1", env_, true));
|
|
323
|
+
});
|
|
324
|
+
|
|
325
|
+
auto s = locker_->TryLock(txn2, 1, "k1", env_, true);
|
|
326
|
+
ASSERT_TRUE(s.IsBusy());
|
|
327
|
+
ASSERT_EQ(s.subcode(), Status::SubCode::kDeadlock);
|
|
328
|
+
|
|
329
|
+
std::vector<DeadlockPath> deadlock_paths = locker_->GetDeadlockInfoBuffer();
|
|
330
|
+
ASSERT_EQ(deadlock_paths.size(), 1u);
|
|
331
|
+
ASSERT_FALSE(deadlock_paths[0].limit_exceeded);
|
|
332
|
+
|
|
333
|
+
std::vector<DeadlockInfo> deadlocks = deadlock_paths[0].path;
|
|
334
|
+
ASSERT_EQ(deadlocks.size(), 2u);
|
|
335
|
+
|
|
336
|
+
ASSERT_EQ(deadlocks[0].m_txn_id, txn1->GetID());
|
|
337
|
+
ASSERT_EQ(deadlocks[0].m_cf_id, 1u);
|
|
338
|
+
ASSERT_TRUE(deadlocks[0].m_exclusive);
|
|
339
|
+
ASSERT_EQ(deadlocks[0].m_waiting_key, "k1");
|
|
340
|
+
|
|
341
|
+
ASSERT_EQ(deadlocks[1].m_txn_id, txn2->GetID());
|
|
342
|
+
ASSERT_EQ(deadlocks[1].m_cf_id, 1u);
|
|
343
|
+
ASSERT_TRUE(deadlocks[1].m_exclusive);
|
|
344
|
+
ASSERT_EQ(deadlocks[1].m_waiting_key, "k1");
|
|
345
|
+
|
|
346
|
+
locker_->UnLock(txn2, 1, "k1", env_);
|
|
347
|
+
t.join();
|
|
348
|
+
|
|
349
|
+
// Cleanup
|
|
350
|
+
locker_->UnLock(txn1, 1, "k1", env_);
|
|
351
|
+
delete txn2;
|
|
352
|
+
delete txn1;
|
|
353
|
+
}
|
|
354
|
+
|
|
355
|
+
TEST_P(SpotLockManagerTest, Deadlock_MultipleUpgradeInterleaveExclusive) {
|
|
356
|
+
// Tests that deadlock can be detected for shared locks and exclusive locks
|
|
357
|
+
// mixed Deadlock scenario:
|
|
358
|
+
|
|
359
|
+
// txn1 acquires shared lock on k1.
|
|
360
|
+
// txn2 acquires shared lock on k1.
|
|
361
|
+
// txn3 acquires exclusive lock on k1.
|
|
362
|
+
// txn1 acquires exclusive locks k1 <- request granted after txn2 release the
|
|
363
|
+
// lock.
|
|
364
|
+
// txn2 acquires exclusive locks k1 <- dead lock detected
|
|
365
|
+
|
|
366
|
+
MockColumnFamilyHandle cf(1);
|
|
367
|
+
locker_->AddColumnFamily(&cf);
|
|
368
|
+
TransactionOptions txn_opt;
|
|
369
|
+
txn_opt.deadlock_detect = true;
|
|
370
|
+
txn_opt.lock_timeout = kLongTxnTimeoutMs;
|
|
371
|
+
auto txn1 = NewTxn(txn_opt);
|
|
372
|
+
auto txn2 = NewTxn(txn_opt);
|
|
373
|
+
auto txn3 = NewTxn(txn_opt);
|
|
374
|
+
|
|
375
|
+
ASSERT_OK(locker_->TryLock(txn1, 1, "k1", env_, false));
|
|
376
|
+
ASSERT_OK(locker_->TryLock(txn2, 1, "k1", env_, false));
|
|
377
|
+
|
|
378
|
+
// txn3 tries to lock k1 exclusively, will be blocked.
|
|
379
|
+
port::Thread txn3_thread;
|
|
380
|
+
BlockUntilWaitingTxn(wait_sync_point_name_, txn3_thread, [this, &txn3]() {
|
|
381
|
+
// block because txn1 and txn2 are holding a shared lock on k1.
|
|
382
|
+
ASSERT_OK(locker_->TryLock(txn3, 1, "k1", env_, true));
|
|
383
|
+
});
|
|
384
|
+
// Verify txn3 is blocked
|
|
385
|
+
ASSERT_TRUE(txn3_thread.joinable());
|
|
386
|
+
|
|
387
|
+
// txn1 tries to lock k1 exclusively, will be blocked.
|
|
388
|
+
port::Thread txn1_thread;
|
|
389
|
+
BlockUntilWaitingTxn(wait_sync_point_name_, txn1_thread, [this, &txn1]() {
|
|
390
|
+
// block because txn1 and txn2 are holding a shared lock on k1.
|
|
391
|
+
ASSERT_OK(locker_->TryLock(txn1, 1, "k1", env_, true));
|
|
392
|
+
});
|
|
393
|
+
// Verify txn1 is blocked
|
|
394
|
+
ASSERT_TRUE(txn1_thread.joinable());
|
|
395
|
+
|
|
396
|
+
auto s = locker_->TryLock(txn2, 1, "k1", env_, true);
|
|
397
|
+
ASSERT_TRUE(s.IsBusy());
|
|
398
|
+
ASSERT_EQ(s.subcode(), Status::SubCode::kDeadlock);
|
|
399
|
+
|
|
400
|
+
std::vector<DeadlockPath> deadlock_paths = locker_->GetDeadlockInfoBuffer();
|
|
401
|
+
ASSERT_EQ(deadlock_paths.size(), 1u);
|
|
402
|
+
ASSERT_FALSE(deadlock_paths[0].limit_exceeded);
|
|
403
|
+
|
|
404
|
+
std::vector<DeadlockInfo> deadlocks = deadlock_paths[0].path;
|
|
405
|
+
ASSERT_EQ(deadlocks.size(), 2u);
|
|
406
|
+
|
|
407
|
+
ASSERT_EQ(deadlocks[0].m_txn_id, txn1->GetID());
|
|
408
|
+
ASSERT_EQ(deadlocks[0].m_cf_id, 1u);
|
|
409
|
+
ASSERT_TRUE(deadlocks[0].m_exclusive);
|
|
410
|
+
ASSERT_EQ(deadlocks[0].m_waiting_key, "k1");
|
|
411
|
+
|
|
412
|
+
ASSERT_EQ(deadlocks[1].m_txn_id, txn2->GetID());
|
|
413
|
+
ASSERT_EQ(deadlocks[1].m_cf_id, 1u);
|
|
414
|
+
ASSERT_TRUE(deadlocks[1].m_exclusive);
|
|
415
|
+
ASSERT_EQ(deadlocks[1].m_waiting_key, "k1");
|
|
416
|
+
|
|
417
|
+
// Unlock txn2, so txn1 could proceed
|
|
418
|
+
locker_->UnLock(txn2, 1, "k1", env_);
|
|
419
|
+
txn1_thread.join();
|
|
420
|
+
|
|
421
|
+
// Unlock txn1, so txn3 could proceed
|
|
422
|
+
locker_->UnLock(txn1, 1, "k1", env_);
|
|
423
|
+
txn3_thread.join();
|
|
424
|
+
|
|
425
|
+
// Cleanup
|
|
426
|
+
locker_->UnLock(txn3, 1, "k1", env_);
|
|
427
|
+
delete txn3;
|
|
428
|
+
delete txn2;
|
|
429
|
+
delete txn1;
|
|
430
|
+
}
|
|
431
|
+
|
|
432
|
+
// Fixture for tests that exercise PerKeyPointLockManager directly.
// Inherits the generic PointLockManagerTest harness but swaps in a
// PerKeyPointLockManager instance and pre-registers one column family (id 1).
class PerKeyPointLockManagerTest : public PointLockManagerTest {
 public:
  void SetUp() override {
    init();
    cf_ = std::make_unique<MockColumnFamilyHandle>(1);
    txn_opt_.deadlock_detect = true;
    // by default use long timeout and disable expiration, so tests control
    // blocking/wakeup explicitly instead of racing against timers
    txn_opt_.lock_timeout = kLongTxnTimeoutMs;
    txn_opt_.expiration = -1;

    // CAUTION: This test creates a separate lock manager object (right, NOT
    // the one that the TransactionDB is using!), and runs tests on it.
    locker_.reset(new PerKeyPointLockManager(
        static_cast<PessimisticTransactionDB*>(db_), txndb_opt_));
    locker_->AddColumnFamily(cf_.get());
  }

  // Default transaction options used by the tests; individual tests tweak
  // expiration/timeout fields before calling NewTxn(txn_opt_).
  TransactionOptions txn_opt_;
  // Keeps the mock CF handle alive for the lifetime of locker_.
  std::unique_ptr<MockColumnFamilyHandle> cf_;
};
|
|
452
|
+
|
|
453
|
+
// Verifies that releasing an exclusive lock wakes exactly one waiting
// transaction at a time (no thundering herd): each UnLock lets precisely the
// next queued waiter finish, and the wait sync point fires once per waiter.
TEST_F(PerKeyPointLockManagerTest, LockEfficiency) {
  // Create multiple transactions, each acquire exclusive lock on the same key
  std::vector<PessimisticTransaction*> txns;
  std::vector<port::Thread> blockingThreads;

  // Count the total number of wait sync point calls
  std::atomic_int wait_sync_point_times = 0;
  SyncPoint::GetInstance()->SetCallBack(
      wait_sync_point_name_,
      [&wait_sync_point_times](void* /*arg*/) { wait_sync_point_times++; });
  SyncPoint::GetInstance()->EnableProcessing();

  constexpr auto num_of_txn = 10;
  // create 10 transactions, each of them try to acquire exclusive lock on the
  // same key
  for (int i = 0; i < num_of_txn; i++) {
    auto txn = NewTxn(txn_opt_);
    txns.push_back(txn);

    if (i == 0) {
      // txn0 acquires the lock, so the rest of the transactions could block
      ASSERT_OK(locker_->TryLock(txn, 1, "k1", env_, true));
    } else {
      blockingThreads.emplace_back([this, txn]() {
        // block because first txn is holding an exclusive lock on k1.
        ASSERT_OK(locker_->TryLock(txn, 1, "k1", env_, true));
      });
    }

    // wait for transaction i to be blocked (each blocked waiter bumps the
    // sync-point counter exactly once before sleeping)
    while (wait_sync_point_times.load() < i) {
      std::this_thread::sleep_for(std::chrono::milliseconds(1));
    }
  }

  // unlock the key, so next transaction could take the lock.
  locker_->UnLock(txns[0], 1, "k1", env_);

  auto num_of_blocking_thread = num_of_txn - 1;

  for (int i = 0; i < num_of_blocking_thread; i++) {
    // validate the thread is finished
    blockingThreads[i].join();
    auto num_of_threads_completed = i + 1;
    for (int j = 0; j < num_of_blocking_thread; j++) {
      if (j < num_of_threads_completed) {
        // validate the thread is no longer joinable (already joined above)
        ASSERT_FALSE(blockingThreads[j].joinable());
      } else {
        // validate the rest of the threads are still joinable
        // NOTE(review): joinable() is also true for a finished-but-unjoined
        // thread, so this is a necessary-but-not-sufficient check.
        ASSERT_TRUE(blockingThreads[j].joinable());
      }
    }
    // unlock the key, so next transaction could take the lock.
    locker_->UnLock(txns[i + 1], 1, "k1", env_);
  }

  // Every blocking thread waited exactly once: no spurious wakeups/re-waits.
  ASSERT_EQ(wait_sync_point_times.load(), num_of_blocking_thread);
  SyncPoint::GetInstance()->DisableProcessing();
  SyncPoint::GetInstance()->ClearAllCallBacks();

  // delete in reverse creation order
  for (int i = 0; i < num_of_txn; i++) {
    delete txns[num_of_txn - i - 1];
  }
}
|
|
518
|
+
|
|
519
|
+
// Verifies FIFO fairness with mixed shared/exclusive requests on one key:
// adjacent shared requests are granted together as a group, while exclusive
// requests wait for all prior grants to be released, in queue order.
TEST_F(PerKeyPointLockManagerTest, LockFairness) {
  // Create multiple transactions requesting locks on the same key, validate
  // that they are executed in FIFO order

  // txn0 acquires exclusive lock on k1.
  // txn1 acquires shared lock on k1.
  // txn2 acquires shared lock on k1.
  // txn3 acquires exclusive lock on k1.
  // txn4 acquires shared lock on k1.
  // txn5 acquires exclusive lock on k1.
  // txn6 acquires exclusive lock on k1.
  // txn7 acquires shared lock on k1.
  // txn8 acquires shared lock on k1.
  // txn9 acquires exclusive lock on k1.

  std::vector<PessimisticTransaction*> txns;
  std::vector<port::Thread> blockingThreads;

  // Count the total number of wait sync point calls
  std::atomic_int wait_sync_point_times = 0;
  SyncPoint::GetInstance()->SetCallBack(
      wait_sync_point_name_,
      [&wait_sync_point_times](void* /*arg*/) { wait_sync_point_times++; });
  SyncPoint::GetInstance()->EnableProcessing();

  constexpr auto num_of_txn = 10;
  // Lock type requested by txn i: true = exclusive, false = shared.
  std::vector<bool> txn_lock_types = {true, false, false, true, false,
                                      true, true, false, false, true};
  // create 10 transactions, each of them try to acquire a lock on the
  // same key
  for (int i = 0; i < num_of_txn; i++) {
    auto txn = NewTxn(txn_opt_);
    txns.push_back(txn);

    if (i == 0) {
      // txn0 acquires the lock, so the rest of the transactions would block
      ASSERT_OK(locker_->TryLock(txn, 1, "k1", env_, txn_lock_types[0]));
    } else {
      blockingThreads.emplace_back([this, txn, type = txn_lock_types[i]]() {
        ASSERT_OK(locker_->TryLock(txn, 1, "k1", env_, type));
      });
    }

    // wait for transaction i to be blocked
    while (wait_sync_point_times.load() < i) {
      std::this_thread::sleep_for(std::chrono::milliseconds(1));
    }
  }

  auto num_of_blocking_thread = num_of_txn - 1;

  // Cursor over blockingThreads (txn1..txn9) and txns (txn0..txn9).
  auto thread_idx = 0;
  auto txn_idx = 0;

  auto unlockTxn = [&]() {
    // unlock the key in the next transaction, in creation order.
    locker_->UnLock(txns[txn_idx++], 1, "k1", env_);
  };

  auto validateLockTakenByNextTxn = [&]() {
    // join() returns only after the thread's TryLock succeeded, proving the
    // next queued transaction took the lock.
    blockingThreads[thread_idx++].join();
  };

  auto stillWaitingForLock = [&]() {
    // validate the next thread has not finished yet (still joinable).
    // NOTE(review): joinable() is also true for a finished-but-unjoined
    // thread, so this check can only catch a wrongly-granted lock, not prove
    // the thread is actually blocked.
    ASSERT_TRUE(blockingThreads[thread_idx].joinable());
  };

  // unlock the key, so next group of transactions could take the lock.
  unlockTxn();

  // txn1 acquires shared lock on k1.
  // txn2 acquires shared lock on k1.
  validateLockTakenByNextTxn();
  validateLockTakenByNextTxn();

  // txn3 acquires exclusive lock on k1 (only after both shared holders free).
  stillWaitingForLock();
  unlockTxn();
  unlockTxn();
  validateLockTakenByNextTxn();

  // txn4 acquires shared lock on k1.
  stillWaitingForLock();
  unlockTxn();
  validateLockTakenByNextTxn();

  // txn5 acquires exclusive lock on k1.
  stillWaitingForLock();
  unlockTxn();
  validateLockTakenByNextTxn();

  // txn6 acquires exclusive lock on k1.
  stillWaitingForLock();
  unlockTxn();
  validateLockTakenByNextTxn();

  // txn7 acquires shared lock on k1.
  // txn8 acquires shared lock on k1.
  stillWaitingForLock();
  unlockTxn();
  validateLockTakenByNextTxn();
  validateLockTakenByNextTxn();

  // txn9 acquires exclusive lock on k1 (after both shared holders free).
  stillWaitingForLock();
  unlockTxn();
  unlockTxn();
  validateLockTakenByNextTxn();

  // clean up
  unlockTxn();

  ASSERT_EQ(wait_sync_point_times.load(), num_of_blocking_thread);
  SyncPoint::GetInstance()->DisableProcessing();
  SyncPoint::GetInstance()->ClearAllCallBacks();

  for (int i = 0; i < num_of_txn; i++) {
    delete txns[num_of_txn - i - 1];
  }
}
|
|
641
|
+
|
|
642
|
+
// Verifies that an S, X, S request sequence is granted strictly in FIFO
// order: the trailing shared request must NOT jump the queued exclusive
// request even though it is compatible with the currently-held shared lock.
TEST_F(PerKeyPointLockManagerTest, FIFO) {
  // validate S, X, S lock order would be executed in FIFO order
  // txn0 acquires shared lock on k1.
  // txn1 acquires exclusive lock on k1.
  // txn2 acquires shared lock on k1.

  std::vector<PessimisticTransaction*> txns;
  std::vector<port::Thread> blockingThreads;

  // Count the total number of wait sync point calls
  std::atomic_int wait_sync_point_times = 0;
  SyncPoint::GetInstance()->SetCallBack(
      wait_sync_point_name_,
      [&wait_sync_point_times](void* /*arg*/) { wait_sync_point_times++; });
  SyncPoint::GetInstance()->EnableProcessing();

  constexpr auto num_of_txn = 3;
  // Lock type requested by txn i: true = exclusive, false = shared.
  std::vector<bool> txn_lock_types = {false, true, false};
  // create 3 transactions, each of them try to acquire a lock on the
  // same key
  for (int i = 0; i < num_of_txn; i++) {
    auto txn = NewTxn(txn_opt_);
    txns.push_back(txn);

    if (i == 0) {
      // txn0 acquires the lock, so the rest of the transactions would block
      ASSERT_OK(locker_->TryLock(txn, 1, "k1", env_, txn_lock_types[0]));
    } else {
      blockingThreads.emplace_back([this, txn, type = txn_lock_types[i]]() {
        ASSERT_OK(locker_->TryLock(txn, 1, "k1", env_, type));
      });
    }

    // wait for transaction i to be blocked
    while (wait_sync_point_times.load() < i) {
      std::this_thread::sleep_for(std::chrono::milliseconds(1));
    }
  }

  auto num_of_blocking_thread = num_of_txn - 1;

  // Cursor over blockingThreads (txn1..txn2) and txns (txn0..txn2).
  auto thread_idx = 0;
  auto txn_idx = 0;

  auto unlockTxn = [&]() {
    // unlock the key in the next transaction, in creation order.
    locker_->UnLock(txns[txn_idx++], 1, "k1", env_);
  };

  auto validateLockTakenByNextTxn = [&]() {
    // join() returns only after the thread's TryLock succeeded, proving the
    // next queued transaction took the lock.
    blockingThreads[thread_idx++].join();
  };

  auto stillWaitingForLock = [&]() {
    // validate the next thread has not finished yet (still joinable).
    // NOTE(review): joinable() is also true for a finished-but-unjoined
    // thread, so this check can only catch a wrongly-granted lock.
    ASSERT_TRUE(blockingThreads[thread_idx].joinable());
  };

  // txn2 (shared) must still be queued behind txn1 (exclusive); unlock txn0
  // so the next transaction in the queue could take the lock.
  stillWaitingForLock();
  unlockTxn();

  // txn1 acquires exclusive lock on k1.
  validateLockTakenByNextTxn();

  // txn2 acquires shared lock on k1 only after txn1 releases.
  stillWaitingForLock();
  unlockTxn();
  validateLockTakenByNextTxn();

  // clean up
  unlockTxn();

  ASSERT_EQ(wait_sync_point_times.load(), num_of_blocking_thread);
  SyncPoint::GetInstance()->DisableProcessing();
  SyncPoint::GetInstance()->ClearAllCallBacks();

  for (int i = 0; i < num_of_txn; i++) {
    delete txns[num_of_txn - i - 1];
  }
}
|
|
724
|
+
|
|
725
|
+
// Verifies that downgrading an exclusive lock to shared always succeeds
// immediately, even while other transactions are queued on the same key,
// and that the downgrade lets a compatible/queued waiter proceed.
TEST_P(SpotLockManagerTest, LockDownGradeWithOtherLockRequests) {
  // Test lock down grade always succeeds, even if there are other lock requests
  // waiting for the same lock.
  MockColumnFamilyHandle cf(1);
  locker_->AddColumnFamily(&cf);
  TransactionOptions txn_opt;
  txn_opt.deadlock_detect = true;
  txn_opt.lock_timeout = kLongTxnTimeoutMs;
  auto txn1 = NewTxn(txn_opt);
  auto txn2 = NewTxn(txn_opt);

  // Run the scenario with txn2 requesting an exclusive, then a shared lock.
  for (bool exclusive : {true, false}) {
    ASSERT_OK(locker_->TryLock(txn1, 1, "k1", env_, true));

    port::Thread t;
    BlockUntilWaitingTxn(wait_sync_point_name_, t, [this, &txn2, exclusive]() {
      // block because txn1 is holding an exclusive lock on k1.
      ASSERT_OK(locker_->TryLock(txn2, 1, "k1", env_, exclusive));
    });

    // txn1 downgrades the lock to shared lock, so txn2 could proceed
    // (immediately when txn2 wants shared; after the UnLock below otherwise).
    ASSERT_OK(locker_->TryLock(txn1, 1, "k1", env_, false));

    locker_->UnLock(txn1, 1, "k1", env_);
    t.join();
    locker_->UnLock(txn2, 1, "k1", env_);
  }

  // clean up
  delete txn2;
  delete txn1;
}
|
|
757
|
+
|
|
758
|
+
// Verifies lock acquisition timeout:
// the holder takes an exclusive lock on k1, then a second transaction's
// exclusive and shared requests on the same key both fail with TimedOut.
TEST_P(SpotLockManagerTest, LockTimeout) {
  MockColumnFamilyHandle column_family(1);
  locker_->AddColumnFamily(&column_family);

  TransactionOptions options;
  options.deadlock_detect = true;
  // Short timeout so the blocked requests below fail quickly.
  options.lock_timeout = kShortTxnTimeoutMs;

  auto holder = NewTxn(options);
  auto waiter = NewTxn(options);

  // The holder grabs the exclusive lock on k1 up front.
  ASSERT_OK(locker_->TryLock(holder, 1, "k1", env_, true));

  // Both an exclusive and a shared request must time out against it.
  const bool requested_modes[] = {true, false};
  for (const bool want_exclusive : requested_modes) {
    const auto status = locker_->TryLock(waiter, 1, "k1", env_, want_exclusive);
    ASSERT_TRUE(status.IsTimedOut());
  }

  // clean up
  locker_->UnLock(holder, 1, "k1", env_);
  delete waiter;
  delete holder;
}
|
|
783
|
+
|
|
784
|
+
// Verifies that a waiter whose timeout outlasts the holder's expiration can
// steal the expired lock instead of failing with TimedOut.
TEST_P(SpotLockManagerTest, ExpiredLockStolenAfterTimeout) {
  // validate an expired lock can be stolen by another transaction that timed
  // out on the lock.
  // txn1 acquires an exclusive lock on k1 successfully with a short expiration
  // time.
  // txn2 try to acquire a shared lock on k1 with timeout that is slightly
  // longer than the txn1 expiration.
  // Validate txn2 will take the lock.

  MockColumnFamilyHandle cf(1);
  locker_->AddColumnFamily(&cf);
  TransactionOptions txn_opt;
  txn_opt.deadlock_detect = true;
  // expiration (1s) < lock_timeout (2s), so txn2 outlives txn1's lock.
  txn_opt.expiration = 1000;
  txn_opt.lock_timeout = 1000 * 2;
  auto txn1 = NewTxn(txn_opt);
  auto txn2 = NewTxn(txn_opt);

  ASSERT_OK(locker_->TryLock(txn1, 1, "k1", env_, true));

  port::Thread t1;
  BlockUntilWaitingTxn(wait_sync_point_name_, t1, [this, &txn2]() {
    // block because txn1 is holding an exclusive lock on k1.
    ASSERT_OK(locker_->TryLock(txn2, 1, "k1", env_, false));
  });

  // join() returns only once txn2's TryLock succeeded, i.e. after txn1's
  // lock expired and was stolen.
  t1.join();

  // clean up
  locker_->UnLock(txn2, 1, "k1", env_);
  locker_->UnLock(txn1, 1, "k1", env_);

  delete txn2;
  delete txn1;
}
|
|
819
|
+
|
|
820
|
+
// Try to block until transaction enters waiting state.
|
|
821
|
+
// However due to timing, it could fail, so return true if succeeded, false
|
|
822
|
+
// otherwise.
|
|
823
|
+
bool TryBlockUntilWaitingTxn(const char* sync_point_name, port::Thread& t,
|
|
824
|
+
std::function<void()> function) {
|
|
825
|
+
std::atomic<bool> reached(false);
|
|
826
|
+
ROCKSDB_NAMESPACE::SyncPoint::GetInstance()->SetCallBack(
|
|
827
|
+
sync_point_name, [&](void* /*arg*/) { reached.store(true); });
|
|
828
|
+
ROCKSDB_NAMESPACE::SyncPoint::GetInstance()->EnableProcessing();
|
|
829
|
+
|
|
830
|
+
// As the lifetime of the complete variable could go beyond the scope of this
|
|
831
|
+
// function, so we wrap it in a shared_ptr, and copy it into the lambda
|
|
832
|
+
std::shared_ptr<std::atomic<bool>> complete =
|
|
833
|
+
std::make_shared<std::atomic<bool>>(false);
|
|
834
|
+
t = port::Thread([complete, &function]() {
|
|
835
|
+
function();
|
|
836
|
+
complete->store(true);
|
|
837
|
+
});
|
|
838
|
+
|
|
839
|
+
auto ret = false;
|
|
840
|
+
|
|
841
|
+
while (true) {
|
|
842
|
+
if (complete->load()) {
|
|
843
|
+
// function completed, before sync point was reached, return false
|
|
844
|
+
t.join();
|
|
845
|
+
ret = false;
|
|
846
|
+
break;
|
|
847
|
+
}
|
|
848
|
+
if (reached.load()) {
|
|
849
|
+
// sync point was reached before function completed, return true
|
|
850
|
+
ret = true;
|
|
851
|
+
break;
|
|
852
|
+
}
|
|
853
|
+
}
|
|
854
|
+
|
|
855
|
+
ROCKSDB_NAMESPACE::SyncPoint::GetInstance()->DisableProcessing();
|
|
856
|
+
ROCKSDB_NAMESPACE::SyncPoint::GetInstance()->ClearAllCallBacks();
|
|
857
|
+
return ret;
|
|
858
|
+
}
|
|
859
|
+
|
|
860
|
+
// Verifies that when an exclusive holder's lock expires, the FIRST queued
// exclusive waiter steals it (FIFO preserved), and later waiters proceed
// only after that waiter releases.
TEST_F(PerKeyPointLockManagerTest, LockStealAfterExpirationExclusive) {
  // There are multiple transactions waiting for the same lock.
  // txn1 acquires an exclusive lock on k1 successfully with a short expiration
  // time.
  // txn2 try to acquire an exclusive lock on k1, before expiration time,
  // so it is blocked and waits for txn1 lock expired.
  // txn3 try to acquire an exclusive lock on k1 after txn1 lock expires, FIFO
  // order is respected.
  // txn2 is woken up and takes the lock. unlock txn2, txn3 should proceed.

  txn_opt_.expiration = 1000;
  auto txn1 = NewTxn(txn_opt_);
  txn_opt_.expiration = -1;
  auto txn2 = NewTxn(txn_opt_);
  auto txn3 = NewTxn(txn_opt_);

  port::Thread t1;
  auto retry_times = 10;

  // Use a loop to reduce test flakiness.
  // that the test is flaky because the txn2 thread start could be delayed until
  // txn1 lock expired. In that case, txn2 will not enter into wait state, which
  // will defeat the test purpose. Use a loop to retry a few times, until it is
  // able to enter into wait state.
  while (retry_times--) {
    ASSERT_OK(locker_->TryLock(txn1, 1, "k1", env_, true));
    if (TryBlockUntilWaitingTxn(wait_sync_point_name_, t1, [this, &txn2]() {
          // block because txn1 is holding an exclusive lock on k1.
          ASSERT_OK(locker_->TryLock(txn2, 1, "k1", env_, true));
        })) {
      break;
    }
    // failed, retry again
    locker_->UnLock(txn1, 1, "k1", env_);
    locker_->UnLock(txn2, 1, "k1", env_);
  }
  // make sure txn2 was able to reach the wait state before proceeding.
  // retry_times is 0 when the FINAL attempt succeeded and -1 only when every
  // attempt failed, so GE (not GT) is the correct check here.
  ASSERT_GE(retry_times, 0);

  // txn3 try to acquire an exclusive lock on k1, FIFO order is respected.
  port::Thread t2;
  BlockUntilWaitingTxn(wait_sync_point_name_, t2, [this, &txn3]() {
    // block behind txn2, which is first in line for the lock on k1.
    ASSERT_OK(locker_->TryLock(txn3, 1, "k1", env_, true));
  });

  // validate txn2 is woken up and takes the lock
  t1.join();

  // unlock txn2, txn3 should proceed
  locker_->UnLock(txn2, 1, "k1", env_);
  t2.join();

  // clean up
  locker_->UnLock(txn3, 1, "k1", env_);

  delete txn3;
  delete txn2;
  delete txn1;
}
|
|
920
|
+
|
|
921
|
+
// Verifies that when a SHARED holder's lock expires, the queued exclusive
// waiter steals it (FIFO preserved), and a later shared request does not
// jump the queue.
TEST_F(PerKeyPointLockManagerTest, LockStealAfterExpirationShared) {
  // There are multiple transactions waiting for the same lock.
  // txn1 acquires a shared lock on k1 successfully with a short expiration
  // time.
  // txn2 try to acquire an exclusive lock on k1, before expiration time,
  // so it is blocked and waits for txn1 lock expired.
  // txn3 try to acquire a shared lock on k1 after txn1 lock expires, FIFO
  // order is respected.
  // txn2 is woken up and takes the lock. unlock txn2, txn3 should proceed.

  txn_opt_.expiration = 1000;
  auto txn1 = NewTxn(txn_opt_);
  txn_opt_.expiration = -1;
  auto txn2 = NewTxn(txn_opt_);
  auto txn3 = NewTxn(txn_opt_);

  port::Thread t1;
  auto retry_times = 10;

  // Use a loop to reduce test flakiness.
  // that the test is flaky because the txn2 thread start could be delayed until
  // txn1 lock expired. In that case, txn2 will not enter into wait state, which
  // will defeat the test purpose. Use a loop to retry a few times, until it is
  // able to enter into wait state.
  while (retry_times--) {
    ASSERT_OK(locker_->TryLock(txn1, 1, "k1", env_, false));
    if (TryBlockUntilWaitingTxn(wait_sync_point_name_, t1, [this, &txn2]() {
          // block because txn1 is holding a shared lock on k1.
          ASSERT_OK(locker_->TryLock(txn2, 1, "k1", env_, true));
        })) {
      break;
    }
    // failed, retry again
    locker_->UnLock(txn1, 1, "k1", env_);
    locker_->UnLock(txn2, 1, "k1", env_);
  }
  // make sure txn2 was able to reach the wait state before proceeding.
  // retry_times is 0 when the FINAL attempt succeeded and -1 only when every
  // attempt failed, so GE (not GT) is the correct check here.
  ASSERT_GE(retry_times, 0);

  // txn3 try to acquire a shared lock on k1, FIFO order is respected.
  port::Thread t2;
  BlockUntilWaitingTxn(wait_sync_point_name_, t2, [this, &txn3]() {
    // block behind txn2, which is first in line for the lock on k1.
    ASSERT_OK(locker_->TryLock(txn3, 1, "k1", env_, false));
  });

  // validate txn2 is woken up and takes the lock
  t1.join();

  // unlock txn2, txn3 should proceed
  locker_->UnLock(txn2, 1, "k1", env_);
  t2.join();

  // clean up
  locker_->UnLock(txn3, 1, "k1", env_);

  delete txn3;
  delete txn2;
  delete txn1;
}
|
|
981
|
+
|
|
982
|
+
// Verifies deadlock detection when the cycle involves a transaction that is
// still a WAITER (not yet a holder) at the time the dependency forms.
TEST_F(PerKeyPointLockManagerTest, DeadLockOnWaiter) {
  // Txn1 acquires exclusive lock on k1
  // Txn3 acquires shared lock on k2
  // Txn2 tries to acquire exclusive lock on k1, waiting in the waiter queue.
  // Txn3 tries to acquire exclusive lock on k1, waiting in the waiter queue.
  // Txn3 depends on both Txn1 and Txn2. Txn1 unlocks k1.
  // Txn2 takes the lock k1, and tries to acquire lock k2.
  // Now Txn2 depends on Txn3.
  // Deadlock is detected, and Txn2 is aborted.

  auto txn1 = NewTxn(txn_opt_);
  auto txn2 = NewTxn(txn_opt_);
  auto txn3 = NewTxn(txn_opt_);

  ASSERT_OK(locker_->TryLock(txn1, 1, "k1", env_, true));
  ASSERT_OK(locker_->TryLock(txn3, 1, "k2", env_, false));

  port::Thread t1;
  BlockUntilWaitingTxn(wait_sync_point_name_, t1, [this, &txn2]() {
    // First blocks behind txn1 on k1; once granted, the k2 request closes
    // the txn2 -> txn3 -> txn2 cycle and must fail with Deadlock.
    ASSERT_OK(locker_->TryLock(txn2, 1, "k1", env_, true));
    auto s = locker_->TryLock(txn2, 1, "k2", env_, true);
    ASSERT_TRUE(s.IsDeadlock());
  });

  port::Thread t2;
  BlockUntilWaitingTxn(wait_sync_point_name_, t2, [this, &txn3]() {
    // Queued on k1 behind txn1 (holder) and txn2 (earlier waiter).
    ASSERT_OK(locker_->TryLock(txn3, 1, "k1", env_, true));
  });

  // Release k1 so txn2 (first waiter) takes it and hits the deadlock.
  locker_->UnLock(txn1, 1, "k1", env_);

  t1.join();

  // Release k1 again so txn3 can finally acquire it.
  locker_->UnLock(txn2, 1, "k1", env_);
  t2.join();

  // clean up
  locker_->UnLock(txn3, 1, "k1", env_);
  locker_->UnLock(txn3, 1, "k2", env_);

  delete txn3;
  delete txn2;
  delete txn1;
}
|
|
1026
|
+
|
|
1027
|
+
// Regression test for a shared-lock wakeup race: a new shared request that
// arrives while woken-up shared waiters have not yet re-taken the lock must
// be granted immediately rather than queued (queueing would strand it until
// the last shared lock is released).
TEST_F(PerKeyPointLockManagerTest, SharedLockRaceCondition) {
  // Verify a shared lock race condition is handled properly.
  // When there are waiters in the queue, and all of them are shared waiters,
  // and no one has taken the lock and all of them just got woken up and not
  // yet taken the lock yet. A new shared lock request should be granted
  // directly, without wait in the queue. If it did, It would not be woken up
  // until the last shared lock is released.

  // Disable deadlock detection timeout to prevent test flakyness.
  deadlock_timeout_us = 0;
  auto txn1 = NewTxn(txn_opt_);
  auto txn2 = NewTxn(txn_opt_);
  auto txn3 = NewTxn(txn_opt_);

  // Freeze txn2 between "woken up" and "takes the lock" so txn3's request
  // lands exactly inside that window.
  SyncPoint::GetInstance()->DisableProcessing();
  SyncPoint::GetInstance()->LoadDependency(
      {{"PerKeyPointLockManager::AcquireWithTimeout:AfterWokenUp",
        "PerKeyPointLockManagerTest::SharedLockRaceCondition:"
        "BeforeNewSharedLockRequest"},
       {"PerKeyPointLockManagerTest::SharedLockRaceCondition:"
        "AfterNewSharedLockRequest",
        "PerKeyPointLockManager::AcquireWithTimeout:BeforeTakeLock"}});

  std::atomic<bool> reached(false);
  SyncPoint::GetInstance()->SetCallBack(
      wait_sync_point_name_,
      [&reached](void* /*arg*/) { reached.store(true); });

  SyncPoint::GetInstance()->EnableProcessing();

  // txn1 acquires an exclusive lock on k1, so that the following shared lock
  // request would be blocked
  ASSERT_OK(locker_->TryLock(txn1, 1, "k1", env_, true));

  // txn2 try to acquire a shared lock on k1, and get blocked
  auto t1 = port::Thread([this, &txn2]() {
    ASSERT_OK(locker_->TryLock(txn2, 1, "k1", env_, false));
  });

  // wait until txn2 has reached the wait state
  while (!reached.load()) {
    std::this_thread::sleep_for(std::chrono::milliseconds(100));
  }

  // unlock txn1, txn2 should be woken up, but txn2 stops on the sync point
  locker_->UnLock(txn1, 1, "k1", env_);

  // Use sync point to simulate the race condition.
  // txn3 tries to take the lock right after txn2 is woken up, but before it
  // takes the lock
  TEST_SYNC_POINT(
      "PerKeyPointLockManagerTest::SharedLockRaceCondition:"
      "BeforeNewSharedLockRequest");

  // txn3 try to acquire a shared lock on k1, and get granted immediately
  ASSERT_OK(locker_->TryLock(txn3, 1, "k1", env_, false));

  // release txn2 from the sync point so it can take the lock
  TEST_SYNC_POINT(
      "PerKeyPointLockManagerTest::SharedLockRaceCondition:"
      "AfterNewSharedLockRequest");

  // validate txn2 is woken up and takes the lock
  t1.join();

  // cleanup
  locker_->UnLock(txn2, 1, "k1", env_);
  locker_->UnLock(txn3, 1, "k1", env_);

  delete txn3;
  delete txn2;
  delete txn1;
}
|
|
1098
|
+
|
|
1099
|
+
// Regression test for an upgrade-lock race: a shared requester that is
// granted immediately (because woken shared waiters have not yet re-taken
// the lock) and then upgrades to exclusive must still record dependencies on
// the pending shared waiters, so a later upgrade by one of those waiters is
// detected as a deadlock rather than hanging.
TEST_F(PerKeyPointLockManagerTest, UpgradeLockRaceCondition) {
  // Verify an upgrade lock race condition is handled properly.
  // When a key is locked in exclusive mode, shared lock waiters will be
  // enqueued as waiters.
  // When the exclusive lock holder releases the lock, the shared lock waiters
  // are woken up to take the lock. At this point, when a new shared lock
  // requester comes in, it will take the lock directly without waiting or
  // queueing. This requester then immediately upgrades the lock to exclusive
  // lock. This request will be prioritized to the head of the queue.
  // Meantime, it should also depend on the shared lock waiters which are still
  // in the queue that are ready to take the lock. Later, when one of the
  // reader locks wants to also upgrade its lock, it will detect a deadlock
  // and abort.

  auto txn1 = NewTxn(txn_opt_);
  auto txn2 = NewTxn(txn_opt_);
  auto txn3 = NewTxn(txn_opt_);

  // Freeze txn2 between "woken up" and "takes the lock" so txn3's request
  // lands exactly inside that window.
  SyncPoint::GetInstance()->DisableProcessing();
  SyncPoint::GetInstance()->LoadDependency(
      {{"PerKeyPointLockManager::AcquireWithTimeout:AfterWokenUp",
        "PerKeyPointLockManagerTest::UpgradeLockRaceCondition:"
        "BeforeNewSharedLockRequest"},
       {"PerKeyPointLockManagerTest::UpgradeLockRaceCondition:"
        "AfterNewSharedLockRequest",
        "PerKeyPointLockManager::AcquireWithTimeout:BeforeTakeLock"}});

  std::atomic<bool> reached(false);
  SyncPoint::GetInstance()->SetCallBack(
      wait_sync_point_name_,
      [&reached](void* /*arg*/) { reached.store(true); });

  SyncPoint::GetInstance()->EnableProcessing();

  // txn1 acquires an exclusive lock on k1, so that the following shared lock
  // request would be blocked
  ASSERT_OK(locker_->TryLock(txn1, 1, "k1", env_, true));

  auto t1 = port::Thread([this, &txn2]() {
    // txn2 try to acquire a shared lock on k1, and get blocked
    ASSERT_OK(locker_->TryLock(txn2, 1, "k1", env_, false));
  });

  // wait until txn2 has reached the wait state
  while (!reached.load()) {
    std::this_thread::sleep_for(std::chrono::milliseconds(100));
  }

  // unlock txn1, txn2 should be woken up, but txn2 stops on the sync point
  locker_->UnLock(txn1, 1, "k1", env_);

  // Use sync point to simulate the race condition.
  // txn3 tries to take the lock right after txn2 is woken up, but before it
  // takes the lock
  TEST_SYNC_POINT(
      "PerKeyPointLockManagerTest::UpgradeLockRaceCondition:"
      "BeforeNewSharedLockRequest");

  // txn3 try to acquire a shared lock on k1, and get granted immediately
  ASSERT_OK(locker_->TryLock(txn3, 1, "k1", env_, false));

  // txn3 try to upgrade its lock to exclusive lock and get blocked.
  // (reuses `reached` to detect that the upgrade request is waiting)
  reached = false;
  auto t2 = port::Thread([this, &txn3]() {
    ASSERT_OK(locker_->TryLock(txn3, 1, "k1", env_, true));
  });

  while (!reached.load()) {
    std::this_thread::sleep_for(std::chrono::milliseconds(100));
  }

  // release txn2 from the sync point so it can take the shared lock
  TEST_SYNC_POINT(
      "PerKeyPointLockManagerTest::UpgradeLockRaceCondition:"
      "AfterNewSharedLockRequest");

  // validate txn2 is woken up and takes the shared lock
  t1.join();

  // validate txn2 would get deadlock when it try to upgrade its lock to
  // exclusive
  auto s = locker_->TryLock(txn2, 1, "k1", env_, true);
  ASSERT_TRUE(s.IsDeadlock());

  // cleanup
  locker_->UnLock(txn2, 1, "k1", env_);
  t2.join();
  locker_->UnLock(txn3, 1, "k1", env_);

  delete txn3;
  delete txn2;
  delete txn1;
}
|
|
1189
|
+
|
|
1190
|
+
TEST_P(SpotLockManagerTest, Catch22) {
  // Benchmark the overhead of one transaction depends on another in a circle
  // repeatedly.
  //
  // Two threads ping-pong exclusive ownership of key "k1": each iteration one
  // thread parks in the lock manager's waiter queue (detected via sync point),
  // the owner releases, and the waiter takes over. wait_count's parity is used
  // as the handoff signal between the two threads.

  MockColumnFamilyHandle cf(1);
  locker_->AddColumnFamily(&cf);
  TransactionOptions txn_opt;
  txn_opt.deadlock_detect = true;
  // Long timeouts so a slow scheduler does not turn a handoff into a spurious
  // lock-timeout failure.
  txn_opt.lock_timeout = kLongTxnTimeoutMs;
  txn_opt.expiration = kLongTxnTimeoutMs;

  auto txn1 = NewTxn(txn_opt);
  auto txn2 = NewTxn(txn_opt);

  // use a wait count to count the number of times the lock is waited inside
  // transaction lock
  std::atomic_int wait_count(0);

  SyncPoint::GetInstance()->DisableProcessing();
  if (GetParam().use_per_key_point_lock_manager &&
      GetParam().deadlock_timeout_us != 0) {
    // Use special sync point when deadlock timeout is enabled, so the test run
    // faster (counts the waiter before deadlock detection kicks in).
    SyncPoint::GetInstance()->SetCallBack(
        "PerKeyPointLockManager::AcquireWithTimeout:"
        "WaitingTxnBeforeDeadLockDetection",
        [&wait_count](void* /*arg*/) { wait_count++; });
  } else {
    // PointLockManager (or PerKeyPointLockManager without deadlock timeout):
    // use the generic per-fixture wait sync point instead.
    SyncPoint::GetInstance()->SetCallBack(
        wait_sync_point_name_, [&wait_count](void* /*arg*/) { wait_count++; });
  }
  SyncPoint::GetInstance()->EnableProcessing();

  // txn1 X lock — seed the ping-pong: txn1 owns "k1" before the threads start.
  ASSERT_OK(locker_->TryLock(txn1, 1, "k1", env_, true));

  std::mutex coordinator_mutex;
  int iteration_count = 10000;

  // txn1 try to lock X lock in a loop.
  // Invariant: t1 releases when wait_count is odd (txn2 is queued) and
  // re-acquires under coordinator_mutex so the two threads alternate.
  auto t1 = port::Thread(
      [this, &txn1, &wait_count, &coordinator_mutex, &iteration_count]() {
        while (wait_count.load() < iteration_count) {
          // spin wait until the other thread enters the lock waiter queue.
          while (wait_count.load() % 2 == 0);
          // unlock the lock, so that the other thread can acquire the lock
          locker_->UnLock(txn1, 1, "k1", env_);
          {
            // Use the coordinator mutex to make sure the other thread has been
            // waked up and acquired the lock, before this thread try to acquire
            // the lock again.
            std::scoped_lock<std::mutex> lock(coordinator_mutex);
            ASSERT_OK(locker_->TryLock(txn1, 1, "k1", env_, true));
          }
        }
        // Final release so the test does not leave "k1" held on exit.
        locker_->UnLock(txn1, 1, "k1", env_);
      });

  // txn2 try to lock X lock in a loop — mirror image of t1: it acquires under
  // coordinator_mutex first, then releases when wait_count parity flips back.
  auto t2 = port::Thread(
      [this, &txn2, &wait_count, &coordinator_mutex, &iteration_count]() {
        while (wait_count.load() < iteration_count) {
          {
            // Use the coordinator mutex to make sure the other thread has been
            // waked up and acquired the lock, before this thread try to acquire
            // the lock again.
            std::scoped_lock<std::mutex> lock(coordinator_mutex);
            ASSERT_OK(locker_->TryLock(txn2, 1, "k1", env_, true));
          }
          // spin wait until the other thread enters the lock waiter queue.
          while (wait_count.load() % 2 == 1);
          // unlock the lock, so that the other thread can acquire the lock
          locker_->UnLock(txn2, 1, "k1", env_);
        }
      });

  // clean up
  t1.join();
  t2.join();

  delete txn2;
  delete txn1;
}
|
|
1274
|
+
|
|
1275
|
+
TEST_F(PerKeyPointLockManagerTest, LockUpgradeOrdering) {
  // When lock is upgraded, verify that it will only upgrade its lock after all
  // the shared lock that are before the first exclusive lock in the lock wait
  // queue.
  //
  // Scenario: txn1 holds X; txn2/txn3/txn4 queue for S. txn1 unlocks; txn2 and
  // txn3 take S while txn4 is deliberately stalled (blocked on txn4_mutex
  // inside the AfterWokenUp sync-point callback). txn2 then requests an
  // upgrade to X. The assertion is that the upgrade is NOT granted until txn4
  // — a shared waiter queued before the upgrade request — has taken and
  // released its S lock.

  auto txn1 = NewTxn(txn_opt_);
  auto txn2 = NewTxn(txn_opt_);
  auto txn3 = NewTxn(txn_opt_);
  auto txn4 = NewTxn(txn_opt_);

  // txn4_lock is held by the main thread for most of the test; the sync-point
  // callback below blocks on it to freeze txn4 right after it is woken up.
  std::mutex txn4_mutex;
  std::unique_lock<std::mutex> txn4_lock(txn4_mutex);
  std::atomic_bool txn4_waked_up(false);
  std::atomic_int wait_count(0);

  SyncPoint::GetInstance()->DisableProcessing();
  // Count every transaction that enters the lock manager's wait state.
  SyncPoint::GetInstance()->SetCallBack(
      wait_sync_point_name_, [&wait_count](void* /*arg*/) { wait_count++; });
  SyncPoint::GetInstance()->SetCallBack(
      "PerKeyPointLockManager::AcquireWithTimeout:AfterWokenUp",
      [&txn4, &txn4_mutex, &txn4_waked_up](void* arg) {
        // arg carries the woken transaction's id; only stall txn4.
        auto transaction_id = *(static_cast<TransactionID*>(arg));
        if (transaction_id == txn4->GetID()) {
          txn4_waked_up.store(true);
          {
            // wait for txn4 mutex to be released, so that this thread will be
            // blocked.
            std::scoped_lock<std::mutex> lock(txn4_mutex);
          }
        }
      });
  SyncPoint::GetInstance()->EnableProcessing();

  // Txn1 X lock
  ASSERT_OK(locker_->TryLock(txn1, 1, "k1", env_, true));

  // Txn2,3,4 try S lock (all block behind txn1's X lock)
  port::Thread t1([this, &txn2]() {
    ASSERT_OK(locker_->TryLock(txn2, 1, "k1", env_, false));
  });
  port::Thread t2([this, &txn3]() {
    ASSERT_OK(locker_->TryLock(txn3, 1, "k1", env_, false));
  });
  port::Thread t3([this, &txn4]() {
    ASSERT_OK(locker_->TryLock(txn4, 1, "k1", env_, false));
  });

  // wait for all 3 transactions to enter wait state
  while (wait_count.load() < 3) {
    std::this_thread::sleep_for(std::chrono::milliseconds(1));
  }

  // Txn1 unlock
  locker_->UnLock(txn1, 1, "k1", env_);

  // Txn2,3 take S lock
  t1.join();
  t2.join();

  // wait for txn4 to be woken up, otherwise txn2 will get deadlock
  while (!txn4_waked_up.load()) {
    std::this_thread::sleep_for(std::chrono::milliseconds(1));
  }

  // Txn2 try X lock (upgrade from its S lock)
  std::atomic_bool txn2_exclusive_lock_acquired(false);
  port::Thread t4([this, &txn2, &txn2_exclusive_lock_acquired]() {
    ASSERT_OK(locker_->TryLock(txn2, 1, "k1", env_, true));
    txn2_exclusive_lock_acquired.store(true);
  });

  // wait for txn2 to enter wait state
  while (wait_count.load() < 4) {
    std::this_thread::sleep_for(std::chrono::milliseconds(1));
  }

  // Txn3 release S lock
  locker_->UnLock(txn3, 1, "k1", env_);

  // Validate Txn2 has not acquired the lock yet — txn4's pending S request
  // must still be honored before the upgrade.
  ASSERT_FALSE(txn2_exclusive_lock_acquired.load());

  // Txn4 take S lock: releasing txn4_lock unblocks the sync-point callback.
  txn4_lock.unlock();
  t3.join();

  // Once Txn4 releases its S lock, Txn2's upgrade to X should be granted.
  locker_->UnLock(txn4, 1, "k1", env_);
  t4.join();
  ASSERT_TRUE(txn2_exclusive_lock_acquired.load());

  // release lock clean up
  locker_->UnLock(txn2, 1, "k1", env_);

  delete txn4;
  delete txn3;
  delete txn2;
  delete txn1;
}
|
|
1374
|
+
|
|
1375
|
+
TEST_F(PerKeyPointLockManagerTest, LockDownGradeRaceCondition) {
  // When a lock is downgraded, it should notify all the shared waiters in the
  // queue to take the lock.
  //
  // txn1 holds X on "k1"; txn2 blocks waiting for S. txn1 then re-requests S
  // (a downgrade). If the downgrade fails to wake shared waiters, t1.join()
  // below would hang until txn2 times out.

  auto txn1 = NewTxn(txn_opt_);
  auto txn2 = NewTxn(txn_opt_);

  // Txn1 X lock
  ASSERT_OK(locker_->TryLock(txn1, 1, "k1", env_, true));

  // Txn2 try S lock; BlockUntilWaitingTxn returns once txn2 has reached the
  // wait sync point (i.e. it is parked in the waiter queue).
  port::Thread t1;
  BlockUntilWaitingTxn(wait_sync_point_name_, t1, [this, &txn2]() {
    ASSERT_OK(locker_->TryLock(txn2, 1, "k1", env_, false));
  });

  // Txn1 downgrade to S lock
  ASSERT_OK(locker_->TryLock(txn1, 1, "k1", env_, false));

  // Txn2 take S lock (both transactions now share the lock)
  t1.join();

  // clean up
  locker_->UnLock(txn1, 1, "k1", env_);
  locker_->UnLock(txn2, 1, "k1", env_);

  delete txn2;
  delete txn1;
}
|
|
1404
|
+
|
|
1405
|
+
// Run AnyLockManagerTest with PointLockManager
// (a null setup parameter selects the default PointLockManager fixture path).
INSTANTIATE_TEST_CASE_P(PointLockManager, AnyLockManagerTest,
                        ::testing::Values(nullptr));

// Run AnyLockManagerTest with PerKeyPointLockManager
// Setup hook parameterized on the deadlock timeout (in microseconds); each N
// instantiates the suite against a differently configured lock manager.
template <int64_t N>
void PerKeyPointLockManagerTestSetup(PointLockManagerTest* self) {
  self->init();
  self->deadlock_timeout_us = N;
  self->UsePerKeyPointLockManager();
}

// Exercise PerKeyPointLockManager with deadlock timeout disabled (0) and two
// enabled settings (100us, 1000us).
INSTANTIATE_TEST_CASE_P(
    PerLockPointLockManager, AnyLockManagerTest,
    ::testing::Values(PerKeyPointLockManagerTestSetup<0>,
                      PerKeyPointLockManagerTestSetup<100>,
                      PerKeyPointLockManagerTestSetup<1000>));

// Run PointLockManagerTest with PerLockPointLockManager and PointLockManager
// Param shape is {use_per_key_point_lock_manager, deadlock_timeout_us}; the
// final {false, 0} entry covers the legacy PointLockManager.
INSTANTIATE_TEST_CASE_P(
    PointLockCorrectnessCheckTestSuite, SpotLockManagerTest,
    ::testing::ValuesIn(std::vector<SpotLockManagerTestParam>{
        {true, 0}, {true, 100}, {true, 1000}, {false, 0}}));
|
|
1428
|
+
|
|
162
1429
|
} // namespace ROCKSDB_NAMESPACE
|
|
163
1430
|
|
|
164
1431
|
int main(int argc, char** argv) {
|