@nxtedition/rocksdb 7.1.5 → 7.1.8

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (67)
  1. package/binding.cc +32 -14
  2. package/deps/rocksdb/rocksdb/cache/cache.cc +4 -0
  3. package/deps/rocksdb/rocksdb/cache/cache_bench_tool.cc +6 -8
  4. package/deps/rocksdb/rocksdb/cache/cache_key.cc +184 -164
  5. package/deps/rocksdb/rocksdb/cache/cache_key.h +38 -29
  6. package/deps/rocksdb/rocksdb/cache/cache_reservation_manager_test.cc +4 -4
  7. package/deps/rocksdb/rocksdb/cache/clock_cache.cc +4 -2
  8. package/deps/rocksdb/rocksdb/cache/compressed_secondary_cache.cc +11 -9
  9. package/deps/rocksdb/rocksdb/cache/compressed_secondary_cache.h +1 -1
  10. package/deps/rocksdb/rocksdb/cache/compressed_secondary_cache_test.cc +28 -18
  11. package/deps/rocksdb/rocksdb/cache/lru_cache.cc +86 -17
  12. package/deps/rocksdb/rocksdb/cache/lru_cache.h +48 -8
  13. package/deps/rocksdb/rocksdb/cache/lru_cache_test.cc +356 -153
  14. package/deps/rocksdb/rocksdb/db/blob/blob_file_builder.cc +3 -7
  15. package/deps/rocksdb/rocksdb/db/blob/blob_source.cc +4 -5
  16. package/deps/rocksdb/rocksdb/db/blob/blob_source.h +2 -3
  17. package/deps/rocksdb/rocksdb/db/blob/blob_source_test.cc +12 -4
  18. package/deps/rocksdb/rocksdb/db/blob/db_blob_compaction_test.cc +69 -0
  19. package/deps/rocksdb/rocksdb/db/compaction/compaction_iterator.cc +6 -1
  20. package/deps/rocksdb/rocksdb/db/compaction/compaction_job.cc +4 -1
  21. package/deps/rocksdb/rocksdb/db/db_block_cache_test.cc +222 -182
  22. package/deps/rocksdb/rocksdb/db/db_kv_checksum_test.cc +239 -23
  23. package/deps/rocksdb/rocksdb/db/db_test2.cc +6 -2
  24. package/deps/rocksdb/rocksdb/db/event_helpers.cc +2 -1
  25. package/deps/rocksdb/rocksdb/db/import_column_family_job.cc +6 -0
  26. package/deps/rocksdb/rocksdb/db/import_column_family_job.h +6 -0
  27. package/deps/rocksdb/rocksdb/db/import_column_family_test.cc +6 -0
  28. package/deps/rocksdb/rocksdb/db/kv_checksum.h +8 -4
  29. package/deps/rocksdb/rocksdb/db/memtable.cc +173 -33
  30. package/deps/rocksdb/rocksdb/db/memtable.h +10 -0
  31. package/deps/rocksdb/rocksdb/db/table_cache_sync_and_async.h +2 -1
  32. package/deps/rocksdb/rocksdb/db/version_set.cc +37 -18
  33. package/deps/rocksdb/rocksdb/db/version_set_sync_and_async.h +2 -1
  34. package/deps/rocksdb/rocksdb/db_stress_tool/db_stress_common.h +1 -0
  35. package/deps/rocksdb/rocksdb/db_stress_tool/db_stress_gflags.cc +6 -0
  36. package/deps/rocksdb/rocksdb/db_stress_tool/db_stress_test_base.cc +2 -0
  37. package/deps/rocksdb/rocksdb/include/rocksdb/advanced_options.h +15 -0
  38. package/deps/rocksdb/rocksdb/include/rocksdb/cache.h +31 -6
  39. package/deps/rocksdb/rocksdb/memory/memory_allocator_test.cc +1 -1
  40. package/deps/rocksdb/rocksdb/options/cf_options.cc +4 -0
  41. package/deps/rocksdb/rocksdb/options/cf_options.h +4 -0
  42. package/deps/rocksdb/rocksdb/options/options_helper.cc +2 -0
  43. package/deps/rocksdb/rocksdb/options/options_settable_test.cc +2 -1
  44. package/deps/rocksdb/rocksdb/table/block_based/block_based_table_builder.cc +2 -6
  45. package/deps/rocksdb/rocksdb/table/block_based/block_based_table_factory.cc +1 -0
  46. package/deps/rocksdb/rocksdb/table/block_based/block_based_table_reader.cc +2 -4
  47. package/deps/rocksdb/rocksdb/table/block_based/block_based_table_reader.h +1 -7
  48. package/deps/rocksdb/rocksdb/table/block_based/block_based_table_reader_sync_and_async.h +2 -1
  49. package/deps/rocksdb/rocksdb/table/unique_id.cc +22 -24
  50. package/deps/rocksdb/rocksdb/table/unique_id_impl.h +2 -1
  51. package/deps/rocksdb/rocksdb/tools/block_cache_analyzer/block_cache_trace_analyzer_plot.py +7 -0
  52. package/deps/rocksdb/rocksdb/tools/db_bench_tool.cc +27 -3
  53. package/deps/rocksdb/rocksdb/util/async_file_reader.cc +2 -1
  54. package/deps/rocksdb/rocksdb/util/async_file_reader.h +3 -3
  55. package/deps/rocksdb/rocksdb/util/coro_utils.h +2 -1
  56. package/deps/rocksdb/rocksdb/util/hash_test.cc +67 -0
  57. package/deps/rocksdb/rocksdb/util/math.h +41 -0
  58. package/deps/rocksdb/rocksdb/util/math128.h +6 -0
  59. package/deps/rocksdb/rocksdb/util/single_thread_executor.h +2 -1
  60. package/deps/rocksdb/rocksdb/utilities/cache_dump_load_impl.cc +3 -6
  61. package/deps/rocksdb/rocksdb/utilities/transactions/lock/point/point_lock_manager_test.h +5 -0
  62. package/deps/rocksdb/rocksdb/utilities/transactions/lock/range/range_lock_manager.h +6 -0
  63. package/index.js +15 -6
  64. package/package.json +1 -1
  65. package/prebuilds/darwin-arm64/node.napi.node +0 -0
  66. package/prebuilds/darwin-x64/node.napi.node +0 -0
  67. package/prebuilds/linux-x64/node.napi.node +0 -0
@@ -819,8 +819,8 @@ class MockCache : public LRUCache {
819
819
 
820
820
  MockCache()
821
821
  : LRUCache((size_t)1 << 25 /*capacity*/, 0 /*num_shard_bits*/,
822
- false /*strict_capacity_limit*/, 0.0 /*high_pri_pool_ratio*/) {
823
- }
822
+ false /*strict_capacity_limit*/, 0.0 /*high_pri_pool_ratio*/,
823
+ 0.0 /*low_pri_pool_ratio*/) {}
824
824
 
825
825
  using ShardedCache::Insert;
826
826
 
@@ -1736,51 +1736,60 @@ TEST_P(DBBlockCacheKeyTest, StableCacheKeys) {
1736
1736
 
1737
1737
  class CacheKeyTest : public testing::Test {
1738
1738
  public:
1739
- void SetupStableBase() {
1739
+ CacheKey GetBaseCacheKey() {
1740
+ CacheKey rv = GetOffsetableCacheKey(0, /*min file_number*/ 1).WithOffset(0);
1741
+ // Correct for file_number_ == 1
1742
+ *reinterpret_cast<uint64_t*>(&rv) ^= ReverseBits(uint64_t{1});
1743
+ return rv;
1744
+ }
1745
+ CacheKey GetCacheKey(uint64_t session_counter, uint64_t file_number,
1746
+ uint64_t offset) {
1747
+ OffsetableCacheKey offsetable =
1748
+ GetOffsetableCacheKey(session_counter, file_number);
1749
+ // * 4 to counteract optimization that strips lower 2 bits in encoding
1750
+ // the offset in BlockBasedTable::GetCacheKey (which we prefer to include
1751
+ // in unit tests to maximize functional coverage).
1752
+ EXPECT_GE(offset * 4, offset); // no overflow
1753
+ return BlockBasedTable::GetCacheKey(offsetable,
1754
+ BlockHandle(offset * 4, /*size*/ 5));
1755
+ }
1756
+
1757
+ protected:
1758
+ OffsetableCacheKey GetOffsetableCacheKey(uint64_t session_counter,
1759
+ uint64_t file_number) {
1740
1760
  // Like SemiStructuredUniqueIdGen::GenerateNext
1741
1761
  tp_.db_session_id = EncodeSessionId(base_session_upper_,
1742
- base_session_lower_ ^ session_counter_);
1762
+ base_session_lower_ ^ session_counter);
1743
1763
  tp_.db_id = std::to_string(db_id_);
1744
- tp_.orig_file_number = file_number_;
1764
+ tp_.orig_file_number = file_number;
1745
1765
  bool is_stable;
1746
1766
  std::string cur_session_id = ""; // ignored
1747
1767
  uint64_t cur_file_number = 42; // ignored
1768
+ OffsetableCacheKey rv;
1748
1769
  BlockBasedTable::SetupBaseCacheKey(&tp_, cur_session_id, cur_file_number,
1749
- file_size_, &base_cache_key_,
1750
- &is_stable);
1751
- ASSERT_TRUE(is_stable);
1752
- }
1753
- CacheKey WithOffset(uint64_t offset) {
1754
- return BlockBasedTable::GetCacheKey(base_cache_key_,
1755
- BlockHandle(offset, /*size*/ 5));
1770
+ &rv, &is_stable);
1771
+ EXPECT_TRUE(is_stable);
1772
+ EXPECT_TRUE(!rv.IsEmpty());
1773
+ // BEGIN some assertions in relation to SST unique IDs
1774
+ std::string external_unique_id_str;
1775
+ EXPECT_OK(GetUniqueIdFromTableProperties(tp_, &external_unique_id_str));
1776
+ UniqueId64x2 sst_unique_id = {};
1777
+ EXPECT_OK(DecodeUniqueIdBytes(external_unique_id_str, &sst_unique_id));
1778
+ ExternalUniqueIdToInternal(&sst_unique_id);
1779
+ OffsetableCacheKey ock =
1780
+ OffsetableCacheKey::FromInternalUniqueId(&sst_unique_id);
1781
+ EXPECT_EQ(rv.WithOffset(0).AsSlice(), ock.WithOffset(0).AsSlice());
1782
+ EXPECT_EQ(ock.ToInternalUniqueId(), sst_unique_id);
1783
+ // END some assertions in relation to SST unique IDs
1784
+ return rv;
1756
1785
  }
1757
1786
 
1758
- protected:
1759
- OffsetableCacheKey base_cache_key_;
1760
1787
  TableProperties tp_;
1761
- uint64_t file_size_ = 0;
1762
1788
  uint64_t base_session_upper_ = 0;
1763
1789
  uint64_t base_session_lower_ = 0;
1764
- uint64_t session_counter_ = 0;
1765
- uint64_t file_number_ = 0;
1766
1790
  uint64_t db_id_ = 0;
1767
1791
  };
1768
1792
 
1769
- namespace {
1770
- template <typename T>
1771
- int CountBitsDifferent(const T& t1, const T& t2) {
1772
- int diff = 0;
1773
- const uint8_t* p1 = reinterpret_cast<const uint8_t*>(&t1);
1774
- const uint8_t* p2 = reinterpret_cast<const uint8_t*>(&t2);
1775
- static_assert(sizeof(*p1) == 1, "Expecting uint8_t byte");
1776
- for (size_t i = 0; i < sizeof(T); ++i) {
1777
- diff += BitsSetToOne(p1[i] ^ p2[i]);
1778
- }
1779
- return diff;
1780
- }
1781
-
1782
- } // namespace
1783
-
1784
1793
  TEST_F(CacheKeyTest, DBImplSessionIdStructure) {
1785
1794
  // We have to generate our own session IDs for simulation purposes in other
1786
1795
  // tests. Here we verify that the DBImpl implementation seems to match
@@ -1799,171 +1808,202 @@ TEST_F(CacheKeyTest, DBImplSessionIdStructure) {
1799
1808
  ASSERT_NE(Lower32of64(lower1), Lower32of64(lower2));
1800
1809
  }
1801
1810
 
1802
- TEST_F(CacheKeyTest, StandardEncodingLimit) {
1803
- base_session_upper_ = 1234;
1804
- base_session_lower_ = 5678;
1805
- session_counter_ = 42;
1806
- file_number_ = 42;
1807
- db_id_ = 1234;
1808
-
1809
- file_size_ = 42;
1810
- SetupStableBase();
1811
- CacheKey ck1;
1812
- ASSERT_TRUE(ck1.IsEmpty());
1813
- ck1 = WithOffset(0);
1814
- ASSERT_FALSE(ck1.IsEmpty());
1815
-
1816
- // Should use same encoding
1817
- file_size_ = BlockBasedTable::kMaxFileSizeStandardEncoding;
1818
- SetupStableBase();
1819
- CacheKey ck2 = WithOffset(0);
1820
- ASSERT_EQ(CountBitsDifferent(ck1, ck2), 0);
1821
-
1822
- // Should use different encoding
1823
- ++file_size_;
1824
- SetupStableBase();
1825
- CacheKey ck3 = WithOffset(0);
1826
- ASSERT_GT(CountBitsDifferent(ck2, ck3), 0);
1811
+ namespace {
1812
+ // Deconstruct cache key, based on knowledge of implementation details.
1813
+ void DeconstructNonemptyCacheKey(const CacheKey& key, uint64_t* file_num_etc64,
1814
+ uint64_t* offset_etc64) {
1815
+ *file_num_etc64 = *reinterpret_cast<const uint64_t*>(key.AsSlice().data());
1816
+ *offset_etc64 = *reinterpret_cast<const uint64_t*>(key.AsSlice().data() + 8);
1817
+ assert(*file_num_etc64 != 0);
1818
+ if (*offset_etc64 == 0) {
1819
+ std::swap(*file_num_etc64, *offset_etc64);
1820
+ }
1821
+ assert(*offset_etc64 != 0);
1827
1822
  }
1828
1823
 
1829
- TEST_F(CacheKeyTest, Encodings) {
1830
- // Claim from cache_key.cc:
1831
- // In fact, if our SST files are all < 4TB (see
1832
- // BlockBasedTable::kMaxFileSizeStandardEncoding), then SST files generated
1833
- // in a single process are guaranteed to have unique cache keys, unless/until
1834
- // number session ids * max file number = 2**86, e.g. 1 trillion DB::Open in
1835
- // a single process and 64 trillion files generated.
1836
-
1837
- // We can generalize that. For
1838
- // * z bits in maximum file size
1839
- // * n bits in maximum file number
1840
- // * s bits in maximum session counter
1841
- // uniqueness is guaranteed at least when all of these hold:
1842
- // * z + n + s <= 121 (128 - 2 meta + 2 offset trim - (8-1) byte granularity
1843
- // in encoding)
1844
- // * n + s <= 86 (encoding limitation)
1845
- // * s <= 62 (because of 2-bit metadata)
1846
-
1847
- // We can verify this indirectly by how input bits get into the cache key,
1848
- // but we have to be mindful that for sufficiently large file sizes,
1849
- // different encodings might be used. But for cases mixing large and small
1850
- // files, we have to verify uniqueness between encodings.
1851
-
1852
- // Going through all combinations would be a little expensive, so we test
1853
- // only one random "stripe" of the configuration space per run.
1854
- constexpr uint32_t kStripeBits = 8;
1855
- constexpr uint32_t kStripeMask = (uint32_t{1} << kStripeBits) - 1;
1856
-
1857
- // Also cycle through stripes on repeated runs (not thread safe)
1858
- static uint32_t stripe =
1859
- static_cast<uint32_t>(std::random_device{}()) & kStripeMask;
1860
- stripe = (stripe + 1) & kStripeMask;
1861
-
1862
- fprintf(stderr, "%u\n", stripe);
1863
-
1864
- // We are going to randomly initialize some values which *should* not affect
1865
- // result
1866
- Random64 r{std::random_device{}()};
1824
+ // Make a bit mask of 0 to 64 bits
1825
+ uint64_t MakeMask64(int bits) {
1826
+ if (bits >= 64) {
1827
+ return uint64_t{0} - 1;
1828
+ } else {
1829
+ return (uint64_t{1} << bits) - 1;
1830
+ }
1831
+ }
1867
1832
 
1868
- int max_num_encodings = 0;
1869
- uint32_t config_num = 0;
1870
- uint32_t session_counter_bits, file_number_bits, max_file_size_bits;
1871
-
1872
- // Inner loop body, used later in a loop over configurations
1873
- auto TestConfig = [&]() {
1874
- base_session_upper_ = r.Next();
1875
- base_session_lower_ = r.Next();
1876
- session_counter_ = r.Next();
1877
- if (session_counter_bits < 64) {
1878
- // Avoid shifting UB
1879
- session_counter_ = session_counter_ >> 1 >> (63 - session_counter_bits);
1880
- }
1881
- file_number_ = r.Next() >> (64 - file_number_bits);
1882
- // Need two bits set to avoid temporary zero below
1883
- if (BitsSetToOne(file_number_) < 2) {
1884
- file_number_ = 3;
1885
- }
1886
- db_id_ = r.Next();
1887
-
1888
- // Work-around clang-analyzer which thinks empty last_base is garbage
1889
- CacheKey last_base = CacheKey::CreateUniqueForProcessLifetime();
1890
-
1891
- std::unordered_set<std::string> seen;
1892
- int num_encodings = 0;
1893
-
1894
- // Loop over encodings by increasing file size bits
1895
- for (uint32_t file_size_bits = 1; file_size_bits <= max_file_size_bits;
1896
- ++file_size_bits) {
1897
- file_size_ = uint64_t{1} << (file_size_bits - 1);
1898
- SetupStableBase();
1899
- CacheKey new_base = WithOffset(0);
1900
- if (CountBitsDifferent(last_base, new_base) == 0) {
1901
- // Same as previous encoding
1902
- continue;
1903
- }
1833
+ // See CacheKeyTest::Encodings
1834
+ struct CacheKeyDecoder {
1835
+ // Inputs
1836
+ uint64_t base_file_num_etc64, base_offset_etc64;
1837
+ int session_counter_bits, file_number_bits, offset_bits;
1838
+
1839
+ // Derived
1840
+ uint64_t session_counter_mask, file_number_mask, offset_mask;
1841
+
1842
+ // Outputs
1843
+ uint64_t decoded_session_counter, decoded_file_num, decoded_offset;
1904
1844
 
1905
- // New encoding
1906
- ++num_encodings;
1907
- ASSERT_TRUE(seen.insert(new_base.AsSlice().ToString()).second);
1908
- last_base = new_base;
1909
- for (uint32_t i = 0; i < file_size_bits; ++i) {
1910
- CacheKey ck = WithOffset(uint64_t{1} << i);
1911
- if (i < 2) {
1912
- // These cases are not relevant and optimized by dropping two
1913
- // lowest bits because there's always at least 5 bytes between
1914
- // blocks.
1915
- ASSERT_EQ(CountBitsDifferent(ck, new_base), 0);
1916
- } else {
1917
- // Normal case
1918
- // 1 bit different from base and never been seen implies the bit
1919
- // is encoded into cache key without overlapping other structured
1920
- // data.
1921
- ASSERT_EQ(CountBitsDifferent(ck, new_base), 1);
1922
- ASSERT_TRUE(seen.insert(ck.AsSlice().ToString()).second);
1845
+ void SetBaseCacheKey(const CacheKey& base) {
1846
+ DeconstructNonemptyCacheKey(base, &base_file_num_etc64, &base_offset_etc64);
1847
+ }
1848
+
1849
+ void SetRanges(int _session_counter_bits, int _file_number_bits,
1850
+ int _offset_bits) {
1851
+ session_counter_bits = _session_counter_bits;
1852
+ session_counter_mask = MakeMask64(session_counter_bits);
1853
+ file_number_bits = _file_number_bits;
1854
+ file_number_mask = MakeMask64(file_number_bits);
1855
+ offset_bits = _offset_bits;
1856
+ offset_mask = MakeMask64(offset_bits);
1857
+ }
1858
+
1859
+ void Decode(const CacheKey& key) {
1860
+ uint64_t file_num_etc64, offset_etc64;
1861
+ DeconstructNonemptyCacheKey(key, &file_num_etc64, &offset_etc64);
1862
+
1863
+ // First decode session counter
1864
+ if (offset_bits + session_counter_bits <= 64) {
1865
+ // fully recoverable from offset_etc64
1866
+ decoded_session_counter =
1867
+ ReverseBits((offset_etc64 ^ base_offset_etc64)) &
1868
+ session_counter_mask;
1869
+ } else if (file_number_bits + session_counter_bits <= 64) {
1870
+ // fully recoverable from file_num_etc64
1871
+ decoded_session_counter = DownwardInvolution(
1872
+ (file_num_etc64 ^ base_file_num_etc64) & session_counter_mask);
1873
+ } else {
1874
+ // Need to combine parts from each word.
1875
+ // Piece1 will contain some correct prefix of the bottom bits of
1876
+ // session counter.
1877
+ uint64_t piece1 =
1878
+ ReverseBits((offset_etc64 ^ base_offset_etc64) & ~offset_mask);
1879
+ int piece1_bits = 64 - offset_bits;
1880
+ // Piece2 will contain involuted bits that we can combine with piece1
1881
+ // to infer rest of session counter
1882
+ int piece2_bits = std::min(64 - file_number_bits, 64 - piece1_bits);
1883
+ ASSERT_LT(piece2_bits, 64);
1884
+ uint64_t piece2_mask = MakeMask64(piece2_bits);
1885
+ uint64_t piece2 = (file_num_etc64 ^ base_file_num_etc64) & piece2_mask;
1886
+
1887
+ // Cancel out the part of piece2 that we can infer from piece1
1888
+ // (DownwardInvolution distributes over xor)
1889
+ piece2 ^= DownwardInvolution(piece1) & piece2_mask;
1890
+
1891
+ // Now we need to solve for the unknown original bits in higher
1892
+ // positions than piece1 provides. We use Gaussian elimination
1893
+ // because we know that a piece2_bits X piece2_bits submatrix of
1894
+ // the matrix underlying DownwardInvolution times the vector of
1895
+ // unknown original bits equals piece2.
1896
+ //
1897
+ // Build an augmented row matrix for that submatrix, built column by
1898
+ // column.
1899
+ std::array<uint64_t, 64> aug_rows{};
1900
+ for (int i = 0; i < piece2_bits; ++i) { // over columns
1901
+ uint64_t col_i = DownwardInvolution(uint64_t{1} << piece1_bits << i);
1902
+ ASSERT_NE(col_i & 1U, 0);
1903
+ for (int j = 0; j < piece2_bits; ++j) { // over rows
1904
+ aug_rows[j] |= (col_i & 1U) << i;
1905
+ col_i >>= 1;
1923
1906
  }
1924
1907
  }
1925
- for (uint32_t i = 0; i < session_counter_bits; ++i) {
1926
- SaveAndRestore<uint64_t> tmp(&session_counter_,
1927
- session_counter_ ^ (uint64_t{1} << i));
1928
- SetupStableBase();
1929
- CacheKey ck = WithOffset(0);
1930
- ASSERT_EQ(CountBitsDifferent(ck, new_base), 1);
1931
- ASSERT_TRUE(seen.insert(ck.AsSlice().ToString()).second);
1908
+ // Augment with right hand side
1909
+ for (int j = 0; j < piece2_bits; ++j) { // over rows
1910
+ aug_rows[j] |= (piece2 & 1U) << piece2_bits;
1911
+ piece2 >>= 1;
1932
1912
  }
1933
- for (uint32_t i = 0; i < file_number_bits; ++i) {
1934
- SaveAndRestore<uint64_t> tmp(&file_number_,
1935
- file_number_ ^ (uint64_t{1} << i));
1936
- SetupStableBase();
1937
- CacheKey ck = WithOffset(0);
1938
- ASSERT_EQ(CountBitsDifferent(ck, new_base), 1);
1939
- ASSERT_TRUE(seen.insert(ck.AsSlice().ToString()).second);
1913
+ // Run Gaussian elimination
1914
+ for (int i = 0; i < piece2_bits; ++i) { // over columns
1915
+ // Find a row that can be used to cancel others
1916
+ uint64_t canceller = 0;
1917
+ // Note: Rows 0 through i-1 contain 1s in columns already eliminated
1918
+ for (int j = i; j < piece2_bits; ++j) { // over rows
1919
+ if (aug_rows[j] & (uint64_t{1} << i)) {
1920
+ // Swap into appropriate row
1921
+ std::swap(aug_rows[i], aug_rows[j]);
1922
+ // Keep a handy copy for row reductions
1923
+ canceller = aug_rows[i];
1924
+ break;
1925
+ }
1926
+ }
1927
+ ASSERT_NE(canceller, 0);
1928
+ for (int j = 0; j < piece2_bits; ++j) { // over rows
1929
+ if (i != j && ((aug_rows[j] >> i) & 1) != 0) {
1930
+ // Row reduction
1931
+ aug_rows[j] ^= canceller;
1932
+ }
1933
+ }
1934
+ }
1935
+ // Extract result
1936
+ decoded_session_counter = piece1;
1937
+ for (int j = 0; j < piece2_bits; ++j) { // over rows
1938
+ ASSERT_EQ(aug_rows[j] & piece2_mask, uint64_t{1} << j);
1939
+ decoded_session_counter |= aug_rows[j] >> piece2_bits << piece1_bits
1940
+ << j;
1940
1941
  }
1941
- max_num_encodings = std::max(max_num_encodings, num_encodings);
1942
1942
  }
1943
- };
1944
1943
 
1945
- // Loop over configurations and test those in stripe
1946
- for (session_counter_bits = 0; session_counter_bits <= 62;
1944
+ decoded_offset =
1945
+ offset_etc64 ^ base_offset_etc64 ^ ReverseBits(decoded_session_counter);
1946
+
1947
+ decoded_file_num = ReverseBits(file_num_etc64 ^ base_file_num_etc64 ^
1948
+ DownwardInvolution(decoded_session_counter));
1949
+ }
1950
+ };
1951
+ } // namespace
1952
+
1953
+ TEST_F(CacheKeyTest, Encodings) {
1954
+ // This test primarily verifies this claim from cache_key.cc:
1955
+ // // In fact, if DB ids were not involved, we would be guaranteed unique
1956
+ // // cache keys for files generated in a single process until total bits for
1957
+ // // biggest session_id_counter, orig_file_number, and offset_in_file
1958
+ // // reach 128 bits.
1959
+ //
1960
+ // To demonstrate this, CacheKeyDecoder can reconstruct the structured inputs
1961
+ // to the cache key when provided an output cache key, the unstructured
1962
+ // inputs, and bounds on the structured inputs.
1963
+ //
1964
+ // See OffsetableCacheKey comments in cache_key.cc.
1965
+
1966
+ // We are going to randomly initialize some values that *should* not affect
1967
+ // result
1968
+ Random64 r{std::random_device{}()};
1969
+
1970
+ CacheKeyDecoder decoder;
1971
+ db_id_ = r.Next();
1972
+ base_session_upper_ = r.Next();
1973
+ base_session_lower_ = r.Next();
1974
+ if (base_session_lower_ == 0) {
1975
+ base_session_lower_ = 1;
1976
+ }
1977
+
1978
+ decoder.SetBaseCacheKey(GetBaseCacheKey());
1979
+
1980
+ // Loop over configurations and test those
1981
+ for (int session_counter_bits = 0; session_counter_bits <= 64;
1947
1982
  ++session_counter_bits) {
1948
- uint32_t max_file_number_bits =
1949
- std::min(uint32_t{64}, uint32_t{86} - session_counter_bits);
1950
- // Start with 2 to avoid file_number_ == 0 in testing
1951
- for (file_number_bits = 2; file_number_bits <= max_file_number_bits;
1952
- ++file_number_bits) {
1953
- uint32_t max_max_file_size_bits =
1954
- std::min(uint32_t{64},
1955
- uint32_t{121} - file_number_bits - session_counter_bits);
1956
- for (max_file_size_bits = 1; max_file_size_bits <= max_max_file_size_bits;
1957
- ++max_file_size_bits) {
1958
- if ((config_num++ & kStripeMask) == stripe) {
1959
- TestConfig();
1983
+ for (int file_number_bits = 1; file_number_bits <= 64; ++file_number_bits) {
1984
+ // 62 bits max because unoptimized offset will be 64 bits in that case
1985
+ for (int offset_bits = 0; offset_bits <= 62; ++offset_bits) {
1986
+ if (session_counter_bits + file_number_bits + offset_bits > 128) {
1987
+ break;
1988
+ }
1989
+
1990
+ decoder.SetRanges(session_counter_bits, file_number_bits, offset_bits);
1991
+
1992
+ uint64_t session_counter = r.Next() & decoder.session_counter_mask;
1993
+ uint64_t file_number = r.Next() & decoder.file_number_mask;
1994
+ if (file_number == 0) {
1995
+ // Minimum
1996
+ file_number = 1;
1960
1997
  }
1998
+ uint64_t offset = r.Next() & decoder.offset_mask;
1999
+ decoder.Decode(GetCacheKey(session_counter, file_number, offset));
2000
+
2001
+ EXPECT_EQ(decoder.decoded_session_counter, session_counter);
2002
+ EXPECT_EQ(decoder.decoded_file_num, file_number);
2003
+ EXPECT_EQ(decoder.decoded_offset, offset);
1961
2004
  }
1962
2005
  }
1963
2006
  }
1964
-
1965
- // Make sure the current implementation is exercised
1966
- ASSERT_EQ(max_num_encodings, 4);
1967
2007
  }
1968
2008
 
1969
2009
  INSTANTIATE_TEST_CASE_P(DBBlockCacheKeyTest, DBBlockCacheKeyTest,