tigerbeetle-node 0.11.13 → 0.12.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (147)
  1. package/README.md +5 -10
  2. package/dist/bin/aarch64-linux-gnu/client.node +0 -0
  3. package/dist/bin/aarch64-linux-musl/client.node +0 -0
  4. package/dist/bin/aarch64-macos/client.node +0 -0
  5. package/dist/bin/x86_64-linux-gnu/client.node +0 -0
  6. package/dist/bin/x86_64-linux-musl/client.node +0 -0
  7. package/dist/bin/x86_64-macos/client.node +0 -0
  8. package/dist/index.js +33 -1
  9. package/dist/index.js.map +1 -1
  10. package/package-lock.json +66 -0
  11. package/package.json +6 -16
  12. package/src/index.ts +56 -1
  13. package/src/node.zig +9 -9
  14. package/dist/.client.node.sha256 +0 -1
  15. package/scripts/build_lib.sh +0 -61
  16. package/scripts/download_node_headers.sh +0 -32
  17. package/src/tigerbeetle/scripts/benchmark.bat +0 -55
  18. package/src/tigerbeetle/scripts/benchmark.sh +0 -66
  19. package/src/tigerbeetle/scripts/confirm_image.sh +0 -44
  20. package/src/tigerbeetle/scripts/fail_on_diff.sh +0 -9
  21. package/src/tigerbeetle/scripts/fuzz_loop.sh +0 -15
  22. package/src/tigerbeetle/scripts/fuzz_loop_hash_log.sh +0 -12
  23. package/src/tigerbeetle/scripts/fuzz_unique_errors.sh +0 -7
  24. package/src/tigerbeetle/scripts/install.bat +0 -7
  25. package/src/tigerbeetle/scripts/install.sh +0 -21
  26. package/src/tigerbeetle/scripts/install_zig.bat +0 -113
  27. package/src/tigerbeetle/scripts/install_zig.sh +0 -90
  28. package/src/tigerbeetle/scripts/lint.zig +0 -199
  29. package/src/tigerbeetle/scripts/pre-commit.sh +0 -9
  30. package/src/tigerbeetle/scripts/scripts/benchmark.bat +0 -55
  31. package/src/tigerbeetle/scripts/scripts/benchmark.sh +0 -66
  32. package/src/tigerbeetle/scripts/scripts/confirm_image.sh +0 -44
  33. package/src/tigerbeetle/scripts/scripts/fail_on_diff.sh +0 -9
  34. package/src/tigerbeetle/scripts/scripts/fuzz_loop.sh +0 -15
  35. package/src/tigerbeetle/scripts/scripts/fuzz_loop_hash_log.sh +0 -12
  36. package/src/tigerbeetle/scripts/scripts/fuzz_unique_errors.sh +0 -7
  37. package/src/tigerbeetle/scripts/scripts/install.bat +0 -7
  38. package/src/tigerbeetle/scripts/scripts/install.sh +0 -21
  39. package/src/tigerbeetle/scripts/scripts/install_zig.bat +0 -113
  40. package/src/tigerbeetle/scripts/scripts/install_zig.sh +0 -90
  41. package/src/tigerbeetle/scripts/scripts/lint.zig +0 -199
  42. package/src/tigerbeetle/scripts/scripts/pre-commit.sh +0 -9
  43. package/src/tigerbeetle/scripts/scripts/shellcheck.sh +0 -5
  44. package/src/tigerbeetle/scripts/scripts/tests_on_alpine.sh +0 -10
  45. package/src/tigerbeetle/scripts/scripts/tests_on_ubuntu.sh +0 -14
  46. package/src/tigerbeetle/scripts/scripts/upgrade_ubuntu_kernel.sh +0 -48
  47. package/src/tigerbeetle/scripts/scripts/validate_docs.sh +0 -23
  48. package/src/tigerbeetle/scripts/scripts/vr_state_enumerate +0 -46
  49. package/src/tigerbeetle/scripts/shellcheck.sh +0 -5
  50. package/src/tigerbeetle/scripts/tests_on_alpine.sh +0 -10
  51. package/src/tigerbeetle/scripts/tests_on_ubuntu.sh +0 -14
  52. package/src/tigerbeetle/scripts/upgrade_ubuntu_kernel.sh +0 -48
  53. package/src/tigerbeetle/scripts/validate_docs.sh +0 -23
  54. package/src/tigerbeetle/scripts/vr_state_enumerate +0 -46
  55. package/src/tigerbeetle/src/benchmark.zig +0 -336
  56. package/src/tigerbeetle/src/config.zig +0 -233
  57. package/src/tigerbeetle/src/constants.zig +0 -428
  58. package/src/tigerbeetle/src/ewah.zig +0 -286
  59. package/src/tigerbeetle/src/ewah_benchmark.zig +0 -120
  60. package/src/tigerbeetle/src/ewah_fuzz.zig +0 -130
  61. package/src/tigerbeetle/src/fifo.zig +0 -120
  62. package/src/tigerbeetle/src/io/benchmark.zig +0 -213
  63. package/src/tigerbeetle/src/io/darwin.zig +0 -814
  64. package/src/tigerbeetle/src/io/linux.zig +0 -1071
  65. package/src/tigerbeetle/src/io/test.zig +0 -643
  66. package/src/tigerbeetle/src/io/windows.zig +0 -1183
  67. package/src/tigerbeetle/src/io.zig +0 -34
  68. package/src/tigerbeetle/src/iops.zig +0 -107
  69. package/src/tigerbeetle/src/lsm/README.md +0 -308
  70. package/src/tigerbeetle/src/lsm/binary_search.zig +0 -341
  71. package/src/tigerbeetle/src/lsm/bloom_filter.zig +0 -125
  72. package/src/tigerbeetle/src/lsm/compaction.zig +0 -603
  73. package/src/tigerbeetle/src/lsm/composite_key.zig +0 -77
  74. package/src/tigerbeetle/src/lsm/direction.zig +0 -11
  75. package/src/tigerbeetle/src/lsm/eytzinger.zig +0 -587
  76. package/src/tigerbeetle/src/lsm/eytzinger_benchmark.zig +0 -330
  77. package/src/tigerbeetle/src/lsm/forest.zig +0 -205
  78. package/src/tigerbeetle/src/lsm/forest_fuzz.zig +0 -450
  79. package/src/tigerbeetle/src/lsm/grid.zig +0 -573
  80. package/src/tigerbeetle/src/lsm/groove.zig +0 -1036
  81. package/src/tigerbeetle/src/lsm/k_way_merge.zig +0 -474
  82. package/src/tigerbeetle/src/lsm/level_iterator.zig +0 -332
  83. package/src/tigerbeetle/src/lsm/manifest.zig +0 -617
  84. package/src/tigerbeetle/src/lsm/manifest_level.zig +0 -878
  85. package/src/tigerbeetle/src/lsm/manifest_log.zig +0 -789
  86. package/src/tigerbeetle/src/lsm/manifest_log_fuzz.zig +0 -691
  87. package/src/tigerbeetle/src/lsm/merge_iterator.zig +0 -106
  88. package/src/tigerbeetle/src/lsm/node_pool.zig +0 -235
  89. package/src/tigerbeetle/src/lsm/posted_groove.zig +0 -381
  90. package/src/tigerbeetle/src/lsm/segmented_array.zig +0 -1329
  91. package/src/tigerbeetle/src/lsm/segmented_array_benchmark.zig +0 -148
  92. package/src/tigerbeetle/src/lsm/segmented_array_fuzz.zig +0 -9
  93. package/src/tigerbeetle/src/lsm/set_associative_cache.zig +0 -850
  94. package/src/tigerbeetle/src/lsm/table.zig +0 -1009
  95. package/src/tigerbeetle/src/lsm/table_immutable.zig +0 -192
  96. package/src/tigerbeetle/src/lsm/table_iterator.zig +0 -340
  97. package/src/tigerbeetle/src/lsm/table_mutable.zig +0 -203
  98. package/src/tigerbeetle/src/lsm/test.zig +0 -439
  99. package/src/tigerbeetle/src/lsm/tree.zig +0 -1169
  100. package/src/tigerbeetle/src/lsm/tree_fuzz.zig +0 -479
  101. package/src/tigerbeetle/src/message_bus.zig +0 -1013
  102. package/src/tigerbeetle/src/message_pool.zig +0 -156
  103. package/src/tigerbeetle/src/ring_buffer.zig +0 -399
  104. package/src/tigerbeetle/src/simulator.zig +0 -580
  105. package/src/tigerbeetle/src/state_machine/auditor.zig +0 -578
  106. package/src/tigerbeetle/src/state_machine/workload.zig +0 -883
  107. package/src/tigerbeetle/src/state_machine.zig +0 -2099
  108. package/src/tigerbeetle/src/static_allocator.zig +0 -65
  109. package/src/tigerbeetle/src/stdx.zig +0 -171
  110. package/src/tigerbeetle/src/storage.zig +0 -393
  111. package/src/tigerbeetle/src/testing/cluster/message_bus.zig +0 -82
  112. package/src/tigerbeetle/src/testing/cluster/network.zig +0 -237
  113. package/src/tigerbeetle/src/testing/cluster/state_checker.zig +0 -169
  114. package/src/tigerbeetle/src/testing/cluster/storage_checker.zig +0 -202
  115. package/src/tigerbeetle/src/testing/cluster.zig +0 -444
  116. package/src/tigerbeetle/src/testing/fuzz.zig +0 -140
  117. package/src/tigerbeetle/src/testing/hash_log.zig +0 -66
  118. package/src/tigerbeetle/src/testing/id.zig +0 -99
  119. package/src/tigerbeetle/src/testing/packet_simulator.zig +0 -374
  120. package/src/tigerbeetle/src/testing/priority_queue.zig +0 -645
  121. package/src/tigerbeetle/src/testing/reply_sequence.zig +0 -139
  122. package/src/tigerbeetle/src/testing/state_machine.zig +0 -250
  123. package/src/tigerbeetle/src/testing/storage.zig +0 -757
  124. package/src/tigerbeetle/src/testing/table.zig +0 -247
  125. package/src/tigerbeetle/src/testing/time.zig +0 -84
  126. package/src/tigerbeetle/src/tigerbeetle.zig +0 -227
  127. package/src/tigerbeetle/src/time.zig +0 -112
  128. package/src/tigerbeetle/src/tracer.zig +0 -529
  129. package/src/tigerbeetle/src/unit_tests.zig +0 -40
  130. package/src/tigerbeetle/src/vopr.zig +0 -495
  131. package/src/tigerbeetle/src/vsr/README.md +0 -209
  132. package/src/tigerbeetle/src/vsr/client.zig +0 -544
  133. package/src/tigerbeetle/src/vsr/clock.zig +0 -855
  134. package/src/tigerbeetle/src/vsr/journal.zig +0 -2415
  135. package/src/tigerbeetle/src/vsr/journal_format_fuzz.zig +0 -111
  136. package/src/tigerbeetle/src/vsr/marzullo.zig +0 -309
  137. package/src/tigerbeetle/src/vsr/replica.zig +0 -6616
  138. package/src/tigerbeetle/src/vsr/replica_format.zig +0 -219
  139. package/src/tigerbeetle/src/vsr/superblock.zig +0 -1631
  140. package/src/tigerbeetle/src/vsr/superblock_client_table.zig +0 -256
  141. package/src/tigerbeetle/src/vsr/superblock_free_set.zig +0 -929
  142. package/src/tigerbeetle/src/vsr/superblock_free_set_fuzz.zig +0 -334
  143. package/src/tigerbeetle/src/vsr/superblock_fuzz.zig +0 -390
  144. package/src/tigerbeetle/src/vsr/superblock_manifest.zig +0 -615
  145. package/src/tigerbeetle/src/vsr/superblock_quorums.zig +0 -394
  146. package/src/tigerbeetle/src/vsr/superblock_quorums_fuzz.zig +0 -314
  147. package/src/tigerbeetle/src/vsr.zig +0 -1425
@@ -1,1009 +0,0 @@
1
- const std = @import("std");
2
- const mem = std.mem;
3
- const math = std.math;
4
- const assert = std.debug.assert;
5
-
6
- const constants = @import("../constants.zig");
7
- const vsr = @import("../vsr.zig");
8
- const binary_search = @import("binary_search.zig");
9
- const bloom_filter = @import("bloom_filter.zig");
10
-
11
- const stdx = @import("../stdx.zig");
12
- const div_ceil = stdx.div_ceil;
13
- const eytzinger = @import("eytzinger.zig").eytzinger;
14
- const snapshot_latest = @import("tree.zig").snapshot_latest;
15
-
16
- const BlockType = @import("grid.zig").BlockType;
17
- const alloc_block = @import("grid.zig").alloc_block;
18
- const TableInfoType = @import("manifest.zig").TableInfoType;
19
-
20
- pub const TableUsage = enum {
21
- /// General purpose table.
22
- general,
23
- /// If your usage fits this pattern:
24
- /// * Only put keys which are not present.
25
- /// * Only remove keys which are present.
26
- /// * TableKey == TableValue (modulo padding, eg CompositeKey)
27
- /// Then we can unlock additional optimizations.
28
- secondary_index,
29
- };
30
-
31
- /// A table is a set of blocks:
32
- ///
33
- /// * Index block (exactly 1)
34
- /// * Filter blocks (at least one, at most `filter_block_count_max`)
35
- /// Each filter block summarizes the keys for several adjacent (in terms of key) data blocks.
36
- /// * Data blocks (at least one, at most `data_block_count_max`)
37
- /// Store the actual keys/values, along with a small index of the keys to optimize lookups.
38
- ///
39
- ///
40
- /// Every block begins with a `vsr.Header` that includes:
41
- ///
42
- /// * `checksum`, `checksum_body` verify the data integrity.
43
- /// * `cluster` is the cluster id.
44
- /// * `command` is `.block`.
45
- /// * `op` is the block address.
46
- /// * `size` is the block size excluding padding.
47
- ///
48
- /// Index block schema:
49
- /// │ vsr.Header │ operation=BlockType.index
50
- /// │ vsr.Header │ commit=filter_block_count,
51
- /// │ │ request=data_block_count,
52
- /// │ │ timestamp=snapshot_min
53
- /// │ [filter_block_count_max]u128 │ checksums of filter blocks
54
- /// │ [data_block_count_max]u128 │ checksums of data blocks
55
- /// │ [data_block_count_max]Key │ the maximum/last key in the respective data block
56
- /// │ [filter_block_count_max]u64 │ addresses of filter blocks
57
- /// │ [data_block_count_max]u64 │ addresses of data blocks
58
- /// │ […]u8{0} │ padding (to end of block)
59
- ///
60
- /// Filter block schema:
61
- /// │ vsr.Header │ operation=BlockType.filter
62
- /// │ […]u8 │ A split-block Bloom filter, "containing" every key from as many as
63
- /// │ │ `filter_data_block_count_max` data blocks.
64
- ///
65
- /// Data block schema:
66
- /// │ vsr.Header │ operation=BlockType.data
67
- /// │ [block_key_count + 1]Key │ Eytzinger-layout keys from a subset of the values.
68
- /// │ [≤value_count_max]Value │ At least one value (no empty tables).
69
- /// │ […]u8{0} │ padding (to end of block)
70
- pub fn TableType(
71
- comptime TableKey: type,
72
- comptime TableValue: type,
73
- /// Returns the sort order between two keys.
74
- comptime table_compare_keys: fn (TableKey, TableKey) callconv(.Inline) math.Order,
75
- /// Returns the key for a value. For example, given `object` returns `object.id`.
76
- /// Since most objects contain an id, this avoids duplicating the key when storing the value.
77
- comptime table_key_from_value: fn (*const TableValue) callconv(.Inline) TableKey,
78
- /// Must compare greater than all other keys.
79
- comptime table_sentinel_key: TableKey,
80
- /// Returns whether a value is a tombstone value.
81
- comptime table_tombstone: fn (*const TableValue) callconv(.Inline) bool,
82
- /// Returns a tombstone value representation for a key.
83
- comptime table_tombstone_from_key: fn (TableKey) callconv(.Inline) TableValue,
84
- /// The maximum number of values per table.
85
- comptime table_value_count_max: usize,
86
- comptime usage: TableUsage,
87
- ) type {
88
- return struct {
89
- const Table = @This();
90
-
91
- // Re-export all the generic arguments.
92
- pub const Key = TableKey;
93
- pub const Value = TableValue;
94
- pub const compare_keys = table_compare_keys;
95
- pub const key_from_value = table_key_from_value;
96
- pub const sentinel_key = table_sentinel_key;
97
- pub const tombstone = table_tombstone;
98
- pub const tombstone_from_key = table_tombstone_from_key;
99
- pub const value_count_max = table_value_count_max;
100
- pub const usage = usage;
101
-
102
- // Export hashmap context for Key and Value
103
- pub const HashMapContextValue = struct {
104
- pub fn eql(_: HashMapContextValue, a: Value, b: Value) bool {
105
- return compare_keys(key_from_value(&a), key_from_value(&b)) == .eq;
106
- }
107
-
108
- pub fn hash(_: HashMapContextValue, value: Value) u64 {
109
- // TODO(King): this erros out with "unable to hash type void" due to
110
- // CompositeKey(T) struct containing .padding which may be void at comptime.
111
- const key = key_from_value(&value);
112
- return std.hash_map.getAutoHashFn(Key, HashMapContextValue)(.{}, key);
113
- }
114
- };
115
-
116
- const block_size = constants.block_size;
117
- const BlockPtr = *align(constants.sector_size) [block_size]u8;
118
- const BlockPtrConst = *align(constants.sector_size) const [block_size]u8;
119
-
120
- pub const key_size = @sizeOf(Key);
121
- pub const value_size = @sizeOf(Value);
122
-
123
- comptime {
124
- assert(@alignOf(Key) == 8 or @alignOf(Key) == 16);
125
- // TODO(ifreund) What are our alignment expectations for Value?
126
-
127
- // There must be no padding in the Key/Value types to avoid buffer bleeds.
128
- assert(@bitSizeOf(Key) == @sizeOf(Key) * 8);
129
- assert(@bitSizeOf(Value) == @sizeOf(Value) * 8);
130
-
131
- // These impact our calculation of:
132
- // * the superblock trailer size, and
133
- // * the manifest log layout for alignment.
134
- assert(key_size >= 8);
135
- assert(key_size <= 32);
136
- assert(key_size == 8 or key_size == 16 or key_size == 24 or key_size == 32);
137
- }
138
-
139
- const address_size = @sizeOf(u64);
140
- const checksum_size = @sizeOf(u128);
141
- const block_body_size = block_size - @sizeOf(vsr.Header);
142
-
143
- pub const layout = layout: {
144
- @setEvalBranchQuota(10_000);
145
-
146
- assert(block_size % constants.sector_size == 0);
147
- assert(math.isPowerOfTwo(block_size));
148
-
149
- // Searching the values array is more expensive than searching the per-block index
150
- // as the larger values size leads to more cache misses. We can therefore speed
151
- // up lookups by making the per block index larger at the cost of reducing the
152
- // number of values that may be stored per block.
153
- //
154
- // X = values per block
155
- // Y = keys per block
156
- //
157
- // R = constants.lsm_value_to_key_layout_ratio_min
158
- //
159
- // To maximize:
160
- // Y
161
- // Given constraints:
162
- // body >= X * value_size + Y * key_size
163
- // (X * value_size) / (Y * key_size) >= R
164
- // X >= Y
165
- //
166
- // Plots of above constraints:
167
- // https://www.desmos.com/calculator/elqqaalgbc
168
- //
169
- // body - X * value_size = Y * key_size
170
- // Y = (body - X * value_size) / key_size
171
- //
172
- // (X * value_size) / (body - X * value_size) = R
173
- // (X * value_size) = R * body - R * X * value_size
174
- // (R + 1) * X * value_size = R * body
175
- // X = R * body / ((R + 1)* value_size)
176
- //
177
- // Y = (body - (R * body / ((R + 1) * value_size)) * value_size) / key_size
178
- // Y = (body - (R / (R + 1)) * body) / key_size
179
- // Y = body / ((R + 1) * key_size)
180
- var block_keys_layout_count = math.min(
181
- block_body_size / ((constants.lsm_value_to_key_layout_ratio_min + 1) * key_size),
182
- block_body_size / (value_size + key_size),
183
- );
184
-
185
- // Round to the next lowest power of two. This speeds up lookups in the Eytzinger
186
- // layout and should help ensure better alignment for the following values.
187
- // We could round to the nearest power of two, but then we would need
188
- // care to avoid breaking e.g. the X >= Y invariant above.
189
- block_keys_layout_count = math.floorPowerOfTwo(u64, block_keys_layout_count);
190
-
191
- // If the index is smaller than 16 keys then there are key sizes >= 4 such that
192
- // the total index size is not 64 byte cache line aligned.
193
- assert(@sizeOf(Key) >= 4);
194
- assert(@sizeOf(Key) % 4 == 0);
195
- if (block_keys_layout_count < @divExact(constants.cache_line_size, 4)) {
196
- block_keys_layout_count = 0;
197
- }
198
- assert((block_keys_layout_count * key_size) % constants.cache_line_size == 0);
199
-
200
- const block_key_layout_size = block_keys_layout_count * key_size;
201
- const block_key_count =
202
- if (block_keys_layout_count == 0) 0 else block_keys_layout_count - 1;
203
-
204
- const block_value_count_max = @divFloor(
205
- block_body_size - block_key_layout_size,
206
- value_size,
207
- );
208
-
209
- // TODO Audit/tune this number for split block bloom filters:
210
- const filter_bytes_per_key = 2;
211
- const filter_data_block_count_max = @divFloor(
212
- block_body_size,
213
- block_value_count_max * filter_bytes_per_key,
214
- );
215
-
216
- // We need enough blocks to hold `value_count_max` values.
217
- const data_blocks = div_ceil(value_count_max, block_value_count_max);
218
- const filter_blocks = div_ceil(data_blocks, filter_data_block_count_max);
219
-
220
- break :layout .{
221
- // The number of keys in the Eytzinger layout per data block.
222
- .block_key_count = block_key_count,
223
- // The number of bytes used by the keys in the data block.
224
- .block_key_layout_size = block_key_layout_size,
225
- // The maximum number of values in a data block.
226
- .block_value_count_max = block_value_count_max,
227
-
228
- .data_block_count_max = data_blocks,
229
- .filter_block_count_max = filter_blocks,
230
-
231
- // The number of data blocks covered by a single filter block.
232
- .filter_data_block_count_max = std.math.min(
233
- filter_data_block_count_max,
234
- data_blocks,
235
- ),
236
- };
237
- };
238
-
239
- const index_block_count = 1;
240
- pub const filter_block_count_max = layout.filter_block_count_max;
241
- pub const data_block_count_max = layout.data_block_count_max;
242
- pub const block_count_max =
243
- index_block_count + filter_block_count_max + data_block_count_max;
244
-
245
- const index = struct {
246
- const size = @sizeOf(vsr.Header) + filter_checksums_size + data_checksums_size +
247
- keys_size + filter_addresses_size + data_addresses_size;
248
-
249
- const filter_checksums_offset = @sizeOf(vsr.Header);
250
- const filter_checksums_size = filter_block_count_max * checksum_size;
251
-
252
- const data_checksums_offset = filter_checksums_offset + filter_checksums_size;
253
- const data_checksums_size = data_block_count_max * checksum_size;
254
-
255
- const keys_offset = data_checksums_offset + data_checksums_size;
256
- const keys_size = data_block_count_max * key_size;
257
-
258
- const filter_addresses_offset = keys_offset + keys_size;
259
- const filter_addresses_size = filter_block_count_max * address_size;
260
-
261
- const data_addresses_offset = filter_addresses_offset + filter_addresses_size;
262
- const data_addresses_size = data_block_count_max * address_size;
263
-
264
- const padding_offset = data_addresses_offset + data_addresses_size;
265
- const padding_size = block_size - padding_offset;
266
- };
267
-
268
- pub const filter = struct {
269
- pub const data_block_count_max = layout.filter_data_block_count_max;
270
-
271
- const filter_offset = @sizeOf(vsr.Header);
272
- const filter_size = block_size - filter_offset;
273
-
274
- const padding_offset = filter_offset + filter_size;
275
- const padding_size = block_size - padding_offset;
276
- };
277
-
278
- pub const data = struct {
279
- const key_count = layout.block_key_count;
280
- pub const block_value_count_max = layout.block_value_count_max;
281
-
282
- const key_layout_offset = @sizeOf(vsr.Header);
283
- const key_layout_size = layout.block_key_layout_size;
284
-
285
- const values_offset = key_layout_offset + key_layout_size;
286
- const values_size = block_value_count_max * value_size;
287
-
288
- const padding_offset = values_offset + values_size;
289
- const padding_size = block_size - padding_offset;
290
- };
291
-
292
- const compile_log_layout = false;
293
- comptime {
294
- if (compile_log_layout) {
295
- @compileError(std.fmt.comptimePrint(
296
- \\
297
- \\
298
- \\lsm parameters:
299
- \\ value: {}
300
- \\ value count max: {}
301
- \\ key size: {}
302
- \\ value size: {}
303
- \\ block size: {}
304
- \\layout:
305
- \\ index block count: {}
306
- \\ filter block count max: {}
307
- \\ data block count max: {}
308
- \\index:
309
- \\ size: {}
310
- \\ filter_checksums_offset: {}
311
- \\ filter_checksums_size: {}
312
- \\ data_checksums_offset: {}
313
- \\ data_checksums_size: {}
314
- \\ keys_offset: {}
315
- \\ keys_size: {}
316
- \\ filter_addresses_offset: {}
317
- \\ filter_addresses_size: {}
318
- \\ data_addresses_offset: {}
319
- \\ data_addresses_size: {}
320
- \\filter:
321
- \\ data_block_count_max: {}
322
- \\ filter_offset: {}
323
- \\ filter_size: {}
324
- \\ padding_offset: {}
325
- \\ padding_size: {}
326
- \\data:
327
- \\ key_count: {}
328
- \\ value_count_max: {}
329
- \\ key_layout_offset: {}
330
- \\ key_layout_size: {}
331
- \\ values_offset: {}
332
- \\ values_size: {}
333
- \\ padding_offset: {}
334
- \\ padding_size: {}
335
- \\
336
- ,
337
- .{
338
- Value,
339
- value_count_max,
340
- key_size,
341
- value_size,
342
- block_size,
343
-
344
- index_block_count,
345
- filter_block_count_max,
346
- data_block_count_max,
347
-
348
- index.size,
349
- index.filter_checksums_offset,
350
- index.filter_checksums_size,
351
- index.data_checksums_offset,
352
- index.data_checksums_size,
353
- index.keys_offset,
354
- index.keys_size,
355
- index.filter_addresses_offset,
356
- index.filter_addresses_size,
357
- index.data_addresses_offset,
358
- index.data_addresses_size,
359
-
360
- filter.data_block_count_max,
361
- filter.filter_offset,
362
- filter.filter_size,
363
- filter.padding_offset,
364
- filter.padding_size,
365
-
366
- data.key_count,
367
- data.block_value_count_max,
368
- data.key_layout_offset,
369
- data.key_layout_size,
370
- data.values_offset,
371
- data.values_size,
372
- data.padding_offset,
373
- data.padding_size,
374
- },
375
- ));
376
- }
377
- }
378
-
379
- comptime {
380
- assert(index_block_count > 0);
381
- assert(filter_block_count_max > 0);
382
- assert(data_block_count_max > 0);
383
-
384
- assert(filter.data_block_count_max > 0);
385
- // There should not be more data blocks per filter block than there are data blocks:
386
- assert(filter.data_block_count_max <= data_block_count_max);
387
-
388
- const filter_bytes_per_key = 2;
389
- assert(filter_block_count_max * filter.filter_size >=
390
- data_block_count_max * data.block_value_count_max * filter_bytes_per_key);
391
-
392
- assert(index.size == @sizeOf(vsr.Header) +
393
- data_block_count_max * (key_size + address_size + checksum_size) +
394
- filter_block_count_max * (address_size + checksum_size));
395
- assert(index.size == index.data_addresses_offset + index.data_addresses_size);
396
- assert(index.size <= block_size);
397
- assert(index.keys_size > 0);
398
- assert(index.keys_size % key_size == 0);
399
- assert(@divExact(index.data_addresses_size, @sizeOf(u64)) == data_block_count_max);
400
- assert(@divExact(index.filter_addresses_size, @sizeOf(u64)) == filter_block_count_max);
401
- assert(@divExact(index.data_checksums_size, @sizeOf(u128)) == data_block_count_max);
402
- assert(@divExact(index.filter_checksums_size, @sizeOf(u128)) == filter_block_count_max);
403
- assert(block_size == index.padding_offset + index.padding_size);
404
- assert(block_size == index.size + index.padding_size);
405
-
406
- // Split block bloom filters require filters to be a multiple of 32 bytes as they
407
- // use 256 bit blocks.
408
- assert(filter.filter_size % 32 == 0);
409
- assert(filter.filter_size == block_body_size);
410
- assert(block_size == filter.padding_offset + filter.padding_size);
411
- assert(block_size == @sizeOf(vsr.Header) + filter.filter_size + filter.padding_size);
412
-
413
- if (data.key_count > 0) {
414
- assert(data.key_count >= 3);
415
- assert(math.isPowerOfTwo(data.key_count + 1));
416
- assert(data.key_count + 1 == @divExact(data.key_layout_size, key_size));
417
- assert(data.values_size / data.key_layout_size >=
418
- constants.lsm_value_to_key_layout_ratio_min);
419
- } else {
420
- assert(data.key_count == 0);
421
- assert(data.key_layout_size == 0);
422
- assert(data.values_offset == data.key_layout_offset);
423
- }
424
-
425
- assert(data.block_value_count_max > 0);
426
- assert(data.block_value_count_max >= data.key_count);
427
- assert(@divExact(data.values_size, value_size) == data.block_value_count_max);
428
- assert(data.values_offset % constants.cache_line_size == 0);
429
- // You can have any size value you want, as long as it fits
430
- // neatly into the CPU cache lines :)
431
- assert((data.block_value_count_max * value_size) % constants.cache_line_size == 0);
432
-
433
- assert(data.padding_size >= 0);
434
- assert(block_size == @sizeOf(vsr.Header) + data.key_layout_size +
435
- data.values_size + data.padding_size);
436
- assert(block_size == data.padding_offset + data.padding_size);
437
-
438
- // We expect no block padding at least for TigerBeetle's objects and indexes:
439
- if ((key_size == 8 and value_size == 128) or
440
- (key_size == 8 and value_size == 64) or
441
- (key_size == 16 and value_size == 16) or
442
- (key_size == 32 and value_size == 32))
443
- {
444
- assert(data.padding_size == 0);
445
- }
446
- }
447
-
448
- pub const Builder = struct {
449
- const TableInfo = TableInfoType(Table);
450
-
451
- key_min: Key = undefined, // Inclusive.
452
- key_max: Key = undefined, // Inclusive.
453
-
454
- index_block: BlockPtr,
455
- filter_block: BlockPtr,
456
- data_block: BlockPtr,
457
-
458
- data_block_count: u32 = 0,
459
- value: u32 = 0,
460
-
461
- filter_block_count: u32 = 0,
462
- data_blocks_in_filter: u32 = 0,
463
-
464
- pub fn init(allocator: mem.Allocator) !Builder {
465
- const index_block = try alloc_block(allocator);
466
- errdefer allocator.free(index_block);
467
-
468
- const filter_block = try alloc_block(allocator);
469
- errdefer allocator.free(filter_block);
470
-
471
- const data_block = try alloc_block(allocator);
472
- errdefer allocator.free(data_block);
473
-
474
- return Builder{
475
- .index_block = index_block[0..block_size],
476
- .filter_block = filter_block[0..block_size],
477
- .data_block = data_block[0..block_size],
478
- };
479
- }
480
-
481
- pub fn deinit(builder: *Builder, allocator: mem.Allocator) void {
482
- allocator.free(builder.index_block);
483
- allocator.free(builder.filter_block);
484
- allocator.free(builder.data_block);
485
-
486
- builder.* = undefined;
487
- }
488
-
489
- pub fn data_block_append(builder: *Builder, value: *const Value) void {
490
- const values_max = data_block_values(builder.data_block);
491
- assert(values_max.len == data.block_value_count_max);
492
-
493
- values_max[builder.value] = value.*;
494
- builder.value += 1;
495
-
496
- const key = key_from_value(value);
497
- const fingerprint = bloom_filter.Fingerprint.create(mem.asBytes(&key));
498
- bloom_filter.add(fingerprint, filter_block_filter(builder.filter_block));
499
- }
500
-
501
- pub fn data_block_append_slice(builder: *Builder, values: []const Value) void {
502
- assert(values.len > 0);
503
- assert(builder.value + values.len <= data.block_value_count_max);
504
-
505
- const values_max = data_block_values(builder.data_block);
506
- assert(values_max.len == data.block_value_count_max);
507
-
508
- stdx.copy_disjoint(.inexact, Value, values_max[builder.value..], values);
509
- builder.value += @intCast(u32, values.len);
510
-
511
- for (values) |*value| {
512
- const key = key_from_value(value);
513
- const fingerprint = bloom_filter.Fingerprint.create(mem.asBytes(&key));
514
- bloom_filter.add(fingerprint, filter_block_filter(builder.filter_block));
515
- }
516
- }
517
-
518
- pub fn data_block_empty(builder: Builder) bool {
519
- assert(builder.value <= data.block_value_count_max);
520
- return builder.value == 0;
521
- }
522
-
523
- pub fn data_block_full(builder: Builder) bool {
524
- assert(builder.value <= data.block_value_count_max);
525
- return builder.value == data.block_value_count_max;
526
- }
527
-
528
- const DataFinishOptions = struct {
529
- cluster: u32,
530
- address: u64,
531
- };
532
-
533
- pub fn data_block_finish(builder: *Builder, options: DataFinishOptions) void {
534
- // For each block we write the sorted values, initialize the Eytzinger layout,
535
- // complete the block header, and add the block's max key to the table index.
536
-
537
- assert(options.address > 0);
538
- assert(builder.value > 0);
539
-
540
- const block = builder.data_block;
541
- const values_max = data_block_values(block);
542
- assert(values_max.len == data.block_value_count_max);
543
-
544
- const values = values_max[0..builder.value];
545
- const key_max = key_from_value(&values[values.len - 1]);
546
-
547
- if (constants.verify) {
548
- var a = &values[0];
549
- for (values[1..]) |*b| {
550
- assert(compare_keys(key_from_value(a), key_from_value(b)) == .lt);
551
- a = b;
552
- }
553
- }
554
-
555
- if (data.key_count > 0) {
556
- assert(@divExact(data.key_layout_size, key_size) == data.key_count + 1);
557
-
558
- const key_layout_bytes = @alignCast(
559
- @alignOf(Key),
560
- block[data.key_layout_offset..][0..data.key_layout_size],
561
- );
562
- const key_layout = mem.bytesAsValue([data.key_count + 1]Key, key_layout_bytes);
563
-
564
- const e = eytzinger(data.key_count, data.block_value_count_max);
565
- e.layout_from_keys_or_values(
566
- Key,
567
- Value,
568
- key_from_value,
569
- sentinel_key,
570
- values,
571
- key_layout,
572
- );
573
- }
574
-
575
- const values_padding = mem.sliceAsBytes(values_max[builder.value..]);
576
- const block_padding = block[data.padding_offset..][0..data.padding_size];
577
- assert(compare_keys(key_from_value(&values[values.len - 1]), key_max) == .eq);
578
-
579
- const header_bytes = block[0..@sizeOf(vsr.Header)];
580
- const header = mem.bytesAsValue(vsr.Header, header_bytes);
581
-
582
- header.* = .{
583
- .cluster = options.cluster,
584
- .op = options.address,
585
- .request = @intCast(u32, values.len),
586
- .size = block_size - @intCast(u32, values_padding.len + block_padding.len),
587
- .command = .block,
588
- .operation = BlockType.data.operation(),
589
- };
590
-
591
- header.set_checksum_body(block[@sizeOf(vsr.Header)..header.size]);
592
- header.set_checksum();
593
-
594
- const current = builder.data_block_count;
595
- index_data_keys(builder.index_block)[current] = key_max;
596
- index_data_addresses(builder.index_block)[current] = options.address;
597
- index_data_checksums(builder.index_block)[current] = header.checksum;
598
-
599
- if (current == 0) builder.key_min = key_from_value(&values[0]);
600
- builder.key_max = key_max;
601
-
602
- if (current == 0 and values.len == 1) {
603
- assert(compare_keys(builder.key_min, builder.key_max) == .eq);
604
- } else {
605
- assert(compare_keys(builder.key_min, builder.key_max) == .lt);
606
- }
607
-
608
- if (current > 0) {
609
- const key_max_prev = index_data_keys(builder.index_block)[current - 1];
610
- assert(compare_keys(key_max_prev, key_from_value(&values[0])) == .lt);
611
- }
612
-
613
- builder.data_block_count += 1;
614
- builder.value = 0;
615
-
616
- builder.data_blocks_in_filter += 1;
617
- }
618
-
619
- pub fn filter_block_empty(builder: Builder) bool {
620
- assert(builder.data_blocks_in_filter <= filter.data_block_count_max);
621
- return builder.data_blocks_in_filter == 0;
622
- }
623
-
624
- pub fn filter_block_full(builder: Builder) bool {
625
- assert(builder.data_blocks_in_filter <= filter.data_block_count_max);
626
- return builder.data_blocks_in_filter == filter.data_block_count_max;
627
- }
628
-
629
- const FilterFinishOptions = struct {
630
- cluster: u32,
631
- address: u64,
632
- };
633
-
634
- pub fn filter_block_finish(builder: *Builder, options: FilterFinishOptions) void {
635
- assert(!builder.filter_block_empty());
636
- assert(builder.data_block_empty());
637
- assert(options.address > 0);
638
-
639
- const header_bytes = builder.filter_block[0..@sizeOf(vsr.Header)];
640
- const header = mem.bytesAsValue(vsr.Header, header_bytes);
641
- header.* = .{
642
- .cluster = options.cluster,
643
- .op = options.address,
644
- .size = block_size - filter.padding_size,
645
- .command = .block,
646
- .operation = BlockType.filter.operation(),
647
- };
648
-
649
- const body = builder.filter_block[@sizeOf(vsr.Header)..header.size];
650
- header.set_checksum_body(body);
651
- header.set_checksum();
652
-
653
- const current = builder.filter_block_count;
654
- index_filter_addresses(builder.index_block)[current] = options.address;
655
- index_filter_checksums(builder.index_block)[current] = header.checksum;
656
-
657
- builder.filter_block_count += 1;
658
- builder.data_blocks_in_filter = 0;
659
- }
660
-
661
- pub fn index_block_empty(builder: Builder) bool {
662
- assert(builder.data_block_count <= data_block_count_max);
663
- return builder.data_block_count == 0;
664
- }
665
-
666
- pub fn index_block_full(builder: Builder) bool {
667
- assert(builder.data_block_count <= data_block_count_max);
668
- return builder.data_block_count == data_block_count_max;
669
- }
670
-
671
- const IndexFinishOptions = struct {
672
- cluster: u32,
673
- address: u64,
674
- snapshot_min: u64,
675
- };
676
-
677
- pub fn index_block_finish(builder: *Builder, options: IndexFinishOptions) TableInfo {
678
- assert(options.address > 0);
679
- assert(builder.filter_block_empty());
680
- assert(builder.data_block_empty());
681
- assert(builder.data_block_count > 0);
682
- assert(builder.value == 0);
683
- assert(builder.data_blocks_in_filter == 0);
684
- assert(builder.filter_block_count == div_ceil(
685
- builder.data_block_count,
686
- filter.data_block_count_max,
687
- ));
688
-
689
- const index_block = builder.index_block;
690
-
691
- const header_bytes = index_block[0..@sizeOf(vsr.Header)];
692
- const header = mem.bytesAsValue(vsr.Header, header_bytes);
693
-
694
- header.* = .{
695
- .cluster = options.cluster,
696
- .op = options.address,
697
- .commit = builder.filter_block_count,
698
- .request = builder.data_block_count,
699
- .timestamp = options.snapshot_min,
700
- .size = index.size,
701
- .command = .block,
702
- .operation = BlockType.index.operation(),
703
- };
704
- header.set_checksum_body(index_block[@sizeOf(vsr.Header)..header.size]);
705
- header.set_checksum();
706
-
707
- const info: TableInfo = .{
708
- .checksum = header.checksum,
709
- .address = options.address,
710
- .snapshot_min = options.snapshot_min,
711
- .key_min = builder.key_min,
712
- .key_max = builder.key_max,
713
- };
714
-
715
- assert(info.snapshot_max == math.maxInt(u64));
716
-
717
- // Reset the builder to its initial state, leaving the buffers untouched.
718
- builder.* = .{
719
- .key_min = undefined,
720
- .key_max = undefined,
721
- .index_block = builder.index_block,
722
- .filter_block = builder.filter_block,
723
- .data_block = builder.data_block,
724
- };
725
-
726
- return info;
727
- }
728
- };
729
-
730
- pub inline fn index_block_address(index_block: BlockPtrConst) u64 {
731
- const header = mem.bytesAsValue(vsr.Header, index_block[0..@sizeOf(vsr.Header)]);
732
- const address = header.op;
733
- assert(address > 0);
734
- return address;
735
- }
736
-
737
- pub inline fn index_data_keys(index_block: BlockPtr) []Key {
738
- return mem.bytesAsSlice(Key, index_block[index.keys_offset..][0..index.keys_size]);
739
- }
740
-
741
- pub inline fn index_data_keys_used(index_block: BlockPtrConst) []const Key {
742
- const slice = mem.bytesAsSlice(
743
- Key,
744
- index_block[index.keys_offset..][0..index.keys_size],
745
- );
746
- return slice[0..index_data_blocks_used(index_block)];
747
- }
748
-
749
- pub inline fn index_data_addresses(index_block: BlockPtr) []u64 {
750
- return mem.bytesAsSlice(
751
- u64,
752
- index_block[index.data_addresses_offset..][0..index.data_addresses_size],
753
- );
754
- }
755
-
756
- pub inline fn index_data_addresses_used(index_block: BlockPtrConst) []const u64 {
757
- const slice = mem.bytesAsSlice(
758
- u64,
759
- index_block[index.data_addresses_offset..][0..index.data_addresses_size],
760
- );
761
- return slice[0..index_data_blocks_used(index_block)];
762
- }
763
-
764
- pub inline fn index_data_checksums(index_block: BlockPtr) []u128 {
765
- return mem.bytesAsSlice(
766
- u128,
767
- index_block[index.data_checksums_offset..][0..index.data_checksums_size],
768
- );
769
- }
770
-
771
- pub inline fn index_data_checksums_used(index_block: BlockPtrConst) []const u128 {
772
- const slice = mem.bytesAsSlice(
773
- u128,
774
- index_block[index.data_checksums_offset..][0..index.data_checksums_size],
775
- );
776
- return slice[0..index_data_blocks_used(index_block)];
777
- }
778
-
779
- inline fn index_filter_addresses(index_block: BlockPtr) []u64 {
780
- return mem.bytesAsSlice(
781
- u64,
782
- index_block[index.filter_addresses_offset..][0..index.filter_addresses_size],
783
- );
784
- }
785
-
786
- pub inline fn index_filter_addresses_used(index_block: BlockPtrConst) []const u64 {
787
- const slice = mem.bytesAsSlice(
788
- u64,
789
- index_block[index.filter_addresses_offset..][0..index.filter_addresses_size],
790
- );
791
- return slice[0..index_filter_blocks_used(index_block)];
792
- }
793
-
794
- inline fn index_filter_checksums(index_block: BlockPtr) []u128 {
795
- return mem.bytesAsSlice(
796
- u128,
797
- index_block[index.filter_checksums_offset..][0..index.filter_checksums_size],
798
- );
799
- }
800
-
801
- pub inline fn index_filter_checksums_used(index_block: BlockPtrConst) []const u128 {
802
- const slice = mem.bytesAsSlice(
803
- u128,
804
- index_block[index.filter_checksums_offset..][0..index.filter_checksums_size],
805
- );
806
- return slice[0..index_filter_blocks_used(index_block)];
807
- }
808
-
809
- inline fn index_blocks_used(index_block: BlockPtrConst) u32 {
810
- return index_block_count + index_filter_blocks_used(index_block) +
811
- index_data_blocks_used(index_block);
812
- }
813
-
814
- inline fn index_filter_blocks_used(index_block: BlockPtrConst) u32 {
815
- const header = mem.bytesAsValue(vsr.Header, index_block[0..@sizeOf(vsr.Header)]);
816
- const value = @intCast(u32, header.commit);
817
- assert(value > 0);
818
- assert(value <= filter_block_count_max);
819
- return value;
820
- }
821
-
822
- pub inline fn index_data_blocks_used(index_block: BlockPtrConst) u32 {
823
- const header = mem.bytesAsValue(vsr.Header, index_block[0..@sizeOf(vsr.Header)]);
824
- const value = @intCast(u32, header.request);
825
- assert(value > 0);
826
- assert(value <= data_block_count_max);
827
- return value;
828
- }
829
-
830
- /// Returns the zero-based index of the data block that may contain the key.
831
- /// May be called on an index block only when the key is in range of the table.
832
- inline fn index_data_block_for_key(index_block: BlockPtrConst, key: Key) u32 {
833
- // Because we store key_max in the index block we can use the raw binary search
834
- // here and avoid the extra comparison. If the search finds an exact match, we
835
- // want to return that data block. If the search does not find an exact match
836
- // it returns the index of the next greatest key, which again is the index of the
837
- // data block that may contain the key.
838
- const data_block_index = binary_search.binary_search_keys_raw(
839
- Key,
840
- compare_keys,
841
- Table.index_data_keys_used(index_block),
842
- key,
843
- .{},
844
- );
845
- assert(data_block_index < index_data_blocks_used(index_block));
846
- return data_block_index;
847
- }
848
-
849
- pub const IndexBlocks = struct {
850
- filter_block_address: u64,
851
- filter_block_checksum: u128,
852
- data_block_address: u64,
853
- data_block_checksum: u128,
854
- };
855
-
856
- /// Returns all data stored in the index block relating to a given key.
857
- /// May be called on an index block only when the key is in range of the table.
858
- pub inline fn index_blocks_for_key(index_block: BlockPtrConst, key: Key) IndexBlocks {
859
- const d = Table.index_data_block_for_key(index_block, key);
860
- const f = @divFloor(d, filter.data_block_count_max);
861
-
862
- return .{
863
- .filter_block_address = index_filter_addresses_used(index_block)[f],
864
- .filter_block_checksum = index_filter_checksums_used(index_block)[f],
865
- .data_block_address = index_data_addresses_used(index_block)[d],
866
- .data_block_checksum = index_data_checksums_used(index_block)[d],
867
- };
868
- }
869
-
870
- inline fn data_block_values(data_block: BlockPtr) []Value {
871
- return mem.bytesAsSlice(
872
- Value,
873
- data_block[data.values_offset..][0..data.values_size],
874
- );
875
- }
876
-
877
- pub inline fn data_block_values_used(data_block: BlockPtrConst) []const Value {
878
- const header = mem.bytesAsValue(vsr.Header, data_block[0..@sizeOf(vsr.Header)]);
879
- // TODO we should be able to cross-check this with the header size
880
- // for more safety.
881
- const used = @intCast(u32, header.request);
882
- assert(used <= data.block_value_count_max);
883
- const slice = mem.bytesAsSlice(
884
- Value,
885
- data_block[data.values_offset..][0..data.values_size],
886
- );
887
- return slice[0..used];
888
- }
889
-
890
- pub inline fn block_address(block: BlockPtrConst) u64 {
891
- const header = mem.bytesAsValue(vsr.Header, block[0..@sizeOf(vsr.Header)]);
892
- const address = header.op;
893
- assert(address > 0);
894
- return address;
895
- }
896
-
897
- pub inline fn filter_block_filter(filter_block: BlockPtr) []u8 {
898
- return filter_block[filter.filter_offset..][0..filter.filter_size];
899
- }
900
-
901
- pub inline fn filter_block_filter_const(filter_block: BlockPtrConst) []const u8 {
902
- return filter_block[filter.filter_offset..][0..filter.filter_size];
903
- }
904
-
905
- pub fn data_block_search(data_block: BlockPtrConst, key: Key) ?*const Value {
906
- const values = blk: {
907
- if (data.key_count == 0) break :blk data_block_values_used(data_block);
908
-
909
- assert(@divExact(data.key_layout_size, key_size) == data.key_count + 1);
910
- const key_layout_bytes = @alignCast(
911
- @alignOf(Key),
912
- data_block[data.key_layout_offset..][0..data.key_layout_size],
913
- );
914
- const key_layout = mem.bytesAsValue([data.key_count + 1]Key, key_layout_bytes);
915
-
916
- const e = eytzinger(data.key_count, data.block_value_count_max);
917
- break :blk e.search_values(
918
- Key,
919
- Value,
920
- compare_keys,
921
- key_layout,
922
- data_block_values_used(data_block),
923
- key,
924
- );
925
- };
926
-
927
- const result = binary_search.binary_search_values(
928
- Key,
929
- Value,
930
- key_from_value,
931
- compare_keys,
932
- values,
933
- key,
934
- .{},
935
- );
936
- if (result.exact) {
937
- const value = &values[result.index];
938
- if (constants.verify) {
939
- assert(compare_keys(key, key_from_value(value)) == .eq);
940
- }
941
- return value;
942
- }
943
-
944
- if (constants.verify) {
945
- for (data_block_values_used(data_block)) |*value| {
946
- assert(compare_keys(key, key_from_value(value)) != .eq);
947
- }
948
- }
949
-
950
- return null;
951
- }
952
-
953
- pub fn verify(
954
- comptime Storage: type,
955
- storage: *Storage,
956
- index_address: u64,
957
- key_min: ?Key,
958
- key_max: ?Key,
959
- ) void {
960
- if (Storage != @import("../testing/storage.zig").Storage)
961
- // Too complicated to do async verification
962
- return;
963
-
964
- const index_block = storage.grid_block(index_address);
965
- const addresses = index_data_addresses(index_block);
966
- const data_blocks_used = index_data_blocks_used(index_block);
967
- var data_block_index: usize = 0;
968
- while (data_block_index < data_blocks_used) : (data_block_index += 1) {
969
- const address = addresses[data_block_index];
970
- const data_block = storage.grid_block(address);
971
- const values = data_block_values_used(data_block);
972
- if (values.len > 0) {
973
- if (data_block_index == 0) {
974
- assert(key_min == null or
975
- compare_keys(key_min.?, key_from_value(&values[0])) == .eq);
976
- }
977
- if (data_block_index == data_blocks_used - 1) {
978
- assert(key_max == null or
979
- compare_keys(key_from_value(&values[values.len - 1]), key_max.?) == .eq);
980
- }
981
- var a = &values[0];
982
- for (values[1..]) |*b| {
983
- assert(compare_keys(key_from_value(a), key_from_value(b)) == .lt);
984
- a = b;
985
- }
986
- }
987
- }
988
- }
989
- };
990
- }
991
-
992
- test "Table" {
993
- const Key = @import("composite_key.zig").CompositeKey(u128);
994
-
995
- const Table = TableType(
996
- Key,
997
- Key.Value,
998
- Key.compare_keys,
999
- Key.key_from_value,
1000
- Key.sentinel_key,
1001
- Key.tombstone,
1002
- Key.tombstone_from_key,
1003
- 1, // Doesn't matter for this test.
1004
- .general,
1005
- );
1006
-
1007
- _ = Table;
1008
- std.testing.refAllDecls(Table.Builder);
1009
- }