tigerbeetle-node 0.11.13 → 0.12.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (146)
  1. package/dist/bin/aarch64-linux-gnu/client.node +0 -0
  2. package/dist/bin/aarch64-linux-musl/client.node +0 -0
  3. package/dist/bin/aarch64-macos/client.node +0 -0
  4. package/dist/bin/x86_64-linux-gnu/client.node +0 -0
  5. package/dist/bin/x86_64-linux-musl/client.node +0 -0
  6. package/dist/bin/x86_64-macos/client.node +0 -0
  7. package/dist/index.js +33 -1
  8. package/dist/index.js.map +1 -1
  9. package/package-lock.json +66 -0
  10. package/package.json +6 -16
  11. package/src/index.ts +56 -1
  12. package/src/node.zig +9 -9
  13. package/dist/.client.node.sha256 +0 -1
  14. package/scripts/build_lib.sh +0 -61
  15. package/scripts/download_node_headers.sh +0 -32
  16. package/src/tigerbeetle/scripts/benchmark.bat +0 -55
  17. package/src/tigerbeetle/scripts/benchmark.sh +0 -66
  18. package/src/tigerbeetle/scripts/confirm_image.sh +0 -44
  19. package/src/tigerbeetle/scripts/fail_on_diff.sh +0 -9
  20. package/src/tigerbeetle/scripts/fuzz_loop.sh +0 -15
  21. package/src/tigerbeetle/scripts/fuzz_loop_hash_log.sh +0 -12
  22. package/src/tigerbeetle/scripts/fuzz_unique_errors.sh +0 -7
  23. package/src/tigerbeetle/scripts/install.bat +0 -7
  24. package/src/tigerbeetle/scripts/install.sh +0 -21
  25. package/src/tigerbeetle/scripts/install_zig.bat +0 -113
  26. package/src/tigerbeetle/scripts/install_zig.sh +0 -90
  27. package/src/tigerbeetle/scripts/lint.zig +0 -199
  28. package/src/tigerbeetle/scripts/pre-commit.sh +0 -9
  29. package/src/tigerbeetle/scripts/scripts/benchmark.bat +0 -55
  30. package/src/tigerbeetle/scripts/scripts/benchmark.sh +0 -66
  31. package/src/tigerbeetle/scripts/scripts/confirm_image.sh +0 -44
  32. package/src/tigerbeetle/scripts/scripts/fail_on_diff.sh +0 -9
  33. package/src/tigerbeetle/scripts/scripts/fuzz_loop.sh +0 -15
  34. package/src/tigerbeetle/scripts/scripts/fuzz_loop_hash_log.sh +0 -12
  35. package/src/tigerbeetle/scripts/scripts/fuzz_unique_errors.sh +0 -7
  36. package/src/tigerbeetle/scripts/scripts/install.bat +0 -7
  37. package/src/tigerbeetle/scripts/scripts/install.sh +0 -21
  38. package/src/tigerbeetle/scripts/scripts/install_zig.bat +0 -113
  39. package/src/tigerbeetle/scripts/scripts/install_zig.sh +0 -90
  40. package/src/tigerbeetle/scripts/scripts/lint.zig +0 -199
  41. package/src/tigerbeetle/scripts/scripts/pre-commit.sh +0 -9
  42. package/src/tigerbeetle/scripts/scripts/shellcheck.sh +0 -5
  43. package/src/tigerbeetle/scripts/scripts/tests_on_alpine.sh +0 -10
  44. package/src/tigerbeetle/scripts/scripts/tests_on_ubuntu.sh +0 -14
  45. package/src/tigerbeetle/scripts/scripts/upgrade_ubuntu_kernel.sh +0 -48
  46. package/src/tigerbeetle/scripts/scripts/validate_docs.sh +0 -23
  47. package/src/tigerbeetle/scripts/scripts/vr_state_enumerate +0 -46
  48. package/src/tigerbeetle/scripts/shellcheck.sh +0 -5
  49. package/src/tigerbeetle/scripts/tests_on_alpine.sh +0 -10
  50. package/src/tigerbeetle/scripts/tests_on_ubuntu.sh +0 -14
  51. package/src/tigerbeetle/scripts/upgrade_ubuntu_kernel.sh +0 -48
  52. package/src/tigerbeetle/scripts/validate_docs.sh +0 -23
  53. package/src/tigerbeetle/scripts/vr_state_enumerate +0 -46
  54. package/src/tigerbeetle/src/benchmark.zig +0 -336
  55. package/src/tigerbeetle/src/config.zig +0 -233
  56. package/src/tigerbeetle/src/constants.zig +0 -428
  57. package/src/tigerbeetle/src/ewah.zig +0 -286
  58. package/src/tigerbeetle/src/ewah_benchmark.zig +0 -120
  59. package/src/tigerbeetle/src/ewah_fuzz.zig +0 -130
  60. package/src/tigerbeetle/src/fifo.zig +0 -120
  61. package/src/tigerbeetle/src/io/benchmark.zig +0 -213
  62. package/src/tigerbeetle/src/io/darwin.zig +0 -814
  63. package/src/tigerbeetle/src/io/linux.zig +0 -1071
  64. package/src/tigerbeetle/src/io/test.zig +0 -643
  65. package/src/tigerbeetle/src/io/windows.zig +0 -1183
  66. package/src/tigerbeetle/src/io.zig +0 -34
  67. package/src/tigerbeetle/src/iops.zig +0 -107
  68. package/src/tigerbeetle/src/lsm/README.md +0 -308
  69. package/src/tigerbeetle/src/lsm/binary_search.zig +0 -341
  70. package/src/tigerbeetle/src/lsm/bloom_filter.zig +0 -125
  71. package/src/tigerbeetle/src/lsm/compaction.zig +0 -603
  72. package/src/tigerbeetle/src/lsm/composite_key.zig +0 -77
  73. package/src/tigerbeetle/src/lsm/direction.zig +0 -11
  74. package/src/tigerbeetle/src/lsm/eytzinger.zig +0 -587
  75. package/src/tigerbeetle/src/lsm/eytzinger_benchmark.zig +0 -330
  76. package/src/tigerbeetle/src/lsm/forest.zig +0 -205
  77. package/src/tigerbeetle/src/lsm/forest_fuzz.zig +0 -450
  78. package/src/tigerbeetle/src/lsm/grid.zig +0 -573
  79. package/src/tigerbeetle/src/lsm/groove.zig +0 -1036
  80. package/src/tigerbeetle/src/lsm/k_way_merge.zig +0 -474
  81. package/src/tigerbeetle/src/lsm/level_iterator.zig +0 -332
  82. package/src/tigerbeetle/src/lsm/manifest.zig +0 -617
  83. package/src/tigerbeetle/src/lsm/manifest_level.zig +0 -878
  84. package/src/tigerbeetle/src/lsm/manifest_log.zig +0 -789
  85. package/src/tigerbeetle/src/lsm/manifest_log_fuzz.zig +0 -691
  86. package/src/tigerbeetle/src/lsm/merge_iterator.zig +0 -106
  87. package/src/tigerbeetle/src/lsm/node_pool.zig +0 -235
  88. package/src/tigerbeetle/src/lsm/posted_groove.zig +0 -381
  89. package/src/tigerbeetle/src/lsm/segmented_array.zig +0 -1329
  90. package/src/tigerbeetle/src/lsm/segmented_array_benchmark.zig +0 -148
  91. package/src/tigerbeetle/src/lsm/segmented_array_fuzz.zig +0 -9
  92. package/src/tigerbeetle/src/lsm/set_associative_cache.zig +0 -850
  93. package/src/tigerbeetle/src/lsm/table.zig +0 -1009
  94. package/src/tigerbeetle/src/lsm/table_immutable.zig +0 -192
  95. package/src/tigerbeetle/src/lsm/table_iterator.zig +0 -340
  96. package/src/tigerbeetle/src/lsm/table_mutable.zig +0 -203
  97. package/src/tigerbeetle/src/lsm/test.zig +0 -439
  98. package/src/tigerbeetle/src/lsm/tree.zig +0 -1169
  99. package/src/tigerbeetle/src/lsm/tree_fuzz.zig +0 -479
  100. package/src/tigerbeetle/src/message_bus.zig +0 -1013
  101. package/src/tigerbeetle/src/message_pool.zig +0 -156
  102. package/src/tigerbeetle/src/ring_buffer.zig +0 -399
  103. package/src/tigerbeetle/src/simulator.zig +0 -580
  104. package/src/tigerbeetle/src/state_machine/auditor.zig +0 -578
  105. package/src/tigerbeetle/src/state_machine/workload.zig +0 -883
  106. package/src/tigerbeetle/src/state_machine.zig +0 -2099
  107. package/src/tigerbeetle/src/static_allocator.zig +0 -65
  108. package/src/tigerbeetle/src/stdx.zig +0 -171
  109. package/src/tigerbeetle/src/storage.zig +0 -393
  110. package/src/tigerbeetle/src/testing/cluster/message_bus.zig +0 -82
  111. package/src/tigerbeetle/src/testing/cluster/network.zig +0 -237
  112. package/src/tigerbeetle/src/testing/cluster/state_checker.zig +0 -169
  113. package/src/tigerbeetle/src/testing/cluster/storage_checker.zig +0 -202
  114. package/src/tigerbeetle/src/testing/cluster.zig +0 -444
  115. package/src/tigerbeetle/src/testing/fuzz.zig +0 -140
  116. package/src/tigerbeetle/src/testing/hash_log.zig +0 -66
  117. package/src/tigerbeetle/src/testing/id.zig +0 -99
  118. package/src/tigerbeetle/src/testing/packet_simulator.zig +0 -374
  119. package/src/tigerbeetle/src/testing/priority_queue.zig +0 -645
  120. package/src/tigerbeetle/src/testing/reply_sequence.zig +0 -139
  121. package/src/tigerbeetle/src/testing/state_machine.zig +0 -250
  122. package/src/tigerbeetle/src/testing/storage.zig +0 -757
  123. package/src/tigerbeetle/src/testing/table.zig +0 -247
  124. package/src/tigerbeetle/src/testing/time.zig +0 -84
  125. package/src/tigerbeetle/src/tigerbeetle.zig +0 -227
  126. package/src/tigerbeetle/src/time.zig +0 -112
  127. package/src/tigerbeetle/src/tracer.zig +0 -529
  128. package/src/tigerbeetle/src/unit_tests.zig +0 -40
  129. package/src/tigerbeetle/src/vopr.zig +0 -495
  130. package/src/tigerbeetle/src/vsr/README.md +0 -209
  131. package/src/tigerbeetle/src/vsr/client.zig +0 -544
  132. package/src/tigerbeetle/src/vsr/clock.zig +0 -855
  133. package/src/tigerbeetle/src/vsr/journal.zig +0 -2415
  134. package/src/tigerbeetle/src/vsr/journal_format_fuzz.zig +0 -111
  135. package/src/tigerbeetle/src/vsr/marzullo.zig +0 -309
  136. package/src/tigerbeetle/src/vsr/replica.zig +0 -6616
  137. package/src/tigerbeetle/src/vsr/replica_format.zig +0 -219
  138. package/src/tigerbeetle/src/vsr/superblock.zig +0 -1631
  139. package/src/tigerbeetle/src/vsr/superblock_client_table.zig +0 -256
  140. package/src/tigerbeetle/src/vsr/superblock_free_set.zig +0 -929
  141. package/src/tigerbeetle/src/vsr/superblock_free_set_fuzz.zig +0 -334
  142. package/src/tigerbeetle/src/vsr/superblock_fuzz.zig +0 -390
  143. package/src/tigerbeetle/src/vsr/superblock_manifest.zig +0 -615
  144. package/src/tigerbeetle/src/vsr/superblock_quorums.zig +0 -394
  145. package/src/tigerbeetle/src/vsr/superblock_quorums_fuzz.zig +0 -314
  146. package/src/tigerbeetle/src/vsr.zig +0 -1425
@@ -1,573 +0,0 @@
1
- const std = @import("std");
2
- const assert = std.debug.assert;
3
- const mem = std.mem;
4
-
5
- const constants = @import("../constants.zig");
6
- const vsr = @import("../vsr.zig");
7
- const free_set = @import("../vsr/superblock_free_set.zig");
8
-
9
- const SuperBlockType = vsr.SuperBlockType;
10
- const FIFO = @import("../fifo.zig").FIFO;
11
- const IOPS = @import("../iops.zig").IOPS;
12
- const SetAssociativeCache = @import("set_associative_cache.zig").SetAssociativeCache;
13
- const stdx = @import("../stdx.zig");
14
-
15
- const log = std.log.scoped(.grid);
16
-
17
- /// A block's type is implicitly determined by how its address is stored (e.g. in the index block).
18
- /// BlockType is an additional check that a block has the expected type on read.
19
- ///
20
- /// The BlockType is stored in the block's `header.operation`.
21
- pub const BlockType = enum(u8) {
22
- /// Unused; verifies that no block is written with a default 0 operation.
23
- reserved = 0,
24
-
25
- manifest = 1,
26
- index = 2,
27
- filter = 3,
28
- data = 4,
29
-
30
- pub inline fn from(vsr_operation: vsr.Operation) BlockType {
31
- return @intToEnum(BlockType, @enumToInt(vsr_operation));
32
- }
33
-
34
- pub inline fn operation(block_type: BlockType) vsr.Operation {
35
- return @intToEnum(vsr.Operation, @enumToInt(block_type));
36
- }
37
- };
38
-
39
- // Leave this outside GridType so we can call it from modules that don't know about Storage.
40
- pub fn alloc_block(
41
- allocator: mem.Allocator,
42
- ) !*align(constants.sector_size) [constants.block_size]u8 {
43
- const block = try allocator.alignedAlloc(u8, constants.sector_size, constants.block_size);
44
- mem.set(u8, block, 0);
45
- return block[0..constants.block_size];
46
- }
47
-
48
- /// The Grid provides access to on-disk blocks (blobs of `block_size` bytes).
49
- /// Each block is identified by an "address" (`u64`, beginning at 1).
50
- ///
51
- /// Recently/frequently-used blocks are transparently cached in memory.
52
- pub fn GridType(comptime Storage: type) type {
53
- const block_size = constants.block_size;
54
- const SuperBlock = SuperBlockType(Storage);
55
-
56
- return struct {
57
- const Grid = @This();
58
-
59
- // TODO put more thought into how low/high these limits should be.
60
- pub const read_iops_max = 16;
61
- pub const write_iops_max = 16;
62
-
63
- pub const BlockPtr = *align(constants.sector_size) [block_size]u8;
64
- pub const BlockPtrConst = *align(constants.sector_size) const [block_size]u8;
65
- pub const Reservation = free_set.Reservation;
66
-
67
- // Grid just reuses the Storage's NextTick abstraction for simplicity.
68
- pub const NextTick = Storage.NextTick;
69
-
70
- pub const Write = struct {
71
- callback: fn (*Grid.Write) void,
72
- address: u64,
73
- block: *BlockPtr,
74
-
75
- /// Link for the Grid.write_queue linked list.
76
- next: ?*Write = null,
77
- };
78
-
79
- const WriteIOP = struct {
80
- grid: *Grid,
81
- completion: Storage.Write,
82
- write: *Write,
83
- };
84
-
85
- pub const Read = struct {
86
- callback: fn (*Grid.Read, BlockPtrConst) void,
87
- address: u64,
88
- checksum: u128,
89
- block_type: BlockType,
90
-
91
- pending: ReadPending = .{},
92
- resolves: FIFO(ReadPending) = .{},
93
-
94
- grid: *Grid,
95
- next_tick: Grid.NextTick = undefined,
96
-
97
- /// Link for Grid.read_queue/Grid.read_recovery_queue linked lists.
98
- next: ?*Read = null,
99
- };
100
-
101
- const ReadPending = struct {
102
- /// Link for Read.resolves linked lists.
103
- next: ?*ReadPending = null,
104
- };
105
-
106
- const ReadIOP = struct {
107
- completion: Storage.Read,
108
- read: *Read,
109
- };
110
-
111
- const cache_interface = struct {
112
- inline fn address_from_address(address: *const u64) u64 {
113
- return address.*;
114
- }
115
-
116
- inline fn hash_address(address: u64) u64 {
117
- assert(address > 0);
118
- return std.hash.Wyhash.hash(0, mem.asBytes(&address));
119
- }
120
-
121
- inline fn equal_addresses(a: u64, b: u64) bool {
122
- return a == b;
123
- }
124
- };
125
-
126
- const set_associative_cache_ways = 16;
127
-
128
- const Cache = SetAssociativeCache(
129
- u64,
130
- u64,
131
- cache_interface.address_from_address,
132
- cache_interface.hash_address,
133
- cache_interface.equal_addresses,
134
- .{
135
- .ways = set_associative_cache_ways,
136
- .value_alignment = @alignOf(u64),
137
- },
138
- );
139
-
140
- superblock: *SuperBlock,
141
-
142
- // Each entry in cache has a corresponding block.
143
- cache_blocks: []BlockPtr,
144
- cache: Cache,
145
-
146
- write_iops: IOPS(WriteIOP, write_iops_max) = .{},
147
- write_queue: FIFO(Write) = .{},
148
-
149
- // Each read_iops has a corresponding block.
150
- read_iop_blocks: [read_iops_max]BlockPtr,
151
- read_iops: IOPS(ReadIOP, read_iops_max) = .{},
152
- read_queue: FIFO(Read) = .{},
153
-
154
- // List of Read.pending's which are in `read_queue` but also waiting for a free `read_iops`.
155
- read_pending_queue: FIFO(ReadPending) = .{},
156
- // TODO interrogate this list and do recovery in Replica.tick().
157
- read_recovery_queue: FIFO(Read) = .{},
158
- // True if there's a read that's resolving callbacks. If so, the read cache must not be invalidated.
159
- read_resolving: bool = false,
160
-
161
- pub fn init(allocator: mem.Allocator, superblock: *SuperBlock) !Grid {
162
- // TODO Determine this at runtime based on runtime configured maximum
163
- // memory usage of tigerbeetle.
164
- const cache_blocks_count = 2048;
165
-
166
- const cache_blocks = try allocator.alloc(BlockPtr, cache_blocks_count);
167
- errdefer allocator.free(cache_blocks);
168
-
169
- for (cache_blocks) |*cache_block, i| {
170
- errdefer for (cache_blocks[0..i]) |block| allocator.free(block);
171
- cache_block.* = try alloc_block(allocator);
172
- }
173
-
174
- var cache = try Cache.init(allocator, cache_blocks_count);
175
- errdefer cache.deinit(allocator);
176
-
177
- var read_iop_blocks: [read_iops_max]BlockPtr = undefined;
178
-
179
- for (&read_iop_blocks) |*read_iop_block, i| {
180
- errdefer for (read_iop_blocks[0..i]) |block| allocator.free(block);
181
- read_iop_block.* = try alloc_block(allocator);
182
- }
183
-
184
- return Grid{
185
- .superblock = superblock,
186
- .cache_blocks = cache_blocks,
187
- .cache = cache,
188
- .read_iop_blocks = read_iop_blocks,
189
- };
190
- }
191
-
192
- pub fn deinit(grid: *Grid, allocator: mem.Allocator) void {
193
- for (&grid.read_iop_blocks) |block| allocator.free(block);
194
-
195
- grid.cache.deinit(allocator);
196
-
197
- for (grid.cache_blocks) |block| allocator.free(block);
198
- allocator.free(grid.cache_blocks);
199
-
200
- grid.* = undefined;
201
- }
202
-
203
- pub fn on_next_tick(
204
- grid: *Grid,
205
- callback: fn (*Grid.NextTick) void,
206
- next_tick: *Grid.NextTick,
207
- ) void {
208
- grid.superblock.storage.on_next_tick(callback, next_tick);
209
- }
210
-
211
- /// Returning null indicates that there are not enough free blocks to fill the reservation.
212
- pub fn reserve(grid: *Grid, blocks_count: usize) ?Reservation {
213
- return grid.superblock.free_set.reserve(blocks_count);
214
- }
215
-
216
- /// Forfeit a reservation.
217
- pub fn forfeit(grid: *Grid, reservation: Reservation) void {
218
- return grid.superblock.free_set.forfeit(reservation);
219
- }
220
-
221
- /// Returns a just-allocated block.
222
- /// The caller is responsible for not acquiring more blocks than they reserved.
223
- pub fn acquire(grid: *Grid, reservation: Reservation) u64 {
224
- return grid.superblock.free_set.acquire(reservation).?;
225
- }
226
-
227
- /// This function should be used to release addresses, instead of release()
228
- /// on the free set directly, as this also demotes the address within the block cache.
229
- /// This reduces conflict misses in the block cache, by freeing ways soon after they are
230
- /// released.
231
- ///
232
- /// This does not remove the block from the cache — the block can be read until the next
233
- /// checkpoint.
234
- ///
235
- /// Asserts that the address is not currently being read from or written to.
236
- pub fn release(grid: *Grid, address: u64) void {
237
- grid.assert_not_writing(address, null);
238
- grid.assert_not_reading(address, null);
239
-
240
- grid.cache.demote(address);
241
- grid.superblock.free_set.release(address);
242
- }
243
-
244
- /// Assert that the address is not currently being written to.
245
- /// Assert that the block pointer is not being used for any write if non-null.
246
- fn assert_not_writing(grid: *Grid, address: u64, block: ?BlockPtrConst) void {
247
- assert(address > 0);
248
- {
249
- var it = grid.write_queue.peek();
250
- while (it) |queued_write| : (it = queued_write.next) {
251
- assert(address != queued_write.address);
252
- assert(block != queued_write.block.*);
253
- }
254
- }
255
- {
256
- var it = grid.write_iops.iterate();
257
- while (it.next()) |iop| {
258
- assert(address != iop.write.address);
259
- assert(block != iop.write.block.*);
260
- }
261
- }
262
- }
263
-
264
- /// Assert that the address is not currently being read from.
265
- /// Assert that the block pointer is not being used for any read if non-null.
266
- fn assert_not_reading(grid: *Grid, address: u64, block: ?BlockPtrConst) void {
267
- assert(address > 0);
268
- for ([_]*const FIFO(Read){
269
- &grid.read_queue,
270
- &grid.read_recovery_queue,
271
- }) |queue| {
272
- var it = queue.peek();
273
- while (it) |queued_read| : (it = queued_read.next) {
274
- assert(address != queued_read.address);
275
- }
276
- }
277
- {
278
- var it = grid.read_iops.iterate();
279
- while (it.next()) |iop| {
280
- assert(address != iop.read.address);
281
- const iop_block = grid.read_iop_blocks[grid.read_iops.index(iop)];
282
- assert(block != iop_block);
283
- }
284
- }
285
- }
286
-
287
- /// NOTE: This will consume `block` and replace it with a fresh block.
288
- pub fn write_block(
289
- grid: *Grid,
290
- callback: fn (*Grid.Write) void,
291
- write: *Grid.Write,
292
- block: *BlockPtr,
293
- address: u64,
294
- ) void {
295
- assert(address > 0);
296
- grid.assert_not_writing(address, block.*);
297
- grid.assert_not_reading(address, block.*);
298
-
299
- if (constants.verify) {
300
- for (grid.cache_blocks) |cache_block| {
301
- assert(cache_block != block.*);
302
- }
303
- }
304
-
305
- assert(grid.superblock.opened);
306
- assert(!grid.superblock.free_set.is_free(address));
307
-
308
- write.* = .{
309
- .callback = callback,
310
- .address = address,
311
- .block = block,
312
- };
313
-
314
- const iop = grid.write_iops.acquire() orelse {
315
- grid.write_queue.push(write);
316
- return;
317
- };
318
-
319
- grid.write_block_with(iop, write);
320
- }
321
-
322
- fn write_block_with(grid: *Grid, iop: *WriteIOP, write: *Write) void {
323
- iop.* = .{
324
- .grid = grid,
325
- .completion = undefined,
326
- .write = write,
327
- };
328
-
329
- grid.superblock.storage.write_sectors(
330
- write_block_callback,
331
- &iop.completion,
332
- write.block.*,
333
- .grid,
334
- block_offset(write.address),
335
- );
336
- }
337
-
338
- fn write_block_callback(completion: *Storage.Write) void {
339
- const iop = @fieldParentPtr(WriteIOP, "completion", completion);
340
-
341
- // We must copy these values to the stack as they will be overwritten
342
- // when we release the iop and potentially start a queued write.
343
- const grid = iop.grid;
344
- const completed_write = iop.write;
345
-
346
- // We can only update the cache if the Grid is not resolving callbacks with a cache block.
347
- assert(!grid.read_resolving);
348
-
349
- // Insert the write block into the cache, and give the evicted block to the writer.
350
- const cache_index = grid.cache.insert_index(&completed_write.address);
351
- const cache_block = &grid.cache_blocks[cache_index];
352
- std.mem.swap(BlockPtr, cache_block, completed_write.block);
353
- std.mem.set(u8, completed_write.block.*, 0);
354
-
355
- // Start a queued write if possible *before* calling the completed
356
- // write's callback. This ensures that if the callback calls
357
- // Grid.write_block() it doesn't preempt the queue.
358
- if (grid.write_queue.pop()) |queued_write| {
359
- grid.write_block_with(iop, queued_write);
360
- } else {
361
- grid.write_iops.release(iop);
362
- }
363
-
364
- // This call must come after (logically) releasing the IOP. Otherwise we risk tripping
365
- // assertions forbidding concurrent writes using the same block/address
366
- // if the callback calls write_block().
367
- completed_write.callback(completed_write);
368
- }
369
-
370
- /// This function transparently handles recovery if the checksum fails.
371
- /// If necessary, this read will be added to a linked list, which Replica can then
372
- /// interrogate each tick(). The callback passed to this function won't be called until the
373
- /// block has been recovered.
374
- pub fn read_block(
375
- grid: *Grid,
376
- callback: fn (*Grid.Read, BlockPtrConst) void,
377
- read: *Grid.Read,
378
- address: u64,
379
- checksum: u128,
380
- block_type: BlockType,
381
- ) void {
382
- assert(address > 0);
383
- assert(block_type != .reserved);
384
- grid.assert_not_writing(address, null);
385
-
386
- assert(grid.superblock.opened);
387
- assert(!grid.superblock.free_set.is_free(address));
388
-
389
- read.* = .{
390
- .callback = callback,
391
- .address = address,
392
- .checksum = checksum,
393
- .block_type = block_type,
394
- .grid = grid,
395
- };
396
-
397
- // Check if a read is already processing/recovering and merge with it.
398
- for ([_]*const FIFO(Read){
399
- &grid.read_queue,
400
- &grid.read_recovery_queue,
401
- }) |queue| {
402
- var it = queue.peek();
403
- while (it) |queued_read| : (it = queued_read.next) {
404
- if (address == queued_read.address) {
405
- assert(checksum == queued_read.checksum);
406
- assert(block_type == queued_read.block_type);
407
- queued_read.resolves.push(&read.pending);
408
- return;
409
- }
410
- }
411
- }
412
-
413
- // Become the "root" read that's fetching the block for the given address.
414
- // The fetch happens asynchronously to avoid stack-overflow and nested cache invalidation.
415
- grid.read_queue.push(read);
416
- grid.on_next_tick(read_block_tick_callback, &read.next_tick);
417
- }
418
-
419
- fn read_block_tick_callback(next_tick: *Storage.NextTick) void {
420
- const read = @fieldParentPtr(Grid.Read, "next_tick", next_tick);
421
- const grid = read.grid;
422
-
423
- // Try to resolve the read from the cache.
424
- if (grid.cache.get_index(read.address)) |cache_index| {
425
- const cache_block = grid.cache_blocks[cache_index];
426
- if (constants.verify) grid.verify_cached_read(read.address, cache_block);
427
- grid.read_block_resolve(read, cache_block);
428
- return;
429
- }
430
-
431
- // Grab an IOP to resolve the block from storage.
432
- // Failure to do so means the read is queued to receive an IOP when one finishes.
433
- const iop = grid.read_iops.acquire() orelse {
434
- grid.read_pending_queue.push(&read.pending);
435
- return;
436
- };
437
-
438
- grid.read_block_with(iop, read);
439
- }
440
-
441
- fn read_block_with(grid: *Grid, iop: *Grid.ReadIOP, read: *Grid.Read) void {
442
- const address = read.address;
443
- assert(address > 0);
444
-
445
- // We can only update the cache if the Grid is not resolving callbacks with a cache block.
446
- assert(!grid.read_resolving);
447
-
448
- iop.* = .{
449
- .completion = undefined,
450
- .read = read,
451
- };
452
- const iop_block = grid.read_iop_blocks[grid.read_iops.index(iop)];
453
-
454
- grid.superblock.storage.read_sectors(
455
- read_block_callback,
456
- &iop.completion,
457
- iop_block,
458
- .grid,
459
- block_offset(address),
460
- );
461
- }
462
-
463
- fn read_block_callback(completion: *Storage.Read) void {
464
- const iop = @fieldParentPtr(ReadIOP, "completion", completion);
465
- const read = iop.read;
466
- const grid = read.grid;
467
- const iop_block = &grid.read_iop_blocks[grid.read_iops.index(iop)];
468
-
469
- // Insert the block into the cache, and give the evicted block to `iop`.
470
- const cache_index = grid.cache.insert_index(&read.address);
471
- const cache_block = &grid.cache_blocks[cache_index];
472
- std.mem.swap(BlockPtr, iop_block, cache_block);
473
- std.mem.set(u8, iop_block.*, 0);
474
-
475
- // Handoff the iop to a pending read or release it before resolving the callbacks below.
476
- if (grid.read_pending_queue.pop()) |pending| {
477
- const queued_read = @fieldParentPtr(Read, "pending", pending);
478
- grid.read_block_with(iop, queued_read);
479
- } else {
480
- grid.read_iops.release(iop);
481
- }
482
-
483
- // A valid block filled by storage means the reads for the address can be resolved
484
- if (read_block_valid(read, cache_block.*)) {
485
- grid.read_block_resolve(read, cache_block.*);
486
- return;
487
- }
488
-
489
- // On the result of an invalid block, move the "root" read (and all others it resolves)
490
- // to recovery queue. Future reads on the same address will see the "root" read in the
491
- // recovery queue and enqueue to it.
492
- grid.read_queue.remove(read);
493
- grid.read_recovery_queue.push(read);
494
- }
495
-
496
- fn read_block_valid(read: *Grid.Read, block: BlockPtrConst) bool {
497
- const address = read.address;
498
- const checksum = read.checksum;
499
- const block_type = read.block_type;
500
-
501
- const header_bytes = block[0..@sizeOf(vsr.Header)];
502
- const header = mem.bytesAsValue(vsr.Header, header_bytes);
503
-
504
- if (!header.valid_checksum()) {
505
- log.err("invalid checksum at address {}", .{address});
506
- return false;
507
- }
508
-
509
- if (!header.valid_checksum_body(block[@sizeOf(vsr.Header)..header.size])) {
510
- log.err("invalid checksum body at address {}", .{address});
511
- return false;
512
- }
513
-
514
- if (header.checksum != checksum) {
515
- log.err(
516
- "expected address={} checksum={} block_type={}, " ++
517
- "found address={} checksum={} block_type={}",
518
- .{
519
- address,
520
- checksum,
521
- block_type,
522
- header.op,
523
- header.checksum,
524
- @enumToInt(header.operation),
525
- },
526
- );
527
- return false;
528
- }
529
-
530
- assert(header.op == address);
531
- assert(header.operation == block_type.operation());
532
- return true;
533
- }
534
-
535
- fn read_block_resolve(grid: *Grid, read: *Grid.Read, block: BlockPtrConst) void {
536
- // Guard to make sure the cache cannot be updated by any read.callbacks() below.
537
- assert(!grid.read_resolving);
538
- grid.read_resolving = true;
539
- defer {
540
- assert(grid.read_resolving);
541
- grid.read_resolving = false;
542
- }
543
-
544
- // Remove the "root" read so that the address is no longer actively reading / locked.
545
- grid.read_queue.remove(read);
546
-
547
- // Resolve all reads queued to the address with the block.
548
- while (read.resolves.pop()) |pending| {
549
- const pending_read = @fieldParentPtr(Read, "pending", pending);
550
- pending_read.callback(pending_read, block);
551
- }
552
-
553
- // Then invoke the callback with the cache block (which should be valid for the duration
554
- // of the callback as any nested Grid calls cannot synchronously update the cache).
555
- read.callback(read, block);
556
- }
557
-
558
- fn block_offset(address: u64) u64 {
559
- assert(address > 0);
560
-
561
- return (address - 1) * block_size;
562
- }
563
-
564
- fn verify_cached_read(grid: *Grid, address: u64, cached_block: BlockPtrConst) void {
565
- if (Storage != @import("../testing/storage.zig").Storage)
566
- // Too complicated to do async verification
567
- return;
568
-
569
- const actual_block = grid.superblock.storage.grid_block(address);
570
- assert(std.mem.eql(u8, cached_block, actual_block));
571
- }
572
- };
573
- }