tigerbeetle-node 0.11.4 → 0.11.5

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (62)
  1. package/dist/.client.node.sha256 +1 -1
  2. package/package.json +1 -1
  3. package/src/node.zig +5 -5
  4. package/src/tigerbeetle/src/benchmark.zig +4 -4
  5. package/src/tigerbeetle/src/c/tb_client/context.zig +6 -6
  6. package/src/tigerbeetle/src/c/tb_client/echo_client.zig +2 -2
  7. package/src/tigerbeetle/src/c/tb_client/thread.zig +0 -1
  8. package/src/tigerbeetle/src/c/tb_client.zig +2 -2
  9. package/src/tigerbeetle/src/c/test.zig +8 -8
  10. package/src/tigerbeetle/src/cli.zig +9 -9
  11. package/src/tigerbeetle/src/demo.zig +4 -4
  12. package/src/tigerbeetle/src/io/darwin.zig +4 -4
  13. package/src/tigerbeetle/src/io/linux.zig +6 -6
  14. package/src/tigerbeetle/src/io/windows.zig +4 -4
  15. package/src/tigerbeetle/src/lsm/compaction.zig +8 -8
  16. package/src/tigerbeetle/src/lsm/forest.zig +2 -2
  17. package/src/tigerbeetle/src/lsm/forest_fuzz.zig +9 -9
  18. package/src/tigerbeetle/src/lsm/grid.zig +5 -5
  19. package/src/tigerbeetle/src/lsm/groove.zig +4 -4
  20. package/src/tigerbeetle/src/lsm/level_iterator.zig +2 -2
  21. package/src/tigerbeetle/src/lsm/manifest.zig +19 -18
  22. package/src/tigerbeetle/src/lsm/manifest_level.zig +2 -2
  23. package/src/tigerbeetle/src/lsm/manifest_log.zig +8 -8
  24. package/src/tigerbeetle/src/lsm/manifest_log_fuzz.zig +15 -7
  25. package/src/tigerbeetle/src/lsm/posted_groove.zig +3 -3
  26. package/src/tigerbeetle/src/lsm/segmented_array_benchmark.zig +13 -13
  27. package/src/tigerbeetle/src/lsm/set_associative_cache.zig +2 -2
  28. package/src/tigerbeetle/src/lsm/table.zig +19 -19
  29. package/src/tigerbeetle/src/lsm/table_immutable.zig +4 -4
  30. package/src/tigerbeetle/src/lsm/table_iterator.zig +17 -9
  31. package/src/tigerbeetle/src/lsm/table_mutable.zig +3 -3
  32. package/src/tigerbeetle/src/lsm/test.zig +6 -6
  33. package/src/tigerbeetle/src/lsm/tree.zig +48 -45
  34. package/src/tigerbeetle/src/lsm/tree_fuzz.zig +14 -14
  35. package/src/tigerbeetle/src/main.zig +18 -11
  36. package/src/tigerbeetle/src/message_bus.zig +25 -25
  37. package/src/tigerbeetle/src/message_pool.zig +17 -17
  38. package/src/tigerbeetle/src/simulator.zig +6 -4
  39. package/src/tigerbeetle/src/storage.zig +12 -12
  40. package/src/tigerbeetle/src/test/accounting/auditor.zig +2 -2
  41. package/src/tigerbeetle/src/test/accounting/workload.zig +5 -5
  42. package/src/tigerbeetle/src/test/cluster.zig +8 -8
  43. package/src/tigerbeetle/src/test/conductor.zig +6 -5
  44. package/src/tigerbeetle/src/test/message_bus.zig +0 -2
  45. package/src/tigerbeetle/src/test/network.zig +5 -5
  46. package/src/tigerbeetle/src/test/state_checker.zig +2 -2
  47. package/src/tigerbeetle/src/test/storage.zig +54 -51
  48. package/src/tigerbeetle/src/test/storage_checker.zig +3 -3
  49. package/src/tigerbeetle/src/time.zig +0 -1
  50. package/src/tigerbeetle/src/tracer.zig +4 -4
  51. package/src/tigerbeetle/src/vsr/client.zig +5 -5
  52. package/src/tigerbeetle/src/vsr/clock.zig +9 -8
  53. package/src/tigerbeetle/src/vsr/journal.zig +47 -47
  54. package/src/tigerbeetle/src/vsr/journal_format_fuzz.zig +11 -11
  55. package/src/tigerbeetle/src/vsr/replica.zig +53 -53
  56. package/src/tigerbeetle/src/vsr/replica_format.zig +8 -8
  57. package/src/tigerbeetle/src/vsr/superblock.zig +55 -55
  58. package/src/tigerbeetle/src/vsr/superblock_client_table.zig +9 -9
  59. package/src/tigerbeetle/src/vsr/superblock_free_set.zig +4 -4
  60. package/src/tigerbeetle/src/vsr/superblock_fuzz.zig +2 -2
  61. package/src/tigerbeetle/src/vsr/superblock_manifest.zig +5 -5
  62. package/src/tigerbeetle/src/vsr.zig +20 -20
@@ -4,7 +4,7 @@ const Allocator = std.mem.Allocator;
4
4
  const assert = std.debug.assert;
5
5
  const math = std.math;
6
6
 
7
- const config = @import("../constants.zig");
7
+ const constants = @import("../constants.zig");
8
8
 
9
9
  const Message = @import("../message_pool.zig").MessagePool.Message;
10
10
  const util = @import("../util.zig");
@@ -23,7 +23,7 @@ const log = std.log.scoped(.journal);
23
23
  const Ring = enum {
24
24
  /// A circular buffer of (redundant) prepare message headers.
25
25
  headers,
26
- /// A circular buffer of prepare messages. Each slot is padded to `config.message_size_max`.
26
+ /// A circular buffer of prepare messages. Each slot is padded to `constants.message_size_max`.
27
27
  prepares,
28
28
 
29
29
  /// Returns the slot's offset relative to the start of the ring.
@@ -31,13 +31,13 @@ const Ring = enum {
31
31
  assert(slot.index < slot_count);
32
32
  switch (ring) {
33
33
  .headers => {
34
- comptime assert(config.sector_size % @sizeOf(Header) == 0);
34
+ comptime assert(constants.sector_size % @sizeOf(Header) == 0);
35
35
  const ring_offset = vsr.sector_floor(slot.index * @sizeOf(Header));
36
36
  assert(ring_offset < headers_size);
37
37
  return ring_offset;
38
38
  },
39
39
  .prepares => {
40
- const ring_offset = config.message_size_max * slot.index;
40
+ const ring_offset = constants.message_size_max * slot.index;
41
41
  assert(ring_offset < prepares_size);
42
42
  return ring_offset;
43
43
  },
@@ -45,7 +45,7 @@ const Ring = enum {
45
45
  }
46
46
  };
47
47
 
48
- const headers_per_sector = @divExact(config.sector_size, @sizeOf(Header));
48
+ const headers_per_sector = @divExact(constants.sector_size, @sizeOf(Header));
49
49
  comptime {
50
50
  assert(headers_per_sector > 0);
51
51
  }
@@ -59,7 +59,7 @@ comptime {
59
59
  /// - `journal.dirty`
60
60
  /// - `journal.faulty`
61
61
  ///
62
- /// A header's slot is `header.op % config.journal_slot_count`.
62
+ /// A header's slot is `header.op % constants.journal_slot_count`.
63
63
  const Slot = struct { index: u64 };
64
64
 
65
65
  /// An inclusive, non-empty range of slots.
@@ -94,9 +94,9 @@ const Status = enum {
94
94
  recovered,
95
95
  };
96
96
 
97
- const slot_count = config.journal_slot_count;
98
- const headers_size = config.journal_size_headers;
99
- const prepares_size = config.journal_size_prepares;
97
+ const slot_count = constants.journal_slot_count;
98
+ const headers_size = constants.journal_size_headers;
99
+ const prepares_size = constants.journal_size_prepares;
100
100
 
101
101
  pub const write_ahead_log_zone_size = headers_size + prepares_size;
102
102
 
@@ -107,14 +107,14 @@ comptime {
107
107
  assert(slot_count >= headers_per_sector);
108
108
  // The length of the prepare pipeline is the upper bound on how many ops can be
109
109
  // reordered during a view change. See `recover_prepares_callback()` for more detail.
110
- assert(slot_count > config.pipeline_max);
110
+ assert(slot_count > constants.pipeline_max);
111
111
 
112
112
  assert(headers_size > 0);
113
- assert(headers_size % config.sector_size == 0);
113
+ assert(headers_size % constants.sector_size == 0);
114
114
 
115
115
  assert(prepares_size > 0);
116
- assert(prepares_size % config.sector_size == 0);
117
- assert(prepares_size % config.message_size_max == 0);
116
+ assert(prepares_size % constants.sector_size == 0);
117
+ assert(prepares_size % constants.message_size_max == 0);
118
118
  }
119
119
 
120
120
  pub fn Journal(comptime Replica: type, comptime Storage: type) type {
@@ -151,7 +151,7 @@ pub fn Journal(comptime Replica: type, comptime Storage: type) type {
151
151
  /// This is reset to undefined and reused for each Storage.write_sectors() call.
152
152
  range: Range,
153
153
 
154
- const Sector = *align(config.sector_size) [config.sector_size]u8;
154
+ const Sector = *align(constants.sector_size) [constants.sector_size]u8;
155
155
 
156
156
  fn header_sector(write: *Self.Write, journal: *Self) Sector {
157
157
  assert(journal.writes.items.len == journal.headers_iops.len);
@@ -161,7 +161,7 @@ pub fn Journal(comptime Replica: type, comptime Storage: type) type {
161
161
  );
162
162
  // TODO The compiler should not need this align cast as the type of `headers_iops`
163
163
  // ensures that each buffer is properly aligned.
164
- return @alignCast(config.sector_size, &journal.headers_iops[i]);
164
+ return @alignCast(constants.sector_size, &journal.headers_iops[i]);
165
165
  }
166
166
  };
167
167
 
@@ -201,7 +201,7 @@ pub fn Journal(comptime Replica: type, comptime Storage: type) type {
201
201
  /// When the slot's header is `reserved`, the header's `op` is the slot index.
202
202
  ///
203
203
  /// During recovery, store the (unvalidated) headers of the prepare ring.
204
- headers: []align(config.sector_size) Header,
204
+ headers: []align(constants.sector_size) Header,
205
205
 
206
206
  /// Store headers whose prepares are on disk.
207
207
  /// Redundant headers are updated after the corresponding prepare(s) are written,
@@ -218,17 +218,17 @@ pub fn Journal(comptime Replica: type, comptime Storage: type) type {
218
218
  /// which requires remote repair.
219
219
  ///
220
220
  /// During recovery, store the redundant (unvalidated) headers.
221
- headers_redundant: []align(config.sector_size) Header,
221
+ headers_redundant: []align(constants.sector_size) Header,
222
222
 
223
223
  /// We copy-on-write to these buffers, as the in-memory headers may change while writing.
224
224
  /// The buffers belong to the IOP at the corresponding index in IOPS.
225
- headers_iops: *align(config.sector_size) [config.journal_iops_write_max][config.sector_size]u8,
225
+ headers_iops: *align(constants.sector_size) [constants.journal_iops_write_max][constants.sector_size]u8,
226
226
 
227
227
  /// Statically allocated read IO operation context data.
228
- reads: IOPS(Read, config.journal_iops_read_max) = .{},
228
+ reads: IOPS(Read, constants.journal_iops_read_max) = .{},
229
229
 
230
230
  /// Statically allocated write IO operation context data.
231
- writes: IOPS(Write, config.journal_iops_write_max) = .{},
231
+ writes: IOPS(Write, constants.journal_iops_write_max) = .{},
232
232
 
233
233
  /// Whether an entry is in memory only and needs to be written or is being written:
234
234
  /// We use this in the same sense as a dirty bit in the kernel page cache.
@@ -268,7 +268,7 @@ pub fn Journal(comptime Replica: type, comptime Storage: type) type {
268
268
 
269
269
  var headers = try allocator.allocAdvanced(
270
270
  Header,
271
- config.sector_size,
271
+ constants.sector_size,
272
272
  slot_count,
273
273
  .exact,
274
274
  );
@@ -277,7 +277,7 @@ pub fn Journal(comptime Replica: type, comptime Storage: type) type {
277
277
 
278
278
  var headers_redundant = try allocator.allocAdvanced(
279
279
  Header,
280
- config.sector_size,
280
+ constants.sector_size,
281
281
  slot_count,
282
282
  .exact,
283
283
  );
@@ -301,11 +301,11 @@ pub fn Journal(comptime Replica: type, comptime Storage: type) type {
301
301
  std.mem.set(bool, prepare_inhabited, false);
302
302
 
303
303
  const headers_iops = (try allocator.allocAdvanced(
304
- [config.sector_size]u8,
305
- config.sector_size,
306
- config.journal_iops_write_max,
304
+ [constants.sector_size]u8,
305
+ constants.sector_size,
306
+ constants.journal_iops_write_max,
307
307
  .exact,
308
- ))[0..config.journal_iops_write_max];
308
+ ))[0..constants.journal_iops_write_max];
309
309
  errdefer allocator.free(headers_iops);
310
310
 
311
311
  log.debug("{}: slot_count={} size={} headers_size={} prepares_size={}", .{
@@ -328,7 +328,7 @@ pub fn Journal(comptime Replica: type, comptime Storage: type) type {
328
328
  .headers_iops = headers_iops,
329
329
  };
330
330
 
331
- assert(@mod(@ptrToInt(&self.headers[0]), config.sector_size) == 0);
331
+ assert(@mod(@ptrToInt(&self.headers[0]), constants.sector_size) == 0);
332
332
  assert(self.dirty.bits.bit_length == slot_count);
333
333
  assert(self.faulty.bits.bit_length == slot_count);
334
334
  assert(self.dirty.count == slot_count);
@@ -768,7 +768,7 @@ pub fn Journal(comptime Replica: type, comptime Storage: type) type {
768
768
  const message = replica.message_bus.get_message();
769
769
  defer replica.message_bus.unref(message);
770
770
 
771
- var message_size: usize = config.message_size_max;
771
+ var message_size: usize = constants.message_size_max;
772
772
 
773
773
  // If the header is in-memory, we can skip the read from the disk.
774
774
  if (self.header_with_op_and_checksum(op, checksum)) |exact| {
@@ -776,14 +776,14 @@ pub fn Journal(comptime Replica: type, comptime Storage: type) type {
776
776
  message.header.* = exact.*;
777
777
  // Normally the message's padding would have been zeroed by the MessageBus,
778
778
  // but we are copying (only) a message header into a new buffer.
779
- std.mem.set(u8, message.buffer[@sizeOf(Header)..config.sector_size], 0);
779
+ std.mem.set(u8, message.buffer[@sizeOf(Header)..constants.sector_size], 0);
780
780
  callback(replica, message, destination_replica);
781
781
  return;
782
782
  } else {
783
783
  // As an optimization, we can read fewer than `message_size_max` bytes because
784
784
  // we know the message's exact size.
785
785
  message_size = vsr.sector_ceil(exact.size);
786
- assert(message_size <= config.message_size_max);
786
+ assert(message_size <= constants.message_size_max);
787
787
  }
788
788
  }
789
789
 
@@ -1032,7 +1032,7 @@ pub fn Journal(comptime Replica: type, comptime Storage: type) type {
1032
1032
 
1033
1033
  fn recover_headers_buffer(message: *Message, offset: u64) []align(@alignOf(Header)) u8 {
1034
1034
  const max = std.math.min(message.buffer.len, headers_size - offset);
1035
- assert(max % config.sector_size == 0);
1035
+ assert(max % constants.sector_size == 0);
1036
1036
  assert(max % @sizeOf(Header) == 0);
1037
1037
  return message.buffer[0..max];
1038
1038
  }
@@ -1075,7 +1075,7 @@ pub fn Journal(comptime Replica: type, comptime Storage: type) type {
1075
1075
  &read.completion,
1076
1076
  // We load the entire message to verify that it isn't torn or corrupt.
1077
1077
  // We don't know the message's size, so use the entire buffer.
1078
- message.buffer[0..config.message_size_max],
1078
+ message.buffer[0..constants.message_size_max],
1079
1079
  .wal_prepares,
1080
1080
  Ring.prepares.offset(slot),
1081
1081
  );
@@ -1652,7 +1652,7 @@ pub fn Journal(comptime Replica: type, comptime Storage: type) type {
1652
1652
  };
1653
1653
 
1654
1654
  const offset = Ring.headers.offset(slot_of_message);
1655
- assert(offset % config.sector_size == 0);
1655
+ assert(offset % constants.sector_size == 0);
1656
1656
 
1657
1657
  const buffer: []u8 = write.header_sector(self);
1658
1658
  const buffer_headers = std.mem.bytesAsSlice(Header, buffer);
@@ -1686,7 +1686,7 @@ pub fn Journal(comptime Replica: type, comptime Storage: type) type {
1686
1686
  self.replica,
1687
1687
  message.header.op,
1688
1688
  offset,
1689
- offset + config.sector_size,
1689
+ offset + constants.sector_size,
1690
1690
  });
1691
1691
 
1692
1692
  // Memory must not be owned by self.headers as these may be modified concurrently:
@@ -2216,11 +2216,11 @@ test "recovery_cases" {
2216
2216
  /// `offset_logical` is relative to the beginning of the `wal_headers` zone.
2217
2217
  /// Returns the number of bytes written to `target`.
2218
2218
  pub fn format_wal_headers(cluster: u32, offset_logical: u64, target: []u8) usize {
2219
- assert(offset_logical <= config.journal_size_headers);
2220
- assert(offset_logical % config.sector_size == 0);
2219
+ assert(offset_logical <= constants.journal_size_headers);
2220
+ assert(offset_logical % constants.sector_size == 0);
2221
2221
  assert(target.len > 0);
2222
2222
  assert(target.len % @sizeOf(Header) == 0);
2223
- assert(target.len % config.sector_size == 0);
2223
+ assert(target.len % constants.sector_size == 0);
2224
2224
 
2225
2225
  var headers = std.mem.bytesAsSlice(Header, target);
2226
2226
  const headers_past = @divExact(offset_logical, @sizeOf(Header));
@@ -2242,7 +2242,7 @@ pub fn format_wal_headers(cluster: u32, offset_logical: u64, target: []u8) usize
2242
2242
 
2243
2243
  test "format_wal_headers" {
2244
2244
  const fuzz = @import("./journal_format_fuzz.zig");
2245
- try fuzz.fuzz_format_wal_headers(config.sector_size);
2245
+ try fuzz.fuzz_format_wal_headers(constants.sector_size);
2246
2246
  }
2247
2247
 
2248
2248
  /// Format part of a new WAL's Zone.wal_prepares, writing to `target`.
@@ -2250,23 +2250,23 @@ test "format_wal_headers" {
2250
2250
  /// `offset_logical` is relative to the beginning of the `wal_prepares` zone.
2251
2251
  /// Returns the number of bytes written to `target`.
2252
2252
  pub fn format_wal_prepares(cluster: u32, offset_logical: u64, target: []u8) usize {
2253
- assert(offset_logical <= config.journal_size_prepares);
2254
- assert(offset_logical % config.sector_size == 0);
2253
+ assert(offset_logical <= constants.journal_size_prepares);
2254
+ assert(offset_logical % constants.sector_size == 0);
2255
2255
  assert(target.len > 0);
2256
2256
  assert(target.len % @sizeOf(Header) == 0);
2257
- assert(target.len % config.sector_size == 0);
2257
+ assert(target.len % constants.sector_size == 0);
2258
2258
 
2259
- const sectors_per_message = @divExact(config.message_size_max, config.sector_size);
2260
- const sector_max = @divExact(config.journal_size_prepares, config.sector_size);
2259
+ const sectors_per_message = @divExact(constants.message_size_max, constants.sector_size);
2260
+ const sector_max = @divExact(constants.journal_size_prepares, constants.sector_size);
2261
2261
 
2262
- var sectors = std.mem.bytesAsSlice([config.sector_size]u8, target);
2262
+ var sectors = std.mem.bytesAsSlice([constants.sector_size]u8, target);
2263
2263
  for (sectors) |*sector_data, i| {
2264
- const sector = @divExact(offset_logical, config.sector_size) + i;
2264
+ const sector = @divExact(offset_logical, constants.sector_size) + i;
2265
2265
  if (sector == sector_max) {
2266
2266
  if (i == 0) {
2267
- assert(offset_logical == config.journal_size_prepares);
2267
+ assert(offset_logical == constants.journal_size_prepares);
2268
2268
  }
2269
- return i * config.sector_size;
2269
+ return i * constants.sector_size;
2270
2270
  } else {
2271
2271
  const message_slot = @divFloor(sector, sectors_per_message);
2272
2272
  assert(message_slot < slot_count);
@@ -3,7 +3,7 @@ const std = @import("std");
3
3
  const assert = std.debug.assert;
4
4
  const log = std.log.scoped(.fuzz_journal_format);
5
5
 
6
- const config = @import("../constants.zig");
6
+ const constants = @import("../constants.zig");
7
7
  const util = @import("../util.zig");
8
8
  const vsr = @import("../vsr.zig");
9
9
  const journal = @import("./journal.zig");
@@ -19,9 +19,9 @@ pub fn main() !void {
19
19
  var prng = std.rand.DefaultPrng.init(args.seed);
20
20
 
21
21
  // +10 to occasionally test formatting into a buffer larger than the total data size.
22
- const write_sectors_max = @divExact(config.journal_size_headers, config.sector_size);
22
+ const write_sectors_max = @divExact(constants.journal_size_headers, constants.sector_size);
23
23
  const write_sectors = 1 + prng.random().uintLessThan(usize, write_sectors_max + 10);
24
- const write_size = write_sectors * config.sector_size;
24
+ const write_size = write_sectors * constants.sector_size;
25
25
 
26
26
  log.info("write_size={} write_sectors={}", .{ write_size, write_sectors });
27
27
 
@@ -32,13 +32,13 @@ pub fn main() !void {
32
32
  pub fn fuzz_format_wal_headers(write_size_max: usize) !void {
33
33
  assert(write_size_max > 0);
34
34
  assert(write_size_max % @sizeOf(vsr.Header) == 0);
35
- assert(write_size_max % config.sector_size == 0);
35
+ assert(write_size_max % constants.sector_size == 0);
36
36
 
37
37
  const write = try std.testing.allocator.alloc(u8, write_size_max);
38
38
  defer std.testing.allocator.free(write);
39
39
 
40
40
  var offset: usize = 0;
41
- while (offset < config.journal_size_headers) {
41
+ while (offset < constants.journal_size_headers) {
42
42
  const write_size = journal.format_wal_headers(cluster, offset, write);
43
43
  defer offset += write_size;
44
44
 
@@ -48,19 +48,19 @@ pub fn fuzz_format_wal_headers(write_size_max: usize) !void {
48
48
  try verify_slot_header(slot, header);
49
49
  }
50
50
  }
51
- assert(offset == config.journal_size_headers);
51
+ assert(offset == constants.journal_size_headers);
52
52
  }
53
53
 
54
54
  pub fn fuzz_format_wal_prepares(write_size_max: usize) !void {
55
55
  assert(write_size_max > 0);
56
56
  assert(write_size_max % @sizeOf(vsr.Header) == 0);
57
- assert(write_size_max % config.sector_size == 0);
57
+ assert(write_size_max % constants.sector_size == 0);
58
58
 
59
59
  const write = try std.testing.allocator.alloc(u8, write_size_max);
60
60
  defer std.testing.allocator.free(write);
61
61
 
62
62
  var offset: usize = 0;
63
- while (offset < config.journal_size_prepares) {
63
+ while (offset < constants.journal_size_prepares) {
64
64
  const write_size = journal.format_wal_prepares(cluster, offset, write);
65
65
  defer offset += write_size;
66
66
 
@@ -68,12 +68,12 @@ pub fn fuzz_format_wal_prepares(write_size_max: usize) !void {
68
68
  while (offset_checked < write_size) {
69
69
  const offset_header_next = std.mem.alignForward(
70
70
  offset + offset_checked,
71
- config.message_size_max,
71
+ constants.message_size_max,
72
72
  ) - offset;
73
73
 
74
74
  if (offset_checked == offset_header_next) {
75
75
  // Message header.
76
- const slot = @divExact(offset + offset_checked, config.message_size_max);
76
+ const slot = @divExact(offset + offset_checked, constants.message_size_max);
77
77
  const header_bytes = write[offset_checked..][0..@sizeOf(vsr.Header)];
78
78
  const header = std.mem.bytesToValue(vsr.Header, header_bytes);
79
79
 
@@ -92,7 +92,7 @@ pub fn fuzz_format_wal_prepares(write_size_max: usize) !void {
92
92
  }
93
93
  assert(offset_checked == write_size);
94
94
  }
95
- assert(offset == config.journal_size_prepares);
95
+ assert(offset == constants.journal_size_prepares);
96
96
  }
97
97
 
98
98
  fn verify_slot_header(slot: usize, header: vsr.Header) !void {