tigerbeetle-node 0.11.4 → 0.11.5

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (62)
  1. package/dist/.client.node.sha256 +1 -1
  2. package/package.json +1 -1
  3. package/src/node.zig +5 -5
  4. package/src/tigerbeetle/src/benchmark.zig +4 -4
  5. package/src/tigerbeetle/src/c/tb_client/context.zig +6 -6
  6. package/src/tigerbeetle/src/c/tb_client/echo_client.zig +2 -2
  7. package/src/tigerbeetle/src/c/tb_client/thread.zig +0 -1
  8. package/src/tigerbeetle/src/c/tb_client.zig +2 -2
  9. package/src/tigerbeetle/src/c/test.zig +8 -8
  10. package/src/tigerbeetle/src/cli.zig +9 -9
  11. package/src/tigerbeetle/src/demo.zig +4 -4
  12. package/src/tigerbeetle/src/io/darwin.zig +4 -4
  13. package/src/tigerbeetle/src/io/linux.zig +6 -6
  14. package/src/tigerbeetle/src/io/windows.zig +4 -4
  15. package/src/tigerbeetle/src/lsm/compaction.zig +8 -8
  16. package/src/tigerbeetle/src/lsm/forest.zig +2 -2
  17. package/src/tigerbeetle/src/lsm/forest_fuzz.zig +9 -9
  18. package/src/tigerbeetle/src/lsm/grid.zig +5 -5
  19. package/src/tigerbeetle/src/lsm/groove.zig +4 -4
  20. package/src/tigerbeetle/src/lsm/level_iterator.zig +2 -2
  21. package/src/tigerbeetle/src/lsm/manifest.zig +19 -18
  22. package/src/tigerbeetle/src/lsm/manifest_level.zig +2 -2
  23. package/src/tigerbeetle/src/lsm/manifest_log.zig +8 -8
  24. package/src/tigerbeetle/src/lsm/manifest_log_fuzz.zig +15 -7
  25. package/src/tigerbeetle/src/lsm/posted_groove.zig +3 -3
  26. package/src/tigerbeetle/src/lsm/segmented_array_benchmark.zig +13 -13
  27. package/src/tigerbeetle/src/lsm/set_associative_cache.zig +2 -2
  28. package/src/tigerbeetle/src/lsm/table.zig +19 -19
  29. package/src/tigerbeetle/src/lsm/table_immutable.zig +4 -4
  30. package/src/tigerbeetle/src/lsm/table_iterator.zig +17 -9
  31. package/src/tigerbeetle/src/lsm/table_mutable.zig +3 -3
  32. package/src/tigerbeetle/src/lsm/test.zig +6 -6
  33. package/src/tigerbeetle/src/lsm/tree.zig +48 -45
  34. package/src/tigerbeetle/src/lsm/tree_fuzz.zig +14 -14
  35. package/src/tigerbeetle/src/main.zig +18 -11
  36. package/src/tigerbeetle/src/message_bus.zig +25 -25
  37. package/src/tigerbeetle/src/message_pool.zig +17 -17
  38. package/src/tigerbeetle/src/simulator.zig +6 -4
  39. package/src/tigerbeetle/src/storage.zig +12 -12
  40. package/src/tigerbeetle/src/test/accounting/auditor.zig +2 -2
  41. package/src/tigerbeetle/src/test/accounting/workload.zig +5 -5
  42. package/src/tigerbeetle/src/test/cluster.zig +8 -8
  43. package/src/tigerbeetle/src/test/conductor.zig +6 -5
  44. package/src/tigerbeetle/src/test/message_bus.zig +0 -2
  45. package/src/tigerbeetle/src/test/network.zig +5 -5
  46. package/src/tigerbeetle/src/test/state_checker.zig +2 -2
  47. package/src/tigerbeetle/src/test/storage.zig +54 -51
  48. package/src/tigerbeetle/src/test/storage_checker.zig +3 -3
  49. package/src/tigerbeetle/src/time.zig +0 -1
  50. package/src/tigerbeetle/src/tracer.zig +4 -4
  51. package/src/tigerbeetle/src/vsr/client.zig +5 -5
  52. package/src/tigerbeetle/src/vsr/clock.zig +9 -8
  53. package/src/tigerbeetle/src/vsr/journal.zig +47 -47
  54. package/src/tigerbeetle/src/vsr/journal_format_fuzz.zig +11 -11
  55. package/src/tigerbeetle/src/vsr/replica.zig +53 -53
  56. package/src/tigerbeetle/src/vsr/replica_format.zig +8 -8
  57. package/src/tigerbeetle/src/vsr/superblock.zig +55 -55
  58. package/src/tigerbeetle/src/vsr/superblock_client_table.zig +9 -9
  59. package/src/tigerbeetle/src/vsr/superblock_free_set.zig +4 -4
  60. package/src/tigerbeetle/src/vsr/superblock_fuzz.zig +2 -2
  61. package/src/tigerbeetle/src/vsr/superblock_manifest.zig +5 -5
  62. package/src/tigerbeetle/src/vsr.zig +20 -20
@@ -16,7 +16,7 @@ const mem = std.mem;
16
16
  const meta = std.meta;
17
17
  const os = std.os;
18
18
 
19
- const config = @import("../constants.zig");
19
+ const constants = @import("../constants.zig");
20
20
  const div_ceil = @import("../util.zig").div_ceil;
21
21
  const vsr = @import("../vsr.zig");
22
22
  const log = std.log.scoped(.superblock);
@@ -27,7 +27,7 @@ pub const SuperBlockManifest = @import("superblock_manifest.zig").Manifest;
27
27
  pub const SuperBlockFreeSet = @import("superblock_free_set.zig").FreeSet;
28
28
  pub const SuperBlockClientTable = @import("superblock_client_table.zig").ClientTable;
29
29
  pub const Quorums = @import("superblock_quorums.zig").QuorumsType(.{
30
- .superblock_copies = config.superblock_copies,
30
+ .superblock_copies = constants.superblock_copies,
31
31
  });
32
32
 
33
33
  pub const SuperBlockVersion: u16 = 0;
@@ -81,7 +81,7 @@ pub const SuperBlockSector = extern struct {
81
81
 
82
82
  /// A listing of persistent read snapshots that have been issued to clients.
83
83
  /// A snapshot.created timestamp of 0 indicates that the snapshot is null.
84
- snapshots: [config.lsm_snapshots_max]Snapshot,
84
+ snapshots: [constants.lsm_snapshots_max]Snapshot,
85
85
 
86
86
  /// The size of the manifest block references stored in the superblock trailer.
87
87
  /// The block addresses and checksums in this section of the trailer are laid out as follows:
@@ -170,7 +170,7 @@ pub const SuperBlockSector = extern struct {
170
170
  /// to ensure determinstic storage.
171
171
  pub fn op_compacted(state: VSRState, op: u64) bool {
172
172
  // If commit_min is 0, we have never checkpointed, so no compactions are checkpointed.
173
- return state.commit_min > 0 and op <= state.commit_min + config.lsm_batch_multiple;
173
+ return state.commit_min > 0 and op <= state.commit_min + constants.lsm_batch_multiple;
174
174
  }
175
175
  };
176
176
 
@@ -204,7 +204,7 @@ pub const SuperBlockSector = extern struct {
204
204
  };
205
205
 
206
206
  comptime {
207
- assert(@sizeOf(SuperBlockSector) == config.sector_size);
207
+ assert(@sizeOf(SuperBlockSector) == constants.sector_size);
208
208
  // Assert that there is no implicit padding in the struct.
209
209
  assert(@bitSizeOf(SuperBlockSector) == @sizeOf(SuperBlockSector) * 8);
210
210
  }
@@ -225,7 +225,7 @@ pub const SuperBlockSector = extern struct {
225
225
  }
226
226
 
227
227
  pub fn set_checksum(superblock: *SuperBlockSector) void {
228
- assert(superblock.copy < config.superblock_copies);
228
+ assert(superblock.copy < constants.superblock_copies);
229
229
  assert(superblock.version == SuperBlockVersion);
230
230
  assert(superblock.flags == 0);
231
231
 
@@ -271,19 +271,19 @@ pub const SuperBlockSector = extern struct {
271
271
  };
272
272
 
273
273
  comptime {
274
- switch (config.superblock_copies) {
274
+ switch (constants.superblock_copies) {
275
275
  4, 6, 8 => {},
276
276
  else => @compileError("superblock_copies must be either { 4, 6, 8 } for flexible quorums."),
277
277
  }
278
278
  }
279
279
 
280
280
  /// The size of the entire superblock storage zone.
281
- pub const superblock_zone_size = superblock_copy_size * config.superblock_copies;
281
+ pub const superblock_zone_size = superblock_copy_size * constants.superblock_copies;
282
282
 
283
283
  /// The size of an individual superblock including trailer.
284
284
  pub const superblock_copy_size = @sizeOf(SuperBlockSector) + superblock_trailer_size_max;
285
285
  comptime {
286
- assert(superblock_copy_size % config.sector_size == 0);
286
+ assert(superblock_copy_size % constants.sector_size == 0);
287
287
  }
288
288
 
289
289
  /// The maximum possible size of the superblock trailer, following the superblock sector.
@@ -293,14 +293,14 @@ pub const superblock_trailer_size_max = blk: {
293
293
  // 2. the maximum possible size of the EWAH-compressed bit set addressable by the free set.
294
294
 
295
295
  assert(superblock_trailer_manifest_size_max > 0);
296
- assert(superblock_trailer_manifest_size_max % config.sector_size == 0);
296
+ assert(superblock_trailer_manifest_size_max % constants.sector_size == 0);
297
297
  assert(superblock_trailer_manifest_size_max % SuperBlockManifest.BlockReferenceSize == 0);
298
298
 
299
299
  assert(superblock_trailer_free_set_size_max > 0);
300
- assert(superblock_trailer_free_set_size_max % config.sector_size == 0);
300
+ assert(superblock_trailer_free_set_size_max % constants.sector_size == 0);
301
301
 
302
302
  assert(superblock_trailer_client_table_size_max > 0);
303
- assert(superblock_trailer_client_table_size_max % config.sector_size == 0);
303
+ assert(superblock_trailer_client_table_size_max % constants.sector_size == 0);
304
304
 
305
305
  // We order the smaller manifest section ahead of the block free set for better access locality.
306
306
  // For example, it's cheaper to skip over 1 MiB when reading from disk than to skip over 32 MiB.
@@ -317,15 +317,15 @@ pub const superblock_trailer_manifest_size_max = blk: {
317
317
  // Use a multiple of sector * reference so that the size is exactly divisible without padding:
318
318
  // For example, this 2.5 MiB manifest trailer == 65536 references == 65536 * 511 or 34m tables.
319
319
  // TODO Size this relative to the expected number of tables & fragmentation.
320
- break :blk 16 * config.sector_size * SuperBlockManifest.BlockReferenceSize;
320
+ break :blk 16 * constants.sector_size * SuperBlockManifest.BlockReferenceSize;
321
321
  };
322
322
 
323
323
  pub const superblock_trailer_free_set_size_max = blk: {
324
- const encode_size_max = SuperBlockFreeSet.encode_size_max(config.block_count_max);
324
+ const encode_size_max = SuperBlockFreeSet.encode_size_max(constants.block_count_max);
325
325
  assert(encode_size_max > 0);
326
326
 
327
327
  // Round up to the nearest sector:
328
- break :blk div_ceil(encode_size_max, config.sector_size) * config.sector_size;
328
+ break :blk div_ceil(encode_size_max, constants.sector_size) * constants.sector_size;
329
329
  };
330
330
 
331
331
  pub const superblock_trailer_client_table_size_max = blk: {
@@ -333,11 +333,11 @@ pub const superblock_trailer_client_table_size_max = blk: {
333
333
  assert(encode_size_max > 0);
334
334
 
335
335
  // Round up to the nearest sector:
336
- break :blk div_ceil(encode_size_max, config.sector_size) * config.sector_size;
336
+ break :blk div_ceil(encode_size_max, constants.sector_size) * constants.sector_size;
337
337
  };
338
338
 
339
339
  pub const data_file_size_min = blk: {
340
- break :blk superblock_zone_size + config.journal_size_max;
340
+ break :blk superblock_zone_size + constants.journal_size_max;
341
341
  };
342
342
 
343
343
  /// This table shows the sequence number progression of the SuperBlock's sectors.
@@ -405,15 +405,15 @@ pub fn SuperBlockType(comptime Storage: type) type {
405
405
  storage_size: u64 = superblock_zone_size,
406
406
 
407
407
  /// The superblock that was recovered at startup after a crash or that was last written.
408
- working: *align(config.sector_size) SuperBlockSector,
408
+ working: *align(constants.sector_size) SuperBlockSector,
409
409
 
410
410
  /// The superblock that will replace the current working superblock once written.
411
411
  /// We cannot mutate any working state directly until it is safely on stable storage.
412
412
  /// Otherwise, we may accidentally externalize guarantees that are not yet durable.
413
- staging: *align(config.sector_size) SuperBlockSector,
413
+ staging: *align(constants.sector_size) SuperBlockSector,
414
414
 
415
415
  /// The copies that we read into at startup or when verifying the written superblock.
416
- reading: []align(config.sector_size) SuperBlockSector,
416
+ reading: []align(constants.sector_size) SuperBlockSector,
417
417
 
418
418
  /// It might seem that, at startup, we simply install the copy with the highest sequence.
419
419
  ///
@@ -435,9 +435,9 @@ pub fn SuperBlockType(comptime Storage: type) type {
435
435
  free_set: FreeSet,
436
436
  client_table: ClientTable,
437
437
 
438
- manifest_buffer: []align(config.sector_size) u8,
439
- free_set_buffer: []align(config.sector_size) u8,
440
- client_table_buffer: []align(config.sector_size) u8,
438
+ manifest_buffer: []align(constants.sector_size) u8,
439
+ free_set_buffer: []align(constants.sector_size) u8,
440
+ client_table_buffer: []align(constants.sector_size) u8,
441
441
 
442
442
  /// Whether the superblock has been opened. An open superblock may not be formatted.
443
443
  opened: bool = false,
@@ -457,15 +457,15 @@ pub fn SuperBlockType(comptime Storage: type) type {
457
457
  storage: *Storage,
458
458
  message_pool: *MessagePool,
459
459
  ) !SuperBlock {
460
- const a = try allocator.allocAdvanced(SuperBlockSector, config.sector_size, 1, .exact);
460
+ const a = try allocator.allocAdvanced(SuperBlockSector, constants.sector_size, 1, .exact);
461
461
  errdefer allocator.free(a);
462
462
 
463
- const b = try allocator.allocAdvanced(SuperBlockSector, config.sector_size, 1, .exact);
463
+ const b = try allocator.allocAdvanced(SuperBlockSector, constants.sector_size, 1, .exact);
464
464
  errdefer allocator.free(b);
465
465
 
466
466
  const reading = try allocator.allocAdvanced(
467
- [config.superblock_copies]SuperBlockSector,
468
- config.sector_size,
467
+ [constants.superblock_copies]SuperBlockSector,
468
+ constants.sector_size,
469
469
  1,
470
470
  .exact,
471
471
  );
@@ -481,7 +481,7 @@ pub fn SuperBlockType(comptime Storage: type) type {
481
481
  );
482
482
  errdefer manifest.deinit(allocator);
483
483
 
484
- var free_set = try FreeSet.init(allocator, config.block_count_max);
484
+ var free_set = try FreeSet.init(allocator, constants.block_count_max);
485
485
  errdefer free_set.deinit(allocator);
486
486
 
487
487
  var client_table = try ClientTable.init(allocator, message_pool);
@@ -489,7 +489,7 @@ pub fn SuperBlockType(comptime Storage: type) type {
489
489
 
490
490
  const manifest_buffer = try allocator.allocAdvanced(
491
491
  u8,
492
- config.sector_size,
492
+ constants.sector_size,
493
493
  superblock_trailer_manifest_size_max,
494
494
  .exact,
495
495
  );
@@ -497,7 +497,7 @@ pub fn SuperBlockType(comptime Storage: type) type {
497
497
 
498
498
  const free_set_buffer = try allocator.allocAdvanced(
499
499
  u8,
500
- config.sector_size,
500
+ constants.sector_size,
501
501
  superblock_trailer_free_set_size_max,
502
502
  .exact,
503
503
  );
@@ -505,7 +505,7 @@ pub fn SuperBlockType(comptime Storage: type) type {
505
505
 
506
506
  const client_table_buffer = try allocator.allocAdvanced(
507
507
  u8,
508
- config.sector_size,
508
+ constants.sector_size,
509
509
  superblock_trailer_client_table_size_max,
510
510
  .exact,
511
511
  );
@@ -555,9 +555,9 @@ pub fn SuperBlockType(comptime Storage: type) type {
555
555
  ) void {
556
556
  assert(!superblock.opened);
557
557
 
558
- assert(options.replica < config.replicas_max);
558
+ assert(options.replica < constants.replicas_max);
559
559
  assert(options.size_max >= data_file_size_min);
560
- assert(options.size_max % config.sector_size == 0);
560
+ assert(options.size_max % constants.sector_size == 0);
561
561
 
562
562
  // This working copy provides the parent checksum, and will not be written to disk.
563
563
  // We therefore use zero values to make this parent checksum as stable as possible.
@@ -757,7 +757,7 @@ pub fn SuperBlockType(comptime Storage: type) type {
757
757
 
758
758
  fn write_staging_encode_free_set(superblock: *SuperBlock) void {
759
759
  const staging: *SuperBlockSector = superblock.staging;
760
- const encode_size_max = FreeSet.encode_size_max(config.block_count_max);
760
+ const encode_size_max = FreeSet.encode_size_max(constants.block_count_max);
761
761
  const target = superblock.free_set_buffer[0..encode_size_max];
762
762
 
763
763
  superblock.free_set.include_staging();
@@ -768,7 +768,7 @@ pub fn SuperBlockType(comptime Storage: type) type {
768
768
  staging.size = data_file_size_min;
769
769
 
770
770
  if (superblock.free_set.highest_address_acquired()) |address| {
771
- staging.size += address * config.block_size;
771
+ staging.size += address * constants.block_size;
772
772
  }
773
773
  assert(staging.size >= data_file_size_min);
774
774
  assert(staging.size <= staging.size_max);
@@ -933,7 +933,7 @@ pub fn SuperBlockType(comptime Storage: type) type {
933
933
  assert(superblock.staging.size >= data_file_size_min);
934
934
  assert(superblock.staging.size <= superblock.staging.size_max);
935
935
 
936
- assert(context.copy.? < config.superblock_copies);
936
+ assert(context.copy.? < constants.superblock_copies);
937
937
  superblock.staging.copy = context.copy.?;
938
938
 
939
939
  // Updating the copy number should not affect the checksum, which was previously set:
@@ -970,7 +970,7 @@ pub fn SuperBlockType(comptime Storage: type) type {
970
970
 
971
971
  assert(superblock.queue_head == context);
972
972
 
973
- assert(copy < config.superblock_copies);
973
+ assert(copy < constants.superblock_copies);
974
974
  assert(copy == superblock.staging.copy);
975
975
 
976
976
  if (context.caller == .open) {
@@ -979,7 +979,7 @@ pub fn SuperBlockType(comptime Storage: type) type {
979
979
  return;
980
980
  }
981
981
 
982
- if (copy + 1 == config.superblock_copies) {
982
+ if (copy + 1 == constants.superblock_copies) {
983
983
  context.copy = null;
984
984
  superblock.read_working(context, .verify);
985
985
  } else {
@@ -1015,7 +1015,7 @@ pub fn SuperBlockType(comptime Storage: type) type {
1015
1015
 
1016
1016
  fn read_sector(superblock: *SuperBlock, context: *Context) void {
1017
1017
  assert(superblock.queue_head == context);
1018
- assert(context.copy.? < config.superblock_copies);
1018
+ assert(context.copy.? < constants.superblock_copies);
1019
1019
  assert(context.read_threshold != null);
1020
1020
 
1021
1021
  const buffer = mem.asBytes(&superblock.reading[context.copy.?]);
@@ -1046,8 +1046,8 @@ pub fn SuperBlockType(comptime Storage: type) type {
1046
1046
 
1047
1047
  assert(superblock.queue_head == context);
1048
1048
 
1049
- assert(context.copy.? < config.superblock_copies);
1050
- if (context.copy.? + 1 != config.superblock_copies) {
1049
+ assert(context.copy.? < constants.superblock_copies);
1050
+ if (context.copy.? + 1 != constants.superblock_copies) {
1051
1051
  context.copy = context.copy.? + 1;
1052
1052
  superblock.read_sector(context);
1053
1053
  return;
@@ -1134,7 +1134,7 @@ pub fn SuperBlockType(comptime Storage: type) type {
1134
1134
  fn read_manifest(superblock: *SuperBlock, context: *Context) void {
1135
1135
  assert(context.caller == .open);
1136
1136
  assert(superblock.queue_head == context);
1137
- assert(context.copy.? < config.superblock_copies);
1137
+ assert(context.copy.? < constants.superblock_copies);
1138
1138
 
1139
1139
  const size = vsr.sector_ceil(superblock.working.manifest_size);
1140
1140
  assert(size <= superblock_trailer_manifest_size_max);
@@ -1189,7 +1189,7 @@ pub fn SuperBlockType(comptime Storage: type) type {
1189
1189
  // We do not repair padding.
1190
1190
  context.copy = 0;
1191
1191
  superblock.read_free_set(context);
1192
- } else if (copy + 1 == config.superblock_copies) {
1192
+ } else if (copy + 1 == constants.superblock_copies) {
1193
1193
  @panic("superblock manifest lost");
1194
1194
  } else {
1195
1195
  log.debug("open: read_manifest: corrupt copy={}", .{copy});
@@ -1201,7 +1201,7 @@ pub fn SuperBlockType(comptime Storage: type) type {
1201
1201
  fn read_free_set(superblock: *SuperBlock, context: *Context) void {
1202
1202
  assert(context.caller == .open);
1203
1203
  assert(superblock.queue_head == context);
1204
- assert(context.copy.? < config.superblock_copies);
1204
+ assert(context.copy.? < constants.superblock_copies);
1205
1205
 
1206
1206
  const size = vsr.sector_ceil(superblock.working.free_set_size);
1207
1207
  assert(size <= superblock_trailer_free_set_size_max);
@@ -1248,14 +1248,14 @@ pub fn SuperBlockType(comptime Storage: type) type {
1248
1248
 
1249
1249
  log.debug("open: read_free_set: acquired blocks: {}/{}", .{
1250
1250
  superblock.free_set.count_acquired(),
1251
- config.block_count_max,
1251
+ constants.block_count_max,
1252
1252
  });
1253
1253
 
1254
1254
  superblock.verify_manifest_blocks_are_acquired_in_free_set();
1255
1255
 
1256
1256
  // TODO Repair any impaired copies before we continue.
1257
1257
  superblock.read_client_table(context);
1258
- } else if (copy + 1 == config.superblock_copies) {
1258
+ } else if (copy + 1 == constants.superblock_copies) {
1259
1259
  @panic("superblock free set lost");
1260
1260
  } else {
1261
1261
  log.debug("open: read_free_set: corrupt copy={}", .{copy});
@@ -1274,7 +1274,7 @@ pub fn SuperBlockType(comptime Storage: type) type {
1274
1274
  fn read_client_table(superblock: *SuperBlock, context: *Context) void {
1275
1275
  assert(context.caller == .open);
1276
1276
  assert(superblock.queue_head == context);
1277
- assert(context.copy.? < config.superblock_copies);
1277
+ assert(context.copy.? < constants.superblock_copies);
1278
1278
 
1279
1279
  const size = vsr.sector_ceil(superblock.working.client_table_size);
1280
1280
  assert(size <= superblock_trailer_client_table_size_max);
@@ -1321,12 +1321,12 @@ pub fn SuperBlockType(comptime Storage: type) type {
1321
1321
 
1322
1322
  log.debug("open: read_client_table: client requests: {}/{}", .{
1323
1323
  superblock.client_table.count(),
1324
- config.clients_max,
1324
+ constants.clients_max,
1325
1325
  });
1326
1326
 
1327
1327
  context.copy = null;
1328
1328
  superblock.repair(context);
1329
- } else if (copy + 1 == config.superblock_copies) {
1329
+ } else if (copy + 1 == constants.superblock_copies) {
1330
1330
  @panic("superblock client table lost");
1331
1331
  } else {
1332
1332
  log.debug("open: read_client_table: corrupt copy={}", .{copy});
@@ -1430,7 +1430,7 @@ pub fn SuperBlockType(comptime Storage: type) type {
1430
1430
  // The rule is that the write quorum plus the read quorum must be exactly copies + 1.
1431
1431
 
1432
1432
  return switch (caller) {
1433
- .format, .checkpoint, .view_change => switch (config.superblock_copies) {
1433
+ .format, .checkpoint, .view_change => switch (constants.superblock_copies) {
1434
1434
  4 => 3,
1435
1435
  6 => 4,
1436
1436
  8 => 5,
@@ -1438,7 +1438,7 @@ pub fn SuperBlockType(comptime Storage: type) type {
1438
1438
  },
1439
1439
  // The open quorum must allow for at least two copy faults, because our view change
1440
1440
  // updates an existing set of copies in place, temporarily impairing one copy.
1441
- .open => switch (config.superblock_copies) {
1441
+ .open => switch (constants.superblock_copies) {
1442
1442
  4 => 2,
1443
1443
  6 => 3,
1444
1444
  8 => 4,
@@ -1451,22 +1451,22 @@ pub fn SuperBlockType(comptime Storage: type) type {
1451
1451
 
1452
1452
  pub const Layout = struct {
1453
1453
  pub fn offset_sector(copy: u8) u64 {
1454
- assert(copy < config.superblock_copies);
1454
+ assert(copy < constants.superblock_copies);
1455
1455
  return superblock_copy_size * @as(u64, copy);
1456
1456
  }
1457
1457
 
1458
1458
  pub fn offset_manifest(copy: u8) u64 {
1459
- assert(copy < config.superblock_copies);
1459
+ assert(copy < constants.superblock_copies);
1460
1460
  return offset_sector(copy) + @sizeOf(SuperBlockSector);
1461
1461
  }
1462
1462
 
1463
1463
  pub fn offset_free_set(copy: u8) u64 {
1464
- assert(copy < config.superblock_copies);
1464
+ assert(copy < constants.superblock_copies);
1465
1465
  return offset_manifest(copy) + superblock_trailer_manifest_size_max;
1466
1466
  }
1467
1467
 
1468
1468
  pub fn offset_client_table(copy: u8) u64 {
1469
- assert(copy < config.superblock_copies);
1469
+ assert(copy < constants.superblock_copies);
1470
1470
  return offset_free_set(copy) + superblock_trailer_free_set_size_max;
1471
1471
  }
1472
1472
  };
@@ -2,7 +2,7 @@ const std = @import("std");
2
2
  const assert = std.debug.assert;
3
3
  const mem = std.mem;
4
4
 
5
- const config = @import("../constants.zig");
5
+ const constants = @import("../constants.zig");
6
6
  const vsr = @import("../vsr.zig");
7
7
  const util = @import("../util.zig");
8
8
 
@@ -43,10 +43,10 @@ pub const ClientTable = struct {
43
43
  var entries: Entries = .{};
44
44
  errdefer entries.deinit(allocator);
45
45
 
46
- try entries.ensureTotalCapacity(allocator, @intCast(u32, config.clients_max));
47
- assert(entries.capacity() >= config.clients_max);
46
+ try entries.ensureTotalCapacity(allocator, @intCast(u32, constants.clients_max));
47
+ assert(entries.capacity() >= constants.clients_max);
48
48
 
49
- const sorted = try allocator.alloc(*const Entry, config.clients_max);
49
+ const sorted = try allocator.alloc(*const Entry, constants.clients_max);
50
50
  errdefer allocator.free(sorted);
51
51
 
52
52
  return ClientTable{
@@ -84,15 +84,15 @@ pub const ClientTable = struct {
84
84
  // First goes the vsr headers for the entries.
85
85
  // This takes advantage of the buffer alignment to avoid adding padding for the headers.
86
86
  size_max = std.mem.alignForward(size_max, @alignOf(vsr.Header));
87
- size_max += @sizeOf(vsr.Header) * config.clients_max;
87
+ size_max += @sizeOf(vsr.Header) * constants.clients_max;
88
88
 
89
89
  // Then follows the session values for the entries.
90
90
  size_max = std.mem.alignForward(size_max, @alignOf(u64));
91
- size_max += @sizeOf(u64) * config.clients_max;
91
+ size_max += @sizeOf(u64) * constants.clients_max;
92
92
 
93
93
  // Followed by the message bodies for the entries.
94
94
  size_max = std.mem.alignForward(size_max, @alignOf(u8));
95
- size_max += config.message_size_max * config.clients_max;
95
+ size_max += constants.message_size_max * constants.clients_max;
96
96
 
97
97
  // Finally the entry count at the end
98
98
  size_max = std.mem.alignForward(size_max, @alignOf(u32));
@@ -239,13 +239,13 @@ pub const ClientTable = struct {
239
239
  const client = entry.reply.header.client;
240
240
  client_table.entries.putAssumeCapacityNoClobber(client, entry.*);
241
241
 
242
- if (config.verify) assert(client_table.entries.contains(client));
242
+ if (constants.verify) assert(client_table.entries.contains(client));
243
243
  }
244
244
 
245
245
  pub fn remove(client_table: *ClientTable, client: u128) void {
246
246
  assert(client_table.entries.remove(client));
247
247
 
248
- if (config.verify) assert(!client_table.entries.contains(client));
248
+ if (constants.verify) assert(!client_table.entries.contains(client));
249
249
  }
250
250
 
251
251
  pub const Iterator = Entries.ValueIterator;
@@ -5,7 +5,7 @@ const mem = std.mem;
5
5
  const DynamicBitSetUnmanaged = std.bit_set.DynamicBitSetUnmanaged;
6
6
  const MaskInt = DynamicBitSetUnmanaged.MaskInt;
7
7
 
8
- const config = @import("../constants.zig");
8
+ const constants = @import("../constants.zig");
9
9
 
10
10
  const ewah = @import("../ewah.zig").ewah(usize);
11
11
  const div_ceil = @import("../util.zig").div_ceil;
@@ -74,7 +74,7 @@ pub const FreeSet = struct {
74
74
  //
75
75
  // e.g. 10TiB disk ÷ 64KiB/block ÷ 512*8 blocks/shard ÷ 8 shards/byte = 5120B index
76
76
  const shard_cache_lines = 8;
77
- pub const shard_size = shard_cache_lines * config.cache_line_size * @bitSizeOf(u8);
77
+ pub const shard_size = shard_cache_lines * constants.cache_line_size * @bitSizeOf(u8);
78
78
  comptime {
79
79
  assert(shard_size == 4096);
80
80
  assert(@bitSizeOf(MaskInt) == 64);
@@ -404,8 +404,8 @@ fn bit_set_masks(bit_set: DynamicBitSetUnmanaged) []usize {
404
404
  }
405
405
 
406
406
  test "FreeSet block shard count" {
407
- if (config.block_size != 64 * 1024) return;
408
- const blocks_in_tb = @divExact(1 << 40, config.block_size);
407
+ if (constants.block_size != 64 * 1024) return;
408
+ const blocks_in_tb = @divExact(1 << 40, constants.block_size);
409
409
  try test_block_shards_count(5120 * 8, 10 * blocks_in_tb);
410
410
  try test_block_shards_count(5120 * 8 - 1, 10 * blocks_in_tb - FreeSet.shard_size);
411
411
  try test_block_shards_count(1, FreeSet.shard_size); // Must be at least one index bit.
@@ -14,7 +14,7 @@ const std = @import("std");
14
14
  const assert = std.debug.assert;
15
15
  const log = std.log.scoped(.fuzz_vsr_superblock);
16
16
 
17
- const config = @import("../constants.zig");
17
+ const constants = @import("../constants.zig");
18
18
  const util = @import("../util.zig");
19
19
  const vsr = @import("../vsr.zig");
20
20
  const Storage = @import("../test/storage.zig").Storage;
@@ -254,7 +254,7 @@ const Environment = struct {
254
254
  .replica = 0,
255
255
  // Include extra space (beyond what Storage actually needs) because SuperBlock will
256
256
  // update the sector's size according to the highest FreeSet block allocated.
257
- .size_max = data_file_size_min + 1000 * config.block_size,
257
+ .size_max = data_file_size_min + 1000 * constants.block_size,
258
258
  });
259
259
 
260
260
  assert(env.sequence_states.items.len == 0);
@@ -5,7 +5,7 @@ const assert = std.debug.assert;
5
5
  const log = std.log.scoped(.superblock_manifest);
6
6
  const mem = std.mem;
7
7
 
8
- const config = @import("../constants.zig");
8
+ const constants = @import("../constants.zig");
9
9
  const util = @import("../util.zig");
10
10
 
11
11
  // TODO Compute & use the upper bound of manifest blocks (per tree) to size the trailer zone.
@@ -92,7 +92,7 @@ pub const Manifest = struct {
92
92
  }
93
93
 
94
94
  pub fn encode(manifest: *const Manifest, target: []align(@alignOf(u128)) u8) u64 {
95
- if (config.verify) manifest.verify();
95
+ if (constants.verify) manifest.verify();
96
96
 
97
97
  assert(target.len > 0);
98
98
  assert(target.len % @sizeOf(u128) == 0);
@@ -177,7 +177,7 @@ pub const Manifest = struct {
177
177
  mem.set(u128, manifest.checksums[manifest.count..], 0);
178
178
  mem.set(u64, manifest.addresses[manifest.count..], 0);
179
179
 
180
- if (config.verify) manifest.verify();
180
+ if (constants.verify) manifest.verify();
181
181
  }
182
182
 
183
183
  /// Addresses must be unique across all appends, or remove() must be called first.
@@ -208,7 +208,7 @@ pub const Manifest = struct {
208
208
  manifest.count_max,
209
209
  });
210
210
 
211
- if (config.verify) {
211
+ if (constants.verify) {
212
212
  const index = manifest.index_for_address(address).?;
213
213
  assert(index == manifest.count - 1);
214
214
  manifest.verify_index_tree_checksum_address(index, tree, checksum, address);
@@ -243,7 +243,7 @@ pub const Manifest = struct {
243
243
  manifest.count_max,
244
244
  });
245
245
 
246
- if (config.verify) {
246
+ if (constants.verify) {
247
247
  assert(manifest.index_for_address(address) == null);
248
248
  manifest.verify();
249
249
  }
@@ -4,7 +4,7 @@ const Allocator = std.mem.Allocator;
4
4
  const assert = std.debug.assert;
5
5
  const log = std.log.scoped(.vsr);
6
6
 
7
- const config = @import("constants.zig");
7
+ const constants = @import("constants.zig");
8
8
 
9
9
  /// The version of our Viewstamped Replication protocol in use, including customizations.
10
10
  /// For backwards compatibility through breaking changes (e.g. upgrading checksums/ciphers).
@@ -29,8 +29,8 @@ pub const Zone = enum {
29
29
  grid,
30
30
 
31
31
  const size_superblock = @import("vsr/superblock.zig").superblock_zone_size;
32
- const size_wal_headers = config.journal_size_headers;
33
- const size_wal_prepares = config.journal_size_prepares;
32
+ const size_wal_headers = constants.journal_size_headers;
33
+ const size_wal_prepares = constants.journal_size_prepares;
34
34
 
35
35
  comptime {
36
36
  for (.{
@@ -38,7 +38,7 @@ pub const Zone = enum {
38
38
  size_wal_headers,
39
39
  size_wal_prepares,
40
40
  }) |zone_size| {
41
- assert(zone_size % config.sector_size == 0);
41
+ assert(zone_size % constants.sector_size == 0);
42
42
  }
43
43
  }
44
44
 
@@ -630,7 +630,7 @@ pub const Header = extern struct {
630
630
  }
631
631
 
632
632
  pub fn reserved(cluster: u32, slot: u64) Header {
633
- assert(slot < config.journal_slot_count);
633
+ assert(slot < constants.journal_slot_count);
634
634
 
635
635
  var header = Header{
636
636
  .command = .reserved,
@@ -662,8 +662,8 @@ pub const Timeout = struct {
662
662
  id: u128,
663
663
  after: u64,
664
664
  attempts: u8 = 0,
665
- rtt: u64 = config.rtt_ticks,
666
- rtt_multiple: u8 = config.rtt_multiple,
665
+ rtt: u64 = constants.rtt_ticks,
666
+ rtt_multiple: u8 = constants.rtt_multiple,
667
667
  ticks: u64 = 0,
668
668
  ticking: bool = false,
669
669
 
@@ -714,8 +714,8 @@ pub const Timeout = struct {
714
714
 
715
715
  const after = (self.rtt * self.rtt_multiple) + exponential_backoff_with_jitter(
716
716
  random,
717
- config.backoff_min_ticks,
718
- config.backoff_max_ticks,
717
+ constants.backoff_min_ticks,
718
+ constants.backoff_max_ticks,
719
719
  self.attempts,
720
720
  );
721
721
 
@@ -727,8 +727,8 @@ pub const Timeout = struct {
727
727
  self.after,
728
728
  after,
729
729
  self.rtt,
730
- config.backoff_min_ticks,
731
- config.backoff_max_ticks,
730
+ constants.backoff_min_ticks,
731
+ constants.backoff_max_ticks,
732
732
  self.attempts,
733
733
  });
734
734
 
@@ -871,12 +871,12 @@ pub fn parse_address(raw: []const u8) !std.net.Address {
871
871
 
872
872
  // Let's try parsing as a port first:
873
873
  if (std.fmt.parseUnsigned(u16, raw, 10)) |port| {
874
- return std.net.Address.parseIp4(config.address, port) catch unreachable;
874
+ return std.net.Address.parseIp4(constants.address, port) catch unreachable;
875
875
  } else |err| switch (err) {
876
876
  error.Overflow => return error.PortOverflow,
877
877
  error.InvalidCharacter => {
878
878
  // Something was not a digit, let's try parsing as an IPv4 instead:
879
- return std.net.Address.parseIp4(raw, config.port) catch {
879
+ return std.net.Address.parseIp4(raw, constants.port) catch {
880
880
  return error.AddressInvalid;
881
881
  };
882
882
  },
@@ -912,8 +912,8 @@ test "parse_addresses" {
912
912
  .raw = "1.2.3.4:5,4321,2.3.4.5",
913
913
  .addresses = &[3]std.net.Address{
914
914
  std.net.Address.initIp4([_]u8{ 1, 2, 3, 4 }, 5),
915
- try std.net.Address.parseIp4(config.address, 4321),
916
- std.net.Address.initIp4([_]u8{ 2, 3, 4, 5 }, config.port),
915
+ try std.net.Address.parseIp4(constants.address, 4321),
916
+ std.net.Address.initIp4([_]u8{ 2, 3, 4, 5 }, constants.port),
917
917
  },
918
918
  },
919
919
  .{
@@ -921,7 +921,7 @@ test "parse_addresses" {
921
921
  .raw = "1.2.3.4:5,4321",
922
922
  .addresses = &[2]std.net.Address{
923
923
  std.net.Address.initIp4([_]u8{ 1, 2, 3, 4 }, 5),
924
- try std.net.Address.parseIp4(config.address, 4321),
924
+ try std.net.Address.parseIp4(constants.address, 4321),
925
925
  },
926
926
  },
927
927
  };
@@ -963,13 +963,13 @@ test "parse_addresses" {
963
963
  }
964
964
 
965
965
  pub fn sector_floor(offset: u64) u64 {
966
- const sectors = math.divFloor(u64, offset, config.sector_size) catch unreachable;
967
- return sectors * config.sector_size;
966
+ const sectors = math.divFloor(u64, offset, constants.sector_size) catch unreachable;
967
+ return sectors * constants.sector_size;
968
968
  }
969
969
 
970
970
  pub fn sector_ceil(offset: u64) u64 {
971
- const sectors = math.divCeil(u64, offset, config.sector_size) catch unreachable;
972
- return sectors * config.sector_size;
971
+ const sectors = math.divCeil(u64, offset, constants.sector_size) catch unreachable;
972
+ return sectors * constants.sector_size;
973
973
  }
974
974
 
975
975
  pub fn checksum(source: []const u8) u128 {