tigerbeetle-node 0.11.4 → 0.11.5

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (62)
  1. package/dist/.client.node.sha256 +1 -1
  2. package/package.json +1 -1
  3. package/src/node.zig +5 -5
  4. package/src/tigerbeetle/src/benchmark.zig +4 -4
  5. package/src/tigerbeetle/src/c/tb_client/context.zig +6 -6
  6. package/src/tigerbeetle/src/c/tb_client/echo_client.zig +2 -2
  7. package/src/tigerbeetle/src/c/tb_client/thread.zig +0 -1
  8. package/src/tigerbeetle/src/c/tb_client.zig +2 -2
  9. package/src/tigerbeetle/src/c/test.zig +8 -8
  10. package/src/tigerbeetle/src/cli.zig +9 -9
  11. package/src/tigerbeetle/src/demo.zig +4 -4
  12. package/src/tigerbeetle/src/io/darwin.zig +4 -4
  13. package/src/tigerbeetle/src/io/linux.zig +6 -6
  14. package/src/tigerbeetle/src/io/windows.zig +4 -4
  15. package/src/tigerbeetle/src/lsm/compaction.zig +8 -8
  16. package/src/tigerbeetle/src/lsm/forest.zig +2 -2
  17. package/src/tigerbeetle/src/lsm/forest_fuzz.zig +9 -9
  18. package/src/tigerbeetle/src/lsm/grid.zig +5 -5
  19. package/src/tigerbeetle/src/lsm/groove.zig +4 -4
  20. package/src/tigerbeetle/src/lsm/level_iterator.zig +2 -2
  21. package/src/tigerbeetle/src/lsm/manifest.zig +19 -18
  22. package/src/tigerbeetle/src/lsm/manifest_level.zig +2 -2
  23. package/src/tigerbeetle/src/lsm/manifest_log.zig +8 -8
  24. package/src/tigerbeetle/src/lsm/manifest_log_fuzz.zig +15 -7
  25. package/src/tigerbeetle/src/lsm/posted_groove.zig +3 -3
  26. package/src/tigerbeetle/src/lsm/segmented_array_benchmark.zig +13 -13
  27. package/src/tigerbeetle/src/lsm/set_associative_cache.zig +2 -2
  28. package/src/tigerbeetle/src/lsm/table.zig +19 -19
  29. package/src/tigerbeetle/src/lsm/table_immutable.zig +4 -4
  30. package/src/tigerbeetle/src/lsm/table_iterator.zig +17 -9
  31. package/src/tigerbeetle/src/lsm/table_mutable.zig +3 -3
  32. package/src/tigerbeetle/src/lsm/test.zig +6 -6
  33. package/src/tigerbeetle/src/lsm/tree.zig +48 -45
  34. package/src/tigerbeetle/src/lsm/tree_fuzz.zig +14 -14
  35. package/src/tigerbeetle/src/main.zig +18 -11
  36. package/src/tigerbeetle/src/message_bus.zig +25 -25
  37. package/src/tigerbeetle/src/message_pool.zig +17 -17
  38. package/src/tigerbeetle/src/simulator.zig +6 -4
  39. package/src/tigerbeetle/src/storage.zig +12 -12
  40. package/src/tigerbeetle/src/test/accounting/auditor.zig +2 -2
  41. package/src/tigerbeetle/src/test/accounting/workload.zig +5 -5
  42. package/src/tigerbeetle/src/test/cluster.zig +8 -8
  43. package/src/tigerbeetle/src/test/conductor.zig +6 -5
  44. package/src/tigerbeetle/src/test/message_bus.zig +0 -2
  45. package/src/tigerbeetle/src/test/network.zig +5 -5
  46. package/src/tigerbeetle/src/test/state_checker.zig +2 -2
  47. package/src/tigerbeetle/src/test/storage.zig +54 -51
  48. package/src/tigerbeetle/src/test/storage_checker.zig +3 -3
  49. package/src/tigerbeetle/src/time.zig +0 -1
  50. package/src/tigerbeetle/src/tracer.zig +4 -4
  51. package/src/tigerbeetle/src/vsr/client.zig +5 -5
  52. package/src/tigerbeetle/src/vsr/clock.zig +9 -8
  53. package/src/tigerbeetle/src/vsr/journal.zig +47 -47
  54. package/src/tigerbeetle/src/vsr/journal_format_fuzz.zig +11 -11
  55. package/src/tigerbeetle/src/vsr/replica.zig +53 -53
  56. package/src/tigerbeetle/src/vsr/replica_format.zig +8 -8
  57. package/src/tigerbeetle/src/vsr/superblock.zig +55 -55
  58. package/src/tigerbeetle/src/vsr/superblock_client_table.zig +9 -9
  59. package/src/tigerbeetle/src/vsr/superblock_free_set.zig +4 -4
  60. package/src/tigerbeetle/src/vsr/superblock_fuzz.zig +2 -2
  61. package/src/tigerbeetle/src/vsr/superblock_manifest.zig +5 -5
  62. package/src/tigerbeetle/src/vsr.zig +20 -20
@@ -6,7 +6,7 @@ const os = std.os;
6
6
 
7
7
  const is_linux = builtin.target.os.tag == .linux;
8
8
 
9
- const config = @import("constants.zig");
9
+ const constants = @import("constants.zig");
10
10
  const log = std.log.scoped(.message_bus);
11
11
 
12
12
  const vsr = @import("vsr.zig");
@@ -23,14 +23,14 @@ pub const MessageBusClient = MessageBusType(.client);
23
23
 
24
24
  fn MessageBusType(comptime process_type: vsr.ProcessType) type {
25
25
  const SendQueue = RingBuffer(*Message, switch (process_type) {
26
- .replica => config.connection_send_queue_max_replica,
26
+ .replica => constants.connection_send_queue_max_replica,
27
27
  // A client has at most 1 in-flight request, plus pings.
28
- .client => config.connection_send_queue_max_client,
28
+ .client => constants.connection_send_queue_max_client,
29
29
  }, .array);
30
30
 
31
31
  const tcp_sndbuf = switch (process_type) {
32
- .replica => config.tcp_sndbuf_replica,
33
- .client => config.tcp_sndbuf_client,
32
+ .replica => constants.tcp_sndbuf_replica,
33
+ .client => constants.tcp_sndbuf_client,
34
34
  };
35
35
 
36
36
  const Process = union(vsr.ProcessType) {
@@ -99,10 +99,10 @@ fn MessageBusType(comptime process_type: vsr.ProcessType) type {
99
99
  options: Options,
100
100
  ) !Self {
101
101
  // There must be enough connections for all replicas and at least one client.
102
- assert(config.connections_max > options.configuration.len);
102
+ assert(constants.connections_max > options.configuration.len);
103
103
  assert(@as(vsr.ProcessType, process) == process_type);
104
104
 
105
- const connections = try allocator.alloc(Connection, config.connections_max);
105
+ const connections = try allocator.alloc(Connection, constants.connections_max);
106
106
  errdefer allocator.free(connections);
107
107
  mem.set(Connection, connections, .{});
108
108
 
@@ -140,7 +140,7 @@ fn MessageBusType(comptime process_type: vsr.ProcessType) type {
140
140
 
141
141
  // Pre-allocate enough memory to hold all possible connections in the client map.
142
142
  if (process_type == .replica) {
143
- try bus.process.clients.ensureTotalCapacity(allocator, config.connections_max);
143
+ try bus.process.clients.ensureTotalCapacity(allocator, constants.connections_max);
144
144
  }
145
145
 
146
146
  return bus;
@@ -174,17 +174,17 @@ fn MessageBusType(comptime process_type: vsr.ProcessType) type {
174
174
  }
175
175
  }.set;
176
176
 
177
- if (config.tcp_rcvbuf > 0) rcvbuf: {
177
+ if (constants.tcp_rcvbuf > 0) rcvbuf: {
178
178
  if (is_linux) {
179
179
  // Requires CAP_NET_ADMIN privilege (settle for SO_RCVBUF in case of an EPERM):
180
- if (set(fd, os.SOL.SOCKET, os.SO.RCVBUFFORCE, config.tcp_rcvbuf)) |_| {
180
+ if (set(fd, os.SOL.SOCKET, os.SO.RCVBUFFORCE, constants.tcp_rcvbuf)) |_| {
181
181
  break :rcvbuf;
182
182
  } else |err| switch (err) {
183
183
  error.PermissionDenied => {},
184
184
  else => |e| return e,
185
185
  }
186
186
  }
187
- try set(fd, os.SOL.SOCKET, os.SO.RCVBUF, config.tcp_rcvbuf);
187
+ try set(fd, os.SOL.SOCKET, os.SO.RCVBUF, constants.tcp_rcvbuf);
188
188
  }
189
189
 
190
190
  if (tcp_sndbuf > 0) sndbuf: {
@@ -200,23 +200,23 @@ fn MessageBusType(comptime process_type: vsr.ProcessType) type {
200
200
  try set(fd, os.SOL.SOCKET, os.SO.SNDBUF, tcp_sndbuf);
201
201
  }
202
202
 
203
- if (config.tcp_keepalive) {
203
+ if (constants.tcp_keepalive) {
204
204
  try set(fd, os.SOL.SOCKET, os.SO.KEEPALIVE, 1);
205
205
  if (is_linux) {
206
- try set(fd, os.IPPROTO.TCP, os.TCP.KEEPIDLE, config.tcp_keepidle);
207
- try set(fd, os.IPPROTO.TCP, os.TCP.KEEPINTVL, config.tcp_keepintvl);
208
- try set(fd, os.IPPROTO.TCP, os.TCP.KEEPCNT, config.tcp_keepcnt);
206
+ try set(fd, os.IPPROTO.TCP, os.TCP.KEEPIDLE, constants.tcp_keepidle);
207
+ try set(fd, os.IPPROTO.TCP, os.TCP.KEEPINTVL, constants.tcp_keepintvl);
208
+ try set(fd, os.IPPROTO.TCP, os.TCP.KEEPCNT, constants.tcp_keepcnt);
209
209
  }
210
210
  }
211
211
 
212
- if (config.tcp_user_timeout_ms > 0) {
212
+ if (constants.tcp_user_timeout_ms > 0) {
213
213
  if (is_linux) {
214
- try set(fd, os.IPPROTO.TCP, os.TCP.USER_TIMEOUT, config.tcp_user_timeout_ms);
214
+ try set(fd, os.IPPROTO.TCP, os.TCP.USER_TIMEOUT, constants.tcp_user_timeout_ms);
215
215
  }
216
216
  }
217
217
 
218
218
  // Set tcp no-delay
219
- if (config.tcp_nodelay) {
219
+ if (constants.tcp_nodelay) {
220
220
  if (is_linux) {
221
221
  try set(fd, os.IPPROTO.TCP, os.TCP.NODELAY, 1);
222
222
  }
@@ -224,7 +224,7 @@ fn MessageBusType(comptime process_type: vsr.ProcessType) type {
224
224
 
225
225
  try set(fd, os.SOL.SOCKET, os.SO.REUSEADDR, 1);
226
226
  try os.bind(fd, &address.any, address.getOsSockLen());
227
- try os.listen(fd, config.tcp_backlog);
227
+ try os.listen(fd, constants.tcp_backlog);
228
228
 
229
229
  return fd;
230
230
  }
@@ -465,8 +465,8 @@ fn MessageBusType(comptime process_type: vsr.ProcessType) type {
465
465
  var attempts = &bus.replicas_connect_attempts[replica];
466
466
  const ms = vsr.exponential_backoff_with_jitter(
467
467
  bus.prng.random(),
468
- config.connection_delay_min_ms,
469
- config.connection_delay_max_ms,
468
+ constants.connection_delay_min_ms,
469
+ constants.connection_delay_max_ms,
470
470
  attempts.*,
471
471
  );
472
472
  attempts.* += 1;
@@ -691,7 +691,7 @@ fn MessageBusType(comptime process_type: vsr.ProcessType) type {
691
691
  return null;
692
692
  }
693
693
 
694
- if (header.size < @sizeOf(Header) or header.size > config.message_size_max) {
694
+ if (header.size < @sizeOf(Header) or header.size > constants.message_size_max) {
695
695
  log.err("header with invalid size {d} received from peer {}", .{
696
696
  header.size,
697
697
  connection.peer,
@@ -771,7 +771,7 @@ fn MessageBusType(comptime process_type: vsr.ProcessType) type {
771
771
  const sector_ceil = vsr.sector_ceil(message.header.size);
772
772
  if (message.header.size != sector_ceil) {
773
773
  assert(message.header.size < sector_ceil);
774
- assert(message.buffer.len == config.message_size_max + config.sector_size);
774
+ assert(message.buffer.len == constants.message_size_max + constants.sector_size);
775
775
  mem.set(u8, message.buffer[message.header.size..sector_ceil], 0);
776
776
  }
777
777
  }
@@ -862,7 +862,7 @@ fn MessageBusType(comptime process_type: vsr.ProcessType) type {
862
862
  assert(!connection.recv_submitted);
863
863
  connection.recv_submitted = true;
864
864
 
865
- assert(connection.recv_progress < config.message_size_max);
865
+ assert(connection.recv_progress < constants.message_size_max);
866
866
 
867
867
  bus.io.recv(
868
868
  *Self,
@@ -870,7 +870,7 @@ fn MessageBusType(comptime process_type: vsr.ProcessType) type {
870
870
  on_recv,
871
871
  &connection.recv_completion,
872
872
  connection.fd,
873
- connection.recv_message.?.buffer[connection.recv_progress..config.message_size_max],
873
+ connection.recv_message.?.buffer[connection.recv_progress..constants.message_size_max],
874
874
  );
875
875
  }
876
876
 
@@ -3,36 +3,36 @@ const builtin = @import("builtin");
3
3
  const assert = std.debug.assert;
4
4
  const mem = std.mem;
5
5
 
6
- const config = @import("constants.zig");
6
+ const constants = @import("constants.zig");
7
7
 
8
8
  const vsr = @import("vsr.zig");
9
9
  const Header = vsr.Header;
10
10
 
11
11
  comptime {
12
12
  // message_size_max must be a multiple of sector_size for Direct I/O
13
- assert(config.message_size_max % config.sector_size == 0);
13
+ assert(constants.message_size_max % constants.sector_size == 0);
14
14
  }
15
15
 
16
16
  /// Add an extra sector_size bytes to allow a partially received subsequent
17
17
  /// message to be shifted to make space for 0 padding to vsr.sector_ceil.
18
- const message_size_max_padded = config.message_size_max + config.sector_size;
18
+ const message_size_max_padded = constants.message_size_max + constants.sector_size;
19
19
 
20
20
  /// The number of full-sized messages allocated at initialization by the replica message pool.
21
21
  /// There must be enough messages to ensure that the replica can always progress, to avoid deadlock.
22
22
  pub const messages_max_replica = messages_max: {
23
23
  var sum: usize = 0;
24
24
 
25
- sum += config.journal_iops_read_max + config.journal_iops_write_max; // Journal I/O
26
- sum += config.clients_max; // SuperBlock.client_table
25
+ sum += constants.journal_iops_read_max + constants.journal_iops_write_max; // Journal I/O
26
+ sum += constants.clients_max; // SuperBlock.client_table
27
27
  sum += 1; // Replica.loopback_queue
28
- sum += config.pipeline_max; // Replica.pipeline
28
+ sum += constants.pipeline_max; // Replica.pipeline
29
29
  sum += 1; // Replica.commit_prepare
30
30
  // Replica.do_view_change_from_all_replicas quorum:
31
31
  // Replica.recovery_response_quorum is only used for recovery and does not increase the limit.
32
32
  // All other quorums are bitsets.
33
- sum += config.replicas_max;
34
- sum += config.connections_max; // Connection.recv_message
35
- sum += config.connections_max * config.connection_send_queue_max_replica; // Connection.send_queue
33
+ sum += constants.replicas_max;
34
+ sum += constants.connections_max; // Connection.recv_message
35
+ sum += constants.connections_max * constants.connection_send_queue_max_replica; // Connection.send_queue
36
36
  sum += 1; // Handle bursts (e.g. Connection.parse_message)
37
37
  // Handle Replica.commit_op's reply:
38
38
  // (This is separate from the burst +1 because they may occur concurrently).
@@ -45,9 +45,9 @@ pub const messages_max_replica = messages_max: {
45
45
  pub const messages_max_client = messages_max: {
46
46
  var sum: usize = 0;
47
47
 
48
- sum += config.replicas_max; // Connection.recv_message
49
- sum += config.replicas_max * config.connection_send_queue_max_client; // Connection.send_queue
50
- sum += config.client_request_queue_max; // Client.request_queue
48
+ sum += constants.replicas_max; // Connection.recv_message
49
+ sum += constants.replicas_max * constants.connection_send_queue_max_client; // Connection.send_queue
50
+ sum += constants.client_request_queue_max; // Client.request_queue
51
51
  // Handle bursts (e.g. Connection.parse_message, or sending a ping when the send queue is full).
52
52
  sum += 1;
53
53
 
@@ -56,8 +56,8 @@ pub const messages_max_client = messages_max: {
56
56
 
57
57
  comptime {
58
58
  // These conditions are necessary (but not sufficient) to prevent deadlocks.
59
- assert(messages_max_replica > config.replicas_max);
60
- assert(messages_max_client > config.client_request_queue_max);
59
+ assert(messages_max_replica > constants.replicas_max);
60
+ assert(messages_max_client > constants.client_request_queue_max);
61
61
  }
62
62
 
63
63
  /// A pool of reference-counted Messages, memory for which is allocated only once during
@@ -66,7 +66,7 @@ pub const MessagePool = struct {
66
66
  pub const Message = struct {
67
67
  // TODO: replace this with a header() function to save memory
68
68
  header: *Header,
69
- buffer: []align(config.sector_size) u8,
69
+ buffer: []align(constants.sector_size) u8,
70
70
  references: u32 = 0,
71
71
  next: ?*Message,
72
72
 
@@ -103,7 +103,7 @@ pub const MessagePool = struct {
103
103
  while (i < messages_max) : (i += 1) {
104
104
  const buffer = try allocator.allocAdvanced(
105
105
  u8,
106
- config.sector_size,
106
+ constants.sector_size,
107
107
  message_size_max_padded,
108
108
  .exact,
109
109
  );
@@ -134,7 +134,7 @@ pub const MessagePool = struct {
134
134
  assert(free_count == pool.messages_max);
135
135
  }
136
136
 
137
- /// Get an unused message with a buffer of config.message_size_max.
137
+ /// Get an unused message with a buffer of constants.message_size_max.
138
138
  /// The returned message has exactly one reference.
139
139
  pub fn get_message(pool: *MessagePool) *Message {
140
140
  const message = pool.free_list.?;
@@ -4,7 +4,7 @@ const assert = std.debug.assert;
4
4
  const mem = std.mem;
5
5
 
6
6
  const tb = @import("tigerbeetle.zig");
7
- const config = @import("constants.zig");
7
+ const constants = @import("constants.zig");
8
8
  const vsr = @import("vsr.zig");
9
9
  const Header = vsr.Header;
10
10
 
@@ -31,6 +31,8 @@ const log_state_transitions_only = builtin.mode != .Debug;
31
31
 
32
32
  const log_simulator = std.log.scoped(.simulator);
33
33
 
34
+ pub const tigerbeetle_config = @import("config.zig").configs.test_min;
35
+
34
36
  /// You can fine tune your log levels even further (debug/info/warn/err):
35
37
  pub const log_level: std.log.Level = if (log_state_transitions_only) .info else .debug;
36
38
 
@@ -77,8 +79,8 @@ pub fn main() !void {
77
79
  var prng = std.rand.DefaultPrng.init(seed);
78
80
  const random = prng.random();
79
81
 
80
- const replica_count = 1 + random.uintLessThan(u8, config.replicas_max);
81
- const client_count = 1 + random.uintLessThan(u8, config.clients_max);
82
+ const replica_count = 1 + random.uintLessThan(u8, constants.replicas_max);
83
+ const client_count = 1 + random.uintLessThan(u8, constants.clients_max);
82
84
  const node_count = replica_count + client_count;
83
85
 
84
86
  const ticks_max = 50_000_000;
@@ -88,7 +90,7 @@ pub fn main() !void {
88
90
 
89
91
  // TODO: When block recovery and state transfer are implemented, remove this flag to allow
90
92
  // crashes to coexist with WAL wraps.
91
- const requests_committed_max: usize = config.journal_slot_count * 3;
93
+ const requests_committed_max: usize = constants.journal_slot_count * 3;
92
94
 
93
95
  const cluster_options: ClusterOptions = .{
94
96
  .cluster = cluster_id,
@@ -5,7 +5,7 @@ const assert = std.debug.assert;
5
5
  const log = std.log.scoped(.storage);
6
6
 
7
7
  const IO = @import("io.zig").IO;
8
- const config = @import("constants.zig");
8
+ const constants = @import("constants.zig");
9
9
  const fatal = @import("cli.zig").fatal;
10
10
  const vsr = @import("vsr.zig");
11
11
 
@@ -50,10 +50,10 @@ pub const Storage = struct {
50
50
  // that we need to subtract from `target_max` to get back onto the boundary.
51
51
  var max = read.target_max;
52
52
 
53
- const partial_sector_read_remainder = read.buffer.len % config.sector_size;
53
+ const partial_sector_read_remainder = read.buffer.len % constants.sector_size;
54
54
  if (partial_sector_read_remainder != 0) {
55
55
  // TODO log.debug() because this is interesting, and to ensure fuzz test coverage.
56
- const partial_sector_read = config.sector_size - partial_sector_read_remainder;
56
+ const partial_sector_read = constants.sector_size - partial_sector_read_remainder;
57
57
  max -= partial_sector_read;
58
58
  }
59
59
 
@@ -151,7 +151,7 @@ pub const Storage = struct {
151
151
  // physical sectors than the logical sector size, so we cannot expect `target.len`
152
152
  // to be an exact logical sector multiple.
153
153
  const target = read.target();
154
- if (target.len > config.sector_size) {
154
+ if (target.len > constants.sector_size) {
155
155
  // We tried to read more than a logical sector and failed.
156
156
  log.err("latent sector error: offset={}, subdividing read...", .{read.offset});
157
157
 
@@ -164,10 +164,10 @@ pub const Storage = struct {
164
164
  // These lines both implement ceiling division e.g. `((3 - 1) / 2) + 1 == 2` and
165
165
  // require that the numerator is always greater than zero:
166
166
  assert(target.len > 0);
167
- const target_sectors = @divFloor(target.len - 1, config.sector_size) + 1;
167
+ const target_sectors = @divFloor(target.len - 1, constants.sector_size) + 1;
168
168
  assert(target_sectors > 0);
169
- read.target_max = (@divFloor(target_sectors - 1, 2) + 1) * config.sector_size;
170
- assert(read.target_max >= config.sector_size);
169
+ read.target_max = (@divFloor(target_sectors - 1, 2) + 1) * constants.sector_size;
170
+ assert(read.target_max >= constants.sector_size);
171
171
 
172
172
  // Pass 0 for `bytes_read`, we want to retry the read with smaller `target_max`:
173
173
  self.start_read(read, 0);
@@ -227,9 +227,9 @@ pub const Storage = struct {
227
227
  // then increase `target_max` according to AIMD now that we have read successfully and
228
228
  // hopefully cleared the faulty zone.
229
229
  // We assume that `target_max` may exceed `read.buffer.len` at any time.
230
- if (read.target_max == config.sector_size) {
230
+ if (read.target_max == constants.sector_size) {
231
231
  // TODO Add log.debug because this is interesting.
232
- read.target_max += config.sector_size;
232
+ read.target_max += constants.sector_size;
233
233
  }
234
234
 
235
235
  self.start_read(read, bytes_read);
@@ -319,9 +319,9 @@ pub const Storage = struct {
319
319
  /// We check this only at the start of a read or write because the physical sector size may be
320
320
  /// less than our logical sector size so that partial IOs then leave us no longer aligned.
321
321
  fn assert_alignment(buffer: []const u8, offset: u64) void {
322
- assert(@ptrToInt(buffer.ptr) % config.sector_size == 0);
323
- assert(buffer.len % config.sector_size == 0);
324
- assert(offset % config.sector_size == 0);
322
+ assert(@ptrToInt(buffer.ptr) % constants.sector_size == 0);
323
+ assert(buffer.len % constants.sector_size == 0);
324
+ assert(offset % constants.sector_size == 0);
325
325
  }
326
326
 
327
327
  /// Ensures that the read or write is within bounds and intends to read or write some bytes.
@@ -6,7 +6,7 @@ const std = @import("std");
6
6
  const assert = std.debug.assert;
7
7
  const log = std.log.scoped(.test_auditor);
8
8
 
9
- const config = @import("../../constants.zig");
9
+ const constants = @import("../../constants.zig");
10
10
  const tb = @import("../../tigerbeetle.zig");
11
11
  const vsr = @import("../../vsr.zig");
12
12
  const RingBuffer = @import("../../ring_buffer.zig").RingBuffer;
@@ -16,7 +16,7 @@ const IdPermutation = @import("../id.zig").IdPermutation;
16
16
  const PriorityQueue = @import("../priority_queue.zig").PriorityQueue;
17
17
  const Storage = @import("../storage.zig").Storage;
18
18
  const StateMachine = @import("../../state_machine.zig").StateMachineType(Storage, .{
19
- .message_body_size_max = config.message_body_size_max,
19
+ .message_body_size_max = constants.message_body_size_max,
20
20
  });
21
21
 
22
22
  pub const CreateAccountResultSet = std.enums.EnumSet(tb.CreateAccountResult);
@@ -21,7 +21,7 @@ const std = @import("std");
21
21
  const assert = std.debug.assert;
22
22
  const log = std.log.scoped(.test_workload);
23
23
 
24
- const config = @import("../../constants.zig");
24
+ const constants = @import("../../constants.zig");
25
25
  const tb = @import("../../tigerbeetle.zig");
26
26
  const vsr = @import("../../vsr.zig");
27
27
  const accounting_auditor = @import("./auditor.zig");
@@ -266,7 +266,7 @@ pub fn WorkloadType(comptime AccountingStateMachine: type) type {
266
266
  var transfers_delivered_recently = TransferBatchQueue.init(allocator, {});
267
267
  errdefer transfers_delivered_recently.deinit();
268
268
  try transfers_delivered_recently.ensureTotalCapacity(
269
- options.auditor_options.client_count * config.client_request_queue_max,
269
+ options.auditor_options.client_count * constants.client_request_queue_max,
270
270
  );
271
271
 
272
272
  for (auditor.accounts) |*account, i| {
@@ -308,7 +308,7 @@ pub fn WorkloadType(comptime AccountingStateMachine: type) type {
308
308
  size: usize,
309
309
  } {
310
310
  assert(client_index < self.auditor.options.client_count);
311
- assert(body.len == config.message_size_max - @sizeOf(vsr.Header));
311
+ assert(body.len == constants.message_size_max - @sizeOf(vsr.Header));
312
312
 
313
313
  const action = action: {
314
314
  if (!self.accounts_sent and self.random.boolean()) {
@@ -353,8 +353,8 @@ pub fn WorkloadType(comptime AccountingStateMachine: type) type {
353
353
  reply_body: []align(@alignOf(vsr.Header)) const u8,
354
354
  ) void {
355
355
  assert(timestamp != 0);
356
- assert(request_body.len <= config.message_size_max - @sizeOf(vsr.Header));
357
- assert(reply_body.len <= config.message_size_max - @sizeOf(vsr.Header));
356
+ assert(request_body.len <= constants.message_size_max - @sizeOf(vsr.Header));
357
+ assert(reply_body.len <= constants.message_size_max - @sizeOf(vsr.Header));
358
358
 
359
359
  switch (operation.cast(AccountingStateMachine)) {
360
360
  .create_accounts => self.auditor.on_create_accounts(
@@ -2,7 +2,7 @@ const std = @import("std");
2
2
  const assert = std.debug.assert;
3
3
  const mem = std.mem;
4
4
 
5
- const config = @import("../constants.zig");
5
+ const constants = @import("../constants.zig");
6
6
 
7
7
  const message_pool = @import("../message_pool.zig");
8
8
  const MessagePool = message_pool.MessagePool;
@@ -12,7 +12,7 @@ const Network = @import("network.zig").Network;
12
12
  const NetworkOptions = @import("network.zig").NetworkOptions;
13
13
 
14
14
  pub const StateMachine = @import("../state_machine.zig").StateMachineType(Storage, .{
15
- .message_body_size_max = config.message_body_size_max,
15
+ .message_body_size_max = constants.message_body_size_max,
16
16
  });
17
17
  const MessageBus = @import("message_bus.zig").MessageBus;
18
18
  const Storage = @import("storage.zig").Storage;
@@ -77,7 +77,7 @@ pub const Cluster = struct {
77
77
  assert(options.replica_count > 0);
78
78
  assert(options.client_count > 0);
79
79
  assert(options.grid_size_max > 0);
80
- assert(options.grid_size_max % config.sector_size == 0);
80
+ assert(options.grid_size_max % constants.sector_size == 0);
81
81
  assert(options.health_options.crash_probability < 1.0);
82
82
  assert(options.health_options.crash_probability >= 0.0);
83
83
  assert(options.health_options.restart_probability < 1.0);
@@ -123,10 +123,10 @@ pub const Cluster = struct {
123
123
  .network = network,
124
124
  };
125
125
 
126
- var buffer: [config.replicas_max]Storage.FaultyAreas = undefined;
126
+ var buffer: [constants.replicas_max]Storage.FaultyAreas = undefined;
127
127
  const faulty_wal_areas = Storage.generate_faulty_wal_areas(
128
128
  prng,
129
- config.journal_size_max,
129
+ constants.journal_size_max,
130
130
  options.replica_count,
131
131
  &buffer,
132
132
  );
@@ -138,7 +138,7 @@ pub const Cluster = struct {
138
138
  storage_options.faulty_wal_areas = faulty_wal_areas[replica_index];
139
139
  storage.* = try Storage.init(
140
140
  allocator,
141
- superblock_zone_size + config.journal_size_max + options.grid_size_max,
141
+ superblock_zone_size + constants.journal_size_max + options.grid_size_max,
142
142
  storage_options,
143
143
  );
144
144
  }
@@ -161,7 +161,7 @@ pub const Cluster = struct {
161
161
 
162
162
  for (cluster.replicas) |_, replica_index| {
163
163
  try cluster.open_replica(@intCast(u8, replica_index), .{
164
- .resolution = config.tick_ms * std.time.ns_per_ms,
164
+ .resolution = constants.tick_ms * std.time.ns_per_ms,
165
165
  .offset_type = .linear,
166
166
  .offset_coefficient_A = 0,
167
167
  .offset_coefficient_B = 0,
@@ -238,7 +238,7 @@ pub const Cluster = struct {
238
238
 
239
239
  // This whole workaround doesn't handle log wrapping correctly.
240
240
  // If the log has wrapped, don't crash the replica.
241
- if (cluster_op_max >= config.journal_slot_count) {
241
+ if (cluster_op_max >= constants.journal_slot_count) {
242
242
  return false;
243
243
  }
244
244
 
@@ -9,7 +9,7 @@ const log = std.log.scoped(.test_conductor);
9
9
 
10
10
  const vsr = @import("../vsr.zig");
11
11
  const util = @import("../util.zig");
12
- const config = @import("../constants.zig");
12
+ const constants = @import("../constants.zig");
13
13
  const IdPermutation = @import("id.zig").IdPermutation;
14
14
  const MessagePool = @import("../message_pool.zig").MessagePool;
15
15
  const Message = MessagePool.Message;
@@ -47,7 +47,8 @@ pub fn ConductorType(
47
47
  ///
48
48
  /// `Conduction.stalled_queue` hold replies (and corresponding requests) that are
49
49
  /// waiting to be processed.
50
- pub const stalled_queue_capacity = config.clients_max * config.client_request_queue_max * 2;
50
+ pub const stalled_queue_capacity =
51
+ constants.clients_max * constants.client_request_queue_max * 2;
51
52
 
52
53
  random: std.rand.Random,
53
54
  workload: *Workload,
@@ -225,7 +226,7 @@ pub fn ConductorType(
225
226
  var client = &self.clients[client_index];
226
227
 
227
228
  // Check for space in the client's own request queue.
228
- if (client.request_queue.count + 1 > config.client_request_queue_max) return;
229
+ if (client.request_queue.count + 1 > constants.client_request_queue_max) return;
229
230
 
230
231
  var request_message = client.get_message();
231
232
  defer client.unref(request_message);
@@ -234,10 +235,10 @@ pub fn ConductorType(
234
235
  client_index,
235
236
  @alignCast(
236
237
  @alignOf(vsr.Header),
237
- request_message.buffer[@sizeOf(vsr.Header)..config.message_size_max],
238
+ request_message.buffer[@sizeOf(vsr.Header)..constants.message_size_max],
238
239
  ),
239
240
  );
240
- assert(request_metadata.size <= config.message_size_max - @sizeOf(vsr.Header));
241
+ assert(request_metadata.size <= constants.message_size_max - @sizeOf(vsr.Header));
241
242
 
242
243
  client.request(
243
244
  0,
@@ -1,8 +1,6 @@
1
1
  const std = @import("std");
2
2
  const assert = std.debug.assert;
3
3
 
4
- const config = @import("../constants.zig");
5
-
6
4
  const MessagePool = @import("../message_pool.zig").MessagePool;
7
5
  const Message = MessagePool.Message;
8
6
  const Header = @import("../vsr.zig").Header;
@@ -3,7 +3,7 @@ const math = std.math;
3
3
  const mem = std.mem;
4
4
  const assert = std.debug.assert;
5
5
 
6
- const config = @import("../constants.zig");
6
+ const constants = @import("../constants.zig");
7
7
  const vsr = @import("../vsr.zig");
8
8
  const util = @import("../util.zig");
9
9
 
@@ -115,7 +115,7 @@ pub const Network = struct {
115
115
  const raw_process = switch (process) {
116
116
  .replica => |replica| replica,
117
117
  .client => |client| blk: {
118
- assert(client >= config.replicas_max);
118
+ assert(client >= constants.replicas_max);
119
119
  break :blk client;
120
120
  },
121
121
  };
@@ -194,7 +194,7 @@ pub const Network = struct {
194
194
  const sector_ceil = vsr.sector_ceil(target_message.header.size);
195
195
  if (target_message.header.size != sector_ceil) {
196
196
  assert(target_message.header.size < sector_ceil);
197
- assert(target_message.buffer.len == config.message_size_max + config.sector_size);
197
+ assert(target_message.buffer.len == constants.message_size_max + constants.sector_size);
198
198
  mem.set(u8, target_message.buffer[target_message.header.size..sector_ceil], 0);
199
199
  }
200
200
  }
@@ -204,9 +204,9 @@ pub const Network = struct {
204
204
 
205
205
  fn raw_process_to_process(raw: u128) Process {
206
206
  switch (raw) {
207
- 0...(config.replicas_max - 1) => return .{ .replica = @intCast(u8, raw) },
207
+ 0...(constants.replicas_max - 1) => return .{ .replica = @intCast(u8, raw) },
208
208
  else => {
209
- assert(raw >= config.replicas_max);
209
+ assert(raw >= constants.replicas_max);
210
210
  return .{ .client = raw };
211
211
  },
212
212
  }
@@ -2,7 +2,7 @@ const std = @import("std");
2
2
  const assert = std.debug.assert;
3
3
  const mem = std.mem;
4
4
 
5
- const config = @import("../constants.zig");
5
+ const constants = @import("../constants.zig");
6
6
  const vsr = @import("../vsr.zig");
7
7
 
8
8
  const Cluster = @import("cluster.zig").Cluster;
@@ -23,7 +23,7 @@ const log = std.log.scoped(.state_checker);
23
23
 
24
24
  pub const StateChecker = struct {
25
25
  /// Indexed by replica index.
26
- replica_states: [config.replicas_max]u128 = [_]u128{0} ** config.replicas_max,
26
+ replica_states: [constants.replicas_max]u128 = [_]u128{0} ** constants.replicas_max,
27
27
 
28
28
  /// Keyed by committed `message.header.checksum`.
29
29
  ///