tigerbeetle-node 0.11.4 → 0.11.5

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (62)
  1. package/dist/.client.node.sha256 +1 -1
  2. package/package.json +1 -1
  3. package/src/node.zig +5 -5
  4. package/src/tigerbeetle/src/benchmark.zig +4 -4
  5. package/src/tigerbeetle/src/c/tb_client/context.zig +6 -6
  6. package/src/tigerbeetle/src/c/tb_client/echo_client.zig +2 -2
  7. package/src/tigerbeetle/src/c/tb_client/thread.zig +0 -1
  8. package/src/tigerbeetle/src/c/tb_client.zig +2 -2
  9. package/src/tigerbeetle/src/c/test.zig +8 -8
  10. package/src/tigerbeetle/src/cli.zig +9 -9
  11. package/src/tigerbeetle/src/demo.zig +4 -4
  12. package/src/tigerbeetle/src/io/darwin.zig +4 -4
  13. package/src/tigerbeetle/src/io/linux.zig +6 -6
  14. package/src/tigerbeetle/src/io/windows.zig +4 -4
  15. package/src/tigerbeetle/src/lsm/compaction.zig +8 -8
  16. package/src/tigerbeetle/src/lsm/forest.zig +2 -2
  17. package/src/tigerbeetle/src/lsm/forest_fuzz.zig +9 -9
  18. package/src/tigerbeetle/src/lsm/grid.zig +5 -5
  19. package/src/tigerbeetle/src/lsm/groove.zig +4 -4
  20. package/src/tigerbeetle/src/lsm/level_iterator.zig +2 -2
  21. package/src/tigerbeetle/src/lsm/manifest.zig +19 -18
  22. package/src/tigerbeetle/src/lsm/manifest_level.zig +2 -2
  23. package/src/tigerbeetle/src/lsm/manifest_log.zig +8 -8
  24. package/src/tigerbeetle/src/lsm/manifest_log_fuzz.zig +15 -7
  25. package/src/tigerbeetle/src/lsm/posted_groove.zig +3 -3
  26. package/src/tigerbeetle/src/lsm/segmented_array_benchmark.zig +13 -13
  27. package/src/tigerbeetle/src/lsm/set_associative_cache.zig +2 -2
  28. package/src/tigerbeetle/src/lsm/table.zig +19 -19
  29. package/src/tigerbeetle/src/lsm/table_immutable.zig +4 -4
  30. package/src/tigerbeetle/src/lsm/table_iterator.zig +17 -9
  31. package/src/tigerbeetle/src/lsm/table_mutable.zig +3 -3
  32. package/src/tigerbeetle/src/lsm/test.zig +6 -6
  33. package/src/tigerbeetle/src/lsm/tree.zig +48 -45
  34. package/src/tigerbeetle/src/lsm/tree_fuzz.zig +14 -14
  35. package/src/tigerbeetle/src/main.zig +18 -11
  36. package/src/tigerbeetle/src/message_bus.zig +25 -25
  37. package/src/tigerbeetle/src/message_pool.zig +17 -17
  38. package/src/tigerbeetle/src/simulator.zig +6 -4
  39. package/src/tigerbeetle/src/storage.zig +12 -12
  40. package/src/tigerbeetle/src/test/accounting/auditor.zig +2 -2
  41. package/src/tigerbeetle/src/test/accounting/workload.zig +5 -5
  42. package/src/tigerbeetle/src/test/cluster.zig +8 -8
  43. package/src/tigerbeetle/src/test/conductor.zig +6 -5
  44. package/src/tigerbeetle/src/test/message_bus.zig +0 -2
  45. package/src/tigerbeetle/src/test/network.zig +5 -5
  46. package/src/tigerbeetle/src/test/state_checker.zig +2 -2
  47. package/src/tigerbeetle/src/test/storage.zig +54 -51
  48. package/src/tigerbeetle/src/test/storage_checker.zig +3 -3
  49. package/src/tigerbeetle/src/time.zig +0 -1
  50. package/src/tigerbeetle/src/tracer.zig +4 -4
  51. package/src/tigerbeetle/src/vsr/client.zig +5 -5
  52. package/src/tigerbeetle/src/vsr/clock.zig +9 -8
  53. package/src/tigerbeetle/src/vsr/journal.zig +47 -47
  54. package/src/tigerbeetle/src/vsr/journal_format_fuzz.zig +11 -11
  55. package/src/tigerbeetle/src/vsr/replica.zig +53 -53
  56. package/src/tigerbeetle/src/vsr/replica_format.zig +8 -8
  57. package/src/tigerbeetle/src/vsr/superblock.zig +55 -55
  58. package/src/tigerbeetle/src/vsr/superblock_client_table.zig +9 -9
  59. package/src/tigerbeetle/src/vsr/superblock_free_set.zig +4 -4
  60. package/src/tigerbeetle/src/vsr/superblock_fuzz.zig +2 -2
  61. package/src/tigerbeetle/src/vsr/superblock_manifest.zig +5 -5
  62. package/src/tigerbeetle/src/vsr.zig +20 -20
@@ -1 +1 @@
1
- 17ba799be39b3aa1c55b37b39f013d58b0c9d8a5e3356aa750951d4c3cc7e248 dist/client.node
1
+ 004fa5f957fa7933ae697abff26d8e6879ebef4257a380594a24c92d285b07d8 dist/client.node
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "tigerbeetle-node",
3
- "version": "0.11.4",
3
+ "version": "0.11.5",
4
4
  "description": "TigerBeetle Node.js client",
5
5
  "main": "dist/index.js",
6
6
  "typings": "dist/index.d.ts",
package/src/node.zig CHANGED
@@ -14,13 +14,13 @@ const CreateTransfersResult = tb.CreateTransfersResult;
14
14
 
15
15
  const Storage = @import("tigerbeetle/src/storage.zig").Storage;
16
16
  const StateMachine = @import("tigerbeetle/src/state_machine.zig").StateMachineType(Storage, .{
17
- .message_body_size_max = config.message_body_size_max,
17
+ .message_body_size_max = constants.message_body_size_max,
18
18
  });
19
19
  const Operation = StateMachine.Operation;
20
20
  const MessageBus = @import("tigerbeetle/src/message_bus.zig").MessageBusClient;
21
21
  const MessagePool = @import("tigerbeetle/src/message_pool.zig").MessagePool;
22
22
  const IO = @import("tigerbeetle/src/io.zig").IO;
23
- const config = @import("tigerbeetle/src/constants.zig");
23
+ const constants = @import("tigerbeetle/src/constants.zig");
24
24
 
25
25
  const vsr = @import("tigerbeetle/src/vsr.zig");
26
26
  const Header = vsr.Header;
@@ -46,7 +46,7 @@ export fn napi_register_module_v1(env: c.napi_env, exports: c.napi_value) c.napi
46
46
  env,
47
47
  exports,
48
48
  "tick_ms",
49
- config.tick_ms,
49
+ constants.tick_ms,
50
50
  "failed to add tick_ms to exports",
51
51
  ) catch return null;
52
52
 
@@ -145,7 +145,7 @@ const Context = struct {
145
145
  context.message_pool = try MessagePool.init(allocator, .client);
146
146
  errdefer context.message_pool.deinit(allocator);
147
147
 
148
- context.addresses = try vsr.parse_addresses(allocator, addresses_raw, config.replicas_max);
148
+ context.addresses = try vsr.parse_addresses(allocator, addresses_raw, constants.replicas_max);
149
149
  errdefer allocator.free(context.addresses);
150
150
  assert(context.addresses.len > 0);
151
151
 
@@ -242,7 +242,7 @@ fn decode_events_from_array(
242
242
  if (array_length < 1) return translate.throw(env, "Batch must contain at least one event.");
243
243
 
244
244
  const body_length = @sizeOf(T) * array_length;
245
- if (@sizeOf(Header) + body_length > config.message_size_max) {
245
+ if (@sizeOf(Header) + body_length > constants.message_size_max) {
246
246
  return translate.throw(env, "Batch is larger than the maximum message size.");
247
247
  }
248
248
 
@@ -1,7 +1,7 @@
1
1
  const std = @import("std");
2
2
  const builtin = @import("builtin");
3
3
  const assert = std.debug.assert;
4
- const config = @import("constants.zig");
4
+ const constants = @import("constants.zig");
5
5
 
6
6
  const log = std.log;
7
7
  pub const log_level: std.log.Level = .err;
@@ -14,7 +14,7 @@ const Storage = @import("storage.zig").Storage;
14
14
  const MessagePool = @import("message_pool.zig").MessagePool;
15
15
  const MessageBus = @import("message_bus.zig").MessageBusClient;
16
16
  const StateMachine = @import("state_machine.zig").StateMachineType(Storage, .{
17
- .message_body_size_max = config.message_body_size_max,
17
+ .message_body_size_max = constants.message_body_size_max,
18
18
  });
19
19
  const RingBuffer = @import("ring_buffer.zig").RingBuffer;
20
20
 
@@ -26,7 +26,7 @@ const tb = @import("tigerbeetle.zig");
26
26
  const batches_count = 100;
27
27
 
28
28
  const transfers_per_batch: u32 = @divExact(
29
- config.message_size_max - @sizeOf(vsr.Header),
29
+ constants.message_size_max - @sizeOf(vsr.Header),
30
30
  @sizeOf(tb.Transfer),
31
31
  );
32
32
  comptime {
@@ -78,7 +78,7 @@ pub fn main() !void {
78
78
 
79
79
  const client_id = std.crypto.random.int(u128);
80
80
  const cluster_id: u32 = 0;
81
- var address = [_]std.net.Address{try std.net.Address.parseIp4("127.0.0.1", config.port)};
81
+ var address = [_]std.net.Address{try std.net.Address.parseIp4("127.0.0.1", constants.port)};
82
82
 
83
83
  var io = try IO.init(32, 0);
84
84
  defer io.deinit();
@@ -2,7 +2,7 @@ const std = @import("std");
2
2
  const os = std.os;
3
3
  const assert = std.debug.assert;
4
4
 
5
- const config = @import("../../constants.zig");
5
+ const constants = @import("../../constants.zig");
6
6
  const log = std.log.scoped(.tb_client_context);
7
7
 
8
8
  const util = @import("../../util.zig");
@@ -122,7 +122,7 @@ pub fn ContextType(
122
122
  context.addresses = vsr.parse_addresses(
123
123
  context.allocator,
124
124
  addresses,
125
- config.replicas_max,
125
+ constants.replicas_max,
126
126
  ) catch |err| return switch (err) {
127
127
  error.AddressLimitExceeded => error.AddressLimitExceeded,
128
128
  else => error.AddressInvalid,
@@ -165,7 +165,7 @@ pub fn ContextType(
165
165
  );
166
166
  errdefer context.client.deinit(context.allocator);
167
167
 
168
- context.messages_available = config.client_request_queue_max;
168
+ context.messages_available = constants.client_request_queue_max;
169
169
  context.on_completion_ctx = on_completion_ctx;
170
170
  context.on_completion_fn = on_completion_fn;
171
171
  context.implementation = .{
@@ -199,7 +199,7 @@ pub fn ContextType(
199
199
  pub fn run(self: *Context) void {
200
200
  while (!self.thread.signal.is_shutdown()) {
201
201
  self.tick();
202
- self.io.run_for_ns(config.tick_ms * std.time.ns_per_ms) catch |err| {
202
+ self.io.run_for_ns(constants.tick_ms * std.time.ns_per_ms) catch |err| {
203
203
  log.err("{}: IO.run() failed: {s}", .{
204
204
  self.client_id,
205
205
  @errorName(err),
@@ -226,7 +226,7 @@ pub fn ContextType(
226
226
  }
227
227
 
228
228
  // Make sure the packet.data wouldn't overflow a message:
229
- const writable = message.buffer[@sizeOf(Header)..][0..config.message_body_size_max];
229
+ const writable = message.buffer[@sizeOf(Header)..][0..constants.message_body_size_max];
230
230
  if (readable.len > writable.len) {
231
231
  return self.on_complete(packet, error.TooMuchData);
232
232
  }
@@ -267,7 +267,7 @@ pub fn ContextType(
267
267
  result: PacketError![]const u8,
268
268
  ) void {
269
269
  self.messages_available += 1;
270
- assert(self.messages_available <= config.client_request_queue_max);
270
+ assert(self.messages_available <= constants.client_request_queue_max);
271
271
 
272
272
  // Signal to resume sending requests that was waiting for available messages.
273
273
  if (self.messages_available == 1) self.thread.signal.notify();
@@ -2,7 +2,7 @@ const std = @import("std");
2
2
  const assert = std.debug.assert;
3
3
  const mem = std.mem;
4
4
 
5
- const config = @import("../../constants.zig");
5
+ const constants = @import("../../constants.zig");
6
6
  const vsr = @import("../../vsr.zig");
7
7
  const Header = vsr.Header;
8
8
 
@@ -24,7 +24,7 @@ pub fn EchoClient(comptime StateMachine_: type, comptime MessageBus: type) type
24
24
  };
25
25
  };
26
26
 
27
- request_queue: RingBuffer(Self.Request, config.client_request_queue_max, .array) = .{},
27
+ request_queue: RingBuffer(Self.Request, constants.client_request_queue_max, .array) = .{},
28
28
  message_pool: *MessagePool,
29
29
 
30
30
  pub fn init(
@@ -1,7 +1,6 @@
1
1
  const std = @import("std");
2
2
  const assert = std.debug.assert;
3
3
 
4
- const config = @import("../../constants.zig");
5
4
  const log = std.log.scoped(.tb_client_thread);
6
5
 
7
6
  const Packet = @import("packet.zig").Packet;
@@ -26,11 +26,11 @@ pub const tb_completion_t = fn (
26
26
  result_len: u32,
27
27
  ) callconv(.C) void;
28
28
 
29
- const config = @import("../constants.zig");
29
+ const constants = @import("../constants.zig");
30
30
  const Storage = @import("../storage.zig").Storage;
31
31
  const MessageBus = @import("../message_bus.zig").MessageBusClient;
32
32
  const StateMachine = @import("../state_machine.zig").StateMachineType(Storage, .{
33
- .message_body_size_max = config.message_body_size_max,
33
+ .message_body_size_max = constants.message_body_size_max,
34
34
  });
35
35
 
36
36
  const ContextType = @import("tb_client/context.zig").ContextType;
@@ -6,7 +6,7 @@ const testing = std.testing;
6
6
  const c = @cImport(@cInclude("tb_client.h"));
7
7
 
8
8
  const util = @import("../util.zig");
9
- const config = @import("../constants.zig");
9
+ const constants = @import("../constants.zig");
10
10
  const Packet = @import("tb_client/packet.zig").Packet;
11
11
 
12
12
  const Mutex = std.Thread.Mutex;
@@ -91,10 +91,10 @@ const Completion = struct {
91
91
  // 3. the data marshaling is correct, and exactly the same data sent was received back.
92
92
  test "c_client echo" {
93
93
  // Using the create_accounts operation for this test.
94
- const RequestContext = RequestContextType(config.message_body_size_max);
94
+ const RequestContext = RequestContextType(constants.message_body_size_max);
95
95
  const create_accounts_operation: u8 = c.TB_OPERATION_CREATE_ACCOUNTS;
96
96
  const event_size = @sizeOf(c.tb_account_t);
97
- const event_request_max = @divFloor(config.message_body_size_max, event_size);
97
+ const event_request_max = @divFloor(constants.message_body_size_max, event_size);
98
98
 
99
99
  // Initializing an echo client for testing purposes.
100
100
  // We ensure that the retry mechanism is being tested
@@ -103,7 +103,7 @@ test "c_client echo" {
103
103
  var tb_packet_list: c.tb_packet_list_t = undefined;
104
104
  const cluster_id = 0;
105
105
  const address = "3000";
106
- const packets_count: u32 = config.client_request_queue_max * 2;
106
+ const packets_count: u32 = constants.client_request_queue_max * 2;
107
107
  const tb_context: usize = 42;
108
108
  const result = c.tb_client_init_echo(
109
109
  &tb_client,
@@ -221,7 +221,7 @@ test "c_client tb_status" {
221
221
  // More addresses thant "replicas_max" should return "TB_STATUS_ADDRESS_LIMIT_EXCEEDED":
222
222
  try assert_status(
223
223
  1,
224
- ("3000," ** config.replicas_max) ++ "3001",
224
+ ("3000," ** constants.replicas_max) ++ "3001",
225
225
  c.TB_STATUS_ADDRESS_LIMIT_EXCEEDED,
226
226
  );
227
227
 
@@ -234,7 +234,7 @@ test "c_client tb_status" {
234
234
 
235
235
  // Asserts the validation rules associated with the "TB_PACKET_STATUS" enum.
236
236
  test "c_client tb_packet_status" {
237
- const RequestContext = RequestContextType(config.message_body_size_max);
237
+ const RequestContext = RequestContextType(constants.message_body_size_max);
238
238
 
239
239
  var tb_client: c.tb_client_t = undefined;
240
240
  var tb_packet_list: c.tb_packet_list_t = undefined;
@@ -299,12 +299,12 @@ test "c_client tb_packet_status" {
299
299
 
300
300
  var packet_list = @ptrCast(*Packet.List, &tb_packet_list);
301
301
 
302
- // Messages larger than config.message_body_size_max should return "too_much_data":
302
+ // Messages larger than constants.message_body_size_max should return "too_much_data":
303
303
  try assert_result(
304
304
  tb_client,
305
305
  packet_list,
306
306
  c.TB_OPERATION_CREATE_TRANSFERS,
307
- config.message_body_size_max + @sizeOf(c.tb_transfer_t),
307
+ constants.message_body_size_max + @sizeOf(c.tb_transfer_t),
308
308
  c.TB_PACKET_TOO_MUCH_DATA,
309
309
  );
310
310
 
@@ -7,7 +7,7 @@ const meta = std.meta;
7
7
  const net = std.net;
8
8
  const os = std.os;
9
9
 
10
- const config = @import("constants.zig");
10
+ const constants = @import("constants.zig");
11
11
  const tigerbeetle = @import("tigerbeetle.zig");
12
12
  const vsr = @import("vsr.zig");
13
13
  const IO = @import("io.zig").IO;
@@ -72,8 +72,8 @@ const usage = fmt.comptimePrint(
72
72
  \\ tigerbeetle version --verbose
73
73
  \\
74
74
  , .{
75
- .default_address = config.address,
76
- .default_port = config.port,
75
+ .default_address = constants.address,
76
+ .default_port = constants.port,
77
77
  });
78
78
 
79
79
  pub const Command = union(enum) {
@@ -206,17 +206,17 @@ pub fn parse_args(allocator: std.mem.Allocator) !Command {
206
206
  .cache_accounts = parse_size_to_count(
207
207
  tigerbeetle.Account,
208
208
  cache_accounts,
209
- config.cache_accounts_max,
209
+ constants.cache_accounts_max,
210
210
  ),
211
211
  .cache_transfers = parse_size_to_count(
212
212
  tigerbeetle.Transfer,
213
213
  cache_transfers,
214
- config.cache_transfers_max,
214
+ constants.cache_transfers_max,
215
215
  ),
216
216
  .cache_transfers_posted = parse_size_to_count(
217
217
  u256, // TODO(#264): Use actual type here, once exposed.
218
218
  cache_transfers_posted,
219
- config.cache_transfers_posted_max,
219
+ constants.cache_transfers_posted_max,
220
220
  ),
221
221
  .path = path orelse fatal("required: <path>", .{}),
222
222
  },
@@ -255,11 +255,11 @@ fn parse_cluster(raw_cluster: []const u8) u32 {
255
255
 
256
256
  /// Parse and allocate the addresses returning a slice into that array.
257
257
  fn parse_addresses(allocator: std.mem.Allocator, raw_addresses: []const u8) []net.Address {
258
- return vsr.parse_addresses(allocator, raw_addresses, config.replicas_max) catch |err| switch (err) {
258
+ return vsr.parse_addresses(allocator, raw_addresses, constants.replicas_max) catch |err| switch (err) {
259
259
  error.AddressHasTrailingComma => fatal("--addresses: invalid trailing comma", .{}),
260
260
  error.AddressLimitExceeded => {
261
261
  fatal("--addresses: too many addresses, at most {d} are allowed", .{
262
- config.replicas_max,
262
+ constants.replicas_max,
263
263
  });
264
264
  },
265
265
  error.AddressHasMoreThanOneColon => {
@@ -366,7 +366,7 @@ fn parse_size_to_count(comptime T: type, string_opt: ?[]const u8, comptime defau
366
366
  }
367
367
 
368
368
  fn parse_replica(raw_replica: []const u8) u8 {
369
- comptime assert(config.replicas_max <= std.math.maxInt(u8));
369
+ comptime assert(constants.replicas_max <= std.math.maxInt(u8));
370
370
  const replica = fmt.parseUnsigned(u8, raw_replica, 10) catch |err| switch (err) {
371
371
  error.Overflow => fatal("--replica: value exceeds an 8-bit unsigned integer", .{}),
372
372
  error.InvalidCharacter => fatal("--replica: value contains an invalid character", .{}),
@@ -1,7 +1,7 @@
1
1
  const std = @import("std");
2
2
  const assert = std.debug.assert;
3
3
 
4
- const config = @import("constants.zig");
4
+ const constants = @import("constants.zig");
5
5
 
6
6
  const tb = @import("tigerbeetle.zig");
7
7
  const Account = tb.Account;
@@ -16,7 +16,7 @@ const Storage = @import("storage.zig").Storage;
16
16
  const MessagePool = @import("message_pool.zig").MessagePool;
17
17
  const MessageBus = @import("message_bus.zig").MessageBusClient;
18
18
  const StateMachine = @import("state_machine.zig").StateMachineType(Storage, .{
19
- .message_body_size_max = config.message_body_size_max,
19
+ .message_body_size_max = constants.message_body_size_max,
20
20
  });
21
21
 
22
22
  const vsr = @import("vsr.zig");
@@ -37,7 +37,7 @@ pub fn request(
37
37
  const allocator = std.heap.page_allocator;
38
38
  const client_id = std.crypto.random.int(u128);
39
39
  const cluster_id: u32 = 0;
40
- var addresses = [_]std.net.Address{try std.net.Address.parseIp4("127.0.0.1", config.port)};
40
+ var addresses = [_]std.net.Address{try std.net.Address.parseIp4("127.0.0.1", constants.port)};
41
41
 
42
42
  var io = try IO.init(32, 0);
43
43
  defer io.deinit();
@@ -74,7 +74,7 @@ pub fn request(
74
74
 
75
75
  while (client.request_queue.count > 0) {
76
76
  client.tick();
77
- try io.run_for_ns(config.tick_ms * std.time.ns_per_ms);
77
+ try io.run_for_ns(constants.tick_ms * std.time.ns_per_ms);
78
78
  }
79
79
  }
80
80
 
@@ -4,7 +4,7 @@ const mem = std.mem;
4
4
  const assert = std.debug.assert;
5
5
  const log = std.log.scoped(.io);
6
6
 
7
- const config = @import("../constants.zig");
7
+ const constants = @import("../constants.zig");
8
8
  const FIFO = @import("../fifo.zig").FIFO;
9
9
  const Time = @import("../time.zig").Time;
10
10
  const buffer_limit = @import("../io.zig").buffer_limit;
@@ -658,8 +658,8 @@ pub const IO = struct {
658
658
  must_create: bool,
659
659
  ) !os.fd_t {
660
660
  assert(relative_path.len > 0);
661
- assert(size >= config.sector_size);
662
- assert(size % config.sector_size == 0);
661
+ assert(size >= constants.sector_size);
662
+ assert(size % constants.sector_size == 0);
663
663
 
664
664
  // TODO Use O_EXCL when opening as a block device to obtain a mandatory exclusive lock.
665
665
  // This is much stronger than an advisory exclusive lock, and is required on some platforms.
@@ -694,7 +694,7 @@ pub const IO = struct {
694
694
 
695
695
  // On darwin assume that Direct I/O is always supported.
696
696
  // Use F_NOCACHE to disable the page cache as O_DIRECT doesn't exist.
697
- if (config.direct_io) {
697
+ if (constants.direct_io) {
698
698
  _ = try os.fcntl(fd, os.F.NOCACHE, 1);
699
699
  }
700
700
 
@@ -7,7 +7,7 @@ const io_uring_cqe = linux.io_uring_cqe;
7
7
  const io_uring_sqe = linux.io_uring_sqe;
8
8
  const log = std.log.scoped(.io);
9
9
 
10
- const config = @import("../constants.zig");
10
+ const constants = @import("../constants.zig");
11
11
  const FIFO = @import("../fifo.zig").FIFO;
12
12
  const buffer_limit = @import("../io.zig").buffer_limit;
13
13
 
@@ -905,8 +905,8 @@ pub const IO = struct {
905
905
  must_create: bool,
906
906
  ) !os.fd_t {
907
907
  assert(relative_path.len > 0);
908
- assert(size >= config.sector_size);
909
- assert(size % config.sector_size == 0);
908
+ assert(size >= constants.sector_size);
909
+ assert(size % constants.sector_size == 0);
910
910
 
911
911
  // TODO Use O_EXCL when opening as a block device to obtain a mandatory exclusive lock.
912
912
  // This is much stronger than an advisory exclusive lock, and is required on some platforms.
@@ -918,11 +918,11 @@ pub const IO = struct {
918
918
  if (@hasDecl(os.O, "LARGEFILE")) flags |= os.O.LARGEFILE;
919
919
 
920
920
  var direct_io_supported = false;
921
- if (config.direct_io) {
921
+ if (constants.direct_io) {
922
922
  direct_io_supported = try fs_supports_direct_io(dir_fd);
923
923
  if (direct_io_supported) {
924
924
  flags |= os.O.DIRECT;
925
- } else if (!config.direct_io_required) {
925
+ } else if (!constants.direct_io_required) {
926
926
  log.warn("file system does not support Direct I/O", .{});
927
927
  } else {
928
928
  // We require Direct I/O for safety to handle fsync failure correctly, and therefore
@@ -968,7 +968,7 @@ pub const IO = struct {
968
968
  log.warn("file system does not support fallocate(), an ENOSPC will panic", .{});
969
969
  log.info("allocating by writing to the last sector of the file instead...", .{});
970
970
 
971
- const sector_size = config.sector_size;
971
+ const sector_size = constants.sector_size;
972
972
  const sector: [sector_size]u8 align(sector_size) = [_]u8{0} ** sector_size;
973
973
 
974
974
  // Handle partial writes where the physical sector is less than a logical sector:
@@ -2,7 +2,7 @@ const std = @import("std");
2
2
  const os = std.os;
3
3
  const assert = std.debug.assert;
4
4
  const log = std.log.scoped(.io);
5
- const config = @import("../constants.zig");
5
+ const constants = @import("../constants.zig");
6
6
 
7
7
  const FIFO = @import("../fifo.zig").FIFO;
8
8
  const Time = @import("../time.zig").Time;
@@ -941,8 +941,8 @@ pub const IO = struct {
941
941
  must_create: bool,
942
942
  ) !os.fd_t {
943
943
  assert(relative_path.len > 0);
944
- assert(size >= config.sector_size);
945
- assert(size % config.sector_size == 0);
944
+ assert(size >= constants.sector_size);
945
+ assert(size % constants.sector_size == 0);
946
946
 
947
947
  const path_w = try os.windows.sliceToPrefixedFileW(relative_path);
948
948
 
@@ -1013,7 +1013,7 @@ pub const IO = struct {
1013
1013
  log.warn("file system failed to preallocate the file memory", .{});
1014
1014
  log.info("allocating by writing to the last sector of the file instead...", .{});
1015
1015
 
1016
- const sector_size = config.sector_size;
1016
+ const sector_size = constants.sector_size;
1017
1017
  const sector: [sector_size]u8 align(sector_size) = [_]u8{0} ** sector_size;
1018
1018
 
1019
1019
  // Handle partial writes where the physical sector is less than a logical sector:
@@ -38,7 +38,7 @@ const assert = std.debug.assert;
38
38
  const log = std.log.scoped(.compaction);
39
39
  const tracer = @import("../tracer.zig");
40
40
 
41
- const config = @import("../constants.zig");
41
+ const constants = @import("../constants.zig");
42
42
 
43
43
  const GridType = @import("grid.zig").GridType;
44
44
  const ManifestType = @import("manifest.zig").ManifestType;
@@ -226,17 +226,17 @@ pub fn CompactionType(
226
226
  assert(!compaction.merge_done and compaction.merge_iterator == null);
227
227
  assert(compaction.tracer_slot == null);
228
228
 
229
- assert(op_min % @divExact(config.lsm_batch_multiple, 2) == 0);
229
+ assert(op_min % @divExact(constants.lsm_batch_multiple, 2) == 0);
230
230
  assert(range.table_count > 0);
231
231
  if (table_a) |t| assert(t.visible(op_min));
232
232
 
233
- assert(level_b < config.lsm_levels);
233
+ assert(level_b < constants.lsm_levels);
234
234
  assert((level_b == 0) == (table_a == null));
235
235
 
236
236
  // Levels may choose to drop tombstones if keys aren't included in the lower levels.
237
237
  // This invariant is always true for the last level as it doesn't have any lower ones.
238
238
  const drop_tombstones = manifest.compaction_must_drop_tombstones(level_b, range);
239
- assert(drop_tombstones or level_b < config.lsm_levels - 1);
239
+ assert(drop_tombstones or level_b < constants.lsm_levels - 1);
240
240
 
241
241
  compaction.* = .{
242
242
  .tree_name = compaction.tree_name,
@@ -625,11 +625,11 @@ pub fn CompactionType(
625
625
  }
626
626
 
627
627
  fn snapshot_max_for_table_input(op_min: u64) u64 {
628
- assert(op_min % @divExact(config.lsm_batch_multiple, 2) == 0);
629
- return op_min + @divExact(config.lsm_batch_multiple, 2) - 1;
628
+ assert(op_min % @divExact(constants.lsm_batch_multiple, 2) == 0);
629
+ return op_min + @divExact(constants.lsm_batch_multiple, 2) - 1;
630
630
  }
631
631
 
632
632
  fn snapshot_min_for_table_output(op_min: u64) u64 {
633
- assert(op_min % @divExact(config.lsm_batch_multiple, 2) == 0);
634
- return op_min + @divExact(config.lsm_batch_multiple, 2);
633
+ assert(op_min % @divExact(constants.lsm_batch_multiple, 2) == 0);
634
+ return op_min + @divExact(constants.lsm_batch_multiple, 2);
635
635
  }
@@ -4,11 +4,11 @@ const assert = std.debug.assert;
4
4
  const math = std.math;
5
5
  const mem = std.mem;
6
6
 
7
- const config = @import("../constants.zig");
7
+ const constants = @import("../constants.zig");
8
8
  const vsr = @import("../vsr.zig");
9
9
 
10
10
  const GridType = @import("grid.zig").GridType;
11
- const NodePool = @import("node_pool.zig").NodePool(config.lsm_manifest_node_size, 16);
11
+ const NodePool = @import("node_pool.zig").NodePool(constants.lsm_manifest_node_size, 16);
12
12
 
13
13
  pub fn ForestType(comptime Storage: type, comptime groove_config: anytype) type {
14
14
  var groove_fields: []const std.builtin.TypeInfo.StructField = &.{};
@@ -3,7 +3,7 @@ const testing = std.testing;
3
3
  const allocator = testing.allocator;
4
4
  const assert = std.debug.assert;
5
5
 
6
- const config = @import("../constants.zig");
6
+ const constants = @import("../constants.zig");
7
7
  const fuzz = @import("../test/fuzz.zig");
8
8
  const vsr = @import("../vsr.zig");
9
9
 
@@ -15,7 +15,7 @@ const Transfer = @import("../tigerbeetle.zig").Transfer;
15
15
  const Account = @import("../tigerbeetle.zig").Account;
16
16
  const Storage = @import("../test/storage.zig").Storage;
17
17
  const StateMachine = @import("../state_machine.zig").StateMachineType(Storage, .{
18
- .message_body_size_max = config.message_body_size_max,
18
+ .message_body_size_max = constants.message_body_size_max,
19
19
  });
20
20
 
21
21
  const GridType = @import("grid.zig").GridType;
@@ -63,8 +63,8 @@ const Environment = struct {
63
63
 
64
64
  const compacts_per_checkpoint = std.math.divCeil(
65
65
  usize,
66
- config.journal_slot_count,
67
- config.lsm_batch_multiple,
66
+ constants.journal_slot_count,
67
+ constants.lsm_batch_multiple,
68
68
  ) catch unreachable;
69
69
 
70
70
  const State = enum {
@@ -193,7 +193,7 @@ const Environment = struct {
193
193
  }
194
194
 
195
195
  pub fn checkpoint(env: *Environment, op: u64) void {
196
- env.checkpoint_op = op - config.lsm_batch_multiple;
196
+ env.checkpoint_op = op - constants.lsm_batch_multiple;
197
197
  env.change_state(.forest_open, .forest_checkpointing);
198
198
  env.forest.checkpoint(forest_checkpoint_callback);
199
199
  env.tick_until_state_change(.forest_checkpointing, .superblock_checkpointing);
@@ -320,9 +320,9 @@ pub fn generate_fuzz_ops(random: std.rand.Random, fuzz_op_count: usize) ![]const
320
320
  // Maybe compact more often than forced to by `puts_since_compact`.
321
321
  .compact = if (random.boolean()) 0 else 1,
322
322
  // Always do puts, and always more puts than removes.
323
- .put_account = config.lsm_batch_multiple * 2,
323
+ .put_account = constants.lsm_batch_multiple * 2,
324
324
  // Maybe do some gets.
325
- .get_account = if (random.boolean()) 0 else config.lsm_batch_multiple,
325
+ .get_account = if (random.boolean()) 0 else constants.lsm_batch_multiple,
326
326
  };
327
327
  log.info("fuzz_op_distribution = {d:.2}", .{fuzz_op_distribution});
328
328
 
@@ -347,8 +347,8 @@ pub fn generate_fuzz_ops(random: std.rand.Random, fuzz_op_count: usize) ![]const
347
347
  op += 1;
348
348
  const checkpoint =
349
349
  // Can only checkpoint on the last beat of the bar.
350
- compact_op % config.lsm_batch_multiple == config.lsm_batch_multiple - 1 and
351
- compact_op > config.lsm_batch_multiple and
350
+ compact_op % constants.lsm_batch_multiple == constants.lsm_batch_multiple - 1 and
351
+ compact_op > constants.lsm_batch_multiple and
352
352
  // Checkpoint at roughly the same rate as log wraparound.
353
353
  random.uintLessThan(usize, Environment.compacts_per_checkpoint) == 0;
354
354
  break :compact FuzzOp{
@@ -2,7 +2,7 @@ const std = @import("std");
2
2
  const assert = std.debug.assert;
3
3
  const mem = std.mem;
4
4
 
5
- const config = @import("../constants.zig");
5
+ const constants = @import("../constants.zig");
6
6
  const vsr = @import("../vsr.zig");
7
7
  const free_set = @import("../vsr/superblock_free_set.zig");
8
8
 
@@ -41,7 +41,7 @@ pub const BlockType = enum(u8) {
41
41
  ///
42
42
  /// Recently/frequently-used blocks are transparently cached in memory.
43
43
  pub fn GridType(comptime Storage: type) type {
44
- const block_size = config.block_size;
44
+ const block_size = constants.block_size;
45
45
  const SuperBlock = SuperBlockType(Storage);
46
46
 
47
47
  const cache_interface = struct {
@@ -72,7 +72,7 @@ pub fn GridType(comptime Storage: type) type {
72
72
  cache_interface.equal_addresses,
73
73
  .{
74
74
  .ways = set_associative_cache_ways,
75
- .value_alignment = config.sector_size,
75
+ .value_alignment = constants.sector_size,
76
76
  },
77
77
  );
78
78
 
@@ -90,8 +90,8 @@ pub fn GridType(comptime Storage: type) type {
90
90
  // TODO put more thought into how low/high this limit should be.
91
91
  pub const write_iops_max = 16;
92
92
 
93
- pub const BlockPtr = *align(config.sector_size) [block_size]u8;
94
- pub const BlockPtrConst = *align(config.sector_size) const [block_size]u8;
93
+ pub const BlockPtr = *align(constants.sector_size) [block_size]u8;
94
+ pub const BlockPtrConst = *align(constants.sector_size) const [block_size]u8;
95
95
  pub const Reservation = free_set.Reservation;
96
96
 
97
97
  pub const Write = struct {
@@ -4,13 +4,13 @@ const assert = std.debug.assert;
4
4
  const math = std.math;
5
5
  const mem = std.mem;
6
6
 
7
- const config = @import("../constants.zig");
7
+ const constants = @import("../constants.zig");
8
8
 
9
9
  const TableType = @import("table.zig").TableType;
10
10
  const TreeType = @import("tree.zig").TreeType;
11
11
  const GridType = @import("grid.zig").GridType;
12
12
  const CompositeKey = @import("composite_key.zig").CompositeKey;
13
- const NodePool = @import("node_pool.zig").NodePool(config.lsm_manifest_node_size, 16);
13
+ const NodePool = @import("node_pool.zig").NodePool(constants.lsm_manifest_node_size, 16);
14
14
 
15
15
  const snapshot_latest = @import("tree.zig").snapshot_latest;
16
16
  const compaction_snapshot_for_op = @import("tree.zig").compaction_snapshot_for_op;
@@ -655,7 +655,7 @@ pub fn GrooveType(
655
655
  assert(!id_tree_value.tombstone());
656
656
  lookup_id_callback(&worker.lookup_id, id_tree_value);
657
657
 
658
- if (config.verify) {
658
+ if (constants.verify) {
659
659
  // If the id is cached, then we must be prefetching it because the object
660
660
  // was not also cached.
661
661
  assert(worker.context.groove.objects.lookup_from_memory(
@@ -682,7 +682,7 @@ pub fn GrooveType(
682
682
  const worker = @fieldParentPtr(PrefetchWorker, "lookup_id", completion);
683
683
 
684
684
  if (result) |id_tree_value| {
685
- if (config.verify) {
685
+ if (constants.verify) {
686
686
  // This was checked in prefetch_enqueue().
687
687
  assert(
688
688
  worker.context.groove.ids.lookup_from_memory(