tigerbeetle-node 0.5.1 → 0.8.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (56)
  1. package/README.md +88 -69
  2. package/dist/benchmark.js +96 -94
  3. package/dist/benchmark.js.map +1 -1
  4. package/dist/index.d.ts +82 -82
  5. package/dist/index.js +74 -93
  6. package/dist/index.js.map +1 -1
  7. package/dist/test.js +134 -111
  8. package/dist/test.js.map +1 -1
  9. package/package.json +3 -2
  10. package/src/benchmark.ts +114 -118
  11. package/src/index.ts +102 -111
  12. package/src/node.zig +54 -62
  13. package/src/test.ts +146 -125
  14. package/src/tigerbeetle/scripts/benchmark.bat +46 -0
  15. package/src/tigerbeetle/scripts/benchmark.sh +5 -0
  16. package/src/tigerbeetle/scripts/install_zig.bat +2 -2
  17. package/src/tigerbeetle/scripts/install_zig.sh +3 -3
  18. package/src/tigerbeetle/scripts/vopr.sh +2 -2
  19. package/src/tigerbeetle/src/benchmark.zig +65 -102
  20. package/src/tigerbeetle/src/cli.zig +39 -18
  21. package/src/tigerbeetle/src/config.zig +27 -12
  22. package/src/tigerbeetle/src/demo.zig +1 -14
  23. package/src/tigerbeetle/src/demo_01_create_accounts.zig +10 -10
  24. package/src/tigerbeetle/src/demo_03_create_transfers.zig +3 -1
  25. package/src/tigerbeetle/src/{demo_04_create_transfers_two_phase_commit.zig → demo_04_create_pending_transfers.zig} +12 -6
  26. package/src/tigerbeetle/src/demo_05_post_pending_transfers.zig +37 -0
  27. package/src/tigerbeetle/src/demo_06_void_pending_transfers.zig +24 -0
  28. package/src/tigerbeetle/src/demo_07_lookup_transfers.zig +1 -1
  29. package/src/tigerbeetle/src/io/benchmark.zig +24 -49
  30. package/src/tigerbeetle/src/io/darwin.zig +175 -44
  31. package/src/tigerbeetle/src/io/linux.zig +177 -72
  32. package/src/tigerbeetle/src/io/test.zig +61 -39
  33. package/src/tigerbeetle/src/io/windows.zig +1161 -0
  34. package/src/tigerbeetle/src/io.zig +2 -0
  35. package/src/tigerbeetle/src/main.zig +14 -9
  36. package/src/tigerbeetle/src/message_bus.zig +56 -74
  37. package/src/tigerbeetle/src/message_pool.zig +63 -57
  38. package/src/tigerbeetle/src/ring_buffer.zig +7 -0
  39. package/src/tigerbeetle/src/simulator.zig +4 -4
  40. package/src/tigerbeetle/src/state_machine.zig +1804 -797
  41. package/src/tigerbeetle/src/storage.zig +0 -230
  42. package/src/tigerbeetle/src/test/cluster.zig +3 -6
  43. package/src/tigerbeetle/src/test/message_bus.zig +4 -3
  44. package/src/tigerbeetle/src/test/network.zig +13 -16
  45. package/src/tigerbeetle/src/test/state_checker.zig +3 -2
  46. package/src/tigerbeetle/src/tigerbeetle.zig +108 -101
  47. package/src/tigerbeetle/src/time.zig +58 -11
  48. package/src/tigerbeetle/src/vsr/client.zig +18 -32
  49. package/src/tigerbeetle/src/vsr/clock.zig +1 -1
  50. package/src/tigerbeetle/src/vsr/journal.zig +2 -6
  51. package/src/tigerbeetle/src/vsr/replica.zig +152 -175
  52. package/src/tigerbeetle/src/vsr.zig +263 -5
  53. package/src/translate.zig +10 -0
  54. package/src/tigerbeetle/src/demo_05_accept_transfers.zig +0 -23
  55. package/src/tigerbeetle/src/demo_06_reject_transfers.zig +0 -17
  56. package/src/tigerbeetle/src/format_test.zig +0 -69
@@ -6,8 +6,6 @@ const assert = std.debug.assert;
6
6
  const log = std.log.scoped(.storage);
7
7
 
8
8
  const IO = @import("io.zig").IO;
9
- const is_darwin = builtin.target.isDarwin();
10
-
11
9
  const config = @import("config.zig");
12
10
  const vsr = @import("vsr.zig");
13
11
 
@@ -307,232 +305,4 @@ pub const Storage = struct {
307
305
  assert(buffer.len > 0);
308
306
  assert(offset + buffer.len <= self.size);
309
307
  }
310
-
311
- // Static helper functions to handle data file creation/opening/allocation:
312
-
313
- /// Opens or creates a journal file:
314
- /// - For reading and writing.
315
- /// - For Direct I/O (if possible in development mode, but required in production mode).
316
- /// - Obtains an advisory exclusive lock to the file descriptor.
317
- /// - Allocates the file contiguously on disk if this is supported by the file system.
318
- /// - Ensures that the file data (and file inode in the parent directory) is durable on disk.
319
- /// The caller is responsible for ensuring that the parent directory inode is durable.
320
- /// - Verifies that the file size matches the expected file size before returning.
321
- pub fn open(
322
- dir_fd: os.fd_t,
323
- relative_path: [:0]const u8,
324
- size: u64,
325
- must_create: bool,
326
- ) !os.fd_t {
327
- assert(relative_path.len > 0);
328
- assert(size >= config.sector_size);
329
- assert(size % config.sector_size == 0);
330
-
331
- // TODO Use O_EXCL when opening as a block device to obtain a mandatory exclusive lock.
332
- // This is much stronger than an advisory exclusive lock, and is required on some platforms.
333
-
334
- var flags: u32 = os.O.CLOEXEC | os.O.RDWR | os.O.DSYNC;
335
- var mode: os.mode_t = 0;
336
-
337
- // TODO Document this and investigate whether this is in fact correct to set here.
338
- if (@hasDecl(os, "O_LARGEFILE")) flags |= os.O.LARGEFILE;
339
-
340
- var direct_io_supported = false;
341
- if (config.direct_io) {
342
- direct_io_supported = try Storage.fs_supports_direct_io(dir_fd);
343
- if (direct_io_supported) {
344
- if (!is_darwin) flags |= os.O.DIRECT;
345
- } else if (config.deployment_environment == .development) {
346
- log.warn("file system does not support Direct I/O", .{});
347
- } else {
348
- // We require Direct I/O for safety to handle fsync failure correctly, and therefore
349
- // panic in production if it is not supported.
350
- @panic("file system does not support Direct I/O");
351
- }
352
- }
353
-
354
- if (must_create) {
355
- log.info("creating \"{s}\"...", .{relative_path});
356
- flags |= os.O.CREAT;
357
- flags |= os.O.EXCL;
358
- mode = 0o666;
359
- } else {
360
- log.info("opening \"{s}\"...", .{relative_path});
361
- }
362
-
363
- // This is critical as we rely on O_DSYNC for fsync() whenever we write to the file:
364
- assert((flags & os.O.DSYNC) > 0);
365
-
366
- // Be careful with openat(2): "If pathname is absolute, then dirfd is ignored." (man page)
367
- assert(!std.fs.path.isAbsolute(relative_path));
368
- const fd = try os.openatZ(dir_fd, relative_path, flags, mode);
369
- // TODO Return a proper error message when the path exists or does not exist (init/start).
370
- errdefer os.close(fd);
371
-
372
- // TODO Check that the file is actually a file.
373
-
374
- // On darwin, use F_NOCACHE on direct_io to disable the page cache as O_DIRECT doesn't exit.
375
- if (is_darwin and config.direct_io and direct_io_supported) {
376
- _ = try os.fcntl(fd, os.F.NOCACHE, 1);
377
- }
378
-
379
- // Obtain an advisory exclusive lock that works only if all processes actually use flock().
380
- // LOCK_NB means that we want to fail the lock without waiting if another process has it.
381
- os.flock(fd, os.LOCK.EX | os.LOCK.NB) catch |err| switch (err) {
382
- error.WouldBlock => @panic("another process holds the data file lock"),
383
- else => return err,
384
- };
385
-
386
- // Ask the file system to allocate contiguous sectors for the file (if possible):
387
- // If the file system does not support `fallocate()`, then this could mean more seeks or a
388
- // panic if we run out of disk space (ENOSPC).
389
- if (must_create) try Storage.allocate(fd, size);
390
-
391
- // The best fsync strategy is always to fsync before reading because this prevents us from
392
- // making decisions on data that was never durably written by a previously crashed process.
393
- // We therefore always fsync when we open the path, also to wait for any pending O_DSYNC.
394
- // Thanks to Alex Miller from FoundationDB for diving into our source and pointing this out.
395
- try os.fsync(fd);
396
-
397
- // We fsync the parent directory to ensure that the file inode is durably written.
398
- // The caller is responsible for the parent directory inode stored under the grandparent.
399
- // We always do this when opening because we don't know if this was done before crashing.
400
- try os.fsync(dir_fd);
401
-
402
- const stat = try os.fstat(fd);
403
- if (stat.size != size) @panic("data file inode size was truncated or corrupted");
404
-
405
- return fd;
406
- }
407
-
408
- /// Allocates a file contiguously using fallocate() if supported.
409
- /// Alternatively, writes to the last sector so that at least the file size is correct.
410
- pub fn allocate(fd: os.fd_t, size: u64) !void {
411
- log.info("allocating {}...", .{std.fmt.fmtIntSizeBin(size)});
412
- Storage.fallocate(fd, 0, 0, @intCast(i64, size)) catch |err| switch (err) {
413
- error.OperationNotSupported => {
414
- log.warn("file system does not support fallocate(), an ENOSPC will panic", .{});
415
- log.info("allocating by writing to the last sector of the file instead...", .{});
416
-
417
- const sector_size = config.sector_size;
418
- const sector: [sector_size]u8 align(sector_size) = [_]u8{0} ** sector_size;
419
-
420
- // Handle partial writes where the physical sector is less than a logical sector:
421
- const offset = size - sector.len;
422
- var written: usize = 0;
423
- while (written < sector.len) {
424
- written += try os.pwrite(fd, sector[written..], offset + written);
425
- }
426
- },
427
- else => return err,
428
- };
429
- }
430
-
431
- fn fallocate(fd: i32, mode: i32, offset: i64, length: i64) !void {
432
- // https://stackoverflow.com/a/11497568
433
- // https://api.kde.org/frameworks/kcoreaddons/html/posix__fallocate__mac_8h_source.html
434
- // http://hg.mozilla.org/mozilla-central/file/3d846420a907/xpcom/glue/FileUtils.cpp#l61
435
- if (is_darwin) {
436
- const F_ALLOCATECONTIG = 0x2; // allocate contiguous space
437
- const F_ALLOCATEALL = 0x4; // allocate all or nothing
438
- const F_PEOFPOSMODE = 3; // use relative offset from the seek pos mode
439
- const F_VOLPOSMODE = 4; // use the specified volume offset
440
- _ = F_VOLPOSMODE;
441
-
442
- const fstore_t = extern struct {
443
- fst_flags: c_uint,
444
- fst_posmode: c_int,
445
- fst_offset: os.off_t,
446
- fst_length: os.off_t,
447
- fst_bytesalloc: os.off_t,
448
- };
449
-
450
- var store = fstore_t{
451
- .fst_flags = F_ALLOCATECONTIG | F_ALLOCATEALL,
452
- .fst_posmode = F_PEOFPOSMODE,
453
- .fst_offset = 0,
454
- .fst_length = offset + length,
455
- .fst_bytesalloc = 0,
456
- };
457
-
458
- // try to pre-allocate contiguous space and fall back to default non-continugous
459
- var res = os.system.fcntl(fd, os.F.PREALLOCATE, @ptrToInt(&store));
460
- if (os.errno(res) != .SUCCESS) {
461
- store.fst_flags = F_ALLOCATEALL;
462
- res = os.system.fcntl(fd, os.F.PREALLOCATE, @ptrToInt(&store));
463
- }
464
-
465
- switch (os.errno(res)) {
466
- .SUCCESS => {},
467
- .ACCES => unreachable, // F_SETLK or F_SETSIZE of F_WRITEBOOTSTRAP
468
- .BADF => return error.FileDescriptorInvalid,
469
- .DEADLK => unreachable, // F_SETLKW
470
- .INTR => unreachable, // F_SETLKW
471
- .INVAL => return error.ArgumentsInvalid, // for F_PREALLOCATE (offset invalid)
472
- .MFILE => unreachable, // F_DUPFD or F_DUPED
473
- .NOLCK => unreachable, // F_SETLK or F_SETLKW
474
- .OVERFLOW => return error.FileTooBig,
475
- .SRCH => unreachable, // F_SETOWN
476
- .OPNOTSUPP => return error.OperationNotSupported, // not reported but need same error union
477
- else => |errno| return os.unexpectedErrno(errno),
478
- }
479
-
480
- // now actually perform the allocation
481
- return os.ftruncate(fd, @intCast(u64, length)) catch |err| switch (err) {
482
- error.AccessDenied => error.PermissionDenied,
483
- else => |e| e,
484
- };
485
- }
486
-
487
- while (true) {
488
- const rc = os.linux.fallocate(fd, mode, offset, length);
489
- switch (os.linux.getErrno(rc)) {
490
- .SUCCESS => return,
491
- .BADF => return error.FileDescriptorInvalid,
492
- .FBIG => return error.FileTooBig,
493
- .INTR => continue,
494
- .INVAL => return error.ArgumentsInvalid,
495
- .IO => return error.InputOutput,
496
- .NODEV => return error.NoDevice,
497
- .NOSPC => return error.NoSpaceLeft,
498
- .NOSYS => return error.SystemOutdated,
499
- .OPNOTSUPP => return error.OperationNotSupported,
500
- .PERM => return error.PermissionDenied,
501
- .SPIPE => return error.Unseekable,
502
- .TXTBSY => return error.FileBusy,
503
- else => |errno| return os.unexpectedErrno(errno),
504
- }
505
- }
506
- }
507
-
508
- /// Detects whether the underlying file system for a given directory fd supports Direct I/O.
509
- /// Not all Linux file systems support `O_DIRECT`, e.g. a shared macOS volume.
510
- fn fs_supports_direct_io(dir_fd: std.os.fd_t) !bool {
511
- if (!@hasDecl(std.os, "O_DIRECT") and !is_darwin) return false;
512
-
513
- const path = "fs_supports_direct_io";
514
- const dir = std.fs.Dir{ .fd = dir_fd };
515
- const fd = try os.openatZ(dir_fd, path, os.O.CLOEXEC | os.O.CREAT | os.O.TRUNC, 0o666);
516
- defer os.close(fd);
517
- defer dir.deleteFile(path) catch {};
518
-
519
- // F_NOCACHE on darwin is the most similar option to O_DIRECT on linux.
520
- if (is_darwin) {
521
- _ = os.fcntl(fd, os.F.NOCACHE, 1) catch return false;
522
- return true;
523
- }
524
-
525
- while (true) {
526
- const res = os.system.openat(dir_fd, path, os.O.CLOEXEC | os.O.RDONLY | os.O.DIRECT, 0);
527
- switch (os.linux.getErrno(res)) {
528
- 0 => {
529
- os.close(@intCast(os.fd_t, res));
530
- return true;
531
- },
532
- os.linux.EINTR => continue,
533
- os.linux.EINVAL => return false,
534
- else => |err| return os.unexpectedErrno(err),
535
- }
536
- }
537
- }
538
308
  };
@@ -6,7 +6,8 @@ const config = @import("../config.zig");
6
6
 
7
7
  const StateChecker = @import("state_checker.zig").StateChecker;
8
8
 
9
- const MessagePool = @import("../message_pool.zig").MessagePool;
9
+ const message_pool = @import("../message_pool.zig");
10
+ const MessagePool = message_pool.MessagePool;
10
11
  const Message = MessagePool.Message;
11
12
 
12
13
  const Network = @import("network.zig").Network;
@@ -197,12 +198,8 @@ pub const Cluster = struct {
197
198
  var it = message_bus.pool.free_list;
198
199
  while (it) |message| : (it = message.next) messages_in_pool += 1;
199
200
  }
200
- {
201
- var it = message_bus.pool.header_only_free_list;
202
- while (it) |message| : (it = message.next) messages_in_pool += 1;
203
- }
204
201
 
205
- const total_messages = config.message_bus_messages_max + config.message_bus_headers_max;
202
+ const total_messages = message_pool.messages_max_replica;
206
203
  assert(messages_in_network + messages_in_pool == total_messages);
207
204
 
208
205
  replica.* = try Replica.init(
@@ -6,12 +6,13 @@ const config = @import("../config.zig");
6
6
  const MessagePool = @import("../message_pool.zig").MessagePool;
7
7
  const Message = MessagePool.Message;
8
8
  const Header = @import("../vsr.zig").Header;
9
+ const ProcessType = @import("../vsr.zig").ProcessType;
9
10
 
10
11
  const Network = @import("network.zig").Network;
11
12
 
12
13
  const log = std.log.scoped(.message_bus);
13
14
 
14
- pub const Process = union(enum) {
15
+ pub const Process = union(ProcessType) {
15
16
  replica: u8,
16
17
  client: u128,
17
18
  };
@@ -35,7 +36,7 @@ pub const MessageBus = struct {
35
36
  network: *Network,
36
37
  ) !MessageBus {
37
38
  return MessageBus{
38
- .pool = try MessagePool.init(allocator),
39
+ .pool = try MessagePool.init(allocator, @as(ProcessType, process)),
39
40
  .network = network,
40
41
  .cluster = cluster,
41
42
  .process = process,
@@ -61,7 +62,7 @@ pub const MessageBus = struct {
61
62
 
62
63
  pub fn tick(_: *MessageBus) void {}
63
64
 
64
- pub fn get_message(bus: *MessageBus) ?*Message {
65
+ pub fn get_message(bus: *MessageBus) *Message {
65
66
  return bus.pool.get_message();
66
67
  }
67
68
 
@@ -28,7 +28,7 @@ pub const Network = struct {
28
28
  message: *Message,
29
29
 
30
30
  pub fn deinit(packet: *const Packet, path: PacketSimulatorPath) void {
31
- const source_bus = &packet.network.busses.items[path.source];
31
+ const source_bus = &packet.network.buses.items[path.source];
32
32
  source_bus.unref(packet.message);
33
33
  }
34
34
  };
@@ -43,7 +43,7 @@ pub const Network = struct {
43
43
  options: NetworkOptions,
44
44
  packet_simulator: PacketSimulator(Packet),
45
45
 
46
- busses: std.ArrayListUnmanaged(MessageBus),
46
+ buses: std.ArrayListUnmanaged(MessageBus),
47
47
  processes: std.ArrayListUnmanaged(u128),
48
48
 
49
49
  pub fn init(
@@ -55,8 +55,8 @@ pub const Network = struct {
55
55
  const process_count = client_count + replica_count;
56
56
  assert(process_count <= std.math.maxInt(u8));
57
57
 
58
- var busses = try std.ArrayListUnmanaged(MessageBus).initCapacity(allocator, process_count);
59
- errdefer busses.deinit(allocator);
58
+ var buses = try std.ArrayListUnmanaged(MessageBus).initCapacity(allocator, process_count);
59
+ errdefer buses.deinit(allocator);
60
60
 
61
61
  var processes = try std.ArrayListUnmanaged(u128).initCapacity(allocator, process_count);
62
62
  errdefer processes.deinit(allocator);
@@ -71,18 +71,18 @@ pub const Network = struct {
71
71
  .allocator = allocator,
72
72
  .options = options,
73
73
  .packet_simulator = packet_simulator,
74
- .busses = busses,
74
+ .buses = buses,
75
75
  .processes = processes,
76
76
  };
77
77
  }
78
78
 
79
79
  pub fn deinit(network: *Network) void {
80
- // TODO: deinit the busses themselves when they gain a deinit()
81
- network.busses.deinit(network.allocator);
80
+ // TODO: deinit the buses themselves when they gain a deinit()
81
+ network.buses.deinit(network.allocator);
82
82
  network.processes.deinit(network.allocator);
83
83
  }
84
84
 
85
- /// Returns the address (index into Network.busses)
85
+ /// Returns the address (index into Network.buses)
86
86
  pub fn init_message_bus(network: *Network, cluster: u32, process: Process) !*MessageBus {
87
87
  const raw_process = switch (process) {
88
88
  .replica => |replica| replica,
@@ -97,9 +97,9 @@ pub const Network = struct {
97
97
  const bus = try MessageBus.init(network.allocator, cluster, process, network);
98
98
 
99
99
  network.processes.appendAssumeCapacity(raw_process);
100
- network.busses.appendAssumeCapacity(bus);
100
+ network.buses.appendAssumeCapacity(bus);
101
101
 
102
- return &network.busses.items[network.busses.items.len - 1];
102
+ return &network.buses.items[network.buses.items.len - 1];
103
103
  }
104
104
 
105
105
  pub fn send_message(network: *Network, message: *Message, path: Path) void {
@@ -131,18 +131,15 @@ pub const Network = struct {
131
131
  }
132
132
 
133
133
  pub fn get_message_bus(network: *Network, process: Process) *MessageBus {
134
- return &network.busses.items[network.process_to_address(process)];
134
+ return &network.buses.items[network.process_to_address(process)];
135
135
  }
136
136
 
137
137
  fn deliver_message(packet: Packet, path: PacketSimulatorPath) void {
138
138
  const network = packet.network;
139
139
 
140
- const target_bus = &network.busses.items[path.target];
140
+ const target_bus = &network.buses.items[path.target];
141
141
 
142
- const message = target_bus.get_message() orelse {
143
- log.debug("deliver_message: target message bus has no free messages, dropping", .{});
144
- return;
145
- };
142
+ const message = target_bus.get_message();
146
143
  defer target_bus.unref(message);
147
144
 
148
145
  std.mem.copy(u8, message.buffer, packet.message.buffer);
@@ -8,12 +8,13 @@ const Cluster = @import("cluster.zig").Cluster;
8
8
  const Network = @import("network.zig").Network;
9
9
  const StateMachine = @import("state_machine.zig").StateMachine;
10
10
 
11
- const MessagePool = @import("../message_pool.zig").MessagePool;
11
+ const message_pool = @import("../message_pool.zig");
12
+ const MessagePool = message_pool.MessagePool;
12
13
  const Message = MessagePool.Message;
13
14
 
14
15
  const RingBuffer = @import("../ring_buffer.zig").RingBuffer;
15
16
 
16
- const RequestQueue = RingBuffer(u128, config.message_bus_messages_max - 1);
17
+ const RequestQueue = RingBuffer(u128, config.client_request_queue_max);
17
18
  const StateTransitions = std.AutoHashMap(u128, u64);
18
19
 
19
20
  const log = std.log.scoped(.state_checker);