tigerbeetle-node 0.11.6 → 0.11.8

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (55)
  1. package/dist/.client.node.sha256 +1 -1
  2. package/package.json +1 -1
  3. package/src/tigerbeetle/scripts/benchmark.bat +1 -2
  4. package/src/tigerbeetle/scripts/benchmark.sh +1 -2
  5. package/src/tigerbeetle/scripts/install.bat +7 -0
  6. package/src/tigerbeetle/scripts/install.sh +2 -3
  7. package/src/tigerbeetle/src/benchmark.zig +3 -3
  8. package/src/tigerbeetle/src/ewah.zig +6 -5
  9. package/src/tigerbeetle/src/ewah_fuzz.zig +1 -1
  10. package/src/tigerbeetle/src/io/darwin.zig +19 -0
  11. package/src/tigerbeetle/src/io/linux.zig +8 -0
  12. package/src/tigerbeetle/src/io/windows.zig +20 -2
  13. package/src/tigerbeetle/src/iops.zig +7 -1
  14. package/src/tigerbeetle/src/lsm/compaction.zig +18 -29
  15. package/src/tigerbeetle/src/lsm/forest_fuzz.zig +9 -5
  16. package/src/tigerbeetle/src/lsm/grid.zig +267 -267
  17. package/src/tigerbeetle/src/lsm/level_iterator.zig +18 -1
  18. package/src/tigerbeetle/src/lsm/manifest.zig +29 -1
  19. package/src/tigerbeetle/src/lsm/manifest_log.zig +5 -5
  20. package/src/tigerbeetle/src/lsm/manifest_log_fuzz.zig +2 -2
  21. package/src/tigerbeetle/src/lsm/set_associative_cache.zig +26 -70
  22. package/src/tigerbeetle/src/lsm/table.zig +42 -0
  23. package/src/tigerbeetle/src/lsm/table_iterator.zig +27 -0
  24. package/src/tigerbeetle/src/lsm/table_mutable.zig +1 -1
  25. package/src/tigerbeetle/src/lsm/test.zig +2 -3
  26. package/src/tigerbeetle/src/lsm/tree.zig +27 -6
  27. package/src/tigerbeetle/src/lsm/tree_fuzz.zig +1 -1
  28. package/src/tigerbeetle/src/simulator.zig +0 -5
  29. package/src/tigerbeetle/src/storage.zig +58 -6
  30. package/src/tigerbeetle/src/test/cluster.zig +3 -0
  31. package/src/tigerbeetle/src/test/state_checker.zig +1 -1
  32. package/src/tigerbeetle/src/test/storage.zig +22 -1
  33. package/src/tigerbeetle/src/tracer.zig +50 -28
  34. package/src/tigerbeetle/src/unit_tests.zig +9 -4
  35. package/src/tigerbeetle/src/vopr.zig +4 -4
  36. package/src/tigerbeetle/src/vsr/client.zig +11 -7
  37. package/src/tigerbeetle/src/vsr/journal.zig +153 -93
  38. package/src/tigerbeetle/src/vsr/replica.zig +10 -20
  39. package/src/tigerbeetle/src/vsr/superblock.zig +19 -16
  40. package/src/tigerbeetle/src/vsr/superblock_free_set.zig +114 -93
  41. package/src/tigerbeetle/src/vsr/superblock_free_set_fuzz.zig +1 -1
  42. package/src/tigerbeetle/src/vsr/superblock_fuzz.zig +1 -3
  43. package/src/tigerbeetle/src/vsr.zig +55 -8
  44. package/src/tigerbeetle/src/c/tb_client/context.zig +0 -304
  45. package/src/tigerbeetle/src/c/tb_client/echo_client.zig +0 -108
  46. package/src/tigerbeetle/src/c/tb_client/packet.zig +0 -80
  47. package/src/tigerbeetle/src/c/tb_client/signal.zig +0 -286
  48. package/src/tigerbeetle/src/c/tb_client/thread.zig +0 -88
  49. package/src/tigerbeetle/src/c/tb_client.h +0 -220
  50. package/src/tigerbeetle/src/c/tb_client.zig +0 -177
  51. package/src/tigerbeetle/src/c/tb_client_header.zig +0 -218
  52. package/src/tigerbeetle/src/c/tb_client_header_test.zig +0 -135
  53. package/src/tigerbeetle/src/c/test.zig +0 -371
  54. package/src/tigerbeetle/src/cli.zig +0 -399
  55. package/src/tigerbeetle/src/main.zig +0 -242
@@ -1 +1 @@
1
- b80cf792efc775e33454af2ca68f6d93c2e369d29eb95509a4fae3c215019bbc dist/client.node
1
+ c82197101574fc1267be050abfa7d31ca355036eec3f7d3da9a43a6c83bf6b0c dist/client.node
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "tigerbeetle-node",
3
- "version": "0.11.6",
3
+ "version": "0.11.8",
4
4
  "description": "TigerBeetle Node.js client",
5
5
  "main": "dist/index.js",
6
6
  "typings": "dist/index.d.ts",
@@ -27,8 +27,7 @@ echo.
27
27
  exit /b
28
28
 
29
29
  :main
30
- zig\zig.exe build -Drelease-safe
31
- move zig-out\bin\tigerbeetle.exe . >nul
30
+ zig\zig.exe build install -Drelease-safe
32
31
 
33
32
  for /l %%i in (0, 1, 0) do (
34
33
  echo Initializing replica %%i
@@ -12,8 +12,7 @@ fi
12
12
  COLOR_RED='\033[1;31m'
13
13
  COLOR_END='\033[0m'
14
14
 
15
- zig/zig build -Drelease-safe
16
- mv zig-out/bin/tigerbeetle .
15
+ zig/zig build install -Drelease-safe
17
16
 
18
17
  function onerror {
19
18
  if [ "$?" == "0" ]; then
@@ -0,0 +1,7 @@
1
+ @echo off
2
+ setlocal enabledelayedexpansion
3
+
4
+ call .\scripts\install_zig.bat
5
+
6
+ echo "Building TigerBeetle..."
7
+ .\zig\zig.exe build install -Dcpu=baseline -Drelease-safe
@@ -14,9 +14,8 @@ set -eu
14
14
  scripts/install_zig.sh
15
15
  if [ "$debug" = "true" ]; then
16
16
  echo "Building Tigerbeetle debug..."
17
- zig/zig build -Dcpu=baseline
17
+ zig/zig build install -Dcpu=baseline
18
18
  else
19
19
  echo "Building TigerBeetle..."
20
- zig/zig build -Dcpu=baseline -Drelease-safe
20
+ zig/zig build install -Dcpu=baseline -Drelease-safe
21
21
  fi
22
- mv zig-out/bin/tigerbeetle .
@@ -6,7 +6,6 @@ const constants = @import("constants.zig");
6
6
  const log = std.log;
7
7
  pub const log_level: std.log.Level = .err;
8
8
 
9
- const cli = @import("cli.zig");
10
9
  const IO = @import("io.zig").IO;
11
10
 
12
11
  const util = @import("util.zig");
@@ -23,7 +22,7 @@ const Client = vsr.Client(StateMachine, MessageBus);
23
22
 
24
23
  const tb = @import("tigerbeetle.zig");
25
24
 
26
- const batches_count = 100;
25
+ const batches_count = 1000;
27
26
 
28
27
  const transfers_per_batch: u32 = @divExact(
29
28
  constants.message_size_max - @sizeOf(vsr.Header),
@@ -151,7 +150,8 @@ pub fn main() !void {
151
150
 
152
151
  const result: i64 = @divFloor(@intCast(i64, transfers.len * 1000), ms);
153
152
  try stdout.print("============================================\n", .{});
154
- try stdout.print("{} transfers per second\n\n", .{result});
153
+ try stdout.print("{} batches in {} ms\n", .{ batches_count, ms });
154
+ try stdout.print("{} transfers per second\n", .{result});
155
155
  try stdout.print("max p100 latency per {} transfers = {}ms\n", .{
156
156
  transfers_per_batch,
157
157
  queue.transfers_latency_max,
@@ -64,7 +64,6 @@ pub fn ewah(comptime Word: type) type {
64
64
  // so that we can test invalid encodings.
65
65
  pub fn decode(source: []align(@alignOf(Word)) const u8, target_words: []Word) usize {
66
66
  assert(source.len % @sizeOf(Word) == 0);
67
- assert(source.len >= @sizeOf(Marker));
68
67
  assert(disjoint_slices(u8, Word, source, target_words));
69
68
 
70
69
  const source_words = mem.bytesAsSlice(Word, source);
@@ -95,7 +94,6 @@ pub fn ewah(comptime Word: type) type {
95
94
 
96
95
  // Returns the number of bytes written to `target`.
97
96
  pub fn encode(source_words: []const Word, target: []align(@alignOf(Word)) u8) usize {
98
- assert(target.len >= @sizeOf(Marker));
99
97
  assert(target.len == encode_size_max(source_words.len));
100
98
  assert(disjoint_slices(Word, u8, source_words, target));
101
99
 
@@ -149,8 +147,6 @@ pub fn ewah(comptime Word: type) type {
149
147
  /// Assumes (pessimistically) that every word will be encoded as a literal.
150
148
  pub fn encode_size_max(word_count: usize) usize {
151
149
  const marker_count = div_ceil(word_count, marker_literal_word_count_max);
152
- assert(marker_count != 0);
153
-
154
150
  return marker_count * @sizeOf(Marker) + word_count * @sizeOf(Word);
155
151
  }
156
152
 
@@ -178,7 +174,7 @@ test "ewah encode→decode cycle" {
178
174
  }
179
175
  }
180
176
 
181
- test "ewah Word=u8 decode→encode→decode" {
177
+ test "ewah Word=u8" {
182
178
  try test_decode_with_word(u8);
183
179
 
184
180
  const codec = ewah(u8);
@@ -195,6 +191,9 @@ test "ewah Word=u8 decode→encode→decode" {
195
191
  56,
196
192
  });
197
193
  }
194
+
195
+ try std.testing.expectEqual(codec.encode_size_max(0), 0);
196
+ try std.testing.expectEqual(codec.encode(&.{}, &.{}), 0);
198
197
  }
199
198
 
200
199
  test "ewah Word=u16" {
@@ -205,6 +204,8 @@ test "ewah Word=u16" {
205
204
  fn test_decode_with_word(comptime Word: type) !void {
206
205
  const codec = ewah(Word);
207
206
 
207
+ // No set bits.
208
+ try test_decode(Word, &.{});
208
209
  // Alternating runs, no literals.
209
210
  try test_decode(Word, &.{
210
211
  codec.marker_word(.{ .uniform_bit = 0, .uniform_word_count = 2, .literal_word_count = 0 }),
@@ -17,7 +17,7 @@ pub fn main() !void {
17
17
  const random = prng.random();
18
18
 
19
19
  const decoded_size_max = @divExact(1024 * 1024, @sizeOf(Word));
20
- const decoded_size = 1 + random.uintLessThan(usize, decoded_size_max);
20
+ const decoded_size = random.uintLessThan(usize, 1 + decoded_size_max);
21
21
  const decoded = try allocator.alloc(Word, decoded_size);
22
22
  defer allocator.free(decoded);
23
23
 
@@ -571,6 +571,25 @@ pub const IO = struct {
571
571
  completion: *Completion,
572
572
  nanoseconds: u63,
573
573
  ) void {
574
+ // Special case a zero timeout as a yield.
575
+ if (nanoseconds == 0) {
576
+ completion.* = .{
577
+ .next = null,
578
+ .context = context,
579
+ .operation = undefined,
580
+ .callback = struct {
581
+ fn on_complete(_io: *IO, _completion: *Completion) void {
582
+ _ = _io;
583
+ const _context = @intToPtr(Context, @ptrToInt(_completion.context));
584
+ callback(_context, _completion, {});
585
+ }
586
+ }.on_complete,
587
+ };
588
+
589
+ self.completed.push(completion);
590
+ return;
591
+ }
592
+
574
593
  self.submit(
575
594
  context,
576
595
  callback,
@@ -819,6 +819,14 @@ pub const IO = struct {
819
819
  },
820
820
  },
821
821
  };
822
+
823
+ // Special case a zero timeout as a yield.
824
+ if (nanoseconds == 0) {
825
+ completion.result = -@intCast(i32, @enumToInt(std.os.E.TIME));
826
+ self.completed.push(completion);
827
+ return;
828
+ }
829
+
822
830
  self.enqueue(completion);
823
831
  }
824
832
 
@@ -62,7 +62,7 @@ pub const IO = struct {
62
62
  };
63
63
 
64
64
  fn flush(self: *IO, mode: FlushMode) !void {
65
- if (self.completed.peek() == null) {
65
+ if (self.completed.empty()) {
66
66
  // Compute how long to poll by flushing timeout completions.
67
67
  // NOTE: this may push to completed queue
68
68
  var timeout_ms: ?os.windows.DWORD = null;
@@ -78,7 +78,7 @@ pub const IO = struct {
78
78
  }
79
79
 
80
80
  // Poll for IO iff theres IO pending and flush_timeouts() found no ready completions
81
- if (self.io_pending > 0 and self.completed.peek() == null) {
81
+ if (self.io_pending > 0 and self.completed.empty()) {
82
82
  // In blocking mode, we're always waiting at least until the timeout by run_for_ns.
83
83
  // In non-blocking mode, we shouldn't wait at all.
84
84
  const io_timeout = switch (mode) {
@@ -868,6 +868,24 @@ pub const IO = struct {
868
868
  completion: *Completion,
869
869
  nanoseconds: u63,
870
870
  ) void {
871
+ // Special case a zero timeout as a yield.
872
+ if (nanoseconds == 0) {
873
+ completion.* = .{
874
+ .next = null,
875
+ .context = @ptrCast(?*anyopaque, context),
876
+ .operation = undefined,
877
+ .callback = struct {
878
+ fn on_complete(ctx: Completion.Context) void {
879
+ const _context = @intToPtr(Context, @ptrToInt(ctx.completion.context));
880
+ callback(_context, ctx.completion, {});
881
+ }
882
+ }.on_complete,
883
+ };
884
+
885
+ self.completed.push(completion);
886
+ return;
887
+ }
888
+
871
889
  self.submit(
872
890
  context,
873
891
  callback,
@@ -19,11 +19,17 @@ pub fn IOPS(comptime T: type, comptime size: u6) type {
19
19
 
20
20
  pub fn release(self: *Self, item: *T) void {
21
21
  item.* = undefined;
22
- const i = (@ptrToInt(item) - @ptrToInt(&self.items)) / @sizeOf(T);
22
+ const i = self.index(item);
23
23
  assert(!self.free.isSet(i));
24
24
  self.free.set(i);
25
25
  }
26
26
 
27
+ pub fn index(self: *Self, item: *T) usize {
28
+ const i = (@ptrToInt(item) - @ptrToInt(&self.items)) / @sizeOf(T);
29
+ assert(i < size);
30
+ return i;
31
+ }
32
+
27
33
  /// Returns the count of IOPs available.
28
34
  pub fn available(self: *const Self) usize {
29
35
  return self.free.count();
@@ -61,7 +61,7 @@ pub fn CompactionType(
61
61
  const BlockPtrConst = Grid.BlockPtrConst;
62
62
  const BlockWrite = struct {
63
63
  write: Grid.Write = undefined,
64
- block: BlockPtr = undefined,
64
+ block: *BlockPtr = undefined,
65
65
  writable: bool = false,
66
66
  };
67
67
 
@@ -85,7 +85,8 @@ pub fn CompactionType(
85
85
  done,
86
86
  };
87
87
 
88
- tree_name: [:0]const u8,
88
+ /// Used only for debugging/tracing.
89
+ name: [:0]const u8,
89
90
 
90
91
  grid: *Grid,
91
92
  grid_reservation: Grid.Reservation,
@@ -124,7 +125,7 @@ pub fn CompactionType(
124
125
 
125
126
  tracer_slot: ?tracer.SpanStart = null,
126
127
 
127
- pub fn init(allocator: mem.Allocator, tree_name: [:0]const u8) !Compaction {
128
+ pub fn init(allocator: mem.Allocator, name: [:0]const u8) !Compaction {
128
129
  var iterator_a = try IteratorA.init(allocator);
129
130
  errdefer iterator_a.deinit(allocator);
130
131
 
@@ -135,7 +136,7 @@ pub fn CompactionType(
135
136
  errdefer table_builder.deinit(allocator);
136
137
 
137
138
  return Compaction{
138
- .tree_name = tree_name,
139
+ .name = name,
139
140
 
140
141
  // Assigned by start()
141
142
  .grid = undefined,
@@ -202,7 +203,7 @@ pub fn CompactionType(
202
203
  assert(drop_tombstones or level_b < constants.lsm_levels - 1);
203
204
 
204
205
  compaction.* = .{
205
- .tree_name = compaction.tree_name,
206
+ .name = compaction.name,
206
207
 
207
208
  .grid = grid,
208
209
  // Reserve enough blocks to write our output tables in the worst case, where:
@@ -313,11 +314,8 @@ pub fn CompactionType(
313
314
 
314
315
  tracer.start(
315
316
  &compaction.tracer_slot,
316
- .{ .tree = .{ .tree_name = compaction.tree_name } },
317
- .{ .tree_compaction_tick = .{
318
- .tree_name = compaction.tree_name,
319
- .level_b = compaction.level_b,
320
- } },
317
+ .{ .tree_compaction = .{ .compaction_name = compaction.name } },
318
+ .{ .tree_compaction_tick = .{ .level_b = compaction.level_b } },
321
319
  @src(),
322
320
  );
323
321
 
@@ -360,7 +358,7 @@ pub fn CompactionType(
360
358
  write_callback,
361
359
  &block_write.write,
362
360
  block_write.block,
363
- Table.block_address(block_write.block),
361
+ Table.block_address(block_write.block.*),
364
362
  );
365
363
  }
366
364
  }
@@ -392,11 +390,8 @@ pub fn CompactionType(
392
390
  var tracer_slot: ?tracer.SpanStart = null;
393
391
  tracer.start(
394
392
  &tracer_slot,
395
- .{ .tree = .{ .tree_name = compaction.tree_name } },
396
- .{ .tree_compaction_merge = .{
397
- .tree_name = compaction.tree_name,
398
- .level_b = compaction.level_b,
399
- } },
393
+ .{ .tree_compaction = .{ .compaction_name = compaction.name } },
394
+ .{ .tree_compaction_merge = .{ .level_b = compaction.level_b } },
400
395
  @src(),
401
396
  );
402
397
 
@@ -422,19 +417,13 @@ pub fn CompactionType(
422
417
 
423
418
  tracer.end(
424
419
  &tracer_slot,
425
- .{ .tree = .{ .tree_name = compaction.tree_name } },
426
- .{ .tree_compaction_merge = .{
427
- .tree_name = compaction.tree_name,
428
- .level_b = compaction.level_b,
429
- } },
420
+ .{ .tree_compaction = .{ .compaction_name = compaction.name } },
421
+ .{ .tree_compaction_merge = .{ .level_b = compaction.level_b } },
430
422
  );
431
423
  tracer.end(
432
424
  &compaction.tracer_slot,
433
- .{ .tree = .{ .tree_name = compaction.tree_name } },
434
- .{ .tree_compaction_tick = .{
435
- .tree_name = compaction.tree_name,
436
- .level_b = compaction.level_b,
437
- } },
425
+ .{ .tree_compaction = .{ .compaction_name = compaction.name } },
426
+ .{ .tree_compaction_tick = .{ .level_b = compaction.level_b } },
438
427
  );
439
428
 
440
429
  // TODO Implement pacing here by deciding if we should do another compact_tick()
@@ -480,7 +469,7 @@ pub fn CompactionType(
480
469
  });
481
470
 
482
471
  // Mark the finished data block as writable for the next compact_tick() call.
483
- compaction.data.block = compaction.table_builder.data_block;
472
+ compaction.data.block = &compaction.table_builder.data_block;
484
473
  assert(!compaction.data.writable);
485
474
  compaction.data.writable = true;
486
475
  }
@@ -497,7 +486,7 @@ pub fn CompactionType(
497
486
  });
498
487
 
499
488
  // Mark the finished filter block as writable for the next compact_tick() call.
500
- compaction.filter.block = compaction.table_builder.filter_block;
489
+ compaction.filter.block = &compaction.table_builder.filter_block;
501
490
  assert(!compaction.filter.writable);
502
491
  compaction.filter.writable = true;
503
492
  }
@@ -517,7 +506,7 @@ pub fn CompactionType(
517
506
  compaction.manifest.insert_table(compaction.level_b, &table);
518
507
 
519
508
  // Mark the finished index block as writable for the next compact_tick() call.
520
- compaction.index.block = compaction.table_builder.index_block;
509
+ compaction.index.block = &compaction.table_builder.index_block;
521
510
  assert(!compaction.index.writable);
522
511
  compaction.index.writable = true;
523
512
 
@@ -46,8 +46,7 @@ const Environment = struct {
46
46
  // This is the smallest size that set_associative_cache will allow us.
47
47
  const cache_entries_max = 2048;
48
48
  const forest_options = StateMachine.forest_options(.{
49
- // Ignored by StateMachine.forest_options().
50
- .lsm_forest_node_count = undefined,
49
+ .lsm_forest_node_count = node_count,
51
50
  .cache_entries_accounts = cache_entries_max,
52
51
  .cache_entries_transfers = cache_entries_max,
53
52
  .cache_entries_posted = cache_entries_max,
@@ -93,7 +92,11 @@ const Environment = struct {
93
92
  env.message_pool = try MessagePool.init(allocator, .replica);
94
93
  errdefer env.message_pool.deinit(allocator);
95
94
 
96
- env.superblock = try SuperBlock.init(allocator, env.storage, &env.message_pool);
95
+ env.superblock = try SuperBlock.init(allocator, .{
96
+ .storage = env.storage,
97
+ .storage_size_limit = constants.storage_size_max,
98
+ .message_pool = &env.message_pool,
99
+ });
97
100
  errdefer env.superblock.deinit(allocator);
98
101
 
99
102
  env.grid = try Grid.init(allocator, &env.superblock);
@@ -121,7 +124,7 @@ const Environment = struct {
121
124
  }
122
125
 
123
126
  fn tick(env: *Environment) void {
124
- env.grid.tick();
127
+ // env.grid.tick();
125
128
  env.storage.tick();
126
129
  }
127
130
 
@@ -147,7 +150,6 @@ const Environment = struct {
147
150
  env.superblock.format(superblock_format_callback, &env.superblock_context, .{
148
151
  .cluster = cluster,
149
152
  .replica = replica,
150
- .storage_size_max = constants.storage_size_max,
151
153
  });
152
154
  env.tick_until_state_change(.init, .formatted);
153
155
  }
@@ -260,6 +262,8 @@ const Environment = struct {
260
262
  if (compact.checkpoint) env.checkpoint(compact.op);
261
263
  },
262
264
  .put_account => |account| {
265
+ // The forest requires prefetch before put.
266
+ env.prefetch_account(account.id);
263
267
  env.forest.grooves.accounts.put(&account);
264
268
  try model.put(account.id, account);
265
269
  },