tigerbeetle 0.0.37 → 0.0.38

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
checksums.yaml CHANGED
@@ -1,7 +1,7 @@
1
1
  ---
2
2
  SHA256:
3
- metadata.gz: 552e8ed13e850cd27f9b0516763a3151939d0b13dcb7615cb9e562a1f5ec8ff8
4
- data.tar.gz: 469bab9414b812cf7a65d2312362706725d7c30071c05691dd835cf2510d2ba0
3
+ metadata.gz: 55a5b0b68111a9aeac3084772fb0f0ad185a67d9f41c67b5bcd6392fd7fcab61
4
+ data.tar.gz: '03976907f14a64b11b0fd9f7bf0693a4f44a887b997b34c9912bc71b1653eea5'
5
5
  SHA512:
6
- metadata.gz: e0a9f6179ead5c9fc058e0168fac1edd5c4fbd07bb5376387f37110b43f3a7d26c3ec9502bcfb6e34c97dae856fb570d76f6bda5c0459652e4601ed82e627d65
7
- data.tar.gz: 9634608bd3eb31963cb5f02007bebd8fcf926982dac54f74e1a306b57383677731417e7f86d01cb832e6d06d4c60c6f694135fb86b0060d1d726c74d07201867
6
+ metadata.gz: 8c256706f6343e8e3d0cc1d130482009b28cdcc696ababa97a0d75f4fb7b74e7aa880fa364cb0ff55e274e26b876242f5f3418e6090f9e5dd5bdf17698252737
7
+ data.tar.gz: 7683f0b905a05ec28a2b164bc8a18f285ef64be2b6190ae40503b8f9ade3864238047ac5f53e6e0c5c60e94347a1483b1ad8a1b79221ae688941673bade1f82a
data/CHANGELOG.md CHANGED
@@ -1,5 +1,10 @@
1
1
  # Changelog
2
2
 
3
+ ## 0.0.38
4
+
5
+ - Based on TigerBeetle 0.16.74
6
+
7
+
3
8
  ## 0.0.36
4
9
 
5
10
  - Based on TigerBeetle 0.16.73
@@ -23,6 +23,10 @@ const Message = MessagePool.Message;
23
23
  const Packet = @import("packet.zig").Packet;
24
24
  const Signal = @import("signal.zig").Signal;
25
25
 
26
+ const KiB = stdx.KiB;
27
+
28
+ const io_thread_stack_size = 512 * KiB;
29
+
26
30
  pub const InitParameters = extern struct {
27
31
  cluster_id: u128,
28
32
  client_id: u128,
@@ -351,7 +355,11 @@ pub fn ContextType(
351
355
  context.client.register(client_register_callback, @intFromPtr(context));
352
356
 
353
357
  log.debug("{}: init: spawning thread", .{context.client_id});
354
- context.thread = std.Thread.spawn(.{}, Context.io_thread, .{context}) catch |err| {
358
+ context.thread = std.Thread.spawn(
359
+ .{ .stack_size = io_thread_stack_size },
360
+ Context.io_thread,
361
+ .{context},
362
+ ) catch |err| {
355
363
  log.err("{}: failed to spawn thread: {s}", .{
356
364
  context.client_id,
357
365
  @errorName(err),
@@ -1,3 +1,25 @@
1
+ const std = @import("std");
2
+ const stdx = @import("stdx");
3
+ const assert = std.debug.assert;
4
+ const maybe = stdx.maybe;
5
+ const Elem = std.meta.Elem;
6
+
7
+ const binary_search = @import("lsm/binary_search.zig");
8
+
9
+ /// LSM Tree is a sorted array with a monocle and a top hat.
10
+ ///
11
+ /// We want to iterate it in both directions:
12
+ /// - For CDC, you want to learn about all new objects with timestamp>threshold.
13
+ /// - For paginated timelines, you want to learn about past objects with timestamp<threshold.
14
+ ///
15
+ /// Sadly, this can't be implemented via just a single branch near the end of the system; we need to
16
+ /// change a whole bunch of `<` to `>` throughout the stack.
17
+ ///
18
+ /// Direction encapsulates the logic of "if ascending use < if descending use >". The mnemonic is
19
+ /// that usual comparison is horizontal along a number line, but Direction-aware is vertical.
20
+ ///
21
+ /// In other words, `key_min` and `key_max` track natural ordering, while `key_lower` and
22
+ /// `key_upper` are direction-aware.
1
23
  pub const Direction = enum(u1) {
2
24
  ascending = 0,
3
25
  descending = 1,
@@ -8,4 +30,91 @@ pub const Direction = enum(u1) {
8
30
  .descending => .ascending,
9
31
  };
10
32
  }
33
+
34
+ pub inline fn cmp(
35
+ d: Direction,
36
+ a: anytype,
37
+ comptime op: enum { @"<", @"<=" },
38
+ b: @TypeOf(a),
39
+ ) bool {
40
+ return switch (op) {
41
+ .@"<" => switch (d) {
42
+ .ascending => a < b,
43
+ .descending => a > b,
44
+ },
45
+ .@"<=" => switch (d) {
46
+ .ascending => a <= b,
47
+ .descending => a >= b,
48
+ },
49
+ };
50
+ }
51
+
52
+ pub inline fn lower(d: Direction, a: anytype, b: @TypeOf(a)) @TypeOf(a) {
53
+ return if (d.cmp(a, .@"<", b)) a else b;
54
+ }
55
+
56
+ pub inline fn upper(d: Direction, a: anytype, b: @TypeOf(a)) @TypeOf(a) {
57
+ return if (d.cmp(a, .@"<", b)) b else a;
58
+ }
59
+
60
+ pub inline fn slice_peek(d: Direction, slice: anytype) *const Elem(@TypeOf(slice)) {
61
+ assert(slice.len > 0);
62
+ return switch (d) {
63
+ .ascending => &slice[0],
64
+ .descending => &slice[slice.len - 1],
65
+ };
66
+ }
67
+
68
+ pub inline fn slice_pop(
69
+ d: Direction,
70
+ slice: anytype,
71
+ ) struct { Elem(@TypeOf(slice)), @TypeOf(slice) } {
72
+ assert(slice.len > 0);
73
+ return switch (d) {
74
+ .ascending => .{ slice[0], slice[1..] },
75
+ .descending => .{ slice[slice.len - 1], slice[0 .. slice.len - 1] },
76
+ };
77
+ }
78
+
79
+ pub inline fn slice_lower_bound(
80
+ direction: Direction,
81
+ comptime Key: type,
82
+ comptime Value: type,
83
+ comptime key_from_value: fn (*const Value) callconv(.@"inline") Key,
84
+ slice: []const Value,
85
+ key: Key,
86
+ ) []const Value {
87
+ maybe(slice.len == 0);
88
+ switch (direction) {
89
+ .ascending => {
90
+ const start = binary_search.binary_search_values_upsert_index(
91
+ Key,
92
+ Value,
93
+ key_from_value,
94
+ slice,
95
+ key,
96
+ .{ .mode = .lower_bound },
97
+ );
98
+
99
+ return if (start == slice.len) &.{} else slice[start..];
100
+ },
101
+ .descending => {
102
+ const end = end: {
103
+ const index = binary_search.binary_search_values_upsert_index(
104
+ Key,
105
+ Value,
106
+ key_from_value,
107
+ slice,
108
+ key,
109
+ .{ .mode = .upper_bound },
110
+ );
111
+ break :end index + @intFromBool(
112
+ index < slice.len and key_from_value(&slice[index]) <= key,
113
+ );
114
+ };
115
+
116
+ return if (end == 0) &.{} else slice[0..end];
117
+ },
118
+ }
119
+ }
11
120
  };
@@ -482,10 +482,10 @@ pub fn ScanType(
482
482
 
483
483
  if (comptime is_composite_key(Value)) {
484
484
  const prefix = prefix: {
485
- const prefix_min = Value.key_prefix(scan_impl.key_min);
486
- const prefix_max = Value.key_prefix(scan_impl.key_max);
487
- assert(prefix_min == prefix_max);
488
- break :prefix prefix_min;
485
+ const prefix_lower = Value.key_prefix(scan_impl.key_lower);
486
+ const prefix_upper = Value.key_prefix(scan_impl.key_upper);
487
+ assert(prefix_lower == prefix_upper);
488
+ break :prefix prefix_lower;
489
489
  };
490
490
  scan_impl.probe(Value.key_from_value(&.{
491
491
  .field = prefix,
@@ -496,7 +496,7 @@ pub fn ScanType(
496
496
  comptime assert(Groove.IdTree != void);
497
497
 
498
498
  // Scans over the IdTree cannot probe for a next timestamp.
499
- assert(scan_impl.key_min == scan_impl.key_max);
499
+ assert(scan_impl.key_lower == scan_impl.key_upper);
500
500
  }
501
501
  },
502
502
  }
@@ -516,12 +516,12 @@ pub fn ScanType(
516
516
  if (comptime is_composite_key(Value)) {
517
517
  // Secondary indexes can only produce results sorted by timestamp if
518
518
  // scanning over the same key prefix.
519
- assert(Value.key_prefix(scan_impl.key_min) ==
520
- Value.key_prefix(scan_impl.key_max));
519
+ assert(Value.key_prefix(scan_impl.key_lower) ==
520
+ Value.key_prefix(scan_impl.key_upper));
521
521
  } else {
522
522
  comptime assert(tag == .id);
523
523
  comptime assert(Groove.IdTree != void);
524
- assert(scan_impl.key_min == scan_impl.key_max);
524
+ assert(scan_impl.key_lower == scan_impl.key_upper);
525
525
  }
526
526
 
527
527
  return scan_impl.direction;
@@ -267,10 +267,7 @@ fn ScanMergeType(
267
267
  // 2. k_way_merge₁ relays 11 to its streams (tree₁ + zig_zag_merge₂).
268
268
  // 3. Probing zig_zag_merge₂ with 12 trips the assert, because tree₃ has
269
269
  // already produced a higher key (11 < 12).
270
- switch (self.direction) {
271
- .ascending => maybe(key_popped < timestamp),
272
- .descending => maybe(key_popped > timestamp),
273
- }
270
+ maybe(self.direction.cmp(key_popped, .@"<", timestamp));
274
271
  }
275
272
 
276
273
  // Once the underlying streams have been changed, the merge iterator needs
@@ -122,8 +122,8 @@ pub fn ScanTreeType(
122
122
  buffer: *const ScanBuffer,
123
123
 
124
124
  direction: Direction,
125
- key_min: Key,
126
- key_max: Key,
125
+ key_lower: Key,
126
+ key_upper: Key,
127
127
  snapshot: u64,
128
128
 
129
129
  table_mutable_values: []const Value,
@@ -207,8 +207,8 @@ pub fn ScanTreeType(
207
207
  .buffer = buffer,
208
208
  .state = .idle,
209
209
  .snapshot = snapshot,
210
- .key_min = key_min,
211
- .key_max = key_max,
210
+ .key_lower = direction.lower(key_min, key_max),
211
+ .key_upper = direction.upper(key_min, key_max),
212
212
  .direction = direction,
213
213
  .table_mutable_values = table_mutable_values,
214
214
  .table_immutable_values = table_immutable_values,
@@ -304,45 +304,35 @@ pub fn ScanTreeType(
304
304
  assert(self.state != .buffering);
305
305
 
306
306
  // No need to move if the current range is already tighter.
307
- // It can abort scanning if the key is unreachable.
308
- if (probe_key < self.key_min) {
309
- if (self.direction == .descending) self.abort();
310
- return;
311
- } else if (self.key_max < probe_key) {
312
- if (self.direction == .ascending) self.abort();
307
+ if (self.direction.cmp(probe_key, .@"<", self.key_lower)) return;
308
+ if (self.direction.cmp(self.key_upper, .@"<", probe_key)) {
309
+ self.abort(); // The key is unreachable, abort scanning.
313
310
  return;
314
311
  }
315
312
 
316
313
  // It's allowed to probe multiple times with the same `probe_key`.
317
314
  // In this case, there's no need to move since the key range was already set.
318
- if (switch (self.direction) {
319
- .ascending => self.key_min == probe_key,
320
- .descending => self.key_max == probe_key,
321
- }) {
315
+ if (probe_key == self.key_lower) {
322
316
  assert(self.state == .idle or
323
317
  self.state == .seeking or
324
318
  self.state == .needs_data);
325
319
  return;
326
320
  }
327
321
 
328
- // Updates the scan range depending on the direction.
329
- switch (self.direction) {
330
- .ascending => {
331
- assert(self.key_min < probe_key);
332
- assert(probe_key <= self.key_max);
333
- self.key_min = probe_key;
334
- },
335
- .descending => {
336
- assert(probe_key < self.key_max);
337
- assert(self.key_min <= probe_key);
338
- self.key_max = probe_key;
339
- },
340
- }
322
+ assert(self.direction.cmp(self.key_lower, .@"<", probe_key));
323
+ assert(self.direction.cmp(probe_key, .@"<=", self.key_upper));
324
+ self.key_lower = probe_key;
341
325
 
342
326
  // Re-slicing the in-memory tables:
343
327
  inline for (.{ &self.table_mutable_values, &self.table_immutable_values }) |field| {
344
328
  const table_memory = field.*;
345
- const slice: []const Value = probe_values(self.direction, table_memory, probe_key);
329
+ const slice: []const Value = self.direction.slice_lower_bound(
330
+ Key,
331
+ Value,
332
+ key_from_value,
333
+ table_memory,
334
+ probe_key,
335
+ );
346
336
  assert(slice.len <= table_memory.len);
347
337
  field.* = slice;
348
338
  }
@@ -358,10 +348,7 @@ pub fn ScanTreeType(
358
348
  // It's not expected to probe a scan that already produced a key equals
359
349
  // or ahead the probe.
360
350
  assert(self.merge_iterator.?.key_popped == null or
361
- switch (self.direction) {
362
- .ascending => self.merge_iterator.?.key_popped.? < probe_key,
363
- .descending => self.merge_iterator.?.key_popped.? > probe_key,
364
- });
351
+ self.direction.cmp(self.merge_iterator.?.key_popped.?, .@"<", probe_key));
365
352
 
366
353
  // Once the underlying streams have been changed, the merge iterator needs
367
354
  // to reset its state, otherwise it may have dirty keys buffered.
@@ -429,12 +416,7 @@ pub fn ScanTreeType(
429
416
  assert(self.state == .seeking);
430
417
 
431
418
  if (values.len == 0) return null;
432
-
433
- const value: *const Value = switch (self.direction) {
434
- .ascending => &values[0],
435
- .descending => &values[values.len - 1],
436
- };
437
-
419
+ const value: *const Value = self.direction.slice_peek(values);
438
420
  const key = key_from_value(value);
439
421
  return key;
440
422
  }
@@ -452,25 +434,15 @@ pub fn ScanTreeType(
452
434
 
453
435
  assert(values.len > 0);
454
436
 
455
- // TableMemory already deduplicates.
456
- switch (self.direction) {
457
- .ascending => {
458
- assert(values.len <= 1 or
459
- key_from_value(&values[0]) != key_from_value(&values[1]));
460
-
461
- const value_first = values[0];
462
- values = values[1..];
463
- return value_first;
464
- },
465
- .descending => {
466
- assert(values.len <= 1 or key_from_value(&values[values.len - 1]) !=
467
- key_from_value(&values[values.len - 2]));
437
+ const result, values = self.direction.slice_pop(values);
438
+ // TableMemory already deduplicates
439
+ if (values.len > 0) assert(self.direction.cmp(
440
+ key_from_value(&result),
441
+ .@"<",
442
+ key_from_value(self.direction.slice_peek(values)),
443
+ ));
468
444
 
469
- const value_last = values[values.len - 1];
470
- values = values[0 .. values.len - 1];
471
- return value_last;
472
- },
473
- }
445
+ return result;
474
446
  }
475
447
 
476
448
  fn merge_level_peek(self: *const ScanTree, level_index: u32) Pending!?Key {
@@ -488,40 +460,6 @@ pub fn ScanTreeType(
488
460
  const level = &self.levels[level_index];
489
461
  return level.pop();
490
462
  }
491
-
492
- fn probe_values(direction: Direction, values: []const Value, key: Key) []const Value {
493
- switch (direction) {
494
- .ascending => {
495
- const start = binary_search.binary_search_values_upsert_index(
496
- Key,
497
- Value,
498
- key_from_value,
499
- values,
500
- key,
501
- .{ .mode = .lower_bound },
502
- );
503
-
504
- return if (start == values.len) &.{} else values[start..];
505
- },
506
- .descending => {
507
- const end = end: {
508
- const index = binary_search.binary_search_values_upsert_index(
509
- Key,
510
- Value,
511
- key_from_value,
512
- values,
513
- key,
514
- .{ .mode = .upper_bound },
515
- );
516
- break :end index + @intFromBool(
517
- index < values.len and key_from_value(&values[index]) <= key,
518
- );
519
- };
520
-
521
- return if (end == 0) &.{} else values[0..end];
522
- },
523
- }
524
- }
525
463
  };
526
464
  }
527
465
 
@@ -655,11 +593,7 @@ fn ScanTreeLevelType(comptime ScanTree: type, comptime Storage: type) type {
655
593
  assert(@intFromPtr(values.ptr) <=
656
594
  @intFromPtr(self.buffer.value_block) + self.buffer.value_block.len);
657
595
 
658
- const value: *const Value = switch (self.scan.direction) {
659
- .ascending => &values[0],
660
- .descending => &values[values.len - 1],
661
- };
662
-
596
+ const value: *const Value = self.scan.direction.slice_peek(values);
663
597
  const key = key_from_value(value);
664
598
  return key;
665
599
  },
@@ -674,33 +608,19 @@ fn ScanTreeLevelType(comptime ScanTree: type, comptime Storage: type) type {
674
608
  assert(self.values == .buffered);
675
609
  assert(self.scan.state == .seeking);
676
610
 
677
- var values = self.values.buffered;
611
+ const values = self.values.buffered;
678
612
  assert(values.len > 0);
679
613
  assert(@intFromPtr(values.ptr) >= @intFromPtr(self.buffer.value_block));
680
614
  assert(@intFromPtr(values.ptr) <=
681
615
  @intFromPtr(self.buffer.value_block) + self.buffer.value_block.len);
682
616
 
683
- defer {
684
- assert(self.values == .buffered);
685
- if (self.values.buffered.len == 0) {
686
- // Moving to the next `value_block` or `table_info`.
687
- // This will cause the next `peek()` to return `Pending`.
688
- self.move_next();
689
- }
690
- }
691
-
692
- switch (self.scan.direction) {
693
- .ascending => {
694
- const first_value = values[0];
695
- self.values = .{ .buffered = values[1..] };
696
- return first_value;
697
- },
698
- .descending => {
699
- const last_value = values[values.len - 1];
700
- self.values = .{ .buffered = values[0 .. values.len - 1] };
701
- return last_value;
702
- },
617
+ const result, self.values.buffered = self.scan.direction.slice_pop(values);
618
+ if (self.values.buffered.len == 0) {
619
+ // Moving to the next `value_block` or `table_info`.
620
+ // This will cause the next `peek()` to return `Pending`.
621
+ self.move_next();
703
622
  }
623
+ return result;
704
624
  }
705
625
 
706
626
  pub fn probe(self: *ScanTreeLevel, probe_key: Key) void {
@@ -712,8 +632,10 @@ fn ScanTreeLevelType(comptime ScanTree: type, comptime Storage: type) type {
712
632
  .fetching => {},
713
633
  .buffered => |buffer| {
714
634
  assert(buffer.len > 0);
715
- const slice: []const Value = ScanTree.probe_values(
716
- self.scan.direction,
635
+ const slice: []const Value = self.scan.direction.slice_lower_bound(
636
+ Key,
637
+ Value,
638
+ key_from_value,
717
639
  buffer,
718
640
  probe_key,
719
641
  );
@@ -729,10 +651,7 @@ fn ScanTreeLevelType(comptime ScanTree: type, comptime Storage: type) type {
729
651
  if (self.state == .iterating) {
730
652
  const key_exclusive_next =
731
653
  self.state.iterating.key_exclusive_next;
732
- assert(switch (self.scan.direction) {
733
- .ascending => key_exclusive_next >= probe_key,
734
- .descending => key_exclusive_next <= probe_key,
735
- });
654
+ assert(self.scan.direction.cmp(probe_key, .@"<=", key_exclusive_next));
736
655
  }
737
656
 
738
657
  self.values = .{ .buffered = slice };
@@ -772,10 +691,11 @@ fn ScanTreeLevelType(comptime ScanTree: type, comptime Storage: type) type {
772
691
  // If the next key is out of the range,
773
692
  // there are no more `table_info`s to scan next.
774
693
  const key_exclusive_next = iterating.key_exclusive_next;
775
- if (switch (self.scan.direction) {
776
- .ascending => key_exclusive_next > self.scan.key_max,
777
- .descending => key_exclusive_next < self.scan.key_min,
778
- }) {
694
+ if (self.scan.direction.cmp(
695
+ self.scan.key_upper,
696
+ .@"<",
697
+ key_exclusive_next,
698
+ )) {
779
699
  // The next `table_info` is out of the key range, so it's finished.
780
700
  self.state = .{ .finished = .{} };
781
701
  self.values = .finished;
@@ -783,13 +703,14 @@ fn ScanTreeLevelType(comptime ScanTree: type, comptime Storage: type) type {
783
703
  // Load the next `table_info`.
784
704
  self.state = .loading_manifest;
785
705
  self.values = .fetching;
786
- if (switch (self.scan.direction) {
787
- .ascending => key_exclusive_next < self.scan.key_min,
788
- .descending => key_exclusive_next > self.scan.key_max,
789
- }) {
706
+ if (self.scan.direction.cmp(
707
+ key_exclusive_next,
708
+ .@"<",
709
+ self.scan.key_lower,
710
+ )) {
790
711
  // A probe() skipped past the last table we iterated, so our
791
712
  // key_exclusive_next is now out of bounds, superseded by the
792
- // tightened key_min (ascending) or key_max (descending) bound.
713
+ // tightened lower bound.
793
714
  self.move_next_manifest_table(null);
794
715
  } else {
795
716
  self.move_next_manifest_table(key_exclusive_next);
@@ -820,8 +741,8 @@ fn ScanTreeLevelType(comptime ScanTree: type, comptime Storage: type) type {
820
741
  if (manifest.next_table(.{
821
742
  .level = self.level_index,
822
743
  .snapshot = self.scan.snapshot,
823
- .key_min = self.scan.key_min,
824
- .key_max = self.scan.key_max,
744
+ .key_min = @min(self.scan.key_lower, self.scan.key_upper),
745
+ .key_max = @max(self.scan.key_lower, self.scan.key_upper),
825
746
  .key_exclusive = key_exclusive,
826
747
  .direction = self.scan.direction,
827
748
  })) |table_info| {
@@ -865,20 +786,23 @@ fn ScanTreeLevelType(comptime ScanTree: type, comptime Storage: type) type {
865
786
 
866
787
  const Range = struct { start: u32, end: u32 };
867
788
  const range_found: ?Range = range: {
789
+ const scan_key_min = @min(self.scan.key_lower, self.scan.key_upper);
790
+ const scan_key_max = @max(self.scan.key_lower, self.scan.key_upper);
791
+
868
792
  const keys_max = Table.index_value_keys_used(self.buffer.index_block, .key_max);
869
793
  const keys_min = Table.index_value_keys_used(self.buffer.index_block, .key_min);
870
794
  // The `index_block` *might* contain the key range,
871
795
  // otherwise, it shouldn't have been returned by the manifest.
872
796
  assert(keys_min.len > 0 and keys_max.len > 0);
873
797
  assert(keys_min.len == keys_max.len);
874
- assert(keys_min[0] <= self.scan.key_max and
875
- self.scan.key_min <= keys_max[keys_max.len - 1]);
798
+ assert(keys_min[0] <= scan_key_max);
799
+ assert(scan_key_min <= keys_max[keys_max.len - 1]);
876
800
 
877
801
  const indexes = binary_search.binary_search_keys_range_upsert_indexes(
878
802
  Key,
879
803
  keys_max,
880
- self.scan.key_min,
881
- self.scan.key_max,
804
+ scan_key_min,
805
+ scan_key_max,
882
806
  );
883
807
 
884
808
  // The key range was not found.
@@ -889,7 +813,7 @@ fn ScanTreeLevelType(comptime ScanTree: type, comptime Storage: type) type {
889
813
  // the key depending on the `key_min`.
890
814
  const end = end: {
891
815
  break :end indexes.end + @intFromBool(
892
- indexes.end < keys_max.len and keys_min[indexes.end] <= self.scan.key_max,
816
+ indexes.end < keys_max.len and keys_min[indexes.end] <= scan_key_max,
893
817
  );
894
818
  };
895
819
 
@@ -961,13 +885,15 @@ fn ScanTreeLevelType(comptime ScanTree: type, comptime Storage: type) type {
961
885
  assert(self.scan.state.buffering.pending_count > 0);
962
886
 
963
887
  const values = Table.value_block_values_used(value_block);
888
+ const scan_key_min = @min(self.scan.key_lower, self.scan.key_upper);
889
+ const scan_key_max = @max(self.scan.key_lower, self.scan.key_upper);
964
890
  const range = binary_search.binary_search_values_range(
965
891
  Key,
966
892
  Value,
967
893
  key_from_value,
968
894
  values,
969
- self.scan.key_min,
970
- self.scan.key_max,
895
+ scan_key_min,
896
+ scan_key_max,
971
897
  );
972
898
 
973
899
  if (range.count > 0) {
@@ -988,8 +914,8 @@ fn ScanTreeLevelType(comptime ScanTree: type, comptime Storage: type) type {
988
914
  // otherwise, it shouldn't have been returned by the iterator.
989
915
  const key_min = key_from_value(&values[0]);
990
916
  const key_max = key_from_value(&values[values.len - 1]);
991
- assert(key_min < self.scan.key_min and
992
- self.scan.key_max < key_max);
917
+ assert(key_min < scan_key_min);
918
+ assert(scan_key_max < key_max);
993
919
 
994
920
  // Keep fetching if there are more value blocks on this table,
995
921
  // or move to the next table otherwise.
@@ -53,19 +53,16 @@ pub fn TableValueIteratorType(comptime Storage: type) type {
53
53
  assert(it.callback == null);
54
54
  assert(!it.empty());
55
55
 
56
- const index: usize = switch (it.context.direction) {
57
- .ascending => 0,
58
- .descending => it.context.addresses.len - 1,
59
- };
60
-
61
- assert(it.context.checksums[index].padding == 0);
56
+ const address = it.context.direction.slice_peek(it.context.addresses).*;
57
+ const checksum = it.context.direction.slice_peek(it.context.checksums).*;
58
+ assert(checksum.padding == 0);
62
59
 
63
60
  it.callback = callback;
64
61
  it.context.grid.read_block(
65
62
  .{ .from_local_or_global_storage = read_block_callback },
66
63
  &it.read,
67
- it.context.addresses[index],
68
- it.context.checksums[index].value,
64
+ address,
65
+ checksum.value,
69
66
  .{ .cache_read = true, .cache_write = true },
70
67
  );
71
68
  }
@@ -78,26 +75,14 @@ pub fn TableValueIteratorType(comptime Storage: type) type {
78
75
  const callback = it.callback.?;
79
76
  it.callback = null;
80
77
 
81
- switch (it.context.direction) {
82
- .ascending => {
83
- const header = schema.header_from_block(block);
84
- assert(header.address == it.context.addresses[0]);
85
- assert(header.checksum == it.context.checksums[0].value);
86
-
87
- it.context.addresses = it.context.addresses[1..];
88
- it.context.checksums = it.context.checksums[1..];
89
- },
90
- .descending => {
91
- const index_last = it.context.checksums.len - 1;
92
- const header = schema.header_from_block(block);
93
- assert(header.address == it.context.addresses[index_last]);
94
- assert(header.checksum == it.context.checksums[index_last].value);
95
-
96
- it.context.addresses = it.context.addresses[0..index_last];
97
- it.context.checksums = it.context.checksums[0..index_last];
98
- },
99
- }
78
+ const address, it.context.addresses =
79
+ it.context.direction.slice_pop(it.context.addresses);
80
+ const checksum, it.context.checksums =
81
+ it.context.direction.slice_pop(it.context.checksums);
100
82
 
83
+ const header = schema.header_from_block(block);
84
+ assert(header.address == address);
85
+ assert(header.checksum == checksum.value);
101
86
  assert(it.context.addresses.len == it.context.checksums.len);
102
87
  callback(it, block);
103
88
  }
@@ -74,12 +74,8 @@ pub fn ZigZagMergeIteratorType(
74
74
  return null;
75
75
 
76
76
  if (it.key_popped) |previous| {
77
- switch (std.math.order(previous, key)) {
78
- .lt => assert(it.direction == .ascending),
79
- // Duplicate values are not expected.
80
- .eq => unreachable,
81
- .gt => assert(it.direction == .descending),
82
- }
77
+ // Duplicate values are not expected.
78
+ assert(it.direction.cmp(previous, .@"<", key));
83
79
  }
84
80
  it.key_popped = key;
85
81
 
@@ -134,19 +130,13 @@ pub fn ZigZagMergeIteratorType(
134
130
  return null;
135
131
 
136
132
  // The stream cannot regress.
137
- assert(
138
- it.probe_key_previous == null or
139
- key == it.probe_key_previous.? or
140
- it.key_ahead(.{
141
- .key_after = key,
142
- .key_before = it.probe_key_previous.?,
143
- }),
144
- );
133
+ assert(it.probe_key_previous == null or
134
+ it.direction.cmp(it.probe_key_previous.?, .@"<=", key));
145
135
 
146
136
  // The keys match, continuing to the next stream.
147
- if (key == probe_key) continue;
137
+ if (probe_key == key) continue;
148
138
 
149
- if (it.key_ahead(.{ .key_after = key, .key_before = probe_key })) {
139
+ if (it.direction.cmp(probe_key, .@"<", key)) {
150
140
  // The stream is ahead, it will be the probe key,
151
141
  // meaning all streams before must be probed.
152
142
  probe_key = key;
@@ -180,7 +170,7 @@ pub fn ZigZagMergeIteratorType(
180
170
  if (key == probe_key) {
181
171
  probing.unset(stream_index);
182
172
  } else {
183
- assert(it.key_ahead(.{ .key_after = key, .key_before = probe_key }));
173
+ assert(it.direction.cmp(probe_key, .@"<", key));
184
174
  }
185
175
  }
186
176
  }
@@ -207,24 +197,11 @@ pub fn ZigZagMergeIteratorType(
207
197
 
208
198
  // The iterator cannot regress.
209
199
  assert(it.probe_key_previous == null or
210
- probe_key == it.probe_key_previous.? or
211
- it.key_ahead(.{ .key_after = probe_key, .key_before = it.probe_key_previous.? }));
200
+ it.direction.cmp(it.probe_key_previous.?, .@"<=", probe_key));
212
201
 
213
202
  it.probe_key_previous = probe_key;
214
203
  return if (pending.count() == 0) probe_key else error.Pending;
215
204
  }
216
-
217
- /// Returns true if `key_after` is ahead of `key_before` depending on the direction,
218
- /// that is `key_after > key_before` (ascending) or `key_after < key_before` (descending).
219
- inline fn key_ahead(
220
- it: *const ZigZagMergeIterator,
221
- keys: struct { key_after: Key, key_before: Key },
222
- ) bool {
223
- return switch (it.direction) {
224
- .ascending => keys.key_after > keys.key_before,
225
- .descending => keys.key_after < keys.key_before,
226
- };
227
- }
228
205
  };
229
206
  }
230
207
 
@@ -476,6 +476,24 @@ test "Cluster: network: partition 1-2 (isolate primary, asymmetric, receive-only
476
476
  try c.request(2, 2);
477
477
  }
478
478
 
479
+ test "Cluster: network: partition primary-all (isolate primary, asymmetric, send-only)" {
480
+ // The primary can send to the backups and clients, but not receive.
481
+ // Since the primary can't see requests, it doesn't know that it needs to abdicate.
482
+ // The rest of the cluster needs to view-change anyway.
483
+ const t = try TestContext.init(.{ .replica_count = 3 });
484
+ defer t.deinit();
485
+
486
+ var c = t.clients(.{});
487
+ try c.request(1, 1);
488
+ t.replica(.A0).drop(.__, .incoming, .request);
489
+ // Since the primary doesn't receive requests, it can't ever abdicate.
490
+ const mark = marks.check("send_commit: primary abdicating");
491
+ // TODO:
492
+ // try c.request(2, 2);
493
+ try c.request(2, 1);
494
+ try mark.expect_not_hit();
495
+ }
496
+
479
497
  test "Cluster: network: partition client-primary (symmetric)" {
480
498
  // Clients cannot communicate with the primary, but they still request/reply via a backup.
481
499
  const t = try TestContext.init(.{ .replica_count = 3 });
@@ -1,4 +1,4 @@
1
1
  module TigerBeetle
2
- VERSION = '0.0.37'.freeze
3
- TB_VERSION = '0.16.73'.freeze
2
+ VERSION = '0.0.38'.freeze
3
+ TB_VERSION = '0.16.74'.freeze
4
4
  end
metadata CHANGED
@@ -1,14 +1,14 @@
1
1
  --- !ruby/object:Gem::Specification
2
2
  name: tigerbeetle
3
3
  version: !ruby/object:Gem::Version
4
- version: 0.0.37
4
+ version: 0.0.38
5
5
  platform: ruby
6
6
  authors:
7
7
  - Anthony D
8
8
  autorequire:
9
9
  bindir: bin
10
10
  cert_chain: []
11
- date: 2026-04-08 00:00:00.000000000 Z
11
+ date: 2026-05-13 00:00:00.000000000 Z
12
12
  dependencies:
13
13
  - !ruby/object:Gem::Dependency
14
14
  name: ffi