tigerbeetle-node 0.11.13 → 0.12.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (147)
  1. package/README.md +5 -10
  2. package/dist/bin/aarch64-linux-gnu/client.node +0 -0
  3. package/dist/bin/aarch64-linux-musl/client.node +0 -0
  4. package/dist/bin/aarch64-macos/client.node +0 -0
  5. package/dist/bin/x86_64-linux-gnu/client.node +0 -0
  6. package/dist/bin/x86_64-linux-musl/client.node +0 -0
  7. package/dist/bin/x86_64-macos/client.node +0 -0
  8. package/dist/index.js +33 -1
  9. package/dist/index.js.map +1 -1
  10. package/package-lock.json +66 -0
  11. package/package.json +6 -16
  12. package/src/index.ts +56 -1
  13. package/src/node.zig +9 -9
  14. package/dist/.client.node.sha256 +0 -1
  15. package/scripts/build_lib.sh +0 -61
  16. package/scripts/download_node_headers.sh +0 -32
  17. package/src/tigerbeetle/scripts/benchmark.bat +0 -55
  18. package/src/tigerbeetle/scripts/benchmark.sh +0 -66
  19. package/src/tigerbeetle/scripts/confirm_image.sh +0 -44
  20. package/src/tigerbeetle/scripts/fail_on_diff.sh +0 -9
  21. package/src/tigerbeetle/scripts/fuzz_loop.sh +0 -15
  22. package/src/tigerbeetle/scripts/fuzz_loop_hash_log.sh +0 -12
  23. package/src/tigerbeetle/scripts/fuzz_unique_errors.sh +0 -7
  24. package/src/tigerbeetle/scripts/install.bat +0 -7
  25. package/src/tigerbeetle/scripts/install.sh +0 -21
  26. package/src/tigerbeetle/scripts/install_zig.bat +0 -113
  27. package/src/tigerbeetle/scripts/install_zig.sh +0 -90
  28. package/src/tigerbeetle/scripts/lint.zig +0 -199
  29. package/src/tigerbeetle/scripts/pre-commit.sh +0 -9
  30. package/src/tigerbeetle/scripts/scripts/benchmark.bat +0 -55
  31. package/src/tigerbeetle/scripts/scripts/benchmark.sh +0 -66
  32. package/src/tigerbeetle/scripts/scripts/confirm_image.sh +0 -44
  33. package/src/tigerbeetle/scripts/scripts/fail_on_diff.sh +0 -9
  34. package/src/tigerbeetle/scripts/scripts/fuzz_loop.sh +0 -15
  35. package/src/tigerbeetle/scripts/scripts/fuzz_loop_hash_log.sh +0 -12
  36. package/src/tigerbeetle/scripts/scripts/fuzz_unique_errors.sh +0 -7
  37. package/src/tigerbeetle/scripts/scripts/install.bat +0 -7
  38. package/src/tigerbeetle/scripts/scripts/install.sh +0 -21
  39. package/src/tigerbeetle/scripts/scripts/install_zig.bat +0 -113
  40. package/src/tigerbeetle/scripts/scripts/install_zig.sh +0 -90
  41. package/src/tigerbeetle/scripts/scripts/lint.zig +0 -199
  42. package/src/tigerbeetle/scripts/scripts/pre-commit.sh +0 -9
  43. package/src/tigerbeetle/scripts/scripts/shellcheck.sh +0 -5
  44. package/src/tigerbeetle/scripts/scripts/tests_on_alpine.sh +0 -10
  45. package/src/tigerbeetle/scripts/scripts/tests_on_ubuntu.sh +0 -14
  46. package/src/tigerbeetle/scripts/scripts/upgrade_ubuntu_kernel.sh +0 -48
  47. package/src/tigerbeetle/scripts/scripts/validate_docs.sh +0 -23
  48. package/src/tigerbeetle/scripts/scripts/vr_state_enumerate +0 -46
  49. package/src/tigerbeetle/scripts/shellcheck.sh +0 -5
  50. package/src/tigerbeetle/scripts/tests_on_alpine.sh +0 -10
  51. package/src/tigerbeetle/scripts/tests_on_ubuntu.sh +0 -14
  52. package/src/tigerbeetle/scripts/upgrade_ubuntu_kernel.sh +0 -48
  53. package/src/tigerbeetle/scripts/validate_docs.sh +0 -23
  54. package/src/tigerbeetle/scripts/vr_state_enumerate +0 -46
  55. package/src/tigerbeetle/src/benchmark.zig +0 -336
  56. package/src/tigerbeetle/src/config.zig +0 -233
  57. package/src/tigerbeetle/src/constants.zig +0 -428
  58. package/src/tigerbeetle/src/ewah.zig +0 -286
  59. package/src/tigerbeetle/src/ewah_benchmark.zig +0 -120
  60. package/src/tigerbeetle/src/ewah_fuzz.zig +0 -130
  61. package/src/tigerbeetle/src/fifo.zig +0 -120
  62. package/src/tigerbeetle/src/io/benchmark.zig +0 -213
  63. package/src/tigerbeetle/src/io/darwin.zig +0 -814
  64. package/src/tigerbeetle/src/io/linux.zig +0 -1071
  65. package/src/tigerbeetle/src/io/test.zig +0 -643
  66. package/src/tigerbeetle/src/io/windows.zig +0 -1183
  67. package/src/tigerbeetle/src/io.zig +0 -34
  68. package/src/tigerbeetle/src/iops.zig +0 -107
  69. package/src/tigerbeetle/src/lsm/README.md +0 -308
  70. package/src/tigerbeetle/src/lsm/binary_search.zig +0 -341
  71. package/src/tigerbeetle/src/lsm/bloom_filter.zig +0 -125
  72. package/src/tigerbeetle/src/lsm/compaction.zig +0 -603
  73. package/src/tigerbeetle/src/lsm/composite_key.zig +0 -77
  74. package/src/tigerbeetle/src/lsm/direction.zig +0 -11
  75. package/src/tigerbeetle/src/lsm/eytzinger.zig +0 -587
  76. package/src/tigerbeetle/src/lsm/eytzinger_benchmark.zig +0 -330
  77. package/src/tigerbeetle/src/lsm/forest.zig +0 -205
  78. package/src/tigerbeetle/src/lsm/forest_fuzz.zig +0 -450
  79. package/src/tigerbeetle/src/lsm/grid.zig +0 -573
  80. package/src/tigerbeetle/src/lsm/groove.zig +0 -1036
  81. package/src/tigerbeetle/src/lsm/k_way_merge.zig +0 -474
  82. package/src/tigerbeetle/src/lsm/level_iterator.zig +0 -332
  83. package/src/tigerbeetle/src/lsm/manifest.zig +0 -617
  84. package/src/tigerbeetle/src/lsm/manifest_level.zig +0 -878
  85. package/src/tigerbeetle/src/lsm/manifest_log.zig +0 -789
  86. package/src/tigerbeetle/src/lsm/manifest_log_fuzz.zig +0 -691
  87. package/src/tigerbeetle/src/lsm/merge_iterator.zig +0 -106
  88. package/src/tigerbeetle/src/lsm/node_pool.zig +0 -235
  89. package/src/tigerbeetle/src/lsm/posted_groove.zig +0 -381
  90. package/src/tigerbeetle/src/lsm/segmented_array.zig +0 -1329
  91. package/src/tigerbeetle/src/lsm/segmented_array_benchmark.zig +0 -148
  92. package/src/tigerbeetle/src/lsm/segmented_array_fuzz.zig +0 -9
  93. package/src/tigerbeetle/src/lsm/set_associative_cache.zig +0 -850
  94. package/src/tigerbeetle/src/lsm/table.zig +0 -1009
  95. package/src/tigerbeetle/src/lsm/table_immutable.zig +0 -192
  96. package/src/tigerbeetle/src/lsm/table_iterator.zig +0 -340
  97. package/src/tigerbeetle/src/lsm/table_mutable.zig +0 -203
  98. package/src/tigerbeetle/src/lsm/test.zig +0 -439
  99. package/src/tigerbeetle/src/lsm/tree.zig +0 -1169
  100. package/src/tigerbeetle/src/lsm/tree_fuzz.zig +0 -479
  101. package/src/tigerbeetle/src/message_bus.zig +0 -1013
  102. package/src/tigerbeetle/src/message_pool.zig +0 -156
  103. package/src/tigerbeetle/src/ring_buffer.zig +0 -399
  104. package/src/tigerbeetle/src/simulator.zig +0 -580
  105. package/src/tigerbeetle/src/state_machine/auditor.zig +0 -578
  106. package/src/tigerbeetle/src/state_machine/workload.zig +0 -883
  107. package/src/tigerbeetle/src/state_machine.zig +0 -2099
  108. package/src/tigerbeetle/src/static_allocator.zig +0 -65
  109. package/src/tigerbeetle/src/stdx.zig +0 -171
  110. package/src/tigerbeetle/src/storage.zig +0 -393
  111. package/src/tigerbeetle/src/testing/cluster/message_bus.zig +0 -82
  112. package/src/tigerbeetle/src/testing/cluster/network.zig +0 -237
  113. package/src/tigerbeetle/src/testing/cluster/state_checker.zig +0 -169
  114. package/src/tigerbeetle/src/testing/cluster/storage_checker.zig +0 -202
  115. package/src/tigerbeetle/src/testing/cluster.zig +0 -444
  116. package/src/tigerbeetle/src/testing/fuzz.zig +0 -140
  117. package/src/tigerbeetle/src/testing/hash_log.zig +0 -66
  118. package/src/tigerbeetle/src/testing/id.zig +0 -99
  119. package/src/tigerbeetle/src/testing/packet_simulator.zig +0 -374
  120. package/src/tigerbeetle/src/testing/priority_queue.zig +0 -645
  121. package/src/tigerbeetle/src/testing/reply_sequence.zig +0 -139
  122. package/src/tigerbeetle/src/testing/state_machine.zig +0 -250
  123. package/src/tigerbeetle/src/testing/storage.zig +0 -757
  124. package/src/tigerbeetle/src/testing/table.zig +0 -247
  125. package/src/tigerbeetle/src/testing/time.zig +0 -84
  126. package/src/tigerbeetle/src/tigerbeetle.zig +0 -227
  127. package/src/tigerbeetle/src/time.zig +0 -112
  128. package/src/tigerbeetle/src/tracer.zig +0 -529
  129. package/src/tigerbeetle/src/unit_tests.zig +0 -40
  130. package/src/tigerbeetle/src/vopr.zig +0 -495
  131. package/src/tigerbeetle/src/vsr/README.md +0 -209
  132. package/src/tigerbeetle/src/vsr/client.zig +0 -544
  133. package/src/tigerbeetle/src/vsr/clock.zig +0 -855
  134. package/src/tigerbeetle/src/vsr/journal.zig +0 -2415
  135. package/src/tigerbeetle/src/vsr/journal_format_fuzz.zig +0 -111
  136. package/src/tigerbeetle/src/vsr/marzullo.zig +0 -309
  137. package/src/tigerbeetle/src/vsr/replica.zig +0 -6616
  138. package/src/tigerbeetle/src/vsr/replica_format.zig +0 -219
  139. package/src/tigerbeetle/src/vsr/superblock.zig +0 -1631
  140. package/src/tigerbeetle/src/vsr/superblock_client_table.zig +0 -256
  141. package/src/tigerbeetle/src/vsr/superblock_free_set.zig +0 -929
  142. package/src/tigerbeetle/src/vsr/superblock_free_set_fuzz.zig +0 -334
  143. package/src/tigerbeetle/src/vsr/superblock_fuzz.zig +0 -390
  144. package/src/tigerbeetle/src/vsr/superblock_manifest.zig +0 -615
  145. package/src/tigerbeetle/src/vsr/superblock_quorums.zig +0 -394
  146. package/src/tigerbeetle/src/vsr/superblock_quorums_fuzz.zig +0 -314
  147. package/src/tigerbeetle/src/vsr.zig +0 -1425
@@ -1,203 +0,0 @@
1
- const std = @import("std");
2
- const mem = std.mem;
3
- const math = std.math;
4
- const assert = std.debug.assert;
5
-
6
- const constants = @import("../constants.zig");
7
- const div_ceil = @import("../stdx.zig").div_ceil;
8
- const SetAssociativeCache = @import("set_associative_cache.zig").SetAssociativeCache;
9
-
10
- /// Range queries are not supported on the TableMutable, it must first be made immutable.
11
- pub fn TableMutableType(comptime Table: type) type {
12
- const Key = Table.Key;
13
- const Value = Table.Value;
14
- const compare_keys = Table.compare_keys;
15
- const key_from_value = Table.key_from_value;
16
- const tombstone_from_key = Table.tombstone_from_key;
17
- const tombstone = Table.tombstone;
18
- const value_count_max = Table.value_count_max;
19
- const usage = Table.usage;
20
-
21
- return struct {
22
- const TableMutable = @This();
23
-
24
- const load_factor = 50;
25
- const Values = std.HashMapUnmanaged(Value, void, Table.HashMapContextValue, load_factor);
26
-
27
- pub const ValuesCache = SetAssociativeCache(
28
- Key,
29
- Value,
30
- Table.key_from_value,
31
- struct {
32
- inline fn hash(key: Key) u64 {
33
- return std.hash.Wyhash.hash(0, mem.asBytes(&key));
34
- }
35
- }.hash,
36
- struct {
37
- inline fn equal(a: Key, b: Key) bool {
38
- return compare_keys(a, b) == .eq;
39
- }
40
- }.equal,
41
- .{},
42
- );
43
-
44
- values: Values = .{},
45
-
46
- /// Rather than using values.count(), we count how many values we could have had if every
47
- /// operation had been on a different key. This means that mistakes in calculating
48
- /// value_count_max are much easier to catch when fuzzing, rather than requiring very
49
- /// specific workloads.
50
- /// Invariant: value_count_worst_case <= value_count_max
51
- value_count_worst_case: u32 = 0,
52
-
53
- /// This is used to accelerate point lookups and is not used for range queries.
54
- /// Secondary index trees used only for range queries can therefore set this to null.
55
- ///
56
- /// The values cache is only used for the latest snapshot for simplicity.
57
- /// Earlier snapshots will still be able to utilize the block cache.
58
- ///
59
- /// The values cache is updated (in bulk) when the mutable table is sorted and frozen,
60
- /// rather than updating on every `put()`/`remove()`.
61
- /// This amortizes cache inserts for hot keys in the mutable table, and avoids redundantly
62
- /// storing duplicate values in both the mutable table and values cache.
63
- // TODO Share cache between trees of different grooves:
64
- // "A set associative cache of values shared by trees with the same key/value sizes.
65
- // The value type will be []u8 and this will be shared by trees with the same value size."
66
- values_cache: ?*ValuesCache,
67
-
68
- pub fn init(
69
- allocator: mem.Allocator,
70
- values_cache: ?*ValuesCache,
71
- ) !TableMutable {
72
- var values: Values = .{};
73
- try values.ensureTotalCapacity(allocator, value_count_max);
74
- errdefer values.deinit(allocator);
75
-
76
- return TableMutable{
77
- .values = values,
78
- .values_cache = values_cache,
79
- };
80
- }
81
-
82
- pub fn deinit(table: *TableMutable, allocator: mem.Allocator) void {
83
- table.values.deinit(allocator);
84
- }
85
-
86
- pub fn get(table: *const TableMutable, key: Key) ?*const Value {
87
- if (table.values.getKeyPtr(tombstone_from_key(key))) |value| {
88
- return value;
89
- }
90
- if (table.values_cache) |cache| {
91
- // Check the cache after the mutable table (see `values_cache` for explanation).
92
- if (cache.get(key)) |value| return value;
93
- }
94
- return null;
95
- }
96
-
97
- pub fn put(table: *TableMutable, value: *const Value) void {
98
- assert(table.value_count_worst_case < value_count_max);
99
- table.value_count_worst_case += 1;
100
- switch (usage) {
101
- .secondary_index => {
102
- const existing = table.values.fetchRemove(value.*);
103
- if (existing) |kv| {
104
- // If there was a previous operation on this key then it must have been a remove.
105
- // The put and remove cancel out.
106
- assert(tombstone(&kv.key));
107
- } else {
108
- table.values.putAssumeCapacityNoClobber(value.*, {});
109
- }
110
- },
111
- .general => {
112
- // If the key is already present in the hash map, the old key will not be overwritten
113
- // by the new one if using e.g. putAssumeCapacity(). Instead we must use the lower
114
- // level getOrPut() API and manually overwrite the old key.
115
- const upsert = table.values.getOrPutAssumeCapacity(value.*);
116
- upsert.key_ptr.* = value.*;
117
- },
118
- }
119
-
120
- // The hash map's load factor may allow for more capacity because of rounding:
121
- assert(table.values.count() <= value_count_max);
122
- }
123
-
124
- pub fn remove(table: *TableMutable, value: *const Value) void {
125
- assert(table.value_count_worst_case < value_count_max);
126
- table.value_count_worst_case += 1;
127
- switch (usage) {
128
- .secondary_index => {
129
- const existing = table.values.fetchRemove(value.*);
130
- if (existing) |kv| {
131
- // The previous operation on this key then it must have been a put.
132
- // The put and remove cancel out.
133
- assert(!tombstone(&kv.key));
134
- } else {
135
- // If the put is already on-disk, then we need to follow it with a tombstone.
136
- // The put and the tombstone may cancel each other out later during compaction.
137
- table.values.putAssumeCapacityNoClobber(tombstone_from_key(key_from_value(value)), {});
138
- }
139
- },
140
- .general => {
141
- // If the key is already present in the hash map, the old key will not be overwritten
142
- // by the new one if using e.g. putAssumeCapacity(). Instead we must use the lower
143
- // level getOrPut() API and manually overwrite the old key.
144
- const upsert = table.values.getOrPutAssumeCapacity(value.*);
145
- upsert.key_ptr.* = tombstone_from_key(key_from_value(value));
146
- },
147
- }
148
-
149
- assert(table.values.count() <= value_count_max);
150
- }
151
-
152
- pub fn clear(table: *TableMutable) void {
153
- assert(table.values.count() > 0);
154
- table.value_count_worst_case = 0;
155
- table.values.clearRetainingCapacity();
156
- assert(table.values.count() == 0);
157
- }
158
-
159
- pub fn count(table: *const TableMutable) u32 {
160
- const value = @intCast(u32, table.values.count());
161
- assert(value <= value_count_max);
162
- return value;
163
- }
164
-
165
- /// The returned slice is invalidated whenever this is called for any tree.
166
- pub fn sort_into_values_and_clear(
167
- table: *TableMutable,
168
- values_max: []Value,
169
- ) []const Value {
170
- assert(table.count() > 0);
171
- assert(table.count() <= value_count_max);
172
- assert(table.count() <= values_max.len);
173
- assert(values_max.len == value_count_max);
174
-
175
- var i: usize = 0;
176
- var it = table.values.keyIterator();
177
- while (it.next()) |value| : (i += 1) {
178
- values_max[i] = value.*;
179
-
180
- if (table.values_cache) |cache| {
181
- if (tombstone(value)) {
182
- cache.remove(key_from_value(value));
183
- } else {
184
- cache.insert(value);
185
- }
186
- }
187
- }
188
-
189
- const values = values_max[0..i];
190
- assert(values.len == table.count());
191
- std.sort.sort(Value, values, {}, sort_values_by_key_in_ascending_order);
192
-
193
- table.clear();
194
- assert(table.count() == 0);
195
-
196
- return values;
197
- }
198
-
199
- fn sort_values_by_key_in_ascending_order(_: void, a: Value, b: Value) bool {
200
- return compare_keys(key_from_value(&a), key_from_value(&b)) == .lt;
201
- }
202
- };
203
- }
@@ -1,439 +0,0 @@
1
- const std = @import("std");
2
- const testing = std.testing;
3
- const allocator = testing.allocator;
4
- const assert = std.debug.assert;
5
- const os = std.os;
6
-
7
- const constants = @import("../constants.zig");
8
- const vsr = @import("../vsr.zig");
9
- const log = std.log.scoped(.lsm_forest_test);
10
-
11
- const MessagePool = @import("../message_pool.zig").MessagePool;
12
- const Transfer = @import("../tigerbeetle.zig").Transfer;
13
- const Account = @import("../tigerbeetle.zig").Account;
14
- const Storage = @import("../storage.zig").Storage;
15
- const IO = @import("../io.zig").IO;
16
- const StateMachine = @import("../state_machine.zig").StateMachineType(Storage, .{
17
- .message_body_size_max = constants.message_body_size_max,
18
- .lsm_batch_multiple = constants.lsm_batch_multiple,
19
- });
20
-
21
- const GridType = @import("grid.zig").GridType;
22
- const GrooveType = @import("groove.zig").GrooveType;
23
- const Forest = StateMachine.Forest;
24
-
25
- const Grid = GridType(Storage);
26
- const SuperBlock = vsr.SuperBlockType(Storage);
27
-
28
- const Environment = struct {
29
- const cluster = 32;
30
- const replica = 4;
31
- const storage_size_max = vsr.Zone.superblock.size().? +
32
- vsr.Zone.wal_headers.size().? +
33
- vsr.Zone.wal_prepares.size().? +
34
- (512 + 64) * 1024 * 1024;
35
-
36
- const node_count = 1024;
37
- const cache_entries_max = 2 * 1024 * 1024;
38
- const forest_options = StateMachine.forest_options(.{
39
- .lsm_forest_node_count = node_count,
40
- .cache_entries_accounts = cache_entries_max,
41
- .cache_entries_transfers = cache_entries_max,
42
- .cache_entries_posted = cache_entries_max,
43
- });
44
-
45
- const State = enum {
46
- uninit,
47
- init,
48
- formatted,
49
- superblock_open,
50
- forest_open,
51
- forest_compacting,
52
- forest_checkpointing,
53
- superblock_checkpointing,
54
- };
55
-
56
- state: State,
57
- dir_fd: os.fd_t,
58
- fd: os.fd_t,
59
- io: IO,
60
- storage: Storage,
61
- message_pool: MessagePool,
62
- superblock: SuperBlock,
63
- superblock_context: SuperBlock.Context,
64
- grid: Grid,
65
- forest: Forest,
66
- // We need @fieldParentPtr() of forest, so we can't use an optional Forest.
67
- forest_exists: bool = false,
68
-
69
- fn init(env: *Environment, must_create: bool) !void {
70
- env.state = .uninit;
71
-
72
- const dir_path = ".";
73
- env.dir_fd = try IO.open_dir(dir_path);
74
- errdefer std.os.close(env.dir_fd);
75
-
76
- env.fd = try IO.open_file(env.dir_fd, "test_forest", storage_size_max, must_create);
77
- errdefer std.os.close(env.fd);
78
-
79
- env.io = try IO.init(128, 0);
80
- errdefer env.io.deinit();
81
-
82
- env.storage = try Storage.init(&env.io, env.fd);
83
- errdefer env.storage.deinit();
84
-
85
- env.message_pool = try MessagePool.init(allocator, .replica);
86
- errdefer env.message_pool.deinit(allocator);
87
-
88
- env.superblock = try SuperBlock.init(allocator, .{
89
- .storage = &env.storage,
90
- .storage_size_limit = constants.storage_size_max,
91
- .message_pool = &env.message_pool,
92
- });
93
- env.superblock_context = undefined;
94
- errdefer env.superblock.deinit(allocator);
95
-
96
- env.grid = try Grid.init(allocator, &env.superblock);
97
- errdefer env.grid.deinit(allocator);
98
-
99
- // Forest must be initialized with an open superblock.
100
- env.forest = undefined;
101
- env.forest_exists = false;
102
-
103
- env.state = .init;
104
- }
105
-
106
- fn deinit(env: *Environment) void {
107
- assert(env.state != .uninit);
108
- defer env.state = .uninit;
109
-
110
- if (env.forest_exists) {
111
- env.forest.deinit(allocator);
112
- env.forest_exists = false;
113
- }
114
- env.grid.deinit(allocator);
115
- env.superblock.deinit(allocator);
116
- env.message_pool.deinit(allocator);
117
- env.storage.deinit();
118
- env.io.deinit();
119
- std.os.close(env.fd);
120
- std.os.close(env.dir_fd);
121
- }
122
-
123
- fn tick(env: *Environment) !void {
124
- // env.grid.tick();
125
- try env.io.tick();
126
- }
127
-
128
- pub fn format() !void {
129
- var env: Environment = undefined;
130
-
131
- const must_create = true;
132
- try env.init(must_create);
133
- defer env.deinit();
134
-
135
- assert(env.state == .init);
136
- env.superblock.format(superblock_format_callback, &env.superblock_context, .{
137
- .cluster = cluster,
138
- .replica = replica,
139
- .storage_size_max = storage_size_max,
140
- });
141
-
142
- while (true) {
143
- switch (env.state) {
144
- .init => try env.tick(),
145
- .formatted => break,
146
- else => unreachable,
147
- }
148
- }
149
- }
150
-
151
- fn superblock_format_callback(superblock_context: *SuperBlock.Context) void {
152
- const env = @fieldParentPtr(@This(), "superblock_context", superblock_context);
153
- assert(env.state == .init);
154
- env.state = .formatted;
155
- }
156
-
157
- pub fn open(env: *Environment) !void {
158
- assert(env.state == .init);
159
- env.superblock.open(superblock_open_callback, &env.superblock_context);
160
-
161
- while (true) {
162
- switch (env.state) {
163
- .init, .superblock_open => try env.tick(),
164
- .forest_open => break,
165
- else => unreachable,
166
- }
167
- }
168
- }
169
-
170
- fn superblock_open_callback(superblock_context: *SuperBlock.Context) void {
171
- const env = @fieldParentPtr(@This(), "superblock_context", superblock_context);
172
- assert(env.state == .init);
173
- env.state = .superblock_open;
174
-
175
- env.forest = Forest.init(allocator, &env.grid, node_count, forest_options) catch unreachable;
176
- env.forest_exists = true;
177
- env.forest.open(forest_open_callback);
178
- }
179
-
180
- fn forest_open_callback(forest: *Forest) void {
181
- const env = @fieldParentPtr(@This(), "forest", forest);
182
- assert(env.state == .superblock_open);
183
- env.state = .forest_open;
184
- }
185
-
186
- pub fn checkpoint(env: *Environment) !void {
187
- assert(env.state == .forest_open);
188
- env.state = .forest_checkpointing;
189
- env.forest.checkpoint(forest_checkpoint_callback);
190
-
191
- while (true) {
192
- switch (env.state) {
193
- .forest_checkpointing, .superblock_checkpointing => try env.tick(),
194
- .forest_open => break,
195
- else => unreachable,
196
- }
197
- }
198
- }
199
-
200
- fn forest_checkpoint_callback(forest: *Forest) void {
201
- const env = @fieldParentPtr(@This(), "forest", forest);
202
- assert(env.state == .forest_checkpointing);
203
-
204
- log.debug("forest checkpointing completed!", .{});
205
-
206
- const vsr_state = &env.superblock.staging.vsr_state;
207
-
208
- env.state = .superblock_checkpointing;
209
- env.superblock.checkpoint(
210
- superblock_checkpoint_callback,
211
- &env.superblock_context,
212
- .{
213
- .commit_min_checkpoint = vsr_state.commit_min_checkpoint + 1,
214
- .commit_min = vsr_state.commit_min + 1,
215
- },
216
- );
217
- }
218
-
219
- fn superblock_checkpoint_callback(superblock_context: *SuperBlock.Context) void {
220
- const env = @fieldParentPtr(@This(), "superblock_context", superblock_context);
221
- assert(env.state == .superblock_checkpointing);
222
- env.state = .forest_open;
223
-
224
- log.debug("superblock checkpointing completed!", .{});
225
- }
226
-
227
- pub fn compact(env: *Environment, op: u64) !void {
228
- assert(env.state == .forest_open);
229
- env.state = .forest_compacting;
230
- env.forest.compact(forest_compact_callback, op);
231
-
232
- while (true) {
233
- switch (env.state) {
234
- .forest_compacting => try env.tick(),
235
- .forest_open => break,
236
- else => unreachable,
237
- }
238
- }
239
- }
240
-
241
- fn forest_compact_callback(forest: *Forest) void {
242
- const env = @fieldParentPtr(@This(), "forest", forest);
243
- assert(env.state == .forest_compacting);
244
- env.state = .forest_open;
245
- }
246
-
247
- const Visibility = enum {
248
- visible,
249
- invisible,
250
- };
251
-
252
- pub fn assert_visibility(
253
- env: *Environment,
254
- comptime visibility: Visibility,
255
- groove: anytype,
256
- objects: anytype,
257
- comptime commit_entries_max: u32,
258
- ) !void {
259
- const Groove = @TypeOf(groove.*);
260
- const Object = @TypeOf(objects[0]);
261
-
262
- const VisibilityAssertion = struct {
263
- prefetch_context: Groove.PrefetchContext = undefined,
264
- verify_count: usize = 0,
265
- objects: []const Object,
266
- groove: *Groove,
267
-
268
- fn verify(assertion: *@This()) void {
269
- assert(assertion.verify_count == 0);
270
- assertion.verify_count = std.math.min(commit_entries_max, assertion.objects.len);
271
- if (assertion.verify_count == 0) return;
272
-
273
- assertion.groove.prefetch_setup(null);
274
- for (assertion.objects[0..assertion.verify_count]) |*object| {
275
- assertion.groove.prefetch_enqueue(object.id);
276
- }
277
-
278
- assertion.groove.prefetch(prefetch_callback, &assertion.prefetch_context);
279
- }
280
-
281
- fn prefetch_callback(prefetch_context: *Groove.PrefetchContext) void {
282
- const assertion = @fieldParentPtr(@This(), "prefetch_context", prefetch_context);
283
- assert(assertion.verify_count > 0);
284
-
285
- {
286
- for (assertion.objects[0..assertion.verify_count]) |*object| {
287
- log.debug("verifying {} for id={}", .{ visibility, object.id });
288
- const result = assertion.groove.get(object.id);
289
-
290
- switch (visibility) {
291
- .invisible => assert(result == null),
292
- .visible => {
293
- assert(result != null);
294
- assert(std.mem.eql(u8, std.mem.asBytes(result.?), std.mem.asBytes(object)));
295
- },
296
- }
297
- }
298
- }
299
-
300
- assertion.objects = assertion.objects[assertion.verify_count..];
301
- assertion.verify_count = 0;
302
- assertion.verify();
303
- }
304
- };
305
-
306
- var assertion = VisibilityAssertion{ .objects = objects, .groove = groove };
307
- assertion.verify();
308
- while (assertion.verify_count > 0) try env.tick();
309
- }
310
-
311
- fn run() !void {
312
- var env: Environment = undefined;
313
-
314
- const must_create = false;
315
- try env.init(must_create);
316
-
317
- // We will be manually deinitializing during the test to simulate crashes and recovery.
318
- // If an error occurs during re-initialization, we don't want to trip this call to deinit().
319
- var crashing = false;
320
- defer if (!crashing) env.deinit();
321
-
322
- // Open the superblock then forest to start inserting accounts and transfers.
323
- try env.open();
324
-
325
- // Recording types for verification
326
- var inserted = std.ArrayList(Account).init(allocator);
327
- defer inserted.deinit();
328
-
329
- const accounts_to_insert_per_op = 1; // forest_options.accounts.commit_entries_max;
330
- const iterations = 4;
331
-
332
- var op: u64 = 1;
333
- var id: u128 = 1;
334
- var timestamp: u64 = 42;
335
- var crash_probability = std.rand.DefaultPrng.init(1337);
336
-
337
- var iter: usize = 0;
338
- while (iter < (accounts_to_insert_per_op * constants.lsm_batch_multiple * iterations)) : (iter += 1) {
339
- // Insert a bunch of accounts
340
-
341
- var i: u32 = 0;
342
- while (i < accounts_to_insert_per_op) : (i += 1) {
343
- defer id += 1;
344
- defer timestamp += 1;
345
- const account = Account{
346
- .id = id,
347
- .timestamp = timestamp,
348
- .user_data = 0,
349
- .reserved = [_]u8{0} ** 48,
350
- .ledger = 710, // Let's use the ISO-4217 Code Number for ZAR
351
- .code = 1000, // A chart of accounts code to describe this as a clearing account.
352
- .flags = .{ .debits_must_not_exceed_credits = true },
353
- .debits_pending = 0,
354
- .debits_posted = 0,
355
- .credits_pending = 0,
356
- .credits_posted = 42,
357
- };
358
-
359
- // Insert an account ...
360
- const groove = &env.forest.grooves.accounts;
361
- groove.put(&account);
362
-
363
- // ..and make sure it can be retrieved
364
- try env.assert_visibility(
365
- .visible,
366
- &env.forest.grooves.accounts,
367
- @as([]const Account, &.{account}),
368
- forest_options.accounts.tree_options_object.commit_entries_max,
369
- );
370
-
371
- // Record the successfull insertion.
372
- try inserted.append(account);
373
- }
374
-
375
- // compact the forest
376
- try env.compact(op);
377
- defer op += 1;
378
-
379
- // Checkpoint when the forest finishes compaction.
380
- // Don't repeat a checkpoint (commit_min must always advance).
381
- const checkpoint_op = op -| constants.lsm_batch_multiple;
382
- if (checkpoint_op % constants.lsm_batch_multiple == constants.lsm_batch_multiple - 1 and
383
- checkpoint_op != env.superblock.staging.vsr_state.commit_min)
384
- {
385
- // Checkpoint the forest then superblock
386
- env.superblock.staging.vsr_state.commit_min = checkpoint_op;
387
- env.superblock.staging.vsr_state.commit_max = checkpoint_op;
388
- try env.checkpoint();
389
-
390
- const checkpointed = inserted.items[0 .. checkpoint_op * accounts_to_insert_per_op];
391
- const uncommitted = inserted.items[checkpointed.len..];
392
- log.debug("checkpointed={d} uncommitted={d}", .{ checkpointed.len, uncommitted.len });
393
- assert(uncommitted.len == constants.lsm_batch_multiple * accounts_to_insert_per_op);
394
-
395
- // Randomly initiate a crash
396
- if (crash_probability.random().uintLessThanBiased(u32, 100) >= 50) {
397
- // Simulate crashing and restoring.
398
- log.debug("simulating crash", .{});
399
- crashing = true;
400
- {
401
- env.deinit();
402
- try env.init(must_create);
403
-
404
- // Re-open the superblock and forest.
405
- try env.open();
406
-
407
- // Double check the forest DOES NOT contain the un-checkpointed values (negative space)
408
- try env.assert_visibility(
409
- .invisible,
410
- &env.forest.grooves.accounts,
411
- uncommitted,
412
- forest_options.accounts.tree_options_object.commit_entries_max,
413
- );
414
-
415
- // Reset everything to after checkpoint
416
- op = checkpoint_op;
417
- inserted.items.len = checkpointed.len;
418
- id = checkpointed[checkpointed.len - 1].id + 1;
419
- timestamp = checkpointed[checkpointed.len - 1].timestamp + 1;
420
- }
421
- crashing = false;
422
- }
423
-
424
- // Double check the forest contains the checkpointed values (positive space)
425
- try env.assert_visibility(
426
- .visible,
427
- &env.forest.grooves.accounts,
428
- checkpointed,
429
- forest_options.accounts.tree_options_object.commit_entries_max,
430
- );
431
- }
432
- }
433
- }
434
- };
435
-
436
- pub fn main() !void {
437
- try Environment.format(); // NOTE: this can be commented out after first run to speed up testing.
438
- try Environment.run(); //try do_simple();
439
- }