tigerbeetle-node 0.11.12 → 0.12.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (143)
  1. package/README.md +212 -196
  2. package/dist/bin/aarch64-linux-gnu/client.node +0 -0
  3. package/dist/bin/aarch64-linux-musl/client.node +0 -0
  4. package/dist/bin/aarch64-macos/client.node +0 -0
  5. package/dist/bin/x86_64-linux-gnu/client.node +0 -0
  6. package/dist/bin/x86_64-linux-musl/client.node +0 -0
  7. package/dist/bin/x86_64-macos/client.node +0 -0
  8. package/dist/index.js +33 -1
  9. package/dist/index.js.map +1 -1
  10. package/package-lock.json +66 -0
  11. package/package.json +8 -17
  12. package/src/index.ts +56 -1
  13. package/src/node.zig +10 -9
  14. package/dist/.client.node.sha256 +0 -1
  15. package/scripts/build_lib.sh +0 -61
  16. package/scripts/download_node_headers.sh +0 -32
  17. package/src/tigerbeetle/scripts/benchmark.bat +0 -48
  18. package/src/tigerbeetle/scripts/benchmark.sh +0 -66
  19. package/src/tigerbeetle/scripts/confirm_image.sh +0 -44
  20. package/src/tigerbeetle/scripts/fuzz_loop.sh +0 -15
  21. package/src/tigerbeetle/scripts/fuzz_unique_errors.sh +0 -7
  22. package/src/tigerbeetle/scripts/install.bat +0 -7
  23. package/src/tigerbeetle/scripts/install.sh +0 -21
  24. package/src/tigerbeetle/scripts/install_zig.bat +0 -113
  25. package/src/tigerbeetle/scripts/install_zig.sh +0 -90
  26. package/src/tigerbeetle/scripts/lint.zig +0 -199
  27. package/src/tigerbeetle/scripts/pre-commit.sh +0 -9
  28. package/src/tigerbeetle/scripts/scripts/benchmark.bat +0 -48
  29. package/src/tigerbeetle/scripts/scripts/benchmark.sh +0 -66
  30. package/src/tigerbeetle/scripts/scripts/confirm_image.sh +0 -44
  31. package/src/tigerbeetle/scripts/scripts/fuzz_loop.sh +0 -15
  32. package/src/tigerbeetle/scripts/scripts/fuzz_unique_errors.sh +0 -7
  33. package/src/tigerbeetle/scripts/scripts/install.bat +0 -7
  34. package/src/tigerbeetle/scripts/scripts/install.sh +0 -21
  35. package/src/tigerbeetle/scripts/scripts/install_zig.bat +0 -113
  36. package/src/tigerbeetle/scripts/scripts/install_zig.sh +0 -90
  37. package/src/tigerbeetle/scripts/scripts/lint.zig +0 -199
  38. package/src/tigerbeetle/scripts/scripts/pre-commit.sh +0 -9
  39. package/src/tigerbeetle/scripts/scripts/shellcheck.sh +0 -5
  40. package/src/tigerbeetle/scripts/scripts/tests_on_alpine.sh +0 -10
  41. package/src/tigerbeetle/scripts/scripts/tests_on_ubuntu.sh +0 -14
  42. package/src/tigerbeetle/scripts/scripts/upgrade_ubuntu_kernel.sh +0 -48
  43. package/src/tigerbeetle/scripts/scripts/validate_docs.sh +0 -23
  44. package/src/tigerbeetle/scripts/scripts/vr_state_enumerate +0 -46
  45. package/src/tigerbeetle/scripts/shellcheck.sh +0 -5
  46. package/src/tigerbeetle/scripts/tests_on_alpine.sh +0 -10
  47. package/src/tigerbeetle/scripts/tests_on_ubuntu.sh +0 -14
  48. package/src/tigerbeetle/scripts/upgrade_ubuntu_kernel.sh +0 -48
  49. package/src/tigerbeetle/scripts/validate_docs.sh +0 -23
  50. package/src/tigerbeetle/scripts/vr_state_enumerate +0 -46
  51. package/src/tigerbeetle/src/benchmark.zig +0 -314
  52. package/src/tigerbeetle/src/config.zig +0 -234
  53. package/src/tigerbeetle/src/constants.zig +0 -436
  54. package/src/tigerbeetle/src/ewah.zig +0 -286
  55. package/src/tigerbeetle/src/ewah_benchmark.zig +0 -120
  56. package/src/tigerbeetle/src/ewah_fuzz.zig +0 -130
  57. package/src/tigerbeetle/src/fifo.zig +0 -120
  58. package/src/tigerbeetle/src/io/benchmark.zig +0 -213
  59. package/src/tigerbeetle/src/io/darwin.zig +0 -814
  60. package/src/tigerbeetle/src/io/linux.zig +0 -1062
  61. package/src/tigerbeetle/src/io/test.zig +0 -643
  62. package/src/tigerbeetle/src/io/windows.zig +0 -1183
  63. package/src/tigerbeetle/src/io.zig +0 -34
  64. package/src/tigerbeetle/src/iops.zig +0 -107
  65. package/src/tigerbeetle/src/lsm/README.md +0 -308
  66. package/src/tigerbeetle/src/lsm/binary_search.zig +0 -341
  67. package/src/tigerbeetle/src/lsm/bloom_filter.zig +0 -125
  68. package/src/tigerbeetle/src/lsm/compaction.zig +0 -603
  69. package/src/tigerbeetle/src/lsm/composite_key.zig +0 -77
  70. package/src/tigerbeetle/src/lsm/direction.zig +0 -11
  71. package/src/tigerbeetle/src/lsm/eytzinger.zig +0 -587
  72. package/src/tigerbeetle/src/lsm/eytzinger_benchmark.zig +0 -330
  73. package/src/tigerbeetle/src/lsm/forest.zig +0 -204
  74. package/src/tigerbeetle/src/lsm/forest_fuzz.zig +0 -401
  75. package/src/tigerbeetle/src/lsm/grid.zig +0 -573
  76. package/src/tigerbeetle/src/lsm/groove.zig +0 -972
  77. package/src/tigerbeetle/src/lsm/k_way_merge.zig +0 -474
  78. package/src/tigerbeetle/src/lsm/level_iterator.zig +0 -332
  79. package/src/tigerbeetle/src/lsm/manifest.zig +0 -617
  80. package/src/tigerbeetle/src/lsm/manifest_level.zig +0 -877
  81. package/src/tigerbeetle/src/lsm/manifest_log.zig +0 -789
  82. package/src/tigerbeetle/src/lsm/manifest_log_fuzz.zig +0 -691
  83. package/src/tigerbeetle/src/lsm/merge_iterator.zig +0 -106
  84. package/src/tigerbeetle/src/lsm/node_pool.zig +0 -235
  85. package/src/tigerbeetle/src/lsm/posted_groove.zig +0 -378
  86. package/src/tigerbeetle/src/lsm/segmented_array.zig +0 -1328
  87. package/src/tigerbeetle/src/lsm/segmented_array_benchmark.zig +0 -148
  88. package/src/tigerbeetle/src/lsm/segmented_array_fuzz.zig +0 -9
  89. package/src/tigerbeetle/src/lsm/set_associative_cache.zig +0 -850
  90. package/src/tigerbeetle/src/lsm/table.zig +0 -1031
  91. package/src/tigerbeetle/src/lsm/table_immutable.zig +0 -203
  92. package/src/tigerbeetle/src/lsm/table_iterator.zig +0 -340
  93. package/src/tigerbeetle/src/lsm/table_mutable.zig +0 -220
  94. package/src/tigerbeetle/src/lsm/test.zig +0 -438
  95. package/src/tigerbeetle/src/lsm/tree.zig +0 -1193
  96. package/src/tigerbeetle/src/lsm/tree_fuzz.zig +0 -474
  97. package/src/tigerbeetle/src/message_bus.zig +0 -1012
  98. package/src/tigerbeetle/src/message_pool.zig +0 -156
  99. package/src/tigerbeetle/src/ring_buffer.zig +0 -399
  100. package/src/tigerbeetle/src/simulator.zig +0 -569
  101. package/src/tigerbeetle/src/state_machine/auditor.zig +0 -577
  102. package/src/tigerbeetle/src/state_machine/workload.zig +0 -883
  103. package/src/tigerbeetle/src/state_machine.zig +0 -1881
  104. package/src/tigerbeetle/src/static_allocator.zig +0 -65
  105. package/src/tigerbeetle/src/stdx.zig +0 -162
  106. package/src/tigerbeetle/src/storage.zig +0 -393
  107. package/src/tigerbeetle/src/testing/cluster/message_bus.zig +0 -82
  108. package/src/tigerbeetle/src/testing/cluster/network.zig +0 -237
  109. package/src/tigerbeetle/src/testing/cluster/state_checker.zig +0 -169
  110. package/src/tigerbeetle/src/testing/cluster/storage_checker.zig +0 -202
  111. package/src/tigerbeetle/src/testing/cluster.zig +0 -443
  112. package/src/tigerbeetle/src/testing/fuzz.zig +0 -140
  113. package/src/tigerbeetle/src/testing/hash_log.zig +0 -66
  114. package/src/tigerbeetle/src/testing/id.zig +0 -99
  115. package/src/tigerbeetle/src/testing/packet_simulator.zig +0 -364
  116. package/src/tigerbeetle/src/testing/priority_queue.zig +0 -645
  117. package/src/tigerbeetle/src/testing/reply_sequence.zig +0 -139
  118. package/src/tigerbeetle/src/testing/state_machine.zig +0 -249
  119. package/src/tigerbeetle/src/testing/storage.zig +0 -757
  120. package/src/tigerbeetle/src/testing/table.zig +0 -247
  121. package/src/tigerbeetle/src/testing/time.zig +0 -84
  122. package/src/tigerbeetle/src/tigerbeetle.zig +0 -227
  123. package/src/tigerbeetle/src/time.zig +0 -112
  124. package/src/tigerbeetle/src/tracer.zig +0 -529
  125. package/src/tigerbeetle/src/unit_tests.zig +0 -42
  126. package/src/tigerbeetle/src/vopr.zig +0 -495
  127. package/src/tigerbeetle/src/vsr/README.md +0 -209
  128. package/src/tigerbeetle/src/vsr/client.zig +0 -544
  129. package/src/tigerbeetle/src/vsr/clock.zig +0 -853
  130. package/src/tigerbeetle/src/vsr/journal.zig +0 -2413
  131. package/src/tigerbeetle/src/vsr/journal_format_fuzz.zig +0 -111
  132. package/src/tigerbeetle/src/vsr/marzullo.zig +0 -309
  133. package/src/tigerbeetle/src/vsr/replica.zig +0 -6381
  134. package/src/tigerbeetle/src/vsr/replica_format.zig +0 -219
  135. package/src/tigerbeetle/src/vsr/superblock.zig +0 -1631
  136. package/src/tigerbeetle/src/vsr/superblock_client_table.zig +0 -256
  137. package/src/tigerbeetle/src/vsr/superblock_free_set.zig +0 -929
  138. package/src/tigerbeetle/src/vsr/superblock_free_set_fuzz.zig +0 -334
  139. package/src/tigerbeetle/src/vsr/superblock_fuzz.zig +0 -390
  140. package/src/tigerbeetle/src/vsr/superblock_manifest.zig +0 -615
  141. package/src/tigerbeetle/src/vsr/superblock_quorums.zig +0 -394
  142. package/src/tigerbeetle/src/vsr/superblock_quorums_fuzz.zig +0 -314
  143. package/src/tigerbeetle/src/vsr.zig +0 -1352
@@ -1,378 +0,0 @@
1
- const std = @import("std");
2
- const builtin = @import("builtin");
3
- const assert = std.debug.assert;
4
- const math = std.math;
5
- const mem = std.mem;
6
-
7
- const constants = @import("../constants.zig");
8
-
9
- const TableType = @import("table.zig").TableType;
10
- const TreeType = @import("tree.zig").TreeType;
11
- const GridType = @import("grid.zig").GridType;
12
- const NodePool = @import("node_pool.zig").NodePool(constants.lsm_manifest_node_size, 16);
13
-
14
- const snapshot_latest = @import("tree.zig").snapshot_latest;
15
- const compaction_snapshot_for_op = @import("tree.zig").compaction_snapshot_for_op;
16
-
17
- /// This type wraps a single LSM tree in the API needed to integrate it with the Forest.
18
- /// TigerBeetle's state machine requires a map from u128 ID to posted boolean for transfers
19
- /// and this type implements that.
20
- /// TODO Make the LSM Forest library flexible enough to be able to get rid of this special case.
21
- pub fn PostedGrooveType(comptime Storage: type) type {
22
- return struct {
23
- const PostedGroove = @This();
24
-
25
- const Value = extern struct {
26
- id: u128,
27
- data: enum(u8) {
28
- posted,
29
- voided,
30
- tombstone,
31
- },
32
- padding: [15]u8 = [_]u8{0} ** 15,
33
-
34
- comptime {
35
- // Assert that there is no implicit padding.
36
- assert(@sizeOf(Value) == 32);
37
- assert(@bitSizeOf(Value) == 32 * 8);
38
- }
39
-
40
- inline fn compare_keys(a: u128, b: u128) math.Order {
41
- return math.order(a, b);
42
- }
43
-
44
- inline fn key_from_value(value: *const Value) u128 {
45
- return value.id;
46
- }
47
-
48
- const sentinel_key = math.maxInt(u128);
49
-
50
- inline fn tombstone(value: *const Value) bool {
51
- return value.data == .tombstone;
52
- }
53
-
54
- inline fn tombstone_from_key(id: u128) Value {
55
- return .{
56
- .id = id,
57
- .data = .tombstone,
58
- };
59
- }
60
- };
61
-
62
- const Table = TableType(
63
- u128,
64
- Value,
65
- Value.compare_keys,
66
- Value.key_from_value,
67
- Value.sentinel_key,
68
- Value.tombstone,
69
- Value.tombstone_from_key,
70
- .general,
71
- );
72
-
73
- const Tree = TreeType(Table, Storage, "posted_groove");
74
- const Grid = GridType(Storage);
75
-
76
- const PrefetchIDs = std.AutoHashMapUnmanaged(u128, void);
77
- const PrefetchObjects = std.AutoHashMapUnmanaged(u128, bool); // true:posted, false:voided
78
-
79
- tree: Tree,
80
-
81
- /// Object IDs enqueued to be prefetched.
82
- /// Prefetching ensures that point lookups against the latest snapshot are synchronous.
83
- /// This shields state machine implementations from the challenges of concurrency and I/O,
84
- /// and enables simple state machine function signatures that commit writes atomically.
85
- prefetch_ids: PrefetchIDs,
86
-
87
- /// The prefetched Objects. This hash map holds the subset of objects in the LSM tree
88
- /// that are required for the current commit. All get()/put()/remove() operations during
89
- /// the commit are both passed to the LSM trees and mirrored in this hash map. It is always
90
- /// sufficient to query this hashmap alone to know the state of the LSM trees.
91
- prefetch_objects: PrefetchObjects,
92
-
93
- /// The snapshot to prefetch from.
94
- prefetch_snapshot: ?u64,
95
-
96
- /// This field is necessary to expose the same open()/compact()/checkpoint() function
97
- /// signatures as the real Groove type.
98
- callback: ?fn (*PostedGroove) void = null,
99
-
100
- /// See comments for Groove.Options.
101
- pub const Options = struct {
102
- cache_entries_max: u32,
103
- prefetch_entries_max: u32,
104
- commit_entries_max: u32,
105
- };
106
-
107
- pub fn init(
108
- allocator: mem.Allocator,
109
- node_pool: *NodePool,
110
- grid: *Grid,
111
- options: Options,
112
- ) !PostedGroove {
113
- var tree = try Tree.init(
114
- allocator,
115
- node_pool,
116
- grid,
117
- .{
118
- .cache_entries_max = options.cache_entries_max,
119
- .commit_entries_max = options.commit_entries_max,
120
- },
121
- );
122
- errdefer tree.deinit(allocator);
123
-
124
- var prefetch_ids = PrefetchIDs{};
125
- try prefetch_ids.ensureTotalCapacity(allocator, options.prefetch_entries_max);
126
- errdefer prefetch_ids.deinit(allocator);
127
-
128
- var prefetch_objects = PrefetchObjects{};
129
- try prefetch_objects.ensureTotalCapacity(allocator, options.prefetch_entries_max);
130
- errdefer prefetch_objects.deinit(allocator);
131
-
132
- return PostedGroove{
133
- .tree = tree,
134
-
135
- .prefetch_ids = prefetch_ids,
136
- .prefetch_objects = prefetch_objects,
137
- .prefetch_snapshot = null,
138
- };
139
- }
140
-
141
- pub fn deinit(groove: *PostedGroove, allocator: mem.Allocator) void {
142
- groove.tree.deinit(allocator);
143
-
144
- groove.prefetch_ids.deinit(allocator);
145
- groove.prefetch_objects.deinit(allocator);
146
-
147
- groove.* = undefined;
148
- }
149
-
150
- pub fn get(groove: *const PostedGroove, id: u128) ?bool {
151
- return groove.prefetch_objects.get(id);
152
- }
153
-
154
- /// Must be called directly before the state machine begins queuing ids for prefetch.
155
- /// When `snapshot` is null, prefetch from the current snapshot.
156
- pub fn prefetch_setup(groove: *PostedGroove, snapshot: ?u64) void {
157
- // We may query the input tables of an ongoing compaction, but must not query the
158
- // output tables until the compaction is complete. (Until then, the output tables may
159
- // be in the manifest but not yet on disk).
160
- const snapshot_max = groove.tree.lookup_snapshot_max;
161
- const snapshot_target = snapshot orelse snapshot_max;
162
- assert(snapshot_target <= snapshot_max);
163
-
164
- if (groove.prefetch_snapshot == null) {
165
- groove.prefetch_objects.clearRetainingCapacity();
166
- } else {
167
- // If there is a snapshot already set from the previous prefetch_setup(), then its
168
- // prefetch() was never called, so there must already be no queued objects or ids.
169
- }
170
-
171
- groove.prefetch_snapshot = snapshot_target;
172
- assert(groove.prefetch_objects.count() == 0);
173
- assert(groove.prefetch_ids.count() == 0);
174
- }
175
-
176
- /// This must be called by the state machine for every key to be prefetched.
177
- /// We tolerate duplicate IDs enqueued by the state machine.
178
- /// For example, if all unique operations require the same two dependencies.
179
- pub fn prefetch_enqueue(groove: *PostedGroove, id: u128) void {
180
- if (groove.tree.lookup_from_memory(groove.prefetch_snapshot.?, id)) |value| {
181
- assert(value.id == id);
182
- switch (value.data) {
183
- .posted => groove.prefetch_objects.putAssumeCapacity(value.id, true),
184
- .voided => groove.prefetch_objects.putAssumeCapacity(value.id, false),
185
- .tombstone => {}, // Leave the ID out of prefetch_objects.
186
- }
187
- } else {
188
- groove.prefetch_ids.putAssumeCapacity(id, {});
189
- }
190
- }
191
-
192
- /// Ensure the objects corresponding to all ids enqueued with prefetch_enqueue() are
193
- /// available in `prefetch_objects`.
194
- pub fn prefetch(
195
- groove: *PostedGroove,
196
- callback: fn (*PrefetchContext) void,
197
- context: *PrefetchContext,
198
- ) void {
199
- context.* = .{
200
- .groove = groove,
201
- .callback = callback,
202
- .snapshot = groove.prefetch_snapshot.?,
203
- .id_iterator = groove.prefetch_ids.keyIterator(),
204
- };
205
- groove.prefetch_snapshot = null;
206
- context.start_workers();
207
- }
208
-
209
- pub const PrefetchContext = struct {
210
- groove: *PostedGroove,
211
- callback: fn (*PrefetchContext) void,
212
- snapshot: u64,
213
-
214
- id_iterator: PrefetchIDs.KeyIterator,
215
-
216
- /// The goal is to fully utilize the disk I/O to ensure the prefetch completes as
217
- /// quickly as possible, so we run multiple lookups in parallel based on the max
218
- /// I/O depth of the Grid.
219
- workers: [Grid.read_iops_max]PrefetchWorker = undefined,
220
- /// The number of workers that are currently running in parallel.
221
- workers_busy: u32 = 0,
222
-
223
- fn start_workers(context: *PrefetchContext) void {
224
- assert(context.workers_busy == 0);
225
-
226
- // Track an extra "worker" that will finish after the loop.
227
- //
228
- // This prevents `context.finish()` from being called within the loop body when
229
- // every worker finishes synchronously. `context.finish()` calls the user-provided
230
- // callback which may re-use the memory of this `PrefetchContext`. However, we
231
- // rely on `context` being well-defined for the loop condition.
232
- context.workers_busy += 1;
233
-
234
- for (context.workers) |*worker| {
235
- worker.* = .{ .context = context };
236
- context.workers_busy += 1;
237
- worker.lookup_start_next();
238
- }
239
-
240
- assert(context.workers_busy >= 1);
241
- context.worker_finished();
242
- }
243
-
244
- fn worker_finished(context: *PrefetchContext) void {
245
- context.workers_busy -= 1;
246
- if (context.workers_busy == 0) context.finish();
247
- }
248
-
249
- fn finish(context: *PrefetchContext) void {
250
- assert(context.workers_busy == 0);
251
-
252
- assert(context.id_iterator.next() == null);
253
- context.groove.prefetch_ids.clearRetainingCapacity();
254
- assert(context.groove.prefetch_ids.count() == 0);
255
-
256
- context.callback(context);
257
- }
258
- };
259
-
260
- pub const PrefetchWorker = struct {
261
- context: *PrefetchContext,
262
- lookup_id: Tree.LookupContext = undefined,
263
-
264
- /// Returns true if asynchronous I/O has been started.
265
- /// Returns false if there are no more IDs to prefetch.
266
- fn lookup_start_next(worker: *PrefetchWorker) void {
267
- const id = worker.context.id_iterator.next() orelse {
268
- worker.context.worker_finished();
269
- return;
270
- };
271
-
272
- if (constants.verify) {
273
- // This was checked in prefetch_enqueue().
274
- assert(worker.context.groove.tree.lookup_from_memory(worker.context.snapshot, id.*) == null);
275
- }
276
-
277
- // If not in the LSM tree's cache, the object must be read from disk and added
278
- // to the auxillary prefetch_objects hash map.
279
- // TODO: this LSM tree function needlessly checks the LSM tree's cache a
280
- // second time. Adding API to the LSM tree to avoid this may be worthwhile.
281
- worker.context.groove.tree.lookup_from_levels(
282
- lookup_id_callback,
283
- &worker.lookup_id,
284
- worker.context.snapshot,
285
- id.*,
286
- );
287
- }
288
-
289
- fn lookup_id_callback(
290
- completion: *Tree.LookupContext,
291
- result: ?*const Value,
292
- ) void {
293
- const worker = @fieldParentPtr(PrefetchWorker, "lookup_id", completion);
294
- const groove = worker.context.groove;
295
-
296
- if (result) |value| {
297
- switch (value.data) {
298
- .posted => {
299
- groove.prefetch_objects.putAssumeCapacityNoClobber(value.id, true);
300
- },
301
- .voided => {
302
- groove.prefetch_objects.putAssumeCapacityNoClobber(value.id, false);
303
- },
304
- .tombstone => {
305
- // Leave the ID out of prefetch_objects.
306
- },
307
- }
308
- }
309
- worker.lookup_start_next();
310
- }
311
- };
312
-
313
- pub fn put_no_clobber(groove: *PostedGroove, id: u128, posted: bool) void {
314
- const gop = groove.prefetch_objects.getOrPutAssumeCapacity(id);
315
- assert(!gop.found_existing);
316
-
317
- const value: Value = .{
318
- .id = id,
319
- .data = if (posted) .posted else .voided,
320
- };
321
- groove.tree.put(&value);
322
- gop.value_ptr.* = posted;
323
- }
324
-
325
- pub fn remove(groove: *PostedGroove, id: u128) void {
326
- assert(groove.prefetch_objects.remove(id));
327
- groove.tree.remove(&Value{ .id = id, .data = .tombstone });
328
- }
329
-
330
- fn tree_callback(tree: *Tree) void {
331
- const groove = @fieldParentPtr(PostedGroove, "tree", tree);
332
- const callback = groove.callback.?;
333
- groove.callback = null;
334
- callback(groove);
335
- }
336
-
337
- pub fn open(groove: *PostedGroove, callback: fn (*PostedGroove) void) void {
338
- assert(groove.callback == null);
339
- groove.callback = callback;
340
- groove.tree.open(tree_callback);
341
- }
342
-
343
- pub fn compact(groove: *PostedGroove, callback: fn (*PostedGroove) void, op: u64) void {
344
- assert(groove.callback == null);
345
- groove.callback = callback;
346
- groove.tree.compact(tree_callback, op);
347
- }
348
-
349
- pub fn checkpoint(groove: *PostedGroove, callback: fn (*PostedGroove) void) void {
350
- assert(groove.callback == null);
351
- groove.callback = callback;
352
- groove.tree.checkpoint(tree_callback);
353
- }
354
- };
355
- }
356
-
357
- test "PostedGroove" {
358
- const Storage = @import("../storage.zig").Storage;
359
-
360
- const PostedGroove = PostedGrooveType(Storage);
361
-
362
- _ = PostedGroove.init;
363
- _ = PostedGroove.deinit;
364
-
365
- _ = PostedGroove.get;
366
- _ = PostedGroove.put_no_clobber;
367
- _ = PostedGroove.remove;
368
-
369
- _ = PostedGroove.compact;
370
- _ = PostedGroove.checkpoint;
371
-
372
- _ = PostedGroove.prefetch_enqueue;
373
- _ = PostedGroove.prefetch;
374
- _ = PostedGroove.prefetch_setup;
375
-
376
- std.testing.refAllDecls(PostedGroove.PrefetchWorker);
377
- std.testing.refAllDecls(PostedGroove.PrefetchContext);
378
- }