tigerbeetle-node 0.9.0 → 0.11.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (112)
  1. package/README.md +305 -103
  2. package/dist/index.d.ts +70 -67
  3. package/dist/index.js +70 -67
  4. package/dist/index.js.map +1 -1
  5. package/package.json +6 -6
  6. package/scripts/download_node_headers.sh +14 -7
  7. package/src/index.ts +11 -10
  8. package/src/node.zig +22 -20
  9. package/src/tigerbeetle/scripts/benchmark.bat +4 -3
  10. package/src/tigerbeetle/scripts/benchmark.sh +25 -10
  11. package/src/tigerbeetle/scripts/confirm_image.sh +44 -0
  12. package/src/tigerbeetle/scripts/fuzz_loop.sh +15 -0
  13. package/src/tigerbeetle/scripts/fuzz_unique_errors.sh +7 -0
  14. package/src/tigerbeetle/scripts/install.sh +20 -4
  15. package/src/tigerbeetle/scripts/install_zig.bat +5 -1
  16. package/src/tigerbeetle/scripts/install_zig.sh +32 -26
  17. package/src/tigerbeetle/scripts/pre-commit.sh +9 -0
  18. package/src/tigerbeetle/scripts/shellcheck.sh +5 -0
  19. package/src/tigerbeetle/scripts/tests_on_alpine.sh +10 -0
  20. package/src/tigerbeetle/scripts/tests_on_ubuntu.sh +14 -0
  21. package/src/tigerbeetle/scripts/upgrade_ubuntu_kernel.sh +12 -3
  22. package/src/tigerbeetle/src/benchmark.zig +19 -9
  23. package/src/tigerbeetle/src/benchmark_array_search.zig +317 -0
  24. package/src/tigerbeetle/src/benchmarks/perf.zig +299 -0
  25. package/src/tigerbeetle/src/c/tb_client/context.zig +103 -0
  26. package/src/tigerbeetle/src/c/tb_client/packet.zig +80 -0
  27. package/src/tigerbeetle/src/c/tb_client/signal.zig +288 -0
  28. package/src/tigerbeetle/src/c/tb_client/thread.zig +328 -0
  29. package/src/tigerbeetle/src/c/tb_client.h +221 -0
  30. package/src/tigerbeetle/src/c/tb_client.zig +104 -0
  31. package/src/tigerbeetle/src/c/test.zig +1 -0
  32. package/src/tigerbeetle/src/cli.zig +143 -84
  33. package/src/tigerbeetle/src/config.zig +161 -20
  34. package/src/tigerbeetle/src/demo.zig +14 -8
  35. package/src/tigerbeetle/src/demo_05_post_pending_transfers.zig +2 -2
  36. package/src/tigerbeetle/src/ewah.zig +318 -0
  37. package/src/tigerbeetle/src/ewah_benchmark.zig +121 -0
  38. package/src/tigerbeetle/src/eytzinger_benchmark.zig +317 -0
  39. package/src/tigerbeetle/src/fifo.zig +17 -1
  40. package/src/tigerbeetle/src/io/darwin.zig +12 -10
  41. package/src/tigerbeetle/src/io/linux.zig +25 -9
  42. package/src/tigerbeetle/src/io/windows.zig +13 -9
  43. package/src/tigerbeetle/src/iops.zig +101 -0
  44. package/src/tigerbeetle/src/lsm/README.md +214 -0
  45. package/src/tigerbeetle/src/lsm/binary_search.zig +341 -0
  46. package/src/tigerbeetle/src/lsm/bloom_filter.zig +125 -0
  47. package/src/tigerbeetle/src/lsm/compaction.zig +557 -0
  48. package/src/tigerbeetle/src/lsm/composite_key.zig +77 -0
  49. package/src/tigerbeetle/src/lsm/direction.zig +11 -0
  50. package/src/tigerbeetle/src/lsm/eytzinger.zig +587 -0
  51. package/src/tigerbeetle/src/lsm/forest.zig +204 -0
  52. package/src/tigerbeetle/src/lsm/forest_fuzz.zig +412 -0
  53. package/src/tigerbeetle/src/lsm/grid.zig +549 -0
  54. package/src/tigerbeetle/src/lsm/groove.zig +1002 -0
  55. package/src/tigerbeetle/src/lsm/k_way_merge.zig +474 -0
  56. package/src/tigerbeetle/src/lsm/level_iterator.zig +315 -0
  57. package/src/tigerbeetle/src/lsm/manifest.zig +580 -0
  58. package/src/tigerbeetle/src/lsm/manifest_level.zig +925 -0
  59. package/src/tigerbeetle/src/lsm/manifest_log.zig +953 -0
  60. package/src/tigerbeetle/src/lsm/node_pool.zig +231 -0
  61. package/src/tigerbeetle/src/lsm/posted_groove.zig +387 -0
  62. package/src/tigerbeetle/src/lsm/segmented_array.zig +1318 -0
  63. package/src/tigerbeetle/src/lsm/segmented_array_benchmark.zig +148 -0
  64. package/src/tigerbeetle/src/lsm/segmented_array_fuzz.zig +9 -0
  65. package/src/tigerbeetle/src/lsm/set_associative_cache.zig +894 -0
  66. package/src/tigerbeetle/src/lsm/table.zig +967 -0
  67. package/src/tigerbeetle/src/lsm/table_immutable.zig +203 -0
  68. package/src/tigerbeetle/src/lsm/table_iterator.zig +306 -0
  69. package/src/tigerbeetle/src/lsm/table_mutable.zig +174 -0
  70. package/src/tigerbeetle/src/lsm/test.zig +423 -0
  71. package/src/tigerbeetle/src/lsm/tree.zig +1090 -0
  72. package/src/tigerbeetle/src/lsm/tree_fuzz.zig +457 -0
  73. package/src/tigerbeetle/src/main.zig +141 -109
  74. package/src/tigerbeetle/src/message_bus.zig +49 -48
  75. package/src/tigerbeetle/src/message_pool.zig +22 -12
  76. package/src/tigerbeetle/src/ring_buffer.zig +126 -30
  77. package/src/tigerbeetle/src/simulator.zig +205 -140
  78. package/src/tigerbeetle/src/state_machine.zig +1268 -721
  79. package/src/tigerbeetle/src/static_allocator.zig +65 -0
  80. package/src/tigerbeetle/src/storage.zig +40 -14
  81. package/src/tigerbeetle/src/test/accounting/auditor.zig +577 -0
  82. package/src/tigerbeetle/src/test/accounting/workload.zig +819 -0
  83. package/src/tigerbeetle/src/test/cluster.zig +104 -88
  84. package/src/tigerbeetle/src/test/conductor.zig +365 -0
  85. package/src/tigerbeetle/src/test/fuzz.zig +121 -0
  86. package/src/tigerbeetle/src/test/id.zig +89 -0
  87. package/src/tigerbeetle/src/test/message_bus.zig +15 -24
  88. package/src/tigerbeetle/src/test/network.zig +26 -17
  89. package/src/tigerbeetle/src/test/priority_queue.zig +645 -0
  90. package/src/tigerbeetle/src/test/state_checker.zig +94 -68
  91. package/src/tigerbeetle/src/test/state_machine.zig +135 -69
  92. package/src/tigerbeetle/src/test/storage.zig +78 -28
  93. package/src/tigerbeetle/src/tigerbeetle.zig +19 -16
  94. package/src/tigerbeetle/src/unit_tests.zig +15 -0
  95. package/src/tigerbeetle/src/util.zig +51 -0
  96. package/src/tigerbeetle/src/vopr.zig +494 -0
  97. package/src/tigerbeetle/src/vopr_hub/README.md +58 -0
  98. package/src/tigerbeetle/src/vopr_hub/SETUP.md +199 -0
  99. package/src/tigerbeetle/src/vopr_hub/go.mod +3 -0
  100. package/src/tigerbeetle/src/vopr_hub/main.go +1022 -0
  101. package/src/tigerbeetle/src/vopr_hub/scheduler/go.mod +3 -0
  102. package/src/tigerbeetle/src/vopr_hub/scheduler/main.go +403 -0
  103. package/src/tigerbeetle/src/vsr/client.zig +34 -7
  104. package/src/tigerbeetle/src/vsr/journal.zig +164 -174
  105. package/src/tigerbeetle/src/vsr/replica.zig +1602 -651
  106. package/src/tigerbeetle/src/vsr/superblock.zig +1761 -0
  107. package/src/tigerbeetle/src/vsr/superblock_client_table.zig +255 -0
  108. package/src/tigerbeetle/src/vsr/superblock_free_set.zig +644 -0
  109. package/src/tigerbeetle/src/vsr/superblock_manifest.zig +561 -0
  110. package/src/tigerbeetle/src/vsr.zig +118 -170
  111. package/src/tigerbeetle/scripts/vopr.bat +0 -48
  112. package/src/tigerbeetle/scripts/vopr.sh +0 -33
@@ -0,0 +1,1002 @@
1
+ const std = @import("std");
2
+ const builtin = @import("builtin");
3
+ const assert = std.debug.assert;
4
+ const math = std.math;
5
+ const mem = std.mem;
6
+
7
+ const config = @import("../config.zig");
8
+
9
+ const TableType = @import("table.zig").TableType;
10
+ const TreeType = @import("tree.zig").TreeType;
11
+ const GridType = @import("grid.zig").GridType;
12
+ const CompositeKey = @import("composite_key.zig").CompositeKey;
13
+ const NodePool = @import("node_pool.zig").NodePool(config.lsm_manifest_node_size, 16);
14
+
15
+ const snapshot_latest = @import("tree.zig").snapshot_latest;
16
+ const compaction_snapshot_for_op = @import("tree.zig").compaction_snapshot_for_op;
17
+
18
+ fn ObjectTreeHelpers(comptime Object: type) type {
19
+ assert(@hasField(Object, "id"));
20
+ assert(std.meta.fieldInfo(Object, .id).field_type == u128);
21
+ assert(@hasField(Object, "timestamp"));
22
+ assert(std.meta.fieldInfo(Object, .timestamp).field_type == u64);
23
+
24
+ return struct {
25
+ inline fn compare_keys(timestamp_a: u64, timestamp_b: u64) std.math.Order {
26
+ return std.math.order(timestamp_a, timestamp_b);
27
+ }
28
+
29
+ inline fn key_from_value(value: *const Object) u64 {
30
+ return value.timestamp & ~@as(u64, tombstone_bit);
31
+ }
32
+
33
+ const sentinel_key = std.math.maxInt(u64);
34
+ const tombstone_bit = 1 << (64 - 1);
35
+
36
+ inline fn tombstone(value: *const Object) bool {
37
+ return (value.timestamp & tombstone_bit) != 0;
38
+ }
39
+
40
+ inline fn tombstone_from_key(timestamp: u64) Object {
41
+ var value = std.mem.zeroes(Object); // Full zero-initialized Value.
42
+ value.timestamp = timestamp | tombstone_bit;
43
+ return value;
44
+ }
45
+ };
46
+ }
47
+
48
+ const IdTreeValue = extern struct {
49
+ id: u128,
50
+ timestamp: u64,
51
+ padding: u64 = 0,
52
+
53
+ comptime {
54
+ // Assert that there is no implicit padding.
55
+ assert(@sizeOf(IdTreeValue) == 32);
56
+ assert(@bitSizeOf(IdTreeValue) == 32 * 8);
57
+ }
58
+
59
+ inline fn compare_keys(a: u128, b: u128) std.math.Order {
60
+ return std.math.order(a, b);
61
+ }
62
+
63
+ inline fn key_from_value(value: *const IdTreeValue) u128 {
64
+ return value.id;
65
+ }
66
+
67
+ const sentinel_key = std.math.maxInt(u128);
68
+ const tombstone_bit = 1 << (64 - 1);
69
+
70
+ inline fn tombstone(value: *const IdTreeValue) bool {
71
+ return (value.timestamp & tombstone_bit) != 0;
72
+ }
73
+
74
+ inline fn tombstone_from_key(id: u128) IdTreeValue {
75
+ return .{
76
+ .id = id,
77
+ .timestamp = tombstone_bit,
78
+ };
79
+ }
80
+ };
81
+
82
+ /// Normalizes index tree field types into either u64 or u128 for CompositeKey
83
+ fn IndexCompositeKeyType(comptime Field: type) type {
84
+ switch (@typeInfo(Field)) {
85
+ .Enum => |e| {
86
+ return switch (@bitSizeOf(e.tag_type)) {
87
+ 0...@bitSizeOf(u64) => u64,
88
+ @bitSizeOf(u65)...@bitSizeOf(u128) => u128,
89
+ else => @compileError("Unsupported enum tag for index: " ++ @typeName(e.tag_type)),
90
+ };
91
+ },
92
+ .Int => |i| {
93
+ if (i.signedness != .unsigned) {
94
+ @compileError("Index int type (" ++ @typeName(Field) ++ ") is not unsigned");
95
+ }
96
+ return switch (@bitSizeOf(Field)) {
97
+ 0...@bitSizeOf(u64) => u64,
98
+ @bitSizeOf(u65)...@bitSizeOf(u128) => u128,
99
+ else => @compileError("Unsupported int type for index: " ++ @typeName(Field)),
100
+ };
101
+ },
102
+ else => @compileError("Index type " ++ @typeName(Field) ++ " is not supported"),
103
+ }
104
+ }
105
+
106
+ comptime {
107
+ assert(IndexCompositeKeyType(u1) == u64);
108
+ assert(IndexCompositeKeyType(u16) == u64);
109
+ assert(IndexCompositeKeyType(enum(u16) { x }) == u64);
110
+
111
+ assert(IndexCompositeKeyType(u32) == u64);
112
+ assert(IndexCompositeKeyType(u63) == u64);
113
+ assert(IndexCompositeKeyType(u64) == u64);
114
+
115
+ assert(IndexCompositeKeyType(enum(u65) { x }) == u128);
116
+ assert(IndexCompositeKeyType(u65) == u128);
117
+ assert(IndexCompositeKeyType(u128) == u128);
118
+ }
119
+
120
+ fn IndexTreeType(
121
+ comptime Storage: type,
122
+ comptime Field: type,
123
+ comptime tree_name: []const u8,
124
+ ) type {
125
+ const Key = CompositeKey(IndexCompositeKeyType(Field));
126
+ const Table = TableType(
127
+ Key,
128
+ Key.Value,
129
+ Key.compare_keys,
130
+ Key.key_from_value,
131
+ Key.sentinel_key,
132
+ Key.tombstone,
133
+ Key.tombstone_from_key,
134
+ );
135
+
136
+ return TreeType(Table, Storage, tree_name);
137
+ }
138
+
139
+ /// A Groove is a collection of LSM trees auto generated for fields on a struct type
140
+ /// as well as custom derived fields from said struct type.
141
+ ///
142
+ /// Invariants:
143
+ /// - Between beats, all of a groove's trees share the same lookup_snapshot_max.
144
+ pub fn GrooveType(
145
+ comptime Storage: type,
146
+ comptime Object: type,
147
+ /// An anonymous struct instance which contains the following:
148
+ ///
149
+ /// - ignored: [][]const u8:
150
+ /// An array of fields on the Object type that should not be given index trees
151
+ ///
152
+ /// - derived: { .field = fn (*const Object) ?DerivedType }:
153
+ /// An anonymous struct which contain fields that don't exist on the Object
154
+ /// but can be derived from an Object instance using the field's corresponding function.
155
+ comptime groove_options: anytype,
156
+ ) type {
157
+ @setEvalBranchQuota(64000);
158
+
159
+ assert(@hasField(Object, "id"));
160
+ assert(std.meta.fieldInfo(Object, .id).field_type == u128);
161
+ assert(@hasField(Object, "timestamp"));
162
+ assert(std.meta.fieldInfo(Object, .timestamp).field_type == u64);
163
+
164
+ comptime var index_fields: []const std.builtin.TypeInfo.StructField = &.{};
165
+
166
+ // Generate index LSM trees from the struct fields.
167
+ for (std.meta.fields(Object)) |field| {
168
+ // See if we should ignore this field from the options.
169
+ //
170
+ // By default, we ignore the "timestamp" field since it's a special identifier.
171
+ // Since the "timestamp" is ignored by default, it shouldn't be provided in groove_options.ignored.
172
+ comptime var ignored = mem.eql(u8, field.name, "timestamp") or mem.eql(u8, field.name, "id");
173
+ for (groove_options.ignored) |ignored_field_name| {
174
+ comptime assert(!std.mem.eql(u8, ignored_field_name, "timestamp"));
175
+ comptime assert(!std.mem.eql(u8, ignored_field_name, "id"));
176
+ ignored = ignored or std.mem.eql(u8, field.name, ignored_field_name);
177
+ }
178
+
179
+ if (!ignored) {
180
+ const tree_name = @typeName(Object) ++ "." ++ field.name;
181
+ const IndexTree = IndexTreeType(Storage, field.field_type, tree_name);
182
+ index_fields = index_fields ++ [_]std.builtin.TypeInfo.StructField{
183
+ .{
184
+ .name = field.name,
185
+ .field_type = IndexTree,
186
+ .default_value = null,
187
+ .is_comptime = false,
188
+ .alignment = @alignOf(IndexTree),
189
+ },
190
+ };
191
+ }
192
+ }
193
+
194
+ // Generate IndexTrees for fields derived from the Value in groove_options.
195
+ const derived_fields = std.meta.fields(@TypeOf(groove_options.derived));
196
+ for (derived_fields) |field| {
197
+ // Get the function info for the derived field.
198
+ const derive_func = @field(groove_options.derived, field.name);
199
+ const derive_func_info = @typeInfo(@TypeOf(derive_func)).Fn;
200
+
201
+ // Make sure it has only one argument.
202
+ if (derive_func_info.args.len != 1) {
203
+ @compileError("expected derive fn to take in *const " ++ @typeName(Object));
204
+ }
205
+
206
+ // Make sure the function takes in a reference to the Value:
207
+ const derive_arg = derive_func_info.args[0];
208
+ if (derive_arg.is_generic) @compileError("expected derive fn arg to not be generic");
209
+ if (derive_arg.arg_type != *const Object) {
210
+ @compileError("expected derive fn to take in *const " ++ @typeName(Object));
211
+ }
212
+
213
+ // Get the return value from the derived field as the DerivedType.
214
+ const derive_return_type = derive_func_info.return_type orelse {
215
+ @compileError("expected derive fn to return valid tree index type");
216
+ };
217
+
218
+ // Create an IndexTree for the DerivedType:
219
+ const tree_name = @typeName(Object) ++ "." ++ field.name;
220
+ const DerivedType = @typeInfo(derive_return_type).Optional.child;
221
+ const IndexTree = IndexTreeType(Storage, DerivedType, tree_name);
222
+
223
+ index_fields = index_fields ++ &.{
224
+ .{
225
+ .name = field.name,
226
+ .field_type = IndexTree,
227
+ .default_value = null,
228
+ .is_comptime = false,
229
+ .alignment = @alignOf(IndexTree),
230
+ },
231
+ };
232
+ }
233
+
234
+ comptime var index_options_fields: []const std.builtin.TypeInfo.StructField = &.{};
235
+ for (index_fields) |index_field| {
236
+ const IndexTree = index_field.field_type;
237
+ index_options_fields = index_options_fields ++ [_]std.builtin.TypeInfo.StructField{
238
+ .{
239
+ .name = index_field.name,
240
+ .field_type = IndexTree.Options,
241
+ .default_value = null,
242
+ .is_comptime = false,
243
+ .alignment = @alignOf(IndexTree.Options),
244
+ },
245
+ };
246
+ }
247
+
248
+ const ObjectTree = blk: {
249
+ const Table = TableType(
250
+ u64, // key = timestamp
251
+ Object,
252
+ ObjectTreeHelpers(Object).compare_keys,
253
+ ObjectTreeHelpers(Object).key_from_value,
254
+ ObjectTreeHelpers(Object).sentinel_key,
255
+ ObjectTreeHelpers(Object).tombstone,
256
+ ObjectTreeHelpers(Object).tombstone_from_key,
257
+ );
258
+
259
+ const tree_name = @typeName(Object);
260
+ break :blk TreeType(Table, Storage, tree_name);
261
+ };
262
+
263
+ const IdTree = blk: {
264
+ const Table = TableType(
265
+ u128,
266
+ IdTreeValue,
267
+ IdTreeValue.compare_keys,
268
+ IdTreeValue.key_from_value,
269
+ IdTreeValue.sentinel_key,
270
+ IdTreeValue.tombstone,
271
+ IdTreeValue.tombstone_from_key,
272
+ );
273
+
274
+ const tree_name = @typeName(Object) ++ ".id";
275
+ break :blk TreeType(Table, Storage, tree_name);
276
+ };
277
+
278
+ const IndexTrees = @Type(.{
279
+ .Struct = .{
280
+ .layout = .Auto,
281
+ .fields = index_fields,
282
+ .decls = &.{},
283
+ .is_tuple = false,
284
+ },
285
+ });
286
+ const IndexTreeOptions = @Type(.{
287
+ .Struct = .{
288
+ .layout = .Auto,
289
+ .fields = index_options_fields,
290
+ .decls = &.{},
291
+ .is_tuple = false,
292
+ },
293
+ });
294
+
295
+ // Verify no hash collisions between all the trees:
296
+ comptime var hashes: []const u128 = &.{ObjectTree.hash};
297
+
298
+ inline for (std.meta.fields(IndexTrees)) |field| {
299
+ const IndexTree = @TypeOf(@field(@as(IndexTrees, undefined), field.name));
300
+ const hash: []const u128 = &.{IndexTree.hash};
301
+
302
+ assert(std.mem.indexOf(u128, hashes, hash) == null);
303
+ hashes = hashes ++ hash;
304
+ }
305
+
306
+ // Verify groove index count:
307
+ const indexes_count_actual = std.meta.fields(IndexTrees).len;
308
+ const indexes_count_expect = std.meta.fields(Object).len -
309
+ groove_options.ignored.len -
310
+ // The id/timestamp field is implicitly ignored since it's the primary key for ObjectTree:
311
+ 2 +
312
+ std.meta.fields(@TypeOf(groove_options.derived)).len;
313
+
314
+ assert(indexes_count_actual == indexes_count_expect);
315
+ assert(indexes_count_actual == std.meta.fields(IndexTreeOptions).len);
316
+
317
+ // Generate a helper function for interacting with an Index field type.
318
+ const IndexTreeFieldHelperType = struct {
319
+ /// Returns true if the field is a derived field.
320
+ fn is_derived(comptime field_name: []const u8) bool {
321
+ comptime var derived = false;
322
+ inline for (derived_fields) |derived_field| {
323
+ derived = derived or std.mem.eql(u8, derived_field.name, field_name);
324
+ }
325
+ return derived;
326
+ }
327
+
328
+ /// Gets the index type from the index name (even if the index is derived).
329
+ fn IndexType(comptime field_name: []const u8) type {
330
+ if (!is_derived(field_name)) {
331
+ return @TypeOf(@field(@as(Object, undefined), field_name));
332
+ }
333
+
334
+ const derived_fn = @TypeOf(@field(groove_options.derived, field_name));
335
+ return @typeInfo(derived_fn).Fn.return_type.?.Optional.child;
336
+ }
337
+
338
+ fn HelperType(comptime field_name: []const u8) type {
339
+ return struct {
340
+ const Index = IndexType(field_name);
341
+
342
+ /// Try to extract an index from the object, deriving it when necessary.
343
+ pub fn derive_index(object: *const Object) ?Index {
344
+ if (comptime is_derived(field_name)) {
345
+ return @field(groove_options.derived, field_name)(object);
346
+ } else {
347
+ return @field(object, field_name);
348
+ }
349
+ }
350
+
351
+ /// Create a Value from the index that can be used in the IndexTree.
352
+ pub fn derive_value(
353
+ object: *const Object,
354
+ index: Index,
355
+ ) CompositeKey(IndexCompositeKeyType(Index)).Value {
356
+ return .{
357
+ .timestamp = object.timestamp,
358
+ .field = switch (@typeInfo(Index)) {
359
+ .Int => index,
360
+ .Enum => @enumToInt(index),
361
+ else => @compileError("Unsupported index type for " ++ field_name),
362
+ },
363
+ };
364
+ }
365
+ };
366
+ }
367
+ }.HelperType;
368
+
369
+ return struct {
370
+ const Groove = @This();
371
+
372
+ const Grid = GridType(Storage);
373
+
374
+ const Callback = fn (*Groove) void;
375
+ const JoinOp = enum {
376
+ compacting,
377
+ checkpoint,
378
+ open,
379
+ };
380
+
381
+ const PrefetchIDs = std.AutoHashMapUnmanaged(u128, void);
382
+
383
+ const PrefetchObjectsContext = struct {
384
+ pub fn hash(_: PrefetchObjectsContext, object: Object) u64 {
385
+ return std.hash.Wyhash.hash(0, mem.asBytes(&object.id));
386
+ }
387
+
388
+ pub fn eql(_: PrefetchObjectsContext, a: Object, b: Object) bool {
389
+ return a.id == b.id;
390
+ }
391
+ };
392
+ const PrefetchObjectsAdapter = struct {
393
+ pub fn hash(_: PrefetchObjectsAdapter, id: u128) u64 {
394
+ return std.hash.Wyhash.hash(0, mem.asBytes(&id));
395
+ }
396
+
397
+ pub fn eql(_: PrefetchObjectsAdapter, a_id: u128, b_object: Object) bool {
398
+ return a_id == b_object.id;
399
+ }
400
+ };
401
+ const PrefetchObjects = std.HashMapUnmanaged(Object, void, PrefetchObjectsContext, 70);
402
+
403
+ join_op: ?JoinOp = null,
404
+ join_pending: usize = 0,
405
+ join_callback: ?Callback = null,
406
+
407
+ objects_cache: *ObjectTree.TableMutable.ValuesCache,
408
+ objects: ObjectTree,
409
+
410
+ ids_cache: *IdTree.TableMutable.ValuesCache,
411
+ ids: IdTree,
412
+
413
+ indexes: IndexTrees,
414
+
415
+ /// Object IDs enqueued to be prefetched.
416
+ /// Prefetching ensures that point lookups against the latest snapshot are synchronous.
417
+ /// This shields state machine implementations from the challenges of concurrency and I/O,
418
+ /// and enables simple state machine function signatures that commit writes atomically.
419
+ prefetch_ids: PrefetchIDs,
420
+
421
+ /// The prefetched Objects. This hash map holds the subset of objects in the LSM trees
422
+ /// that are required for the current commit. All get()/put()/remove() operations during
423
+ /// the commit are both passed to the LSM trees and mirrored in this hash map. It is always
424
+ /// sufficient to query this hashmap alone to know the state of the LSM trees.
425
+ prefetch_objects: PrefetchObjects,
426
+
427
+ /// The snapshot to prefetch from.
428
+ prefetch_snapshot: ?u64,
429
+
430
+ pub const Options = struct {
431
+ /// TODO Improve unit in this name to make more clear what should be passed.
432
+ /// For example, is this a size in bytes or a count in objects? It's a count in objects,
433
+ /// but the name poorly reflects this.
434
+ cache_entries_max: u32,
435
+ /// The maximum number of objects that might be prefetched by a batch.
436
+ prefetch_entries_max: u32,
437
+
438
+ tree_options_object: ObjectTree.Options,
439
+ tree_options_id: IdTree.Options,
440
+ tree_options_index: IndexTreeOptions,
441
+ };
442
+
443
+ pub fn init(
444
+ allocator: mem.Allocator,
445
+ node_pool: *NodePool,
446
+ grid: *Grid,
447
+ options: Options,
448
+ ) !Groove {
449
+ // Cache is heap-allocated to pass a pointer into the Object tree.
450
+ const objects_cache = try allocator.create(ObjectTree.TableMutable.ValuesCache);
451
+ errdefer allocator.destroy(objects_cache);
452
+
453
+ objects_cache.* = try ObjectTree.TableMutable.ValuesCache.init(
454
+ allocator,
455
+ options.cache_entries_max,
456
+ );
457
+ errdefer objects_cache.deinit(allocator);
458
+
459
+ // Intialize the object LSM tree.
460
+ var object_tree = try ObjectTree.init(
461
+ allocator,
462
+ node_pool,
463
+ grid,
464
+ objects_cache,
465
+ options.tree_options_object,
466
+ );
467
+ errdefer object_tree.deinit(allocator);
468
+
469
+ // Cache is heap-allocated to pass a pointer into the ID tree.
470
+ const ids_cache = try allocator.create(IdTree.TableMutable.ValuesCache);
471
+ errdefer allocator.destroy(ids_cache);
472
+
473
+ ids_cache.* = try IdTree.TableMutable.ValuesCache.init(allocator, options.cache_entries_max);
474
+ errdefer ids_cache.deinit(allocator);
475
+
476
+ var id_tree = try IdTree.init(
477
+ allocator,
478
+ node_pool,
479
+ grid,
480
+ ids_cache,
481
+ options.tree_options_id,
482
+ );
483
+ errdefer id_tree.deinit(allocator);
484
+
485
+ var index_trees_initialized: usize = 0;
486
+ var index_trees: IndexTrees = undefined;
487
+
488
+ // Make sure to deinit initialized index LSM trees on error.
489
+ errdefer inline for (std.meta.fields(IndexTrees)) |field, field_index| {
490
+ if (index_trees_initialized >= field_index + 1) {
491
+ @field(index_trees, field.name).deinit(allocator);
492
+ }
493
+ };
494
+
495
+ // Initialize index LSM trees.
496
+ inline for (std.meta.fields(IndexTrees)) |field| {
497
+ @field(index_trees, field.name) = try field.field_type.init(
498
+ allocator,
499
+ node_pool,
500
+ grid,
501
+ null, // No value cache for index trees, since they only do range queries.
502
+ @field(options.tree_options_index, field.name),
503
+ );
504
+ index_trees_initialized += 1;
505
+ }
506
+
507
+ var prefetch_ids = PrefetchIDs{};
508
+ try prefetch_ids.ensureTotalCapacity(allocator, options.prefetch_entries_max);
509
+ errdefer prefetch_ids.deinit(allocator);
510
+
511
+ var prefetch_objects = PrefetchObjects{};
512
+ try prefetch_objects.ensureTotalCapacity(allocator, options.prefetch_entries_max);
513
+ errdefer prefetch_objects.deinit(allocator);
514
+
515
+ return Groove{
516
+ .objects_cache = objects_cache,
517
+ .objects = object_tree,
518
+
519
+ .ids_cache = ids_cache,
520
+ .ids = id_tree,
521
+
522
+ .indexes = index_trees,
523
+
524
+ .prefetch_ids = prefetch_ids,
525
+ .prefetch_objects = prefetch_objects,
526
+ .prefetch_snapshot = null,
527
+ };
528
+ }
529
+
530
+ pub fn deinit(groove: *Groove, allocator: mem.Allocator) void {
531
+ inline for (std.meta.fields(IndexTrees)) |field| {
532
+ @field(groove.indexes, field.name).deinit(allocator);
533
+ }
534
+
535
+ groove.objects.deinit(allocator);
536
+ groove.objects_cache.deinit(allocator);
537
+ allocator.destroy(groove.objects_cache);
538
+
539
+ groove.ids.deinit(allocator);
540
+ groove.ids_cache.deinit(allocator);
541
+ allocator.destroy(groove.ids_cache);
542
+
543
+ groove.prefetch_ids.deinit(allocator);
544
+ groove.prefetch_objects.deinit(allocator);
545
+
546
+ groove.* = undefined;
547
+ }
548
+
549
+ pub fn get(groove: *const Groove, id: u128) ?*const Object {
550
+ return groove.prefetch_objects.getKeyPtrAdapted(id, PrefetchObjectsAdapter{});
551
+ }
552
+
553
+ /// Must be called directly before the state machine begins queuing ids for prefetch.
554
+ /// When `snapshot` is null, prefetch from the current snapshot.
555
+ pub fn prefetch_setup(groove: *Groove, snapshot: ?u64) void {
556
+ // We may query the input tables of an ongoing compaction, but must not query the
557
+ // output tables until the compaction is complete. (Until then, the output tables may
558
+ // be in the manifest but not yet on disk).
559
+ const snapshot_max = groove.objects.lookup_snapshot_max;
560
+ assert(snapshot_max == groove.ids.lookup_snapshot_max);
561
+
562
+ const snapshot_target = snapshot orelse snapshot_max;
563
+ assert(snapshot_target <= snapshot_max);
564
+
565
+ if (groove.prefetch_snapshot == null) {
566
+ groove.prefetch_objects.clearRetainingCapacity();
567
+ } else {
568
+ // If there is a snapshot already set from the previous prefetch_setup(), then its
569
+ // prefetch() was never called, so there must already be no queued objects or ids.
570
+ }
571
+
572
+ groove.prefetch_snapshot = snapshot_target;
573
+ assert(groove.prefetch_objects.count() == 0);
574
+ assert(groove.prefetch_ids.count() == 0);
575
+ }
576
+
577
+ /// This must be called by the state machine for every key to be prefetched.
578
+ /// We tolerate duplicate IDs enqueued by the state machine.
579
+ /// For example, if all unique operations require the same two dependencies.
580
+ pub fn prefetch_enqueue(groove: *Groove, id: u128) void {
581
+ if (groove.ids.lookup_from_memory(groove.prefetch_snapshot.?, id)) |id_tree_value| {
582
+ if (id_tree_value.tombstone()) {
583
+ // Do nothing; an explicit ID tombstone indicates that the object was deleted.
584
+ } else {
585
+ if (groove.objects.lookup_from_memory(
586
+ groove.prefetch_snapshot.?,
587
+ id_tree_value.timestamp,
588
+ )) |object| {
589
+ assert(!ObjectTreeHelpers(Object).tombstone(object));
590
+ groove.prefetch_objects.putAssumeCapacity(object.*, {});
591
+ } else {
592
+ // The id was in the IdTree's value cache, but not in the ObjectTree's
593
+ // value cache.
594
+ groove.prefetch_ids.putAssumeCapacity(id, {});
595
+ }
596
+ }
597
+ } else {
598
+ groove.prefetch_ids.putAssumeCapacity(id, {});
599
+ }
600
+ }
601
+
602
+ /// Ensure the objects corresponding to all ids enqueued with prefetch_enqueue() are
603
+ /// available in `prefetch_objects`.
604
+ pub fn prefetch(
605
+ groove: *Groove,
606
+ callback: fn (*PrefetchContext) void,
607
+ context: *PrefetchContext,
608
+ ) void {
609
+ context.* = .{
610
+ .groove = groove,
611
+ .callback = callback,
612
+ .snapshot = groove.prefetch_snapshot.?,
613
+ .id_iterator = groove.prefetch_ids.keyIterator(),
614
+ };
615
+ groove.prefetch_snapshot = null;
616
+ context.start_workers();
617
+ }
618
+
619
+ pub const PrefetchContext = struct {
620
+ groove: *Groove,
621
+ callback: fn (*PrefetchContext) void,
622
+ snapshot: u64,
623
+
624
+ id_iterator: PrefetchIDs.KeyIterator,
625
+
626
+ /// The goal is to fully utilize the disk I/O to ensure the prefetch completes as
627
+ /// quickly as possible, so we run multiple lookups in parallel based on the max
628
+ /// I/O depth of the Grid.
629
+ workers: [Grid.read_iops_max]PrefetchWorker = undefined,
630
+ /// The number of workers that are currently running in parallel.
631
+ workers_busy: u32 = 0,
632
+
633
+ fn start_workers(context: *PrefetchContext) void {
634
+ assert(context.workers_busy == 0);
635
+
636
+ // Track an extra "worker" that will finish after the loop.
637
+ //
638
+ // This prevents `context.finish()` from being called within the loop body when
639
+ // every worker finishes synchronously. `context.finish()` calls the user-provided
640
+ // callback which may re-use the memory of this `PrefetchContext`. However, we
641
+ // rely on `context` being well-defined for the loop condition.
642
+ context.workers_busy += 1;
643
+
644
+ for (context.workers) |*worker| {
645
+ worker.* = .{ .context = context };
646
+ context.workers_busy += 1;
647
+ worker.lookup_start_next();
648
+ }
649
+
650
+ assert(context.workers_busy >= 1);
651
+ context.worker_finished();
652
+ }
653
+
654
+ fn worker_finished(context: *PrefetchContext) void {
655
+ context.workers_busy -= 1;
656
+ if (context.workers_busy == 0) context.finish();
657
+ }
658
+
659
+ fn finish(context: *PrefetchContext) void {
660
+ assert(context.workers_busy == 0);
661
+
662
+ assert(context.id_iterator.next() == null);
663
+ context.groove.prefetch_ids.clearRetainingCapacity();
664
+ assert(context.groove.prefetch_ids.count() == 0);
665
+
666
+ context.callback(context);
667
+ }
668
+ };
669
+
670
        /// One prefetch pipeline: repeatedly pulls ids from the shared
        /// `PrefetchContext.id_iterator`, resolves id -> timestamp through the IdTree,
        /// then timestamp -> object through the ObjectTree, caching results in
        /// `groove.prefetch_objects`. Reports to the context when the iterator is drained.
        pub const PrefetchWorker = struct {
            context: *PrefetchContext,
            // TODO(ifreund): use a union for these to save memory, likely an extern union
            // so that we can safely @ptrCast() until @fieldParentPtr() is implemented
            // for unions. See: https://github.com/ziglang/zig/issues/6611
            lookup_id: IdTree.LookupContext = undefined,
            lookup_object: ObjectTree.LookupContext = undefined,

            /// Start (or continue) this worker's pipeline with the next pending id.
            /// If no ids remain, tells the context this worker is finished.
            fn lookup_start_next(worker: *PrefetchWorker) void {
                const id = worker.context.id_iterator.next() orelse {
                    worker.context.worker_finished();
                    return;
                };

                if (worker.context.groove.ids.lookup_from_memory(
                    worker.context.snapshot,
                    id.*,
                )) |id_tree_value| {
                    assert(!id_tree_value.tombstone());
                    // Fast path: the IdTree value is already in memory, so invoke the
                    // callback synchronously instead of going through lookup_from_levels.
                    lookup_id_callback(&worker.lookup_id, id_tree_value);

                    if (config.verify) {
                        // If the id is cached, then we must be prefetching it because the object
                        // was not also cached.
                        assert(worker.context.groove.objects.lookup_from_memory(
                            worker.context.snapshot,
                            id_tree_value.timestamp,
                        ) == null);
                    }
                } else {
                    // If not in the LSM tree's cache, the object must be read from disk and added
                    // to the auxiliary prefetch_objects hash map.
                    worker.context.groove.ids.lookup_from_levels(
                        lookup_id_callback,
                        &worker.lookup_id,
                        worker.context.snapshot,
                        id.*,
                    );
                }
            }

            /// Completion for the IdTree lookup. `result` is the IdTree value for the
            /// requested id, or null if the id does not exist at this snapshot.
            /// Recovers the worker from the embedded `lookup_id` context.
            fn lookup_id_callback(
                completion: *IdTree.LookupContext,
                result: ?*const IdTreeValue,
            ) void {
                const worker = @fieldParentPtr(PrefetchWorker, "lookup_id", completion);

                if (result) |id_tree_value| {
                    if (config.verify) {
                        // This was checked in prefetch_enqueue().
                        assert(
                            worker.context.groove.ids.lookup_from_memory(
                                worker.context.snapshot,
                                worker.lookup_id.key,
                            ) == null or
                                worker.context.groove.objects.lookup_from_memory(
                                worker.context.snapshot,
                                id_tree_value.timestamp,
                            ) == null,
                        );
                    }

                    // A tombstone means the id was deleted: nothing to prefetch.
                    if (id_tree_value.tombstone()) {
                        worker.lookup_start_next();
                        return;
                    }

                    if (worker.context.groove.objects.lookup_from_memory(
                        worker.context.snapshot,
                        id_tree_value.timestamp,
                    )) |object| {
                        // The object is not a tombstone; the ID and Object trees are in sync.
                        assert(!ObjectTreeHelpers(Object).tombstone(object));

                        worker.context.groove.prefetch_objects.putAssumeCapacityNoClobber(object.*, {});
                        worker.lookup_start_next();
                        return;
                    }

                    // Object not in memory: fall through to an asynchronous lookup
                    // from the ObjectTree's on-disk levels.
                    worker.context.groove.objects.lookup_from_levels(
                        lookup_object_callback,
                        &worker.lookup_object,
                        worker.context.snapshot,
                        id_tree_value.timestamp,
                    );
                } else {
                    // Id not found at this snapshot: move on to the next pending id.
                    worker.lookup_start_next();
                }
            }

            /// Completion for the ObjectTree lookup. Caches the found object and
            /// continues with the next pending id.
            fn lookup_object_callback(
                completion: *ObjectTree.LookupContext,
                result: ?*const Object,
            ) void {
                const worker = @fieldParentPtr(PrefetchWorker, "lookup_object", completion);

                // The result must be non-null as we keep the ID and Object trees in sync.
                const object = result.?;
                assert(!ObjectTreeHelpers(Object).tombstone(object));

                worker.context.groove.prefetch_objects.putAssumeCapacityNoClobber(object.*, {});
                worker.lookup_start_next();
            }
        };
774
+
775
+ pub fn put_no_clobber(groove: *Groove, object: *const Object) void {
776
+ const gop = groove.prefetch_objects.getOrPutAssumeCapacityAdapted(object.id, PrefetchObjectsAdapter{});
777
+ assert(!gop.found_existing);
778
+ groove.insert(object);
779
+ gop.key_ptr.* = object.*;
780
+ }
781
+
782
+ pub fn put(groove: *Groove, object: *const Object) void {
783
+ const gop = groove.prefetch_objects.getOrPutAssumeCapacityAdapted(object.id, PrefetchObjectsAdapter{});
784
+ if (gop.found_existing) {
785
+ groove.update(gop.key_ptr, object);
786
+ } else {
787
+ groove.insert(object);
788
+ }
789
+ gop.key_ptr.* = object.*;
790
+ }
791
+
792
+ /// Insert the value into the objects tree and its fields into the index trees.
793
+ fn insert(groove: *Groove, object: *const Object) void {
794
+ groove.objects.put(object);
795
+ groove.ids.put(&IdTreeValue{ .id = object.id, .timestamp = object.timestamp });
796
+
797
+ inline for (std.meta.fields(IndexTrees)) |field| {
798
+ const Helper = IndexTreeFieldHelperType(field.name);
799
+
800
+ if (Helper.derive_index(object)) |index| {
801
+ const index_value = Helper.derive_value(object, index);
802
+ @field(groove.indexes, field.name).put(&index_value);
803
+ }
804
+ }
805
+ }
806
+
807
+ /// Update the object and index trees by diff'ing the old and new values.
808
+ fn update(groove: *Groove, old: *const Object, new: *const Object) void {
809
+ assert(old.id == new.id);
810
+ assert(old.timestamp == new.timestamp);
811
+
812
+ // Update the object tree entry if any of the fields (even ignored) are different.
813
+ if (!std.mem.eql(u8, std.mem.asBytes(old), std.mem.asBytes(new))) {
814
+ // Unlike the index trees, the new and old values in the object tree share the
815
+ // same key. Therefore put() is sufficient to overwrite the old value.
816
+ groove.objects.put(new);
817
+ }
818
+
819
+ inline for (std.meta.fields(IndexTrees)) |field| {
820
+ const Helper = IndexTreeFieldHelperType(field.name);
821
+ const old_index = Helper.derive_index(old);
822
+ const new_index = Helper.derive_index(new);
823
+
824
+ // Only update the indexes that change.
825
+ if (!std.meta.eql(old_index, new_index)) {
826
+ if (old_index) |index| {
827
+ const old_index_value = Helper.derive_value(old, index);
828
+ @field(groove.indexes, field.name).remove(&old_index_value);
829
+ }
830
+
831
+ if (new_index) |index| {
832
+ const new_index_value = Helper.derive_value(new, index);
833
+ @field(groove.indexes, field.name).put(&new_index_value);
834
+ }
835
+ }
836
+ }
837
+ }
838
+
839
+ /// Asserts that the object with the given ID exists.
840
+ pub fn remove(groove: *Groove, id: u128) void {
841
+ const object = groove.prefetch_objects.getKeyPtrAdapted(id, PrefetchObjectsAdapter{}).?;
842
+
843
+ groove.objects.remove(object);
844
+ groove.ids.remove(&IdTreeValue{ .id = object.id, .timestamp = object.timestamp });
845
+
846
+ inline for (std.meta.fields(IndexTrees)) |field| {
847
+ const Helper = IndexTreeFieldHelperType(field.name);
848
+
849
+ if (Helper.derive_index(object)) |index| {
850
+ const index_value = Helper.derive_value(object, index);
851
+ @field(groove.indexes, field.name).remove(&index_value);
852
+ }
853
+ }
854
+
855
+ // TODO(zig) Replace this with a call to removeByPtr() after upgrading to 0.10.
856
+ // removeByPtr() replaces an unnecessary lookup here with some pointer arithmetic.
857
+ assert(groove.prefetch_objects.removeAdapted(object.id, PrefetchObjectsAdapter{}));
858
+ }
859
+
860
        /// Number of per-tree callbacks a join operation waits for before its
        /// user callback may fire: the ObjectTree and IdTree (the `2`) plus one
        /// for each index tree.
        const join_pending_max = 2 + std.meta.fields(IndexTrees).len;
862
+
863
        /// Builds the fan-in machinery for one kind of groove-wide operation
        /// (open/compact/checkpoint, selected by `join_op`): `start` arms the join
        /// counter, and `tree_callback` produces a per-tree completion callback that
        /// decrements it, firing the user callback when every tree has reported.
        fn JoinType(comptime join_op: JoinOp) type {
            return struct {
                /// Arm the join: record the op and callback and set the pending count
                /// to the total number of trees. Asserts no join is already running.
                pub fn start(groove: *Groove, join_callback: Callback) void {
                    // Make sure no sync op is currently running.
                    assert(groove.join_op == null);
                    assert(groove.join_pending == 0);
                    assert(groove.join_callback == null);

                    // Start the sync operations.
                    groove.join_op = join_op;
                    groove.join_callback = join_callback;
                    groove.join_pending = join_pending_max;
                }

                /// Identifies which tree a callback belongs to: the id tree, the
                /// object tree, or a named index tree.
                const JoinField = union(enum) {
                    ids,
                    objects,
                    index: []const u8,
                };

                /// Returns the LSM tree type corresponding to the given join field.
                fn TreeFor(comptime join_field: JoinField) type {
                    return switch (join_field) {
                        .ids => IdTree,
                        .objects => ObjectTree,
                        .index => |field| @TypeOf(@field(@as(IndexTrees, undefined), field)),
                    };
                }

                /// Returns a completion callback for the tree selected by `join_field`.
                /// The callback recovers the groove from the tree pointer, decrements
                /// the join counter, and invokes the user callback when it hits zero.
                pub fn tree_callback(
                    comptime join_field: JoinField,
                ) fn (*TreeFor(join_field)) void {
                    return struct {
                        fn tree_cb(tree: *TreeFor(join_field)) void {
                            // Derive the groove pointer from the tree using the join_field.
                            const groove = switch (join_field) {
                                .ids => @fieldParentPtr(Groove, "ids", tree),
                                .objects => @fieldParentPtr(Groove, "objects", tree),
                                .index => |field| blk: {
                                    const indexes = @fieldParentPtr(IndexTrees, field, tree);
                                    break :blk @fieldParentPtr(Groove, "indexes", indexes);
                                },
                            };

                            // Make sure the sync operation is currently running.
                            assert(groove.join_op == join_op);
                            assert(groove.join_callback != null);
                            assert(groove.join_pending <= join_pending_max);

                            // Guard until all pending sync ops complete.
                            groove.join_pending -= 1;
                            if (groove.join_pending > 0) return;

                            // Clear the join state before invoking the callback so the
                            // callback may start a new join.
                            const callback = groove.join_callback.?;
                            groove.join_op = null;
                            groove.join_callback = null;
                            callback(groove);
                        }
                    }.tree_cb;
                }
            };
        }
925
+
926
+ pub fn open(groove: *Groove, callback: fn (*Groove) void) void {
927
+ const Join = JoinType(.open);
928
+ Join.start(groove, callback);
929
+
930
+ groove.ids.open(Join.tree_callback(.ids));
931
+ groove.objects.open(Join.tree_callback(.objects));
932
+
933
+ inline for (std.meta.fields(IndexTrees)) |field| {
934
+ const open_callback = Join.tree_callback(.{ .index = field.name });
935
+ @field(groove.indexes, field.name).open(open_callback);
936
+ }
937
+ }
938
+
939
+ pub fn compact(groove: *Groove, callback: Callback, op: u64) void {
940
+ // Start a compacting join operation.
941
+ const Join = JoinType(.compacting);
942
+ Join.start(groove, callback);
943
+
944
+ // Compact the ObjectTree and IdTree
945
+ groove.ids.compact(Join.tree_callback(.ids), op);
946
+ groove.objects.compact(Join.tree_callback(.objects), op);
947
+
948
+ // Compact the IndexTrees.
949
+ inline for (std.meta.fields(IndexTrees)) |field| {
950
+ const compact_callback = Join.tree_callback(.{ .index = field.name });
951
+ @field(groove.indexes, field.name).compact(compact_callback, op);
952
+ }
953
+ }
954
+
955
+ pub fn checkpoint(groove: *Groove, callback: fn (*Groove) void) void {
956
+ // Start a checkpoint join operation.
957
+ const Join = JoinType(.checkpoint);
958
+ Join.start(groove, callback);
959
+
960
+ // Checkpoint the IdTree and ObjectTree.
961
+ groove.ids.checkpoint(Join.tree_callback(.ids));
962
+ groove.objects.checkpoint(Join.tree_callback(.objects));
963
+
964
+ // Checkpoint the IndexTrees.
965
+ inline for (std.meta.fields(IndexTrees)) |field| {
966
+ const checkpoint_callback = Join.tree_callback(.{ .index = field.name });
967
+ @field(groove.indexes, field.name).checkpoint(checkpoint_callback);
968
+ }
969
+ }
970
+ };
971
+ }
972
+
973
+ test "Groove" {
974
+ const Transfer = @import("../tigerbeetle.zig").Transfer;
975
+ const Storage = @import("../storage.zig").Storage;
976
+
977
+ const Groove = GrooveType(
978
+ Storage,
979
+ Transfer,
980
+ .{
981
+ .ignored = [_][]const u8{ "reserved", "user_data", "flags" },
982
+ .derived = .{},
983
+ },
984
+ );
985
+
986
+ _ = Groove.init;
987
+ _ = Groove.deinit;
988
+
989
+ _ = Groove.get;
990
+ _ = Groove.put;
991
+ _ = Groove.remove;
992
+
993
+ _ = Groove.compact;
994
+ _ = Groove.checkpoint;
995
+
996
+ _ = Groove.prefetch_enqueue;
997
+ _ = Groove.prefetch;
998
+ _ = Groove.prefetch_setup;
999
+
1000
+ std.testing.refAllDecls(Groove.PrefetchWorker);
1001
+ std.testing.refAllDecls(Groove.PrefetchContext);
1002
+ }