tigerbeetle-node 0.9.0 → 0.9.143

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (79)
  1. package/README.md +580 -179
  2. package/dist/benchmark.js +44 -36
  3. package/dist/benchmark.js.map +1 -1
  4. package/dist/bin/aarch64-linux-gnu/client.node +0 -0
  5. package/dist/bin/aarch64-linux-musl/client.node +0 -0
  6. package/dist/bin/aarch64-macos/client.node +0 -0
  7. package/dist/bin/x86_64-linux-gnu/client.node +0 -0
  8. package/dist/bin/x86_64-linux-musl/client.node +0 -0
  9. package/dist/bin/x86_64-macos/client.node +0 -0
  10. package/dist/bin/x86_64-windows/client.node +0 -0
  11. package/dist/bindings.d.ts +141 -0
  12. package/dist/bindings.js +112 -0
  13. package/dist/bindings.js.map +1 -0
  14. package/dist/index.d.ts +2 -125
  15. package/dist/index.js +51 -101
  16. package/dist/index.js.map +1 -1
  17. package/dist/test.js +68 -54
  18. package/dist/test.js.map +1 -1
  19. package/package-lock.json +26 -0
  20. package/package.json +13 -22
  21. package/src/benchmark.ts +58 -49
  22. package/src/bindings.ts +631 -0
  23. package/src/index.ts +71 -163
  24. package/src/node.zig +169 -148
  25. package/src/test.ts +71 -57
  26. package/src/translate.zig +19 -36
  27. package/scripts/download_node_headers.sh +0 -25
  28. package/src/tigerbeetle/scripts/benchmark.bat +0 -46
  29. package/src/tigerbeetle/scripts/benchmark.sh +0 -55
  30. package/src/tigerbeetle/scripts/install.sh +0 -6
  31. package/src/tigerbeetle/scripts/install_zig.bat +0 -109
  32. package/src/tigerbeetle/scripts/install_zig.sh +0 -84
  33. package/src/tigerbeetle/scripts/lint.zig +0 -199
  34. package/src/tigerbeetle/scripts/upgrade_ubuntu_kernel.sh +0 -39
  35. package/src/tigerbeetle/scripts/vopr.bat +0 -48
  36. package/src/tigerbeetle/scripts/vopr.sh +0 -33
  37. package/src/tigerbeetle/scripts/vr_state_enumerate +0 -46
  38. package/src/tigerbeetle/src/benchmark.zig +0 -290
  39. package/src/tigerbeetle/src/cli.zig +0 -244
  40. package/src/tigerbeetle/src/config.zig +0 -239
  41. package/src/tigerbeetle/src/demo.zig +0 -125
  42. package/src/tigerbeetle/src/demo_01_create_accounts.zig +0 -35
  43. package/src/tigerbeetle/src/demo_02_lookup_accounts.zig +0 -7
  44. package/src/tigerbeetle/src/demo_03_create_transfers.zig +0 -24
  45. package/src/tigerbeetle/src/demo_04_create_pending_transfers.zig +0 -61
  46. package/src/tigerbeetle/src/demo_05_post_pending_transfers.zig +0 -37
  47. package/src/tigerbeetle/src/demo_06_void_pending_transfers.zig +0 -24
  48. package/src/tigerbeetle/src/demo_07_lookup_transfers.zig +0 -7
  49. package/src/tigerbeetle/src/fifo.zig +0 -104
  50. package/src/tigerbeetle/src/io/benchmark.zig +0 -213
  51. package/src/tigerbeetle/src/io/darwin.zig +0 -793
  52. package/src/tigerbeetle/src/io/linux.zig +0 -1038
  53. package/src/tigerbeetle/src/io/test.zig +0 -643
  54. package/src/tigerbeetle/src/io/windows.zig +0 -1161
  55. package/src/tigerbeetle/src/io.zig +0 -34
  56. package/src/tigerbeetle/src/main.zig +0 -144
  57. package/src/tigerbeetle/src/message_bus.zig +0 -1000
  58. package/src/tigerbeetle/src/message_pool.zig +0 -142
  59. package/src/tigerbeetle/src/ring_buffer.zig +0 -289
  60. package/src/tigerbeetle/src/simulator.zig +0 -417
  61. package/src/tigerbeetle/src/state_machine.zig +0 -2470
  62. package/src/tigerbeetle/src/storage.zig +0 -308
  63. package/src/tigerbeetle/src/test/cluster.zig +0 -351
  64. package/src/tigerbeetle/src/test/message_bus.zig +0 -93
  65. package/src/tigerbeetle/src/test/network.zig +0 -179
  66. package/src/tigerbeetle/src/test/packet_simulator.zig +0 -387
  67. package/src/tigerbeetle/src/test/state_checker.zig +0 -145
  68. package/src/tigerbeetle/src/test/state_machine.zig +0 -76
  69. package/src/tigerbeetle/src/test/storage.zig +0 -438
  70. package/src/tigerbeetle/src/test/time.zig +0 -84
  71. package/src/tigerbeetle/src/tigerbeetle.zig +0 -222
  72. package/src/tigerbeetle/src/time.zig +0 -113
  73. package/src/tigerbeetle/src/unit_tests.zig +0 -14
  74. package/src/tigerbeetle/src/vsr/client.zig +0 -505
  75. package/src/tigerbeetle/src/vsr/clock.zig +0 -812
  76. package/src/tigerbeetle/src/vsr/journal.zig +0 -2293
  77. package/src/tigerbeetle/src/vsr/marzullo.zig +0 -309
  78. package/src/tigerbeetle/src/vsr/replica.zig +0 -5015
  79. package/src/tigerbeetle/src/vsr.zig +0 -1017
@@ -1,505 +0,0 @@
1
- const std = @import("std");
2
- const assert = std.debug.assert;
3
- const mem = std.mem;
4
-
5
- const config = @import("../config.zig");
6
- const vsr = @import("../vsr.zig");
7
- const Header = vsr.Header;
8
-
9
- const RingBuffer = @import("../ring_buffer.zig").RingBuffer;
10
- const message_pool = @import("../message_pool.zig");
11
- const Message = message_pool.MessagePool.Message;
12
-
13
- const log = std.log.scoped(.client);
14
-
15
- pub fn Client(comptime StateMachine: type, comptime MessageBus: type) type {
16
- return struct {
17
- const Self = @This();
18
-
19
- pub const Error = error{
20
- TooManyOutstandingRequests,
21
- };
22
-
23
- const Request = struct {
24
- const Callback = fn (
25
- user_data: u128,
26
- operation: StateMachine.Operation,
27
- results: Error![]const u8,
28
- ) void;
29
- user_data: u128,
30
- callback: Callback,
31
- message: *Message,
32
- };
33
-
34
- allocator: mem.Allocator,
35
- message_bus: *MessageBus,
36
-
37
- /// A universally unique identifier for the client (must not be zero).
38
- /// Used for routing replies back to the client via any network path (multi-path routing).
39
- /// The client ID must be ephemeral and random per process, and never persisted, so that
40
- /// lingering or zombie deployment processes cannot break correctness and/or liveness.
41
- /// A cryptographic random number generator must be used to ensure these properties.
42
- id: u128,
43
-
44
- /// The identifier for the cluster that this client intends to communicate with.
45
- cluster: u32,
46
-
47
- /// The number of replicas in the cluster.
48
- replica_count: u8,
49
-
50
- /// The total number of ticks elapsed since the client was initialized.
51
- ticks: u64 = 0,
52
-
53
- /// We hash-chain request/reply checksums to verify linearizability within a client session:
54
- /// * so that the parent of the next request is the checksum of the latest reply, and
55
- /// * so that the parent of the next reply is the checksum of the latest request.
56
- parent: u128 = 0,
57
-
58
- /// The session number for the client, zero when registering a session, non-zero thereafter.
59
- session: u64 = 0,
60
-
61
- /// The request number of the next request.
62
- request_number: u32 = 0,
63
-
64
- /// The highest view number seen by the client in messages exchanged with the cluster.
65
- /// Used to locate the current leader, and provide more information to a partitioned leader.
66
- view: u32 = 0,
67
-
68
- /// A client is allowed at most one inflight request at a time at the protocol layer.
69
- /// We therefore queue any further concurrent requests made by the application layer.
70
- request_queue: RingBuffer(Request, config.client_request_queue_max) = .{},
71
-
72
- /// The number of ticks without a reply before the client resends the inflight request.
73
- /// Dynamically adjusted as a function of recent request round-trip time.
74
- request_timeout: vsr.Timeout,
75
-
76
- /// The number of ticks before the client broadcasts a ping to the cluster.
77
- /// Used for end-to-end keepalive, and to discover a new leader between requests.
78
- ping_timeout: vsr.Timeout,
79
-
80
- /// Used to calculate exponential backoff with random jitter.
81
- /// Seeded with the client's ID.
82
- prng: std.rand.DefaultPrng,
83
-
84
- pub fn init(
85
- allocator: mem.Allocator,
86
- id: u128,
87
- cluster: u32,
88
- replica_count: u8,
89
- message_bus: *MessageBus,
90
- ) !Self {
91
- assert(id > 0);
92
- assert(replica_count > 0);
93
-
94
- var self = Self{
95
- .allocator = allocator,
96
- .message_bus = message_bus,
97
- .id = id,
98
- .cluster = cluster,
99
- .replica_count = replica_count,
100
- .request_timeout = .{
101
- .name = "request_timeout",
102
- .id = id,
103
- .after = config.rtt_ticks * config.rtt_multiple,
104
- },
105
- .ping_timeout = .{
106
- .name = "ping_timeout",
107
- .id = id,
108
- .after = 30000 / config.tick_ms,
109
- },
110
- .prng = std.rand.DefaultPrng.init(@truncate(u64, id)),
111
- };
112
-
113
- self.ping_timeout.start();
114
-
115
- return self;
116
- }
117
-
118
- pub fn deinit(_: *Self) void {}
119
-
120
- pub fn on_message(self: *Self, message: *Message) void {
121
- log.debug("{}: on_message: {}", .{ self.id, message.header });
122
- if (message.header.invalid()) |reason| {
123
- log.debug("{}: on_message: invalid ({s})", .{ self.id, reason });
124
- return;
125
- }
126
- if (message.header.cluster != self.cluster) {
127
- log.warn("{}: on_message: wrong cluster (cluster should be {}, not {})", .{
128
- self.id,
129
- self.cluster,
130
- message.header.cluster,
131
- });
132
- return;
133
- }
134
- switch (message.header.command) {
135
- .pong => self.on_pong(message),
136
- .reply => self.on_reply(message),
137
- .eviction => self.on_eviction(message),
138
- else => {
139
- log.warn("{}: on_message: ignoring misdirected {s} message", .{
140
- self.id,
141
- @tagName(message.header.command),
142
- });
143
- return;
144
- },
145
- }
146
- }
147
-
148
- pub fn tick(self: *Self) void {
149
- self.ticks += 1;
150
-
151
- self.message_bus.tick();
152
-
153
- self.ping_timeout.tick();
154
- self.request_timeout.tick();
155
-
156
- if (self.ping_timeout.fired()) self.on_ping_timeout();
157
- if (self.request_timeout.fired()) self.on_request_timeout();
158
- }
159
-
160
- pub fn request(
161
- self: *Self,
162
- user_data: u128,
163
- callback: Request.Callback,
164
- operation: StateMachine.Operation,
165
- message: *Message,
166
- message_body_size: usize,
167
- ) void {
168
- self.register();
169
-
170
- // We will set parent, context, view and checksums only when sending for the first time:
171
- message.header.* = .{
172
- .client = self.id,
173
- .request = self.request_number,
174
- .cluster = self.cluster,
175
- .command = .request,
176
- .operation = vsr.Operation.from(StateMachine, operation),
177
- .size = @intCast(u32, @sizeOf(Header) + message_body_size),
178
- };
179
-
180
- assert(self.request_number > 0);
181
- self.request_number += 1;
182
-
183
- log.debug("{}: request: user_data={} request={} size={} {s}", .{
184
- self.id,
185
- user_data,
186
- message.header.request,
187
- message.header.size,
188
- @tagName(operation),
189
- });
190
-
191
- if (self.request_queue.full()) {
192
- callback(user_data, operation, error.TooManyOutstandingRequests);
193
- return;
194
- }
195
-
196
- const was_empty = self.request_queue.empty();
197
-
198
- self.request_queue.push_assume_capacity(.{
199
- .user_data = user_data,
200
- .callback = callback,
201
- .message = message.ref(),
202
- });
203
-
204
- // If the queue was empty, then there is no request inflight and we must send this one:
205
- if (was_empty) self.send_request_for_the_first_time(message);
206
- }
207
-
208
- /// Acquires a message from the message bus if one is available.
209
- pub fn get_message(self: *Self) *Message {
210
- return self.message_bus.get_message();
211
- }
212
-
213
- /// Releases a message back to the message bus.
214
- pub fn unref(self: *Self, message: *Message) void {
215
- self.message_bus.unref(message);
216
- }
217
-
218
- fn on_eviction(self: *Self, eviction: *const Message) void {
219
- assert(eviction.header.command == .eviction);
220
- assert(eviction.header.cluster == self.cluster);
221
-
222
- if (eviction.header.client != self.id) {
223
- log.warn("{}: on_eviction: ignoring (wrong client={})", .{
224
- self.id,
225
- eviction.header.client,
226
- });
227
- return;
228
- }
229
-
230
- if (eviction.header.view < self.view) {
231
- log.debug("{}: on_eviction: ignoring (older view={})", .{
232
- self.id,
233
- eviction.header.view,
234
- });
235
- return;
236
- }
237
-
238
- assert(eviction.header.client == self.id);
239
- assert(eviction.header.view >= self.view);
240
-
241
- log.err("{}: session evicted: too many concurrent client sessions", .{self.id});
242
- @panic("session evicted: too many concurrent client sessions");
243
- }
244
-
245
- fn on_pong(self: *Self, pong: *const Message) void {
246
- assert(pong.header.command == .pong);
247
- assert(pong.header.cluster == self.cluster);
248
-
249
- if (pong.header.client != 0) {
250
- log.debug("{}: on_pong: ignoring (client != 0)", .{self.id});
251
- return;
252
- }
253
-
254
- if (pong.header.view > self.view) {
255
- log.debug("{}: on_pong: newer view={}..{}", .{
256
- self.id,
257
- self.view,
258
- pong.header.view,
259
- });
260
- self.view = pong.header.view;
261
- }
262
-
263
- // Now that we know the view number, it's a good time to register if we haven't already:
264
- self.register();
265
- }
266
-
267
- fn on_reply(self: *Self, reply: *Message) void {
268
- // We check these checksums again here because this is the last time we get to downgrade
269
- // a correctness bug into a liveness bug, before we return data back to the application.
270
- assert(reply.header.valid_checksum());
271
- assert(reply.header.valid_checksum_body(reply.body()));
272
- assert(reply.header.command == .reply);
273
-
274
- if (reply.header.client != self.id) {
275
- log.debug("{}: on_reply: ignoring (wrong client={})", .{
276
- self.id,
277
- reply.header.client,
278
- });
279
- return;
280
- }
281
-
282
- if (self.request_queue.head_ptr()) |inflight| {
283
- if (reply.header.request < inflight.message.header.request) {
284
- log.debug("{}: on_reply: ignoring (request {} < {})", .{
285
- self.id,
286
- reply.header.request,
287
- inflight.message.header.request,
288
- });
289
- return;
290
- }
291
- } else {
292
- log.debug("{}: on_reply: ignoring (no inflight request)", .{self.id});
293
- return;
294
- }
295
-
296
- const inflight = self.request_queue.pop().?;
297
- defer self.message_bus.unref(inflight.message);
298
-
299
- log.debug("{}: on_reply: user_data={} request={} size={} {s}", .{
300
- self.id,
301
- inflight.user_data,
302
- reply.header.request,
303
- reply.header.size,
304
- @tagName(reply.header.operation.cast(StateMachine)),
305
- });
306
-
307
- assert(reply.header.parent == self.parent);
308
- assert(reply.header.client == self.id);
309
- assert(reply.header.context == 0);
310
- assert(reply.header.request == inflight.message.header.request);
311
- assert(reply.header.cluster == self.cluster);
312
- assert(reply.header.op == reply.header.commit);
313
- assert(reply.header.operation == inflight.message.header.operation);
314
-
315
- // The checksum of this reply becomes the parent of our next request:
316
- self.parent = reply.header.checksum;
317
-
318
- if (reply.header.view > self.view) {
319
- log.debug("{}: on_reply: newer view={}..{}", .{
320
- self.id,
321
- self.view,
322
- reply.header.view,
323
- });
324
- self.view = reply.header.view;
325
- }
326
-
327
- self.request_timeout.stop();
328
-
329
- if (inflight.message.header.operation == .register) {
330
- assert(self.session == 0);
331
- assert(reply.header.commit > 0);
332
- self.session = reply.header.commit; // The commit number becomes the session number.
333
- }
334
-
335
- // We must process the next request before releasing control back to the callback.
336
- // Otherwise, requests may run through send_request_for_the_first_time() more than once.
337
- if (self.request_queue.head_ptr()) |next_request| {
338
- self.send_request_for_the_first_time(next_request.message);
339
- }
340
-
341
- if (inflight.message.header.operation != .register) {
342
- inflight.callback(
343
- inflight.user_data,
344
- inflight.message.header.operation.cast(StateMachine),
345
- reply.body(),
346
- );
347
- }
348
- }
349
-
350
- fn on_ping_timeout(self: *Self) void {
351
- self.ping_timeout.reset();
352
-
353
- const ping = Header{
354
- .command = .ping,
355
- .cluster = self.cluster,
356
- .client = self.id,
357
- };
358
-
359
- // TODO If we haven't received a pong from a replica since our last ping, then back off.
360
- self.send_header_to_replicas(ping);
361
- }
362
-
363
- fn on_request_timeout(self: *Self) void {
364
- self.request_timeout.backoff(self.prng.random());
365
-
366
- const message = self.request_queue.head_ptr().?.message;
367
- assert(message.header.command == .request);
368
- assert(message.header.request < self.request_number);
369
- assert(message.header.checksum == self.parent);
370
- assert(message.header.context == self.session);
371
-
372
- log.debug("{}: on_request_timeout: resending request={} checksum={}", .{
373
- self.id,
374
- message.header.request,
375
- message.header.checksum,
376
- });
377
-
378
- // We assume the leader is down and round-robin through the cluster:
379
- self.send_message_to_replica(
380
- @intCast(u8, (self.view + self.request_timeout.attempts) % self.replica_count),
381
- message,
382
- );
383
- }
384
-
385
- /// The caller owns the returned message, if any, which has exactly 1 reference.
386
- fn create_message_from_header(self: *Self, header: Header) *Message {
387
- assert(header.client == self.id);
388
- assert(header.cluster == self.cluster);
389
- assert(header.size == @sizeOf(Header));
390
-
391
- const message = self.message_bus.pool.get_message();
392
- defer self.message_bus.unref(message);
393
-
394
- message.header.* = header;
395
- message.header.set_checksum_body(message.body());
396
- message.header.set_checksum();
397
-
398
- return message.ref();
399
- }
400
-
401
- /// Registers a session with the cluster for the client, if this has not yet been done.
402
- fn register(self: *Self) void {
403
- if (self.request_number > 0) return;
404
-
405
- const message = self.message_bus.get_message();
406
- defer self.message_bus.unref(message);
407
-
408
- // We will set parent, context, view and checksums only when sending for the first time:
409
- message.header.* = .{
410
- .client = self.id,
411
- .request = self.request_number,
412
- .cluster = self.cluster,
413
- .command = .request,
414
- .operation = .register,
415
- };
416
-
417
- assert(self.request_number == 0);
418
- self.request_number += 1;
419
-
420
- log.debug("{}: register: registering a session with the cluster", .{self.id});
421
-
422
- assert(self.request_queue.empty());
423
-
424
- self.request_queue.push_assume_capacity(.{
425
- .user_data = 0,
426
- .callback = undefined,
427
- .message = message.ref(),
428
- });
429
-
430
- self.send_request_for_the_first_time(message);
431
- }
432
-
433
- fn send_header_to_replica(self: *Self, replica: u8, header: Header) void {
434
- const message = self.create_message_from_header(header);
435
- defer self.message_bus.unref(message);
436
-
437
- self.send_message_to_replica(replica, message);
438
- }
439
-
440
- fn send_header_to_replicas(self: *Self, header: Header) void {
441
- const message = self.create_message_from_header(header);
442
- defer self.message_bus.unref(message);
443
-
444
- var replica: u8 = 0;
445
- while (replica < self.replica_count) : (replica += 1) {
446
- self.send_message_to_replica(replica, message);
447
- }
448
- }
449
-
450
- fn send_message_to_replica(self: *Self, replica: u8, message: *Message) void {
451
- log.debug("{}: sending {s} to replica {}: {}", .{
452
- self.id,
453
- @tagName(message.header.command),
454
- replica,
455
- message.header,
456
- });
457
-
458
- assert(replica < self.replica_count);
459
- assert(message.header.valid_checksum());
460
- assert(message.header.client == self.id);
461
- assert(message.header.cluster == self.cluster);
462
-
463
- self.message_bus.send_message_to_replica(replica, message);
464
- }
465
-
466
- fn send_request_for_the_first_time(self: *Self, message: *Message) void {
467
- assert(self.request_queue.head_ptr().?.message == message);
468
-
469
- assert(message.header.command == .request);
470
- assert(message.header.parent == 0);
471
- assert(message.header.context == 0);
472
- assert(message.header.request < self.request_number);
473
- assert(message.header.view == 0);
474
- assert(message.header.size <= config.message_size_max);
475
-
476
- // We set the message checksums only when sending the request for the first time,
477
- // which is when we have the checksum of the latest reply available to set as `parent`,
478
- // and similarly also the session number if requests were queued while registering:
479
- message.header.parent = self.parent;
480
- message.header.context = self.session;
481
- // We also try to include our highest view number, so we wait until the request is ready
482
- // to be sent for the first time. However, beyond that, it is not necessary to update
483
- // the view number again, for example if it should change between now and resending.
484
- message.header.view = self.view;
485
- message.header.set_checksum_body(message.body());
486
- message.header.set_checksum();
487
-
488
- // The checksum of this request becomes the parent of our next reply:
489
- self.parent = message.header.checksum;
490
-
491
- log.debug("{}: send_request_for_the_first_time: request={} checksum={}", .{
492
- self.id,
493
- message.header.request,
494
- message.header.checksum,
495
- });
496
-
497
- assert(!self.request_timeout.ticking);
498
- self.request_timeout.start();
499
-
500
- // If our view number is out of date, then the old leader will forward our request.
501
- // If the leader is offline, then our request timeout will fire and we will round-robin.
502
- self.send_message_to_replica(@intCast(u8, self.view % self.replica_count), message);
503
- }
504
- };
505
- }