tigerbeetle-node 0.3.3 → 0.5.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +21 -7
- package/dist/benchmark.js +1 -1
- package/dist/benchmark.js.map +1 -1
- package/dist/index.d.ts +22 -20
- package/dist/index.js +40 -18
- package/dist/index.js.map +1 -1
- package/dist/test.js +13 -1
- package/dist/test.js.map +1 -1
- package/package.json +12 -12
- package/scripts/postinstall.sh +2 -2
- package/src/benchmark.ts +4 -4
- package/src/index.ts +35 -9
- package/src/node.zig +139 -28
- package/src/test.ts +19 -5
- package/src/tigerbeetle/scripts/benchmark.sh +10 -3
- package/src/tigerbeetle/scripts/install.sh +2 -2
- package/src/tigerbeetle/scripts/install_zig.bat +109 -0
- package/src/tigerbeetle/scripts/install_zig.sh +21 -4
- package/src/tigerbeetle/scripts/vopr.bat +48 -0
- package/src/tigerbeetle/scripts/vopr.sh +33 -0
- package/src/tigerbeetle/src/benchmark.zig +74 -42
- package/src/tigerbeetle/src/cli.zig +136 -83
- package/src/tigerbeetle/src/config.zig +80 -26
- package/src/tigerbeetle/src/demo.zig +101 -78
- package/src/tigerbeetle/src/demo_01_create_accounts.zig +2 -7
- package/src/tigerbeetle/src/demo_02_lookup_accounts.zig +2 -7
- package/src/tigerbeetle/src/demo_03_create_transfers.zig +2 -7
- package/src/tigerbeetle/src/demo_04_create_transfers_two_phase_commit.zig +2 -5
- package/src/tigerbeetle/src/demo_05_accept_transfers.zig +2 -7
- package/src/tigerbeetle/src/demo_06_reject_transfers.zig +2 -7
- package/src/tigerbeetle/src/demo_07_lookup_transfers.zig +8 -0
- package/src/tigerbeetle/src/fifo.zig +20 -11
- package/src/tigerbeetle/src/io.zig +35 -22
- package/src/tigerbeetle/src/io_darwin.zig +701 -0
- package/src/tigerbeetle/src/main.zig +72 -25
- package/src/tigerbeetle/src/message_bus.zig +379 -456
- package/src/tigerbeetle/src/message_pool.zig +3 -3
- package/src/tigerbeetle/src/ring_buffer.zig +192 -37
- package/src/tigerbeetle/src/simulator.zig +317 -0
- package/src/tigerbeetle/src/state_machine.zig +846 -38
- package/src/tigerbeetle/src/storage.zig +488 -90
- package/src/tigerbeetle/src/test/cluster.zig +221 -0
- package/src/tigerbeetle/src/test/message_bus.zig +92 -0
- package/src/tigerbeetle/src/test/network.zig +182 -0
- package/src/tigerbeetle/src/test/packet_simulator.zig +371 -0
- package/src/tigerbeetle/src/test/state_checker.zig +142 -0
- package/src/tigerbeetle/src/test/state_machine.zig +71 -0
- package/src/tigerbeetle/src/test/storage.zig +375 -0
- package/src/tigerbeetle/src/test/time.zig +84 -0
- package/src/tigerbeetle/src/tigerbeetle.zig +6 -3
- package/src/tigerbeetle/src/time.zig +65 -0
- package/src/tigerbeetle/src/unit_tests.zig +14 -0
- package/src/tigerbeetle/src/vsr/client.zig +519 -0
- package/src/tigerbeetle/src/vsr/clock.zig +829 -0
- package/src/tigerbeetle/src/vsr/journal.zig +1368 -0
- package/src/tigerbeetle/src/vsr/marzullo.zig +306 -0
- package/src/tigerbeetle/src/vsr/replica.zig +4248 -0
- package/src/tigerbeetle/src/vsr.zig +601 -0
- package/src/tigerbeetle/LICENSE +0 -177
- package/src/tigerbeetle/README.md +0 -116
- package/src/tigerbeetle/src/client.zig +0 -319
- package/src/tigerbeetle/src/concurrent_ranges.zig +0 -162
- package/src/tigerbeetle/src/fixed_array_list.zig +0 -53
- package/src/tigerbeetle/src/io_async.zig +0 -600
- package/src/tigerbeetle/src/journal.zig +0 -567
- package/src/tigerbeetle/src/test_client.zig +0 -41
- package/src/tigerbeetle/src/test_main.zig +0 -118
- package/src/tigerbeetle/src/test_message_bus.zig +0 -132
- package/src/tigerbeetle/src/vr/journal.zig +0 -672
- package/src/tigerbeetle/src/vr/replica.zig +0 -3061
- package/src/tigerbeetle/src/vr.zig +0 -374
|
@@ -0,0 +1,701 @@
|
|
|
1
|
+
const std = @import("std");
|
|
2
|
+
const os = std.os;
|
|
3
|
+
const assert = std.debug.assert;
|
|
4
|
+
|
|
5
|
+
const FIFO = @import("fifo.zig").FIFO;
|
|
6
|
+
const Time = @import("time.zig").Time;
|
|
7
|
+
const buffer_limit = @import("io.zig").buffer_limit;
|
|
8
|
+
|
|
9
|
+
/// Darwin (macOS) IO backend built on kqueue/kevent.
/// Completions are intrusive nodes threaded through three FIFOs:
/// `timeouts` (waiting to expire), `io_pending` (blocked on readiness),
/// and `completed` (ready to have their callbacks invoked).
pub const IO = struct {
    // The kqueue file descriptor used to register for and poll readiness events.
    kq: os.fd_t,
    // Monotonic clock source used to arm and expire timeout operations.
    time: Time = .{},
    // Timeout completions awaiting expiry (checked in flush_timeouts()).
    timeouts: FIFO(Completion) = .{},
    // Completions whose callbacks are ready to run.
    completed: FIFO(Completion) = .{},
    // Completions that hit error.WouldBlock and are parked until kevent reports readiness.
    io_pending: FIFO(Completion) = .{},

    /// Open the kqueue. NOTE(review): `entries` and `flags` are unused here —
    /// presumably kept for signature parity with the io_uring backend in io.zig;
    /// confirm against the Linux implementation.
    pub fn init(entries: u12, flags: u32) !IO {
        const kq = try os.kqueue();
        assert(kq > -1);
        return IO{ .kq = kq };
    }

    /// Close the kqueue and poison the descriptor so a double-deinit trips the assert.
    pub fn deinit(self: *IO) void {
        assert(self.kq > -1);
        os.close(self.kq);
        self.kq = -1;
    }

    /// Pass all queued submissions to the kernel and peek for completions.
    pub fn tick(self: *IO) !void {
        // TODO This is a hack to block 1ms every 10ms tick on macOS while we fix `flush(false)`:
        // What we're seeing for the tigerbeetle-node client is that recv() syscalls never complete.
        return self.run_for_ns(std.time.ns_per_ms);
    }

    /// Pass all queued submissions to the kernel and run for `nanoseconds`.
    /// The `nanoseconds` argument is a u63 to allow coercion to the i64 used
    /// in the __kernel_timespec struct.
    pub fn run_for_ns(self: *IO, nanoseconds: u63) !void {
        var timed_out = false;
        var completion: Completion = undefined;
        // Local callback: flips the flag when the sentinel timeout below completes.
        const on_timeout = struct {
            fn callback(
                timed_out_ptr: *bool,
                _completion: *Completion,
                _result: TimeoutError!void,
            ) void {
                timed_out_ptr.* = true;
            }
        }.callback;

        // Submit a timeout which sets the timed_out value to true to terminate the loop below.
        self.timeout(
            *bool,
            &timed_out,
            on_timeout,
            &completion,
            nanoseconds,
        );

        // Loop until our timeout completion is processed above, which sets timed_out to true.
        // LLVM shouldn't be able to cache timed_out's value here since its address escapes above.
        while (!timed_out) {
            try self.flush(true);
        }
    }

    /// One pump of the event loop: expire timeouts, submit pending io via kevent,
    /// collect readiness events, then drain and invoke completed callbacks.
    /// `wait_for_completions` controls whether kevent may block (bounded by the
    /// nearest timeout) or must return immediately when there is nothing to do.
    fn flush(self: *IO, wait_for_completions: bool) !void {
        // Snapshot the head of io_pending; we only commit consumption of these
        // nodes after kevent() succeeds (see the splice below).
        var io_pending = self.io_pending.peek();
        var events: [256]os.Kevent = undefined;

        // Check timeouts and fill events with completions in io_pending
        // (they will be submitted through kevent).
        // Timeouts are expired here and possibly pushed to the completed queue.
        const next_timeout = self.flush_timeouts();
        const change_events = self.flush_io(&events, &io_pending);

        // Only call kevent() if we need to submit io events or if we need to wait for completions.
        if (change_events > 0 or self.completed.peek() == null) {
            // Zero timeouts for kevent() implies a non-blocking poll
            var ts = std.mem.zeroes(os.timespec);

            // We need to wait (not poll) on kevent if there's nothing to submit or complete.
            // We should never wait indefinitely (timeout_ptr = null for kevent) given:
            // - tick() is non-blocking (wait_for_completions = false)
            // - run_for_ns() always submits a timeout
            if (change_events == 0 and self.completed.peek() == null) {
                if (!wait_for_completions) return;
                const timeout_ns = next_timeout orelse @panic("kevent() blocking forever");
                ts.tv_nsec = @intCast(@TypeOf(ts.tv_nsec), timeout_ns % std.time.ns_per_s);
                ts.tv_sec = @intCast(@TypeOf(ts.tv_sec), timeout_ns / std.time.ns_per_s);
            }

            // Submit changelist events[0..change_events] and receive up to 256 events back.
            const new_events = try os.kevent(
                self.kq,
                events[0..change_events],
                events[0..events.len],
                &ts,
            );

            // Mark the io events submitted only after kevent() successfully processed them
            self.io_pending.out = io_pending;
            if (io_pending == null) {
                self.io_pending.in = null;
            }

            // Each returned event carries the Completion pointer we stashed in udata.
            for (events[0..new_events]) |event| {
                const completion = @intToPtr(*Completion, event.udata);
                completion.next = null;
                self.completed.push(completion);
            }
        }

        // Swap out the completed queue before draining so that callbacks which
        // push new completions enqueue onto a fresh list for the next flush.
        var completed = self.completed;
        self.completed = .{};
        while (completed.pop()) |completion| {
            (completion.callback)(self, completion);
        }
    }

    /// Translate pending io completions into kevent changelist entries.
    /// Consumes nodes through `io_pending_top` (the caller commits the
    /// consumption after kevent succeeds) and returns how many events were filled.
    fn flush_io(self: *IO, events: []os.Kevent, io_pending_top: *?*Completion) usize {
        for (events) |*event, flushed| {
            const completion = io_pending_top.* orelse return flushed;
            io_pending_top.* = completion.next;

            // Map each operation onto its fd/socket and the kqueue filter to wait on.
            const event_info = switch (completion.operation) {
                .accept => |op| [2]c_int{ op.socket, os.EVFILT_READ },
                .connect => |op| [2]c_int{ op.socket, os.EVFILT_WRITE },
                .read => |op| [2]c_int{ op.fd, os.EVFILT_READ },
                .write => |op| [2]c_int{ op.fd, os.EVFILT_WRITE },
                .recv => |op| [2]c_int{ op.socket, os.EVFILT_READ },
                .send => |op| [2]c_int{ op.socket, os.EVFILT_WRITE },
                else => @panic("invalid completion operation queued for io"),
            };

            // EV_ONESHOT: the filter auto-deletes after firing once; if the retried
            // syscall blocks again, the completion is re-pushed onto io_pending.
            event.* = .{
                .ident = @intCast(u32, event_info[0]),
                .filter = @intCast(i16, event_info[1]),
                .flags = os.EV_ADD | os.EV_ENABLE | os.EV_ONESHOT,
                .fflags = 0,
                .data = 0,
                .udata = @ptrToInt(completion),
            };
        }
        // All event slots were used; more io may remain in io_pending for the next flush.
        return events.len;
    }

    /// Move expired timeouts to the completed queue and return the smallest
    /// remaining time-until-expiry in nanoseconds (null if no timeouts are armed).
    fn flush_timeouts(self: *IO) ?u64 {
        var min_timeout: ?u64 = null;
        var timeouts: ?*Completion = self.timeouts.peek();
        while (timeouts) |completion| {
            // Advance before potentially unlinking `completion` below.
            timeouts = completion.next;

            // NOTE: We could cache `now` above the loop but monotonic() should be cheap to call.
            const now = self.time.monotonic();
            const expires = completion.operation.timeout.expires;

            // NOTE: remove() could be O(1) here with a doubly-linked-list
            // since we know the previous Completion.
            if (now >= expires) {
                self.timeouts.remove(completion);
                self.completed.push(completion);
                continue;
            }

            const timeout_ns = expires - now;
            if (min_timeout) |min_ns| {
                min_timeout = std.math.min(min_ns, timeout_ns);
            } else {
                min_timeout = timeout_ns;
            }
        }
        return min_timeout;
    }

    /// This struct holds the data needed for a single IO operation
    pub const Completion = struct {
        // Intrusive link for whichever FIFO currently owns this completion.
        next: ?*Completion,
        // Type-erased user context, recovered in the comptime-generated callback.
        context: ?*c_void,
        // Thunk that performs the operation and invokes the user callback.
        callback: fn (*IO, *Completion) void,
        operation: Operation,
    };

    // Tagged union describing every supported operation and its arguments.
    const Operation = union(enum) {
        accept: struct {
            socket: os.socket_t,
            flags: u32,
        },
        close: struct {
            fd: os.fd_t,
        },
        connect: struct {
            socket: os.socket_t,
            address: std.net.Address,
            // Set once connect() has been issued, so retries check SO_ERROR instead.
            initiated: bool,
        },
        fsync: struct {
            fd: os.fd_t,
            flags: u32,
        },
        openat: struct {
            fd: os.fd_t,
            path: [*:0]const u8,
            flags: u32,
            mode: os.mode_t,
        },
        read: struct {
            fd: os.fd_t,
            buf: [*]u8,
            len: u32,
            offset: u64,
        },
        recv: struct {
            socket: os.socket_t,
            buf: [*]u8,
            len: u32,
            flags: u32,
        },
        send: struct {
            socket: os.socket_t,
            buf: [*]const u8,
            len: u32,
            flags: u32,
        },
        timeout: struct {
            // Absolute monotonic deadline in nanoseconds.
            expires: u64,
        },
        write: struct {
            fd: os.fd_t,
            buf: [*]const u8,
            len: u32,
            offset: u64,
        },
    };

    /// Common submission path: initialize `completion` with a comptime-generated
    /// callback that performs the operation eagerly, requeues it onto io_pending
    /// on error.WouldBlock, and otherwise forwards the result to the user callback.
    /// Timeouts go to the timeouts queue; everything else is tried immediately
    /// by being pushed straight onto completed.
    fn submit(
        self: *IO,
        context: anytype,
        comptime callback: anytype,
        completion: *Completion,
        comptime operation_tag: std.meta.Tag(Operation),
        operation_data: anytype,
        comptime OperationImpl: type,
    ) void {
        const Context = @TypeOf(context);
        const onCompleteFn = struct {
            fn onComplete(io: *IO, _completion: *Completion) void {
                // Perform the actual operaton
                const op_data = &@field(_completion.operation, @tagName(operation_tag));
                const result = OperationImpl.doOperation(op_data);

                // Requeue onto io_pending if error.WouldBlock
                switch (operation_tag) {
                    .accept, .connect, .read, .write, .send, .recv => {
                        _ = result catch |err| switch (err) {
                            error.WouldBlock => {
                                _completion.next = null;
                                io.io_pending.push(_completion);
                                return;
                            },
                            else => {},
                        };
                    },
                    else => {},
                }

                // Complete the Completion
                return callback(
                    @intToPtr(Context, @ptrToInt(_completion.context)),
                    _completion,
                    result,
                );
            }
        }.onComplete;

        completion.* = .{
            .next = null,
            .context = context,
            .callback = onCompleteFn,
            .operation = @unionInit(Operation, @tagName(operation_tag), operation_data),
        };

        switch (operation_tag) {
            .timeout => self.timeouts.push(completion),
            else => self.completed.push(completion),
        }
    }

    pub const AcceptError = os.AcceptError;

    /// Accept a connection on `socket`, delivering the new socket to `callback`.
    pub fn accept(
        self: *IO,
        comptime Context: type,
        context: Context,
        comptime callback: fn (
            context: Context,
            completion: *Completion,
            result: AcceptError!os.socket_t,
        ) void,
        completion: *Completion,
        socket: os.socket_t,
        flags: u32,
    ) void {
        self.submit(
            context,
            callback,
            completion,
            .accept,
            .{
                .socket = socket,
                .flags = flags,
            },
            struct {
                fn doOperation(op: anytype) AcceptError!os.socket_t {
                    return os.accept(op.socket, null, null, op.flags);
                }
            },
        );
    }

    pub const CloseError = error{
        FileDescriptorInvalid,
        DiskQuota,
        InputOutput,
        NoSpaceLeft,
    } || os.UnexpectedError;

    /// Close `fd` asynchronously, reporting the result to `callback`.
    pub fn close(
        self: *IO,
        comptime Context: type,
        context: Context,
        comptime callback: fn (
            context: Context,
            completion: *Completion,
            result: CloseError!void,
        ) void,
        completion: *Completion,
        fd: os.fd_t,
    ) void {
        self.submit(
            context,
            callback,
            completion,
            .close,
            .{
                .fd = fd,
            },
            struct {
                fn doOperation(op: anytype) CloseError!void {
                    return switch (os.errno(os.system.close(op.fd))) {
                        0 => {},
                        os.EBADF => error.FileDescriptorInvalid,
                        os.EINTR => {}, // A success, see https://github.com/ziglang/zig/issues/2425
                        os.EIO => error.InputOutput,
                        else => |errno| os.unexpectedErrno(errno),
                    };
                }
            },
        );
    }

    pub const ConnectError = os.ConnectError;

    /// Connect `socket` to `address`, reporting the result to `callback`.
    pub fn connect(
        self: *IO,
        comptime Context: type,
        context: Context,
        comptime callback: fn (
            context: Context,
            completion: *Completion,
            result: ConnectError!void,
        ) void,
        completion: *Completion,
        socket: os.socket_t,
        address: std.net.Address,
    ) void {
        self.submit(
            context,
            callback,
            completion,
            .connect,
            .{
                .socket = socket,
                .address = address,
                .initiated = false,
            },
            struct {
                fn doOperation(op: anytype) ConnectError!void {
                    // Don't call connect after being rescheduled by io_pending as it gives EISCONN.
                    // Instead, check the socket error to see if has been connected successfully.
                    const result = switch (op.initiated) {
                        true => os.getsockoptError(op.socket),
                        else => os.connect(op.socket, &op.address.any, op.address.getOsSockLen()),
                    };

                    op.initiated = true;
                    return result;
                }
            },
        );
    }

    pub const FsyncError = error{
        FileDescriptorInvalid,
        DiskQuota,
        ArgumentsInvalid,
        InputOutput,
        NoSpaceLeft,
        ReadOnlyFileSystem,
        AccessDenied,
    } || os.UnexpectedError;

    /// Flush `fd` to stable storage, reporting the result to `callback`.
    pub fn fsync(
        self: *IO,
        comptime Context: type,
        context: Context,
        comptime callback: fn (
            context: Context,
            completion: *Completion,
            result: FsyncError!void,
        ) void,
        completion: *Completion,
        fd: os.fd_t,
        flags: u32,
    ) void {
        self.submit(
            context,
            callback,
            completion,
            .fsync,
            .{
                .fd = fd,
                .flags = flags,
            },
            struct {
                fn doOperation(op: anytype) FsyncError!void {
                    // F_FULLFSYNC forces the drive to flush its cache on Darwin;
                    // fall back to plain fsync() if fcntl fails.
                    _ = os.fcntl(op.fd, os.F_FULLFSYNC, 1) catch return os.fsync(op.fd);
                }
            },
        );
    }

    pub const OpenatError = error{
        AccessDenied,
        FileDescriptorInvalid,
        DeviceBusy,
        PathAlreadyExists,
        FileTooBig,
        ArgumentsInvalid,
        IsDir,
        SymLinkLoop,
        ProcessFdQuotaExceeded,
        NameTooLong,
        SystemFdQuotaExceeded,
        NoDevice,
        FileNotFound,
        SystemResources,
        NoSpaceLeft,
        NotDir,
        FileLocksNotSupported,
        BadPathName,
        InvalidUtf8,
        WouldBlock,
    } || os.UnexpectedError;

    /// Open `path` relative to directory `fd`, delivering the new fd to `callback`.
    pub fn openat(
        self: *IO,
        comptime Context: type,
        context: Context,
        comptime callback: fn (
            context: Context,
            completion: *Completion,
            result: OpenatError!os.fd_t,
        ) void,
        completion: *Completion,
        fd: os.fd_t,
        path: [*:0]const u8,
        flags: u32,
        mode: os.mode_t,
    ) void {
        self.submit(
            context,
            callback,
            completion,
            .openat,
            .{
                .fd = fd,
                .path = path,
                .mode = mode,
                .flags = flags,
            },
            struct {
                fn doOperation(op: anytype) OpenatError!os.fd_t {
                    return os.openatZ(op.fd, op.path, op.flags, op.mode);
                }
            },
        );
    }

    pub const ReadError = error{
        WouldBlock,
        NotOpenForReading,
        ConnectionResetByPeer,
        Alignment,
        InputOutput,
        IsDir,
        SystemResources,
        Unseekable,
    } || os.UnexpectedError;

    /// Positioned read from `fd` at `offset` into `buffer`, delivering the byte count to `callback`.
    pub fn read(
        self: *IO,
        comptime Context: type,
        context: Context,
        comptime callback: fn (
            context: Context,
            completion: *Completion,
            result: ReadError!usize,
        ) void,
        completion: *Completion,
        fd: os.fd_t,
        buffer: []u8,
        offset: u64,
    ) void {
        self.submit(
            context,
            callback,
            completion,
            .read,
            .{
                .fd = fd,
                .buf = buffer.ptr,
                .len = @intCast(u32, buffer_limit(buffer.len)),
                .offset = offset,
            },
            struct {
                fn doOperation(op: anytype) ReadError!usize {
                    // Raw pread so that errno can be mapped manually; retry on EINTR.
                    while (true) {
                        const rc = os.system.pread(
                            op.fd,
                            op.buf,
                            op.len,
                            @bitCast(isize, op.offset),
                        );
                        return switch (os.errno(rc)) {
                            0 => @intCast(usize, rc),
                            os.EINTR => continue,
                            os.EAGAIN => error.WouldBlock,
                            os.EBADF => error.NotOpenForReading,
                            os.ECONNRESET => error.ConnectionResetByPeer,
                            os.EFAULT => unreachable,
                            os.EINVAL => error.Alignment,
                            os.EIO => error.InputOutput,
                            os.EISDIR => error.IsDir,
                            os.ENOBUFS => error.SystemResources,
                            os.ENOMEM => error.SystemResources,
                            os.ENXIO => error.Unseekable,
                            os.EOVERFLOW => error.Unseekable,
                            os.ESPIPE => error.Unseekable,
                            else => |err| os.unexpectedErrno(err),
                        };
                    }
                }
            },
        );
    }

    pub const RecvError = os.RecvFromError;

    /// Receive from `socket` into `buffer`, delivering the byte count to `callback`.
    pub fn recv(
        self: *IO,
        comptime Context: type,
        context: Context,
        comptime callback: fn (
            context: Context,
            completion: *Completion,
            result: RecvError!usize,
        ) void,
        completion: *Completion,
        socket: os.socket_t,
        buffer: []u8,
        flags: u32,
    ) void {
        self.submit(
            context,
            callback,
            completion,
            .recv,
            .{
                .socket = socket,
                .buf = buffer.ptr,
                .len = @intCast(u32, buffer_limit(buffer.len)),
                .flags = flags,
            },
            struct {
                fn doOperation(op: anytype) RecvError!usize {
                    return os.recv(op.socket, op.buf[0..op.len], op.flags);
                }
            },
        );
    }

    pub const SendError = os.SendError;

    /// Send `buffer` on `socket`, delivering the byte count to `callback`.
    pub fn send(
        self: *IO,
        comptime Context: type,
        context: Context,
        comptime callback: fn (
            context: Context,
            completion: *Completion,
            result: SendError!usize,
        ) void,
        completion: *Completion,
        socket: os.socket_t,
        buffer: []const u8,
        flags: u32,
    ) void {
        self.submit(
            context,
            callback,
            completion,
            .send,
            .{
                .socket = socket,
                .buf = buffer.ptr,
                .len = @intCast(u32, buffer_limit(buffer.len)),
                .flags = flags,
            },
            struct {
                fn doOperation(op: anytype) SendError!usize {
                    return os.send(op.socket, op.buf[0..op.len], op.flags);
                }
            },
        );
    }

    pub const TimeoutError = error{Canceled} || os.UnexpectedError;

    /// Arm a timeout that fires `nanoseconds` from now, invoking `callback` on expiry.
    pub fn timeout(
        self: *IO,
        comptime Context: type,
        context: Context,
        comptime callback: fn (
            context: Context,
            completion: *Completion,
            result: TimeoutError!void,
        ) void,
        completion: *Completion,
        nanoseconds: u63,
    ) void {
        self.submit(
            context,
            callback,
            completion,
            .timeout,
            .{
                // Store the absolute monotonic deadline; flush_timeouts() compares against it.
                .expires = self.time.monotonic() + nanoseconds,
            },
            struct {
                fn doOperation(_: anytype) TimeoutError!void {
                    return; // timeouts don't have errors for now
                }
            },
        );
    }

    pub const WriteError = os.PWriteError;

    /// Positioned write of `buffer` to `fd` at `offset`, delivering the byte count to `callback`.
    pub fn write(
        self: *IO,
        comptime Context: type,
        context: Context,
        comptime callback: fn (
            context: Context,
            completion: *Completion,
            result: WriteError!usize,
        ) void,
        completion: *Completion,
        fd: os.fd_t,
        buffer: []const u8,
        offset: u64,
    ) void {
        self.submit(
            context,
            callback,
            completion,
            .write,
            .{
                .fd = fd,
                .buf = buffer.ptr,
                .len = @intCast(u32, buffer_limit(buffer.len)),
                .offset = offset,
            },
            struct {
                fn doOperation(op: anytype) WriteError!usize {
                    return os.pwrite(op.fd, op.buf[0..op.len], op.offset);
                }
            },
        );
    }
};
|