uringmachine 0.29.1 → 0.30.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
checksums.yaml CHANGED
@@ -1,7 +1,7 @@
1
1
  ---
2
2
  SHA256:
3
- metadata.gz: 248a2f0e2a780904f26f0183d08237039e18b76b736e7b3563c2699e30041d65
4
- data.tar.gz: 2020b0c8cc9be25becddef80fa4c66f0bc49e30d0791ec2b2b2e618fbf3ab0b6
3
+ metadata.gz: a97099f1d89b2333b8be056c91acabad63fad122ce4c802a666e4abb10aca93b
4
+ data.tar.gz: '095878ee7df374be5dc87b74ea636b3def2b611006c76c7d86ae7c00ad064f47'
5
5
  SHA512:
6
- metadata.gz: 543a8adf2c628f080f9ec249e11ff7e4bd969f0eca9e47285ba0ef20c28575ddd6d1902a3890d49ccb1ccf5100d13cd721bbd03c472bde15205344524fd7795c
7
- data.tar.gz: 4afbe860f29187507c57a8e319d02f2f843d27aa7d65176f0e0040914bf4d8e68ce894d4663b13d3ace530bf2aa4fe3ca547e58d5cbcb1daf5d89f6d5bb94ae2
6
+ metadata.gz: 339349fd4116011334517f124201a1cf425762c1bafe33ba61fa3eaf175fa5064512f780f8354f8fc317515bab6c4914cb41fba2022d23c17aac7e08d18cad7b
7
+ data.tar.gz: 02c7b30e4143f07382f2788a9f242e6f051f57b438fc3ecc63153a13f318bbbab69e5239add2af31e277c68b21483be60bd542863c8f7aa358d7fa1b0c0793e9
data/CHANGELOG.md CHANGED
@@ -1,3 +1,15 @@
1
+ # 0.30.0 2026-03-23
2
+
3
+ - Add `Stream#each`
4
+ - Add `UM#splice`, `UM#tee`, `UM#fsync`
5
+ - Add `UM#tcp_listen`, `UM#tcp_connect`
6
+
7
+ # 0.29.2 2026-03-15
8
+
9
+ - Add `Stream#consumed`, `Stream#pending`
10
+ - Add `UM#stream`
11
+ - Add `Stream#skip`
12
+
1
13
  # 0.29.1 2026-03-14
2
14
 
3
15
  - Add support for exception instance in `#timeout`
data/README.md CHANGED
@@ -32,11 +32,13 @@ implementation that allows integration with the entire Ruby ecosystem.
32
32
  ## Features
33
33
 
34
34
  - Automatic fiber switching when performing blocking I/O operations.
35
- - Automatic cancellation using of ongoing operations with Ruby exceptions.
35
+ - Automatic cancellation of ongoing operations with Ruby exceptions.
36
36
  - General-purpose API for cancelling any operation on timeout.
37
37
  - Excellent performance characteristics for concurrent I/O-bound applications.
38
38
  - `Fiber::Scheduler` implementation to automatically integrate with the Ruby
39
39
  ecosystem in a transparent fashion.
40
+ - Read streams with automatic buffer management.
41
+ - Optimized I/O for encrypted SSL connections.
40
42
 
41
43
  ## Design
42
44
 
@@ -61,8 +63,8 @@ allow any library that performs I/O using standard-library classes such as `IO`,
61
63
 
62
64
  To install UringMachine, simply run `gem install uringmachine` or `bundle add
63
65
  uringmachine` in your project directory. Note: to use UringMachine, you'll need
64
- a Linux machine with a minimum kernel version of 6.7. Some features require
65
- newer kernel versions.
66
+ a Linux machine with a minimum Linux kernel version of 6.7. Some features
67
+ require newer kernel versions.
66
68
 
67
69
  To perform I/O using UringMachine, simply create an instance:
68
70
 
@@ -79,7 +81,7 @@ You can perform I/O by directly making method calls such as `write` or `read`
79
81
  ```ruby
80
82
  # Most UringMachine instance methods will need you to provide a file descriptor.
81
83
  # Here we print a message to STDOUT. Note the explicit line break:
82
- machine.write(STDOUT, "Hello, world!\n")
84
+ machine.write(UM::STDOUT_FILENO, "Hello, world!\n")
83
85
  ```
84
86
 
85
87
  UringMachine provides an I/O interface that is to a large degree equivalent to
@@ -91,14 +93,14 @@ the Unix standard C interface:
91
93
  fd = machine.open('foo.txt', UM::O_RDONLY)
92
94
  buf = +''
93
95
  size = machine.read(fd, buf, 8192)
94
- machine.write(STDOUT, "File content: #{buf.inspect}")
96
+ machine.write(UM::STDOUT_FILENO, "File content: #{buf.inspect}")
95
97
  machine.close(fd)
96
98
 
97
99
  # Or alternatively (with automatic file closing):
98
100
  machine.open('foo.txt', UM::O_RDONLY) do |fd|
99
101
  buf = +''
100
102
  size = machine.read(fd, buf, 8192)
101
- machine.write(STDOUT, "File content: #{buf.inspect}")
103
+ machine.write(UM::STDOUT_FILENO, "File content: #{buf.inspect}")
102
104
  end
103
105
  ```
104
106
 
@@ -284,6 +286,104 @@ fiber = Fiber.schedule do
284
286
  end
285
287
  ```
286
288
 
289
+ ## Read Streams
290
+
291
+ A UringMachine stream is used to efficiently read from a socket or other file
292
+ descriptor. Streams are ideal for implementing the read side of protocols, and
293
+ provide an API that is useful for both line-based protocols and binary
294
+ (frame-based) protocols.
295
+
296
+ A stream is associated with a UringMachine instance and a target file descriptor
297
+ (see also [stream modes](#stream-modes) below). Behind the scenes, streams take
298
+ advantage of io_uring's registered buffers feature, and more recently, the
299
+ introduction of [incremental buffer
300
+ consumption](https://github.com/axboe/liburing/wiki/What's-new-with-io_uring-in-6.11-and-6.12#incremental-provided-buffer-consumption).
301
+
302
+ When streams are used, UringMachine automatically manages the buffers it
303
+ provides to the kernel, maximizing buffer reuse and minimizing allocations.
304
+ UringMachine also responds to stress conditions (increased incoming traffic) by
305
+ automatically provisioning additional buffers.
306
+
307
+ To create a stream for a given fd, use `UM#stream`:
308
+
309
+ ```ruby
310
+ stream = machine.stream(fd)
311
+
312
+ # you can also provide a block that will be passed the stream instance:
313
+ machine.stream(fd) { |s| do_something_with(s) }
314
+
315
+ # you can also instantiate a stream directly:
316
+ stream = UM::Stream.new(machine, fd)
317
+ ```
318
+
319
+ The following API is used to interact with the stream:
320
+
321
+ ```ruby
322
+ # Read until a newline character is encountered:
323
+ line = stream.get_line(0)
324
+
325
+ # Read line with a maximum length of 13 bytes:
326
+ line = stream.get_line(13)
327
+
328
+ # Read all data:
329
+ buf = stream.get_string(0)
330
+
331
+ # Read exactly 13 bytes:
332
+ buf = stream.get_string(13)
333
+
334
+ # Read up to 13 bytes:
335
+ buf = stream.get_string(-13)
336
+
337
+ # Skip 3 bytes:
338
+ stream.skip(3)
339
+ ```
340
+
341
+ Here's an example of how a basic HTTP request parser might be implemented
342
+ using a stream:
343
+
344
+ ```ruby
345
+ def parse_http_request_headers(stream)
346
+ request_line = stream.get_line(0)
347
+ m = request_line.match(REQUEST_LINE_RE)
348
+ return nil if !m
349
+
350
+ headers = {
351
+ ':method' => m[1],
352
+ ':path' => m[2],
353
+ ':protocol' => m[3]
354
+ }
355
+
356
+ while true
357
+ line = stream.get_line(0)
358
+ break if !line || line.empty?
359
+
360
+ m = line.match(HEADER_RE)
361
+ headers[m[1].downcase] = m[2]
362
+ end
363
+ headers
364
+ end
365
+ ```
366
+
367
+ ### Stream modes
368
+
369
+ Stream modes allow streams to be transport agnostic. Currently streams support
370
+ three modes:
371
+
372
+ - `:bp_read` - use the buffer pool, read data using multishot read
373
+ (this is the default mode).
374
+ - `:bp_recv` - use the buffer pool, read data using multishot recv.
375
+ - `:ssl` - read from an `SSLSocket` object.
376
+
377
+ The mode is specified as an additional argument to `Stream.new`:
378
+
379
+ ```ruby
380
+ # stream using recv:
381
+ stream = machine.stream(fd, :bp_recv)
382
+
383
+ # stream on an SSL socket:
384
+ stream = machine.stream(ssl, :ssl)
385
+ ```
386
+
287
387
  ## Performance
288
388
 
289
389
  [Detailed benchmarks](benchmark/README.md)
data/TODO.md CHANGED
@@ -1,68 +1,10 @@
1
1
  ## immediate
2
2
 
3
- - Add support for exception instances in `#timeout`.
4
- - Add support for returning a value on timeout
5
-
6
- Since to do this safely we need to actually raise an exception that wraps the
7
- value, rescue it and return the value, we might want a separate method that
8
- wraps `#timeout`:
9
-
10
- ```ruby
11
- TimeoutValueError < StandardError
12
-
13
- def timeout_with_value(interval, value, &block)
14
- timeout_error = TimeoutValueError
15
- timeout(interval, timeout_error, &block)
16
- rescue TimeoutValueError => e
17
- raise if e != timeout_error
18
-
19
- value
20
- end
21
- ```
22
-
23
3
  - Add tests for support for Set in `machine#await`
24
4
  - Add tests for support for Set, Array in `machine#join`
25
5
  - Add `#read_file` for reading entire file
26
6
  - Add `#write_file` for writing entire file
27
7
 
28
- - (?) Fix all futex value (Queue, Mutex) to be properly aligned
29
-
30
- <<<<<<< HEAD
31
- =======
32
- ## Buffer rings - automatic management
33
-
34
- - Take the buffer_pool branch, rewrite it
35
- - Allow multiple stream modes:
36
- - :buffer_pool - uses buffer rings
37
- - :ssl - read from an SSL connection (`SSLSocket`)
38
- - :io - read from an `IO`
39
-
40
- The API will look something like:
41
-
42
- ```ruby
43
- # The mode is selected automatically according to the given target
44
-
45
- stream = UM::Stream.new(machine, fd) # buffer_pool mode (read)
46
- stream = UM::Stream.new(machine, fd, :recv) # buffer_pool mode (recv)
47
- stream = UM::Stream.new(machine, ssl_sock) # SSLSocket mode
48
- stream = UM::Stream.new(machine, conn) # IO mode
49
- stream = UM::Stream.new(machine, str) # string mode
50
- stream = UM::Stream.new(machine, io_buf) # IO:Buffer mode
51
- ```
52
-
53
- This can be very useful in testing of stuff such as protocol implementations:
54
-
55
- ```ruby
56
- stream = UM::Stream.new(machine, "GET /foo HTTP/1.1\r\nHost: bar.com\r\n")
57
- ```
58
-
59
- So basically the stream is tied to a machine, and that means it can only be used
60
- on the thread with which the machine is associated. It is not thread-safe. (This
61
- is incidentally true also for most of the UringMachine instance methods!)
62
-
63
- Continued discussion in docs/design/buffer_pool.md
64
-
65
- >>>>>>> 04d9eb7 (Docs)
66
8
  ## Balancing I/O with the runqueue
67
9
 
68
10
  - in some cases where there are many entries in the runqueue, this can
@@ -90,6 +32,8 @@ Continued discussion in docs/design/buffer_pool.md
90
32
  debouncer = machine.debounce { }
91
33
  ```
92
34
 
35
+ - happy eyeballs algo for TCP connect
36
+
93
37
  - read multiple files
94
38
 
95
39
  ```ruby
@@ -100,12 +44,31 @@ Continued discussion in docs/design/buffer_pool.md
100
44
  machine.read_files(*fns) #=> { fn1:, fn2:, fn3:, ...}
101
45
  ```
102
46
 
47
+ - more generally, a DSL for expressing batch operations:
48
+
49
+ ```ruby
50
+ result = machine.batch do |b|
51
+ fns.each { b[it] = read_file(b, it) }
52
+ end
53
+ #=> { fn1 => data1, fn2 => data2, ... }
54
+
55
+ # we can also imagine performing operations in sequence using linking:
56
+ result = machine.batch {
57
+ m.
58
+ }
59
+
60
+ end
61
+ ```
62
+
103
63
  ## polyvalent select
104
64
 
105
65
  - select on multiple queues (ala Go)
106
66
  - select on mixture of queues and fds
67
+ - select on fibers:
68
+ - select fibers that are done
69
+ - select first done fiber
107
70
 
108
- ## ops
71
+ ## ops still not implemented
109
72
 
110
73
  - splice / - tee
111
74
  - sendto
@@ -124,31 +87,7 @@ Continued discussion in docs/design/buffer_pool.md
124
87
  When doing a `call`, we need to provide a mailbox for the response. can this be
125
88
  automatic?
126
89
 
127
- ## streams
128
-
129
- We're still missing:
130
-
131
- - limit on line length in `get_line`
132
- - ability to supply buffer to `get_line` and `get_string`
133
- - allow read to eof, maybe with `read_to_eof`
134
-
135
- For the sake of performance, simplicity and explicitness, we change the API as
136
- follows:
137
-
138
- ```ruby
139
- stream.get_line(buf, limit)
140
- # the defaults:
141
- stream.get_line(nil, -1)
142
-
143
- stream.get_string(len, buf)
144
- # defaults:
145
- stream.get_string(len, nil)
146
-
147
- # and
148
- stream.read_to_eof(buf)
149
- # defaults:
150
- stream.read_to_eof(nil)
151
- ```
90
+ ##
152
91
 
153
92
  ## Syntax / pattern for launching/supervising multiple operations
154
93
 
@@ -167,6 +106,5 @@ machine.shift_select(*queues) #=> [result, queue]
167
106
  ```ruby
168
107
  # addrs: [['1.1.1.1', 80], ['2.2.2.2', 80]]
169
108
  # ['1.1.1.1:80', '2.2.2.2:80']
170
- tcp_connect_happy_eyeballs(*addrs)
109
+ tcp_connect_he(*addrs)
171
110
  ```
172
-
@@ -0,0 +1,128 @@
1
+ # frozen_string_literal: true
2
+
3
+ require_relative './common'
4
+ require 'socket'
5
+ require 'openssl'
6
+ require 'localhost/authority'
7
+
8
+ GROUPS = 48
9
+ ITERATIONS = 5000
10
+
11
+ SIZE = 1 << 14
12
+ DATA = '*' * SIZE
13
+
14
+ class UMBenchmark
15
+ def server_ctx
16
+ @server_ctx ||= Localhost::Authority.fetch.server_context
17
+ end
18
+
19
+ def ssl_wrap(sock, ctx)
20
+ OpenSSL::SSL::SSLSocket.new(sock, ctx).tap { it.sync_close = true }
21
+ end
22
+
23
+ def ssl_socketpair(machine)
24
+ sock1, sock2 = Socket.socketpair(:AF_UNIX, :SOCK_STREAM, 0)
25
+ ssl1 = ssl_wrap(sock1, server_ctx)
26
+ ssl2 = ssl_wrap(sock2, OpenSSL::SSL::SSLContext.new)
27
+
28
+ if !machine
29
+ t = Thread.new { ssl1.accept rescue nil }
30
+ ssl2.connect
31
+ t.join
32
+ else
33
+ machine.ssl_set_bio(ssl1)
34
+ machine.ssl_set_bio(ssl2)
35
+ f = machine.spin { ssl1.accept rescue nil }
36
+ ssl2.connect
37
+ machine.join(f)
38
+ end
39
+ [ssl1, ssl2]
40
+ end
41
+
42
+ def do_threads(threads, ios)
43
+ GROUPS.times do
44
+ r, w = ssl_socketpair(nil)
45
+ threads << Thread.new do
46
+ ITERATIONS.times { w.write(DATA) }
47
+ w.close
48
+ end
49
+ threads << Thread.new do
50
+ ITERATIONS.times { r.readpartial(SIZE) }
51
+ r.close
52
+ end
53
+ end
54
+ end
55
+
56
+ def do_thread_pool(thread_pool, ios)
57
+ GROUPS.times do
58
+ r, w = ssl_socketpair(nil)
59
+ r.sync = true
60
+ w.sync = true
61
+ ios << r << w
62
+ ITERATIONS.times {
63
+ thread_pool.queue { w.write(DATA) }
64
+ thread_pool.queue { r.readpartial(SIZE) }
65
+ }
66
+ end
67
+ end
68
+
69
+ def do_scheduler(scheduler, ios)
70
+ GROUPS.times do
71
+ r, w = ssl_socketpair(nil)
72
+ r.sync = true
73
+ w.sync = true
74
+ Fiber.schedule do
75
+ ITERATIONS.times { w.write(DATA) }
76
+ w.close
77
+ end
78
+ Fiber.schedule do
79
+ ITERATIONS.times { r.readpartial(SIZE) }
80
+ r.close
81
+ end
82
+ end
83
+ end
84
+
85
+ def do_scheduler_x(div, scheduler, ios)
86
+ (GROUPS/div).times do
87
+ r, w = ssl_socketpair(nil)
88
+ r.sync = true
89
+ w.sync = true
90
+ Fiber.schedule do
91
+ ITERATIONS.times { w.write(DATA) }
92
+ w.close
93
+ end
94
+ Fiber.schedule do
95
+ ITERATIONS.times { r.readpartial(SIZE) }
96
+ r.close
97
+ end
98
+ end
99
+ end
100
+
101
+ def do_um(machine, fibers, fds)
102
+ GROUPS.times do
103
+ r, w = ssl_socketpair(machine)
104
+ fibers << machine.spin do
105
+ ITERATIONS.times { machine.ssl_write(w, DATA, SIZE) }
106
+ machine.close_async(w)
107
+ end
108
+ fibers << machine.spin do
109
+ ITERATIONS.times { machine.ssl_read(r, +'', SIZE) }
110
+ machine.close_async(r)
111
+ end
112
+ end
113
+ end
114
+
115
+ def do_um_x(div, machine, fibers, fds)
116
+ (GROUPS/div).times do
117
+ r, w = ssl_socketpair(machine)
118
+ fibers << machine.spin do
119
+ ITERATIONS.times { machine.ssl_write(w, DATA, SIZE) }
120
+ machine.close_async(w)
121
+ end
122
+ fibers << machine.spin do
123
+ ITERATIONS.times { machine.ssl_read(r, +'', SIZE) }
124
+ machine.close_async(r)
125
+ end
126
+ end
127
+ end
128
+ end
@@ -0,0 +1,76 @@
1
+ # frozen_string_literal: true
2
+
3
+ require_relative './common'
4
+ require 'securerandom'
5
+
6
+ C = ENV['C']&.to_i || 50
7
+ I = 10
8
+ puts "C=#{C}"
9
+
10
+ class UMBenchmark
11
+ CONTAINER_NAME = "redis-#{SecureRandom.hex}"
12
+
13
+ def start_redis_server
14
+ `docker run --name #{CONTAINER_NAME} -d -p 6379:6379 redis:latest`
15
+ end
16
+
17
+ def stop_redis_server
18
+ `docker stop #{CONTAINER_NAME}`
19
+ end
20
+
21
+ def create_redis_conn(retries = 0)
22
+ Redis.new
23
+ rescue
24
+ if retries < 3
25
+ sleep 0.5
26
+ create_redis_conn(retries + 1)
27
+ else
28
+ raise
29
+ end
30
+ end
31
+
32
+ def query_redis(conn)
33
+ conn.set('abc', 'def')
34
+ p conn.get('abc')
35
+ end
36
+
37
+ def with_container
38
+ start_redis_server
39
+ sleep 0.5
40
+ yield
41
+ rescue Exception => e
42
+ p e
43
+ p e.backtrace
44
+ ensure
45
+ stop_redis_server
46
+ end
47
+
48
+ def benchmark
49
+ with_container {
50
+ Benchmark.bm { run_benchmarks(it) }
51
+ }
52
+ end
53
+
54
+ # def do_threads(threads, ios)
55
+ # C.times.map do
56
+ # threads << Thread.new do
57
+ # conn = create_redis_conn
58
+ # I.times { query_redis(conn) }
59
+ # ensure
60
+ # conn.close
61
+ # end
62
+ # end
63
+ # end
64
+
65
+ def do_scheduler(scheduler, ios)
66
+ return if !scheduler.is_a?(UM::FiberScheduler)
67
+ C.times do
68
+ Fiber.schedule do
69
+ conn = create_redis_conn
70
+ I.times { query_redis(conn) }
71
+ ensure
72
+ conn.close
73
+ end
74
+ end
75
+ end
76
+ end
data/benchmark/common.rb CHANGED
@@ -9,6 +9,7 @@ gemfile do
9
9
  gem 'io-event'
10
10
  gem 'async'
11
11
  gem 'pg'
12
+ gem 'redis'
12
13
  gem 'gvltools'
13
14
  gem 'openssl'
14
15
  gem 'localhost'
@@ -62,7 +62,7 @@ end
62
62
 
63
63
  require 'stringio'
64
64
 
65
- RE_REQUEST_LINE = /^([a-z]+)\s+([^\s]+)\s+(http\/[0-9\.]{1,3})/i
65
+ RE_REQUEST_LINE = /^([a-z]+)\s+([^\s]+)\s+(http\/1\.1)/i
66
66
  RE_HEADER_LINE = /^([a-z0-9\-]+)\:\s+(.+)/i
67
67
 
68
68
  def get_line(fd, sio, buffer)
@@ -137,7 +137,7 @@ def stream_parse_headers(fd)
137
137
  return nil if !headers
138
138
 
139
139
  while true
140
- line = stream.get_line(buf, 0)
140
+ line = stream.get_line(0)
141
141
  break if line.empty?
142
142
 
143
143
  m = line.match(RE_HEADER_LINE)
@@ -150,7 +150,7 @@ def stream_parse_headers(fd)
150
150
  end
151
151
 
152
152
  def stream_get_request_line(stream, buf)
153
- line = stream.get_line(buf, 0)
153
+ line = stream.get_line(0)
154
154
 
155
155
  m = line.match(RE_REQUEST_LINE)
156
156
  return nil if !m
@@ -57,7 +57,7 @@ buffers, to using managed buffers from the buffer pool.
57
57
 
58
58
  - The buffer pool is created and managed automatically. No API is involved.
59
59
  -
60
- -
60
+ -
61
61
 
62
62
  - To use the buffer pool, two dedicated APIs are added:
63
63
 
data/ext/um/um.c CHANGED
@@ -1285,6 +1285,57 @@ VALUE um_statx(struct um *machine, int dirfd, VALUE path, int flags, unsigned in
1285
1285
  return statx_to_hash(&stat);
1286
1286
  }
1287
1287
 
1288
+ VALUE um_splice(struct um *machine, int in_fd, int out_fd, uint nbytes) {
1289
+ struct um_op *op = um_op_acquire(machine);
1290
+ um_prep_op(machine, op, OP_SPLICE, 2, 0);
1291
+ struct io_uring_sqe *sqe = um_get_sqe(machine, op);
1292
+ io_uring_prep_splice(sqe, in_fd, -1, out_fd, -1, nbytes, 0);
1293
+
1294
+ VALUE ret = um_yield(machine);
1295
+
1296
+ if (likely(um_verify_op_completion(machine, op, false))) ret = INT2NUM(op->result.res);
1297
+ um_op_release(machine, op);
1298
+
1299
+ RAISE_IF_EXCEPTION(ret);
1300
+ RB_GC_GUARD(ret);
1301
+
1302
+ return ret;
1303
+ }
1304
+
1305
+ VALUE um_tee(struct um *machine, int in_fd, int out_fd, uint nbytes) {
1306
+ struct um_op *op = um_op_acquire(machine);
1307
+ um_prep_op(machine, op, OP_TEE, 2, 0);
1308
+ struct io_uring_sqe *sqe = um_get_sqe(machine, op);
1309
+ io_uring_prep_tee(sqe, in_fd, out_fd, nbytes, 0);
1310
+
1311
+ VALUE ret = um_yield(machine);
1312
+
1313
+ if (likely(um_verify_op_completion(machine, op, false))) ret = INT2NUM(op->result.res);
1314
+ um_op_release(machine, op);
1315
+
1316
+ RAISE_IF_EXCEPTION(ret);
1317
+ RB_GC_GUARD(ret);
1318
+
1319
+ return ret;
1320
+ }
1321
+
1322
+ VALUE um_fsync(struct um *machine, int fd) {
1323
+ struct um_op *op = um_op_acquire(machine);
1324
+ um_prep_op(machine, op, OP_FSYNC, 2, 0);
1325
+ struct io_uring_sqe *sqe = um_get_sqe(machine, op);
1326
+ io_uring_prep_fsync(sqe, fd, 0);
1327
+
1328
+ VALUE ret = um_yield(machine);
1329
+
1330
+ if (likely(um_verify_op_completion(machine, op, false))) ret = INT2NUM(op->result.res);
1331
+ um_op_release(machine, op);
1332
+
1333
+ RAISE_IF_EXCEPTION(ret);
1334
+ RB_GC_GUARD(ret);
1335
+
1336
+ return ret;
1337
+ }
1338
+
1288
1339
  /*******************************************************************************
1289
1340
  multishot ops
1290
1341
  *******************************************************************************/