polyphony 1.0.2 → 1.1.1

checksums.yaml CHANGED
@@ -1,7 +1,7 @@
  ---
  SHA256:
-   metadata.gz: '09f35eaafb1ff569e8a36e1b1e2350e52ce3048b814fb8ccca935ed1a45d1dbd'
-   data.tar.gz: 0e2296097fdb1d6604c6a41a08880d0cab62052cab4b1b4ebe2ac2ce92bbb737
+   metadata.gz: a30f7362ca02a1e3b3fe8a76394d5bca243f8dc774b3a6f3f7e9ffa81aac04f7
+   data.tar.gz: 6fa0684c3e4ddf3fe62ea6d40d5e49578e36042ff57d20848cff6da165ab6027
  SHA512:
-   metadata.gz: 80004fb81f991c8cca2c65bf89ed642f90f0e4947a47ea7ac34b59200aebc28e5a73215e8c80e837cd22edf92cf8026332a7df4722a5f5555956648ad5e8c9ee
-   data.tar.gz: 3eb8022b4fab6b344b422de7791260b1036bd147f7ad8f9a4942e398f0071d8c3468ece6f7d576bb167b4333477643fe1e1b3348601d6fb9db164d8d49775328
+   metadata.gz: 1eb08ca45b2129c25c5a1b023aea14fdbade30323a8f5db824f3c33c9b386f24cd541cfa7d536696c2ecd253dd7b4eeb976e87f53a93a59aa1ff96166e54fe05
+   data.tar.gz: 2f9145ea40f5d8aeb280cbc70249793805e770735c4758ff9f26f80138a752aacdf347e8e6705604a602ddec3216f33bc1797a00d9521585646dc2d8ccd432c4
@@ -8,7 +8,7 @@ jobs:
  fail-fast: false
  matrix:
  os: [ubuntu-latest, macos-latest]
- ruby: ['3.0', '3.1', '3.2']
+ ruby: ['3.0', '3.1', '3.2', 'head']

  name: >-
  ${{matrix.os}}, ${{matrix.ruby}}
@@ -8,7 +8,7 @@ jobs:
  fail-fast: false
  matrix:
  os: [ubuntu-latest]
- ruby: ['3.0', '3.1', '3.2']
+ ruby: ['3.0', '3.1', '3.2', 'head']

  name: >-
  ${{matrix.os}}, ${{matrix.ruby}}
data/.yardopts CHANGED
@@ -20,6 +20,7 @@
  docs/readme.md
  docs/overview.md
  docs/tutorial.md
+ docs/advanced-io.md
  docs/cheat-sheet.md
  docs/faq.md
  docs/concurrency.md
data/CHANGELOG.md CHANGED
@@ -1,3 +1,12 @@
+ ## 1.1.1 2023-06-08
+
+ - Minor improvements to documentation
+
+ ## 1.1 2023-06-08
+
+ - Add advanced I/O doc page
+ - Add `Fiber#receive_loop` API
+
  ## 1.0.2 2023-05-28

  - Remove liburing man files from gemspec (#103)
data/README.md CHANGED
@@ -77,6 +77,7 @@ $ gem install polyphony

  - [Overview](docs/overview.md)
  - [Tutorial](docs/tutorial.md)
+ - [Advanced I/O with Polyphony](docs/advanced-io.md)
  - [Cheat-Sheet](docs/cheat-sheet.md)
  - [FAQ](docs/faq.md)

data/TODO.md CHANGED
@@ -1,4 +1,3 @@
- - issue #102 - test and see what this is about
  - Look at RPC benchmark more closely: is there a way to reduce the overhead of
  the `backend_base_switch_fiber` function?

@@ -15,24 +14,14 @@
  - Add support for IPv6:
  https://www.reddit.com/r/ruby/comments/lyen23/understanding_ipv6_and_why_its_important_to_you/

- - Check why `throttled_loop` inside of `move_on_after` fails to stop
-
  - Override stock `::SizedQueue` impl with Queue with capacity

- - Add support for `break` and `StopIteration` in all loops (with tests)
-
  - More tight loops
  - `IO#gets_loop`, `Socket#gets_loop`, `OpenSSL::Socket#gets_loop` (medium effort)
- - `Fiber#receive_loop` (very little effort, should be implemented in C)

  - Add support for `close` to io_uring backend

- ## Roadmap for Polyphony 1.0
-
- - Add test that mimics the original design for Monocrono:
- - 256 fibers each waiting for a message
- - When message received do some blocking work using a `ThreadPool`
- - Send messages, collect responses, check for correctness
+ ## Roadmap for Polyphony 1.1

  - io_uring
  - Use playground.c to find out why we when submitting and waiting for
@@ -117,7 +106,7 @@

  - Allow locking the scheduler on to one fiber
  - Add instance var `@fiber_lock`
- - API is `Thread#fiber_lock` which sets the fiber_lock instance varwhile
+ - API is `Thread#fiber_lock` which sets the fiber_lock instance var while
  running the block:

  ```ruby
@@ -127,6 +116,7 @@
  end
  end
  ```
+
  - When `@fiber_lock` is set, it is considered as the only one in the run
  queue:

data/docs/advanced-io.md ADDED
@@ -0,0 +1,313 @@
+ # @title Advanced I/O with Polyphony
+
+ # Advanced I/O with Polyphony
+
+ ## Using splice for moving data between files and sockets
+
+ Splice is a Linux-specific API that lets you move data between two file
+ descriptors without copying data between kernel-space and user-space. This is
+ not only useful for copying data between two files, but also for implementing
+ things such as web servers, where you might need to serve files of an arbitrary
+ size. Using splice, you can avoid the cost of having to load a file's content
+ into memory in order to send it to a TCP connection.
+
+ In order to use `splice`, at least one of the file descriptors involved needs to
+ be a pipe. This is because in Linux, pipes are actually kernel buffers. The idea
+ is that you first move data from a source fd into a kernel buffer, then you move
+ data from the kernel buffer to the destination fd. In some cases, this lets the
+ Linux kernel completely avoid having to copy data in order to move it from the
+ source to the destination. So the normal way of using splice is to first
+ splice data from the source fd to the pipe (to its *write* fd), and then to
+ splice data from the pipe (from its *read* fd) to the destination fd.
+
+ Here's how you can use splice with Polyphony:
+
+ ```ruby
+ def send_file_using_splice(src, dest)
+   # create a pipe. Polyphony::Pipe encapsulates a kernel pipe in a single
+   # IO-like object, but we can also use the stock IO.pipe method call that
+   # returns two separate pipe fds.
+   pipe = Polyphony::Pipe.new
+   loop do
+     # splices data from src to the pipe
+     bytes_available = IO.splice(src, pipe, 2**14)
+     break if bytes_available == 0 # EOF
+
+     # splices data from the pipe to the dest
+     while (bytes_available > 0)
+       written = IO.splice(pipe, dest, bytes_available)
+       bytes_available -= written
+     end
+   end
+ end
+ ```
+
+ Let's examine the code above. First of all, we have a loop that repeatedly
+ splices data in chunks of 16KB, using the `IO.splice` API provided by Polyphony.
+ We break from the loop once EOF is encountered. Secondly, for moving data from
+ the pipe to the destination, we need to make sure *all* data made available on
+ the pipe has been spliced to the destination, since the call to `IO.splice` can
+ actually write fewer bytes than specified. So, we need to repeatedly perform two
+ splice operations, one after the other, and we need to make sure all data is
+ spliced to the destination. Is there a better way to do this?
+
+ Fortunately, with Polyphony there is! Firstly, we can tell Polyphony to splice
+ data repeatedly until EOF is encountered by passing a negative max size:
+
+ ```ruby
+ IO.splice(src, pipe, -2**14)
+ ```
+
+ Secondly, we can perform the two splice operations concurrently, by spinning up
+ a separate fiber that performs one of the splice operations, which gives us the
+ following:
+
+ ```ruby
+ def send_file_using_splice(src, dest)
+   pipe = Polyphony::Pipe.new
+   spin do
+     IO.splice(src, pipe, -2**14)
+     # We need to close the pipe in order to signal EOF for the 2nd splice call.
+     pipe.close
+   end
+   IO.splice(pipe, dest, -2**14)
+ end
+ ```
+
+ There are a few things to notice here: while we have two concurrent operations
+ running in two separate fibers, they are still inter-dependent in their
+ progress, as one is filling a kernel buffer and the other is flushing it, and
+ thus the progress of the whole will be bound by the slowest operation.
+
+ Take an HTTP server that serves a large file to a slow client, or a client with
+ a bad network connection. The web server is perfectly capable of reading the
+ file from its disk very fast, but sending data to the HTTP client can be much,
+ much slower. The second splice operation, splicing from the pipe to the
+ destination, will flush the kernel buffer much more slowly than it is being
+ filled. At a certain point, the buffer is full, and the first splice operation
+ from the source to the pipe cannot continue. It will need to wait for the other
+ splice operation to progress in order to continue filling the buffer. This is
+ called back-pressure propagation; it's a good thing, and we get it
+ automatically.
+
+ Let's now look at all the things we didn't need to do: we didn't need to read
+ data into a Ruby string (which is costly in CPU time, in memory, and eventually
+ in GC pressure), and we didn't need to manage a buffer and take care of
+ synchronizing access to it. We got to move data from the source to the
+ destination concurrently, and we got back-pressure propagation for free. Can we
+ do any better than that?
+
+ Actually, we can! Polyphony also provides an API that does all of the above in a
+ single method call:
+
+ ```ruby
+ def send_file_using_splice(src, dest)
+   IO.double_splice(src, dest)
+ end
+ ```
+
+ The `IO.double_splice` method creates a pipe and repeatedly splices data concurrently
+ from the source to the pipe and from the pipe to the destination until the
+ source is exhausted. All this, without needing to instantiate a
+ `Polyphony::Pipe` object, and without needing to spin up a second fiber, further
+ minimizing memory use and GC pressure.
+
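+ For illustration, here is a minimal usage sketch of how `send_file_using_splice`
+ could be called from a bare-bones TCP file server. The address, port and file
+ path are placeholders, and error handling is omitted:
+
+ ```ruby
+ require 'polyphony'
+
+ server = TCPServer.new('127.0.0.1', 1234)
+ while (conn = server.accept)
+   spin do
+     # serve the same (placeholder) file to every client, then close the connection
+     File.open('/tmp/some_large_file') { |f| send_file_using_splice(f, conn) }
+   ensure
+     conn.close
+   end
+ end
+ ```
+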
+ ## Compressing and decompressing in-flight data
+
+ You might be familiar with Ruby's [zlib](https://github.com/ruby/zlib) gem (docs
+ [here](https://rubyapi.org/3.2/o/zlib)), which can be used to compress and
+ uncompress data using the popular gzip format. Imagine we want to implement an
+ HTTP server that can serve files compressed using gzip:
+
+ ```ruby
+ def serve_compressed_file(socket, file)
+   # we leave aside sending the HTTP headers and dealing with transfer encoding
+   compressed = Zlib.gzip(file.read)
+   socket << compressed
+ end
+ ```
+
+ In the above example, we read the file contents into a Ruby string, then pass
+ the contents to `Zlib.gzip`, which returns the compressed contents in another
+ Ruby string, then write the compressed data to the socket. We can see how this
+ can lead to lots of memory allocations (especially if the file is large), and
+ more pressure on the Ruby GC. How can we improve this?
+
+ One way would be to utilise Zlib's `GzipWriter` class:
+
+ ```ruby
+ def serve_compressed_file(socket, file)
+   # we leave aside sending the HTTP headers and dealing with transfer encoding
+   compressor = Zlib::GzipWriter.new(socket)
+   while (data = file.read(2**14))
+     compressor << data
+   end
+ end
+ ```
+
+ In the above code, we instantiate a `Zlib::GzipWriter`, which we then feed with
+ data from the file, with the compressor object writing the compressed data to
+ the socket. Notice how we still need to read the file contents into a Ruby
+ string and then pass it to the compressor. Could we avoid this? With Polyphony
+ the answer is: yes, we can!
+
+ Polyphony provides a number of APIs for compressing and decompressing data on
+ the fly between two file descriptors (i.e. `IO` instances), namely: `IO.gzip`,
+ `IO.gunzip`, `IO.deflate` and `IO.inflate`. Let's see how this can be used to
+ serve gzipped data to an HTTP client:
+
+ ```ruby
+ def serve_compressed_file(socket, file)
+   IO.gzip(file, socket) # and that's it!
+ end
+ ```
+
+ Using the `IO.gzip` API provided by Polyphony, we completely avoid instantiating
+ Ruby strings into which data is read, and in fact we avoid allocating any
+ buffers on the heap (apart from what `zlib` might be doing). *And* we get to
+ move data *and compress it* between the given file and the socket using a single
+ method call!
+
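+ The same pattern works in the other direction. As a rough sketch, and assuming
+ `IO.gunzip` takes a source and a destination just like `IO.gzip`, we could
+ accept a gzipped upload and write it, decompressed, to a file:
+
+ ```ruby
+ def store_gzipped_upload(socket, file)
+   # decompress data arriving on the socket and write it to the given file
+   IO.gunzip(socket, file)
+ end
+ ```
+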
+ ## Feeding data from a file descriptor to a parser
+
+ Sometimes we want to process data from a given file or socket by passing it
+ through some object that parses the data, or otherwise manipulates it. Normally,
+ we would write a loop that repeatedly reads the data from the source, then
+ passes it to the parser object. Imagine we have data transmitted using the
+ `MessagePack` format that we need to convert back into its original form. We
+ might do something like the following:
+
+ ```ruby
+ def with_message_pack_data_from_io(io, &block)
+   unpacker = MessagePack::Unpacker.new
+   while (data = io.read(2**14))
+     unpacker.feed_each(data, &block)
+   end
+ end
+
+ # Which we can use as follows:
+ with_message_pack_data_from_io(socket) do |o|
+   puts "got: #{o.inspect}"
+ end
+ ```
+
+ Polyphony provides some APIs that help us write less code, and even optimize the
+ performance of our code. Let's look at the `IO#read_loop` (or `IO#recv_loop` for
+ sockets) API:
+
+ ```ruby
+ def with_message_pack_data_from_io(io, &block)
+   unpacker = MessagePack::Unpacker.new
+   io.read_loop do |data|
+     unpacker.feed_each(data, &block)
+   end
+ end
+ ```
+
+ In the above code, we replaced our `while` loop with a call to `IO#read_loop`,
+ which yields read data to the block given to it. In the block, we pass the data
+ to the MessagePack unpacker. While this does not look much different from the
+ previous implementation, the `IO#read_loop` API implements a tight loop at the
+ C-extension level that provides slightly better performance.
+
+ But Polyphony goes even further than that and provides an `IO#feed_loop` API that
+ lets us feed read data to a given parser or processor object. Here's how we can
+ use it:
+
+ ```ruby
+ def with_message_pack_data_from_io(io, &block)
+   unpacker = MessagePack::Unpacker.new
+   io.feed_loop(unpacker, :feed_each, &block)
+ end
+ ```
+
+ With `IO#feed_loop` we get to write even less code, and as with `IO#read_loop`,
+ `IO#feed_loop` is implemented at the C-extension level using a tight loop that
+ maximizes performance.
+
+ ## Fast and easy chunked transfer-encoding in HTTP/1
+
+ [Chunked transfer
+ encoding](https://en.wikipedia.org/wiki/Chunked_transfer_encoding) is a great
+ way to serve HTTP responses of arbitrary size, because we don't need to know
+ their size in advance, which means we don't necessarily need to hold them in
+ memory, or perform expensive fstat calls to get file metadata. Sending HTTP
+ responses in chunked transfer encoding is simple enough:
+
+ ```ruby
+ def send_chunked_response_from_io(socket, io)
+   while true
+     chunk = io.read(MAX_CHUNK_SIZE) || ''
+     socket << "#{chunk.bytesize.to_s(16)}\r\n#{chunk}\r\n"
+     break if chunk.empty?
+   end
+ end
+ ```
+
+ Note how we read the chunk into memory and then send it on to the client. Would
+ it be possible to splice the data instead? Let's see how that would look:
+
+ ```ruby
+ def send_chunked_response_from_io(socket, io)
+   pipe = Polyphony::Pipe.new
+   while true
+     bytes_spliced = IO.splice(io, pipe, MAX_CHUNK_SIZE)
+     socket << "#{bytes_spliced.to_s(16)}\r\n"
+     IO.splice(pipe, socket, bytes_spliced) if bytes_spliced > 0
+     socket << "\r\n"
+     break if bytes_spliced == 0
+   end
+ end
+ ```
+
+ In the code above, while we avoid having to read chunks of the source data into
+ Ruby strings, we now perform 3 I/O operations for each chunk: writing the chunk
+ size, splicing the data from the pipe (the kernel buffer), and finally writing
+ the `"\r\n"` delimiter. We can probably write some more complex logic to reduce
+ this to 2 operations (coalescing the two write operations into one), but even so
+ this implementation involves a lot of back and forth between our code, the
+ Polyphony I/O backend, and the operating system.
+
+ Fortunately, Polyphony provides a special API for sending HTTP chunked
+ responses:
+
+ ```ruby
+ def send_chunked_response_from_io(socket, io)
+   IO.http1_splice_chunked(io, socket, MAX_CHUNK_SIZE)
+ end
+ ```
+
+ A single method call replaces the whole mechanism we devised above, and in
+ addition Polyphony makes sure to perform it with the minimum possible number of
+ I/O operations!
+
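+ To put this in context, here is a rough sketch of a handler that answers an
+ HTTP/1.1 request with a file served using chunked transfer encoding. The status
+ line and headers are simplified, and `MAX_CHUNK_SIZE` is an arbitrary constant:
+
+ ```ruby
+ MAX_CHUNK_SIZE = 2**14
+
+ def respond_with_file(socket, path)
+   File.open(path) do |f|
+     socket << "HTTP/1.1 200 OK\r\nTransfer-Encoding: chunked\r\n\r\n"
+     IO.http1_splice_chunked(f, socket, MAX_CHUNK_SIZE)
+   end
+ end
+ ```
+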
+ ## Sending compressed data using chunked transfer encoding
+
+ We can now combine the different APIs discussed above to create even more
+ complex behaviour. Let's see how we can send an HTTP response using compressed
+ content encoding and chunked transfer encoding:
+
+ ```ruby
+ def send_compressed_chunked_response_from_io(socket, io)
+   pipe = Polyphony::Pipe.new
+   spin { IO.gzip(io, pipe) }
+   IO.http1_splice_chunked(pipe, socket, MAX_CHUNK_SIZE)
+ end
+ ```
+
+ The code above looks simple enough, but it actually packs a lot of power in just
+ 3 lines of code: we create a pipe, then spin up a fiber that compresses data
+ from `io` into the pipe. We then serve data from the pipe to the socket using
+ chunked transfer encoding. As discussed above, we do this without actually
+ allocating any Ruby strings for holding the data; we take maximum advantage of
+ kernel buffers (a.k.a. pipes), and we perform the two operations - compressing
+ the data and sending it to the client - concurrently.
+
+ ## Conclusion
+
+ In this article we have looked at some of the advanced I/O functionality
+ provided by Polyphony, which lets us write less code, have it run faster, have
+ it run concurrently, and minimize memory allocations and pressure on the Ruby
+ GC. Feel free to browse the [IO
+ examples](https://github.com/digital-fabric/polyphony/tree/master/examples/io)
+ included in Polyphony.
data/docs/cheat-sheet.md CHANGED
@@ -71,7 +71,7 @@ def calculate_some_stuff(n)
  acc += big_calc(acc, i)
  snooze if (i % 1000) == 0
  end
- end
+ end
  ```

  ### Suspend fiber
@@ -191,7 +191,7 @@ dest2.tee_from(source, 8192)
  dest1.splice_from(source, 8192)
  # or:
  IO.tee(src, dest2)
- IO.splice(src, dest2)
+ IO.splice(src, dest1)
  ```

  ### Splice data between two arbitrary file descriptors, without creating a pipe
data/docs/readme.md CHANGED
@@ -79,6 +79,7 @@ $ gem install polyphony

  - {file:/docs/overview.md Overview}
  - {file:/docs/tutorial.md Tutorial}
+ - {file:/docs/advanced-io.md Advanced I/O with Polyphony}
  - {file:/docs/cheat-sheet.md Cheat-Sheet}
  - {file:/docs/faq.md FAQ}

@@ -94,11 +94,11 @@ def bm_fiber_raw
  $server_raw.transfer 3
  end

- p bm_raw
- p bm_send
- p bm_fiber
- p bm_fiber_optimized
- p bm_fiber_single
+ # p bm_raw
+ # p bm_send
+ # p bm_fiber
+ # p bm_fiber_optimized
+ # p bm_fiber_single
  p bm_fiber_raw
  p bm_fiber_schedule

@@ -116,17 +116,17 @@ end

  puts "warming up JIT..."

- 3.times do
- warmup_jit
- sleep 1
- end
+ # 3.times do
+ # warmup_jit
+ # sleep 1
+ # end

  Benchmark.ips do |x|
- x.report("raw") { bm_raw }
- x.report("send") { bm_send }
- x.report("fiber") { bm_fiber }
- x.report("fiber_optimized") { bm_fiber_optimized }
- x.report("fiber_single") { bm_fiber_single }
+ # x.report("raw") { bm_raw }
+ # x.report("send") { bm_send }
+ # x.report("fiber") { bm_fiber }
+ # x.report("fiber_optimized") { bm_fiber_optimized }
+ # x.report("fiber_single") { bm_fiber_single }
  x.report("fiber_raw") { bm_fiber_raw }
  x.report("fiber_schedule") { bm_fiber_schedule }
  x.compare!
@@ -0,0 +1,68 @@
+ # frozen_string_literal: true
+
+ require 'bundler/setup'
+ require 'polyphony'
+
+ class Stream
+   def initialize(io)
+     @io = io
+     @buffer = +''
+     @length = 0
+     @pos = 0
+   end
+
+   def getbyte
+     if @pos == @length
+       return nil if !fill_buffer
+     end
+     byte = @buffer[@pos].getbyte(0)
+     @pos += 1
+     byte
+   end
+
+   def getc
+     if @pos == @length
+       return nil if !fill_buffer
+     end
+     char = @buffer[@pos]
+     @pos += 1
+     char
+   end
+
+   def ungetc(c)
+     @buffer.insert(@pos, c)
+     @length += 1
+     c
+   end
+
+   def gets
+   end
+
+   def read
+   end
+
+   def readpartial
+   end
+
+   private
+
+   def fill_buffer
+     Polyphony.backend_read(@io, @buffer, 8192, false, -1)
+     @length = @buffer.size
+   end
+ end
+
+ i, o = IO.pipe
+ s = Stream.new(i)
+
+ f = spin do
+   loop do
+     b = s.getbyte
+     p getbyte: b
+     s.ungetc(b.to_s) if rand > 0.5
+   end
+ end
+
+ o << 'hello'
+ sleep 0.1
@@ -0,0 +1,13 @@
+ # frozen_string_literal: true
+
+ require 'bundler/setup'
+ require 'polyphony'
+
+ i = 0
+ value = move_on_after(1, with_value: 42) do
+   throttled_loop(20) do
+     p (i += 1)
+   end
+ end
+
+ p value: value
@@ -389,12 +389,12 @@ inline void set_fd_blocking_mode(int fd, int blocking) {
  #endif
  }

- inline void io_verify_blocking_mode(rb_io_t *fptr, VALUE io, VALUE blocking) {
+ inline void io_verify_blocking_mode(VALUE io, int fd, VALUE blocking) {
  VALUE blocking_mode = rb_ivar_get(io, ID_ivar_blocking_mode);
  if (blocking == blocking_mode) return;

  rb_ivar_set(io, ID_ivar_blocking_mode, blocking);
- set_fd_blocking_mode(fptr->fd, blocking == Qtrue);
+ set_fd_blocking_mode(fd, blocking == Qtrue);
  }

  inline void backend_run_idle_tasks(struct Backend_base *base) {
@@ -455,9 +455,7 @@ VALUE Backend_stats(VALUE self) {
  }

  VALUE Backend_verify_blocking_mode(VALUE self, VALUE io, VALUE blocking) {
- rb_io_t *fptr;
- GetOpenFile(io, fptr);
- io_verify_blocking_mode(fptr, io, blocking);
+ io_verify_blocking_mode(io, rb_io_descriptor(io), blocking);
  return self;
  }

@@ -10,6 +10,15 @@
  #include "ruby/io.h"
  #include "runqueue.h"

+ #ifndef HAVE_RB_IO_DESCRIPTOR
+ static int rb_io_descriptor_fallback(VALUE io) {
+ rb_io_t *fptr;
+ GetOpenFile(io, fptr);
+ return fptr->fd;
+ }
+ #define rb_io_descriptor rb_io_descriptor_fallback
+ #endif
+
  struct backend_stats {
  unsigned int runqueue_size;
  unsigned int runqueue_length;
@@ -145,7 +154,7 @@ VALUE Backend_stats(VALUE self);
  VALUE Backend_verify_blocking_mode(VALUE self, VALUE io, VALUE blocking);
  void backend_run_idle_tasks(struct Backend_base *base);
  void set_fd_blocking_mode(int fd, int blocking);
- void io_verify_blocking_mode(rb_io_t *fptr, VALUE io, VALUE blocking);
+ void io_verify_blocking_mode(VALUE io, int fd, VALUE blocking);
  void backend_setup_stats_symbols();
  int backend_getaddrinfo(VALUE host, VALUE port, struct sockaddr **ai_addr);
  VALUE name_to_addrinfo(void *name, socklen_t len);
@@ -28,9 +28,9 @@ VALUE SYM_write;
  VALUE eArgumentError;

  #ifdef POLYPHONY_UNSET_NONBLOCK
- #define io_unset_nonblock(fptr, io) io_verify_blocking_mode(fptr, io, Qtrue)
+ #define io_unset_nonblock(io, fd) io_verify_blocking_mode(io, fd, Qtrue)
  #else
- #define io_unset_nonblock(fptr, io)
+ #define io_unset_nonblock(io, fd)
  #endif

  typedef struct Backend_t {
@@ -389,10 +389,10 @@ static inline int fd_from_io(VALUE io, rb_io_t **fptr, int write_mode, int recti
  if (underlying_io != Qnil) io = underlying_io;

  GetOpenFile(io, *fptr);
- io_unset_nonblock(*fptr, io);
+ int fd = rb_io_descriptor(io);
+ io_unset_nonblock(io, fd);
  if (rectify_file_pos) rectify_io_file_pos(*fptr);
-
- return (*fptr)->fd;
+ return fd;
  }
  }

@@ -1376,7 +1376,7 @@ VALUE Backend_wait_io(VALUE self, VALUE io, VALUE write) {

  // if (fd < 0) return Qnil;

- // io_unset_nonblock(fptr, io);
+ // io_unset_nonblock(io, fd);

  // ctx = context_store_acquire(&backend->store, OP_CLOSE);
  // sqe = io_uring_backend_get_sqe(backend);