uringmachine 0.2 → 0.3

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
checksums.yaml CHANGED
@@ -1,7 +1,7 @@
1
1
  ---
2
2
  SHA256:
3
- metadata.gz: 52ccd2dc6b2caf48fd03405d4536cb9a0a9640470fb14a5f105da84029857471
4
- data.tar.gz: cf1c06469be27a93089a805cfd7552978f8f84d1a6f6d225289954d908ec981e
3
+ metadata.gz: 7129b3c8605d5734f7152bddb4fa1b6f034fd1de26262eabfbfc2190846967bd
4
+ data.tar.gz: 902e6663bac65d56e45d67383e2cd3eb0601534ed2b451f4f76b2812ce6125b1
5
5
  SHA512:
6
- metadata.gz: e79d481864b8be758efc1f67924b491118c4d00a038fc24ee2a6078cc34edbfab9602166836e155fb74dcd423f5bcee2da47dd89d24fb23c68b3412481704f98
7
- data.tar.gz: 00f2867826b20a9ed0198dc683f5f26d81d59a94bacfa2320b991c89c0dcb0cb327cb821b33d6c501fb2f67d73c2ff0784841cb01d973d58584081d621ff4563
6
+ metadata.gz: 683db63642ddb5d98c9eb137f8121a8cb8cb0fe44456928fca78cf8e322cebc01078efa71379abcceae1a1889140bf5b39f6ac16348db0b77d9c8a5bb9cf5cd0
7
+ data.tar.gz: 850dce780030b6102371861805f831299d3ea113f8c02141515afe167b0e49f43a51812a6159a91b6335085b41fcb226713eeb963ae69d208818cb1f3cb303b3
data/CHANGELOG.md CHANGED
@@ -1,3 +1,9 @@
1
+ # 2024-10-04 Version 0.3
2
+
3
+ - Fix race condition affecting `#timeout` and `#sleep`.
4
+ - Add `#accept_each`
5
+ - Add `#accept`
6
+
1
7
  # 2024-10-03 Version 0.2
2
8
 
3
9
  - Remove old IOU code.
data/ext/um/um.c CHANGED
@@ -2,6 +2,54 @@
2
2
  #include "ruby/thread.h"
3
3
  #include <sys/mman.h>
4
4
 
5
+ void um_setup(struct um *machine) {
6
+ machine->ring_initialized = 0;
7
+ machine->unsubmitted_count = 0;
8
+ machine->buffer_ring_count = 0;
9
+ machine->pending_count = 0;
10
+ machine->runqueue_head = NULL;
11
+ machine->runqueue_tail = NULL;
12
+ machine->op_freelist = NULL;
13
+ machine->result_freelist = NULL;
14
+
15
+ unsigned prepared_limit = 4096;
16
+ int flags = 0;
17
+ #ifdef HAVE_IORING_SETUP_SUBMIT_ALL
18
+ flags |= IORING_SETUP_SUBMIT_ALL;
19
+ #endif
20
+ #ifdef HAVE_IORING_SETUP_COOP_TASKRUN
21
+ flags |= IORING_SETUP_COOP_TASKRUN;
22
+ #endif
23
+
24
+ while (1) {
25
+ int ret = io_uring_queue_init(prepared_limit, &machine->ring, flags);
26
+ if (likely(!ret)) break;
27
+
28
+ // if ENOMEM is returned, try with half as many entries
29
+ if (unlikely(ret == -ENOMEM && prepared_limit > 64))
30
+ prepared_limit = prepared_limit / 2;
31
+ else
32
+ rb_syserr_fail(-ret, strerror(-ret));
33
+ }
34
+ machine->ring_initialized = 1;
35
+ }
36
+
37
+ inline void um_teardown(struct um *machine) {
38
+ if (!machine->ring_initialized) return;
39
+
40
+ for (unsigned i = 0; i < machine->buffer_ring_count; i++) {
41
+ struct buf_ring_descriptor *desc = machine->buffer_rings + i;
42
+ io_uring_free_buf_ring(&machine->ring, desc->br, desc->buf_count, i);
43
+ free(desc->buf_base);
44
+ }
45
+ machine->buffer_ring_count = 0;
46
+ io_uring_queue_exit(&machine->ring);
47
+ machine->ring_initialized = 0;
48
+
49
+ um_free_op_linked_list(machine, machine->op_freelist);
50
+ um_free_op_linked_list(machine, machine->runqueue_head);
51
+ }
52
+
5
53
  static inline struct io_uring_sqe *um_get_sqe(struct um *machine, struct um_op *op) {
6
54
  struct io_uring_sqe *sqe;
7
55
  sqe = io_uring_get_sqe(&machine->ring);
@@ -24,22 +72,6 @@ done:
24
72
  return sqe;
25
73
  }
26
74
 
27
- inline void um_cleanup(struct um *machine) {
28
- if (!machine->ring_initialized) return;
29
-
30
- for (unsigned i = 0; i < machine->buffer_ring_count; i++) {
31
- struct buf_ring_descriptor *desc = machine->buffer_rings + i;
32
- io_uring_free_buf_ring(&machine->ring, desc->br, desc->buf_count, i);
33
- free(desc->buf_base);
34
- }
35
- machine->buffer_ring_count = 0;
36
- io_uring_queue_exit(&machine->ring);
37
- machine->ring_initialized = 0;
38
-
39
- um_free_linked_list(machine, machine->freelist_head);
40
- um_free_linked_list(machine, machine->runqueue_head);
41
- }
42
-
43
75
  struct wait_for_cqe_ctx {
44
76
  struct um *machine;
45
77
  struct io_uring_cqe *cqe;
@@ -97,6 +129,7 @@ inline void um_process_cqe(struct um *machine, struct io_uring_cqe *cqe) {
97
129
  // op has been abandoned by the I/O method, so we need to clean up (check
98
130
  // the op in to the free list).
99
131
  um_op_checkin(machine, op);
132
+ break;
100
133
  default:
101
134
  // TODO: invalid state, should raise!
102
135
  }
@@ -270,10 +303,10 @@ VALUE um_timeout(struct um *machine, VALUE interval, VALUE class) {
270
303
  if (!ID_new) ID_new = rb_intern("new");
271
304
 
272
305
  struct um_op *op = um_op_checkout(machine);
273
- struct __kernel_timespec ts = um_double_to_timespec(NUM2DBL(interval));
306
+ op->ts = um_double_to_timespec(NUM2DBL(interval));
274
307
 
275
308
  struct io_uring_sqe *sqe = um_get_sqe(machine, op);
276
- io_uring_prep_timeout(sqe, &ts, 0, 0);
309
+ io_uring_prep_timeout(sqe, &op->ts, 0, 0);
277
310
  op->state = OP_submitted;
278
311
  op->fiber = rb_fiber_current();
279
312
  op->resume_value = rb_funcall(class, ID_new, 0);
@@ -284,11 +317,11 @@ VALUE um_timeout(struct um *machine, VALUE interval, VALUE class) {
284
317
 
285
318
  inline VALUE um_sleep(struct um *machine, double duration) {
286
319
  struct um_op *op = um_op_checkout(machine);
287
- struct __kernel_timespec ts = um_double_to_timespec(duration);
320
+ op->ts = um_double_to_timespec(duration);
288
321
  struct io_uring_sqe *sqe = um_get_sqe(machine, op);
289
322
  int result = 0;
290
323
 
291
- io_uring_prep_timeout(sqe, &ts, 0, 0);
324
+ io_uring_prep_timeout(sqe, &op->ts, 0, 0);
292
325
  op->state = OP_submitted;
293
326
 
294
327
  return um_await_op(machine, op, &result, NULL);
@@ -311,7 +344,7 @@ inline VALUE um_read(struct um *machine, int fd, VALUE buffer, int maxlen, int b
311
344
  return INT2FIX(result);
312
345
  }
313
346
 
314
- VALUE um_read_each_ensure(VALUE arg) {
347
+ VALUE um_multishot_ensure(VALUE arg) {
315
348
  struct op_ensure_ctx *ctx = (struct op_ensure_ctx *)arg;
316
349
  switch (ctx->op->state) {
317
350
  case OP_submitted:
@@ -358,5 +391,67 @@ VALUE um_read_each(struct um *machine, int fd, int bgid) {
358
391
  op->state = OP_submitted;
359
392
 
360
393
  struct op_ensure_ctx ctx = { .machine = machine, .op = op, .bgid = bgid };
361
- return rb_ensure(um_read_each_safe_loop, (VALUE)&ctx, um_read_each_ensure, (VALUE)&ctx);
394
+ return rb_ensure(um_read_each_safe_loop, (VALUE)&ctx, um_multishot_ensure, (VALUE)&ctx);
395
+ }
396
+
397
+ VALUE um_write(struct um *machine, int fd, VALUE buffer, int len) {
398
+ struct um_op *op = um_op_checkout(machine);
399
+ struct io_uring_sqe *sqe = um_get_sqe(machine, op);
400
+ int result = 0;
401
+ int flags = 0;
402
+
403
+ io_uring_prep_write(sqe, fd, RSTRING_PTR(buffer), len, -1);
404
+ op->state = OP_submitted;
405
+
406
+ um_await_op(machine, op, &result, &flags);
407
+ um_raise_on_system_error(result);
408
+ return INT2FIX(result);
409
+ }
410
+
411
+ VALUE um_accept(struct um *machine, int fd) {
412
+ struct um_op *op = um_op_checkout(machine);
413
+ struct io_uring_sqe *sqe = um_get_sqe(machine, op);
414
+ struct sockaddr addr;
415
+ socklen_t len;
416
+ int result = 0;
417
+ int flags = 0;
418
+ io_uring_prep_accept(sqe, fd, &addr, &len, 0);
419
+ op->state = OP_submitted;
420
+
421
+ um_await_op(machine, op, &result, &flags);
422
+ um_raise_on_system_error(result);
423
+ return INT2FIX(result);
424
+ }
425
+
426
+ VALUE um_accept_each_safe_loop(VALUE arg) {
427
+ struct op_ensure_ctx *ctx = (struct op_ensure_ctx *)arg;
428
+ int result = 0;
429
+ int flags = 0;
430
+
431
+ while (1) {
432
+ um_await_op(ctx->machine, ctx->op, NULL, NULL);
433
+ if (!ctx->op->results_head) {
434
+ // TODO: raise, this shouldn't happen
435
+ printf("no result found!\n");
436
+ }
437
+ while (um_op_result_shift(ctx->machine, ctx->op, &result, &flags)) {
438
+ if (likely(result > 0))
439
+ rb_yield(INT2FIX(result));
440
+ else
441
+ return Qnil;
442
+ }
443
+ }
444
+ }
445
+
446
+ VALUE um_accept_each(struct um *machine, int fd) {
447
+ struct um_op *op = um_op_checkout(machine);
448
+ struct io_uring_sqe *sqe = um_get_sqe(machine, op);
449
+ struct sockaddr addr;
450
+ socklen_t len;
451
+ io_uring_prep_multishot_accept(sqe, fd, &addr, &len, 0);
452
+ op->state = OP_submitted;
453
+ op->is_multishot = 1;
454
+
455
+ struct op_ensure_ctx ctx = { .machine = machine, .op = op };
456
+ return rb_ensure(um_accept_each_safe_loop, (VALUE)&ctx, um_multishot_ensure, (VALUE)&ctx);
362
457
  }
data/ext/um/um.h CHANGED
@@ -48,6 +48,8 @@ struct um_op {
48
48
  VALUE fiber;
49
49
  VALUE resume_value;
50
50
  int is_multishot;
51
+ struct __kernel_timespec ts;
52
+
51
53
  int cqe_result;
52
54
  int cqe_flags;
53
55
  };
@@ -63,7 +65,9 @@ struct buf_ring_descriptor {
63
65
  #define BUFFER_RING_MAX_COUNT 10
64
66
 
65
67
  struct um {
66
- struct um_op *freelist_head;
68
+ struct um_op *op_freelist;
69
+ struct um_result_entry *result_freelist;
70
+
67
71
  struct um_op *runqueue_head;
68
72
  struct um_op *runqueue_tail;
69
73
 
@@ -79,6 +83,11 @@ struct um {
79
83
 
80
84
  extern VALUE cUM;
81
85
 
86
+ void um_setup(struct um *machine);
87
+ void um_teardown(struct um *machine);
88
+ void um_free_op_linked_list(struct um *machine, struct um_op *op);
89
+ void um_free_result_linked_list(struct um *machine, struct um_result_entry *entry);
90
+
82
91
  struct __kernel_timespec um_double_to_timespec(double value);
83
92
  int um_value_is_exception_p(VALUE v);
84
93
  VALUE um_raise_exception(VALUE v);
@@ -88,9 +97,6 @@ void * um_prepare_read_buffer(VALUE buffer, unsigned len, int ofs);
88
97
  void um_update_read_buffer(struct um *machine, VALUE buffer, int buffer_offset, int result, int flags);
89
98
  VALUE get_string_from_buffer_ring(struct um *machine, int bgid, int result, int flags);
90
99
 
91
- void um_cleanup(struct um *machine);
92
-
93
- void um_free_linked_list(struct um *machine, struct um_op *op);
94
100
  VALUE um_fiber_switch(struct um *machine);
95
101
  VALUE um_await(struct um *machine);
96
102
 
@@ -111,5 +117,9 @@ VALUE um_timeout(struct um *machine, VALUE interval, VALUE class);
111
117
  VALUE um_sleep(struct um *machine, double duration);
112
118
  VALUE um_read(struct um *machine, int fd, VALUE buffer, int maxlen, int buffer_offset);
113
119
  VALUE um_read_each(struct um *machine, int fd, int bgid);
120
+ VALUE um_write(struct um *machine, int fd, VALUE buffer, int len);
121
+
122
+ VALUE um_accept(struct um *machine, int fd);
123
+ VALUE um_accept_each(struct um *machine, int fd);
114
124
 
115
125
  #endif // UM_H
data/ext/um/um_class.c CHANGED
@@ -14,7 +14,7 @@ static void UM_compact(void *ptr) {
14
14
  }
15
15
 
16
16
  static void UM_free(void *ptr) {
17
- um_cleanup((struct um *)ptr);
17
+ um_teardown((struct um *)ptr);
18
18
  free(ptr);
19
19
  }
20
20
 
@@ -43,36 +43,7 @@ inline struct um *get_machine(VALUE self) {
43
43
 
44
44
  VALUE UM_initialize(VALUE self) {
45
45
  struct um *machine = RTYPEDDATA_DATA(self);
46
-
47
- machine->ring_initialized = 0;
48
- machine->unsubmitted_count = 0;
49
- machine->buffer_ring_count = 0;
50
- machine->pending_count = 0;
51
- machine->runqueue_head = NULL;
52
- machine->runqueue_tail = NULL;
53
- machine->freelist_head = NULL;
54
-
55
- unsigned prepared_limit = 4096;
56
- int flags = 0;
57
- #ifdef HAVE_IORING_SETUP_SUBMIT_ALL
58
- flags |= IORING_SETUP_SUBMIT_ALL;
59
- #endif
60
- #ifdef HAVE_IORING_SETUP_COOP_TASKRUN
61
- flags |= IORING_SETUP_COOP_TASKRUN;
62
- #endif
63
-
64
- while (1) {
65
- int ret = io_uring_queue_init(prepared_limit, &machine->ring, flags);
66
- if (likely(!ret)) break;
67
-
68
- // if ENOMEM is returned, try with half as much entries
69
- if (unlikely(ret == -ENOMEM && prepared_limit > 64))
70
- prepared_limit = prepared_limit / 2;
71
- else
72
- rb_syserr_fail(-ret, strerror(-ret));
73
- }
74
- machine->ring_initialized = 1;
75
-
46
+ um_setup(machine);
76
47
  return self;
77
48
  }
78
49
 
@@ -126,7 +97,6 @@ VALUE UM_setup_buffer_ring(VALUE self, VALUE size, VALUE count) {
126
97
  return UINT2NUM(bg_id);
127
98
  }
128
99
 
129
-
130
100
  VALUE UM_pending_count(VALUE self) {
131
101
  struct um *machine = get_machine(self);
132
102
  return INT2FIX(machine->pending_count);
@@ -185,6 +155,27 @@ VALUE UM_read_each(VALUE self, VALUE fd, VALUE bgid) {
185
155
  return um_read_each(machine, NUM2INT(fd), NUM2INT(bgid));
186
156
  }
187
157
 
158
+ VALUE UM_write(int argc, VALUE *argv, VALUE self) {
159
+ struct um *machine = get_machine(self);
160
+ VALUE fd;
161
+ VALUE buffer;
162
+ VALUE len;
163
+ rb_scan_args(argc, argv, "21", &fd, &buffer, &len);
164
+
165
+ int bytes = NIL_P(len) ? RSTRING_LEN(buffer) : NUM2INT(len);
166
+ return um_write(machine, NUM2INT(fd), buffer, bytes);
167
+ }
168
+
169
+ VALUE UM_accept(VALUE self, VALUE fd) {
170
+ struct um *machine = get_machine(self);
171
+ return um_accept(machine, NUM2INT(fd));
172
+ }
173
+
174
+ VALUE UM_accept_each(VALUE self, VALUE fd) {
175
+ struct um *machine = get_machine(self);
176
+ return um_accept_each(machine, NUM2INT(fd));
177
+ }
178
+
188
179
  void Init_UM(void) {
189
180
  rb_ext_ractor_safe(true);
190
181
 
@@ -204,6 +195,10 @@ void Init_UM(void) {
204
195
  rb_define_method(cUM, "sleep", UM_sleep, 1);
205
196
  rb_define_method(cUM, "read", UM_read, -1);
206
197
  rb_define_method(cUM, "read_each", UM_read_each, 2);
198
+ rb_define_method(cUM, "write", UM_write, -1);
199
+
200
+ rb_define_method(cUM, "accept", UM_accept, 1);
201
+ rb_define_method(cUM, "accept_each", UM_accept_each, 1);
207
202
 
208
203
  // rb_define_method(cUM, "emit", UM_emit, 1);
209
204
 
data/ext/um/um_op.c CHANGED
@@ -1,17 +1,33 @@
1
1
  #include "um.h"
2
2
 
3
- inline void um_op_result_cleanup(struct um *machine, struct um_op *op) {
3
+ inline struct um_result_entry *um_result_checkout(struct um *machine) {
4
+ if (machine->result_freelist) {
5
+ struct um_result_entry *entry = machine->result_freelist;
6
+ machine->result_freelist = entry->next;
7
+ return entry;
8
+ }
9
+
10
+ struct um_result_entry *entry = malloc(sizeof(struct um_result_entry));
11
+ return entry;
12
+ }
13
+
14
+ inline void um_result_checkin(struct um *machine, struct um_result_entry *entry) {
15
+ entry->next = machine->result_freelist;
16
+ machine->result_freelist = entry;
17
+ }
18
+
19
+ inline void um_op_result_cleanup(struct um *machine, struct um_op *op) {
4
20
  struct um_result_entry *entry = op->results_head;
5
21
  while (entry) {
6
22
  struct um_result_entry *next = entry->next;
7
- free(entry);
23
+ um_result_checkin(machine, entry);
8
24
  entry = next;
9
25
  }
10
26
  op->results_head = op->results_tail = NULL;
11
27
  }
12
28
 
13
29
  inline void um_op_result_push(struct um *machine, struct um_op *op, int result, int flags) {
14
- struct um_result_entry *entry = malloc(sizeof(struct um_result_entry));
30
+ struct um_result_entry *entry = um_result_checkout(machine);
15
31
  entry->next = 0;
16
32
  entry->result = result;
17
33
  entry->flags = flags;
@@ -33,7 +49,7 @@ inline int um_op_result_shift(struct um *machine, struct um_op *op, int *result,
33
49
  op->results_head = entry->next;
34
50
  if (!op->results_head)
35
51
  op->results_tail = NULL;
36
- free(entry);
52
+ um_result_checkin(machine, entry);
37
53
  return 1;
38
54
  }
39
55
 
@@ -45,9 +61,9 @@ inline void um_op_clear(struct um_op *op) {
45
61
  inline struct um_op *um_op_checkout(struct um *machine) {
46
62
  machine->pending_count++;
47
63
 
48
- if (machine->freelist_head) {
49
- struct um_op *op = machine->freelist_head;
50
- machine->freelist_head = op->next;
64
+ if (machine->op_freelist) {
65
+ struct um_op *op = machine->op_freelist;
66
+ machine->op_freelist = op->next;
51
67
  um_op_clear(op);
52
68
  return op;
53
69
  }
@@ -62,8 +78,8 @@ inline void um_op_checkin(struct um *machine, struct um_op *op) {
62
78
 
63
79
  machine->pending_count--;
64
80
 
65
- op->next = machine->freelist_head;
66
- machine->freelist_head = op;
81
+ op->next = machine->op_freelist;
82
+ machine->op_freelist = op;
67
83
  }
68
84
 
69
85
  inline struct um_op *um_runqueue_find_by_fiber(struct um *machine, VALUE fiber) {
@@ -117,7 +133,7 @@ inline struct um_op *um_runqueue_shift(struct um *machine) {
117
133
  return op;
118
134
  }
119
135
 
120
- inline void um_free_linked_list(struct um *machine, struct um_op *op) {
136
+ inline void um_free_op_linked_list(struct um *machine, struct um_op *op) {
121
137
  while (op) {
122
138
  struct um_op *next = op->next;
123
139
  um_op_result_cleanup(machine, op);
@@ -125,3 +141,11 @@ inline void um_free_linked_list(struct um *machine, struct um_op *op) {
125
141
  op = next;
126
142
  }
127
143
  }
144
+
145
+ inline void um_free_result_linked_list(struct um *machine, struct um_result_entry *entry) {
146
+ while (entry) {
147
+ struct um_result_entry *next = entry->next;
148
+ free(entry);
149
+ entry = next;
150
+ }
151
+ }
@@ -1,5 +1,5 @@
1
1
  # frozen_string_literal: true
2
2
 
3
3
  class UringMachine
4
- VERSION = '0.2'
4
+ VERSION = '0.3'
5
5
  end
data/test/helper.rb CHANGED
@@ -3,8 +3,12 @@
3
3
  require 'bundler/setup'
4
4
  require_relative './coverage' if ENV['COVERAGE']
5
5
  require 'uringmachine'
6
+ require 'socket'
6
7
  require 'minitest/autorun'
7
8
 
9
+ STDOUT.sync = true
10
+ STDERR.sync = true
11
+
8
12
  module ::Kernel
9
13
  def debug(**h)
10
14
  k, v = h.first
@@ -46,18 +50,6 @@ module Minitest::Assertions
46
50
  end
47
51
  end
48
52
 
49
- class IOURingBaseTest < Minitest::Test
50
- attr_accessor :ring
51
-
52
- def setup
53
- @ring = IOU::Ring.new
54
- end
55
-
56
- def teardown
57
- ring.close
58
- end
59
- end
60
-
61
53
  class UMBaseTest < Minitest::Test
62
54
  attr_accessor :machine
63
55
 
@@ -66,5 +58,6 @@ class UMBaseTest < Minitest::Test
66
58
  end
67
59
 
68
60
  def teardown
61
+ # @machine&.cleanup
69
62
  end
70
63
  end
data/test/test_um.rb CHANGED
@@ -110,7 +110,7 @@ class SchedulingTest < UMBaseTest
110
110
  def test_timeout_with_raising_block
111
111
  e = nil
112
112
  begin
113
- machine.timeout(0.01, TOError) do
113
+ machine.timeout(0.1, TOError) do
114
114
  raise 'hi'
115
115
  end
116
116
  rescue => e
@@ -125,7 +125,7 @@ class SchedulingTest < UMBaseTest
125
125
  end
126
126
 
127
127
  def test_timeout_with_nothing_blocking
128
- v = machine.timeout(0.01, TOError) { 42 }
128
+ v = machine.timeout(0.1, TOError) { 42 }
129
129
 
130
130
  assert_equal 42, v
131
131
 
@@ -189,7 +189,7 @@ class ReadTest < UMBaseTest
189
189
  assert_equal '', buf
190
190
  end
191
191
 
192
- def test_prep_read_bad_fd
192
+ def test_read_bad_fd
193
193
  _r, w = IO.pipe
194
194
 
195
195
  assert_raises(Errno::EBADF) do
@@ -334,3 +334,81 @@ class ReadEachTest < UMBaseTest
334
334
  assert_equal 0, machine.pending_count
335
335
  end
336
336
  end
337
+
338
+ class WriteTest < UMBaseTest
339
+ def test_write
340
+ r, w = IO.pipe
341
+
342
+ machine.write(w.fileno, 'foo')
343
+ assert_equal 'foo', r.readpartial(3)
344
+
345
+ machine.write(w.fileno, 'bar', 2)
346
+ assert_equal 'ba', r.readpartial(3)
347
+ end
348
+
349
+ def test_write_bad_fd
350
+ r, _w = IO.pipe
351
+
352
+ assert_raises(Errno::EBADF) do
353
+ machine.write(r.fileno, 'foo')
354
+ end
355
+ end
356
+ end
357
+
358
+ class AcceptTest < UMBaseTest
359
+ def setup
360
+ super
361
+ @port = 9000 + rand(1000)
362
+ @server = TCPServer.open('127.0.0.1', @port)
363
+ end
364
+
365
+ def teardown
366
+ @server&.close
367
+ super
368
+ end
369
+
370
+ def test_accept
371
+ conn = TCPSocket.new('127.0.0.1', @port)
372
+
373
+ fd = machine.accept(@server.fileno)
374
+ assert_kind_of Integer, fd
375
+ assert fd > 0
376
+
377
+ machine.write(fd, 'foo')
378
+ buf = conn.readpartial(3)
379
+
380
+ assert_equal 'foo', buf
381
+ end
382
+ end
383
+
384
+ class AcceptEachTest < UMBaseTest
385
+ def setup
386
+ super
387
+ @port = 9000 + rand(1000)
388
+ @server = TCPServer.open('127.0.0.1', @port)
389
+ end
390
+
391
+ def teardown
392
+ @server&.close
393
+ super
394
+ end
395
+
396
+ def test_accept_each
397
+ conns = 3.times.map { TCPSocket.new('127.0.0.1', @port) }
398
+
399
+ count = 0
400
+ machine.accept_each(@server.fileno) do |fd|
401
+ machine.write(fd, (count += 1).to_s)
402
+ break if count == 3
403
+ end
404
+
405
+ assert_equal 3, count
406
+ assert_equal 1, machine.pending_count
407
+ machine.snooze
408
+ assert_equal 0, machine.pending_count
409
+
410
+ assert_equal '1', conns[0].readpartial(3)
411
+ assert_equal '2', conns[1].readpartial(3)
412
+ assert_equal '3', conns[2].readpartial(3)
413
+ end
414
+ end
metadata CHANGED
@@ -1,14 +1,14 @@
1
1
  --- !ruby/object:Gem::Specification
2
2
  name: uringmachine
3
3
  version: !ruby/object:Gem::Version
4
- version: '0.2'
4
+ version: '0.3'
5
5
  platform: ruby
6
6
  authors:
7
7
  - Sharon Rosner
8
8
  autorequire:
9
9
  bindir: bin
10
10
  cert_chain: []
11
- date: 2024-10-03 00:00:00.000000000 Z
11
+ date: 2024-10-04 00:00:00.000000000 Z
12
12
  dependencies:
13
13
  - !ruby/object:Gem::Dependency
14
14
  name: rake-compiler