io-event 1.6.6 → 1.6.7

checksums.yaml CHANGED
@@ -1,7 +1,7 @@
 ---
 SHA256:
-  metadata.gz: 853f198e7ea15ab8efd9f3c864a224af60e1c45342a226ce2768eb554b1a21bb
-  data.tar.gz: 8fd1da4d954a4c78072f78499b322d87fd354e22786b3858f909c91346e5f92e
+  metadata.gz: 53bb3bfd2bffe7db8d43a4dd2d136a2ec9b62df30b78f90894b816a33a855765
+  data.tar.gz: 151b806b36724b5f210e28a59949ca4c4550a79a15e29174b6c7bdf9e5e4ba7d
 SHA512:
-  metadata.gz: 25ea52c93eabc794db7abb8a91eb8b7311dd81eaf0bc725d67a3a553e8591df108b71a49ca3947aa3ed4a69d5e7f0247e1ffd24e0a93c54fbd2f90fc09688c13
-  data.tar.gz: 69937d7849c3c3ea9fd1b657dc5184f1921a44d7f51d8184ded13112ab57a5511bebbf2e40c5acd9b06e393c8e43041a3f0feedb2c4eb70230f330a280ffe125
+  metadata.gz: ef219c25c4acc91a0fb56bfb3a36275b131d71e594682c0f6d383e0d66657ecd286b711bf90323e661d83be0961fa0baaf4368c0e3c2309b0e3376f9c006fefc
+  data.tar.gz: 56112593370c044238ee9198ef1ee608e0be69507a37dd380c9a54d03948074b1ec89e692308f4b6e58096730398be1fd2a3efee4c9e055977e543058716aaec
checksums.yaml.gz.sig CHANGED
Binary file
data/ext/extconf.rb CHANGED
@@ -17,11 +17,9 @@ extension_name = "IO_Event"
 $CFLAGS << " -Wall -Wno-unknown-pragmas -std=c99"
 
 if ENV.key?("RUBY_DEBUG")
-	$CFLAGS << " -DRUBY_DEBUG -O0"
+	$stderr.puts "Enabling debug mode..."
 
-	# Add address and undefined behaviour sanitizers:
-	$CFLAGS << " -fsanitize=undefined -fno-omit-frame-pointer"
-	$LDFLAGS << " -fsanitize=undefined"
+	$CFLAGS << " -DRUBY_DEBUG -O0"
 end
 
 $srcs = ["io/event/event.c", "io/event/selector/selector.c"]
@@ -56,6 +54,14 @@ have_func("epoll_pwait2")
 
 have_header("ruby/io/buffer.h")
 
+if ENV.key?("RUBY_SANITIZE")
+	$stderr.puts "Enabling sanitizers..."
+
+	# Add address and undefined behaviour sanitizers:
+	$CFLAGS << " -fsanitize=address -fsanitize=undefined -fno-omit-frame-pointer"
+	$LDFLAGS << " -fsanitize=address -fsanitize=undefined"
+end
+
 create_header
 
 # Generate the makefile to compile the native binary into `lib`:
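A note on this change: `RUBY_DEBUG` previously enabled both debug compilation and the undefined behaviour sanitizer; it now only enables debug compilation, and the sanitizers (now including ASan alongside UBSan) moved behind a separate `RUBY_SANITIZE` environment variable. Presumably you would set `RUBY_SANITIZE=1` in the environment before compiling the extension (only the key's presence is checked). As a hypothetical standalone illustration, not part of the gem, of the class of bug `-fsanitize=address` reports:

```c
// Hypothetical example, not from io-event: a heap use-after-free that
// AddressSanitizer catches at runtime.
// Compile with: cc -fsanitize=address -fno-omit-frame-pointer example.c
#include <stdlib.h>

int main(void) {
	int *data = malloc(4 * sizeof(int));
	if (data == NULL) return 1;
	free(data);
	// ASan aborts here with a heap-use-after-free report:
	return data[0];
}
```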
data/ext/io/event/array.h CHANGED
@@ -29,6 +29,8 @@ inline static void IO_Event_Array_allocate(struct IO_Event_Array *array, size_t
 {
 	if (count) {
 		array->base = (void**)calloc(count, sizeof(void*));
+		assert(array->base);
+
 		array->count = count;
 	} else {
 		array->base = NULL;
@@ -51,6 +53,7 @@ inline static void IO_Event_Array_free(struct IO_Event_Array *array)
 		void *element = array->base[i];
 		if (element) {
 			array->element_free(element);
+
 			free(element);
 		}
 	}
@@ -107,6 +110,7 @@ inline static void* IO_Event_Array_lookup(struct IO_Event_Array *array, size_t i
 	// Allocate the element if it doesn't exist:
 	if (*element == NULL) {
 		*element = malloc(array->element_size);
+		assert(*element);
 
 		if (array->element_initialize) {
 			array->element_initialize(*element);
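The new `assert` calls turn a silent allocation failure into an immediate crash at the allocation site in debug builds. A minimal sketch of the pattern, using a hypothetical helper name:

```c
#include <assert.h>
#include <stdlib.h>

// Hypothetical helper (not from the gem): assert that the allocation
// succeeded so a debug build fails at the source of the problem rather
// than dereferencing NULL somewhere far away. Note that assert() compiles
// out under NDEBUG, so a release build still needs a real error path.
static void *checked_malloc(size_t size)
{
	void *pointer = malloc(size);
	assert(pointer);
	return pointer;
}
```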
data/ext/io/event/selector/epoll.c CHANGED
@@ -184,7 +184,7 @@ static const rb_data_type_t IO_Event_Selector_EPoll_Type = {
 		.dsize = IO_Event_Selector_EPoll_Type_size,
 	},
 	.data = NULL,
-	.flags = RUBY_TYPED_FREE_IMMEDIATELY,
+	.flags = RUBY_TYPED_FREE_IMMEDIATELY | RUBY_TYPED_WB_PROTECTED,
 };
 
 inline static
@@ -333,7 +333,7 @@ VALUE IO_Event_Selector_EPoll_allocate(VALUE self) {
 	struct IO_Event_Selector_EPoll *selector = NULL;
 	VALUE instance = TypedData_Make_Struct(self, struct IO_Event_Selector_EPoll, &IO_Event_Selector_EPoll_Type, selector);
 
-	IO_Event_Selector_initialize(&selector->backend, Qnil);
+	IO_Event_Selector_initialize(&selector->backend, self, Qnil);
 	selector->descriptor = -1;
 	selector->blocked = 0;
 
@@ -363,7 +363,7 @@ VALUE IO_Event_Selector_EPoll_initialize(VALUE self, VALUE loop) {
 	struct IO_Event_Selector_EPoll *selector = NULL;
 	TypedData_Get_Struct(self, struct IO_Event_Selector_EPoll, &IO_Event_Selector_EPoll_Type, selector);
 
-	IO_Event_Selector_initialize(&selector->backend, loop);
+	IO_Event_Selector_initialize(&selector->backend, self, loop);
 	int result = epoll_create1(EPOLL_CLOEXEC);
 
 	if (result == -1) {
data/ext/io/event/selector/kqueue.c CHANGED
@@ -183,7 +183,7 @@ static const rb_data_type_t IO_Event_Selector_KQueue_Type = {
 		.dsize = IO_Event_Selector_KQueue_Type_size,
 	},
 	.data = NULL,
-	.flags = RUBY_TYPED_FREE_IMMEDIATELY,
+	.flags = RUBY_TYPED_FREE_IMMEDIATELY | RUBY_TYPED_WB_PROTECTED,
 };
 
 inline static
@@ -307,7 +307,7 @@ VALUE IO_Event_Selector_KQueue_allocate(VALUE self) {
 	struct IO_Event_Selector_KQueue *selector = NULL;
 	VALUE instance = TypedData_Make_Struct(self, struct IO_Event_Selector_KQueue, &IO_Event_Selector_KQueue_Type, selector);
 
-	IO_Event_Selector_initialize(&selector->backend, Qnil);
+	IO_Event_Selector_initialize(&selector->backend, self, Qnil);
 	selector->descriptor = -1;
 	selector->blocked = 0;
 
@@ -340,7 +340,7 @@ VALUE IO_Event_Selector_KQueue_initialize(VALUE self, VALUE loop) {
 	struct IO_Event_Selector_KQueue *selector = NULL;
 	TypedData_Get_Struct(self, struct IO_Event_Selector_KQueue, &IO_Event_Selector_KQueue_Type, selector);
 
-	IO_Event_Selector_initialize(&selector->backend, loop);
+	IO_Event_Selector_initialize(&selector->backend, self, loop);
 	int result = kqueue();
 
 	if (result == -1) {
data/ext/io/event/selector/selector.c CHANGED
@@ -172,12 +172,14 @@ static void queue_pop(struct IO_Event_Selector *backend, struct IO_Event_Selecto
 	if (waiting->head) {
 		waiting->head->tail = waiting->tail;
 	} else {
+		// We must have been at the head of the queue:
 		backend->waiting = waiting->tail;
 	}
 
 	if (waiting->tail) {
 		waiting->tail->head = waiting->head;
 	} else {
+		// We must have been at the tail of the queue:
 		backend->ready = waiting->head;
 	}
 
@@ -190,12 +192,15 @@ static void queue_push(struct IO_Event_Selector *backend, struct IO_Event_Select
 	assert(waiting->tail == NULL);
 
 	if (backend->waiting) {
+		// If there was an item in the queue already, we shift it along:
 		backend->waiting->head = waiting;
 		waiting->tail = backend->waiting;
 	} else {
+		// If the queue was empty, we update the tail too:
 		backend->ready = waiting;
 	}
 
+	// We always push to the front/head:
 	backend->waiting = waiting;
 }
 
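The comments added above spell out the queue's invariants: items are pushed at the front (`backend->waiting`) and consumed from the back (`backend->ready`), with each node linked to both neighbours. A simplified, self-contained sketch of the same doubly linked deque (hypothetical `Deque`/`Node` names, not the gem's types):

```c
#include <stddef.h>

// Simplified sketch of the queue discipline described above:
// push at the front (waiting), consume from the back (ready).
struct Node {
	struct Node *head; // Neighbour closer to the front of the queue.
	struct Node *tail; // Neighbour closer to the back of the queue.
};

struct Deque {
	struct Node *waiting; // Front: new nodes are pushed here.
	struct Node *ready; // Back: nodes are popped from here.
};

static void deque_push(struct Deque *deque, struct Node *node) {
	node->head = NULL;
	node->tail = deque->waiting;

	if (deque->waiting) {
		// Shift the previous front node along:
		deque->waiting->head = node;
	} else {
		// The queue was empty, so this node is also the back:
		deque->ready = node;
	}

	// We always push to the front:
	deque->waiting = node;
}

static void deque_pop(struct Deque *deque, struct Node *node) {
	if (node->head) {
		node->head->tail = node->tail;
	} else {
		// The node was at the front:
		deque->waiting = node->tail;
	}

	if (node->tail) {
		node->tail->head = node->head;
	} else {
		// The node was at the back:
		deque->ready = node->head;
	}

	node->head = node->tail = NULL;
}
```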
@@ -276,11 +281,14 @@ VALUE IO_Event_Selector_raise(struct IO_Event_Selector *backend, int argc, VALUE
 void IO_Event_Selector_queue_push(struct IO_Event_Selector *backend, VALUE fiber)
 {
 	struct IO_Event_Selector_Queue *waiting = malloc(sizeof(struct IO_Event_Selector_Queue));
+	assert(waiting);
 
 	waiting->head = NULL;
 	waiting->tail = NULL;
 	waiting->flags = IO_EVENT_SELECTOR_QUEUE_INTERNAL;
+
 	waiting->fiber = fiber;
+	RB_OBJ_WRITTEN(backend->self, Qundef, fiber);
 
 	queue_push(backend, waiting);
 }
@@ -292,7 +300,7 @@ void IO_Event_Selector_queue_pop(struct IO_Event_Selector *backend, struct IO_Ev
 
 	if (ready->flags & IO_EVENT_SELECTOR_QUEUE_FIBER) {
 		IO_Event_Selector_fiber_transfer(ready->fiber, 0, NULL);
-	} else {
+	} else if (ready->flags & IO_EVENT_SELECTOR_QUEUE_INTERNAL) {
 		VALUE fiber = ready->fiber;
 		queue_pop(backend, ready);
 		free(ready);
@@ -300,6 +308,8 @@ void IO_Event_Selector_queue_pop(struct IO_Event_Selector *backend, struct IO_Ev
 		if (RTEST(rb_funcall(fiber, id_alive_p, 0))) {
 			rb_funcall(fiber, id_transfer, 0);
 		}
+	} else {
+		rb_raise(rb_eRuntimeError, "Unknown queue type!");
 	}
 }
 
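The trailing `else` used to assume that anything which wasn't a fiber entry was an internal entry; each flag is now checked explicitly, and an unrecognised entry raises instead of being misinterpreted. A minimal sketch of that defensive dispatch, with hypothetical flag names:

```c
#include "ruby.h"

// Hypothetical flags, standing in for the gem's queue entry types:
enum {
	QUEUE_FIBER = 1,
	QUEUE_INTERNAL = 2,
};

// Dispatch on explicit flags and fail loudly on an unexpected value,
// instead of letting a catch-all branch guess:
static void dispatch(VALUE fiber, int flags) {
	if (flags & QUEUE_FIBER) {
		rb_funcall(fiber, rb_intern("transfer"), 0);
	} else if (flags & QUEUE_INTERNAL) {
		if (RTEST(rb_funcall(fiber, rb_intern("alive?"), 0))) {
			rb_funcall(fiber, rb_intern("transfer"), 0);
		}
	} else {
		rb_raise(rb_eRuntimeError, "Unknown queue type!");
	}
}
```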
305
315
 
@@ -307,6 +317,8 @@ int IO_Event_Selector_queue_flush(struct IO_Event_Selector *backend)
307
317
  {
308
318
  int count = 0;
309
319
 
320
+ // During iteration of the queue, the same item may be re-queued. If we don't handle this correctly, we may end up in an infinite loop. So, to avoid this situation, we keep note of the current head of the queue and break the loop if we reach the same item again.
321
+
310
322
  // Get the current tail and head of the queue:
311
323
  struct IO_Event_Selector_Queue *waiting = backend->waiting;
312
324
  if (DEBUG) fprintf(stderr, "IO_Event_Selector_queue_flush waiting = %p\n", waiting);
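The new comment describes the termination guard. Continuing the hypothetical deque sketch from earlier, the guard might look like this:

```c
// Continuing the hypothetical deque sketch: flush processes from the back,
// but remembers the node that was at the front when the flush began. Any
// node re-queued during processing lands in front of that marker, so it is
// deferred to the next flush rather than causing an infinite loop.
static int deque_flush(struct Deque *deque, void (*process)(struct Node *node)) {
	int count = 0;

	// The front of the queue when the flush began:
	struct Node *stop = deque->waiting;

	while (deque->ready) {
		struct Node *node = deque->ready;
		deque_pop(deque, node);
		process(node); // May call deque_push, re-queueing nodes at the front.
		count += 1;

		// Once we have processed the original front, stop:
		if (node == stop) break;
	}

	return count;
}
```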
data/ext/io/event/selector/selector.h CHANGED
@@ -95,27 +95,31 @@ struct IO_Event_Selector_Queue {
 };
 
 struct IO_Event_Selector {
+	VALUE self;
 	VALUE loop;
 
 	struct IO_Event_Selector_Queue *free;
 
-	// Append to waiting.
+	// Append to waiting (front/head of queue).
 	struct IO_Event_Selector_Queue *waiting;
-	// Process from ready.
+	// Process from ready (back/tail of queue).
 	struct IO_Event_Selector_Queue *ready;
 };
 
 static inline
-void IO_Event_Selector_initialize(struct IO_Event_Selector *backend, VALUE loop) {
-	backend->loop = loop;
+void IO_Event_Selector_initialize(struct IO_Event_Selector *backend, VALUE self, VALUE loop) {
+	RB_OBJ_WRITE(self, &backend->self, self);
+	RB_OBJ_WRITE(self, &backend->loop, loop);
 	backend->waiting = NULL;
 	backend->ready = NULL;
 }
 
 static inline
 void IO_Event_Selector_mark(struct IO_Event_Selector *backend) {
+	rb_gc_mark_movable(backend->self);
 	rb_gc_mark_movable(backend->loop);
 
+	// Walk backwards through the ready queue:
 	struct IO_Event_Selector_Queue *ready = backend->ready;
 	while (ready) {
 		rb_gc_mark_movable(ready->fiber);
@@ -125,6 +129,7 @@ void IO_Event_Selector_mark(struct IO_Event_Selector *backend) {
 
 static inline
 void IO_Event_Selector_compact(struct IO_Event_Selector *backend) {
+	backend->self = rb_gc_location(backend->self);
 	backend->loop = rb_gc_location(backend->loop);
 
 	struct IO_Event_Selector_Queue *ready = backend->ready;
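Taken together with the `RUBY_TYPED_WB_PROTECTED` flag added to the epoll, kqueue and io_uring selector types, storing `self` in the backend is what enables generational GC write barriers: every `VALUE` written into the struct is reported via `RB_OBJ_WRITE`/`RB_OBJ_WRITTEN`, so Ruby's GC can promote a selector to the old generation without rescanning it on every minor collection, while `rb_gc_mark_movable`/`rb_gc_location` let the compactor move referenced objects. A minimal self-contained sketch of the whole pattern, using a hypothetical `Example` type rather than the gem's code:

```c
#include "ruby.h"

// Hypothetical TypedData type demonstrating the write-barrier and
// compaction pattern used above.
struct Example {
	VALUE child;
};

static void Example_mark(void *data) {
	struct Example *example = data;
	// Mark without pinning, so the compactor is free to move the child:
	rb_gc_mark_movable(example->child);
}

static void Example_compact(void *data) {
	struct Example *example = data;
	// After compaction, ask the GC for the child's new location:
	example->child = rb_gc_location(example->child);
}

static const rb_data_type_t Example_Type = {
	.wrap_struct_name = "Example",
	.function = {
		.dmark = Example_mark,
		.dcompact = Example_compact,
		.dfree = RUBY_TYPED_DEFAULT_FREE,
	},
	// WB_PROTECTED is a promise: every reference stored into the struct
	// is reported to the GC via RB_OBJ_WRITE/RB_OBJ_WRITTEN.
	.flags = RUBY_TYPED_FREE_IMMEDIATELY | RUBY_TYPED_WB_PROTECTED,
};

static VALUE Example_set_child(VALUE self, VALUE child) {
	struct Example *example;
	TypedData_Get_Struct(self, struct Example, &Example_Type, example);

	// The write barrier records that `self` now references `child`:
	RB_OBJ_WRITE(self, &example->child, child);

	return child;
}
```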
data/ext/io/event/selector/uring.c CHANGED
@@ -152,7 +152,7 @@ static const rb_data_type_t IO_Event_Selector_URing_Type = {
 		.dsize = IO_Event_Selector_URing_Type_size,
 	},
 	.data = NULL,
-	.flags = RUBY_TYPED_FREE_IMMEDIATELY,
+	.flags = RUBY_TYPED_FREE_IMMEDIATELY | RUBY_TYPED_WB_PROTECTED,
 };
 
 inline static
@@ -228,7 +228,7 @@ VALUE IO_Event_Selector_URing_allocate(VALUE self) {
 	struct IO_Event_Selector_URing *selector = NULL;
 	VALUE instance = TypedData_Make_Struct(self, struct IO_Event_Selector_URing, &IO_Event_Selector_URing_Type, selector);
 
-	IO_Event_Selector_initialize(&selector->backend, Qnil);
+	IO_Event_Selector_initialize(&selector->backend, self, Qnil);
 	selector->ring.ring_fd = -1;
 
 	selector->pending = 0;
@@ -249,7 +249,7 @@ VALUE IO_Event_Selector_URing_initialize(VALUE self, VALUE loop) {
 	struct IO_Event_Selector_URing *selector = NULL;
 	TypedData_Get_Struct(self, struct IO_Event_Selector_URing, &IO_Event_Selector_URing_Type, selector);
 
-	IO_Event_Selector_initialize(&selector->backend, loop);
+	IO_Event_Selector_initialize(&selector->backend, self, loop);
 	int result = io_uring_queue_init(URING_ENTRIES, &selector->ring, 0);
 
 	if (result < 0) {
data/lib/io/event/version.rb CHANGED
@@ -5,6 +5,6 @@
 
 class IO
 	module Event
-		VERSION = "1.6.6"
+		VERSION = "1.6.7"
 	end
 end
data.tar.gz.sig CHANGED
Binary file
metadata CHANGED
@@ -1,7 +1,7 @@
 --- !ruby/object:Gem::Specification
 name: io-event
 version: !ruby/object:Gem::Version
-  version: 1.6.6
+  version: 1.6.7
 platform: ruby
 authors:
 - Samuel Williams
metadata.gz.sig CHANGED
Binary file