io-event 1.6.7 → 1.7.1
- checksums.yaml +4 -4
- checksums.yaml.gz.sig +0 -0
- data/ext/io/event/event.c +2 -8
- data/ext/io/event/interrupt.c +7 -0
- data/ext/io/event/selector/epoll.c +3 -6
- data/ext/io/event/selector/kqueue.c +1 -4
- data/ext/io/event/selector/list.h +19 -1
- data/ext/io/event/selector/selector.c +1 -2
- data/ext/io/event/selector/selector.h +0 -2
- data/ext/io/event/selector/uring.c +2 -4
- data/lib/io/event/version.rb +1 -1
- data/license.md +2 -0
- data.tar.gz.sig +0 -0
- metadata +3 -3
- metadata.gz.sig +0 -0
checksums.yaml
CHANGED
@@ -1,7 +1,7 @@
 ---
 SHA256:
-  metadata.gz:
-  data.tar.gz:
+  metadata.gz: b7b49a9181d8917dd2dba7620f964ad0dced4d33ab9a5d725f5493debd927ea6
+  data.tar.gz: ce62d1d7c3488ccb39620a66b3c25a12537a586fc0e6b9b9f3263ad662abbcb0
 SHA512:
-  metadata.gz:
-  data.tar.gz:
+  metadata.gz: 6c0d875c4ec99d9670d16980ea6b6f92073547b6da089cccab1dc96af6b40f0034a3f3ce6151123acddedb48b808b27782bce638ec93884823a07951fb381f57
+  data.tar.gz: fc818ad30b6087149020f7b9fc718c9219b54d19dbfef69c468605c4154d58e0a75d802d1b646829d2a48c8a24d4b4bcdae15a5cea21aa2826d051eb1975f585
checksums.yaml.gz.sig
CHANGED
Binary file
data/ext/io/event/event.c
CHANGED
@@ -21,20 +21,14 @@
 #include "event.h"
 #include "selector/selector.h"
 
-VALUE IO_Event = Qnil;
-VALUE IO_Event_Selector = Qnil;
-
 void Init_IO_Event(void)
 {
 #ifdef HAVE_RB_EXT_RACTOR_SAFE
 	rb_ext_ractor_safe(true);
 #endif
 	
-	IO_Event = rb_define_module_under(rb_cIO, "Event");
-
-
-	IO_Event_Selector = rb_define_module_under(IO_Event, "Selector");
-	rb_gc_register_mark_object(IO_Event_Selector);
+	VALUE IO_Event = rb_define_module_under(rb_cIO, "Event");
+	VALUE IO_Event_Selector = rb_define_module_under(IO_Event, "Selector");
 	
 	Init_IO_Event_Selector(IO_Event_Selector);
 
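Note: the hunk above drops the file-scope globals and the `rb_gc_register_mark_object` call in favour of local `VALUE`s. This is safe because `rb_define_module_under` assigns the module to a constant (`IO::Event`), so it stays reachable through the constant table; and since no C-level reference outlives the initializer, there is nothing left for GC compaction to invalidate. A minimal sketch of the pattern — module and method names here are hypothetical, not from this gem:

#include <ruby.h>

// Hypothetical method, for illustration only.
static VALUE example_supported_p(VALUE self)
{
	return Qtrue;
}

void Init_example(void)
{
	// The constant IO::Example keeps the module alive via the constant
	// table, so a local VALUE is sufficient and
	// rb_gc_register_mark_object() is redundant.
	VALUE IO_Example = rb_define_module_under(rb_cIO, "Example");
	
	rb_define_singleton_method(IO_Example, "supported?", example_supported_p, 0);
}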
data/ext/io/event/interrupt.c
CHANGED
@@ -25,6 +25,13 @@
 
 #include "selector/selector.h"
 
+#ifdef HAVE_RUBY_WIN32_H
+#include <ruby/win32.h>
+#if !defined(HAVE_PIPE) && !defined(pipe)
+#define pipe(p) rb_w32_pipe(p)
+#endif
+#endif
+
 #ifdef HAVE_SYS_EVENTFD_H
 #include <sys/eventfd.h>
 
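Note: this block ports the interrupt mechanism to Windows. Win32 Ruby builds do not define POSIX `pipe()`, but `ruby/win32.h` provides `rb_w32_pipe`, so the macro lets the existing portable call sites compile unchanged. A sketch of how a call site benefits — the function name and error message are illustrative, not the gem's:

#include <ruby.h>

#ifdef HAVE_RUBY_WIN32_H
#include <ruby/win32.h>
#if !defined(HAVE_PIPE) && !defined(pipe)
#define pipe(p) rb_w32_pipe(p)
#endif
#endif

// Portable call site: on POSIX this is pipe(2); on Windows the macro
// expands it to rb_w32_pipe(), Ruby's win32 replacement.
static void example_interrupt_open(int descriptors[2])
{
	if (pipe(descriptors) == -1) {
		rb_sys_fail("example_interrupt_open:pipe");
	}
}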
data/ext/io/event/selector/epoll.c
CHANGED
@@ -34,8 +34,6 @@ enum {
 	DEBUG = 0,
 };
 
-static VALUE IO_Event_Selector_EPoll = Qnil;
-
 enum {EPOLL_MAX_EVENTS = 64};
 
 // This represents an actual fiber waiting for a specific event.
@@ -243,7 +241,7 @@ int IO_Event_Selector_EPoll_Descriptor_update(struct IO_Event_Selector_EPoll *se
 	} else {
 		// The IO has changed, we need to reset the state:
 		epoll_descriptor->registered_events = 0;
-		epoll_descriptor->io = io;
+		RB_OBJ_WRITE(selector->backend.self, &epoll_descriptor->io, io);
 	}
 	
 	if (epoll_descriptor->waiting_events == 0) {
@@ -253,7 +251,7 @@ int IO_Event_Selector_EPoll_Descriptor_update(struct IO_Event_Selector_EPoll *se
 		epoll_descriptor->registered_events = 0;
 	}
 	
-	epoll_descriptor->io = 0;
+	RB_OBJ_WRITE(selector->backend.self, &epoll_descriptor->io, 0);
 	
 	return 0;
 }
@@ -1037,8 +1035,7 @@ VALUE IO_Event_Selector_EPoll_wakeup(VALUE self) {
 }
 
 void Init_IO_Event_Selector_EPoll(VALUE IO_Event_Selector) {
-	IO_Event_Selector_EPoll = rb_define_class_under(IO_Event_Selector, "EPoll", rb_cObject);
-	rb_gc_register_mark_object(IO_Event_Selector_EPoll);
+	VALUE IO_Event_Selector_EPoll = rb_define_class_under(IO_Event_Selector, "EPoll", rb_cObject);
 
 	rb_define_alloc_func(IO_Event_Selector_EPoll, IO_Event_Selector_EPoll_allocate);
 	rb_define_method(IO_Event_Selector_EPoll, "initialize", IO_Event_Selector_EPoll_initialize, 1);
data/ext/io/event/selector/kqueue.c
CHANGED
@@ -43,8 +43,6 @@ enum {
 #define IO_EVENT_SELECTOR_KQUEUE_USE_INTERRUPT
 #endif
 
-static VALUE IO_Event_Selector_KQueue = Qnil;
-
 enum {KQUEUE_MAX_EVENTS = 64};
 
 // This represents an actual fiber waiting for a specific event.
@@ -1052,8 +1050,7 @@ VALUE IO_Event_Selector_KQueue_wakeup(VALUE self) {
 }
 
 void Init_IO_Event_Selector_KQueue(VALUE IO_Event_Selector) {
-	IO_Event_Selector_KQueue = rb_define_class_under(IO_Event_Selector, "KQueue", rb_cObject);
-	rb_gc_register_mark_object(IO_Event_Selector_KQueue);
+	VALUE IO_Event_Selector_KQueue = rb_define_class_under(IO_Event_Selector, "KQueue", rb_cObject);
 
 	rb_define_alloc_func(IO_Event_Selector_KQueue, IO_Event_Selector_KQueue_allocate);
 	rb_define_method(IO_Event_Selector_KQueue, "initialize", IO_Event_Selector_KQueue_initialize, 1);
data/ext/io/event/selector/list.h
CHANGED
@@ -38,6 +38,7 @@ inline static void IO_Event_List_append(struct IO_Event_List *list, struct IO_Ev
 	head->tail = node;
 }
 
+// Prepend an item to the beginning of the list.
 inline static void IO_Event_List_prepend(struct IO_Event_List *list, struct IO_Event_List *node)
 {
 	assert(node->head == NULL);
@@ -64,6 +65,7 @@ inline static void IO_Event_List_pop(struct IO_Event_List *node)
 	node->head = node->tail = NULL;
 }
 
+// Remove an item from the list, if it is in a list.
 inline static void IO_Event_List_free(struct IO_Event_List *node)
 {
 	if (node->head && node->tail) {
@@ -71,11 +73,27 @@ inline static void IO_Event_List_free(struct IO_Event_List *node)
 	}
 }
 
-inline static int IO_Event_List_empty(struct IO_Event_List *list)
+// Calculate the memory size of the list nodes.
+inline static size_t IO_Event_List_memory_size(const struct IO_Event_List *list)
+{
+	size_t memsize = 0;
+	
+	const struct IO_Event_List *node = list->tail;
+	while (node != list) {
+		memsize += sizeof(struct IO_Event_List);
+		node = node->tail;
+	}
+	
+	return memsize;
+}
+
+// Return true if the list is empty.
+inline static int IO_Event_List_empty(const struct IO_Event_List *list)
 {
 	return list->head == list->tail;
 }
 
+// Enumerate all items in the list, assuming the list will not be modified during iteration.
 inline static void IO_Event_List_immutable_each(struct IO_Event_List *list, void (*callback)(struct IO_Event_List *node))
 {
 	struct IO_Event_List *node = list->tail;
data/ext/io/event/selector/selector.c
CHANGED
@@ -287,8 +287,7 @@ void IO_Event_Selector_queue_push(struct IO_Event_Selector *backend, VALUE fiber
 	waiting->tail = NULL;
 	waiting->flags = IO_EVENT_SELECTOR_QUEUE_INTERNAL;
 	
-	waiting->fiber = fiber;
-	RB_OBJ_WRITTEN(backend->self, Qundef, fiber);
+	RB_OBJ_WRITE(backend->self, &waiting->fiber, fiber);
 	
 	queue_push(backend, waiting);
 }
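Note: this hunk replaces the two-step spelling (plain store followed by `RB_OBJ_WRITTEN`) with the fused `RB_OBJ_WRITE`. Both keep the GC informed: `RB_OBJ_WRITTEN(parent, old, value)` only records that `parent` gained a reference to `value`, while `RB_OBJ_WRITE` performs the store as well, so the barrier can never drift apart from the assignment it protects. Side by side, in two illustrative helper functions:

#include <ruby.h>

// Fused: store `value` into the slot and run the write barrier in one step.
static void store_fused(VALUE owner, VALUE *slot, VALUE value)
{
	RB_OBJ_WRITE(owner, slot, value);
}

// Split: manual store, then notify the GC of the new owner -> value edge.
static void store_split(VALUE owner, VALUE *slot, VALUE value)
{
	*slot = value;
	RB_OBJ_WRITTEN(owner, Qundef, value);
}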
data/ext/io/event/selector/uring.c
CHANGED
@@ -37,8 +37,6 @@ enum {
 	DEBUG_COMPLETION = 0,
 };
 
-static VALUE IO_Event_Selector_URing = Qnil;
-
 enum {URING_ENTRIES = 64};
 
 #pragma mark - Data Type
@@ -140,6 +138,7 @@ size_t IO_Event_Selector_URing_Type_size(const void *_selector)
 
 	return sizeof(struct IO_Event_Selector_URing)
 		+ IO_Event_Array_memory_size(&selector->completions)
+		+ IO_Event_List_memory_size(&selector->free_list)
 	;
 }
 
@@ -1094,8 +1093,7 @@ VALUE IO_Event_Selector_URing_wakeup(VALUE self) {
 #pragma mark - Native Methods
 
 void Init_IO_Event_Selector_URing(VALUE IO_Event_Selector) {
-	IO_Event_Selector_URing = rb_define_class_under(IO_Event_Selector, "URing", rb_cObject);
-	rb_gc_register_mark_object(IO_Event_Selector_URing);
+	VALUE IO_Event_Selector_URing = rb_define_class_under(IO_Event_Selector, "URing", rb_cObject);
 
 	rb_define_alloc_func(IO_Event_Selector_URing, IO_Event_Selector_URing_allocate);
 	rb_define_method(IO_Event_Selector_URing, "initialize", IO_Event_Selector_URing_initialize, 1);
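Note: with `IO_Event_List_memory_size(&selector->free_list)` added, the uring selector's `Type_size` callback now accounts for the free-list nodes too, so `ObjectSpace.memsize_of(selector)` reports a more complete figure. A sketch of how such a callback plugs into a `TypedData` type — the struct and type names are hypothetical, not the gem's:

#include <ruby.h>
#include "list.h" // io-event's intrusive list header (path assumed)

struct example_selector {
	struct IO_Event_List free_list;
};

static size_t example_selector_memory_size(const void *_selector)
{
	const struct example_selector *selector = _selector;
	
	// The struct itself plus one node header per item on the free list:
	return sizeof(struct example_selector)
		+ IO_Event_List_memory_size(&selector->free_list);
}

// ObjectSpace.memsize_of consults the dsize callback of the wrapping type:
static const rb_data_type_t example_selector_type = {
	.wrap_struct_name = "IO::Event::ExampleSelector",
	.function = {
		.dsize = example_selector_memory_size,
	},
	.flags = RUBY_TYPED_FREE_IMMEDIATELY,
};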
data/lib/io/event/version.rb
CHANGED
data/license.md
CHANGED
@@ -9,6 +9,8 @@ Copyright, 2022, by Bruno Sutic.
 Copyright, 2023, by Math Ieu.
 Copyright, 2024, by Pavel Rosický.
 Copyright, 2024, by Anthony Ross.
+Copyright, 2024, by Shizuo Fujita.
+Copyright, 2024, by Jean Boussier.
 
 Permission is hereby granted, free of charge, to any person obtaining a copy
 of this software and associated documentation files (the "Software"), to deal
data.tar.gz.sig
CHANGED
Binary file
metadata
CHANGED
@@ -1,7 +1,7 @@
 --- !ruby/object:Gem::Specification
 name: io-event
 version: !ruby/object:Gem::Version
-  version: 1.6.7
+  version: 1.7.1
 platform: ruby
 authors:
 - Samuel Williams
@@ -45,7 +45,7 @@ cert_chain:
   Q2K9NVun/S785AP05vKkXZEFYxqG6EW012U4oLcFl5MySFajYXRYbuUpH6AY+HP8
   voD0MPg1DssDLKwXyt1eKD/+Fq0bFWhwVM/1XiAXL7lyYUyOq24KHgQ2Csg=
   -----END CERTIFICATE-----
-date: 2024-10-
+date: 2024-10-04 00:00:00.000000000 Z
 dependencies: []
 description:
 email:
@@ -104,7 +104,7 @@ required_rubygems_version: !ruby/object:Gem::Requirement
 - !ruby/object:Gem::Version
   version: '0'
 requirements: []
-rubygems_version: 3.
+rubygems_version: 3.5.11
 signing_key:
 specification_version: 4
 summary: An event loop.
metadata.gz.sig
CHANGED
Binary file