io-event 1.9.0 → 1.10.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
checksums.yaml CHANGED
@@ -1,7 +1,7 @@
  ---
  SHA256:
-   metadata.gz: 7265f0fb5702ae05b5d928a8efb9e5a5e877f62afc5c93d23fec26f8a3dd1437
-   data.tar.gz: 8ba9b4b0d4e95be1401b8952cccac1a9bcc93c7a0a40e843aea60f0015c791ad
+   metadata.gz: ec418d3289f8648ac13d7808bbe8d56b50a3b4cf518f61da116c56c3c345daa9
+   data.tar.gz: 8de6423981f2bfb2da54e0cd99e493787253a2c2c83bc644f72fffdcf52a295c
  SHA512:
-   metadata.gz: b4346972aa2dbe00b560e2a1e2cce32a50e9acafaa0c9253f12791c183f35bea8e3cffba93eb93147b4d975ef05edeffef84e0ed8bf29934fb39c70ea1320c6f
-   data.tar.gz: 41b6b53ca30bd0934c702c1ff0899e0e8a6bd3bc97a95f455b0d6cf0be28ff690b69575c44d92a784226edea27958cd43c486e9d907989859e298d602536078b
+   metadata.gz: 9fd70cf075b1703fd8e4659fa2adaf7381f7d5523e47e8700eeda233a01679c671ebb8197fd06f525da17b7a30332e996dd087874503f8ff7a594b8f88268554
+   data.tar.gz: c42e23c74e1069fb199fd1044b7fc487b8f1587294703a19362242bbbfa23c21fbba36a6a114fafb978ddc4cfcd2a21d2a6c7b7c3d3dc1c4a08094ca9b51bd2a
checksums.yaml.gz.sig CHANGED
Binary file
data/ext/extconf.rb CHANGED
@@ -22,7 +22,7 @@ if ENV.key?("RUBY_DEBUG")
  	$CFLAGS << " -DRUBY_DEBUG -O0"
  end
  
- $srcs = ["io/event/event.c", "io/event/time.c", "io/event/fiber.c", "io/event/profiler.c", "io/event/selector/selector.c"]
+ $srcs = ["io/event/event.c", "io/event/time.c", "io/event/fiber.c", "io/event/selector/selector.c"]
  $VPATH << "$(srcdir)/io/event"
  $VPATH << "$(srcdir)/io/event/selector"
  
data/ext/io/event/event.c CHANGED
@@ -3,9 +3,7 @@
  
  #include "event.h"
  #include "fiber.h"
- #include "profiler.h"
  #include "selector/selector.h"
- #include <complex.h>
  
  void Init_IO_Event(void)
  {
@@ -16,7 +14,6 @@ void Init_IO_Event(void)
  	VALUE IO_Event = rb_define_module_under(rb_cIO, "Event");
  
  	Init_IO_Event_Fiber(IO_Event);
- 	Init_IO_Event_Profiler(IO_Event);
  
  	VALUE IO_Event_Selector = rb_define_module_under(IO_Event, "Selector");
  	Init_IO_Event_Selector(IO_Event_Selector);
data/ext/io/event/selector/epoll.c CHANGED
@@ -1035,7 +1035,25 @@ VALUE IO_Event_Selector_EPoll_wakeup(VALUE self) {
  	return Qfalse;
  }
  
+ static int IO_Event_Selector_EPoll_supported_p(void) {
+ 	int fd = epoll_create1(EPOLL_CLOEXEC);
+ 	
+ 	if (fd < 0) {
+ 		rb_warn("epoll_create1() was available at compile time but failed at run time: %s\n", strerror(errno));
+ 		
+ 		return 0;
+ 	}
+ 	
+ 	close(fd);
+ 	
+ 	return 1;
+ }
+ 
  void Init_IO_Event_Selector_EPoll(VALUE IO_Event_Selector) {
+ 	if (!IO_Event_Selector_EPoll_supported_p()) {
+ 		return;
+ 	}
+ 	
  	VALUE IO_Event_Selector_EPoll = rb_define_class_under(IO_Event_Selector, "EPoll", rb_cObject);
  
  	rb_define_alloc_func(IO_Event_Selector_EPoll, IO_Event_Selector_EPoll_allocate);
data/ext/io/event/selector/kqueue.c CHANGED
@@ -1045,7 +1045,26 @@ VALUE IO_Event_Selector_KQueue_wakeup(VALUE self) {
  	return Qfalse;
  }
  
+ 
+ static int IO_Event_Selector_KQueue_supported_p(void) {
+ 	int fd = kqueue();
+ 	
+ 	if (fd < 0) {
+ 		rb_warn("kqueue() was available at compile time but failed at run time: %s\n", strerror(errno));
+ 		
+ 		return 0;
+ 	}
+ 	
+ 	close(fd);
+ 	
+ 	return 1;
+ }
+ 
  void Init_IO_Event_Selector_KQueue(VALUE IO_Event_Selector) {
+ 	if (!IO_Event_Selector_KQueue_supported_p()) {
+ 		return;
+ 	}
+ 	
  	VALUE IO_Event_Selector_KQueue = rb_define_class_under(IO_Event_Selector, "KQueue", rb_cObject);
  
  	rb_define_alloc_func(IO_Event_Selector_KQueue, IO_Event_Selector_KQueue_allocate);
data/ext/io/event/selector/uring.c CHANGED
@@ -1175,7 +1175,26 @@ VALUE IO_Event_Selector_URing_wakeup(VALUE self) {
  
  #pragma mark - Native Methods
  
+ static int IO_Event_Selector_URing_supported_p(void) {
+ 	struct io_uring ring;
+ 	int result = io_uring_queue_init(32, &ring, 0);
+ 	
+ 	if (result < 0) {
+ 		rb_warn("io_uring_queue_init() was available at compile time but failed at run time: %s\n", strerror(-result));
+ 		
+ 		return 0;
+ 	}
+ 	
+ 	io_uring_queue_exit(&ring);
+ 	
+ 	return 1;
+ }
+ 
  void Init_IO_Event_Selector_URing(VALUE IO_Event_Selector) {
+ 	if (!IO_Event_Selector_URing_supported_p()) {
+ 		return;
+ 	}
+ 	
  	VALUE IO_Event_Selector_URing = rb_define_class_under(IO_Event_Selector, "URing", rb_cObject);
  
  	rb_define_alloc_func(IO_Event_Selector_URing, IO_Event_Selector_URing_allocate);
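
All three hunks above share one pattern: a `_supported_p` probe exercises the underlying syscall once at load time, and the `Init_` function returns early on failure, so the corresponding Ruby class is never defined and a `rb_warn` message explains why. A minimal sketch of the Ruby-visible effect (assumes io-event 1.10.0 is installed; which names appear depends on platform and sandbox policy):

``` ruby
require "io/event"

# EPoll, KQueue and URing exist only if both the compile-time check and the
# new runtime probe succeeded; Select is pure Ruby and is always defined.
%w[URing EPoll KQueue Select].each do |name|
	if IO::Event::Selector.const_defined?(name)
		puts "#{name}: supported"
	else
		puts "#{name}: not compiled in, or rejected by the runtime probe"
	end
end
```
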
data/lib/io/event/version.rb CHANGED
@@ -7,6 +7,6 @@
  class IO
  	# @namespace
  	module Event
- 		VERSION = "1.9.0"
+ 		VERSION = "1.10.0"
  	end
  end
data/readme.md CHANGED
@@ -1,4 +1,4 @@
- # ![Event](logo.svg)
+ # ![IO::Event](logo.svg)
  
  Provides low level cross-platform primitives for constructing event loops, with support for `select`, `kqueue`, `epoll` and `io_uring`.
  
@@ -18,13 +18,18 @@ Please see the [project documentation](https://socketry.github.io/io-event/) for
  
  Please see the [project releases](https://socketry.github.io/io-event/releases/index) for all releases.
  
+ ### v1.10.0
+ 
+ - `IO::Event::Profiler` is moved to dedicated gem: [fiber-profiler](https://github.com/socketry/fiber-profiler).
+ - Perform runtime checks for native selectors to ensure they are supported in the current environment. While compile-time checks determine availability, restrictions like seccomp and SELinux may still prevent them from working.
+ 
  ### v1.9.0
  
- - [Improved `IO::Event::Profiler` for detecting stalls.](https://socketry.github.io/io-event/releases/index#improved-io::event::profiler-for-detecting-stalls.)
+ - Improved `IO::Event::Profiler` for detecting stalls.
  
  ### v1.8.0
  
- - [Detecting fibers that are stalling the event loop.](https://socketry.github.io/io-event/releases/index#detecting-fibers-that-are-stalling-the-event-loop.)
+ - Detecting fibers that are stalling the event loop.
  
  ### v1.7.5
  
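
Because `IO::Event::Profiler` no longer exists in v1.10.0, code written against v1.9.0 should guard on the constant. A hypothetical migration sketch, using only the `default`/`start` API documented in the removed release notes below:

``` ruby
require "io/event"

if defined?(IO::Event::Profiler)
	# io-event <= 1.9.0: the built-in profiler; `default` returns nil unless
	# IO_EVENT_PROFILER=true is set in the environment.
	profiler = IO::Event::Profiler.default
	profiler&.start
else
	# io-event >= 1.10.0: the profiler was extracted to the fiber-profiler gem.
	warn "IO::Event::Profiler is gone; use the fiber-profiler gem instead"
end
```
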
data/releases.md CHANGED
@@ -1,56 +1,17 @@
  # Releases
  
- ## v1.9.0
- 
- ### Improved `IO::Event::Profiler` for detecting stalls.
- 
- A new `IO::Event::Profiler` class has been added to help detect stalls in the event loop. The previous approach was insufficient to detect all possible stalls. This new approach uses the `RUBY_EVENT_FIBER_SWITCH` event to track context switching by the scheduler, and can detect stalls no matter how they occur.
- 
- ``` ruby
- profiler = IO::Event::Profiler.new
+ ## v1.10.0
  
- profiler.start
- 
- Fiber.new do
- 	sleep 1.0
- end.transfer
+ - `IO::Event::Profiler` is moved to dedicated gem: [fiber-profiler](https://github.com/socketry/fiber-profiler).
+ - Perform runtime checks for native selectors to ensure they are supported in the current environment. While compile-time checks determine availability, restrictions like seccomp and SELinux may still prevent them from working.
  
- profiler.stop
- ```
- 
- A default profiler is exposed using `IO::Event::Profiler.default` which is controlled by the following environment variables:
- 
- - `IO_EVENT_PROFILER=true` - Enable the profiler, otherwise `IO::Event::Profiler.default` will return `nil`.
- - `IO_EVENT_PROFILER_LOG_THRESHOLD` - Specify the threshold in seconds for logging a stall. Defaults to `0.01`.
- - `IO_EVENT_PROFILER_TRACK_CALLS` - Track the method call for each event, in order to log specifically which method is causing the stall. Defaults to `true`.
+ ## v1.9.0
  
- The previous environment variables `IO_EVENT_SELECTOR_STALL_LOG_THRESHOLD` and `IO_EVENT_SELECTOR_STALL_LOG` no longer have any effect.
+ - Improved `IO::Event::Profiler` for detecting stalls.
  
  ## v1.8.0
  
- ### Detecting fibers that are stalling the event loop.
- 
- A new (experimental) feature for detecting fiber stalls has been added. This feature is disabled by default and can be enabled by setting the `IO_EVENT_SELECTOR_STALL_LOG_THRESHOLD` to `true` or a floating point number representing the threshold in seconds.
- 
- When enabled, the event loop will measure and profile user code when resuming a fiber. If the fiber takes too long to return back to the event loop, the event loop will log a warning message with a profile of the fiber's execution.
- 
-     > cat test.rb
-     #!/usr/bin/env ruby
-     
-     require_relative "lib/async"
-     
-     Async do
-     	Fiber.blocking do
-     		sleep 1
-     	end
-     end
-     
-     > IO_EVENT_SELECTOR_STALL_LOG_THRESHOLD=true bundle exec ./test.rb
-     Fiber stalled for 1.003 seconds
-     /home/samuel/Developer/socketry/async/test.rb:6 in '#<Class:Fiber>#blocking' (1s)
-     /home/samuel/Developer/socketry/async/test.rb:7 in 'Kernel#sleep' (1s)
- 
- There is a performance overhead to this feature, so it is recommended to only enable it when debugging performance issues.
+ - Detecting fibers that are stalling the event loop.
  
  ## v1.7.5
  
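
The practical upshot of the second v1.10.0 bullet is graceful degradation: a process sandboxed by seccomp or SELinux no longer gets a selector class whose syscalls fail later. A sketch of a fail-fast boot check under that assumption (the requirement for io_uring here is illustrative, not a recommendation from the gem):

``` ruby
require "io/event"

# Abort early if the deployment expects io_uring but the runtime probe
# rejected it (for example, blocked by a seccomp profile):
unless IO::Event::Selector.const_defined?(:URing)
	abort "io_uring selector unavailable; adjust the sandbox or accept a fallback"
end
```
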
data.tar.gz.sig CHANGED
Binary file
metadata CHANGED
@@ -1,7 +1,7 @@
  --- !ruby/object:Gem::Specification
  name: io-event
  version: !ruby/object:Gem::Version
-   version: 1.9.0
+   version: 1.10.0
  platform: ruby
  authors:
  - Samuel Williams
@@ -46,7 +46,7 @@ cert_chain:
  Q2K9NVun/S785AP05vKkXZEFYxqG6EW012U4oLcFl5MySFajYXRYbuUpH6AY+HP8
  voD0MPg1DssDLKwXyt1eKD/+Fq0bFWhwVM/1XiAXL7lyYUyOq24KHgQ2Csg=
  -----END CERTIFICATE-----
- date: 2025-02-10 00:00:00.000000000 Z
+ date: 2025-03-12 00:00:00.000000000 Z
  dependencies: []
  executables: []
  extensions:
@@ -63,8 +63,6 @@ files:
  - ext/io/event/interrupt.c
  - ext/io/event/interrupt.h
  - ext/io/event/list.h
- - ext/io/event/profiler.c
- - ext/io/event/profiler.h
  - ext/io/event/selector/epoll.c
  - ext/io/event/selector/epoll.h
  - ext/io/event/selector/kqueue.c
@@ -81,7 +79,6 @@ files:
  - lib/io/event/interrupt.rb
  - lib/io/event/native.rb
  - lib/io/event/priority_heap.rb
- - lib/io/event/profiler.rb
  - lib/io/event/selector.rb
  - lib/io/event/selector/nonblock.rb
  - lib/io/event/selector/select.rb
metadata.gz.sig CHANGED
Binary file
data/ext/io/event/profiler.c DELETED
@@ -1,505 +0,0 @@
- // Released under the MIT License.
- // Copyright, 2025, by Samuel Williams.
- 
- #include "profiler.h"
- 
- #include "time.h"
- #include "fiber.h"
- #include "array.h"
- 
- #include <ruby/debug.h>
- #include <stdio.h>
- 
- VALUE IO_Event_Profiler = Qnil;
- 
- struct IO_Event_Profiler_Call {
- 	struct timespec enter_time;
- 	struct timespec exit_time;
- 
- 	size_t nesting;
- 
- 	rb_event_flag_t event_flag;
- 	ID id;
- 
- 	VALUE klass;
- 	const char *path;
- 	int line;
- 
- 	struct IO_Event_Profiler_Call *parent;
- };
- 
- struct IO_Event_Profiler {
- 	// Configuration:
- 	float log_threshold;
- 	int track_calls;
- 
- 	// Whether or not the profiler is currently running:
- 	int running;
- 
- 	// Whether or not to capture call data:
- 	int capture;
- 
- 	size_t stalls;
- 
- 	// From this point on, the state of any profile in progress:
- 	struct timespec start_time;
- 	struct timespec stop_time;
- 
- 	// The depth of the call stack:
- 	size_t nesting;
- 
- 	// The current call frame:
- 	struct IO_Event_Profiler_Call *current;
- 
- 	struct IO_Event_Array calls;
- };
- 
- void IO_Event_Profiler_reset(struct IO_Event_Profiler *profiler) {
- 	profiler->nesting = 0;
- 	profiler->current = NULL;
- 	IO_Event_Array_truncate(&profiler->calls, 0);
- }
- 
- void IO_Event_Profiler_Call_initialize(struct IO_Event_Profiler_Call *call) {
- 	call->enter_time.tv_sec = 0;
- 	call->enter_time.tv_nsec = 0;
- 	call->exit_time.tv_sec = 0;
- 	call->exit_time.tv_nsec = 0;
- 
- 	call->nesting = 0;
- 
- 	call->event_flag = 0;
- 	call->id = 0;
- 
- 	call->path = NULL;
- 	call->line = 0;
- }
- 
- void IO_Event_Profiler_Call_free(struct IO_Event_Profiler_Call *call) {
- 	if (call->path) {
- 		free((void*)call->path);
- 		call->path = NULL;
- 	}
- }
- 
- static void IO_Event_Profiler_mark(void *ptr) {
- 	struct IO_Event_Profiler *profiler = (struct IO_Event_Profiler*)ptr;
- 
- 	// If `klass` is stored as a VALUE in calls, we need to mark them here:
- 	for (size_t i = 0; i < profiler->calls.limit; i += 1) {
- 		struct IO_Event_Profiler_Call *call = profiler->calls.base[i];
- 		rb_gc_mark_movable(call->klass);
- 	}
- }
- 
- static void IO_Event_Profiler_compact(void *ptr) {
- 	struct IO_Event_Profiler *profiler = (struct IO_Event_Profiler*)ptr;
- 
- 	// If `klass` is stored as a VALUE in calls, we need to update their locations here:
- 	for (size_t i = 0; i < profiler->calls.limit; i += 1) {
- 		struct IO_Event_Profiler_Call *call = profiler->calls.base[i];
- 		call->klass = rb_gc_location(call->klass);
- 	}
- }
- 
- static void IO_Event_Profiler_free(void *ptr) {
- 	struct IO_Event_Profiler *profiler = (struct IO_Event_Profiler*)ptr;
- 
- 	IO_Event_Array_free(&profiler->calls);
- 
- 	free(profiler);
- }
- 
- static size_t IO_Event_Profiler_memsize(const void *ptr) {
- 	const struct IO_Event_Profiler *profiler = (const struct IO_Event_Profiler*)ptr;
- 	return sizeof(*profiler) + IO_Event_Array_memory_size(&profiler->calls);
- }
- 
- const rb_data_type_t IO_Event_Profiler_Type = {
- 	.wrap_struct_name = "IO::Event::Profiler",
- 	.function = {
- 		.dmark = IO_Event_Profiler_mark,
- 		.dcompact = IO_Event_Profiler_compact,
- 		.dfree = IO_Event_Profiler_free,
- 		.dsize = IO_Event_Profiler_memsize,
- 	},
- 	.flags = RUBY_TYPED_FREE_IMMEDIATELY | RUBY_TYPED_WB_PROTECTED,
- };
- 
- struct IO_Event_Profiler *IO_Event_Profiler_get(VALUE self) {
- 	struct IO_Event_Profiler *profiler;
- 	TypedData_Get_Struct(self, struct IO_Event_Profiler, &IO_Event_Profiler_Type, profiler);
- 	return profiler;
- }
- 
- VALUE IO_Event_Profiler_allocate(VALUE klass) {
- 	struct IO_Event_Profiler *profiler = ALLOC(struct IO_Event_Profiler);
- 
- 	// Initialize the profiler state:
- 	profiler->running = 0;
- 	profiler->capture = 0;
- 	profiler->stalls = 0;
- 	profiler->nesting = 0;
- 	profiler->current = NULL;
- 
- 	profiler->calls.element_initialize = (void (*)(void*))IO_Event_Profiler_Call_initialize;
- 	profiler->calls.element_free = (void (*)(void*))IO_Event_Profiler_Call_free;
- 	IO_Event_Array_initialize(&profiler->calls, 0, sizeof(struct IO_Event_Profiler_Call));
- 
- 	return TypedData_Wrap_Struct(klass, &IO_Event_Profiler_Type, profiler);
- }
- 
- int IO_Event_Profiler_p(void) {
- 	const char *enabled = getenv("IO_EVENT_PROFILER");
- 
- 	if (enabled && strcmp(enabled, "true") == 0) {
- 		return 1;
- 	}
- 
- 	return 0;
- }
- 
- float IO_Event_Profiler_default_log_threshold(void) {
- 	const char *log_threshold = getenv("IO_EVENT_PROFILER_LOG_THRESHOLD");
- 
- 	if (log_threshold) {
- 		return strtof(log_threshold, NULL);
- 	} else {
- 		return 0.01;
- 	}
- }
- 
- int IO_Event_Profiler_default_track_calls(void) {
- 	const char *track_calls = getenv("IO_EVENT_PROFILER_TRACK_CALLS");
- 
- 	if (track_calls && strcmp(track_calls, "false") == 0) {
- 		return 0;
- 	} else {
- 		return 1;
- 	}
- }
- 
- VALUE IO_Event_Profiler_initialize(int argc, VALUE *argv, VALUE self) {
- 	struct IO_Event_Profiler *profiler = IO_Event_Profiler_get(self);
- 	VALUE log_threshold, track_calls;
- 
- 	rb_scan_args(argc, argv, "02", &log_threshold, &track_calls);
- 
- 	if (RB_NIL_P(log_threshold)) {
- 		profiler->log_threshold = IO_Event_Profiler_default_log_threshold();
- 	} else {
- 		profiler->log_threshold = NUM2DBL(log_threshold);
- 	}
- 
- 	if (RB_NIL_P(track_calls)) {
- 		profiler->track_calls = IO_Event_Profiler_default_track_calls();
- 	} else {
- 		profiler->track_calls = RB_TEST(track_calls);
- 	}
- 
- 	return self;
- }
- 
- VALUE IO_Event_Profiler_default(VALUE klass) {
- 	if (!IO_Event_Profiler_p()) {
- 		return Qnil;
- 	}
- 
- 	VALUE profiler = IO_Event_Profiler_allocate(klass);
- 
- 	struct IO_Event_Profiler *profiler_data = IO_Event_Profiler_get(profiler);
- 	profiler_data->log_threshold = IO_Event_Profiler_default_log_threshold();
- 	profiler_data->track_calls = IO_Event_Profiler_default_track_calls();
- 
- 	return profiler;
- }
- 
- VALUE IO_Event_Profiler_new(float log_threshold, int track_calls) {
- 	VALUE profiler = IO_Event_Profiler_allocate(IO_Event_Profiler);
- 
- 	struct IO_Event_Profiler *profiler_data = IO_Event_Profiler_get(profiler);
- 	profiler_data->log_threshold = log_threshold;
- 	profiler_data->track_calls = track_calls;
- 
- 	return profiler;
- }
- 
- int event_flag_call_p(rb_event_flag_t event_flags) {
- 	return event_flags & (RUBY_EVENT_CALL | RUBY_EVENT_C_CALL | RUBY_EVENT_B_CALL);
- }
- 
- int event_flag_return_p(rb_event_flag_t event_flags) {
- 	return event_flags & (RUBY_EVENT_RETURN | RUBY_EVENT_C_RETURN | RUBY_EVENT_B_RETURN);
- }
- 
- const char *event_flag_name(rb_event_flag_t event_flag) {
- 	switch (event_flag) {
- 		case RUBY_EVENT_CALL: return "call";
- 		case RUBY_EVENT_C_CALL: return "c-call";
- 		case RUBY_EVENT_B_CALL: return "b-call";
- 		case RUBY_EVENT_RETURN: return "return";
- 		case RUBY_EVENT_C_RETURN: return "c-return";
- 		case RUBY_EVENT_B_RETURN: return "b-return";
- 		default: return "unknown";
- 	}
- }
- 
- static struct IO_Event_Profiler_Call* profiler_event_record_call(struct IO_Event_Profiler *profiler, rb_event_flag_t event_flag, ID id, VALUE klass) {
- 	struct IO_Event_Profiler_Call *call = IO_Event_Array_push(&profiler->calls);
- 
- 	call->event_flag = event_flag;
- 
- 	call->parent = profiler->current;
- 	profiler->current = call;
- 
- 	call->nesting = profiler->nesting;
- 	profiler->nesting += 1;
- 
- 	if (id) {
- 		call->id = id;
- 		call->klass = klass;
- 	} else {
- 		rb_frame_method_id_and_class(&call->id, &call->klass);
- 	}
- 
- 	const char *path = rb_sourcefile();
- 	if (path) {
- 		call->path = strdup(path);
- 	}
- 	call->line = rb_sourceline();
- 
- 	return call;
- }
- 
- void IO_Event_Profiler_fiber_switch(struct IO_Event_Profiler *profiler);
- 
- static void IO_Event_Profiler_callback(rb_event_flag_t event_flag, VALUE data, VALUE self, ID id, VALUE klass) {
- 	struct IO_Event_Profiler *profiler = IO_Event_Profiler_get(data);
- 
- 	if (event_flag & RUBY_EVENT_FIBER_SWITCH) {
- 		IO_Event_Profiler_fiber_switch(profiler);
- 		return;
- 	}
- 
- 	// We don't want to capture data if we're not running:
- 	if (!profiler->capture) return;
- 
- 	if (event_flag_call_p(event_flag)) {
- 		struct IO_Event_Profiler_Call *call = profiler_event_record_call(profiler, event_flag, id, klass);
- 		IO_Event_Time_current(&call->enter_time);
- 	}
- 
- 	else if (event_flag_return_p(event_flag)) {
- 		struct IO_Event_Profiler_Call *call = profiler->current;
- 
- 		// We may encounter returns without a preceeding call. This isn't an error, but we should pretend like the call started at the beginning of the profiling session:
- 		if (call == NULL) {
- 			struct IO_Event_Profiler_Call *last_call = IO_Event_Array_last(&profiler->calls);
- 			call = profiler_event_record_call(profiler, event_flag, id, klass);
- 
- 			if (last_call) {
- 				call->enter_time = last_call->enter_time;
- 			} else {
- 				call->enter_time = profiler->start_time;
- 			}
- 		}
- 
- 		IO_Event_Time_current(&call->exit_time);
- 
- 		profiler->current = call->parent;
- 
- 		// We may encounter returns without a preceeding call.
- 		if (profiler->nesting > 0)
- 			profiler->nesting -= 1;
- 	}
- }
- 
- VALUE IO_Event_Profiler_start(VALUE self) {
- 	struct IO_Event_Profiler *profiler = IO_Event_Profiler_get(self);
- 
- 	if (profiler->running) return Qfalse;
- 
- 	profiler->running = 1;
- 
- 	IO_Event_Profiler_reset(profiler);
- 	IO_Event_Time_current(&profiler->start_time);
- 
- 	rb_event_flag_t event_flags = RUBY_EVENT_FIBER_SWITCH;
- 
- 	if (profiler->track_calls) {
- 		event_flags |= RUBY_EVENT_CALL | RUBY_EVENT_RETURN;
- 		event_flags |= RUBY_EVENT_C_CALL | RUBY_EVENT_C_RETURN;
- 		// event_flags |= RUBY_EVENT_B_CALL | RUBY_EVENT_B_RETURN;
- 	}
- 
- 	VALUE thread = rb_thread_current();
- 	rb_thread_add_event_hook(thread, IO_Event_Profiler_callback, event_flags, self);
- 
- 	return self;
- }
- 
- VALUE IO_Event_Profiler_stop(VALUE self) {
- 	struct IO_Event_Profiler *profiler = IO_Event_Profiler_get(self);
- 
- 	if (!profiler->running) return Qfalse;
- 
- 	profiler->running = 0;
- 
- 	VALUE thread = rb_thread_current();
- 	rb_thread_remove_event_hook_with_data(thread, IO_Event_Profiler_callback, self);
- 
- 	IO_Event_Time_current(&profiler->stop_time);
- 	IO_Event_Profiler_reset(profiler);
- 
- 	return self;
- }
- 
- static inline float IO_Event_Profiler_duration(struct IO_Event_Profiler *profiler) {
- 	struct timespec duration;
- 
- 	IO_Event_Time_current(&profiler->stop_time);
- 	IO_Event_Time_elapsed(&profiler->start_time, &profiler->stop_time, &duration);
- 
- 	return IO_Event_Time_duration(&duration);
- }
- 
- void IO_Event_Profiler_print(struct IO_Event_Profiler *profiler, FILE *restrict stream);
- 
- void IO_Event_Profiler_finish(struct IO_Event_Profiler *profiler) {
- 	profiler->capture = 0;
- 
- 	struct IO_Event_Profiler_Call *current = profiler->current;
- 	while (current) {
- 		IO_Event_Time_current(&current->exit_time);
- 
- 		current = current->parent;
- 	}
- }
- 
- void IO_Event_Profiler_fiber_switch(struct IO_Event_Profiler *profiler)
- {
- 	float duration = IO_Event_Profiler_duration(profiler);
- 
- 	if (profiler->capture) {
- 		IO_Event_Profiler_finish(profiler);
- 
- 		if (duration > profiler->log_threshold) {
- 			profiler->stalls += 1;
- 			IO_Event_Profiler_print(profiler, stderr);
- 		}
- 	}
- 
- 	IO_Event_Profiler_reset(profiler);
- 
- 	if (!IO_Event_Fiber_blocking(IO_Event_Fiber_current())) {
- 		// Reset the start time:
- 		IO_Event_Time_current(&profiler->start_time);
- 
- 		profiler->capture = 1;
- 	}
- }
- 
- static const float IO_EVENT_PROFILER_PRINT_MINIMUM_PROPORTION = 0.01;
- 
- void IO_Event_Profiler_print_tty(struct IO_Event_Profiler *profiler, FILE *restrict stream) {
- 	struct timespec total_duration = {};
- 	IO_Event_Time_elapsed(&profiler->start_time, &profiler->stop_time, &total_duration);
- 
- 	fprintf(stderr, "Fiber stalled for %.3f seconds\n", IO_Event_Time_duration(&total_duration));
- 
- 	size_t skipped = 0;
- 
- 	for (size_t i = 0; i < profiler->calls.limit; i += 1) {
- 		struct IO_Event_Profiler_Call *call = profiler->calls.base[i];
- 		struct timespec duration = {};
- 		IO_Event_Time_elapsed(&call->enter_time, &call->exit_time, &duration);
- 
- 		// Skip calls that are too short to be meaningful:
- 		if (IO_Event_Time_proportion(&duration, &total_duration) < IO_EVENT_PROFILER_PRINT_MINIMUM_PROPORTION) {
- 			skipped += 1;
- 			continue;
- 		}
- 
- 		for (size_t i = 0; i < call->nesting; i += 1) {
- 			fputc('\t', stream);
- 		}
- 
- 		VALUE class_inspect = rb_inspect(call->klass);
- 		const char *name = rb_id2name(call->id);
- 
- 		fprintf(stream, "%s:%d in %s '%s#%s' (" IO_EVENT_TIME_PRINTF_TIMESPEC "s)\n", call->path, call->line, event_flag_name(call->event_flag), RSTRING_PTR(class_inspect), name, IO_EVENT_TIME_PRINTF_TIMESPEC_ARGUMENTS(duration));
- 	}
- 
- 	if (skipped > 0) {
- 		fprintf(stream, "Skipped %zu calls that were too short to be meaningful.\n", skipped);
- 	}
- }
- 
- void IO_Event_Profiler_print_json(struct IO_Event_Profiler *profiler, FILE *restrict stream) {
- 	struct timespec total_duration = {};
- 	IO_Event_Time_elapsed(&profiler->start_time, &profiler->stop_time, &total_duration);
- 
- 	fputc('{', stream);
- 
- 	fprintf(stream, "\"duration\":" IO_EVENT_TIME_PRINTF_TIMESPEC, IO_EVENT_TIME_PRINTF_TIMESPEC_ARGUMENTS(total_duration));
- 
- 	size_t skipped = 0;
- 
- 	fprintf(stream, ",\"calls\":[");
- 	int first = 1;
- 
- 	for (size_t i = 0; i < profiler->calls.limit; i += 1) {
- 		struct IO_Event_Profiler_Call *call = profiler->calls.base[i];
- 		struct timespec duration = {};
- 		IO_Event_Time_elapsed(&call->enter_time, &call->exit_time, &duration);
- 
- 		// Skip calls that are too short to be meaningful:
- 		if (IO_Event_Time_proportion(&duration, &total_duration) < IO_EVENT_PROFILER_PRINT_MINIMUM_PROPORTION) {
- 			skipped += 1;
- 			continue;
- 		}
- 
- 		VALUE class_inspect = rb_inspect(call->klass);
- 		const char *name = rb_id2name(call->id);
- 
- 		fprintf(stream, "%s{\"path\":\"%s\",\"line\":%d,\"class\":\"%s\",\"method\":\"%s\",\"duration\":" IO_EVENT_TIME_PRINTF_TIMESPEC ",\"nesting\":%zu}", first ? "" : ",", call->path, call->line, RSTRING_PTR(class_inspect), name, IO_EVENT_TIME_PRINTF_TIMESPEC_ARGUMENTS(duration), call->nesting);
- 
- 		first = 0;
- 	}
- 
- 	fprintf(stream, "]");
- 
- 	if (skipped > 0) {
- 		fprintf(stream, ",\"skipped\":%zu", skipped);
- 	}
- 
- 	fprintf(stream, "}\n");
- }
- 
- void IO_Event_Profiler_print(struct IO_Event_Profiler *profiler, FILE *restrict stream) {
- 	if (isatty(fileno(stream))) {
- 		IO_Event_Profiler_print_tty(profiler, stream);
- 	} else {
- 		IO_Event_Profiler_print_json(profiler, stream);
- 	}
- }
- 
- VALUE IO_Event_Profiler_stalls(VALUE self) {
- 	struct IO_Event_Profiler *profiler = IO_Event_Profiler_get(self);
- 
- 	return SIZET2NUM(profiler->stalls);
- }
- 
- void Init_IO_Event_Profiler(VALUE IO_Event) {
- 	IO_Event_Profiler = rb_define_class_under(IO_Event, "Profiler", rb_cObject);
- 	rb_define_alloc_func(IO_Event_Profiler, IO_Event_Profiler_allocate);
- 
- 	rb_define_singleton_method(IO_Event_Profiler, "default", IO_Event_Profiler_default, 0);
- 
- 	rb_define_method(IO_Event_Profiler, "initialize", IO_Event_Profiler_initialize, -1);
- 
- 	rb_define_method(IO_Event_Profiler, "start", IO_Event_Profiler_start, 0);
- 	rb_define_method(IO_Event_Profiler, "stop", IO_Event_Profiler_stop, 0);
- 
- 	rb_define_method(IO_Event_Profiler, "stalls", IO_Event_Profiler_stalls, 0);
- }
data/ext/io/event/profiler.h DELETED
@@ -1,8 +0,0 @@
- // Released under the MIT License.
- // Copyright, 2025, by Samuel Williams.
- 
- #pragma once
- 
- #include <ruby.h>
- 
- void Init_IO_Event_Profiler(VALUE IO_Event);
data/lib/io/event/profiler.rb DELETED
@@ -1,18 +0,0 @@
- # frozen_string_literal: true
- 
- # Released under the MIT License.
- # Copyright, 2025, by Samuel Williams.
- 
- require_relative "native"
- 
- module IO::Event
- 	unless self.const_defined?(:Profiler)
- 		module Profiler
- 			# The default profiler, if the platform supports it.
- 			# Use `IO_EVENT_PROFILER=true` to enable it.
- 			def self.default
- 				nil
- 			end
- 		end
- 	end
- end