io-event 1.8.4 → 1.9.0

checksums.yaml CHANGED
@@ -1,7 +1,7 @@
 ---
 SHA256:
-  metadata.gz: f8f87753ac2f9c6404a21012d3e2dd385302bbfd49b3769383ef03bc48bd85da
-  data.tar.gz: 4d158577022ddfe99db042ee2d3285bd9db86d9192e8081e0d34217f526252f4
+  metadata.gz: 7265f0fb5702ae05b5d928a8efb9e5a5e877f62afc5c93d23fec26f8a3dd1437
+  data.tar.gz: 8ba9b4b0d4e95be1401b8952cccac1a9bcc93c7a0a40e843aea60f0015c791ad
 SHA512:
-  metadata.gz: 77ef53ab5090825e04ca6fcee9f1643c42977fffced2493485b68f085790657a62c8280091a8564f92e6edd22150a762b22e70e1b3d09e42dacced90faa5b192
-  data.tar.gz: 0f11763cc34d3f84ccc0c131b275ca837f64bac73e6a87069c2bbb2e14fe5e665051f95a8495fa32a04fcd82edecefb928970e040709017393996b9d6cb7d71d
+  metadata.gz: b4346972aa2dbe00b560e2a1e2cce32a50e9acafaa0c9253f12791c183f35bea8e3cffba93eb93147b4d975ef05edeffef84e0ed8bf29934fb39c70ea1320c6f
+  data.tar.gz: 41b6b53ca30bd0934c702c1ff0899e0e8a6bd3bc97a95f455b0d6cf0be28ff690b69575c44d92a784226edea27958cd43c486e9d907989859e298d602536078b
checksums.yaml.gz.sig CHANGED
Binary file
data/ext/extconf.rb CHANGED
@@ -2,7 +2,7 @@
 # frozen_string_literal: true
 
 # Released under the MIT License.
-# Copyright, 2021-2024, by Samuel Williams.
+# Copyright, 2021-2025, by Samuel Williams.
 # Copyright, 2023, by Math Ieu.
 
 return if RUBY_DESCRIPTION =~ /jruby/
@@ -22,7 +22,7 @@ if ENV.key?("RUBY_DEBUG")
 	$CFLAGS << " -DRUBY_DEBUG -O0"
 end
 
-$srcs = ["io/event/event.c", "io/event/selector/selector.c", "io/event/time.c", "io/event/profile.c"]
+$srcs = ["io/event/event.c", "io/event/time.c", "io/event/fiber.c", "io/event/profiler.c", "io/event/selector/selector.c"]
 $VPATH << "$(srcdir)/io/event"
 $VPATH << "$(srcdir)/io/event/selector"
 
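The rebuilt $srcs list registers the new fiber.c and profiler.c sources (and drops the old profile.c). For context, a minimal sketch of how an extconf.rb like this drives mkmf, using a hypothetical extension name and file names:

	# extconf.rb (sketch): an explicit $srcs list overrides mkmf's default *.c glob,
	# and $VPATH lets the generated Makefile locate sources in subdirectories.
	require "mkmf"

	$srcs = ["example/main.c", "example/helper.c"]
	$VPATH << "$(srcdir)/example"

	create_makefile("example")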
data/ext/io/event/array.h CHANGED
@@ -159,6 +159,22 @@ inline static void* IO_Event_Array_last(struct IO_Event_Array *array)
 	else return array->base[array->limit - 1];
 }
 
+inline static void IO_Event_Array_truncate(struct IO_Event_Array *array, size_t limit)
+{
+	if (limit < array->limit) {
+		for (size_t i = limit; i < array->limit; i += 1) {
+			void **element = array->base + i;
+			if (*element) {
+				array->element_free(*element);
+				free(*element);
+				*element = NULL;
+			}
+		}
+		
+		array->limit = limit;
+	}
+}
+
 // Push a new element onto the end of the array.
 inline static void* IO_Event_Array_push(struct IO_Event_Array *array)
 {
data/ext/io/event/event.c CHANGED
@@ -2,8 +2,10 @@
 // Copyright, 2021-2025, by Samuel Williams.
 
 #include "event.h"
-#include "profile.h"
+#include "fiber.h"
+#include "profiler.h"
 #include "selector/selector.h"
+#include <complex.h>
 
 void Init_IO_Event(void)
 {
@@ -13,7 +15,8 @@ void Init_IO_Event(void)
 
 	VALUE IO_Event = rb_define_module_under(rb_cIO, "Event");
 	
-	Init_IO_Event_Profile(IO_Event);
+	Init_IO_Event_Fiber(IO_Event);
+	Init_IO_Event_Profiler(IO_Event);
 	
 	VALUE IO_Event_Selector = rb_define_module_under(IO_Event, "Selector");
 	Init_IO_Event_Selector(IO_Event_Selector);
data/ext/io/event/fiber.c ADDED
@@ -0,0 +1,63 @@
+// Released under the MIT License.
+// Copyright, 2025, by Samuel Williams.
+
+#include "fiber.h"
+
+static ID id_transfer, id_alive_p;
+
+VALUE IO_Event_Fiber_transfer(VALUE fiber, int argc, VALUE *argv) {
+	// TODO Consider introducing something like `rb_fiber_scheduler_transfer(...)`.
+#ifdef HAVE__RB_FIBER_TRANSFER
+	if (RTEST(rb_obj_is_fiber(fiber))) {
+		if (RTEST(rb_fiber_alive_p(fiber))) {
+			return rb_fiber_transfer(fiber, argc, argv);
+		}
+		
+		// If it's a fiber, but dead, we are done.
+		return Qnil;
+	}
+#endif
+	if (RTEST(rb_funcall(fiber, id_alive_p, 0))) {
+		return rb_funcallv(fiber, id_transfer, argc, argv);
+	}
+	
+	return Qnil;
+}
+
+#ifndef HAVE__RB_FIBER_RAISE
+static ID id_raise;
+
+VALUE IO_Event_Fiber_raise(VALUE fiber, int argc, VALUE *argv) {
+	return rb_funcallv(fiber, id_raise, argc, argv);
+}
+#endif
+
+#ifndef HAVE_RB_FIBER_CURRENT
+static ID id_current;
+
+static VALUE IO_Event_Fiber_current(void) {
+	return rb_funcall(rb_cFiber, id_current, 0);
+}
+#endif
+
+// There is no public interface for this... yet.
+static ID id_blocking_p;
+
+int IO_Event_Fiber_blocking(VALUE fiber) {
+	return RTEST(rb_funcall(fiber, id_blocking_p, 0));
+}
+
+void Init_IO_Event_Fiber(VALUE IO_Event) {
+	id_transfer = rb_intern("transfer");
+	id_alive_p = rb_intern("alive?");
+	
+#ifndef HAVE__RB_FIBER_RAISE
+	id_raise = rb_intern("raise");
+#endif
+	
+#ifndef HAVE_RB_FIBER_CURRENT
+	id_current = rb_intern("current");
+#endif
+	
+	id_blocking_p = rb_intern("blocking?");
+}
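fiber.c hides version differences in the C fiber API: when HAVE__RB_FIBER_TRANSFER is not detected, it falls back to invoking the Ruby methods by name. A rough Ruby-level equivalent of that fallback path (transfer_if_alive is a hypothetical name; Fiber#transfer and Fiber.current are core in recent Rubies, older ones need require "fiber"):

	# Sketch of IO_Event_Fiber_transfer's fallback semantics in plain Ruby:
	def transfer_if_alive(fiber, *arguments)
		return nil unless fiber.alive? # dead fibers are skipped, yielding nil
		fiber.transfer(*arguments)
	end

	main = Fiber.current
	worker = Fiber.new {|value| main.transfer(value * 2)}

	transfer_if_alive(worker, 21) # => 42
	worker.alive? # => true, suspended at the transfer back to main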
data/ext/io/event/fiber.h ADDED
@@ -0,0 +1,23 @@
+// Released under the MIT License.
+// Copyright, 2025, by Samuel Williams.
+
+#pragma once
+
+#include <ruby.h>
+
+VALUE IO_Event_Fiber_transfer(VALUE fiber, int argc, VALUE *argv);
+
+#ifdef HAVE__RB_FIBER_RAISE
+#define IO_Event_Fiber_raise(fiber, argc, argv) rb_fiber_raise(fiber, argc, argv)
+#else
+VALUE IO_Event_Fiber_raise(VALUE fiber, int argc, VALUE *argv);
+#endif
+
+#ifdef HAVE_RB_FIBER_CURRENT
+#define IO_Event_Fiber_current() rb_fiber_current()
+#else
+VALUE IO_Event_Fiber_current(void);
+#endif
+
+int IO_Event_Fiber_blocking(VALUE fiber);
+void Init_IO_Event_Fiber(VALUE IO_Event);
data/ext/io/event/profiler.c ADDED
@@ -0,0 +1,505 @@
+// Released under the MIT License.
+// Copyright, 2025, by Samuel Williams.
+
+#include "profiler.h"
+
+#include "time.h"
+#include "fiber.h"
+#include "array.h"
+
+#include <ruby/debug.h>
+#include <stdio.h>
+
+VALUE IO_Event_Profiler = Qnil;
+
+struct IO_Event_Profiler_Call {
+	struct timespec enter_time;
+	struct timespec exit_time;
+	
+	size_t nesting;
+	
+	rb_event_flag_t event_flag;
+	ID id;
+	
+	VALUE klass;
+	const char *path;
+	int line;
+	
+	struct IO_Event_Profiler_Call *parent;
+};
+
+struct IO_Event_Profiler {
+	// Configuration:
+	float log_threshold;
+	int track_calls;
+	
+	// Whether or not the profiler is currently running:
+	int running;
+	
+	// Whether or not to capture call data:
+	int capture;
+	
+	size_t stalls;
+	
+	// From this point on, the state of any profile in progress:
+	struct timespec start_time;
+	struct timespec stop_time;
+	
+	// The depth of the call stack:
+	size_t nesting;
+	
+	// The current call frame:
+	struct IO_Event_Profiler_Call *current;
+	
+	struct IO_Event_Array calls;
+};
+
+void IO_Event_Profiler_reset(struct IO_Event_Profiler *profiler) {
+	profiler->nesting = 0;
+	profiler->current = NULL;
+	IO_Event_Array_truncate(&profiler->calls, 0);
+}
+
+void IO_Event_Profiler_Call_initialize(struct IO_Event_Profiler_Call *call) {
+	call->enter_time.tv_sec = 0;
+	call->enter_time.tv_nsec = 0;
+	call->exit_time.tv_sec = 0;
+	call->exit_time.tv_nsec = 0;
+	
+	call->nesting = 0;
+	
+	call->event_flag = 0;
+	call->id = 0;
+	
+	call->path = NULL;
+	call->line = 0;
+}
+
+void IO_Event_Profiler_Call_free(struct IO_Event_Profiler_Call *call) {
+	if (call->path) {
+		free((void*)call->path);
+		call->path = NULL;
+	}
+}
+
+static void IO_Event_Profiler_mark(void *ptr) {
+	struct IO_Event_Profiler *profiler = (struct IO_Event_Profiler*)ptr;
+	
+	// If `klass` is stored as a VALUE in calls, we need to mark them here:
+	for (size_t i = 0; i < profiler->calls.limit; i += 1) {
+		struct IO_Event_Profiler_Call *call = profiler->calls.base[i];
+		rb_gc_mark_movable(call->klass);
+	}
+}
+
+static void IO_Event_Profiler_compact(void *ptr) {
+	struct IO_Event_Profiler *profiler = (struct IO_Event_Profiler*)ptr;
+	
+	// If `klass` is stored as a VALUE in calls, we need to update their locations here:
+	for (size_t i = 0; i < profiler->calls.limit; i += 1) {
+		struct IO_Event_Profiler_Call *call = profiler->calls.base[i];
+		call->klass = rb_gc_location(call->klass);
+	}
+}
+
+static void IO_Event_Profiler_free(void *ptr) {
+	struct IO_Event_Profiler *profiler = (struct IO_Event_Profiler*)ptr;
+	
+	IO_Event_Array_free(&profiler->calls);
+	
+	free(profiler);
+}
+
+static size_t IO_Event_Profiler_memsize(const void *ptr) {
+	const struct IO_Event_Profiler *profiler = (const struct IO_Event_Profiler*)ptr;
+	return sizeof(*profiler) + IO_Event_Array_memory_size(&profiler->calls);
+}
+
+const rb_data_type_t IO_Event_Profiler_Type = {
+	.wrap_struct_name = "IO::Event::Profiler",
+	.function = {
+		.dmark = IO_Event_Profiler_mark,
+		.dcompact = IO_Event_Profiler_compact,
+		.dfree = IO_Event_Profiler_free,
+		.dsize = IO_Event_Profiler_memsize,
+	},
+	.flags = RUBY_TYPED_FREE_IMMEDIATELY | RUBY_TYPED_WB_PROTECTED,
+};
+
+struct IO_Event_Profiler *IO_Event_Profiler_get(VALUE self) {
+	struct IO_Event_Profiler *profiler;
+	TypedData_Get_Struct(self, struct IO_Event_Profiler, &IO_Event_Profiler_Type, profiler);
+	return profiler;
+}
+
+VALUE IO_Event_Profiler_allocate(VALUE klass) {
+	struct IO_Event_Profiler *profiler = ALLOC(struct IO_Event_Profiler);
+	
+	// Initialize the profiler state:
+	profiler->running = 0;
+	profiler->capture = 0;
+	profiler->stalls = 0;
+	profiler->nesting = 0;
+	profiler->current = NULL;
+	
+	profiler->calls.element_initialize = (void (*)(void*))IO_Event_Profiler_Call_initialize;
+	profiler->calls.element_free = (void (*)(void*))IO_Event_Profiler_Call_free;
+	IO_Event_Array_initialize(&profiler->calls, 0, sizeof(struct IO_Event_Profiler_Call));
+	
+	return TypedData_Wrap_Struct(klass, &IO_Event_Profiler_Type, profiler);
+}
+
+int IO_Event_Profiler_p(void) {
+	const char *enabled = getenv("IO_EVENT_PROFILER");
+	
+	if (enabled && strcmp(enabled, "true") == 0) {
+		return 1;
+	}
+	
+	return 0;
+}
+
+float IO_Event_Profiler_default_log_threshold(void) {
+	const char *log_threshold = getenv("IO_EVENT_PROFILER_LOG_THRESHOLD");
+	
+	if (log_threshold) {
+		return strtof(log_threshold, NULL);
+	} else {
+		return 0.01;
+	}
+}
+
+int IO_Event_Profiler_default_track_calls(void) {
+	const char *track_calls = getenv("IO_EVENT_PROFILER_TRACK_CALLS");
+	
+	if (track_calls && strcmp(track_calls, "false") == 0) {
+		return 0;
+	} else {
+		return 1;
+	}
+}
+
+VALUE IO_Event_Profiler_initialize(int argc, VALUE *argv, VALUE self) {
+	struct IO_Event_Profiler *profiler = IO_Event_Profiler_get(self);
+	VALUE log_threshold, track_calls;
+	
+	rb_scan_args(argc, argv, "02", &log_threshold, &track_calls);
+	
+	if (RB_NIL_P(log_threshold)) {
+		profiler->log_threshold = IO_Event_Profiler_default_log_threshold();
+	} else {
+		profiler->log_threshold = NUM2DBL(log_threshold);
+	}
+	
+	if (RB_NIL_P(track_calls)) {
+		profiler->track_calls = IO_Event_Profiler_default_track_calls();
+	} else {
+		profiler->track_calls = RB_TEST(track_calls);
+	}
+	
+	return self;
+}
+
+VALUE IO_Event_Profiler_default(VALUE klass) {
+	if (!IO_Event_Profiler_p()) {
+		return Qnil;
+	}
+	
+	VALUE profiler = IO_Event_Profiler_allocate(klass);
+	
+	struct IO_Event_Profiler *profiler_data = IO_Event_Profiler_get(profiler);
+	profiler_data->log_threshold = IO_Event_Profiler_default_log_threshold();
+	profiler_data->track_calls = IO_Event_Profiler_default_track_calls();
+	
+	return profiler;
+}
+
+VALUE IO_Event_Profiler_new(float log_threshold, int track_calls) {
+	VALUE profiler = IO_Event_Profiler_allocate(IO_Event_Profiler);
+	
+	struct IO_Event_Profiler *profiler_data = IO_Event_Profiler_get(profiler);
+	profiler_data->log_threshold = log_threshold;
+	profiler_data->track_calls = track_calls;
+	
+	return profiler;
+}
+
+int event_flag_call_p(rb_event_flag_t event_flags) {
+	return event_flags & (RUBY_EVENT_CALL | RUBY_EVENT_C_CALL | RUBY_EVENT_B_CALL);
+}
+
+int event_flag_return_p(rb_event_flag_t event_flags) {
+	return event_flags & (RUBY_EVENT_RETURN | RUBY_EVENT_C_RETURN | RUBY_EVENT_B_RETURN);
+}
+
+const char *event_flag_name(rb_event_flag_t event_flag) {
+	switch (event_flag) {
+		case RUBY_EVENT_CALL: return "call";
+		case RUBY_EVENT_C_CALL: return "c-call";
+		case RUBY_EVENT_B_CALL: return "b-call";
+		case RUBY_EVENT_RETURN: return "return";
+		case RUBY_EVENT_C_RETURN: return "c-return";
+		case RUBY_EVENT_B_RETURN: return "b-return";
+		default: return "unknown";
+	}
+}
+
+static struct IO_Event_Profiler_Call* profiler_event_record_call(struct IO_Event_Profiler *profiler, rb_event_flag_t event_flag, ID id, VALUE klass) {
+	struct IO_Event_Profiler_Call *call = IO_Event_Array_push(&profiler->calls);
+	
+	call->event_flag = event_flag;
+	
+	call->parent = profiler->current;
+	profiler->current = call;
+	
+	call->nesting = profiler->nesting;
+	profiler->nesting += 1;
+	
+	if (id) {
+		call->id = id;
+		call->klass = klass;
+	} else {
+		rb_frame_method_id_and_class(&call->id, &call->klass);
+	}
+	
+	const char *path = rb_sourcefile();
+	if (path) {
+		call->path = strdup(path);
+	}
+	call->line = rb_sourceline();
+	
+	return call;
+}
+
+void IO_Event_Profiler_fiber_switch(struct IO_Event_Profiler *profiler);
+
+static void IO_Event_Profiler_callback(rb_event_flag_t event_flag, VALUE data, VALUE self, ID id, VALUE klass) {
+	struct IO_Event_Profiler *profiler = IO_Event_Profiler_get(data);
+	
+	if (event_flag & RUBY_EVENT_FIBER_SWITCH) {
+		IO_Event_Profiler_fiber_switch(profiler);
+		return;
+	}
+	
+	// We don't want to capture data if we're not running:
+	if (!profiler->capture) return;
+	
+	if (event_flag_call_p(event_flag)) {
+		struct IO_Event_Profiler_Call *call = profiler_event_record_call(profiler, event_flag, id, klass);
+		IO_Event_Time_current(&call->enter_time);
+	}
+	
+	else if (event_flag_return_p(event_flag)) {
+		struct IO_Event_Profiler_Call *call = profiler->current;
+		
+		// We may encounter returns without a preceding call. This isn't an error, but we should pretend like the call started at the beginning of the profiling session:
+		if (call == NULL) {
+			struct IO_Event_Profiler_Call *last_call = IO_Event_Array_last(&profiler->calls);
+			call = profiler_event_record_call(profiler, event_flag, id, klass);
+			
+			if (last_call) {
+				call->enter_time = last_call->enter_time;
+			} else {
+				call->enter_time = profiler->start_time;
+			}
+		}
+		
+		IO_Event_Time_current(&call->exit_time);
+		
+		profiler->current = call->parent;
+		
+		// We may encounter returns without a preceding call.
+		if (profiler->nesting > 0)
+			profiler->nesting -= 1;
+	}
+}
+
+VALUE IO_Event_Profiler_start(VALUE self) {
+	struct IO_Event_Profiler *profiler = IO_Event_Profiler_get(self);
+	
+	if (profiler->running) return Qfalse;
+	
+	profiler->running = 1;
+	
+	IO_Event_Profiler_reset(profiler);
+	IO_Event_Time_current(&profiler->start_time);
+	
+	rb_event_flag_t event_flags = RUBY_EVENT_FIBER_SWITCH;
+	
+	if (profiler->track_calls) {
+		event_flags |= RUBY_EVENT_CALL | RUBY_EVENT_RETURN;
+		event_flags |= RUBY_EVENT_C_CALL | RUBY_EVENT_C_RETURN;
+		// event_flags |= RUBY_EVENT_B_CALL | RUBY_EVENT_B_RETURN;
+	}
+	
+	VALUE thread = rb_thread_current();
+	rb_thread_add_event_hook(thread, IO_Event_Profiler_callback, event_flags, self);
+	
+	return self;
+}
+
+VALUE IO_Event_Profiler_stop(VALUE self) {
+	struct IO_Event_Profiler *profiler = IO_Event_Profiler_get(self);
+	
+	if (!profiler->running) return Qfalse;
+	
+	profiler->running = 0;
+	
+	VALUE thread = rb_thread_current();
+	rb_thread_remove_event_hook_with_data(thread, IO_Event_Profiler_callback, self);
+	
+	IO_Event_Time_current(&profiler->stop_time);
+	IO_Event_Profiler_reset(profiler);
+	
+	return self;
+}
+
+static inline float IO_Event_Profiler_duration(struct IO_Event_Profiler *profiler) {
+	struct timespec duration;
+	
+	IO_Event_Time_current(&profiler->stop_time);
+	IO_Event_Time_elapsed(&profiler->start_time, &profiler->stop_time, &duration);
+	
+	return IO_Event_Time_duration(&duration);
+}
+
+void IO_Event_Profiler_print(struct IO_Event_Profiler *profiler, FILE *restrict stream);
+
+void IO_Event_Profiler_finish(struct IO_Event_Profiler *profiler) {
+	profiler->capture = 0;
+	
+	struct IO_Event_Profiler_Call *current = profiler->current;
+	while (current) {
+		IO_Event_Time_current(&current->exit_time);
+		
+		current = current->parent;
+	}
+}
+
+void IO_Event_Profiler_fiber_switch(struct IO_Event_Profiler *profiler)
+{
+	float duration = IO_Event_Profiler_duration(profiler);
+	
+	if (profiler->capture) {
+		IO_Event_Profiler_finish(profiler);
+		
+		if (duration > profiler->log_threshold) {
+			profiler->stalls += 1;
+			IO_Event_Profiler_print(profiler, stderr);
+		}
+	}
+	
+	IO_Event_Profiler_reset(profiler);
+	
+	if (!IO_Event_Fiber_blocking(IO_Event_Fiber_current())) {
+		// Reset the start time:
+		IO_Event_Time_current(&profiler->start_time);
+		
+		profiler->capture = 1;
+	}
+}
+
+static const float IO_EVENT_PROFILER_PRINT_MINIMUM_PROPORTION = 0.01;
+
+void IO_Event_Profiler_print_tty(struct IO_Event_Profiler *profiler, FILE *restrict stream) {
+	struct timespec total_duration = {};
+	IO_Event_Time_elapsed(&profiler->start_time, &profiler->stop_time, &total_duration);
+	
+	fprintf(stderr, "Fiber stalled for %.3f seconds\n", IO_Event_Time_duration(&total_duration));
+	
+	size_t skipped = 0;
+	
+	for (size_t i = 0; i < profiler->calls.limit; i += 1) {
+		struct IO_Event_Profiler_Call *call = profiler->calls.base[i];
+		struct timespec duration = {};
+		IO_Event_Time_elapsed(&call->enter_time, &call->exit_time, &duration);
+		
+		// Skip calls that are too short to be meaningful:
+		if (IO_Event_Time_proportion(&duration, &total_duration) < IO_EVENT_PROFILER_PRINT_MINIMUM_PROPORTION) {
+			skipped += 1;
+			continue;
+		}
+		
+		for (size_t i = 0; i < call->nesting; i += 1) {
+			fputc('\t', stream);
+		}
+		
+		VALUE class_inspect = rb_inspect(call->klass);
+		const char *name = rb_id2name(call->id);
+		
+		fprintf(stream, "%s:%d in %s '%s#%s' (" IO_EVENT_TIME_PRINTF_TIMESPEC "s)\n", call->path, call->line, event_flag_name(call->event_flag), RSTRING_PTR(class_inspect), name, IO_EVENT_TIME_PRINTF_TIMESPEC_ARGUMENTS(duration));
+	}
+	
+	if (skipped > 0) {
+		fprintf(stream, "Skipped %zu calls that were too short to be meaningful.\n", skipped);
+	}
+}
+
+void IO_Event_Profiler_print_json(struct IO_Event_Profiler *profiler, FILE *restrict stream) {
+	struct timespec total_duration = {};
+	IO_Event_Time_elapsed(&profiler->start_time, &profiler->stop_time, &total_duration);
+	
+	fputc('{', stream);
+	
+	fprintf(stream, "\"duration\":" IO_EVENT_TIME_PRINTF_TIMESPEC, IO_EVENT_TIME_PRINTF_TIMESPEC_ARGUMENTS(total_duration));
+	
+	size_t skipped = 0;
+	
+	fprintf(stream, ",\"calls\":[");
+	int first = 1;
+	
+	for (size_t i = 0; i < profiler->calls.limit; i += 1) {
+		struct IO_Event_Profiler_Call *call = profiler->calls.base[i];
+		struct timespec duration = {};
+		IO_Event_Time_elapsed(&call->enter_time, &call->exit_time, &duration);
+		
+		// Skip calls that are too short to be meaningful:
+		if (IO_Event_Time_proportion(&duration, &total_duration) < IO_EVENT_PROFILER_PRINT_MINIMUM_PROPORTION) {
+			skipped += 1;
+			continue;
+		}
+		
+		VALUE class_inspect = rb_inspect(call->klass);
+		const char *name = rb_id2name(call->id);
+		
+		fprintf(stream, "%s{\"path\":\"%s\",\"line\":%d,\"class\":\"%s\",\"method\":\"%s\",\"duration\":" IO_EVENT_TIME_PRINTF_TIMESPEC ",\"nesting\":%zu}", first ? "" : ",", call->path, call->line, RSTRING_PTR(class_inspect), name, IO_EVENT_TIME_PRINTF_TIMESPEC_ARGUMENTS(duration), call->nesting);
+		
+		first = 0;
+	}
+	
+	fprintf(stream, "]");
+	
+	if (skipped > 0) {
+		fprintf(stream, ",\"skipped\":%zu", skipped);
+	}
+	
+	fprintf(stream, "}\n");
+}
+
+void IO_Event_Profiler_print(struct IO_Event_Profiler *profiler, FILE *restrict stream) {
+	if (isatty(fileno(stream))) {
+		IO_Event_Profiler_print_tty(profiler, stream);
+	} else {
+		IO_Event_Profiler_print_json(profiler, stream);
+	}
+}
+
+VALUE IO_Event_Profiler_stalls(VALUE self) {
+	struct IO_Event_Profiler *profiler = IO_Event_Profiler_get(self);
+	
+	return SIZET2NUM(profiler->stalls);
+}
+
+void Init_IO_Event_Profiler(VALUE IO_Event) {
+	IO_Event_Profiler = rb_define_class_under(IO_Event, "Profiler", rb_cObject);
+	rb_define_alloc_func(IO_Event_Profiler, IO_Event_Profiler_allocate);
+	
+	rb_define_singleton_method(IO_Event_Profiler, "default", IO_Event_Profiler_default, 0);
+	
+	rb_define_method(IO_Event_Profiler, "initialize", IO_Event_Profiler_initialize, -1);
+	
+	rb_define_method(IO_Event_Profiler, "start", IO_Event_Profiler_start, 0);
+	rb_define_method(IO_Event_Profiler, "stop", IO_Event_Profiler_stop, 0);
+	
+	rb_define_method(IO_Event_Profiler, "stalls", IO_Event_Profiler_stalls, 0);
+}
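Init_IO_Event_Profiler exposes this machinery to Ruby as IO::Event::Profiler with default, initialize, start, stop and stalls. A minimal sketch of driving it from Ruby, based only on the methods registered above (both constructor arguments are optional per rb_scan_args "02"; what actually gets logged depends on fiber scheduling at runtime):

	require "io/event"

	# log_threshold in seconds, then track_calls:
	profiler = IO::Event::Profiler.new(0.1, true)

	profiler.start # installs the event hook on the current thread
	# ... run fiber-based work; stalls longer than 0.1s are printed to stderr ...
	profiler.stop # removes the hook and discards captured calls

	puts profiler.stalls # number of stalls observed while running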
data/ext/io/event/profiler.h ADDED
@@ -0,0 +1,8 @@
+// Released under the MIT License.
+// Copyright, 2025, by Samuel Williams.
+
+#pragma once
+
+#include <ruby.h>
+
+void Init_IO_Event_Profiler(VALUE IO_Event);
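The getenv calls in profiler.c make the default profiler opt-in via the environment. A sketch of enabling it that way (variable names and accepted values are taken from IO_Event_Profiler_p, IO_Event_Profiler_default_log_threshold and IO_Event_Profiler_default_track_calls above):

	# IO_EVENT_PROFILER must be exactly "true" for Profiler.default to return an instance;
	# the other variables override the built-in defaults (0.01s threshold, call tracking on).
	ENV["IO_EVENT_PROFILER"] = "true"
	ENV["IO_EVENT_PROFILER_LOG_THRESHOLD"] = "0.05" # seconds, parsed with strtof
	ENV["IO_EVENT_PROFILER_TRACK_CALLS"] = "false"  # time fiber switches only

	require "io/event"

	profiler = IO::Event::Profiler.default # nil unless IO_EVENT_PROFILER=true
	profiler&.start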