io-event 1.9.0 → 1.11.0

This diff shows the content of publicly available package versions as they appear in their respective public registries, and is provided for informational purposes only.
@@ -1,505 +0,0 @@
- // Released under the MIT License.
- // Copyright, 2025, by Samuel Williams.
-
- #include "profiler.h"
-
- #include "time.h"
- #include "fiber.h"
- #include "array.h"
-
- #include <ruby/debug.h>
- #include <stdio.h>
-
- VALUE IO_Event_Profiler = Qnil;
-
- struct IO_Event_Profiler_Call {
- struct timespec enter_time;
- struct timespec exit_time;
-
- size_t nesting;
-
- rb_event_flag_t event_flag;
- ID id;
-
- VALUE klass;
- const char *path;
- int line;
-
- struct IO_Event_Profiler_Call *parent;
- };
-
- struct IO_Event_Profiler {
- // Configuration:
- float log_threshold;
- int track_calls;
-
- // Whether or not the profiler is currently running:
- int running;
-
- // Whether or not to capture call data:
- int capture;
-
- size_t stalls;
-
- // From this point on, the state of any profile in progress:
- struct timespec start_time;
- struct timespec stop_time;
-
- // The depth of the call stack:
- size_t nesting;
-
- // The current call frame:
- struct IO_Event_Profiler_Call *current;
-
- struct IO_Event_Array calls;
- };
-
- void IO_Event_Profiler_reset(struct IO_Event_Profiler *profiler) {
- profiler->nesting = 0;
- profiler->current = NULL;
- IO_Event_Array_truncate(&profiler->calls, 0);
- }
-
- void IO_Event_Profiler_Call_initialize(struct IO_Event_Profiler_Call *call) {
- call->enter_time.tv_sec = 0;
- call->enter_time.tv_nsec = 0;
- call->exit_time.tv_sec = 0;
- call->exit_time.tv_nsec = 0;
-
- call->nesting = 0;
-
- call->event_flag = 0;
- call->id = 0;
-
- call->path = NULL;
- call->line = 0;
- }
-
- void IO_Event_Profiler_Call_free(struct IO_Event_Profiler_Call *call) {
- if (call->path) {
- free((void*)call->path);
- call->path = NULL;
- }
- }
-
- static void IO_Event_Profiler_mark(void *ptr) {
- struct IO_Event_Profiler *profiler = (struct IO_Event_Profiler*)ptr;
-
- // If `klass` is stored as a VALUE in calls, we need to mark them here:
- for (size_t i = 0; i < profiler->calls.limit; i += 1) {
- struct IO_Event_Profiler_Call *call = profiler->calls.base[i];
- rb_gc_mark_movable(call->klass);
- }
- }
-
- static void IO_Event_Profiler_compact(void *ptr) {
- struct IO_Event_Profiler *profiler = (struct IO_Event_Profiler*)ptr;
-
- // If `klass` is stored as a VALUE in calls, we need to update their locations here:
- for (size_t i = 0; i < profiler->calls.limit; i += 1) {
- struct IO_Event_Profiler_Call *call = profiler->calls.base[i];
- call->klass = rb_gc_location(call->klass);
- }
- }
-
- static void IO_Event_Profiler_free(void *ptr) {
- struct IO_Event_Profiler *profiler = (struct IO_Event_Profiler*)ptr;
-
- IO_Event_Array_free(&profiler->calls);
-
- free(profiler);
- }
-
- static size_t IO_Event_Profiler_memsize(const void *ptr) {
- const struct IO_Event_Profiler *profiler = (const struct IO_Event_Profiler*)ptr;
- return sizeof(*profiler) + IO_Event_Array_memory_size(&profiler->calls);
- }
-
- const rb_data_type_t IO_Event_Profiler_Type = {
- .wrap_struct_name = "IO::Event::Profiler",
- .function = {
- .dmark = IO_Event_Profiler_mark,
- .dcompact = IO_Event_Profiler_compact,
- .dfree = IO_Event_Profiler_free,
- .dsize = IO_Event_Profiler_memsize,
- },
- .flags = RUBY_TYPED_FREE_IMMEDIATELY | RUBY_TYPED_WB_PROTECTED,
- };
-
- struct IO_Event_Profiler *IO_Event_Profiler_get(VALUE self) {
- struct IO_Event_Profiler *profiler;
- TypedData_Get_Struct(self, struct IO_Event_Profiler, &IO_Event_Profiler_Type, profiler);
- return profiler;
- }
-
- VALUE IO_Event_Profiler_allocate(VALUE klass) {
- struct IO_Event_Profiler *profiler = ALLOC(struct IO_Event_Profiler);
-
- // Initialize the profiler state:
- profiler->running = 0;
- profiler->capture = 0;
- profiler->stalls = 0;
- profiler->nesting = 0;
- profiler->current = NULL;
-
- profiler->calls.element_initialize = (void (*)(void*))IO_Event_Profiler_Call_initialize;
- profiler->calls.element_free = (void (*)(void*))IO_Event_Profiler_Call_free;
- IO_Event_Array_initialize(&profiler->calls, 0, sizeof(struct IO_Event_Profiler_Call));
-
- return TypedData_Wrap_Struct(klass, &IO_Event_Profiler_Type, profiler);
- }
-
- int IO_Event_Profiler_p(void) {
- const char *enabled = getenv("IO_EVENT_PROFILER");
-
- if (enabled && strcmp(enabled, "true") == 0) {
- return 1;
- }
-
- return 0;
- }
-
- float IO_Event_Profiler_default_log_threshold(void) {
- const char *log_threshold = getenv("IO_EVENT_PROFILER_LOG_THRESHOLD");
-
- if (log_threshold) {
- return strtof(log_threshold, NULL);
- } else {
- return 0.01;
- }
- }
-
- int IO_Event_Profiler_default_track_calls(void) {
- const char *track_calls = getenv("IO_EVENT_PROFILER_TRACK_CALLS");
-
- if (track_calls && strcmp(track_calls, "false") == 0) {
- return 0;
- } else {
- return 1;
- }
- }
-
- VALUE IO_Event_Profiler_initialize(int argc, VALUE *argv, VALUE self) {
- struct IO_Event_Profiler *profiler = IO_Event_Profiler_get(self);
- VALUE log_threshold, track_calls;
-
- rb_scan_args(argc, argv, "02", &log_threshold, &track_calls);
-
- if (RB_NIL_P(log_threshold)) {
- profiler->log_threshold = IO_Event_Profiler_default_log_threshold();
- } else {
- profiler->log_threshold = NUM2DBL(log_threshold);
- }
-
- if (RB_NIL_P(track_calls)) {
- profiler->track_calls = IO_Event_Profiler_default_track_calls();
- } else {
- profiler->track_calls = RB_TEST(track_calls);
- }
-
- return self;
- }
-
- VALUE IO_Event_Profiler_default(VALUE klass) {
- if (!IO_Event_Profiler_p()) {
- return Qnil;
- }
-
- VALUE profiler = IO_Event_Profiler_allocate(klass);
-
- struct IO_Event_Profiler *profiler_data = IO_Event_Profiler_get(profiler);
- profiler_data->log_threshold = IO_Event_Profiler_default_log_threshold();
- profiler_data->track_calls = IO_Event_Profiler_default_track_calls();
-
- return profiler;
- }
-
- VALUE IO_Event_Profiler_new(float log_threshold, int track_calls) {
- VALUE profiler = IO_Event_Profiler_allocate(IO_Event_Profiler);
-
- struct IO_Event_Profiler *profiler_data = IO_Event_Profiler_get(profiler);
- profiler_data->log_threshold = log_threshold;
- profiler_data->track_calls = track_calls;
-
- return profiler;
- }
-
- int event_flag_call_p(rb_event_flag_t event_flags) {
- return event_flags & (RUBY_EVENT_CALL | RUBY_EVENT_C_CALL | RUBY_EVENT_B_CALL);
- }
-
- int event_flag_return_p(rb_event_flag_t event_flags) {
- return event_flags & (RUBY_EVENT_RETURN | RUBY_EVENT_C_RETURN | RUBY_EVENT_B_RETURN);
- }
-
- const char *event_flag_name(rb_event_flag_t event_flag) {
- switch (event_flag) {
- case RUBY_EVENT_CALL: return "call";
- case RUBY_EVENT_C_CALL: return "c-call";
- case RUBY_EVENT_B_CALL: return "b-call";
- case RUBY_EVENT_RETURN: return "return";
- case RUBY_EVENT_C_RETURN: return "c-return";
- case RUBY_EVENT_B_RETURN: return "b-return";
- default: return "unknown";
- }
- }
-
- static struct IO_Event_Profiler_Call* profiler_event_record_call(struct IO_Event_Profiler *profiler, rb_event_flag_t event_flag, ID id, VALUE klass) {
- struct IO_Event_Profiler_Call *call = IO_Event_Array_push(&profiler->calls);
-
- call->event_flag = event_flag;
-
- call->parent = profiler->current;
- profiler->current = call;
-
- call->nesting = profiler->nesting;
- profiler->nesting += 1;
-
- if (id) {
- call->id = id;
- call->klass = klass;
- } else {
- rb_frame_method_id_and_class(&call->id, &call->klass);
- }
-
- const char *path = rb_sourcefile();
- if (path) {
- call->path = strdup(path);
- }
- call->line = rb_sourceline();
-
- return call;
- }
-
- void IO_Event_Profiler_fiber_switch(struct IO_Event_Profiler *profiler);
-
- static void IO_Event_Profiler_callback(rb_event_flag_t event_flag, VALUE data, VALUE self, ID id, VALUE klass) {
- struct IO_Event_Profiler *profiler = IO_Event_Profiler_get(data);
-
- if (event_flag & RUBY_EVENT_FIBER_SWITCH) {
- IO_Event_Profiler_fiber_switch(profiler);
- return;
- }
-
- // We don't want to capture data if we're not running:
- if (!profiler->capture) return;
-
- if (event_flag_call_p(event_flag)) {
- struct IO_Event_Profiler_Call *call = profiler_event_record_call(profiler, event_flag, id, klass);
- IO_Event_Time_current(&call->enter_time);
- }
-
- else if (event_flag_return_p(event_flag)) {
- struct IO_Event_Profiler_Call *call = profiler->current;
-
- // We may encounter returns without a preceding call. This isn't an error, but we should pretend that the call started at the beginning of the profiling session:
- if (call == NULL) {
- struct IO_Event_Profiler_Call *last_call = IO_Event_Array_last(&profiler->calls);
- call = profiler_event_record_call(profiler, event_flag, id, klass);
-
- if (last_call) {
- call->enter_time = last_call->enter_time;
- } else {
- call->enter_time = profiler->start_time;
- }
- }
-
- IO_Event_Time_current(&call->exit_time);
-
- profiler->current = call->parent;
-
- // We may encounter returns without a preceding call.
- if (profiler->nesting > 0)
- profiler->nesting -= 1;
- }
- }
-
- VALUE IO_Event_Profiler_start(VALUE self) {
- struct IO_Event_Profiler *profiler = IO_Event_Profiler_get(self);
-
- if (profiler->running) return Qfalse;
-
- profiler->running = 1;
-
- IO_Event_Profiler_reset(profiler);
- IO_Event_Time_current(&profiler->start_time);
-
- rb_event_flag_t event_flags = RUBY_EVENT_FIBER_SWITCH;
-
- if (profiler->track_calls) {
- event_flags |= RUBY_EVENT_CALL | RUBY_EVENT_RETURN;
- event_flags |= RUBY_EVENT_C_CALL | RUBY_EVENT_C_RETURN;
- // event_flags |= RUBY_EVENT_B_CALL | RUBY_EVENT_B_RETURN;
- }
-
- VALUE thread = rb_thread_current();
- rb_thread_add_event_hook(thread, IO_Event_Profiler_callback, event_flags, self);
-
- return self;
- }
-
- VALUE IO_Event_Profiler_stop(VALUE self) {
- struct IO_Event_Profiler *profiler = IO_Event_Profiler_get(self);
-
- if (!profiler->running) return Qfalse;
-
- profiler->running = 0;
-
- VALUE thread = rb_thread_current();
- rb_thread_remove_event_hook_with_data(thread, IO_Event_Profiler_callback, self);
-
- IO_Event_Time_current(&profiler->stop_time);
- IO_Event_Profiler_reset(profiler);
-
- return self;
- }
-
- static inline float IO_Event_Profiler_duration(struct IO_Event_Profiler *profiler) {
- struct timespec duration;
-
- IO_Event_Time_current(&profiler->stop_time);
- IO_Event_Time_elapsed(&profiler->start_time, &profiler->stop_time, &duration);
-
- return IO_Event_Time_duration(&duration);
- }
-
- void IO_Event_Profiler_print(struct IO_Event_Profiler *profiler, FILE *restrict stream);
-
- void IO_Event_Profiler_finish(struct IO_Event_Profiler *profiler) {
- profiler->capture = 0;
-
- struct IO_Event_Profiler_Call *current = profiler->current;
- while (current) {
- IO_Event_Time_current(&current->exit_time);
-
- current = current->parent;
- }
- }
-
- void IO_Event_Profiler_fiber_switch(struct IO_Event_Profiler *profiler)
- {
- float duration = IO_Event_Profiler_duration(profiler);
-
- if (profiler->capture) {
- IO_Event_Profiler_finish(profiler);
-
- if (duration > profiler->log_threshold) {
- profiler->stalls += 1;
- IO_Event_Profiler_print(profiler, stderr);
- }
- }
-
- IO_Event_Profiler_reset(profiler);
-
- if (!IO_Event_Fiber_blocking(IO_Event_Fiber_current())) {
- // Reset the start time:
- IO_Event_Time_current(&profiler->start_time);
-
- profiler->capture = 1;
- }
- }
-
- static const float IO_EVENT_PROFILER_PRINT_MINIMUM_PROPORTION = 0.01;
-
- void IO_Event_Profiler_print_tty(struct IO_Event_Profiler *profiler, FILE *restrict stream) {
- struct timespec total_duration = {};
- IO_Event_Time_elapsed(&profiler->start_time, &profiler->stop_time, &total_duration);
-
- fprintf(stream, "Fiber stalled for %.3f seconds\n", IO_Event_Time_duration(&total_duration));
- size_t skipped = 0;
-
- for (size_t i = 0; i < profiler->calls.limit; i += 1) {
- struct IO_Event_Profiler_Call *call = profiler->calls.base[i];
- struct timespec duration = {};
- IO_Event_Time_elapsed(&call->enter_time, &call->exit_time, &duration);
-
- // Skip calls that are too short to be meaningful:
- if (IO_Event_Time_proportion(&duration, &total_duration) < IO_EVENT_PROFILER_PRINT_MINIMUM_PROPORTION) {
- skipped += 1;
- continue;
- }
-
- for (size_t i = 0; i < call->nesting; i += 1) {
- fputc('\t', stream);
- }
-
- VALUE class_inspect = rb_inspect(call->klass);
- const char *name = rb_id2name(call->id);
-
- fprintf(stream, "%s:%d in %s '%s#%s' (" IO_EVENT_TIME_PRINTF_TIMESPEC "s)\n", call->path, call->line, event_flag_name(call->event_flag), RSTRING_PTR(class_inspect), name, IO_EVENT_TIME_PRINTF_TIMESPEC_ARGUMENTS(duration));
- }
-
- if (skipped > 0) {
- fprintf(stream, "Skipped %zu calls that were too short to be meaningful.\n", skipped);
- }
- }
-
- void IO_Event_Profiler_print_json(struct IO_Event_Profiler *profiler, FILE *restrict stream) {
- struct timespec total_duration = {};
- IO_Event_Time_elapsed(&profiler->start_time, &profiler->stop_time, &total_duration);
-
- fputc('{', stream);
-
- fprintf(stream, "\"duration\":" IO_EVENT_TIME_PRINTF_TIMESPEC, IO_EVENT_TIME_PRINTF_TIMESPEC_ARGUMENTS(total_duration));
-
- size_t skipped = 0;
-
- fprintf(stream, ",\"calls\":[");
- int first = 1;
-
- for (size_t i = 0; i < profiler->calls.limit; i += 1) {
- struct IO_Event_Profiler_Call *call = profiler->calls.base[i];
- struct timespec duration = {};
- IO_Event_Time_elapsed(&call->enter_time, &call->exit_time, &duration);
-
- // Skip calls that are too short to be meaningful:
- if (IO_Event_Time_proportion(&duration, &total_duration) < IO_EVENT_PROFILER_PRINT_MINIMUM_PROPORTION) {
- skipped += 1;
- continue;
- }
-
- VALUE class_inspect = rb_inspect(call->klass);
- const char *name = rb_id2name(call->id);
-
- fprintf(stream, "%s{\"path\":\"%s\",\"line\":%d,\"class\":\"%s\",\"method\":\"%s\",\"duration\":" IO_EVENT_TIME_PRINTF_TIMESPEC ",\"nesting\":%zu}", first ? "" : ",", call->path, call->line, RSTRING_PTR(class_inspect), name, IO_EVENT_TIME_PRINTF_TIMESPEC_ARGUMENTS(duration), call->nesting);
-
- first = 0;
- }
-
- fprintf(stream, "]");
-
- if (skipped > 0) {
- fprintf(stream, ",\"skipped\":%zu", skipped);
- }
-
- fprintf(stream, "}\n");
- }
-
- void IO_Event_Profiler_print(struct IO_Event_Profiler *profiler, FILE *restrict stream) {
- if (isatty(fileno(stream))) {
- IO_Event_Profiler_print_tty(profiler, stream);
- } else {
- IO_Event_Profiler_print_json(profiler, stream);
- }
- }
-
- VALUE IO_Event_Profiler_stalls(VALUE self) {
- struct IO_Event_Profiler *profiler = IO_Event_Profiler_get(self);
-
- return SIZET2NUM(profiler->stalls);
- }
-
- void Init_IO_Event_Profiler(VALUE IO_Event) {
- IO_Event_Profiler = rb_define_class_under(IO_Event, "Profiler", rb_cObject);
- rb_define_alloc_func(IO_Event_Profiler, IO_Event_Profiler_allocate);
-
- rb_define_singleton_method(IO_Event_Profiler, "default", IO_Event_Profiler_default, 0);
-
- rb_define_method(IO_Event_Profiler, "initialize", IO_Event_Profiler_initialize, -1);
-
- rb_define_method(IO_Event_Profiler, "start", IO_Event_Profiler_start, 0);
- rb_define_method(IO_Event_Profiler, "stop", IO_Event_Profiler_stop, 0);
-
- rb_define_method(IO_Event_Profiler, "stalls", IO_Event_Profiler_stalls, 0);
- }
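
The removed native code above defines the profiler's public surface: `Init_IO_Event_Profiler` exposes `default`, `start`, `stop`, and `stalls`, and the defaults are read from the `IO_EVENT_PROFILER`, `IO_EVENT_PROFILER_LOG_THRESHOLD`, and `IO_EVENT_PROFILER_TRACK_CALLS` environment variables. A minimal usage sketch, assuming only the names defined in the C source (the workload and output are illustrative):

```ruby
require "io/event"

# Returns a profiler only when IO_EVENT_PROFILER=true; the stall threshold
# defaults to 0.01s (IO_EVENT_PROFILER_LOG_THRESHOLD) and call tracking
# defaults to on (IO_EVENT_PROFILER_TRACK_CALLS).
profiler = IO::Event::Profiler.default

if profiler
  profiler.start # Installs the event hook on the current thread.
  # ... run a fiber-based workload; any non-blocking fiber that stalls
  # longer than the threshold is reported to stderr on fiber switch ...
  profiler.stop
  puts "Stalls detected: #{profiler.stalls}"
end
```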
@@ -1,18 +0,0 @@
- # frozen_string_literal: true
-
- # Released under the MIT License.
- # Copyright, 2025, by Samuel Williams.
-
- require_relative "native"
-
- module IO::Event
- unless self.const_defined?(:Profiler)
- module Profiler
- # The default profiler, if the platform supports it.
- # Use `IO_EVENT_PROFILER=true` to enable it.
- def self.default
- nil
- end
- end
- end
- end
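
Because the native `initialize` scans up to two optional arguments (`rb_scan_args(argc, argv, "02", ...)` in the C source above), a profiler can also be constructed explicitly rather than through `default`. A hedged sketch, assuming the native class is present; the 0.1-second threshold is illustrative:

```ruby
# Explicit construction: log_threshold in seconds, then track_calls.
# Passing nil for either argument falls back to the environment-derived default.
profiler = IO::Event::Profiler.new(0.1, true)

profiler.start
# ... workload ...
profiler.stop
```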