io-event 1.8.4 → 1.9.0

Sign up to get free protection for your applications and to get access to all the features.
@@ -1,245 +0,0 @@
1
- // Released under the MIT License.
2
- // Copyright, 2025, by Samuel Williams.
3
-
4
- #include "profile.h"
5
- #include "time.h"
6
-
7
- #include <ruby/debug.h>
8
-
9
- #include <stdio.h>
10
-
11
- VALUE IO_Event_Profile = Qnil;
12
-
13
- void IO_Event_Profile_Call_initialize(struct IO_Event_Profile_Call *call) {
14
- call->enter_time.tv_sec = 0;
15
- call->enter_time.tv_nsec = 0;
16
- call->exit_time.tv_sec = 0;
17
- call->exit_time.tv_nsec = 0;
18
-
19
- call->nesting = 0;
20
-
21
- call->event_flag = 0;
22
- call->id = 0;
23
-
24
- call->path = NULL;
25
- call->line = 0;
26
- }
27
-
28
- void IO_Event_Profile_Call_free(struct IO_Event_Profile_Call *call) {
29
- if (call->path) {
30
- free((void*)call->path);
31
- }
32
- }
33
-
34
- static void IO_Event_Profile_mark(void *ptr) {
35
- struct IO_Event_Profile *profile = (struct IO_Event_Profile*)ptr;
36
-
37
- // If `klass` is stored as a VALUE in calls, we need to mark them here:
38
- for (size_t i = 0; i < profile->calls.limit; i += 1) {
39
- struct IO_Event_Profile_Call *call = profile->calls.base[i];
40
- rb_gc_mark(call->klass);
41
- }
42
- }
43
-
44
- static void IO_Event_Profile_free(void *ptr) {
45
- struct IO_Event_Profile *profile = (struct IO_Event_Profile*)ptr;
46
-
47
- IO_Event_Array_free(&profile->calls);
48
-
49
- free(profile);
50
- }
51
-
52
// TypedData descriptor wiring the GC mark/free hooks to the wrapped profile.
// RUBY_TYPED_FREE_IMMEDIATELY lets the free function run during GC sweep
// without deferral (safe here: dfree takes no Ruby-API locks).
const rb_data_type_t IO_Event_Profile_Type = {
	.wrap_struct_name = "IO_Event_Profile",
	.function = {
		.dmark = IO_Event_Profile_mark,
		.dfree = IO_Event_Profile_free,
	},
	.flags = RUBY_TYPED_FREE_IMMEDIATELY,
};
60
-
61
// Ruby allocation function for IO::Event::Profile: builds an empty profile
// with an initialized (zero-capacity) call array and wraps it as TypedData.
VALUE IO_Event_Profile_allocate(VALUE klass) {
	struct IO_Event_Profile *profile = ALLOC(struct IO_Event_Profile);
	
	// Element hooks are assigned before IO_Event_Array_initialize — this
	// assumes the initializer preserves these fields; TODO confirm against
	// the IO_Event_Array implementation in array.h.
	profile->calls.element_initialize = (void (*)(void*))IO_Event_Profile_Call_initialize;
	profile->calls.element_free = (void (*)(void*))IO_Event_Profile_Call_free;
	
	IO_Event_Array_initialize(&profile->calls, 0, sizeof(struct IO_Event_Profile_Call));
	
	return TypedData_Wrap_Struct(klass, &IO_Event_Profile_Type, profile);
}
71
-
72
- struct IO_Event_Profile *IO_Event_Profile_get(VALUE self) {
73
- struct IO_Event_Profile *profile;
74
- TypedData_Get_Struct(self, struct IO_Event_Profile, &IO_Event_Profile_Type, profile);
75
- return profile;
76
- }
77
-
78
- int event_flag_call_p(rb_event_flag_t event_flags) {
79
- return event_flags & (RUBY_EVENT_CALL | RUBY_EVENT_C_CALL);
80
- }
81
-
82
- int event_flag_return_p(rb_event_flag_t event_flags) {
83
- return event_flags & (RUBY_EVENT_RETURN | RUBY_EVENT_C_RETURN);
84
- }
85
-
86
// Event hook invoked for every call/return event on the tracked thread.
// `data` is the wrapped IO::Event::Profile instance that was passed to
// rb_thread_add_event_hook in IO_Event_Profile_start.
static void profile_event_callback(rb_event_flag_t event_flag, VALUE data, VALUE self, ID id, VALUE klass) {
	struct IO_Event_Profile *profile = IO_Event_Profile_get(data);
	
	if (event_flag_call_p(event_flag)) {
		// Record a new call and link it into the current call stack:
		struct IO_Event_Profile_Call *call = IO_Event_Array_push(&profile->calls);
		IO_Event_Time_current(&call->enter_time);
		
		call->event_flag = event_flag;
		
		call->parent = profile->current;
		profile->current = call;
		
		call->nesting = profile->nesting;
		profile->nesting += 1;
		
		if (id) {
			call->id = id;
			call->klass = klass;
		} else {
			// Some events do not supply id/klass; query the VM's current frame:
			rb_frame_method_id_and_class(&call->id, &call->klass);
		}
		
		const char *path = rb_sourcefile();
		if (path) {
			// Copy: the VM-owned string may not outlive this frame. Freed in
			// IO_Event_Profile_Call_free. NOTE(review): strdup can return NULL
			// on OOM — printers must tolerate a NULL path; confirm.
			call->path = strdup(path);
		}
		call->line = rb_sourceline();
	} else if (event_flag_return_p(event_flag)) {
		struct IO_Event_Profile_Call *call = profile->current;
		
		// A return with no matching recorded call — e.g. returning from a
		// frame entered before profiling started. Ignore it:
		if (call == NULL) return;
		
		IO_Event_Time_current(&call->exit_time);
		
		// Pop the frame:
		profile->current = call->parent;
		profile->nesting -= 1;
	}
}
125
-
126
- void IO_Event_Profile_start(VALUE self, int track_calls) {
127
- struct IO_Event_Profile *profile = IO_Event_Profile_get(self);
128
-
129
- IO_Event_Time_current(&profile->start_time);
130
- profile->nesting = 0;
131
- profile->current = NULL;
132
-
133
- profile->track_calls = track_calls;
134
-
135
- // Since fibers are currently limited to a single thread, we use this in the hope that it's a little more efficient:
136
- if (profile->track_calls) {
137
- VALUE thread = rb_thread_current();
138
- rb_thread_add_event_hook(thread, profile_event_callback, RUBY_EVENT_CALL | RUBY_EVENT_C_CALL | RUBY_EVENT_RETURN | RUBY_EVENT_C_RETURN, self);
139
- }
140
- }
141
-
142
- void IO_Event_Profile_stop(VALUE self) {
143
- struct IO_Event_Profile *profile = IO_Event_Profile_get(self);
144
-
145
- IO_Event_Time_current(&profile->stop_time);
146
-
147
- if (profile->track_calls) {
148
- VALUE thread = rb_thread_current();
149
- rb_thread_remove_event_hook_with_data(thread, profile_event_callback, self);
150
- }
151
- }
152
-
153
// Calls shorter than this fraction of the total stall duration are omitted
// from the printed output (and counted as "skipped"):
static const float IO_EVENT_PROFILE_PRINT_MINIMUM_PROPORTION = 0.01;
154
-
155
- void IO_Event_Profile_print_tty(VALUE self, FILE *restrict stream) {
156
- struct IO_Event_Profile *profile = IO_Event_Profile_get(self);
157
-
158
- struct timespec total_duration = {};
159
- IO_Event_Time_elapsed(&profile->start_time, &profile->stop_time, &total_duration);
160
-
161
- fprintf(stderr, "Fiber stalled for %.3f seconds\n", IO_Event_Time_duration(&total_duration));
162
-
163
- size_t skipped = 0;
164
-
165
- for (size_t i = 0; i < profile->calls.limit; i += 1) {
166
- struct IO_Event_Profile_Call *call = profile->calls.base[i];
167
- struct timespec duration = {};
168
- IO_Event_Time_elapsed(&call->enter_time, &call->exit_time, &duration);
169
-
170
- // Skip calls that are too short to be meaningful:
171
- if (IO_Event_Time_proportion(&duration, &total_duration) < IO_EVENT_PROFILE_PRINT_MINIMUM_PROPORTION) {
172
- skipped += 1;
173
- continue;
174
- }
175
-
176
- for (size_t i = 0; i < call->nesting; i += 1) {
177
- fputc('\t', stream);
178
- }
179
-
180
- VALUE class_inspect = rb_inspect(call->klass);
181
- const char *name = rb_id2name(call->id);
182
-
183
- fprintf(stream, "\t%s:%d in '%s#%s' (" IO_EVENT_TIME_PRINTF_TIMESPEC "s)\n", call->path, call->line, RSTRING_PTR(class_inspect), name, IO_EVENT_TIME_PRINTF_TIMESPEC_ARGUMENTS(duration));
184
- }
185
-
186
- if (skipped > 0) {
187
- fprintf(stream, "Skipped %zu calls that were too short to be meaningful.\n", skipped);
188
- }
189
- }
190
-
191
// Emit the profile as a single-line JSON object to `stream`:
// {"duration":..., "calls":[{...}, ...], "skipped":N}
// NOTE(review): path/class/method strings are interpolated without JSON
// escaping — a quote or backslash in a source path or class inspect output
// would produce invalid JSON; confirm whether inputs are constrained.
void IO_Event_Profile_print_json(VALUE self, FILE *restrict stream) {
	struct IO_Event_Profile *profile = IO_Event_Profile_get(self);
	
	struct timespec total_duration = {};
	IO_Event_Time_elapsed(&profile->start_time, &profile->stop_time, &total_duration);
	
	fputc('{', stream);
	
	fprintf(stream, "\"duration\":" IO_EVENT_TIME_PRINTF_TIMESPEC, IO_EVENT_TIME_PRINTF_TIMESPEC_ARGUMENTS(total_duration));
	
	size_t skipped = 0;
	
	fprintf(stream, ",\"calls\":[");
	// Whether the next array element is the first (controls comma separators):
	int first = 1;
	
	for (size_t i = 0; i < profile->calls.limit; i += 1) {
		struct IO_Event_Profile_Call *call = profile->calls.base[i];
		struct timespec duration = {};
		IO_Event_Time_elapsed(&call->enter_time, &call->exit_time, &duration);
		
		// Skip calls that are too short to be meaningful:
		if (IO_Event_Time_proportion(&duration, &total_duration) < IO_EVENT_PROFILE_PRINT_MINIMUM_PROPORTION) {
			skipped += 1;
			continue;
		}
		
		VALUE class_inspect = rb_inspect(call->klass);
		const char *name = rb_id2name(call->id);
		
		fprintf(stream, "%s{\"path\":\"%s\",\"line\":%d,\"class\":\"%s\",\"method\":\"%s\",\"duration\":" IO_EVENT_TIME_PRINTF_TIMESPEC ",\"nesting\":%zu}", first ? "" : ",", call->path, call->line, RSTRING_PTR(class_inspect), name, IO_EVENT_TIME_PRINTF_TIMESPEC_ARGUMENTS(duration), call->nesting);
		
		first = 0;
	}
	
	fprintf(stream, "]");
	
	if (skipped > 0) {
		fprintf(stream, ",\"skipped\":%zu", skipped);
	}
	
	fprintf(stream, "}\n");
}
233
-
234
- void IO_Event_Profile_print(VALUE self, FILE *restrict stream) {
235
- if (isatty(fileno(stream))) {
236
- IO_Event_Profile_print_tty(self, stream);
237
- } else {
238
- IO_Event_Profile_print_json(self, stream);
239
- }
240
- }
241
-
242
// Define the IO::Event::Profile class and register its allocator.
// Called once from the extension's init function.
void Init_IO_Event_Profile(VALUE IO_Event) {
	IO_Event_Profile = rb_define_class_under(IO_Event, "Profile", rb_cObject);
	rb_define_alloc_func(IO_Event_Profile, IO_Event_Profile_allocate);
}
@@ -1,63 +0,0 @@
1
- // Released under the MIT License.
2
- // Copyright, 2025, by Samuel Williams.
3
-
4
- #pragma once
5
-
6
- #include <ruby.h>
7
- #include "array.h"
8
- #include "time.h"
9
-
10
- extern VALUE IO_Event_Profile;
11
-
12
// A single recorded method call (Ruby or C) captured by the profiler.
struct IO_Event_Profile_Call {
	// Wall-clock timestamps for entry into and exit from the call:
	struct timespec enter_time;
	struct timespec exit_time;
	
	// Call-stack depth when this call was entered (0 = top level):
	size_t nesting;
	
	// The RUBY_EVENT_* flag that created this record:
	rb_event_flag_t event_flag;
	// The method name:
	ID id;
	
	// The receiver's class — a Ruby VALUE held outside the Ruby heap, so it
	// is marked explicitly by the profile's GC mark function:
	VALUE klass;
	// Source location; `path` is a strdup'd copy owned by this record:
	const char *path;
	int line;
	
	// The enclosing call at entry time (NULL at top level):
	struct IO_Event_Profile_Call *parent;
};
27
-
28
// State for one profiling session; wrapped as the Ruby IO::Event::Profile object.
struct IO_Event_Profile {
	// Whether an event hook was installed to record individual calls:
	int track_calls;
	
	// Wall-clock bounds of the profiled interval:
	struct timespec start_time;
	struct timespec stop_time;
	
	// The depth of the call stack:
	size_t nesting;
	
	// The current call frame:
	struct IO_Event_Profile_Call *current;
	
	// All recorded calls; owns the IO_Event_Profile_Call records:
	struct IO_Event_Array calls;
};
42
-
43
extern const rb_data_type_t IO_Event_Profile_Type;

// Ruby allocation function: builds an empty, wrapped profile:
VALUE IO_Event_Profile_allocate(VALUE klass);
// Unwrap the C struct from a wrapped Ruby Profile object:
struct IO_Event_Profile *IO_Event_Profile_get(VALUE self);

// NOTE(review): no definition of IO_Event_Profile_initialize is visible in
// the accompanying source file — this declaration appears stale; confirm
// before relying on it.
void IO_Event_Profile_initialize(struct IO_Event_Profile *profile, VALUE fiber);
// Start/stop a session; when track_calls is true a per-thread event hook
// records individual calls between the two:
void IO_Event_Profile_start(VALUE self, int track_calls);
void IO_Event_Profile_stop(VALUE self);

// Print the profile: human-readable on a TTY, JSON otherwise:
void IO_Event_Profile_print(VALUE profile, FILE *restrict stream);
52
-
53
- static inline float IO_Event_Profile_duration(VALUE self) {
54
- struct IO_Event_Profile *profile = IO_Event_Profile_get(self);
55
-
56
- struct timespec duration;
57
-
58
- IO_Event_Time_elapsed(&profile->start_time, &profile->stop_time, &duration);
59
-
60
- return IO_Event_Time_duration(&duration);
61
- }
62
-
63
- void Init_IO_Event_Profile(VALUE IO_Event);