memory-profiler 1.1.7 → 1.1.8
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +4 -4
- checksums.yaml.gz.sig +0 -0
- data/ext/extconf.rb +1 -1
- data/ext/memory/profiler/allocations.h +15 -11
- data/ext/memory/profiler/capture.c +107 -256
- data/ext/memory/profiler/capture.h +7 -0
- data/ext/memory/profiler/events.c +209 -0
- data/ext/memory/profiler/events.h +48 -0
- data/lib/memory/profiler/call_tree.rb +7 -7
- data/lib/memory/profiler/sampler.rb +11 -11
- data/lib/memory/profiler/version.rb +1 -1
- data/readme.md +4 -0
- data/releases.md +4 -0
- data.tar.gz.sig +0 -0
- metadata +3 -1
- metadata.gz.sig +0 -0
checksums.yaml
CHANGED
@@ -1,7 +1,7 @@
 ---
 SHA256:
-  metadata.gz:
-  data.tar.gz:
+  metadata.gz: 1c43ffe32db5ad0039b47cc3aa32162d58cd320e82864d78112dd5bef43f1a53
+  data.tar.gz: '01095d67b202e81846c70b915b7077708c5b395faa1f3165d4877764f18c547b'
 SHA512:
-  metadata.gz:
-  data.tar.gz:
+  metadata.gz: 37ddeacd7c676da0ca4d0656c9d971dc209b5340f122adb9201a16b740523d208dfb5845ba79ef21192cb4165496f1c4baedfda3368309eb113458aa6b9fd27e
+  data.tar.gz: d2c7fb78cdf96d799cfc531d9219defe941a50723b72f6e386c90b23507c78d3ed1bc6edb3f0b060b71e2fcb2109532239e78dcaff0f85f2743fc3d6cb014cf7
checksums.yaml.gz.sig
CHANGED
Binary file
data/ext/extconf.rb
CHANGED
@@ -16,7 +16,7 @@ if ENV.key?("RUBY_DEBUG")
 	append_cflags(["-DRUBY_DEBUG", "-O0"])
 end
 
-$srcs = ["memory/profiler/profiler.c", "memory/profiler/capture.c", "memory/profiler/allocations.c"]
+$srcs = ["memory/profiler/profiler.c", "memory/profiler/capture.c", "memory/profiler/allocations.c", "memory/profiler/events.c"]
 $VPATH << "$(srcdir)/memory/profiler"
 
 # Check for required headers
data/ext/memory/profiler/allocations.h
CHANGED
@@ -6,26 +6,30 @@
 #include "ruby.h"
 #include "ruby/st.h"
 
-// Per-class allocation tracking record
+// Per-class allocation tracking record:
 struct Memory_Profiler_Capture_Allocations {
-
-
-
-	//
+	// Optional Ruby proc/lambda to call on allocation.
+	VALUE callback;
+	
+	// Total allocations seen since tracking started.
+	size_t new_count;
+	// // Total frees seen since tracking started.
+	size_t free_count;
+	// Live count = new_count - free_count.
 	
-	// For detailed tracking: map object (VALUE) => state (VALUE)
-	// State is returned from callback on
+	// For detailed tracking: map object (VALUE) => state (VALUE).
+	// State is returned from callback on `newobj` and passed back on `freeobj`.
 	st_table *object_states;
 };
 
-// Wrap an allocations record in a VALUE
+// Wrap an allocations record in a VALUE.
 VALUE Memory_Profiler_Allocations_wrap(struct Memory_Profiler_Capture_Allocations *record);
 
-// Get allocations record from wrapper VALUE
+// Get allocations record from wrapper VALUE.
 struct Memory_Profiler_Capture_Allocations* Memory_Profiler_Allocations_get(VALUE self);
 
-// Clear/reset allocation counts and state for a record
+// Clear/reset allocation counts and state for a record.
 void Memory_Profiler_Allocations_clear(VALUE allocations);
 
-// Initialize the Allocations class
+// Initialize the Allocations class.
 void Init_Memory_Profiler_Allocations(VALUE Memory_Profiler);
data/ext/memory/profiler/capture.c
CHANGED
@@ -3,7 +3,7 @@
 
 #include "capture.h"
 #include "allocations.h"
-#include "
+#include "events.h"
 
 #include "ruby.h"
 #include "ruby/debug.h"
@@ -13,107 +13,47 @@
 
 enum {
 	DEBUG = 0,
-	DEBUG_EVENT_QUEUES = 0,
 	DEBUG_STATE = 0,
 };
 
 static VALUE Memory_Profiler_Capture = Qnil;
 
-// Event symbols
-static VALUE sym_newobj;
-static VALUE sym_freeobj;
+// Event symbols:
+static VALUE sym_newobj, sym_freeobj;
 
-//
-struct Memory_Profiler_Newobj_Queue_Item {
-	// The class of the new object:
-	VALUE klass;
-
-	// The Allocations wrapper:
-	VALUE allocations;
-
-	// The newly allocated object:
-	VALUE object;
-};
-
-// Queue item - freed object data to be processed via postponed job
-struct Memory_Profiler_Freeobj_Queue_Item {
-	// The class of the freed object:
-	VALUE klass;
-
-	// The Allocations wrapper:
-	VALUE allocations;
-
-	// The state returned from callback on newobj:
-	VALUE state;
-};
-
-// Main capture state
+// Main capture state (per-instance).
 struct Memory_Profiler_Capture {
-	// class => VALUE (wrapped Memory_Profiler_Capture_Allocations).
+	// Tracked classes: class => VALUE (wrapped Memory_Profiler_Capture_Allocations).
 	st_table *tracked_classes;
 	
-	// Master switch - is tracking active? (set by start/stop)
+	// Master switch - is tracking active? (set by start/stop).
 	int running;
 	
-	//
+	// Should we queue callbacks? (temporarily disabled during queue processing).
 	int enabled;
-
-	// Queue for new objects (processed via postponed job):
-	struct Memory_Profiler_Queue newobj_queue;
-
-	// Queue for freed objects (processed via postponed job):
-	struct Memory_Profiler_Queue freeobj_queue;
-
-	// Handle for the postponed job (processes both queues)
-	rb_postponed_job_handle_t postponed_job_handle;
 };
 
-// GC mark callback for tracked_classes table
+// GC mark callback for tracked_classes table.
 static int Memory_Profiler_Capture_tracked_classes_mark(st_data_t key, st_data_t value, st_data_t arg) {
-	// Mark class as un-movable
+	// Mark class as un-movable:
 	VALUE klass = (VALUE)key;
 	rb_gc_mark(klass);
 	
-	// Mark the wrapped Allocations VALUE
+	// Mark the wrapped Allocations VALUE:
 	VALUE allocations = (VALUE)value;
 	rb_gc_mark_movable(allocations);
 	
 	return ST_CONTINUE;
 }
 
-// GC mark function
 static void Memory_Profiler_Capture_mark(void *ptr) {
 	struct Memory_Profiler_Capture *capture = ptr;
 	
-	if (!capture) {
-		return;
-	}
-
 	if (capture->tracked_classes) {
 		st_foreach(capture->tracked_classes, Memory_Profiler_Capture_tracked_classes_mark, 0);
 	}
-
-	// Mark new objects in the queue:
-	for (size_t i = 0; i < capture->newobj_queue.count; i++) {
-		struct Memory_Profiler_Newobj_Queue_Item *newobj = Memory_Profiler_Queue_at(&capture->newobj_queue, i);
-		rb_gc_mark_movable(newobj->klass);
-		rb_gc_mark_movable(newobj->allocations);
-		rb_gc_mark_movable(newobj->object);
-	}
-
-	// Mark freed objects in the queue:
-	for (size_t i = 0; i < capture->freeobj_queue.count; i++) {
-		struct Memory_Profiler_Freeobj_Queue_Item *freed = Memory_Profiler_Queue_at(&capture->freeobj_queue, i);
-		rb_gc_mark_movable(freed->klass);
-		rb_gc_mark_movable(freed->allocations);
-
-		if (freed->state) {
-			rb_gc_mark_movable(freed->state);
-		}
-	}
 }
 
-// GC free function
 static void Memory_Profiler_Capture_free(void *ptr) {
 	struct Memory_Profiler_Capture *capture = ptr;
 	
@@ -121,14 +61,9 @@ static void Memory_Profiler_Capture_free(void *ptr) {
 		st_free_table(capture->tracked_classes);
 	}
 	
-	// Free both queues (elements are stored directly, just free the queues)
-	Memory_Profiler_Queue_free(&capture->newobj_queue);
-	Memory_Profiler_Queue_free(&capture->freeobj_queue);
-
 	xfree(capture);
 }
 
-// GC memsize function
static size_t Memory_Profiler_Capture_memsize(const void *ptr) {
 	const struct Memory_Profiler_Capture *capture = ptr;
 	size_t size = sizeof(struct Memory_Profiler_Capture);
@@ -137,29 +72,24 @@ static size_t Memory_Profiler_Capture_memsize(const void *ptr) {
 		size += capture->tracked_classes->num_entries * (sizeof(st_data_t) + sizeof(struct Memory_Profiler_Capture_Allocations));
 	}
 	
-	// Add size of both queues (elements stored directly)
-	size += capture->newobj_queue.capacity * capture->newobj_queue.element_size;
-	size += capture->freeobj_queue.capacity * capture->freeobj_queue.element_size;
-
 	return size;
 }
 
-// Foreach callback for st_foreach_with_replace (iteration logic)
+// Foreach callback for st_foreach_with_replace (iteration logic).
 static int Memory_Profiler_Capture_tracked_classes_foreach(st_data_t key, st_data_t value, st_data_t argp, int error) {
-	// Return ST_REPLACE to trigger the replace callback for each entry
 	return ST_REPLACE;
 }
 
-// Replace callback for st_foreach_with_replace (update logic)
+// Replace callback for st_foreach_with_replace (update logic).
 static int Memory_Profiler_Capture_tracked_classes_update(st_data_t *key, st_data_t *value, st_data_t argp, int existing) {
-	// Update class key if it moved
+	// Update class key if it moved:
 	VALUE old_klass = (VALUE)*key;
 	VALUE new_klass = rb_gc_location(old_klass);
 	if (old_klass != new_klass) {
 		*key = (st_data_t)new_klass;
 	}
 	
-	// Update wrapped Allocations VALUE if it moved
+	// Update wrapped Allocations VALUE if it moved:
 	VALUE old_allocations = (VALUE)*value;
 	VALUE new_allocations = rb_gc_location(old_allocations);
 	if (old_allocations != new_allocations) {
@@ -169,36 +99,15 @@ static int Memory_Profiler_Capture_tracked_classes_update(st_data_t *key, st_dat
 	return ST_CONTINUE;
 }
 
-// GC compact function - update VALUEs when GC compaction moves objects
 static void Memory_Profiler_Capture_compact(void *ptr) {
 	struct Memory_Profiler_Capture *capture = ptr;
 	
-	// Update tracked_classes keys and
+	// Update tracked_classes keys and allocations values in-place:
 	if (capture->tracked_classes && capture->tracked_classes->num_entries > 0) {
 		if (st_foreach_with_replace(capture->tracked_classes, Memory_Profiler_Capture_tracked_classes_foreach, Memory_Profiler_Capture_tracked_classes_update, 0)) {
 			rb_raise(rb_eRuntimeError, "tracked_classes modified during GC compaction");
 		}
 	}
-
-	// Update new objects in the queue
-	for (size_t i = 0; i < capture->newobj_queue.count; i++) {
-		struct Memory_Profiler_Newobj_Queue_Item *newobj = Memory_Profiler_Queue_at(&capture->newobj_queue, i);
-
-		// Update all VALUEs if they moved during compaction
-		newobj->klass = rb_gc_location(newobj->klass);
-		newobj->allocations = rb_gc_location(newobj->allocations);
-		newobj->object = rb_gc_location(newobj->object);
-	}
-
-	// Update freed objects in the queue
-	for (size_t i = 0; i < capture->freeobj_queue.count; i++) {
-		struct Memory_Profiler_Freeobj_Queue_Item *freed = Memory_Profiler_Queue_at(&capture->freeobj_queue, i);
-
-		// Update all VALUEs if they moved during compaction
-		freed->klass = rb_gc_location(freed->klass);
-		freed->allocations = rb_gc_location(freed->allocations);
-		freed->state = rb_gc_location(freed->state);
-	}
 }
 
 static const rb_data_type_t Memory_Profiler_Capture_type = {
@@ -230,174 +139,130 @@ const char *event_flag_name(rb_event_flag_t event_flag) {
 	}
 }
 
-//
-
-// IMPORTANT: Process newobj queue first, then freeobj queue to maintain order
-static void Memory_Profiler_Capture_process_queues(void *arg) {
-	VALUE self = (VALUE)arg;
+// Process a NEWOBJ event. Handles both callback and non-callback cases, stores appropriate state.
+static void Memory_Profiler_Capture_process_newobj(VALUE capture_value, VALUE klass, VALUE allocations, VALUE object) {
 	struct Memory_Profiler_Capture *capture;
-	TypedData_Get_Struct(
+	TypedData_Get_Struct(capture_value, struct Memory_Profiler_Capture, &Memory_Profiler_Capture_type, capture);
+	
+	struct Memory_Profiler_Capture_Allocations *record = Memory_Profiler_Allocations_get(allocations);
 
-
-
+	// Ensure object_states table exists:
+	if (!record->object_states) {
+		record->object_states = st_init_numtable();
+	}
 
-
-	// (rb_funcall can allocate, which would trigger more NEWOBJ events)
-	int was_enabled = capture->enabled;
-	capture->enabled = 0;
+	VALUE state;
 
-
-
-
-
-	VALUE allocations = newobj->allocations;
-	VALUE object = newobj->object;
+	if (!NIL_P(record->callback)) {
+		// Temporarily disable queueing to prevent infinite loop:
+		int was_enabled = capture->enabled;
+		capture->enabled = 0;
 
-
+		state = rb_funcall(record->callback, rb_intern("call"), 3, klass, sym_newobj, Qnil);
+		
+		capture->enabled = was_enabled;
 
-
-
-
+		if (DEBUG_STATE) fprintf(stderr, "Storing callback state for object: %p\n", (void *)object);
+	} else {
+		// No callback, store sentinel (Qnil):
+		state = Qnil;
+		
+		if (DEBUG_STATE) fprintf(stderr, "Storing sentinel (Qnil) for object: %p\n", (void *)object);
+	}
+	
+	// Always store something to maintain NEWOBJ/FREEOBJ symmetry:
+	st_insert(record->object_states, (st_data_t)object, (st_data_t)state);
+}
+
+// Process a FREEOBJ event. Looks up and deletes state, optionally calls callback.
+static void Memory_Profiler_Capture_process_freeobj(VALUE capture_value, VALUE klass, VALUE allocations, VALUE object) {
+	struct Memory_Profiler_Capture *capture;
+	TypedData_Get_Struct(capture_value, struct Memory_Profiler_Capture, &Memory_Profiler_Capture_type, capture);
+	
+	struct Memory_Profiler_Capture_Allocations *record = Memory_Profiler_Allocations_get(allocations);
+	
+	// Try to look up and delete state:
+	if (record->object_states) {
+		st_data_t state_data;
+		if (st_delete(record->object_states, (st_data_t *)&object, &state_data)) {
+			VALUE state = (VALUE)state_data;
+			
+			if (DEBUG_STATE) fprintf(stderr, "Freed tracked object: %p, state=%s\n",
+				(void *)object, NIL_P(state) ? "Qnil (sentinel)" : "real state");
 
-	//
-	if (!NIL_P(state)) {
-
-
-
+			// Only call callback if we have both callback AND real state (not Qnil sentinel):
+			if (!NIL_P(record->callback) && !NIL_P(state)) {
+				// Temporarily disable queueing to prevent infinite loop:
+				int was_enabled = capture->enabled;
+				capture->enabled = 0;
 
-
-
-
-		st_insert(record->object_states, (st_data_t)object, (st_data_t)state);
+				rb_funcall(record->callback, rb_intern("call"), 3, klass, sym_freeobj, state);
+				
+				capture->enabled = was_enabled;
 			}
+		} else {
+			if (DEBUG_STATE) fprintf(stderr, "Freed pre-existing object: %p (not tracked)\n", (void *)object);
 		}
 	}
-
-
-
-
-
-
-
-
-
-
-
-	if (!NIL_P(record->callback)) {
-		rb_funcall(record->callback, rb_intern("call"), 3, klass, sym_freeobj, state);
-	}
+}
+
+// Process a single event (NEWOBJ or FREEOBJ). Called from events.c via rb_protect to catch exceptions.
+void Memory_Profiler_Capture_process_event(struct Memory_Profiler_Event *event) {
+	switch (event->type) {
+		case MEMORY_PROFILER_EVENT_TYPE_NEWOBJ:
+			Memory_Profiler_Capture_process_newobj(event->capture, event->klass, event->allocations, event->object);
+			break;
+		case MEMORY_PROFILER_EVENT_TYPE_FREEOBJ:
+			Memory_Profiler_Capture_process_freeobj(event->capture, event->klass, event->allocations, event->object);
+			break;
 	}
-
-	// Clear both queues (elements are reused on next cycle)
-	Memory_Profiler_Queue_clear(&capture->newobj_queue);
-	Memory_Profiler_Queue_clear(&capture->freeobj_queue);
-
-	// Restore tracking state
-	capture->enabled = was_enabled;
 }
 
-
-
+#pragma mark - Event Handlers
+
+// NEWOBJ event handler. Automatically tracks ALL allocations (creates records on demand).
 static void Memory_Profiler_Capture_newobj_handler(VALUE self, struct Memory_Profiler_Capture *capture, VALUE klass, VALUE object) {
 	st_data_t allocations_data;
+	VALUE allocations;
+	
 	if (st_lookup(capture->tracked_classes, (st_data_t)klass, &allocations_data)) {
-
+		// Existing record, increment count:
+		allocations = (VALUE)allocations_data;
 		struct Memory_Profiler_Capture_Allocations *record = Memory_Profiler_Allocations_get(allocations);
-
-		// Always track counts (even during queue processing)
 		record->new_count++;
-
-		// Only queue for callback if tracking is enabled (prevents infinite recursion)
-		if (capture->enabled && !NIL_P(record->callback)) {
-			// Push a new item onto the queue (returns pointer to write to)
-			// NOTE: realloc is safe during allocation (doesn't trigger Ruby allocation)
-			struct Memory_Profiler_Newobj_Queue_Item *newobj = Memory_Profiler_Queue_push(&capture->newobj_queue);
-			if (newobj) {
-				if (DEBUG_EVENT_QUEUES) fprintf(stderr, "Queued newobj, queue size now: %zu/%zu\n",
-					capture->newobj_queue.count, capture->newobj_queue.capacity);
-
-				// Write VALUEs with write barriers (combines write + GC notification)
-				RB_OBJ_WRITE(self, &newobj->klass, klass);
-				RB_OBJ_WRITE(self, &newobj->allocations, allocations);
-				RB_OBJ_WRITE(self, &newobj->object, object);
-
-				// Trigger postponed job to process the queue
-				if (DEBUG_EVENT_QUEUES) fprintf(stderr, "Triggering postponed job to process queues\n");
-				rb_postponed_job_trigger(capture->postponed_job_handle);
-			} else {
-				if (DEBUG_EVENT_QUEUES) fprintf(stderr, "Failed to queue newobj, out of memory\n");
-			}
-			// If push failed (out of memory), silently drop this newobj event
-		}
 	} else {
-		//
+		// First time seeing this class, create record automatically:
 		struct Memory_Profiler_Capture_Allocations *record = ALLOC(struct Memory_Profiler_Capture_Allocations);
 		record->callback = Qnil;
-		record->new_count = 1;
+		record->new_count = 1;
 		record->free_count = 0;
 		record->object_states = NULL;
 		
-
-		VALUE allocations = Memory_Profiler_Allocations_wrap(record);
-
+		allocations = Memory_Profiler_Allocations_wrap(record);
 		st_insert(capture->tracked_classes, (st_data_t)klass, (st_data_t)allocations);
-		// Notify GC about the class VALUE stored as key in the table
 		RB_OBJ_WRITTEN(self, Qnil, klass);
-		// Notify GC about the allocations VALUE stored as value in the table
 		RB_OBJ_WRITTEN(self, Qnil, allocations);
 	}
+	
+	// Enqueue to global event queue:
+	Memory_Profiler_Events_enqueue(MEMORY_PROFILER_EVENT_TYPE_NEWOBJ, self, klass, allocations, object);
 }
 
-//
-// CRITICAL: This runs during GC when no Ruby code can be executed!
-// We MUST NOT call rb_funcall or any Ruby code here - just queue the work.
+// FREEOBJ event handler. Increments count and enqueues for processing.
 static void Memory_Profiler_Capture_freeobj_handler(VALUE self, struct Memory_Profiler_Capture *capture, VALUE klass, VALUE object) {
 	st_data_t allocations_data;
 	if (st_lookup(capture->tracked_classes, (st_data_t)klass, &allocations_data)) {
 		VALUE allocations = (VALUE)allocations_data;
 		struct Memory_Profiler_Capture_Allocations *record = Memory_Profiler_Allocations_get(allocations);
 		
-		// Always track counts (even during queue processing)
 		record->free_count++;
 		
-		//
-
-		if (capture->enabled && !NIL_P(record->callback) && record->object_states) {
-			if (DEBUG_STATE) fprintf(stderr, "Memory_Profiler_Capture_freeobj_handler: Looking up state for object: %p\n", (void *)object);
-
-			// Look up state stored during NEWOBJ
-			st_data_t state_data;
-			if (st_delete(record->object_states, (st_data_t *)&object, &state_data)) {
-				if (DEBUG_STATE) fprintf(stderr, "Found state for object: %p\n", (void *)object);
-				VALUE state = (VALUE)state_data;
-
-				// Push a new item onto the queue (returns pointer to write to)
-				// NOTE: realloc is safe during GC (doesn't trigger Ruby allocation)
-				struct Memory_Profiler_Freeobj_Queue_Item *freeobj = Memory_Profiler_Queue_push(&capture->freeobj_queue);
-				if (freeobj) {
-					if (DEBUG_EVENT_QUEUES) fprintf(stderr, "Queued freed object, queue size now: %zu/%zu\n",
-						capture->freeobj_queue.count, capture->freeobj_queue.capacity);
-
-					// Write VALUEs with write barriers (combines write + GC notification)
-					// Note: We're during GC/FREEOBJ, but write barriers should be safe
-					RB_OBJ_WRITE(self, &freeobj->klass, klass);
-					RB_OBJ_WRITE(self, &freeobj->allocations, allocations);
-					RB_OBJ_WRITE(self, &freeobj->state, state);
-
-					// Trigger postponed job to process both queues after GC
-					if (DEBUG_EVENT_QUEUES) fprintf(stderr, "Triggering postponed job to process queues after GC\n");
-					rb_postponed_job_trigger(capture->postponed_job_handle);
-				} else {
-					if (DEBUG_EVENT_QUEUES) fprintf(stderr, "Failed to queue freed object, out of memory\n");
-				}
-				// If push failed (out of memory), silently drop this freeobj event
-			}
-		}
+		// Enqueue to global event queue:
+		Memory_Profiler_Events_enqueue(MEMORY_PROFILER_EVENT_TYPE_FREEOBJ, self, klass, allocations, object);
 	}
 }
 
-// Check if object type is trackable (
-// Excludes internal types (T_IMEMO, T_NODE, T_ICLASS, etc.) that don't have normal classes
+// Check if object type is trackable. Excludes internal types (T_IMEMO, T_NODE, T_ICLASS, etc.) that don't have normal classes.
 int Memory_Profiler_Capture_trackable_p(VALUE object) {
 	switch (rb_type(object)) {
 		// Normal Ruby objects with valid class pointers
@@ -486,21 +351,7 @@ static VALUE Memory_Profiler_Capture_alloc(VALUE klass) {
 	capture->running = 0;
 	capture->enabled = 0;
 	
-	//
-	Memory_Profiler_Queue_initialize(&capture->newobj_queue, sizeof(struct Memory_Profiler_Newobj_Queue_Item));
-	Memory_Profiler_Queue_initialize(&capture->freeobj_queue, sizeof(struct Memory_Profiler_Freeobj_Queue_Item));
-
-	// Pre-register the postponed job for processing both queues
-	// The job will be triggered whenever we queue newobj or freeobj events
-	capture->postponed_job_handle = rb_postponed_job_preregister(
-		0, // flags
-		Memory_Profiler_Capture_process_queues,
-		(void *)obj
-	);
-
-	if (capture->postponed_job_handle == POSTPONED_JOB_HANDLE_INVALID) {
-		rb_raise(rb_eRuntimeError, "Failed to register postponed job!");
-	}
+	// Global event queue system will auto-initialize on first use (lazy initialization)
 	
 	return obj;
 }
@@ -517,6 +368,10 @@ static VALUE Memory_Profiler_Capture_start(VALUE self) {
 	
 	if (capture->running) return Qfalse;
 	
+	// Ensure global event queue system is initialized:
+	// It could fail and we want to raise an error if it does, here specifically.
+	Memory_Profiler_Events_instance();
+	
 	// Add event hook for NEWOBJ and FREEOBJ with RAW_ARG to get trace_arg
 	rb_add_event_hook2(
 		(rb_event_hook_func_t)Memory_Profiler_Capture_event_callback,
@@ -542,8 +397,9 @@ static VALUE Memory_Profiler_Capture_stop(VALUE self) {
 	// Remove event hook using same data (self) we registered with. No more events will be queued after this point:
 	rb_remove_event_hook_with_data((rb_event_hook_func_t)Memory_Profiler_Capture_event_callback, self);
 	
-	// Flush any pending queued events
-
+	// Flush any pending queued events in the global queue before stopping.
+	// This ensures all callbacks are invoked and object_states is properly maintained.
+	Memory_Profiler_Events_process_all();
 	
 	// Clear both flags - we're no longer running and callbacks are disabled
 	capture->running = 0;
@@ -575,7 +431,7 @@ static VALUE Memory_Profiler_Capture_track(int argc, VALUE *argv, VALUE self) {
 		record->callback = callback; // Initial assignment, no write barrier needed
 		record->new_count = 0;
 		record->free_count = 0;
-		record->object_states =
+		record->object_states = st_init_numtable();
 		
 		// Wrap the record in a VALUE
 		allocations = Memory_Profiler_Allocations_wrap(record);
@@ -616,7 +472,6 @@ static VALUE Memory_Profiler_Capture_tracking_p(VALUE self, VALUE klass) {
 	return st_lookup(capture->tracked_classes, (st_data_t)klass, NULL) ? Qtrue : Qfalse;
 }
 
-
 // Get count of live objects for a specific class (O(1) lookup!)
 static VALUE Memory_Profiler_Capture_count_for(VALUE self, VALUE klass) {
 	struct Memory_Profiler_Capture *capture;
@@ -725,11 +580,7 @@ static VALUE Memory_Profiler_Capture_statistics(VALUE self) {
 	// Tracked classes count
 	rb_hash_aset(statistics, ID2SYM(rb_intern("tracked_classes_count")), SIZET2NUM(capture->tracked_classes->num_entries));
 	
-	//
-	rb_hash_aset(statistics, ID2SYM(rb_intern("newobj_queue_size")), SIZET2NUM(capture->newobj_queue.count));
-	rb_hash_aset(statistics, ID2SYM(rb_intern("newobj_queue_capacity")), SIZET2NUM(capture->newobj_queue.capacity));
-	rb_hash_aset(statistics, ID2SYM(rb_intern("freeobj_queue_size")), SIZET2NUM(capture->freeobj_queue.count));
-	rb_hash_aset(statistics, ID2SYM(rb_intern("freeobj_queue_capacity")), SIZET2NUM(capture->freeobj_queue.capacity));
+	// Note: Global event queue stats are internal to the events module
 	
 	// Count object_states entries for each tracked class
 	struct Memory_Profiler_Allocations_Statistics allocations_statistics = {
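
Editorial note: taken together, the capture.c changes above keep the callback contract intact while moving its invocation out of the raw event hooks — on `newobj` the tracked callback is called as `callback.call(klass, :newobj, nil)` and its return value is stored as per-object state, and on `freeobj` it is called as `callback.call(klass, :freeobj, state)` with that stored state. A minimal Ruby sketch of that contract follows; the `Capture#track`, `#start`, `#stop` and `#count_for` names and the block form of `track` are assumptions inferred from the C sources in this diff, not confirmed API documentation.

```ruby
require "memory/profiler"

capture = Memory::Profiler::Capture.new

# Assumed signature: track(klass) with a callable. Whatever the :newobj
# invocation returns is stored and handed back on the matching :freeobj.
capture.track(String) do |klass, event, state|
  case event
  when :newobj
    caller_locations(1, 4)  # becomes this object's stored state
  when :freeobj
    # state is whatever the :newobj branch returned (or nil)
  end
end

capture.start
strings = Array.new(100) { +"tracked" }
strings.clear
GC.start

capture.stop
puts capture.count_for(String)  # live count = new_count - free_count
```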
data/ext/memory/profiler/capture.h
CHANGED
@@ -5,5 +5,12 @@
 
 #include <ruby.h>
 
+// Initialize the Capture module.
 void Init_Memory_Profiler_Capture(VALUE Memory_Profiler);
 
+// Forward declaration.
+struct Memory_Profiler_Event;
+
+// Process a single event. Called from the global event queue processor.
+// This is wrapped with rb_protect to catch exceptions.
+void Memory_Profiler_Capture_process_event(struct Memory_Profiler_Event *event);
data/ext/memory/profiler/events.c
ADDED
@@ -0,0 +1,209 @@
+// Released under the MIT License.
+// Copyright, 2025, by Samuel Williams.
+
+#include "ruby.h"
+#include "ruby/debug.h"
+
+#include "events.h"
+#include "capture.h"
+#include <stdio.h>
+
+enum {
+	DEBUG = 0,
+};
+
+// Internal structure for the global event queue system.
+struct Memory_Profiler_Events {
+	// Global event queue (contains events from all Capture instances).
+	struct Memory_Profiler_Queue queue;
+	
+	// Postponed job handle for processing the queue.
+	// Postponed job handles are an extremely limited resource, so we only register one global event queue.
+	rb_postponed_job_handle_t postponed_job_handle;
+};
+
+static void Memory_Profiler_Events_process_queue(void *arg);
+static void Memory_Profiler_Events_mark(void *ptr);
+static void Memory_Profiler_Events_compact(void *ptr);
+static void Memory_Profiler_Events_free(void *ptr);
+static size_t Memory_Profiler_Events_memsize(const void *ptr);
+
+// TypedData definition for Events.
+static const rb_data_type_t Memory_Profiler_Events_type = {
+	"Memory::Profiler::Events",
+	{
+		.dmark = Memory_Profiler_Events_mark,
+		.dcompact = Memory_Profiler_Events_compact,
+		.dfree = Memory_Profiler_Events_free,
+		.dsize = Memory_Profiler_Events_memsize,
+	},
+	0, 0, RUBY_TYPED_FREE_IMMEDIATELY | RUBY_TYPED_WB_PROTECTED
+};
+
+// Create and initialize the global event queue system.
+static VALUE Memory_Profiler_Events_new(void) {
+	struct Memory_Profiler_Events *events;
+	VALUE obj = TypedData_Make_Struct(rb_cObject, struct Memory_Profiler_Events, &Memory_Profiler_Events_type, events);
+	
+	// Initialize the global event queue:
+	Memory_Profiler_Queue_initialize(&events->queue, sizeof(struct Memory_Profiler_Event));
+	
+	// Pre-register the single postponed job for processing the queue:
+	events->postponed_job_handle = rb_postponed_job_preregister(0,
+		// Callback function to process the queue:
+		Memory_Profiler_Events_process_queue,
+		// Pass the events struct as argument:
+		(void *)events
+	);
+	
+	if (events->postponed_job_handle == POSTPONED_JOB_HANDLE_INVALID) {
+		rb_raise(rb_eRuntimeError, "Failed to register postponed job!");
+	}
+	
+	return obj;
+}
+
+// Get the global events instance (internal helper).
+struct Memory_Profiler_Events* Memory_Profiler_Events_instance(void) {
+	static VALUE instance = Qnil;
+	static struct Memory_Profiler_Events *events = NULL;
+	
+	if (instance == Qnil) {
+		instance = Memory_Profiler_Events_new();
+		
+		// Pin the global events object so it's never GC'd:
+		rb_gc_register_mark_object(instance);
+		
+		if (DEBUG) fprintf(stderr, "Global event queue system initialized and pinned\n");
+		
+		TypedData_Get_Struct(instance, struct Memory_Profiler_Events, &Memory_Profiler_Events_type, events);
+	}
+	
+	return events;
+}
+
+// GC mark callback - mark all VALUEs in the event queue.
+static void Memory_Profiler_Events_mark(void *ptr) {
+	struct Memory_Profiler_Events *events = ptr;
+	
+	// Mark all events in the global queue:
+	for (size_t i = 0; i < events->queue.count; i++) {
+		struct Memory_Profiler_Event *event = Memory_Profiler_Queue_at(&events->queue, i);
+		
+		// Mark the Capture instance this event belongs to:
+		rb_gc_mark_movable(event->capture);
+		rb_gc_mark_movable(event->klass);
+		rb_gc_mark_movable(event->allocations);
+		
+		// For NEWOBJ, mark the object (it's alive).
+		// For FREEOBJ, DON'T mark (it's being freed - just used as key for lookup).
+		if (event->type == MEMORY_PROFILER_EVENT_TYPE_NEWOBJ) {
+			rb_gc_mark_movable(event->object);
+		}
+	}
+}
+
+// GC compact callback - update all VALUEs in the event queue.
+static void Memory_Profiler_Events_compact(void *ptr) {
+	struct Memory_Profiler_Events *events = ptr;
+	
+	// Update objects in the global event queue:
+	for (size_t i = 0; i < events->queue.count; i++) {
+		struct Memory_Profiler_Event *event = Memory_Profiler_Queue_at(&events->queue, i);
+		
+		// Update all VALUEs if they moved during compaction:
+		event->capture = rb_gc_location(event->capture);
+		event->klass = rb_gc_location(event->klass);
+		event->allocations = rb_gc_location(event->allocations);
+		
+		// For NEWOBJ, update the object pointer.
+		// For FREEOBJ, DON'T update (it's being freed, pointer is stale).
+		if (event->type == MEMORY_PROFILER_EVENT_TYPE_NEWOBJ) {
+			event->object = rb_gc_location(event->object);
+		}
+	}
+}
+
+// GC free callback.
+static void Memory_Profiler_Events_free(void *ptr) {
+	struct Memory_Profiler_Events *events = ptr;
+	Memory_Profiler_Queue_free(&events->queue);
+}
+
+// GC memsize callback.
+static size_t Memory_Profiler_Events_memsize(const void *ptr) {
+	const struct Memory_Profiler_Events *events = ptr;
+	return sizeof(struct Memory_Profiler_Events) + (events->queue.capacity * events->queue.element_size);
+}
+
+// Enqueue an event to the global queue.
+int Memory_Profiler_Events_enqueue(
+	enum Memory_Profiler_Event_Type type,
+	VALUE capture,
+	VALUE klass,
+	VALUE allocations,
+	VALUE object
+) {
+	struct Memory_Profiler_Events *events = Memory_Profiler_Events_instance();
+	
+	struct Memory_Profiler_Event *event = Memory_Profiler_Queue_push(&events->queue);
+	if (event) {
+		event->type = type;
+		event->capture = capture;
+		event->klass = klass;
+		event->allocations = allocations;
+		event->object = object;
+		
+		const char *type_name = (type == MEMORY_PROFILER_EVENT_TYPE_NEWOBJ) ? "NEWOBJ" : "FREEOBJ";
+		if (DEBUG) fprintf(stderr, "Queued %s to global queue, size: %zu\n", type_name, events->queue.count);
+		
+		rb_postponed_job_trigger(events->postponed_job_handle);
+		// Success:
+		return 1;
+	}
+	
+	// Queue full:
+	return 0;
+}
+
+// Process all queued events immediately (flush the queue).
+// Public API function - called from Capture stop() to ensure all events are processed.
+void Memory_Profiler_Events_process_all(void) {
+	struct Memory_Profiler_Events *events = Memory_Profiler_Events_instance();
+	Memory_Profiler_Events_process_queue((void *)events);
+}
+
+// Wrapper for rb_protect - processes a single event.
+// rb_protect requires signature: VALUE func(VALUE arg).
+static VALUE Memory_Profiler_Events_process_event_protected(VALUE arg) {
+	struct Memory_Profiler_Event *event = (struct Memory_Profiler_Event *)arg;
+	Memory_Profiler_Capture_process_event(event);
+	return Qnil;
+}
+
+// Postponed job callback - processes global event queue.
+// This runs when it's safe to call Ruby code (not during allocation or GC).
+// Processes events from ALL Capture instances.
+static void Memory_Profiler_Events_process_queue(void *arg) {
+	struct Memory_Profiler_Events *events = (struct Memory_Profiler_Events *)arg;
+	
+	if (DEBUG) fprintf(stderr, "Processing global event queue: %zu events\n", events->queue.count);
+	
+	// Process all events in order (maintains NEWOBJ before FREEOBJ for same object):
+	for (size_t i = 0; i < events->queue.count; i++) {
+		struct Memory_Profiler_Event *event = Memory_Profiler_Queue_at(&events->queue, i);
+		
+		// Process event with rb_protect to catch any exceptions:
+		int state = 0;
+		rb_protect(Memory_Profiler_Events_process_event_protected, (VALUE)event, &state);
+		
+		if (state) {
+			// Exception occurred, warn and suppress:
+			rb_warning("Exception in event processing callback (caught and suppressed): %"PRIsVALUE, rb_errinfo());
+			rb_set_errinfo(Qnil);
+		}
+	}
+	
+	// Always clear the global queue, even if exceptions occurred:
+	Memory_Profiler_Queue_clear(&events->queue);
+}
data/ext/memory/profiler/events.h
ADDED
@@ -0,0 +1,48 @@
+// Released under the MIT License.
+// Copyright, 2025, by Samuel Williams.
+
+#pragma once
+
+#include <ruby.h>
+#include "queue.h"
+
+// Event types
+enum Memory_Profiler_Event_Type {
+	MEMORY_PROFILER_EVENT_TYPE_NEWOBJ,
+	MEMORY_PROFILER_EVENT_TYPE_FREEOBJ,
+};
+
+// Event queue item - stores all info needed to process an event
+struct Memory_Profiler_Event {
+	enum Memory_Profiler_Event_Type type;
+	
+	// Which Capture instance this event belongs to:
+	VALUE capture;
+	
+	// The class of the object:
+	VALUE klass;
+	
+	// The Allocations wrapper:
+	VALUE allocations;
+	
+	// The object itself:
+	VALUE object;
+};
+
+struct Memory_Profiler_Events;
+
+struct Memory_Profiler_Events* Memory_Profiler_Events_instance(void);
+
+// Enqueue an event to the global queue.
+// Returns non-zero on success, zero on failure.
+int Memory_Profiler_Events_enqueue(
+	enum Memory_Profiler_Event_Type type,
+	VALUE capture,
+	VALUE klass,
+	VALUE allocations,
+	VALUE object
+);
+
+// Process all queued events immediately (flush the queue)
+// Called from Capture stop() to ensure all events are processed before stopping
+void Memory_Profiler_Events_process_all(void);
data/lib/memory/profiler/call_tree.rb
CHANGED
@@ -79,7 +79,7 @@ module Memory
 				end
 				
 				# Now recursively prune the retained children (avoid pruning nodes we just discarded)
-				@children.each_value
+				@children.each_value{|child| pruned_count += child.prune!(limit)}
 				
 				# Clean up if we ended up with no children
 				@children = nil if @children.empty?
@@ -98,7 +98,7 @@ module Memory
 				
 				# Recursively detach all children first and sum their counts
 				if @children
-					@children.each_value
+					@children.each_value{|child| count += child.detach!}
 				end
 				
 				# Break all references
@@ -192,13 +192,13 @@ module Memory
 				
 				@root.each_path do |path, total_count, retained_count|
 					# Filter out root node (has nil location) and map to location strings
-					locations = path.select(&:location).map
+					locations = path.select(&:location).map{|node| node.location.to_s}
 					paths << [locations, total_count, retained_count] unless locations.empty?
 				end
 				
 				# Sort by the requested metric (default: retained, since that's what matters for leaks)
 				sort_index = (by == :total) ? 1 : 2
-				paths.sort_by
+				paths.sort_by{|path_data| -path_data[sort_index]}.first(limit)
 			end
 			
 			# Get hotspot locations (individual frames with highest counts).
@@ -207,13 +207,13 @@ module Memory
 			# @parameter by [Symbol] Sort by :total or :retained count.
 			# @returns [Hash] Map of location => [total_count, retained_count].
 			def hotspots(limit = 20, by: :retained)
-				frames = Hash.new
+				frames = Hash.new{|h, k| h[k] = [0, 0]}
 				
 				collect_frames(@root, frames)
 				
 				# Sort by the requested metric
 				sort_index = (by == :total) ? 0 : 1
-				frames.sort_by
+				frames.sort_by{|_, counts| -counts[sort_index]}.first(limit).to_h
 			end
 			
 			# Total number of allocations tracked.
@@ -255,7 +255,7 @@ module Memory
 					frames[location_str][1] += node.retained_count
 				end
 				
-				node.each_child
+				node.each_child{|child| collect_frames(child, frames)}
 			end
 		end
 	end
data/lib/memory/profiler/sampler.rb
CHANGED
@@ -102,28 +102,28 @@ module Memory
 				@call_trees = {}
 				@samples = {}
 			end
-
+
 			# @attribute [Integer] The depth of the call tree.
 			attr :depth
 			
 			# @attribute [Proc] The filter to exclude frames from call paths.
 			attr :filter
-
+
 			# @attribute [Integer] The number of increases before enabling detailed tracking.
 			attr :increases_threshold
-
+
 			# @attribute [Integer] The number of insertions before auto-pruning (nil = no auto-pruning).
 			attr :prune_limit
-
+
 			# @attribute [Integer | Nil] The number of insertions before auto-pruning (nil = no auto-pruning).
 			attr :prune_threshold
-
+
 			# @attribute [Capture] The capture object.
 			attr :capture
-
+
 			# @attribute [Hash] The call trees.
 			attr :call_trees
-
+
 			# @attribute [Hash] The samples for each class being tracked.
 			attr :samples
 			
@@ -249,11 +249,11 @@ module Memory
 					live_count: @capture.count_for(klass),
 					total_allocations: tree.total_allocations,
 					retained_allocations: tree.retained_allocations,
-					top_paths: tree.top_paths(10).map
-					{
+					top_paths: tree.top_paths(10).map{|path, total, retained|
+						{path: path, total_count: total, retained_count: retained}
 					},
-					hotspots: tree.hotspots(20).transform_values
-					{
+					hotspots: tree.hotspots(20).transform_values{|total, retained|
+						{total_count: total, retained_count: retained}
 					}
 				}
 			end
data/readme.md
CHANGED
@@ -22,6 +22,10 @@ Please see the [project documentation](https://socketry.github.io/memory-profile
 
 Please see the [project releases](https://socketry.github.io/memory-profiler/releases/index) for all releases.
 
+### v1.1.8
+
+- Use single global queue for event handling to avoid incorrect ordering.
+
 ### v1.1.7
 
 - Expose `Capture#statistics` for debugging internal memory tracking state.
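
Editorial note on the v1.1.8 entry above: events from every `Capture` instance are now pushed onto one global queue and processed in arrival order by a single postponed job, and `stop` flushes that queue before disabling tracking. A brief, hedged usage sketch — the `Capture.new`, `#start`, `#stop` and `#statistics` names are taken from this diff and the v1.1.7 note, and the surrounding workload is illustrative only:

```ruby
require "memory/profiler"

capture = Memory::Profiler::Capture.new
capture.start                 # installs the NEWOBJ/FREEOBJ event hooks

objects = Array.new(10_000) { Object.new }
objects.clear
GC.start                      # frees are queued as FREEOBJ events

capture.stop                  # flushes the global event queue before disabling
puts capture.statistics       # per-class counts; queue internals are no longer reported here
```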
data/releases.md
CHANGED
data.tar.gz.sig
CHANGED
Binary file
metadata
CHANGED
@@ -1,7 +1,7 @@
 --- !ruby/object:Gem::Specification
 name: memory-profiler
 version: !ruby/object:Gem::Version
-  version: 1.1.
+  version: 1.1.8
 platform: ruby
 authors:
 - Samuel Williams
@@ -51,6 +51,8 @@ files:
 - ext/memory/profiler/allocations.h
 - ext/memory/profiler/capture.c
 - ext/memory/profiler/capture.h
+- ext/memory/profiler/events.c
+- ext/memory/profiler/events.h
 - ext/memory/profiler/profiler.c
 - ext/memory/profiler/queue.h
 - lib/memory/profiler.rb
metadata.gz.sig
CHANGED
Binary file