memory-profiler 1.1.9 → 1.1.11
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +4 -4
- checksums.yaml.gz.sig +0 -0
- data/ext/memory/profiler/allocations.c +30 -41
- data/ext/memory/profiler/allocations.h +3 -3
- data/ext/memory/profiler/capture.c +155 -139
- data/ext/memory/profiler/events.c +88 -38
- data/ext/memory/profiler/events.h +2 -5
- data/lib/memory/profiler/version.rb +1 -1
- data/readme.md +13 -0
- data/releases.md +13 -0
- data.tar.gz.sig +0 -0
- metadata +1 -1
- metadata.gz.sig +0 -0
checksums.yaml
CHANGED

@@ -1,7 +1,7 @@
 ---
 SHA256:
-  metadata.gz:
-  data.tar.gz:
+  metadata.gz: 1237585b03f1c87b2c4cafb01668200bc33d518c35f832539315a7f6f37dc4f2
+  data.tar.gz: 13a79c36183a794df061076e3e09dd2a8a1e14eadfcdb0dc5e3317a6902f622c
 SHA512:
-  metadata.gz:
-  data.tar.gz:
+  metadata.gz: 98c104bac6001ec5cde21d984a80a69ac992a113677ea672a81d1d1e468e754331f1170ea58c3a6ba8f1fe4a453ec669d143f7af71ab797e0d206a50bd8bbe8a
+  data.tar.gz: 2bf799367a7cd59a760f52c89f8da0ab3ee252867bdcca431ca9340349172bcf0962f626f4cada76fcfd721084fb8de8dbe0d8692eb9593a5b4a56f625587436
checksums.yaml.gz.sig
CHANGED

Binary file
data/ext/memory/profiler/allocations.c
CHANGED

@@ -10,39 +10,33 @@
 
 static VALUE Memory_Profiler_Allocations = Qnil;
 
-// Helper to mark
-static int
-// Don't mark the object
-//
-// rb_gc_mark_movable(object);
-
+// Helper to mark states table (object => state)
+static int Memory_Profiler_Allocations_states_mark(st_data_t key, st_data_t value, st_data_t arg) {
+// Don't mark the object key (weak reference - don't keep objects alive)
+// Mark the state value
 VALUE state = (VALUE)value;
-
-rb_gc_mark_movable(state);
-}
+rb_gc_mark_movable(state);
 return ST_CONTINUE;
 }
 
 // Foreach callback for st_foreach_with_replace (iteration logic)
-static int
+static int Memory_Profiler_Allocations_states_foreach(st_data_t key, st_data_t value, st_data_t argp, int error) {
 // Return ST_REPLACE to trigger the replace callback for each entry
 return ST_REPLACE;
 }
 
-// Replace callback for st_foreach_with_replace to update
-static int
-
+// Replace callback for st_foreach_with_replace to update states during compaction
+static int Memory_Profiler_Allocations_states_compact(st_data_t *key, st_data_t *value, st_data_t data, int existing) {
+// Key is object (VALUE) - we can't update if it moved (would require rehashing/allocation)
 VALUE old_state = (VALUE)*value;
-
-VALUE new_object = rb_gc_location(old_object);
 VALUE new_state = rb_gc_location(old_state);
 
-//
-
-
-
+// NOTE: We can't update object keys if they moved (would require rehashing/allocation).
+// This means lookups may fail after compaction for moved objects.
+// This is acceptable - FREEOBJ will simply not find the state and skip the callback.
+// The state will be cleaned up when Allocations is freed/cleared.
 
-// Update value if it moved
+// Update state value if it moved (this is safe, doesn't rehash)
 if (old_state != new_state) {
 *value = (st_data_t)new_state;
 }
@@ -50,7 +44,6 @@ static int Memory_Profiler_Allocations_object_states_compact(st_data_t *key, st_
 return ST_CONTINUE;
 }
 
-// GC mark function for Allocations
 static void Memory_Profiler_Allocations_mark(void *ptr) {
 struct Memory_Profiler_Capture_Allocations *record = ptr;
 
@@ -58,22 +51,19 @@ static void Memory_Profiler_Allocations_mark(void *ptr) {
 return;
 }
 
-
-rb_gc_mark_movable(record->callback);
-}
+rb_gc_mark_movable(record->callback);
 
-// Mark
-if (record->
-st_foreach(record->
+// Mark states table if it exists
+if (record->states) {
+st_foreach(record->states, Memory_Profiler_Allocations_states_mark, 0);
 }
 }
 
-// GC free function for Allocations
 static void Memory_Profiler_Allocations_free(void *ptr) {
 struct Memory_Profiler_Capture_Allocations *record = ptr;
 
-if (record->
-st_free_table(record->
+if (record->states) {
+st_free_table(record->states);
 }
 
 xfree(record);
@@ -84,14 +74,12 @@ static void Memory_Profiler_Allocations_compact(void *ptr) {
 struct Memory_Profiler_Capture_Allocations *record = ptr;
 
 // Update callback if it moved
-
-record->callback = rb_gc_location(record->callback);
-}
+record->callback = rb_gc_location(record->callback);
 
-// Update
-if (record->
-if (st_foreach_with_replace(record->
-rb_raise(rb_eRuntimeError, "
+// Update states table if it exists
+if (record->states && record->states->num_entries > 0) {
+if (st_foreach_with_replace(record->states, Memory_Profiler_Allocations_states_foreach, Memory_Profiler_Allocations_states_compact, 0)) {
+rb_raise(rb_eRuntimeError, "states modified during GC compaction");
 }
 }
 }
@@ -158,10 +146,11 @@ void Memory_Profiler_Allocations_clear(VALUE allocations) {
 record->free_count = 0; // Reset free count
 RB_OBJ_WRITE(allocations, &record->callback, Qnil); // Clear callback with write barrier
 
-// Clear
-if (record->
-
-
+// Clear states - either clear the table or reinitialize
+if (record->states) {
+st_clear(record->states);
+} else {
+record->states = st_init_numtable();
 }
 }
 
data/ext/memory/profiler/allocations.h
CHANGED

@@ -17,9 +17,9 @@ struct Memory_Profiler_Capture_Allocations {
 size_t free_count;
 // Live count = new_count - free_count.
 
-//
-//
-st_table *
+// Map object (VALUE) => state (VALUE).
+// Object keys need compaction updates when they move.
+st_table *states;
 };
 
 // Wrap an allocations record in a VALUE.
data/ext/memory/profiler/capture.c
CHANGED

@@ -13,7 +13,6 @@
 
 enum {
 DEBUG = 0,
-DEBUG_STATE = 0,
 };
 
 static VALUE Memory_Profiler_Capture = Qnil;
@@ -23,19 +22,25 @@ static VALUE sym_newobj, sym_freeobj;
 
 // Main capture state (per-instance).
 struct Memory_Profiler_Capture {
-// Tracked classes: class => VALUE (wrapped Memory_Profiler_Capture_Allocations).
-st_table *tracked_classes;
-
 // Master switch - is tracking active? (set by start/stop).
 int running;
 
 // Should we queue callbacks? (temporarily disabled during queue processing).
-int
+int paused;
+
+// Tracked classes: class => VALUE (wrapped Memory_Profiler_Capture_Allocations).
+st_table *tracked_classes;
+
+// Total number of allocations and frees seen since tracking started.
+size_t new_count;
+size_t free_count;
 };
 
 // GC mark callback for tracked_classes table.
 static int Memory_Profiler_Capture_tracked_classes_mark(st_data_t key, st_data_t value, st_data_t arg) {
 // Mark class as un-movable:
+// - We don't want to re-index the table if the class moves.
+// - We don't want objects in `freeobj` to have invalid class pointers (maybe helps).
 VALUE klass = (VALUE)key;
 rb_gc_mark(klass);
 
@@ -82,13 +87,6 @@ static int Memory_Profiler_Capture_tracked_classes_foreach(st_data_t key, st_dat
 
 // Replace callback for st_foreach_with_replace (update logic).
 static int Memory_Profiler_Capture_tracked_classes_update(st_data_t *key, st_data_t *value, st_data_t argp, int existing) {
-// Update class key if it moved:
-VALUE old_klass = (VALUE)*key;
-VALUE new_klass = rb_gc_location(old_klass);
-if (old_klass != new_klass) {
-*key = (st_data_t)new_klass;
-}
-
 // Update wrapped Allocations VALUE if it moved:
 VALUE old_allocations = (VALUE)*value;
 VALUE new_allocations = rb_gc_location(old_allocations);
@@ -139,129 +137,116 @@ const char *event_flag_name(rb_event_flag_t event_flag) {
 }
 }
 
-// Process a NEWOBJ event.
-static void Memory_Profiler_Capture_process_newobj(VALUE capture_value, VALUE klass, VALUE
+// Process a NEWOBJ event. All allocation tracking logic is here.
+static void Memory_Profiler_Capture_process_newobj(VALUE capture_value, VALUE klass, VALUE object) {
 struct Memory_Profiler_Capture *capture;
 TypedData_Get_Struct(capture_value, struct Memory_Profiler_Capture, &Memory_Profiler_Capture_type, capture);
 
-
+// Pause the capture to prevent infinite loop:
+capture->paused += 1;
 
-//
-
-record->object_states = st_init_numtable();
-}
+// Increment global new count:
+capture->new_count++;
 
-
+// Look up or create allocations record for this class:
+st_data_t allocations_data;
+VALUE allocations;
+struct Memory_Profiler_Capture_Allocations *record;
 
-if (
-//
-
-
-
-state = rb_funcall(record->callback, rb_intern("call"), 3, klass, sym_newobj, Qnil);
-
-capture->enabled = was_enabled;
-
-if (DEBUG_STATE) fprintf(stderr, "Storing callback state for object: %p\n", (void *)object);
+if (st_lookup(capture->tracked_classes, (st_data_t)klass, &allocations_data)) {
+// Existing record
+allocations = (VALUE)allocations_data;
+record = Memory_Profiler_Allocations_get(allocations);
+record->new_count++;
 } else {
-//
-
+// First time seeing this class, create record automatically
+record = ALLOC(struct Memory_Profiler_Capture_Allocations);
+record->callback = Qnil;
+record->new_count = 1;
+record->free_count = 0;
+record->states = st_init_numtable();
 
-
+allocations = Memory_Profiler_Allocations_wrap(record);
+st_insert(capture->tracked_classes, (st_data_t)klass, (st_data_t)allocations);
+RB_OBJ_WRITTEN(capture_value, Qnil, klass);
+RB_OBJ_WRITTEN(capture_value, Qnil, allocations);
 }
 
-//
-
+// Only store state if there's a callback
+if (!NIL_P(record->callback)) {
+VALUE state = rb_funcall(record->callback, rb_intern("call"), 3, klass, sym_newobj, Qnil);
+
+// Store state using object as key (works because object is alive):
+st_insert(record->states, (st_data_t)object, (st_data_t)state);
+} else {
+// Store state as nil:
+st_insert(record->states, (st_data_t)object, (st_data_t)Qnil);
+}
+
+// Resume the capture:
+capture->paused -= 1;
 }
 
-// Process a FREEOBJ event.
-static void Memory_Profiler_Capture_process_freeobj(VALUE capture_value, VALUE klass, VALUE
+// Process a FREEOBJ event. All deallocation tracking logic is here.
+static void Memory_Profiler_Capture_process_freeobj(VALUE capture_value, VALUE klass, VALUE object) {
 struct Memory_Profiler_Capture *capture;
 TypedData_Get_Struct(capture_value, struct Memory_Profiler_Capture, &Memory_Profiler_Capture_type, capture);
 
+// Pause the capture to prevent infinite loop:
+capture->paused += 1;
+
+// Look up allocations record for this class
+st_data_t allocations_data;
+if (!st_lookup(capture->tracked_classes, (st_data_t)klass, &allocations_data)) {
+// The class is not tracked, so we are not tracking this object.
+goto done;
+}
+
+VALUE allocations = (VALUE)allocations_data;
 struct Memory_Profiler_Capture_Allocations *record = Memory_Profiler_Allocations_get(allocations);
 
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-capture->enabled = was_enabled;
-}
-} else {
-if (DEBUG_STATE) fprintf(stderr, "Freed pre-existing object: %p (not tracked)\n", (void *)object);
+if (!record->states) {
+// There is no state table for this class, so we are not tracking it.
+goto done;
+}
+
+st_data_t state_data;
+if (st_delete(record->states, (st_data_t *)&object, &state_data)) {
+VALUE state = (VALUE)state_data;
+
+// Increment global free count
+capture->free_count++;
+
+// Increment per-class free count
+record->free_count++;
+
+if (!NIL_P(record->callback) && !NIL_P(state)) {
+rb_funcall(record->callback, rb_intern("call"), 3, klass, sym_freeobj, state);
 }
 }
+
+done:
+// Resume the capture:
+capture->paused -= 1;
 }
 
 // Process a single event (NEWOBJ or FREEOBJ). Called from events.c via rb_protect to catch exceptions.
 void Memory_Profiler_Capture_process_event(struct Memory_Profiler_Event *event) {
 switch (event->type) {
 case MEMORY_PROFILER_EVENT_TYPE_NEWOBJ:
-Memory_Profiler_Capture_process_newobj(event->capture, event->klass, event->
+Memory_Profiler_Capture_process_newobj(event->capture, event->klass, event->object);
 break;
 case MEMORY_PROFILER_EVENT_TYPE_FREEOBJ:
-Memory_Profiler_Capture_process_freeobj(event->capture, event->klass, event->
+Memory_Profiler_Capture_process_freeobj(event->capture, event->klass, event->object);
+break;
+default:
+// Ignore.
 break;
 }
 }
 
 #pragma mark - Event Handlers
 
-// NEWOBJ event handler. Automatically tracks ALL allocations (creates records on demand).
-static void Memory_Profiler_Capture_newobj_handler(VALUE self, struct Memory_Profiler_Capture *capture, VALUE klass, VALUE object) {
-st_data_t allocations_data;
-VALUE allocations;
-
-if (st_lookup(capture->tracked_classes, (st_data_t)klass, &allocations_data)) {
-// Existing record, increment count:
-allocations = (VALUE)allocations_data;
-struct Memory_Profiler_Capture_Allocations *record = Memory_Profiler_Allocations_get(allocations);
-record->new_count++;
-} else {
-// First time seeing this class, create record automatically:
-struct Memory_Profiler_Capture_Allocations *record = ALLOC(struct Memory_Profiler_Capture_Allocations);
-record->callback = Qnil;
-record->new_count = 1;
-record->free_count = 0;
-record->object_states = NULL;
-
-allocations = Memory_Profiler_Allocations_wrap(record);
-st_insert(capture->tracked_classes, (st_data_t)klass, (st_data_t)allocations);
-RB_OBJ_WRITTEN(self, Qnil, klass);
-RB_OBJ_WRITTEN(self, Qnil, allocations);
-}
-
-// Enqueue to global event queue:
-Memory_Profiler_Events_enqueue(MEMORY_PROFILER_EVENT_TYPE_NEWOBJ, self, klass, allocations, object);
-}
-
-// FREEOBJ event handler. Increments count and enqueues for processing.
-static void Memory_Profiler_Capture_freeobj_handler(VALUE self, struct Memory_Profiler_Capture *capture, VALUE klass, VALUE object) {
-st_data_t allocations_data;
-if (st_lookup(capture->tracked_classes, (st_data_t)klass, &allocations_data)) {
-VALUE allocations = (VALUE)allocations_data;
-struct Memory_Profiler_Capture_Allocations *record = Memory_Profiler_Allocations_get(allocations);
-
-record->free_count++;
-
-// Enqueue to global event queue:
-Memory_Profiler_Events_enqueue(MEMORY_PROFILER_EVENT_TYPE_FREEOBJ, self, klass, allocations, object);
-}
-}
-
 // Check if object type is trackable. Excludes internal types (T_IMEMO, T_NODE, T_ICLASS, etc.) that don't have normal classes.
 int Memory_Profiler_Capture_trackable_p(VALUE object) {
 switch (rb_type(object)) {
@@ -305,30 +290,22 @@ static void Memory_Profiler_Capture_event_callback(VALUE data, void *ptr) {
 TypedData_Get_Struct(data, struct Memory_Profiler_Capture, &Memory_Profiler_Capture_type, capture);
 
 VALUE object = rb_tracearg_object(trace_arg);
-
+
 // We don't want to track internal non-Object allocations:
 if (!Memory_Profiler_Capture_trackable_p(object)) return;
-
+
 rb_event_flag_t event_flag = rb_tracearg_event_flag(trace_arg);
 VALUE klass = rb_class_of(object);
 if (!klass) return;
 
-if (DEBUG) {
-// In events other than NEWOBJ, we are unable to allocate objects (due to GC), so we simply say "ignored":
-const char *klass_name = "ignored";
-if (event_flag == RUBY_INTERNAL_EVENT_NEWOBJ) {
-klass_name = rb_class2name(klass);
-}
-
-fprintf(stderr, "Memory_Profiler_Capture_event_callback: %s, Object: %p, Class: %p (%s)\n", event_flag_name(event_flag), (void *)object, (void *)klass, klass_name);
-}
-
 if (event_flag == RUBY_INTERNAL_EVENT_NEWOBJ) {
-//
-
+// Skip NEWOBJ if disabled (during callback) to prevent infinite recursion
+if (capture->paused) return;
+
+Memory_Profiler_Events_enqueue(MEMORY_PROFILER_EVENT_TYPE_NEWOBJ, data, klass, object);
 } else if (event_flag == RUBY_INTERNAL_EVENT_FREEOBJ) {
-//
-
+// Always process FREEOBJ to ensure state cleanup
+Memory_Profiler_Events_enqueue(MEMORY_PROFILER_EVENT_TYPE_FREEOBJ, data, klass, object);
 }
 }
 
@@ -347,9 +324,13 @@ static VALUE Memory_Profiler_Capture_alloc(VALUE klass) {
 rb_raise(rb_eRuntimeError, "Failed to initialize hash table");
 }
 
+// Initialize allocation tracking counters
+capture->new_count = 0;
+capture->free_count = 0;
+
 // Initialize state flags - not running, callbacks disabled
 capture->running = 0;
-capture->
+capture->paused = 0;
 
 // Global event queue system will auto-initialize on first use (lazy initialization)
 
@@ -382,7 +363,7 @@ static VALUE Memory_Profiler_Capture_start(VALUE self) {
 
 // Set both flags - we're now running and callbacks are enabled
 capture->running = 1;
-capture->
+capture->paused = 0;
 
 return Qtrue;
 }
@@ -403,7 +384,7 @@ static VALUE Memory_Profiler_Capture_stop(VALUE self) {
 
 // Clear both flags - we're no longer running and callbacks are disabled
 capture->running = 0;
-capture->
+capture->paused = 0;
 
 return Qtrue;
 }
@@ -431,7 +412,7 @@ static VALUE Memory_Profiler_Capture_track(int argc, VALUE *argv, VALUE self) {
 record->callback = callback; // Initial assignment, no write barrier needed
 record->new_count = 0;
 record->free_count = 0;
-record->
+record->states = st_init_numtable();
 
 // Wrap the record in a VALUE
 allocations = Memory_Profiler_Allocations_wrap(record);
@@ -481,13 +462,9 @@ static VALUE Memory_Profiler_Capture_count_for(VALUE self, VALUE klass) {
 if (st_lookup(capture->tracked_classes, (st_data_t)klass, &allocations_data)) {
 VALUE allocations = (VALUE)allocations_data;
 struct Memory_Profiler_Capture_Allocations *record = Memory_Profiler_Allocations_get(allocations);
-
-
-if (record->free_count > record->new_count) {
-return INT2FIX(0); // Can't have negative live count
+if (record->free_count <= record->new_count) {
+return SIZET2NUM(record->new_count - record->free_count);
 }
-size_t live_count = record->new_count - record->free_count;
-return SIZET2NUM(live_count);
 }
 
 return INT2FIX(0);
@@ -507,9 +484,21 @@ static VALUE Memory_Profiler_Capture_clear(VALUE self) {
 struct Memory_Profiler_Capture *capture;
 TypedData_Get_Struct(self, struct Memory_Profiler_Capture, &Memory_Profiler_Capture_type, capture);
 
-//
+// SAFETY: Don't allow clearing while running - there may be:
+// - Event handlers firing (adding to object_allocations).
+// - Events queued that haven't been processed yet.
+// - Processors trying to access the states table.
+if (capture->running) {
+rb_raise(rb_eRuntimeError, "Cannot clear while capture is running - call stop() first!");
+}
+
+// Reset all counts to 0 (don't free, just reset) - pass self for write barriers:
 st_foreach(capture->tracked_classes, Memory_Profiler_Capture_tracked_classes_clear, 0);
 
+// Reset allocation tracking counters
+capture->new_count = 0;
+capture->free_count = 0;
+
 return self;
 }
 
@@ -555,17 +544,17 @@ struct Memory_Profiler_Allocations_Statistics {
 VALUE per_class_counts;
 };
 
-// Iterator callback to count
-static int
+// Iterator callback to count states per class
+static int Memory_Profiler_Capture_count_states(st_data_t key, st_data_t value, st_data_t argument) {
 struct Memory_Profiler_Allocations_Statistics *statistics = (struct Memory_Profiler_Allocations_Statistics *)argument;
 VALUE klass = (VALUE)key;
 VALUE allocations = (VALUE)value;
 struct Memory_Profiler_Capture_Allocations *record = Memory_Profiler_Allocations_get(allocations);
 
-size_t
-statistics->total_tracked_objects +=
+size_t states_count = record->states ? record->states->num_entries : 0;
+statistics->total_tracked_objects += states_count;
 
-rb_hash_aset(statistics->per_class_counts, klass, SIZET2NUM(
+rb_hash_aset(statistics->per_class_counts, klass, SIZET2NUM(states_count));
 return ST_CONTINUE;
 }
 
@@ -579,16 +568,14 @@ static VALUE Memory_Profiler_Capture_statistics(VALUE self) {
 
 // Tracked classes count
 rb_hash_aset(statistics, ID2SYM(rb_intern("tracked_classes_count")), SIZET2NUM(capture->tracked_classes->num_entries));
-
-//
-
-// Count object_states entries for each tracked class
+
+// Count states entries for each tracked class
 struct Memory_Profiler_Allocations_Statistics allocations_statistics = {
 .total_tracked_objects = 0,
 .per_class_counts = rb_hash_new()
 };
 
-st_foreach(capture->tracked_classes,
+st_foreach(capture->tracked_classes, Memory_Profiler_Capture_count_states, (st_data_t)&allocations_statistics);
 
 rb_hash_aset(statistics, ID2SYM(rb_intern("total_tracked_objects")), SIZET2NUM(allocations_statistics.total_tracked_objects));
 rb_hash_aset(statistics, ID2SYM(rb_intern("tracked_objects_per_class")), allocations_statistics.per_class_counts);
@@ -596,6 +583,32 @@ static VALUE Memory_Profiler_Capture_statistics(VALUE self) {
 return statistics;
 }
 
+// Get total new count across all classes
+static VALUE Memory_Profiler_Capture_new_count(VALUE self) {
+struct Memory_Profiler_Capture *capture;
+TypedData_Get_Struct(self, struct Memory_Profiler_Capture, &Memory_Profiler_Capture_type, capture);
+
+return SIZET2NUM(capture->new_count);
+}
+
+// Get total free count across all classes
+static VALUE Memory_Profiler_Capture_free_count(VALUE self) {
+struct Memory_Profiler_Capture *capture;
+TypedData_Get_Struct(self, struct Memory_Profiler_Capture, &Memory_Profiler_Capture_type, capture);
+
+return SIZET2NUM(capture->free_count);
+}
+
+// Get total retained count (new - free) across all classes
+static VALUE Memory_Profiler_Capture_retained_count(VALUE self) {
+struct Memory_Profiler_Capture *capture;
+TypedData_Get_Struct(self, struct Memory_Profiler_Capture, &Memory_Profiler_Capture_type, capture);
+
+// Handle underflow if free_count > new_count (shouldn't happen but be safe)
+size_t retained = capture->free_count > capture->new_count ? 0 : capture->new_count - capture->free_count;
+return SIZET2NUM(retained);
+}
+
 void Init_Memory_Profiler_Capture(VALUE Memory_Profiler)
 {
 // Initialize event symbols
@@ -618,6 +631,9 @@ void Init_Memory_Profiler_Capture(VALUE Memory_Profiler)
 rb_define_method(Memory_Profiler_Capture, "[]", Memory_Profiler_Capture_aref, 1);
 rb_define_method(Memory_Profiler_Capture, "clear", Memory_Profiler_Capture_clear, 0);
 rb_define_method(Memory_Profiler_Capture, "statistics", Memory_Profiler_Capture_statistics, 0);
+rb_define_method(Memory_Profiler_Capture, "new_count", Memory_Profiler_Capture_new_count, 0);
+rb_define_method(Memory_Profiler_Capture, "free_count", Memory_Profiler_Capture_free_count, 0);
+rb_define_method(Memory_Profiler_Capture, "retained_count", Memory_Profiler_Capture_retained_count, 0);
 
 // Initialize Allocations class
 Init_Memory_Profiler_Allocations(Memory_Profiler);
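To make the NEWOBJ/FREEOBJ callback protocol above concrete, here is a hypothetical Ruby sketch (not taken from the gem's documentation). It assumes `Capture#track` is the Ruby-level binding of the C `track` function and accepts a class plus a block; the call shapes `call(klass, :newobj, nil)` and `call(klass, :freeobj, state)` are the ones shown in `process_newobj`/`process_freeobj`.

```ruby
require "memory/profiler" # assumed require path

capture = Memory::Profiler::Capture.new

# Hypothetical block form of Capture#track:
capture.track(String) do |klass, event, state|
  case event
  when :newobj
    # The value returned here is stored in the per-object states table and is
    # handed back as `state` when the same object is freed:
    {allocated_at: Process.clock_gettime(Process::CLOCK_MONOTONIC)}
  when :freeobj
    # `state` is the value returned by the :newobj branch for this object;
    # the extension only invokes this branch when a non-nil state was stored.
    elapsed = Process.clock_gettime(Process::CLOCK_MONOTONIC) - state[:allocated_at]
    warn "#{klass} instance lived #{elapsed.round(3)}s"
  end
end

capture.start
```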
data/ext/memory/profiler/events.c
CHANGED

@@ -17,9 +17,10 @@ struct Memory_Profiler_Events {
 // The VALUE wrapper for this struct (needed for write barriers).
 VALUE self;
 
-//
-struct Memory_Profiler_Queue
-
+// Double-buffered event queues (contains events from all Capture instances).
+struct Memory_Profiler_Queue queues[2];
+struct Memory_Profiler_Queue *available, *processing;
+
 // Postponed job handle for processing the queue.
 // Postponed job handles are an extremely limited resource, so we only register one global event queue.
 rb_postponed_job_handle_t postponed_job_handle;
@@ -51,8 +52,13 @@ static VALUE Memory_Profiler_Events_new(void) {
 // Store the VALUE wrapper for write barriers:
 events->self = self;
 
-// Initialize
-Memory_Profiler_Queue_initialize(&events->
+// Initialize both queues for double buffering:
+Memory_Profiler_Queue_initialize(&events->queues[0], sizeof(struct Memory_Profiler_Event));
+Memory_Profiler_Queue_initialize(&events->queues[1], sizeof(struct Memory_Profiler_Event));
+
+// Start with queues[0] available for incoming events, queues[1] for processing (initially empty):
+events->available = &events->queues[0];
+events->processing = &events->queues[1];
 
 // Pre-register the single postponed job for processing the queue:
 events->postponed_job_handle = rb_postponed_job_preregister(0,
@@ -88,82 +94,115 @@ struct Memory_Profiler_Events* Memory_Profiler_Events_instance(void) {
 return events;
 }
 
-//
-static void
-
-
-
-
-
+// Helper to mark events in a queue.
+static void Memory_Profiler_Events_mark_queue(struct Memory_Profiler_Queue *queue, int skip_none) {
+for (size_t i = 0; i < queue->count; i++) {
+struct Memory_Profiler_Event *event = Memory_Profiler_Queue_at(queue, i);
+
+// Skip already-processed events if requested:
+if (skip_none && event->type == MEMORY_PROFILER_EVENT_TYPE_NONE) continue;
 
-// Mark the Capture instance
+// Mark the Capture instance and class:
 rb_gc_mark_movable(event->capture);
 rb_gc_mark_movable(event->klass);
-rb_gc_mark_movable(event->allocations);
 
-// For NEWOBJ
-// For FREEOBJ
+// For NEWOBJ: mark the object to keep it alive until processor runs
+// For FREEOBJ: DON'T mark (object is being freed)
 if (event->type == MEMORY_PROFILER_EVENT_TYPE_NEWOBJ) {
 rb_gc_mark_movable(event->object);
 }
 }
 }
 
-// GC
-static void
+// GC mark callback - mark all VALUEs in both event queues.
+static void Memory_Profiler_Events_mark(void *ptr) {
 struct Memory_Profiler_Events *events = ptr;
 
-//
-
-
+// Mark all events in the available queue (receiving new events):
+Memory_Profiler_Events_mark_queue(events->available, 0);
+
+// Mark all events in the processing queue (currently being processed):
+Memory_Profiler_Events_mark_queue(events->processing, 1);
+}
+
+// Helper to compact events in a queue.
+static void Memory_Profiler_Events_compact_queue(struct Memory_Profiler_Queue *queue, int skip_none) {
+for (size_t i = 0; i < queue->count; i++) {
+struct Memory_Profiler_Event *event = Memory_Profiler_Queue_at(queue, i);
+
+// Skip already-processed events if requested:
+if (skip_none && event->type == MEMORY_PROFILER_EVENT_TYPE_NONE) continue;
 
 // Update all VALUEs if they moved during compaction:
 event->capture = rb_gc_location(event->capture);
 event->klass = rb_gc_location(event->klass);
-event->allocations = rb_gc_location(event->allocations);
 
-// For NEWOBJ
-// For FREEOBJ
+// For NEWOBJ: update object pointer if it moved
+// For FREEOBJ: DON'T update (object is being freed, pointer is stale)
 if (event->type == MEMORY_PROFILER_EVENT_TYPE_NEWOBJ) {
 event->object = rb_gc_location(event->object);
 }
 }
 }
 
+// GC compact callback - update all VALUEs in both event queues.
+static void Memory_Profiler_Events_compact(void *ptr) {
+struct Memory_Profiler_Events *events = ptr;
+
+// Update objects in the available queue:
+Memory_Profiler_Events_compact_queue(events->available, 0);
+
+// Update objects in the processing queue:
+Memory_Profiler_Events_compact_queue(events->processing, 1);
+}
+
 // GC free callback.
 static void Memory_Profiler_Events_free(void *ptr) {
 struct Memory_Profiler_Events *events = ptr;
-Memory_Profiler_Queue_free(&events->
+Memory_Profiler_Queue_free(&events->queues[0]);
+Memory_Profiler_Queue_free(&events->queues[1]);
 }
 
 // GC memsize callback.
 static size_t Memory_Profiler_Events_memsize(const void *ptr) {
 const struct Memory_Profiler_Events *events = ptr;
-return sizeof(struct Memory_Profiler_Events)
+return sizeof(struct Memory_Profiler_Events)
++ (events->queues[0].capacity * events->queues[0].element_size)
++ (events->queues[1].capacity * events->queues[1].element_size);
 }
 
-
+const char *Memory_Profiler_Event_Type_name(enum Memory_Profiler_Event_Type type) {
+switch (type) {
+case MEMORY_PROFILER_EVENT_TYPE_NEWOBJ:
+return "NEWOBJ";
+case MEMORY_PROFILER_EVENT_TYPE_FREEOBJ:
+return "FREEOBJ";
+default:
+return "NONE";
+}
+}
+
+// Enqueue an event to the available queue (can be called anytime, even during processing).
 int Memory_Profiler_Events_enqueue(
 enum Memory_Profiler_Event_Type type,
 VALUE capture,
 VALUE klass,
-VALUE allocations,
 VALUE object
 ) {
 struct Memory_Profiler_Events *events = Memory_Profiler_Events_instance();
-
-
+
+// Always enqueue to the available queue - it won't be touched during processing:
+struct Memory_Profiler_Event *event = Memory_Profiler_Queue_push(events->available);
 if (event) {
 event->type = type;
 
 // Use write barriers when storing VALUEs (required for RUBY_TYPED_WB_PROTECTED):
 RB_OBJ_WRITE(events->self, &event->capture, capture);
 RB_OBJ_WRITE(events->self, &event->klass, klass);
-RB_OBJ_WRITE(events->self, &event->allocations, allocations);
 RB_OBJ_WRITE(events->self, &event->object, object);
 
-
-
+if (DEBUG) fprintf(stderr, "Queued %s to available queue, size: %zu\n",
+Memory_Profiler_Event_Type_name(type), events->available->count);
 
 rb_postponed_job_trigger(events->postponed_job_handle);
 // Success:
@@ -195,11 +234,16 @@ static VALUE Memory_Profiler_Events_process_event_protected(VALUE arg) {
 static void Memory_Profiler_Events_process_queue(void *arg) {
 struct Memory_Profiler_Events *events = (struct Memory_Profiler_Events *)arg;
 
-
+// Swap the queues: available becomes processing, and the old processing queue (now empty) becomes available. This allows new events to continue enqueueing to the new available queue while we process.
+struct Memory_Profiler_Queue *queue_to_process = events->available;
+events->available = events->processing;
+events->processing = queue_to_process;
 
+if (DEBUG) fprintf(stderr, "Processing event queue: %zu events\n", events->processing->count);
+
 // Process all events in order (maintains NEWOBJ before FREEOBJ for same object):
-for (size_t i = 0; i < events->
-struct Memory_Profiler_Event *event = Memory_Profiler_Queue_at(
+for (size_t i = 0; i < events->processing->count; i++) {
+struct Memory_Profiler_Event *event = Memory_Profiler_Queue_at(events->processing, i);
 
 // Process event with rb_protect to catch any exceptions:
 int state = 0;
@@ -210,8 +254,14 @@ static void Memory_Profiler_Events_process_queue(void *arg) {
 rb_warning("Exception in event processing callback (caught and suppressed): %"PRIsVALUE, rb_errinfo());
 rb_set_errinfo(Qnil);
 }
+
+// Clear this event after processing to prevent marking stale data if GC runs:
+event->type = MEMORY_PROFILER_EVENT_TYPE_NONE;
+RB_OBJ_WRITE(events->self, &event->capture, Qnil);
+RB_OBJ_WRITE(events->self, &event->klass, Qnil);
+RB_OBJ_WRITE(events->self, &event->object, Qnil);
 }
 
-//
-Memory_Profiler_Queue_clear(
+// Clear the processing queue (which is now empty logically):
+Memory_Profiler_Queue_clear(events->processing);
 }
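The queue swap in `Memory_Profiler_Events_process_queue` is the core of the v1.1.11 fix. A minimal Ruby sketch of the same idea (an illustration under assumptions, not the gem's API): handlers always push onto the `available` buffer, and processing first swaps the two buffers, so events enqueued from within callbacks never mutate the queue being iterated.

```ruby
# Conceptual sketch of a double-buffered event queue.
class DoubleBufferedQueue
  def initialize
    @available = []   # receives new events, even while processing is underway
    @processing = []  # drained by the (postponed) processing job
  end

  def enqueue(event)
    @available << event
  end

  def process
    # Swap: the filled buffer becomes the processing buffer, the empty one
    # becomes the target for any events enqueued during processing.
    @available, @processing = @processing, @available
    @processing.each { |event| yield event }
    @processing.clear
  end
end

queue = DoubleBufferedQueue.new
queue.enqueue([:newobj, "String"])
queue.process { |event| p event }
```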
data/ext/memory/profiler/events.h
CHANGED

@@ -8,6 +8,7 @@
 
 // Event types
 enum Memory_Profiler_Event_Type {
+MEMORY_PROFILER_EVENT_TYPE_NONE = 0,
 MEMORY_PROFILER_EVENT_TYPE_NEWOBJ,
 MEMORY_PROFILER_EVENT_TYPE_FREEOBJ,
 };
@@ -22,10 +23,7 @@ struct Memory_Profiler_Event {
 // The class of the object:
 VALUE klass;
 
-// The
-VALUE allocations;
-
-// The object itself:
+// The object itself (for NEWOBJ and FREEOBJ):
 VALUE object;
 };
 
@@ -39,7 +37,6 @@ int Memory_Profiler_Events_enqueue(
 enum Memory_Profiler_Event_Type type,
 VALUE capture,
 VALUE klass,
-VALUE allocations,
 VALUE object
 );
 
data/readme.md
CHANGED

@@ -22,6 +22,19 @@ Please see the [project documentation](https://socketry.github.io/memory-profile
 
 Please see the [project releases](https://socketry.github.io/memory-profiler/releases/index) for all releases.
 
+### v1.1.11
+
+- Double buffer shared events queues to fix queue corruption.
+
+### v1.1.10
+
+- Added `Capture#new_count` - returns total number of allocations tracked across all classes.
+- Added `Capture#free_count` - returns total number of objects freed across all classes.
+- Added `Capture#retained_count` - returns retained object count (new\_count - free\_count).
+- **Critical:** Fixed GC crash during compaction caused by missing write barriers in event queue.
+- Fixed allocation/deallocation counts being inaccurate when objects are allocated during callbacks or freed after compaction.
+- `Capture#clear` now raises `RuntimeError` if called while capture is running. Call `stop()` before `clear()`.
+
 ### v1.1.9
 
 - More write barriers...
data/releases.md
CHANGED

@@ -1,5 +1,18 @@
 # Releases
 
+## v1.1.11
+
+- Double buffer shared events queues to fix queue corruption.
+
+## v1.1.10
+
+- Added `Capture#new_count` - returns total number of allocations tracked across all classes.
+- Added `Capture#free_count` - returns total number of objects freed across all classes.
+- Added `Capture#retained_count` - returns retained object count (new\_count - free\_count).
+- **Critical:** Fixed GC crash during compaction caused by missing write barriers in event queue.
+- Fixed allocation/deallocation counts being inaccurate when objects are allocated during callbacks or freed after compaction.
+- `Capture#clear` now raises `RuntimeError` if called while capture is running. Call `stop()` before `clear()`.
+
 ## v1.1.9
 
 - More write barriers...
data.tar.gz.sig
CHANGED

Binary file

metadata
CHANGED

metadata.gz.sig
CHANGED

Binary file