memory-profiler 1.4.0 → 1.5.1
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +4 -4
- checksums.yaml.gz.sig +0 -0
- data/ext/extconf.rb +1 -1
- data/ext/memory/profiler/capture.c +133 -199
- data/ext/memory/profiler/events.c +10 -4
- data/ext/memory/profiler/events.h +7 -5
- data/ext/memory/profiler/profiler.c +17 -6
- data/ext/memory/profiler/table.c +429 -0
- data/ext/memory/profiler/table.h +72 -0
- data/lib/memory/profiler/sampler.rb +24 -31
- data/lib/memory/profiler/version.rb +1 -1
- data/lib/memory/profiler.rb +0 -1
- data/readme.md +12 -13
- data/releases.md +12 -0
- data.tar.gz.sig +0 -0
- metadata +3 -2
- metadata.gz.sig +0 -0
- data/lib/memory/profiler/graph.rb +0 -369
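The headline change in this release is visible in the file list: the per-object state previously kept in an `st_table` keyed by `object_id` moves into a new custom object table (`table.c` +429, `table.h` +72) keyed by the object's address. The diff below shows only the call sites, not `table.c` itself, so the following is a minimal sketch of the table's shape inferred from those call sites (`entries`, `capacity`, `count`, and `entry->object/klass/data` all appear in the capture.c hunks); the hashing and probing strategy are assumptions for illustration only.

```c
#include <stdlib.h>
#include <ruby.h>

// Entry layout inferred from the capture.c call sites below.
struct Memory_Profiler_Object_Table_Entry {
	VALUE object; // Key: the tracked object itself; 0 marks an empty slot.
	VALUE klass;  // Class recorded at allocation time.
	VALUE data;   // User-defined state returned by the callback.
};

// Table layout inferred from capture.c (entries, capacity, count).
struct Memory_Profiler_Object_Table {
	struct Memory_Profiler_Object_Table_Entry *entries;
	size_t capacity;
	size_t count;
};

// Assumed hash: a VALUE is the object's address, and heap slots are
// at least 8-byte aligned, so the low bits carry no information.
static size_t Memory_Profiler_Object_Table_hash(VALUE object, size_t capacity) {
	return ((size_t)object >> 3) % capacity;
}

// Linear-probe insert (resizing and deletion tombstones elided).
struct Memory_Profiler_Object_Table_Entry *Memory_Profiler_Object_Table_insert(
	struct Memory_Profiler_Object_Table *table, VALUE object)
{
	size_t index = Memory_Profiler_Object_Table_hash(object, table->capacity);
	while (table->entries[index].object != 0) {
		index = (index + 1) % table->capacity;
	}
	table->entries[index].object = object;
	table->count += 1;
	return &table->entries[index];
}
```

Because the entries live in system `malloc` memory rather than on the Ruby heap, the table can be touched safely from GC event hooks, which is the point of the rewrite.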
checksums.yaml
CHANGED

@@ -1,7 +1,7 @@
 ---
 SHA256:
-  metadata.gz:
-  data.tar.gz:
+  metadata.gz: c1f0d854f80d796037192a81dfe2912291e0653510755e06d0a7e85e8b84a4ea
+  data.tar.gz: 11e5bbe54085baa3cc2c085c947b2ef9c74f3e85a44a59249ea2c5e41bc4a2bd
 SHA512:
-  metadata.gz:
-  data.tar.gz:
+  metadata.gz: '05866071c2c1a20e868d8ca0c11f9951595bac35b86e051863ecde8c11e581d7c92780826c4816c087764f9cdc078859402e6d718e272f6a337dc59ab69d2b63'
+  data.tar.gz: e070faca1aa563faff377a56cb7287e1dbf369b65dc534c623425ad33c83e7d938925a25f470821e2134848ed2db7adce94408736e0fead52bba6cbc07ec5a9d
checksums.yaml.gz.sig
CHANGED

Binary file
data/ext/extconf.rb
CHANGED

@@ -16,7 +16,7 @@ if ENV.key?("RUBY_DEBUG")
 	append_cflags(["-DRUBY_DEBUG", "-O0"])
 end
 
-$srcs = ["memory/profiler/profiler.c", "memory/profiler/capture.c", "memory/profiler/allocations.c", "memory/profiler/events.c"]
+$srcs = ["memory/profiler/profiler.c", "memory/profiler/capture.c", "memory/profiler/allocations.c", "memory/profiler/events.c", "memory/profiler/table.c"]
 $VPATH << "$(srcdir)/memory/profiler"
 
 # Check for required headers
data/ext/memory/profiler/capture.c
CHANGED

@@ -4,6 +4,7 @@
 #include "capture.h"
 #include "allocations.h"
 #include "events.h"
+#include "table.h"
 
 #include <ruby/debug.h>
 #include <ruby/st.h>
@@ -15,20 +16,10 @@ enum {
 };
 
 static VALUE Memory_Profiler_Capture = Qnil;
-static VALUE rb_mObjectSpace = Qnil;
-static ID id_id2ref = Qnil;
 
 // Event symbols:
 static VALUE sym_newobj, sym_freeobj;
 
-// State state stored in the central states table.
-// Maps object_id => state information.
-struct Memory_Profiler_Capture_State {
-	VALUE klass; // The class of the allocated object
-	VALUE data; // User-defined state from callback
-	VALUE allocations; // The Allocations wrapper for this class
-};
-
 // Main capture state (per-instance).
 struct Memory_Profiler_Capture {
 	// Master switch - is tracking active? (set by start/stop).
@@ -40,10 +31,9 @@ struct Memory_Profiler_Capture {
 	// Tracked classes: class => VALUE (wrapped Memory_Profiler_Capture_Allocations).
 	st_table *tracked;
 
-	//
-	//
-	
-	st_table *states;
+	// Custom object table: object (address) => state hash
+	// Uses system malloc (GC-safe), updates addresses during compaction
+	struct Memory_Profiler_Object_Table *states;
 
 	// Total number of allocations and frees seen since tracking started.
 	size_t new_count;
@@ -65,20 +55,6 @@ static int Memory_Profiler_Capture_tracked_mark(st_data_t key, st_data_t value, st_data_t arg) {
 	return ST_CONTINUE;
 }
 
-// GC mark callback for states table.
-static int Memory_Profiler_Capture_states_mark(st_data_t key, st_data_t value, st_data_t arg) {
-	// Key is object_id (Integer VALUE) - mark it as un-movable
-	rb_gc_mark((VALUE)key);
-	
-	// Value is struct Memory_Profiler_Capture_State* - mark its VALUEs
-	struct Memory_Profiler_Capture_State *state = (struct Memory_Profiler_Capture_State *)value;
-	rb_gc_mark(state->klass); // Mark class as un-movable
-	rb_gc_mark_movable(state->data); // State can move
-	rb_gc_mark_movable(state->allocations); // Allocations wrapper can move
-	
-	return ST_CONTINUE;
-}
-
 static void Memory_Profiler_Capture_mark(void *ptr) {
 	struct Memory_Profiler_Capture *capture = ptr;
@@ -86,16 +62,7 @@ static void Memory_Profiler_Capture_mark(void *ptr) {
 		st_foreach(capture->tracked, Memory_Profiler_Capture_tracked_mark, 0);
 	}
 
-	if (capture->states) {
-		st_foreach(capture->states, Memory_Profiler_Capture_states_mark, 0);
-	}
-}
-
-// Helper to free state states in the states table
-static int Memory_Profiler_Capture_states_free_callback(st_data_t key, st_data_t value, st_data_t arg) {
-	struct Memory_Profiler_Capture_State *state = (struct Memory_Profiler_Capture_State *)value;
-	xfree(state);
-	return ST_CONTINUE;
+	Memory_Profiler_Object_Table_mark(capture->states);
 }
 
 static void Memory_Profiler_Capture_free(void *ptr) {
@@ -106,9 +73,7 @@ static void Memory_Profiler_Capture_free(void *ptr) {
 	}
 
 	if (capture->states) {
-		
-		st_foreach(capture->states, Memory_Profiler_Capture_states_free_callback, 0);
-		st_free_table(capture->states);
+		Memory_Profiler_Object_Table_free(capture->states);
 	}
 
 	xfree(capture);
@@ -142,31 +107,6 @@ static int Memory_Profiler_Capture_tracked_update(st_data_t *key, st_data_t *value, st_data_t argp, int existing) {
 	return ST_CONTINUE;
 }
 
-// Foreach callback for states table compaction (iteration logic).
-static int Memory_Profiler_Capture_states_foreach(st_data_t key, st_data_t value, st_data_t argp, int error) {
-	return ST_REPLACE;
-}
-
-// Replace callback for states table compaction (update logic).
-static int Memory_Profiler_Capture_states_update(st_data_t *key, st_data_t *value, st_data_t argp, int existing) {
-	// Update VALUEs in the state if they moved
-	struct Memory_Profiler_Capture_State *state = (struct Memory_Profiler_Capture_State *)*value;
-	
-	VALUE old_state = state->data;
-	VALUE new_state = rb_gc_location(old_state);
-	if (old_state != new_state) {
-		state->data = new_state;
-	}
-	
-	VALUE old_allocations = state->allocations;
-	VALUE new_allocations = rb_gc_location(old_allocations);
-	if (old_allocations != new_allocations) {
-		state->allocations = new_allocations;
-	}
-	
-	return ST_CONTINUE;
-}
-
 static void Memory_Profiler_Capture_compact(void *ptr) {
 	struct Memory_Profiler_Capture *capture = ptr;
@@ -177,11 +117,9 @@ static void Memory_Profiler_Capture_compact(void *ptr) {
 		}
 	}
 
-	// Update
-	if (capture->states
-	
-		rb_raise(rb_eRuntimeError, "states modified during GC compaction");
-	}
+	// Update custom object table (system malloc, safe during GC)
+	if (capture->states) {
+		Memory_Profiler_Object_Table_compact(capture->states);
 	}
 }
@@ -216,7 +154,9 @@ const char *event_flag_name(rb_event_flag_t event_flag) {
 
 // Process a NEWOBJ event. All allocation tracking logic is here.
 // object_id parameter is the Integer object_id, NOT the raw object.
-static void Memory_Profiler_Capture_process_newobj(VALUE self, VALUE klass, VALUE object_id) {
+// Process a NEWOBJ event. All allocation tracking logic is here.
+// object parameter is the actual object being allocated.
+static void Memory_Profiler_Capture_process_newobj(VALUE self, VALUE klass, VALUE object) {
 	struct Memory_Profiler_Capture *capture;
 	TypedData_Get_Struct(self, struct Memory_Profiler_Capture, &Memory_Profiler_Capture_type, capture);
 
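The mark and compact hooks above now delegate to the table. Since `table.c` is not part of this excerpt, here is a plausible sketch of the compaction pass implied by the comment "updates addresses during compaction"; the re-bucketing of moved keys is glossed over, and the real implementation would need it, because the slot index is derived from the address.

```c
// Hypothetical sketch only: walk every slot and chase moved VALUEs with
// rb_gc_location. A moved key changes its hash, so the real table must
// also re-insert such entries at their new slots.
void Memory_Profiler_Object_Table_compact(struct Memory_Profiler_Object_Table *table) {
	for (size_t i = 0; i < table->capacity; i++) {
		struct Memory_Profiler_Object_Table_Entry *entry = &table->entries[i];
		if (entry->object == 0) continue;
		
		entry->object = rb_gc_location(entry->object);
		entry->klass = rb_gc_location(entry->klass);
		entry->data = rb_gc_location(entry->data);
	}
}
```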
@@ -249,52 +189,55 @@ static void Memory_Profiler_Capture_process_newobj(VALUE self, VALUE klass, VALUE object_id) {
 		RB_OBJ_WRITTEN(self, Qnil, allocations);
 	}
 
-	// Get state from callback (if present)
 	VALUE data = Qnil;
 	if (!NIL_P(record->callback)) {
 		data = rb_funcall(record->callback, rb_intern("call"), 3, klass, sym_newobj, Qnil);
 	}
 
-	
-	
-	
-	
-	state->allocations = allocations;
-	
-	st_insert(capture->states, (st_data_t)object_id, (st_data_t)state);
+	struct Memory_Profiler_Object_Table_Entry *entry = Memory_Profiler_Object_Table_insert(capture->states, object);
+	RB_OBJ_WRITTEN(self, Qnil, object);
+	RB_OBJ_WRITE(self, &entry->klass, klass);
+	RB_OBJ_WRITE(self, &entry->data, data);
 
-	
-	RB_OBJ_WRITTEN(self, Qnil, klass);
-	RB_OBJ_WRITTEN(self, Qnil, data);
-	RB_OBJ_WRITTEN(self, Qnil, allocations);
+	if (DEBUG) fprintf(stderr, "[NEWOBJ] Object inserted into table: %p\n", (void*)object);
 
 	// Resume the capture:
 	capture->paused -= 1;
 }
 
 // Process a FREEOBJ event. All deallocation tracking logic is here.
-//
-
-static void Memory_Profiler_Capture_process_freeobj(VALUE capture_value, VALUE klass, VALUE object_id) {
+// freeobj_data parameter is [state_hash, object] array from event handler.
+static void Memory_Profiler_Capture_process_freeobj(VALUE capture_value, VALUE unused_klass, VALUE object) {
 	struct Memory_Profiler_Capture *capture;
 	TypedData_Get_Struct(capture_value, struct Memory_Profiler_Capture, &Memory_Profiler_Capture_type, capture);
 
 	// Pause the capture to prevent infinite loop:
 	capture->paused += 1;
 
-	
-	
-	
-	
-	
+	struct Memory_Profiler_Object_Table_Entry *entry = Memory_Profiler_Object_Table_lookup(capture->states, object);
+	
+	if (!entry) {
+		if (DEBUG) fprintf(stderr, "[FREEOBJ] Object not found in table: %p\n", (void*)object);
+		goto done;
+	} else {
+		if (DEBUG) fprintf(stderr, "[FREEOBJ] Object found in table: %p\n", (void*)object);
+	}
+	
+	VALUE klass = entry->klass;
+	VALUE data = entry->data;
+	
+	// Look up allocations from tracked table:
+	st_data_t allocations_data;
+	if (!st_lookup(capture->tracked, (st_data_t)klass, &allocations_data)) {
+		// Class not tracked - shouldn't happen, but be defensive:
+		if (DEBUG) fprintf(stderr, "[FREEOBJ] Class not found in tracked: %p\n", (void*)klass);
 		goto done;
 	}
+	VALUE allocations = (VALUE)allocations_data;
+	
+	// Delete by entry pointer (faster - no second lookup!)
+	Memory_Profiler_Object_Table_delete_entry(capture->states, entry);
 
-	// Extract information from the state
-	struct Memory_Profiler_Capture_State *state = (struct Memory_Profiler_Capture_State *)state_data;
-	klass = state->klass; // Use class from state, not parameter
-	VALUE data = state->data;
-	VALUE allocations = state->allocations;
 	struct Memory_Profiler_Capture_Allocations *record = Memory_Profiler_Allocations_get(allocations);
 
 	// Increment global free count
@@ -307,10 +250,7 @@ static void Memory_Profiler_Capture_process_freeobj(VALUE capture_value, VALUE klass, VALUE object_id) {
 	if (!NIL_P(record->callback) && !NIL_P(data)) {
 		rb_funcall(record->callback, rb_intern("call"), 3, klass, sym_freeobj, data);
 	}
-	
-	// Free the state struct
-	xfree(state);
-	
+
 done:
 	// Resume the capture:
 	capture->paused -= 1;
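Both processors wrap their body in the same `paused` guard: the user callback runs arbitrary Ruby, and any allocation it performs would fire a fresh NEWOBJ event and recurse into the profiler. Isolated, the pattern looks like the sketch below; the helper name is hypothetical, and the shipped code inlines the increment/decrement as shown above.

```c
// Hypothetical helper illustrating the reentrancy guard used by
// process_newobj and process_freeobj: the event callback checks
// capture->paused and returns early, so callbacks cannot re-enter.
static void Memory_Profiler_Capture_with_paused(
	struct Memory_Profiler_Capture *capture,
	void (*body)(struct Memory_Profiler_Capture *))
{
	capture->paused += 1; // Event callback sees this and bails out.
	body(capture);
	capture->paused -= 1;
}
```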
@@ -387,22 +327,17 @@ static void Memory_Profiler_Capture_event_callback(VALUE self, void *ptr) {
 		if (capture->paused) return;
 
 		VALUE klass = rb_obj_class(object);
-		if (!klass) return;
-		
-		// Convert to object_id for storage (Integer VALUE).
-		// It's safe to unconditionally call rb_obj_id during NEWOBJ.
-		VALUE object_id = rb_obj_id(object);
 
-		Memory_Profiler_Events_enqueue(MEMORY_PROFILER_EVENT_TYPE_NEWOBJ, self, klass, object_id);
+		// Skip if klass is not a Class
+		if (rb_type(klass) != RUBY_T_CLASS) return;
+		
+		// Enqueue actual object (not object_id) - queue retains it until processed
+		// Ruby 3.5 compatible: no need for FL_SEEN_OBJ_ID or rb_obj_id
+		if (DEBUG) fprintf(stderr, "[NEWOBJ] Enqueuing event for object: %p\n", (void*)object);
+		Memory_Profiler_Events_enqueue(MEMORY_PROFILER_EVENT_TYPE_NEWOBJ, self, klass, object);
 	} else if (event_flag == RUBY_INTERNAL_EVENT_FREEOBJ) {
-		
-		
-		// Convert to object_id for storage (Integer VALUE).
-		// It's only safe to call rb_obj_id if the object already has an object ID.
-		VALUE object_id = rb_obj_id(object);
-		
-		Memory_Profiler_Events_enqueue(MEMORY_PROFILER_EVENT_TYPE_FREEOBJ, self, Qnil, object_id);
-	}
+		if (DEBUG) fprintf(stderr, "[FREEOBJ] Enqueuing event for object: %p\n", (void*)object);
+		Memory_Profiler_Events_enqueue(MEMORY_PROFILER_EVENT_TYPE_FREEOBJ, self, Qnil, object);
 	}
 }
@@ -421,11 +356,11 @@ static VALUE Memory_Profiler_Capture_alloc(VALUE klass) {
 		rb_raise(rb_eRuntimeError, "Failed to initialize tracked hash table");
 	}
 
-	// Initialize
-	capture->states =
-	
+	// Initialize custom object table (uses system malloc, GC-safe)
+	capture->states = Memory_Profiler_Object_Table_new(1024);
 	if (!capture->states) {
-		
+		st_free_table(capture->tracked);
+		rb_raise(rb_eRuntimeError, "Failed to initialize object table");
 	}
 
 	// Initialize allocation tracking counters
@@ -590,13 +525,13 @@ static VALUE Memory_Profiler_Capture_clear(VALUE self) {
 		rb_raise(rb_eRuntimeError, "Cannot clear while capture is running - call stop() first!");
 	}
 
-	// Reset all counts to 0 (don't free, just reset)
+	// Reset all counts to 0 (don't free, just reset):
 	st_foreach(capture->tracked, Memory_Profiler_Capture_tracked_clear, 0);
 
-	// Clear
+	// Clear custom object table by recreating it
 	if (capture->states) {
-		
-		
+		Memory_Profiler_Object_Table_free(capture->states);
+		capture->states = Memory_Profiler_Object_Table_new(1024);
 	}
 
 	// Reset allocation tracking counters
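`alloc` creates the table with room for 1024 entries, and `clear` recreates it the same way instead of emptying it in place. Given the "uses system malloc (GC-safe)" comment, the constructor presumably looks something like this sketch (hypothetical, since `table.c` is not shown in this excerpt), reusing the struct sketched earlier:

```c
#include <stdlib.h>

// Hypothetical constructor matching the call sites above: plain calloc,
// no Ruby-heap allocation, so it cannot trigger GC and is safe to use
// around allocation/free event processing. Returns NULL on failure,
// which the caller above turns into a RuntimeError.
struct Memory_Profiler_Object_Table *Memory_Profiler_Object_Table_new(size_t capacity) {
	struct Memory_Profiler_Object_Table *table = calloc(1, sizeof(*table));
	if (!table) return NULL;
	
	table->entries = calloc(capacity, sizeof(table->entries[0]));
	if (!table->entries) {
		free(table);
		return NULL;
	}
	
	table->capacity = capacity;
	table->count = 0;
	return table;
}
```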
@@ -629,35 +564,72 @@ static VALUE Memory_Profiler_Capture_each(VALUE self) {
 	return self;
 }
 
-// Struct for filtering states
-struct
-	VALUE
+// Struct for filtering states during each_object iteration
+struct Memory_Profiler_Each_Object_Arguments {
+	VALUE self;
+	
+	// The allocations wrapper to filter by (Qnil = no filter).
+	VALUE allocations;
 };
 
-//
-static
-
-	struct
-	struct
+// Cleanup function to ensure table is made weak again
+static VALUE Memory_Profiler_Capture_each_object_ensure(VALUE arg) {
+	struct Memory_Profiler_Each_Object_Arguments *arguments = (struct Memory_Profiler_Each_Object_Arguments *)arg;
+	struct Memory_Profiler_Capture *capture;
+	TypedData_Get_Struct(arguments->self, struct Memory_Profiler_Capture, &Memory_Profiler_Capture_type, capture);
 
-	//
-	
-	return ST_CONTINUE;
-}
+	// Make table weak again
+	Memory_Profiler_Object_Table_decrement_strong(capture->states);
 
-	
-	
+	return Qnil;
+}
+
+// Main iteration function
+static VALUE Memory_Profiler_Capture_each_object_body(VALUE arg) {
+	struct Memory_Profiler_Each_Object_Arguments *arguments = (struct Memory_Profiler_Each_Object_Arguments *)arg;
+	struct Memory_Profiler_Capture *capture;
+	TypedData_Get_Struct(arguments->self, struct Memory_Profiler_Capture, &Memory_Profiler_Capture_type, capture);
 
-	//
-	
+	// Iterate custom object table entries
+	if (capture->states) {
+		if (DEBUG) fprintf(stderr, "[ITER] Iterating table, capacity=%zu, count=%zu\n", capture->states->capacity, capture->states->count);
+		
+		for (size_t i = 0; i < capture->states->capacity; i++) {
+			struct Memory_Profiler_Object_Table_Entry *entry = &capture->states->entries[i];
+			
+			// Skip empty or deleted slots (0 = not set)
+			if (entry->object == 0) {
+				continue;
+			}
+			
+			// Look up allocations from klass
+			st_data_t allocations_data;
+			VALUE allocations = Qnil;
+			if (st_lookup(capture->tracked, (st_data_t)entry->klass, &allocations_data)) {
+				allocations = (VALUE)allocations_data;
+			}
+			
+			// Filter by allocations if specified
+			if (!NIL_P(arguments->allocations)) {
+				if (allocations != arguments->allocations) continue;
+			}
+			
+			rb_yield_values(2, entry->object, allocations);
+		}
+	}
 
-	return
+	return arguments->self;
 }
 
-// Iterate over tracked
+// Iterate over tracked object IDs, optionally filtered by class
 // Called as:
-//   capture.each_object(String) { |
-//   capture.each_object { |
+//   capture.each_object(String) { |object_id, state| ... } # Specific class
+//   capture.each_object { |object_id, state| ... } // All objects
+//
+// Yields object_id as Integer. Caller can:
+//   - Format as hex: "0x%x" % object_id
+//   - Convert to object with ObjectSpace._id2ref (may raise RangeError if recycled)
+// Future-proof for Ruby 3.5 where _id2ref is deprecated
 static VALUE Memory_Profiler_Capture_each_object(int argc, VALUE *argv, VALUE self) {
 	struct Memory_Profiler_Capture *capture;
 	TypedData_Get_Struct(self, struct Memory_Profiler_Capture, &Memory_Profiler_Capture_type, capture);
@@ -667,10 +639,10 @@ static VALUE Memory_Profiler_Capture_each_object(int argc, VALUE *argv, VALUE self) {
 
 	RETURN_ENUMERATOR(self, argc, argv);
 
-	//
-	
-	
-	
+	// Make table strong so objects won't be collected during iteration
+	Memory_Profiler_Object_Table_increment_strong(capture->states);
+	
+	// Process all pending events and run GC to clean up stale objects:
 	Memory_Profiler_Events_process_all();
 
 	// If class provided, look up its allocations wrapper
@@ -681,25 +653,22 @@ static VALUE Memory_Profiler_Capture_each_object(int argc, VALUE *argv, VALUE self) {
 		allocations = (VALUE)allocations_data;
 	} else {
 		// Class not tracked - nothing to iterate
-		
-		rb_gc_enable();
-		}
+		Memory_Profiler_Object_Table_decrement_strong(capture->states);
 		return self;
 	}
 	}
 
-	//
-	struct
-	
-	
-	
-	}
-	
-	if (RTEST(was_enabled)) {
-		rb_gc_enable();
-	}
+	// Setup arguments for iteration
+	struct Memory_Profiler_Each_Object_Arguments arguments = {
+		.self = self,
+		.allocations = allocations
+	};
 
-	
+	// Use rb_ensure to guarantee cleanup even if exception is raised
+	return rb_ensure(
+		Memory_Profiler_Capture_each_object_body, (VALUE)&arguments,
+		Memory_Profiler_Capture_each_object_ensure, (VALUE)&arguments
+	);
 }
 
 // Get allocations for a specific class
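`each_object` brackets the iteration with `increment_strong`/`decrement_strong` and uses `rb_ensure` so the decrement happens even if the block raises. The natural reading is that the table is weak by default (dead objects simply drop out of it via FREEOBJ events) and is pinned strong only while user code may hold yielded references. A hypothetical sketch of how the strong count could interact with the mark hook, assuming a `strong` counter field that this excerpt does not show:

```c
// Hypothetical: entries are treated as GC roots only while some caller
// (such as each_object) holds the table strong.
void Memory_Profiler_Object_Table_increment_strong(struct Memory_Profiler_Object_Table *table) {
	table->strong += 1;
}

void Memory_Profiler_Object_Table_decrement_strong(struct Memory_Profiler_Object_Table *table) {
	table->strong -= 1;
}

void Memory_Profiler_Object_Table_mark(struct Memory_Profiler_Object_Table *table) {
	if (!table) return;
	
	for (size_t i = 0; i < table->capacity; i++) {
		struct Memory_Profiler_Object_Table_Entry *entry = &table->entries[i];
		if (entry->object == 0) continue;
		
		// klass and data are always kept alive through the table; the
		// object key itself is only marked while the table is strong,
		// which is what makes the table weak the rest of the time.
		rb_gc_mark_movable(entry->klass);
		rb_gc_mark_movable(entry->data);
		if (table->strong > 0) rb_gc_mark_movable(entry->object);
	}
}
```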
@@ -721,23 +690,6 @@ struct Memory_Profiler_Allocations_Statistics {
 	VALUE per_class_counts;
 };
 
-// Iterator callback to count states per class (from central states table)
-static int Memory_Profiler_Capture_count_states(st_data_t key, st_data_t value, st_data_t argument) {
-	struct Memory_Profiler_Allocations_Statistics *statistics = (struct Memory_Profiler_Allocations_Statistics *)argument;
-	struct Memory_Profiler_Capture_State *state = (struct Memory_Profiler_Capture_State *)value;
-	VALUE klass = state->klass;
-	
-	// Increment total count
-	statistics->total_tracked_objects++;
-	
-	// Increment per-class count
-	VALUE current_count = rb_hash_lookup(statistics->per_class_counts, klass);
-	size_t count = NIL_P(current_count) ? 1 : NUM2SIZET(current_count) + 1;
-	rb_hash_aset(statistics->per_class_counts, klass, SIZET2NUM(count));
-	
-	return ST_CONTINUE;
-}
-
 // Get internal statistics for debugging
 // Returns hash with internal state sizes
 static VALUE Memory_Profiler_Capture_statistics(VALUE self) {
@@ -749,22 +701,9 @@ static VALUE Memory_Profiler_Capture_statistics(VALUE self) {
 	// Tracked classes count
 	rb_hash_aset(statistics, ID2SYM(rb_intern("tracked_count")), SIZET2NUM(capture->tracked->num_entries));
 
-	//
-	size_t
-	rb_hash_aset(statistics, ID2SYM(rb_intern("
-	
-	// Count states entries for each class (iterate central states table)
-	struct Memory_Profiler_Allocations_Statistics allocations_statistics = {
-		.total_tracked_objects = 0,
-		.per_class_counts = rb_hash_new()
-	};
-	
-	if (capture->states) {
-		st_foreach(capture->states, Memory_Profiler_Capture_count_states, (st_data_t)&allocations_statistics);
-	}
-	
-	rb_hash_aset(statistics, ID2SYM(rb_intern("total_tracked_objects")), SIZET2NUM(allocations_statistics.total_tracked_objects));
-	rb_hash_aset(statistics, ID2SYM(rb_intern("tracked_objects_per_class")), allocations_statistics.per_class_counts);
+	// Custom object table size
+	size_t states_size = capture->states ? Memory_Profiler_Object_Table_size(capture->states) : 0;
+	rb_hash_aset(statistics, ID2SYM(rb_intern("object_table_size")), SIZET2NUM(states_size));
 
 	return statistics;
 }
@@ -822,11 +761,6 @@ void Init_Memory_Profiler_Capture(VALUE Memory_Profiler)
 	rb_define_method(Memory_Profiler_Capture, "free_count", Memory_Profiler_Capture_free_count, 0);
 	rb_define_method(Memory_Profiler_Capture, "retained_count", Memory_Profiler_Capture_retained_count, 0);
 
-	// Cache ObjectSpace module for _id2ref calls
-	rb_mObjectSpace = rb_const_get(rb_cObject, rb_intern("ObjectSpace"));
-	rb_gc_register_mark_object(rb_mObjectSpace);
-	id_id2ref = rb_intern("_id2ref");
-
 	// Initialize Allocations class
 	Init_Memory_Profiler_Allocations(Memory_Profiler);
 }
data/ext/memory/profiler/events.c
CHANGED

@@ -104,7 +104,10 @@ static void Memory_Profiler_Events_mark_queue(struct Memory_Profiler_Queue *queue) {
 
 		rb_gc_mark_movable(event->capture);
 		rb_gc_mark_movable(event->klass);
-		rb_gc_mark_movable(event->object);
+		
+		if (event->type == MEMORY_PROFILER_EVENT_TYPE_NEWOBJ) {
+			rb_gc_mark_movable(event->object);
+		}
 	}
 }
@@ -129,7 +132,10 @@ static void Memory_Profiler_Events_compact_queue(struct Memory_Profiler_Queue *queue) {
 
 		event->capture = rb_gc_location(event->capture);
 		event->klass = rb_gc_location(event->klass);
-		event->object = rb_gc_location(event->object);
+		
+		if (event->type == MEMORY_PROFILER_EVENT_TYPE_NEWOBJ) {
+			event->object = rb_gc_location(event->object);
+		}
 	}
 }
@@ -175,7 +181,7 @@ int Memory_Profiler_Events_enqueue(
 	enum Memory_Profiler_Event_Type type,
 	VALUE capture,
 	VALUE klass,
-	VALUE object_id
+	VALUE object
 ) {
 	struct Memory_Profiler_Events *events = Memory_Profiler_Events_instance();
@@ -187,7 +193,7 @@ int Memory_Profiler_Events_enqueue(
 	// Use write barriers when storing VALUEs (required for RUBY_TYPED_WB_PROTECTED):
 	RB_OBJ_WRITE(events->self, &event->capture, capture);
 	RB_OBJ_WRITE(events->self, &event->klass, klass);
-	RB_OBJ_WRITE(events->self, &event->object, object_id);
+	RB_OBJ_WRITE(events->self, &event->object, object);
 
 	if (DEBUG) fprintf(stderr, "Queued %s to available queue, size: %zu\n",
 		Memory_Profiler_Event_Type_name(type), events->available->count);
data/ext/memory/profiler/events.h
CHANGED

@@ -20,11 +20,10 @@ struct Memory_Profiler_Event {
 	// Which Capture instance this event belongs to:
 	VALUE capture;
 
-	// The class of the object:
+	// The class of the allocated object (Qnil for FREEOBJ):
 	VALUE klass;
 
-	// The
-	// We store object_id to avoid referencing objects that should be freed.
+	// The object pointer being alllocated or freed.
 	VALUE object;
 };
@@ -33,13 +32,16 @@ struct Memory_Profiler_Events;
 struct Memory_Profiler_Events* Memory_Profiler_Events_instance(void);
 
 // Enqueue an event to the global queue.
-// object parameter
+// object parameter semantics:
+// - NEWOBJ: the actual object being allocated (queue retains it)
+// - FREEOBJ: Array with state data for postponed processing
 // Returns non-zero on success, zero on failure.
+// Ruby 3.5 compatible: no FL_SEEN_OBJ_ID or object_id needed
 int Memory_Profiler_Events_enqueue(
 	enum Memory_Profiler_Event_Type type,
 	VALUE capture,
 	VALUE klass,
-	VALUE object_id
+	VALUE object
 );
 
 // Process all queued events immediately (flush the queue)
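The header documents the enqueue contract: non-zero on success, zero on failure. The capture.c call sites in this diff do not appear to check the result; a defensive caller would look like this (illustrative only, mirroring the NEWOBJ call site shown above):

```c
// Illustrative call with the documented return value checked.
if (!Memory_Profiler_Events_enqueue(MEMORY_PROFILER_EVENT_TYPE_NEWOBJ, self, klass, object)) {
	if (DEBUG) fprintf(stderr, "Failed to enqueue NEWOBJ event for %p!\n", (void*)object);
}
```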
data/ext/memory/profiler/profiler.c
CHANGED

@@ -3,15 +3,26 @@
 
 #include "capture.h"
 
+// Return the memory address of an object as a hex string
+// This matches the format used by ObjectSpace.dump_all
+static VALUE Memory_Profiler_address_of(VALUE module, VALUE object) {
+	char buffer[32];
+	snprintf(buffer, sizeof(buffer), "0x%lx", (unsigned long)object);
+	return rb_str_new_cstr(buffer);
+}
+
 void Init_Memory_Profiler(void)
 {
 #ifdef HAVE_RB_EXT_RACTOR_SAFE
-	
+	rb_ext_ractor_safe(true);
 #endif
-	
-	
-	
-	
-	
+
+	VALUE Memory = rb_const_get(rb_cObject, rb_intern("Memory"));
+	VALUE Memory_Profiler = rb_define_module_under(Memory, "Profiler");
+	
+	// Add Memory::Profiler.address_of(object) module function:
+	rb_define_module_function(Memory_Profiler, "address_of", Memory_Profiler_address_of, 1);
+	
+	Init_Memory_Profiler_Capture(Memory_Profiler);
 }
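With the cached `ObjectSpace._id2ref` lookup removed in the capture.c Init hunk above, the new `Memory::Profiler.address_of` module function becomes the supported way to label a live object: it formats the object's address the same way `ObjectSpace.dump_all` does, so profiler output can be correlated with heap dumps. A hypothetical C-level usage, equivalent to calling `Memory::Profiler.address_of(object)` from Ruby:

```c
// Hypothetical harness: resolve the module and call address_of through
// the public method table, exactly as Ruby code would.
static VALUE demonstrate_address_of(VALUE object) {
	VALUE Memory = rb_const_get(rb_cObject, rb_intern("Memory"));
	VALUE Memory_Profiler = rb_const_get(Memory, rb_intern("Profiler"));
	
	// Returns a String such as "0x7f8e5d0a4b10" - the same address that
	// appears in ObjectSpace.dump_all output for this object.
	return rb_funcall(Memory_Profiler, rb_intern("address_of"), 1, object);
}
```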