memory-profiler 1.4.0 → 1.5.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +4 -4
- checksums.yaml.gz.sig +0 -0
- data/ext/extconf.rb +1 -1
- data/ext/memory/profiler/capture.c +119 -198
- data/ext/memory/profiler/events.c +10 -4
- data/ext/memory/profiler/events.h +7 -5
- data/ext/memory/profiler/profiler.c +17 -6
- data/ext/memory/profiler/table.c +343 -0
- data/ext/memory/profiler/table.h +73 -0
- data/lib/memory/profiler/sampler.rb +23 -30
- data/lib/memory/profiler/version.rb +1 -1
- data/lib/memory/profiler.rb +0 -1
- data/readme.md +8 -4
- data/releases.md +8 -0
- data.tar.gz.sig +0 -0
- metadata +3 -2
- metadata.gz.sig +0 -0
- data/lib/memory/profiler/graph.rb +0 -369
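
Of the files above, table.c and table.h are new in 1.5.0 and are not reproduced in the extract below. Their interface can be inferred from how capture.c calls into them; the sketch that follows is only that inference (field order, return types, and exact signatures in the real table.h may differ):

#include <ruby.h>

// Inferred from the calls in capture.c below; not the verbatim table.h.
struct Memory_Profiler_Object_Table_Entry {
	VALUE object;       // Tracked object (0 marks an empty slot).
	VALUE klass;        // Class recorded at allocation time.
	VALUE data;         // User-defined state returned by the callback.
	VALUE allocations;  // Wrapped Memory_Profiler_Capture_Allocations.
};

struct Memory_Profiler_Object_Table {
	size_t capacity;
	size_t count;
	struct Memory_Profiler_Object_Table_Entry *entries;
};

struct Memory_Profiler_Object_Table *Memory_Profiler_Object_Table_new(size_t capacity);
void Memory_Profiler_Object_Table_free(struct Memory_Profiler_Object_Table *table);
size_t Memory_Profiler_Object_Table_size(struct Memory_Profiler_Object_Table *table);

// Insert/lookup are keyed by the object's address; delete takes the entry
// pointer so no second lookup is needed.
struct Memory_Profiler_Object_Table_Entry *Memory_Profiler_Object_Table_insert(struct Memory_Profiler_Object_Table *table, VALUE object);
struct Memory_Profiler_Object_Table_Entry *Memory_Profiler_Object_Table_lookup(struct Memory_Profiler_Object_Table *table, VALUE object);
void Memory_Profiler_Object_Table_delete_entry(struct Memory_Profiler_Object_Table *table, struct Memory_Profiler_Object_Table_Entry *entry);

// GC integration: mark live references, update addresses after compaction,
// and temporarily pin entries while each_object iterates.
void Memory_Profiler_Object_Table_mark(struct Memory_Profiler_Object_Table *table);
void Memory_Profiler_Object_Table_compact(struct Memory_Profiler_Object_Table *table);
void Memory_Profiler_Object_Table_increment_strong(struct Memory_Profiler_Object_Table *table);
void Memory_Profiler_Object_Table_decrement_strong(struct Memory_Profiler_Object_Table *table);

Entries are keyed by the object's address rather than its object_id, which is what lets capture.c drop the ObjectSpace._id2ref machinery in the hunks below.
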
checksums.yaml
CHANGED
@@ -1,7 +1,7 @@
 ---
 SHA256:
-  metadata.gz:
-  data.tar.gz:
+  metadata.gz: b936617787e483bcf790aee25bb224bf10a199f431dbb8ac51eff005504a44d4
+  data.tar.gz: 587f38cef39c639c9d9e2efbffdd325cffdfbd3000dddbca46f5aba895a66a21
 SHA512:
-  metadata.gz:
-  data.tar.gz:
+  metadata.gz: c483112cf1dd1c82ea679afe3b86007bf20b2674558ac676fc2c66cd07f651787a09198b9f49fb3dd28f1f9b64865dfac9cc249d91b6b819e3849e0cc9c9ab0b
+  data.tar.gz: 0de0b5d28eb0f5af2b72ec0aa44ceaf8d0401822949ee4512f922b6caa730e1d69a6b2997fac633140168b3b8de93bdbf4c76223ae6ef789520ae5566ef23f08

checksums.yaml.gz.sig
CHANGED
Binary file

data/ext/extconf.rb
CHANGED
@@ -16,7 +16,7 @@ if ENV.key?("RUBY_DEBUG")
 	append_cflags(["-DRUBY_DEBUG", "-O0"])
 end
 
-$srcs = ["memory/profiler/profiler.c", "memory/profiler/capture.c", "memory/profiler/allocations.c", "memory/profiler/events.c"]
+$srcs = ["memory/profiler/profiler.c", "memory/profiler/capture.c", "memory/profiler/allocations.c", "memory/profiler/events.c", "memory/profiler/table.c"]
 $VPATH << "$(srcdir)/memory/profiler"
 
 # Check for required headers

data/ext/memory/profiler/capture.c
CHANGED

@@ -4,6 +4,7 @@
 #include "capture.h"
 #include "allocations.h"
 #include "events.h"
+#include "table.h"
 
 #include <ruby/debug.h>
 #include <ruby/st.h>

@@ -15,19 +16,10 @@ enum {
 };
 
 static VALUE Memory_Profiler_Capture = Qnil;
-static VALUE rb_mObjectSpace = Qnil;
-static ID id_id2ref = Qnil;
 
 // Event symbols:
 static VALUE sym_newobj, sym_freeobj;
 
-// State state stored in the central states table.
-// Maps object_id => state information.
-struct Memory_Profiler_Capture_State {
-	VALUE klass; // The class of the allocated object
-	VALUE data; // User-defined state from callback
-	VALUE allocations; // The Allocations wrapper for this class
-};
 
 // Main capture state (per-instance).
 struct Memory_Profiler_Capture {

@@ -40,10 +32,9 @@ struct Memory_Profiler_Capture {
 	// Tracked classes: class => VALUE (wrapped Memory_Profiler_Capture_Allocations).
 	st_table *tracked;
 
-	//
-	//
-
-	st_table *states;
+	// Custom object table: object (address) => state hash
+	// Uses system malloc (GC-safe), updates addresses during compaction
+	struct Memory_Profiler_Object_Table *states;
 
 	// Total number of allocations and frees seen since tracking started.
 	size_t new_count;

@@ -65,20 +56,6 @@ static int Memory_Profiler_Capture_tracked_mark(st_data_t key, st_data_t value,
 	return ST_CONTINUE;
 }
 
-// GC mark callback for states table.
-static int Memory_Profiler_Capture_states_mark(st_data_t key, st_data_t value, st_data_t arg) {
-	// Key is object_id (Integer VALUE) - mark it as un-movable
-	rb_gc_mark((VALUE)key);
-
-	// Value is struct Memory_Profiler_Capture_State* - mark its VALUEs
-	struct Memory_Profiler_Capture_State *state = (struct Memory_Profiler_Capture_State *)value;
-	rb_gc_mark(state->klass); // Mark class as un-movable
-	rb_gc_mark_movable(state->data); // State can move
-	rb_gc_mark_movable(state->allocations); // Allocations wrapper can move
-
-	return ST_CONTINUE;
-}
-
 static void Memory_Profiler_Capture_mark(void *ptr) {
 	struct Memory_Profiler_Capture *capture = ptr;
 

@@ -86,16 +63,7 @@ static void Memory_Profiler_Capture_mark(void *ptr) {
 		st_foreach(capture->tracked, Memory_Profiler_Capture_tracked_mark, 0);
 	}
 
-
-		st_foreach(capture->states, Memory_Profiler_Capture_states_mark, 0);
-	}
-}
-
-// Helper to free state states in the states table
-static int Memory_Profiler_Capture_states_free_callback(st_data_t key, st_data_t value, st_data_t arg) {
-	struct Memory_Profiler_Capture_State *state = (struct Memory_Profiler_Capture_State *)value;
-	xfree(state);
-	return ST_CONTINUE;
+	Memory_Profiler_Object_Table_mark(capture->states);
 }
 
 static void Memory_Profiler_Capture_free(void *ptr) {

@@ -106,9 +74,7 @@ static void Memory_Profiler_Capture_free(void *ptr) {
 	}
 
 	if (capture->states) {
-
-		st_foreach(capture->states, Memory_Profiler_Capture_states_free_callback, 0);
-		st_free_table(capture->states);
+		Memory_Profiler_Object_Table_free(capture->states);
 	}
 
 	xfree(capture);

@@ -142,31 +108,6 @@ static int Memory_Profiler_Capture_tracked_update(st_data_t *key, st_data_t *val
 	return ST_CONTINUE;
 }
 
-// Foreach callback for states table compaction (iteration logic).
-static int Memory_Profiler_Capture_states_foreach(st_data_t key, st_data_t value, st_data_t argp, int error) {
-	return ST_REPLACE;
-}
-
-// Replace callback for states table compaction (update logic).
-static int Memory_Profiler_Capture_states_update(st_data_t *key, st_data_t *value, st_data_t argp, int existing) {
-	// Update VALUEs in the state if they moved
-	struct Memory_Profiler_Capture_State *state = (struct Memory_Profiler_Capture_State *)*value;
-
-	VALUE old_state = state->data;
-	VALUE new_state = rb_gc_location(old_state);
-	if (old_state != new_state) {
-		state->data = new_state;
-	}
-
-	VALUE old_allocations = state->allocations;
-	VALUE new_allocations = rb_gc_location(old_allocations);
-	if (old_allocations != new_allocations) {
-		state->allocations = new_allocations;
-	}
-
-	return ST_CONTINUE;
-}
-
 static void Memory_Profiler_Capture_compact(void *ptr) {
 	struct Memory_Profiler_Capture *capture = ptr;
 

@@ -177,11 +118,9 @@ static void Memory_Profiler_Capture_compact(void *ptr) {
 		}
 	}
 
-	// Update
-	if (capture->states
-
-		rb_raise(rb_eRuntimeError, "states modified during GC compaction");
-	}
+	// Update custom object table (system malloc, safe during GC)
+	if (capture->states) {
+		Memory_Profiler_Object_Table_compact(capture->states);
 	}
 }
 

@@ -216,7 +155,9 @@ const char *event_flag_name(rb_event_flag_t event_flag) {
 
 // Process a NEWOBJ event. All allocation tracking logic is here.
 // object_id parameter is the Integer object_id, NOT the raw object.
-
+// Process a NEWOBJ event. All allocation tracking logic is here.
+// object parameter is the actual object being allocated.
+static void Memory_Profiler_Capture_process_newobj(VALUE self, VALUE klass, VALUE object) {
 	struct Memory_Profiler_Capture *capture;
 	TypedData_Get_Struct(self, struct Memory_Profiler_Capture, &Memory_Profiler_Capture_type, capture);
 

@@ -249,52 +190,48 @@ static void Memory_Profiler_Capture_process_newobj(VALUE self, VALUE klass, VALU
 		RB_OBJ_WRITTEN(self, Qnil, allocations);
 	}
 
-	// Get state from callback (if present)
 	VALUE data = Qnil;
 	if (!NIL_P(record->callback)) {
 		data = rb_funcall(record->callback, rb_intern("call"), 3, klass, sym_newobj, Qnil);
 	}
 
-
-
-
-
-
+	struct Memory_Profiler_Object_Table_Entry *entry = Memory_Profiler_Object_Table_insert(capture->states, object);
+	RB_OBJ_WRITTEN(self, Qnil, object);
+	RB_OBJ_WRITE(self, &entry->klass, klass);
+	RB_OBJ_WRITE(self, &entry->data, data);
+	RB_OBJ_WRITE(self, &entry->allocations, allocations);
 
-
-
-	// Write barriers for VALUEs we're storing
-	RB_OBJ_WRITTEN(self, Qnil, klass);
-	RB_OBJ_WRITTEN(self, Qnil, data);
-	RB_OBJ_WRITTEN(self, Qnil, allocations);
+	if (DEBUG) fprintf(stderr, "[NEWOBJ] Object inserted into table: %p\n", (void*)object);
 
 	// Resume the capture:
 	capture->paused -= 1;
 }
 
 // Process a FREEOBJ event. All deallocation tracking logic is here.
-//
-
-static void Memory_Profiler_Capture_process_freeobj(VALUE capture_value, VALUE klass, VALUE object_id) {
+// freeobj_data parameter is [state_hash, object] array from event handler.
+static void Memory_Profiler_Capture_process_freeobj(VALUE capture_value, VALUE unused_klass, VALUE object) {
 	struct Memory_Profiler_Capture *capture;
 	TypedData_Get_Struct(capture_value, struct Memory_Profiler_Capture, &Memory_Profiler_Capture_type, capture);
 
 	// Pause the capture to prevent infinite loop:
 	capture->paused += 1;
 
-
-
-
-
-		// This object_id is not in our states table, so we're not tracking it
+	struct Memory_Profiler_Object_Table_Entry *entry = Memory_Profiler_Object_Table_lookup(capture->states, object);
+
+	if (!entry) {
+		if (DEBUG) fprintf(stderr, "[FREEOBJ] Object not found in table: %p\n", (void*)object);
 		goto done;
+	} else {
+		if (DEBUG) fprintf(stderr, "[FREEOBJ] Object found in table: %p\n", (void*)object);
 	}
 
-
-
-
-
-
+	VALUE klass = entry->klass;
+	VALUE data = entry->data;
+	VALUE allocations = entry->allocations;
+
+	// Delete by entry pointer (faster - no second lookup!)
+	Memory_Profiler_Object_Table_delete_entry(capture->states, entry);
+
 	struct Memory_Profiler_Capture_Allocations *record = Memory_Profiler_Allocations_get(allocations);
 
 	// Increment global free count

@@ -307,10 +244,7 @@ static void Memory_Profiler_Capture_process_freeobj(VALUE capture_value, VALUE k
 	if (!NIL_P(record->callback) && !NIL_P(data)) {
 		rb_funcall(record->callback, rb_intern("call"), 3, klass, sym_freeobj, data);
 	}
-
-	// Free the state struct
-	xfree(state);
-
+
 done:
 	// Resume the capture:
 	capture->paused -= 1;

@@ -387,22 +321,17 @@ static void Memory_Profiler_Capture_event_callback(VALUE self, void *ptr) {
 		if (capture->paused) return;
 
 		VALUE klass = rb_obj_class(object);
-		if (!klass) return;
-
-		// Convert to object_id for storage (Integer VALUE).
-		// It's safe to unconditionally call rb_obj_id during NEWOBJ.
-		VALUE object_id = rb_obj_id(object);
 
-
+		// Skip if klass is not a Class
+		if (rb_type(klass) != RUBY_T_CLASS) return;
+
+		// Enqueue actual object (not object_id) - queue retains it until processed
+		// Ruby 3.5 compatible: no need for FL_SEEN_OBJ_ID or rb_obj_id
+		if (DEBUG) fprintf(stderr, "[NEWOBJ] Enqueuing event for object: %p\n", (void*)object);
+		Memory_Profiler_Events_enqueue(MEMORY_PROFILER_EVENT_TYPE_NEWOBJ, self, klass, object);
 	} else if (event_flag == RUBY_INTERNAL_EVENT_FREEOBJ) {
-
-
-		// Convert to object_id for storage (Integer VALUE).
-		// It's only safe to call rb_obj_id if the object already has an object ID.
-		VALUE object_id = rb_obj_id(object);
-
-		Memory_Profiler_Events_enqueue(MEMORY_PROFILER_EVENT_TYPE_FREEOBJ, self, Qnil, object_id);
-	}
+		if (DEBUG) fprintf(stderr, "[FREEOBJ] Enqueuing event for object: %p\n", (void*)object);
+		Memory_Profiler_Events_enqueue(MEMORY_PROFILER_EVENT_TYPE_FREEOBJ, self, Qnil, object);
 	}
 }
 

@@ -421,11 +350,11 @@ static VALUE Memory_Profiler_Capture_alloc(VALUE klass) {
 		rb_raise(rb_eRuntimeError, "Failed to initialize tracked hash table");
 	}
 
-	// Initialize
-	capture->states =
-
+	// Initialize custom object table (uses system malloc, GC-safe)
+	capture->states = Memory_Profiler_Object_Table_new(1024);
 	if (!capture->states) {
-
+		st_free_table(capture->tracked);
+		rb_raise(rb_eRuntimeError, "Failed to initialize object table");
 	}
 
 	// Initialize allocation tracking counters

@@ -590,13 +519,13 @@ static VALUE Memory_Profiler_Capture_clear(VALUE self) {
 		rb_raise(rb_eRuntimeError, "Cannot clear while capture is running - call stop() first!");
 	}
 
-	// Reset all counts to 0 (don't free, just reset)
+	// Reset all counts to 0 (don't free, just reset):
 	st_foreach(capture->tracked, Memory_Profiler_Capture_tracked_clear, 0);
 
-	// Clear
+	// Clear custom object table by recreating it
 	if (capture->states) {
-
-
+		Memory_Profiler_Object_Table_free(capture->states);
+		capture->states = Memory_Profiler_Object_Table_new(1024);
 	}
 
 	// Reset allocation tracking counters

@@ -629,35 +558,65 @@ static VALUE Memory_Profiler_Capture_each(VALUE self) {
 	return self;
 }
 
-// Struct for filtering states
-struct
-	VALUE
+// Struct for filtering states during each_object iteration
+struct Memory_Profiler_Each_Object_Arguments {
+	VALUE self;
+
+	// The allocations wrapper to filter by (Qnil = no filter).
+	VALUE allocations;
 };
 
-//
-static
-
-struct
-struct
+// Cleanup function to ensure table is made weak again
+static VALUE Memory_Profiler_Capture_each_object_ensure(VALUE arg) {
+	struct Memory_Profiler_Each_Object_Arguments *arguments = (struct Memory_Profiler_Each_Object_Arguments *)arg;
+	struct Memory_Profiler_Capture *capture;
+	TypedData_Get_Struct(arguments->self, struct Memory_Profiler_Capture, &Memory_Profiler_Capture_type, capture);
 
-	//
-
-	return ST_CONTINUE;
-}
+	// Make table weak again
+	Memory_Profiler_Object_Table_decrement_strong(capture->states);
 
-
-
+	return Qnil;
+}
+
+// Main iteration function
+static VALUE Memory_Profiler_Capture_each_object_body(VALUE arg) {
+	struct Memory_Profiler_Each_Object_Arguments *arguments = (struct Memory_Profiler_Each_Object_Arguments *)arg;
+	struct Memory_Profiler_Capture *capture;
+	TypedData_Get_Struct(arguments->self, struct Memory_Profiler_Capture, &Memory_Profiler_Capture_type, capture);
 
-	//
-
+	// Iterate custom object table entries
+	if (capture->states) {
+		if (DEBUG) fprintf(stderr, "[ITER] Iterating table, capacity=%zu, count=%zu\n", capture->states->capacity, capture->states->count);
+
+		for (size_t i = 0; i < capture->states->capacity; i++) {
+			struct Memory_Profiler_Object_Table_Entry *entry = &capture->states->entries[i];
+
+			// Skip empty or deleted slots (0 = not set)
+			if (entry->object == 0) {
+				continue;
+			}
+
+			// Filter by allocations if specified
+			if (!NIL_P(arguments->allocations)) {
+				if (entry->allocations != arguments->allocations) continue;
+			}
+
+			rb_yield_values(2, entry->object, entry->allocations);
+		}
+	}
 
-	return
+	return arguments->self;
 }
 
-// Iterate over tracked
+// Iterate over tracked object IDs, optionally filtered by class
 // Called as:
-//   capture.each_object(String) { |
-//   capture.each_object { |
+//   capture.each_object(String) { |object_id, state| ... } # Specific class
+//   capture.each_object { |object_id, state| ... } // All objects
+//
+// Yields object_id as Integer. Caller can:
+//   - Format as hex: "0x%x" % object_id
+//   - Convert to object with ObjectSpace._id2ref (may raise RangeError if recycled)
+// Future-proof for Ruby 3.5 where _id2ref is deprecated
 static VALUE Memory_Profiler_Capture_each_object(int argc, VALUE *argv, VALUE self) {
 	struct Memory_Profiler_Capture *capture;
 	TypedData_Get_Struct(self, struct Memory_Profiler_Capture, &Memory_Profiler_Capture_type, capture);

@@ -667,10 +626,10 @@ static VALUE Memory_Profiler_Capture_each_object(int argc, VALUE *argv, VALUE se
 
 	RETURN_ENUMERATOR(self, argc, argv);
 
-	//
-
-
-
+	// Make table strong so objects won't be collected during iteration
+	Memory_Profiler_Object_Table_increment_strong(capture->states);
+
+	// Process all pending events and run GC to clean up stale objects:
 	Memory_Profiler_Events_process_all();
 
 	// If class provided, look up its allocations wrapper

@@ -681,25 +640,22 @@ static VALUE Memory_Profiler_Capture_each_object(int argc, VALUE *argv, VALUE se
 			allocations = (VALUE)allocations_data;
 		} else {
 			// Class not tracked - nothing to iterate
-
-			rb_gc_enable();
-			}
+			Memory_Profiler_Object_Table_decrement_strong(capture->states);
 			return self;
 		}
 	}
 
-	//
-	struct
-
-
-
-	}
-
-	if (RTEST(was_enabled)) {
-		rb_gc_enable();
-	}
+	// Setup arguments for iteration
+	struct Memory_Profiler_Each_Object_Arguments arguments = {
+		.self = self,
+		.allocations = allocations
+	};
 
-
+	// Use rb_ensure to guarantee cleanup even if exception is raised
+	return rb_ensure(
+		Memory_Profiler_Capture_each_object_body, (VALUE)&arguments,
+		Memory_Profiler_Capture_each_object_ensure, (VALUE)&arguments
+	);
 }
 
 // Get allocations for a specific class

@@ -721,23 +677,6 @@ struct Memory_Profiler_Allocations_Statistics {
 	VALUE per_class_counts;
 };
 
-// Iterator callback to count states per class (from central states table)
-static int Memory_Profiler_Capture_count_states(st_data_t key, st_data_t value, st_data_t argument) {
-	struct Memory_Profiler_Allocations_Statistics *statistics = (struct Memory_Profiler_Allocations_Statistics *)argument;
-	struct Memory_Profiler_Capture_State *state = (struct Memory_Profiler_Capture_State *)value;
-	VALUE klass = state->klass;
-
-	// Increment total count
-	statistics->total_tracked_objects++;
-
-	// Increment per-class count
-	VALUE current_count = rb_hash_lookup(statistics->per_class_counts, klass);
-	size_t count = NIL_P(current_count) ? 1 : NUM2SIZET(current_count) + 1;
-	rb_hash_aset(statistics->per_class_counts, klass, SIZET2NUM(count));
-
-	return ST_CONTINUE;
-}
-
 // Get internal statistics for debugging
 // Returns hash with internal state sizes
 static VALUE Memory_Profiler_Capture_statistics(VALUE self) {

@@ -749,22 +688,9 @@ static VALUE Memory_Profiler_Capture_statistics(VALUE self) {
 	// Tracked classes count
 	rb_hash_aset(statistics, ID2SYM(rb_intern("tracked_count")), SIZET2NUM(capture->tracked->num_entries));
 
-	//
-	size_t
-	rb_hash_aset(statistics, ID2SYM(rb_intern("
-
-	// Count states entries for each class (iterate central states table)
-	struct Memory_Profiler_Allocations_Statistics allocations_statistics = {
-		.total_tracked_objects = 0,
-		.per_class_counts = rb_hash_new()
-	};
-
-	if (capture->states) {
-		st_foreach(capture->states, Memory_Profiler_Capture_count_states, (st_data_t)&allocations_statistics);
-	}
-
-	rb_hash_aset(statistics, ID2SYM(rb_intern("total_tracked_objects")), SIZET2NUM(allocations_statistics.total_tracked_objects));
-	rb_hash_aset(statistics, ID2SYM(rb_intern("tracked_objects_per_class")), allocations_statistics.per_class_counts);
+	// Custom object table size
+	size_t states_size = capture->states ? Memory_Profiler_Object_Table_size(capture->states) : 0;
+	rb_hash_aset(statistics, ID2SYM(rb_intern("object_table_size")), SIZET2NUM(states_size));
 
 	return statistics;
 }

@@ -822,11 +748,6 @@ void Init_Memory_Profiler_Capture(VALUE Memory_Profiler)
 	rb_define_method(Memory_Profiler_Capture, "free_count", Memory_Profiler_Capture_free_count, 0);
 	rb_define_method(Memory_Profiler_Capture, "retained_count", Memory_Profiler_Capture_retained_count, 0);
 
-	// Cache ObjectSpace module for _id2ref calls
-	rb_mObjectSpace = rb_const_get(rb_cObject, rb_intern("ObjectSpace"));
-	rb_gc_register_mark_object(rb_mObjectSpace);
-	id_id2ref = rb_intern("_id2ref");
-
 	// Initialize Allocations class
 	Init_Memory_Profiler_Allocations(Memory_Profiler);
 }

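table.c itself is not part of this extract, so the Memory_Profiler_Object_Table_compact call in Memory_Profiler_Capture_compact above is opaque here. As a minimal sketch, assuming the entry layout inferred after the file list and assuming the table re-derives moved addresses with rb_gc_location, the "updates addresses during compaction" behaviour looks roughly like this; the gem's real implementation will differ, and in particular must also rehash, since the object's address is the key:

#include <ruby.h>

static void Object_Table_compact_sketch(struct Memory_Profiler_Object_Table *table) {
	for (size_t i = 0; i < table->capacity; i++) {
		struct Memory_Profiler_Object_Table_Entry *entry = &table->entries[i];
		if (entry->object == 0) continue; // Empty slot.

		// rb_gc_location returns the post-compaction address, or its argument
		// unchanged if the value did not move (or is an immediate like Qnil).
		entry->object = rb_gc_location(entry->object);
		entry->klass = rb_gc_location(entry->klass);
		entry->data = rb_gc_location(entry->data);
		entry->allocations = rb_gc_location(entry->allocations);
	}
}

Because the table is allocated with system malloc rather than from the Ruby heap, it is safe to touch during GC callbacks, which is what the "GC-safe" comment in the struct definition above refers to.
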
data/ext/memory/profiler/events.c
CHANGED

@@ -104,7 +104,10 @@ static void Memory_Profiler_Events_mark_queue(struct Memory_Profiler_Queue *queu
 
 		rb_gc_mark_movable(event->capture);
 		rb_gc_mark_movable(event->klass);
-
+
+		if (event->type == MEMORY_PROFILER_EVENT_TYPE_NEWOBJ) {
+			rb_gc_mark_movable(event->object);
+		}
 	}
 }
 

@@ -129,7 +132,10 @@ static void Memory_Profiler_Events_compact_queue(struct Memory_Profiler_Queue *q
 
 		event->capture = rb_gc_location(event->capture);
 		event->klass = rb_gc_location(event->klass);
-
+
+		if (event->type == MEMORY_PROFILER_EVENT_TYPE_NEWOBJ) {
+			event->object = rb_gc_location(event->object);
+		}
 	}
 }
 

@@ -175,7 +181,7 @@ int Memory_Profiler_Events_enqueue(
 	enum Memory_Profiler_Event_Type type,
 	VALUE capture,
 	VALUE klass,
-	VALUE
+	VALUE object
 ) {
 	struct Memory_Profiler_Events *events = Memory_Profiler_Events_instance();
 

@@ -187,7 +193,7 @@ int Memory_Profiler_Events_enqueue(
 	// Use write barriers when storing VALUEs (required for RUBY_TYPED_WB_PROTECTED):
 	RB_OBJ_WRITE(events->self, &event->capture, capture);
 	RB_OBJ_WRITE(events->self, &event->klass, klass);
-	RB_OBJ_WRITE(events->self, &event->object,
+	RB_OBJ_WRITE(events->self, &event->object, object);
 
 	if (DEBUG) fprintf(stderr, "Queued %s to available queue, size: %zu\n",
 		Memory_Profiler_Event_Type_name(type), events->available->count);

data/ext/memory/profiler/events.h
CHANGED

@@ -20,11 +20,10 @@ struct Memory_Profiler_Event {
 	// Which Capture instance this event belongs to:
 	VALUE capture;
 
-	// The class of the object:
+	// The class of the allocated object (Qnil for FREEOBJ):
 	VALUE klass;
 
-	// The
-	// We store object_id to avoid referencing objects that should be freed.
+	// The object pointer being alllocated or freed.
 	VALUE object;
 };
 

@@ -33,13 +32,16 @@ struct Memory_Profiler_Events;
 struct Memory_Profiler_Events* Memory_Profiler_Events_instance(void);
 
 // Enqueue an event to the global queue.
-// object parameter
+// object parameter semantics:
+// - NEWOBJ: the actual object being allocated (queue retains it)
+// - FREEOBJ: Array with state data for postponed processing
 // Returns non-zero on success, zero on failure.
+// Ruby 3.5 compatible: no FL_SEEN_OBJ_ID or object_id needed
 int Memory_Profiler_Events_enqueue(
 	enum Memory_Profiler_Event_Type type,
 	VALUE capture,
 	VALUE klass,
-	VALUE
+	VALUE object
 );
 
 // Process all queued events immediately (flush the queue)

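Assembled from the two events.h hunks above, the queued event record now carries the object itself rather than an object_id. The type field and enum values below are assumptions based on the calls visible in events.c (event->type, MEMORY_PROFILER_EVENT_TYPE_NEWOBJ, MEMORY_PROFILER_EVENT_TYPE_FREEOBJ); the full header is not included in this extract:

#include <ruby.h>

enum Memory_Profiler_Event_Type {
	MEMORY_PROFILER_EVENT_TYPE_NEWOBJ,
	MEMORY_PROFILER_EVENT_TYPE_FREEOBJ,
};

struct Memory_Profiler_Event {
	enum Memory_Profiler_Event_Type type;

	// Which Capture instance this event belongs to:
	VALUE capture;

	// The class of the allocated object (Qnil for FREEOBJ):
	VALUE klass;

	// The object being allocated or freed. Only NEWOBJ objects are treated as
	// GC roots by the queue; a FREEOBJ object is already being reclaimed, so
	// the mark and compact callbacks above deliberately skip it.
	VALUE object;
};
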
data/ext/memory/profiler/profiler.c
CHANGED

@@ -3,15 +3,26 @@
 
 #include "capture.h"
 
+// Return the memory address of an object as a hex string
+// This matches the format used by ObjectSpace.dump_all
+static VALUE Memory_Profiler_address_of(VALUE module, VALUE object) {
+	char buffer[32];
+	snprintf(buffer, sizeof(buffer), "0x%lx", (unsigned long)object);
+	return rb_str_new_cstr(buffer);
+}
+
 void Init_Memory_Profiler(void)
 {
 #ifdef HAVE_RB_EXT_RACTOR_SAFE
-
+	rb_ext_ractor_safe(true);
 #endif
-
-
-
-
-
+
+	VALUE Memory = rb_const_get(rb_cObject, rb_intern("Memory"));
+	VALUE Memory_Profiler = rb_define_module_under(Memory, "Profiler");
+
+	// Add Memory::Profiler.address_of(object) module function:
+	rb_define_module_function(Memory_Profiler, "address_of", Memory_Profiler_address_of, 1);
+
+	Init_Memory_Profiler_Capture(Memory_Profiler);
 }
 