memory-tracker-x 0.1.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,510 @@
+ // Released under the MIT License.
+ // Copyright, 2025, by Samuel Williams.
+
+ #include "capture.h"
+
+ #include "ruby.h"
+ #include "ruby/debug.h"
+ #include "ruby/st.h"
+ #include <stdatomic.h>
+ #include <stdio.h>
+
+ enum {
+ 	DEBUG = 0,
+ };
+
+ static VALUE Memory_Tracker_Capture = Qnil;
+ static VALUE Memory_Tracker_Allocations = Qnil;
+
+ // Per-class allocation tracking record
+ struct Memory_Tracker_Capture_Allocations {
+ 	VALUE callback; // Optional Ruby proc/lambda to call on allocation
+ 	size_t new_count; // Total allocations seen since tracking started
+ 	size_t free_count; // Total frees seen since tracking started
+ 	// Live count = new_count - free_count
+ };
+
+ // Main capture state
+ struct Memory_Tracker_Capture {
+ 	// class => Memory_Tracker_Capture_Allocations.
+ 	st_table *tracked_classes;
+
+ 	// Is tracking enabled (via start/stop):
+ 	int enabled;
+ };
+
+ // GC mark callback for the tracked_classes table
+ static int Memory_Tracker_Capture_mark_class(st_data_t key, st_data_t value, st_data_t arg) {
+ 	// Pin the class: the freeobj handler looks classes up by address, so keys
+ 	// must not move during compaction.
+ 	rb_gc_mark((VALUE)key);
+
+ 	struct Memory_Tracker_Capture_Allocations *record = (struct Memory_Tracker_Capture_Allocations *)value;
+ 	if (!NIL_P(record->callback)) {
+ 		rb_gc_mark_movable(record->callback); // The callback may move; dcompact updates it
+ 	}
+ 	return ST_CONTINUE;
+ }
+
+ // GC mark function
+ static void Memory_Tracker_Capture_mark(void *ptr) {
+ 	struct Memory_Tracker_Capture *capture = ptr;
+
+ 	if (!capture) {
+ 		return;
+ 	}
+
+ 	if (capture->tracked_classes) {
+ 		st_foreach(capture->tracked_classes, Memory_Tracker_Capture_mark_class, 0);
+ 	}
+ }
+
+ // Iterator to free each class record
+ static int Memory_Tracker_Capture_free_class_record(st_data_t key, st_data_t value, st_data_t arg) {
+ 	struct Memory_Tracker_Capture_Allocations *record = (struct Memory_Tracker_Capture_Allocations *)value;
+ 	xfree(record);
+ 	return ST_CONTINUE;
+ }
+
+
68
+ // GC free function
69
+ static void Memory_Tracker_Capture_free(void *ptr) {
70
+ struct Memory_Tracker_Capture *capture = ptr;
71
+
72
+ // Event hooks must be removed via stop() before object is freed
73
+ // Ruby will automatically remove hooks when the VALUE is GC'd
74
+
75
+ if (capture->tracked_classes) {
76
+ st_foreach(capture->tracked_classes, Memory_Tracker_Capture_free_class_record, 0);
77
+ st_free_table(capture->tracked_classes);
78
+ }
79
+
80
+ xfree(capture);
81
+ }
+
+ // GC memsize function
+ static size_t Memory_Tracker_Capture_memsize(const void *ptr) {
+ 	const struct Memory_Tracker_Capture *capture = ptr;
+ 	size_t size = sizeof(struct Memory_Tracker_Capture);
+
+ 	if (capture->tracked_classes) {
+ 		size += capture->tracked_classes->num_entries * (sizeof(st_data_t) + sizeof(struct Memory_Tracker_Capture_Allocations));
+ 	}
+
+ 	return size;
+ }
+
+ // Foreach callback for st_foreach_with_replace: request replacement of every
+ // entry. st_foreach_with_replace requires a non-NULL foreach callback and only
+ // invokes the replace callback for entries where this returns ST_REPLACE, so
+ // passing NULL here would crash during compaction.
+ static int Memory_Tracker_Capture_replace_all(st_data_t key, st_data_t value, st_data_t argp, int error) {
+ 	return ST_REPLACE;
+ }
+
+ // Replace callback for st_foreach_with_replace to update class keys and callback values
+ static int Memory_Tracker_Capture_update_refs(st_data_t *key, st_data_t *value, st_data_t argp, int existing) {
+ 	// Update class key if it moved
+ 	VALUE old_klass = (VALUE)*key;
+ 	VALUE new_klass = rb_gc_location(old_klass);
+ 	if (old_klass != new_klass) {
+ 		*key = (st_data_t)new_klass;
+ 	}
+
+ 	// Update callback if it moved
+ 	struct Memory_Tracker_Capture_Allocations *record = (struct Memory_Tracker_Capture_Allocations *)*value;
+ 	if (!NIL_P(record->callback)) {
+ 		VALUE old_callback = record->callback;
+ 		VALUE new_callback = rb_gc_location(old_callback);
+ 		if (old_callback != new_callback) {
+ 			record->callback = new_callback;
+ 		}
+ 	}
+
+ 	return ST_CONTINUE;
+ }
+
+ // GC compact function - update VALUEs when GC compaction moves objects
+ static void Memory_Tracker_Capture_compact(void *ptr) {
+ 	struct Memory_Tracker_Capture *capture = ptr;
+
+ 	// Update tracked_classes keys and callback values in-place
+ 	if (capture->tracked_classes && capture->tracked_classes->num_entries > 0) {
+ 		if (st_foreach_with_replace(capture->tracked_classes, Memory_Tracker_Capture_replace_all, Memory_Tracker_Capture_update_refs, 0)) {
+ 			rb_raise(rb_eRuntimeError, "tracked_classes modified during GC compaction");
+ 		}
+ 	}
+ }
+
+ static const rb_data_type_t Memory_Tracker_Capture_type = {
+ 	"Memory::Tracker::Capture",
+ 	{
+ 		.dmark = Memory_Tracker_Capture_mark,
+ 		.dcompact = Memory_Tracker_Capture_compact,
+ 		.dfree = Memory_Tracker_Capture_free,
+ 		.dsize = Memory_Tracker_Capture_memsize,
+ 	},
+ 	// No RUBY_TYPED_WB_PROTECTED: callback VALUEs are stored into heap-allocated
+ 	// records without write barriers, so this object must stay WB-unprotected.
+ 	0, 0, RUBY_TYPED_FREE_IMMEDIATELY
+ };
+
+ const char *event_flag_name(rb_event_flag_t event_flag) {
+ 	switch (event_flag) {
+ 		case RUBY_EVENT_CALL: return "call";
+ 		case RUBY_EVENT_C_CALL: return "c-call";
+ 		case RUBY_EVENT_B_CALL: return "b-call";
+ 		case RUBY_EVENT_RETURN: return "return";
+ 		case RUBY_EVENT_C_RETURN: return "c-return";
+ 		case RUBY_EVENT_B_RETURN: return "b-return";
+ 		case RUBY_INTERNAL_EVENT_NEWOBJ: return "newobj";
+ 		case RUBY_INTERNAL_EVENT_FREEOBJ: return "freeobj";
+ 		case RUBY_INTERNAL_EVENT_GC_START: return "gc-start";
+ 		case RUBY_INTERNAL_EVENT_GC_END_MARK: return "gc-end-mark";
+ 		case RUBY_INTERNAL_EVENT_GC_END_SWEEP: return "gc-end-sweep";
+ 		case RUBY_EVENT_LINE: return "line";
+ 		default: return "unknown";
+ 	}
+ }
+
+ // Handler for NEWOBJ event
+ static void Memory_Tracker_Capture_newobj_handler(struct Memory_Tracker_Capture *capture, VALUE klass) {
+ 	st_data_t record_data;
+ 	if (st_lookup(capture->tracked_classes, (st_data_t)klass, &record_data)) {
+ 		struct Memory_Tracker_Capture_Allocations *record = (struct Memory_Tracker_Capture_Allocations *)record_data;
+ 		record->new_count++;
+ 		if (!NIL_P(record->callback)) {
+ 			// Invoke callback - runs during NEWOBJ with GC disabled
+ 			// CRITICAL CALLBACK REQUIREMENTS:
+ 			// - Must be FAST (runs on EVERY allocation)
+ 			// - Must NOT call GC.start (will deadlock)
+ 			// - Must NOT block/sleep (stalls all allocations system-wide)
+ 			// - Should NOT raise exceptions (will propagate to allocating code)
+ 			// - Avoid allocating objects (causes re-entry)
+ 			rb_funcall(record->callback, rb_intern("call"), 1, klass);
+ 		}
+ 	} else {
+ 		// Create record for this class (first time seeing it)
+ 		struct Memory_Tracker_Capture_Allocations *record = ALLOC(struct Memory_Tracker_Capture_Allocations);
+ 		record->callback = Qnil;
+ 		record->new_count = 1; // This is the first allocation
+ 		record->free_count = 0;
+ 		st_insert(capture->tracked_classes, (st_data_t)klass, (st_data_t)record);
+ 	}
+ }
+
+ // Handler for FREEOBJ event
+ static void Memory_Tracker_Capture_freeobj_handler(struct Memory_Tracker_Capture *capture, VALUE klass) {
+ 	st_data_t record_data;
+ 	if (st_lookup(capture->tracked_classes, (st_data_t)klass, &record_data)) {
+ 		struct Memory_Tracker_Capture_Allocations *record = (struct Memory_Tracker_Capture_Allocations *)record_data;
+ 		record->free_count++;
+ 	}
+ }
+
+ // Check if object type is trackable (has valid class pointer and is a normal Ruby object)
+ // Excludes internal types (T_IMEMO, T_NODE, T_ICLASS, etc.) that don't have normal classes
+ int Memory_Tracker_Capture_trackable_p(VALUE object) {
+ 	switch (rb_type(object)) {
+ 		// Normal Ruby objects with valid class pointers:
+ 		case RUBY_T_OBJECT: // Plain objects
+ 		case RUBY_T_CLASS: // Class objects
+ 		case RUBY_T_MODULE: // Module objects
+ 		case RUBY_T_STRING: // Strings
+ 		case RUBY_T_ARRAY: // Arrays
+ 		case RUBY_T_HASH: // Hashes
+ 		case RUBY_T_STRUCT: // Structs
+ 		case RUBY_T_BIGNUM: // Big integers
+ 		case RUBY_T_FLOAT: // Floating point numbers
+ 		case RUBY_T_FILE: // File objects
+ 		case RUBY_T_DATA: // C extension data
+ 		case RUBY_T_MATCH: // Regex match data
+ 		case RUBY_T_COMPLEX: // Complex numbers
+ 		case RUBY_T_RATIONAL: // Rational numbers
+ 		case RUBY_T_REGEXP: // Regular expressions
+ 			return true;
+
+ 		// Skip internal types (don't have normal class pointers):
+ 		// - T_IMEMO: Internal memory objects (use class field for other data)
+ 		// - T_NODE: AST nodes (internal)
+ 		// - T_ICLASS: Include wrappers (internal)
+ 		// - T_ZOMBIE: Finalizing objects (internal)
+ 		// - T_MOVED: Compaction placeholders (internal)
+ 		// - T_NONE: Uninitialized slots
+ 		// - T_UNDEF: Undefined value marker
+ 		default:
+ 			return false;
+ 	}
+ }
+
+ // Event hook callback, registered with RUBY_EVENT_HOOK_FLAG_RAW_ARG.
+ // Effective signature: (VALUE data, rb_trace_arg_t *trace_arg)
+ static void Memory_Tracker_Capture_event_callback(VALUE data, void *ptr) {
+ 	rb_trace_arg_t *trace_arg = (rb_trace_arg_t *)ptr;
+
+ 	struct Memory_Tracker_Capture *capture;
+ 	TypedData_Get_Struct(data, struct Memory_Tracker_Capture, &Memory_Tracker_Capture_type, capture);
+
+ 	if (!capture->enabled) return;
+
+ 	VALUE object = rb_tracearg_object(trace_arg);
+
+ 	// We don't want to track internal non-Object allocations:
+ 	if (!Memory_Tracker_Capture_trackable_p(object)) return;
+
+ 	rb_event_flag_t event_flag = rb_tracearg_event_flag(trace_arg);
+ 	VALUE klass = rb_class_of(object);
+ 	if (!klass) return;
+
+ 	if (DEBUG) {
+ 		const char *klass_name = rb_class2name(klass);
+ 		fprintf(stderr, "Memory_Tracker_Capture_event_callback: %s, Object: %p, Class: %p (%s)\n", event_flag_name(event_flag), (void *)object, (void *)klass, klass_name);
+ 	}
+
+ 	if (event_flag == RUBY_INTERNAL_EVENT_NEWOBJ) {
+ 		// object is the newly allocated instance of klass
+ 		Memory_Tracker_Capture_newobj_handler(capture, klass);
+ 	} else if (event_flag == RUBY_INTERNAL_EVENT_FREEOBJ) {
+ 		// object is being freed; count it against its class
+ 		Memory_Tracker_Capture_freeobj_handler(capture, klass);
+ 	}
+ }
+
+ // Allocate new capture
+ static VALUE Memory_Tracker_Capture_alloc(VALUE klass) {
+ 	struct Memory_Tracker_Capture *capture;
+ 	VALUE obj = TypedData_Make_Struct(klass, struct Memory_Tracker_Capture, &Memory_Tracker_Capture_type, capture);
+
+ 	if (!capture) {
+ 		rb_raise(rb_eRuntimeError, "Failed to allocate Memory::Tracker::Capture");
+ 	}
+
+ 	capture->tracked_classes = st_init_numtable();
+
+ 	if (!capture->tracked_classes) {
+ 		rb_raise(rb_eRuntimeError, "Failed to initialize hash table");
+ 	}
+
+ 	capture->enabled = 0;
+
+ 	return obj;
+ }
+
+ // Initialize capture
+ static VALUE Memory_Tracker_Capture_initialize(VALUE self) {
+ 	return self;
+ }
+
+ // Start capturing allocations
+ static VALUE Memory_Tracker_Capture_start(VALUE self) {
+ 	struct Memory_Tracker_Capture *capture;
+ 	TypedData_Get_Struct(self, struct Memory_Tracker_Capture, &Memory_Tracker_Capture_type, capture);
+
+ 	if (capture->enabled) return Qfalse;
+
+ 	// Add event hook for NEWOBJ and FREEOBJ with RAW_ARG to get trace_arg
+ 	rb_add_event_hook2(
+ 		(rb_event_hook_func_t)Memory_Tracker_Capture_event_callback,
+ 		RUBY_INTERNAL_EVENT_NEWOBJ | RUBY_INTERNAL_EVENT_FREEOBJ,
+ 		self,
+ 		RUBY_EVENT_HOOK_FLAG_SAFE | RUBY_EVENT_HOOK_FLAG_RAW_ARG
+ 	);
+
+ 	capture->enabled = 1;
+
+ 	return Qtrue;
+ }
+
+ // Stop capturing allocations
+ static VALUE Memory_Tracker_Capture_stop(VALUE self) {
+ 	struct Memory_Tracker_Capture *capture;
+ 	TypedData_Get_Struct(self, struct Memory_Tracker_Capture, &Memory_Tracker_Capture_type, capture);
+
+ 	if (!capture->enabled) return Qfalse;
+
+ 	// Remove event hook using same data (self) we registered with
+ 	rb_remove_event_hook_with_data((rb_event_hook_func_t)Memory_Tracker_Capture_event_callback, self);
+
+ 	capture->enabled = 0;
+
+ 	return Qtrue;
+ }
+
+ // Add a class to track with optional callback.
+ // Usage: track(klass) or track(klass) { |klass| ... }
+ // The callback receives the class being allocated; it can call
+ // caller_locations with whatever depth it needs.
+ static VALUE Memory_Tracker_Capture_track(int argc, VALUE *argv, VALUE self) {
+ 	struct Memory_Tracker_Capture *capture;
+ 	TypedData_Get_Struct(self, struct Memory_Tracker_Capture, &Memory_Tracker_Capture_type, capture);
+
+ 	VALUE klass, callback;
+ 	rb_scan_args(argc, argv, "1&", &klass, &callback);
+
+ 	st_data_t record_data;
+ 	if (st_lookup(capture->tracked_classes, (st_data_t)klass, &record_data)) {
+ 		struct Memory_Tracker_Capture_Allocations *record = (struct Memory_Tracker_Capture_Allocations *)record_data;
+ 		record->callback = callback;
+ 	} else {
+ 		struct Memory_Tracker_Capture_Allocations *record = ALLOC(struct Memory_Tracker_Capture_Allocations);
+ 		record->callback = callback;
+ 		record->new_count = 0;
+ 		record->free_count = 0;
+ 		st_insert(capture->tracked_classes, (st_data_t)klass, (st_data_t)record);
+ 	}
+
+ 	return self;
+ }
+
+ // Stop tracking a class
+ static VALUE Memory_Tracker_Capture_untrack(VALUE self, VALUE klass) {
+ 	struct Memory_Tracker_Capture *capture;
+ 	TypedData_Get_Struct(self, struct Memory_Tracker_Capture, &Memory_Tracker_Capture_type, capture);
+
+ 	// Use a temporary st_data_t key rather than casting &klass, which is not
+ 	// guaranteed to alias safely:
+ 	st_data_t key = (st_data_t)klass;
+ 	st_data_t record_data;
+ 	if (st_delete(capture->tracked_classes, &key, &record_data)) {
+ 		struct Memory_Tracker_Capture_Allocations *record = (struct Memory_Tracker_Capture_Allocations *)record_data;
+ 		xfree(record);
+ 	}
+
+ 	return self;
+ }
+
+ // Check if tracking a class
+ static VALUE Memory_Tracker_Capture_tracking_p(VALUE self, VALUE klass) {
+ 	struct Memory_Tracker_Capture *capture;
+ 	TypedData_Get_Struct(self, struct Memory_Tracker_Capture, &Memory_Tracker_Capture_type, capture);
+
+ 	return st_lookup(capture->tracked_classes, (st_data_t)klass, NULL) ? Qtrue : Qfalse;
+ }
+
+ // Get count of live objects for a specific class (O(1) lookup!)
+ static VALUE Memory_Tracker_Capture_count_for(VALUE self, VALUE klass) {
+ 	struct Memory_Tracker_Capture *capture;
+ 	TypedData_Get_Struct(self, struct Memory_Tracker_Capture, &Memory_Tracker_Capture_type, capture);
+
+ 	st_data_t record_data;
+ 	if (st_lookup(capture->tracked_classes, (st_data_t)klass, &record_data)) {
+ 		struct Memory_Tracker_Capture_Allocations *record = (struct Memory_Tracker_Capture_Allocations *)record_data;
+ 		// Return net live count (new_count - free_count). Handle the case where
+ 		// more objects were freed than allocated (allocated before tracking started):
+ 		if (record->free_count > record->new_count) {
+ 			return INT2FIX(0); // Can't have negative live count
+ 		}
+ 		size_t live_count = record->new_count - record->free_count;
+ 		return SIZET2NUM(live_count);
+ 	}
+
+ 	return INT2FIX(0);
+ }
+
+ // Iterator to reset each class record in place
+ static int Memory_Tracker_Capture_reset_class(st_data_t key, st_data_t value, st_data_t arg) {
+ 	struct Memory_Tracker_Capture_Allocations *record = (struct Memory_Tracker_Capture_Allocations *)value;
+ 	record->new_count = 0; // Reset allocation count
+ 	record->free_count = 0; // Reset free count
+ 	record->callback = Qnil; // Clear callback
+ 	return ST_CONTINUE;
+ }
+
+ // Clear all allocation tracking (resets all counts to 0 and clears callbacks)
+ static VALUE Memory_Tracker_Capture_clear(VALUE self) {
+ 	struct Memory_Tracker_Capture *capture;
+ 	TypedData_Get_Struct(self, struct Memory_Tracker_Capture, &Memory_Tracker_Capture_type, capture);
+
+ 	// Reset all records in place (don't free them):
+ 	st_foreach(capture->tracked_classes, Memory_Tracker_Capture_reset_class, 0);
+
+ 	return self;
+ }
+
+ // TypedData for Allocations wrapper
+ static const rb_data_type_t Memory_Tracker_Allocations_type = {
+ 	"Memory::Tracker::Allocations",
+ 	{NULL, NULL, NULL}, // No mark/free: the record is owned by the Capture
+ 	0, 0, 0
+ };
+
+ // Wrap an allocations record. The wrapper borrows the record from its Capture,
+ // so it must not be retained after the Capture is freed or the class untracked.
+ static VALUE Memory_Tracker_Allocations_wrap(struct Memory_Tracker_Capture_Allocations *record) {
+ 	return TypedData_Wrap_Struct(Memory_Tracker_Allocations, &Memory_Tracker_Allocations_type, record);
+ }
+
+ // Get allocations record from wrapper
+ static struct Memory_Tracker_Capture_Allocations *Memory_Tracker_Allocations_get(VALUE self) {
+ 	struct Memory_Tracker_Capture_Allocations *record;
+ 	TypedData_Get_Struct(self, struct Memory_Tracker_Capture_Allocations, &Memory_Tracker_Allocations_type, record);
+ 	return record;
+ }
+
+ // Allocations#new_count
+ static VALUE Memory_Tracker_Allocations_new_count(VALUE self) {
+ 	struct Memory_Tracker_Capture_Allocations *record = Memory_Tracker_Allocations_get(self);
+ 	return SIZET2NUM(record->new_count);
+ }
+
+ // Allocations#free_count
+ static VALUE Memory_Tracker_Allocations_free_count(VALUE self) {
+ 	struct Memory_Tracker_Capture_Allocations *record = Memory_Tracker_Allocations_get(self);
+ 	return SIZET2NUM(record->free_count);
+ }
+
+ // Allocations#retained_count
+ static VALUE Memory_Tracker_Allocations_retained_count(VALUE self) {
+ 	struct Memory_Tracker_Capture_Allocations *record = Memory_Tracker_Allocations_get(self);
+ 	// Handle underflow when free_count > new_count
+ 	size_t retained = record->free_count > record->new_count ? 0 : record->new_count - record->free_count;
+ 	return SIZET2NUM(retained);
+ }
+
+ // Allocations#track { |klass| ... }
+ static VALUE Memory_Tracker_Allocations_track(int argc, VALUE *argv, VALUE self) {
+ 	struct Memory_Tracker_Capture_Allocations *record = Memory_Tracker_Allocations_get(self);
+
+ 	VALUE callback;
+ 	rb_scan_args(argc, argv, "&", &callback);
+
+ 	record->callback = callback;
+
+ 	return self;
+ }
+
+ // Iterator callback for each
+ static int Memory_Tracker_Capture_each_allocation(st_data_t key, st_data_t value, st_data_t arg) {
+ 	VALUE klass = (VALUE)key;
+ 	struct Memory_Tracker_Capture_Allocations *record = (struct Memory_Tracker_Capture_Allocations *)value;
+
+ 	// Wrap the allocations record
+ 	VALUE allocations = Memory_Tracker_Allocations_wrap(record);
+
+ 	// Yield class and allocations wrapper
+ 	rb_yield_values(2, klass, allocations);
+
+ 	return ST_CONTINUE;
+ }
+
+ // Iterate over all tracked classes with their allocation data.
+ // Note: the block must not track or untrack classes, as that would mutate the
+ // table during st_foreach.
+ static VALUE Memory_Tracker_Capture_each(VALUE self) {
+ 	struct Memory_Tracker_Capture *capture;
+ 	TypedData_Get_Struct(self, struct Memory_Tracker_Capture, &Memory_Tracker_Capture_type, capture);
+
+ 	RETURN_ENUMERATOR(self, 0, 0);
+
+ 	st_foreach(capture->tracked_classes, Memory_Tracker_Capture_each_allocation, 0);
+
+ 	return self;
+ }
+
+ void Init_Memory_Tracker_Capture(VALUE Memory_Tracker)
+ {
+ 	Memory_Tracker_Capture = rb_define_class_under(Memory_Tracker, "Capture", rb_cObject);
+ 	rb_define_alloc_func(Memory_Tracker_Capture, Memory_Tracker_Capture_alloc);
+
+ 	rb_define_method(Memory_Tracker_Capture, "initialize", Memory_Tracker_Capture_initialize, 0);
+ 	rb_define_method(Memory_Tracker_Capture, "start", Memory_Tracker_Capture_start, 0);
+ 	rb_define_method(Memory_Tracker_Capture, "stop", Memory_Tracker_Capture_stop, 0);
+ 	rb_define_method(Memory_Tracker_Capture, "track", Memory_Tracker_Capture_track, -1); // -1 to accept block
+ 	rb_define_method(Memory_Tracker_Capture, "untrack", Memory_Tracker_Capture_untrack, 1);
+ 	rb_define_method(Memory_Tracker_Capture, "tracking?", Memory_Tracker_Capture_tracking_p, 1);
+ 	rb_define_method(Memory_Tracker_Capture, "count_for", Memory_Tracker_Capture_count_for, 1);
+ 	rb_define_method(Memory_Tracker_Capture, "each", Memory_Tracker_Capture_each, 0);
+ 	rb_define_method(Memory_Tracker_Capture, "clear", Memory_Tracker_Capture_clear, 0);
+
+ 	// Allocations class - wraps allocation data for a specific class
+ 	Memory_Tracker_Allocations = rb_define_class_under(Memory_Tracker, "Allocations", rb_cObject);
+
+ 	rb_define_method(Memory_Tracker_Allocations, "new_count", Memory_Tracker_Allocations_new_count, 0);
+ 	rb_define_method(Memory_Tracker_Allocations, "free_count", Memory_Tracker_Allocations_free_count, 0);
+ 	rb_define_method(Memory_Tracker_Allocations, "retained_count", Memory_Tracker_Allocations_retained_count, 0);
+ 	rb_define_method(Memory_Tracker_Allocations, "track", Memory_Tracker_Allocations_track, -1); // -1 to accept block
+ }
+
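
The C file above is the whole user-facing API: track/untrack manage entries in the tracked_classes table, start/stop install and remove the NEWOBJ/FREEOBJ event hooks, and count_for/each read the per-class counters. A minimal usage sketch against that API; the require path is an assumption about how the gem is loaded, and Widget is a hypothetical stand-in for an application class:

require "memory/tracker" # Assumed entry point; adjust to however the gem is loaded.

class Widget # Hypothetical application class for the example.
end

capture = Memory::Tracker::Capture.new

# Count every String and Widget allocation from here on:
capture.track(String)
capture.track(Widget)

capture.start
1_000.times {String.new("x")}
capture.stop

# O(1) lookup of the live count (new_count - free_count):
puts capture.count_for(String)

# Enumerate all tracked classes with their Allocations wrappers:
capture.each do |klass, allocations|
	puts "#{klass}: +#{allocations.new_count} -#{allocations.free_count} = #{allocations.retained_count} retained"
end
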
@@ -0,0 +1,9 @@
+ // Released under the MIT License.
+ // Copyright, 2025, by Samuel Williams.
+
+ #pragma once
+
+ #include <ruby.h>
+
+ void Init_Memory_Tracker_Capture(VALUE Memory_Tracker);
+
@@ -0,0 +1,17 @@
+ // Released under the MIT License.
+ // Copyright, 2025, by Samuel Williams.
+
+ #include "capture.h"
+
+ void Init_Memory_Tracker(void)
+ {
+ 	#ifdef HAVE_RB_EXT_RACTOR_SAFE
+ 	rb_ext_ractor_safe(true);
+ 	#endif
+
+ 	// The Memory module must already be defined by the Ruby side;
+ 	// rb_const_get raises NameError if it is missing.
+ 	VALUE Memory = rb_const_get(rb_cObject, rb_intern("Memory"));
+ 	VALUE Memory_Tracker = rb_define_module_under(Memory, "Tracker");
+
+ 	Init_Memory_Tracker_Capture(Memory_Tracker);
+ }
+
@@ -0,0 +1,138 @@
+ # frozen_string_literal: true
+
+ # Released under the MIT License.
+ # Copyright, 2025, by Samuel Williams.
+
+ module Memory
+ 	module Tracker
+ 		# Efficient tree structure for tracking allocation call paths.
+ 		# Each node represents a frame in the call stack, with counts of how many
+ 		# allocations occurred at this point in the call path.
+ 		class CallTree
+ 			# Represents a node in the call tree.
+ 			#
+ 			# Each node tracks how many allocations occurred at a specific point in a call path.
+ 			# Nodes form a tree structure where each path from root to leaf represents a unique
+ 			# call stack that led to allocations.
+ 			class Node
+ 				# Create a new call tree node.
+ 				#
+ 				# @parameter location [Thread::Backtrace::Location] The source location for this frame.
+ 				def initialize(location = nil)
+ 					@location = location
+ 					@count = 0
+ 					@children = nil
+ 				end
+
+ 				# @attribute [Thread::Backtrace::Location] The location of the call.
+ 				attr_reader :location, :count, :children
+
+ 				# Increment the allocation count for this node.
+ 				def increment!
+ 					@count += 1
+ 				end
+
+ 				# Check if this node is a leaf (end of a call path).
+ 				#
+ 				# @returns [Boolean] True if this node has no children.
+ 				def leaf?
+ 					@children.nil?
+ 				end
+
+ 				# Find or create a child node for the given location.
+ 				#
+ 				# @parameter location [Thread::Backtrace::Location] The frame location for the child node.
+ 				# @returns [Node] The child node for this location.
+ 				def find_or_create_child(location)
+ 					@children ||= {}
+ 					@children[location.to_s] ||= Node.new(location)
+ 				end
+
+ 				# Iterate over child nodes.
+ 				#
+ 				# @yields {|child| ...} If a block is given, yields each child node.
+ 				def each_child(&block)
+ 					@children&.each_value(&block)
+ 				end
+
+ 				# Enumerate all paths from this node to leaves with their counts.
+ 				def each_path(prefix = [], &block)
+ 					current = prefix + [self]
+
+ 					if leaf?
+ 						yield current, @count
+ 					end
+
+ 					@children&.each_value do |child|
+ 						child.each_path(current, &block)
+ 					end
+ 				end
+ 			end
+
+ 			# Create a new call tree for tracking allocation paths.
+ 			def initialize
+ 				@root = Node.new
+ 			end
+
+ 			# Record an allocation with the given caller locations.
+ 			def record(caller_locations)
+ 				return if caller_locations.empty?
+
+ 				current = @root
+
+ 				# Build the tree, incrementing each node as we traverse:
+ 				caller_locations.each do |location|
+ 					current.increment!
+ 					current = current.find_or_create_child(location)
+ 				end
+
+ 				# Increment the final leaf node:
+ 				current.increment!
+ 			end
+
+ 			# Get the top N call paths by allocation count.
+ 			def top_paths(limit = 10)
+ 				paths = []
+
+ 				@root.each_path do |path, count|
+ 					# Filter out the root node (which has a nil location) and map to location strings:
+ 					locations = path.select(&:location).map {|node| node.location.to_s}
+ 					paths << [locations, count] unless locations.empty?
+ 				end
+
+ 				paths.sort_by {|_, count| -count}.first(limit)
+ 			end
+
+ 			# Get hotspot locations (individual frames with the highest counts).
+ 			def hotspots(limit = 20)
+ 				frames = Hash.new(0)
+
+ 				collect_frames(@root, frames)
+
+ 				frames.sort_by {|_, count| -count}.first(limit).to_h
+ 			end
+
+ 			# Total number of allocations tracked. The root node is incremented
+ 			# once per call to #record, so its count is the total.
+ 			def total_allocations
+ 				@root.count
+ 			end
+
+ 			# Clear all tracking data.
+ 			def clear!
+ 				@root = Node.new
+ 			end
+
+ 			private
+
+ 			def collect_frames(node, frames)
+ 				# Skip the root node (it has no location):
+ 				if node.location
+ 					frames[node.location.to_s] += node.count
+ 				end
+
+ 				node.each_child {|child| collect_frames(child, frames)}
+ 			end
+ 		end
+ 	end
+ end
+
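
CallTree is the pure-Ruby half of the profiler: record walks one backtrace down the tree, incrementing a counter at every frame, so top_paths can rank complete stacks and hotspots can rank individual frames. One way to wire it to Capture, using the callback hook documented in the C file above ("it can call caller_locations with whatever depth it needs"). The depth arguments (2, 16) are arbitrary choices, and per the warnings in the NEWOBJ handler such a callback allocates objects, so this sketch is only suitable for coarse diagnostics:

tree = Memory::Tracker::CallTree.new
capture = Memory::Tracker::Capture.new

# Record the call path of every Hash allocation:
capture.track(Hash) do |klass|
	# Skip the callback frame itself; capture at most 16 frames:
	tree.record(caller_locations(2, 16) || [])
end

capture.start
# ... exercise the application ...
capture.stop

tree.top_paths(5).each do |locations, count|
	puts "#{count} allocations via:"
	locations.each {|location| puts "\t#{location}"}
end

puts "Total recorded: #{tree.total_allocations}"
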
@@ -0,0 +1,6 @@
+ # frozen_string_literal: true
+
+ # Released under the MIT License.
+ # Copyright, 2025, by Samuel Williams.
+
+ require "Memory_Tracker"