memory-profiler 1.3.0 → 1.5.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -4,10 +4,10 @@
 #include "capture.h"
 #include "allocations.h"
 #include "events.h"
+#include "table.h"
 
-#include "ruby.h"
-#include "ruby/debug.h"
-#include "ruby/st.h"
+#include <ruby/debug.h>
+#include <ruby/st.h>
 #include <stdatomic.h>
 #include <stdio.h>
 
@@ -20,6 +20,7 @@ static VALUE Memory_Profiler_Capture = Qnil;
 // Event symbols:
 static VALUE sym_newobj, sym_freeobj;
 
+
 // Main capture state (per-instance).
 struct Memory_Profiler_Capture {
 	// Master switch - is tracking active? (set by start/stop).
@@ -29,15 +30,19 @@ struct Memory_Profiler_Capture {
 	int paused;
 
 	// Tracked classes: class => VALUE (wrapped Memory_Profiler_Capture_Allocations).
-	st_table *tracked_classes;
+	st_table *tracked;
+
+	// Custom object table: object (address) => state hash
+	// Uses system malloc (GC-safe), updates addresses during compaction
+	struct Memory_Profiler_Object_Table *states;
 
 	// Total number of allocations and frees seen since tracking started.
 	size_t new_count;
 	size_t free_count;
};
 
-// GC mark callback for tracked_classes table.
-static int Memory_Profiler_Capture_tracked_classes_mark(st_data_t key, st_data_t value, st_data_t arg) {
+// GC mark callback for tracked table.
+static int Memory_Profiler_Capture_tracked_mark(st_data_t key, st_data_t value, st_data_t arg) {
 	// Mark class as un-movable:
 	// - We don't want to re-index the table if the class moves.
 	// - We don't want objects in `freeobj` to have invalid class pointers (maybe helps).
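
Note: the new `table.h` is added in 1.5.0 but its contents are not part of this diff. Below is a hedged sketch of the entry layout and API that the call sites in capture.c appear to assume; the names are taken from usage, the struct layout is a guess, not the actual header:

// Sketch only: reconstructed from call sites, not the actual table.h.
struct Memory_Profiler_Object_Table_Entry {
	VALUE object;      // Key: the tracked object; 0 marks an empty slot.
	VALUE klass;       // Class recorded at allocation time.
	VALUE data;        // State returned by the user callback, or Qnil.
	VALUE allocations; // Wrapped Memory_Profiler_Capture_Allocations.
};

struct Memory_Profiler_Object_Table {
	size_t capacity; // Total slots (iterated by each_object below).
	size_t count;    // Occupied slots.
	struct Memory_Profiler_Object_Table_Entry *entries; // System malloc, not the GC heap.
};

struct Memory_Profiler_Object_Table *Memory_Profiler_Object_Table_new(size_t capacity);
void Memory_Profiler_Object_Table_free(struct Memory_Profiler_Object_Table *table);
struct Memory_Profiler_Object_Table_Entry *Memory_Profiler_Object_Table_insert(struct Memory_Profiler_Object_Table *table, VALUE object);
struct Memory_Profiler_Object_Table_Entry *Memory_Profiler_Object_Table_lookup(struct Memory_Profiler_Object_Table *table, VALUE object);
void Memory_Profiler_Object_Table_delete_entry(struct Memory_Profiler_Object_Table *table, struct Memory_Profiler_Object_Table_Entry *entry);
size_t Memory_Profiler_Object_Table_size(struct Memory_Profiler_Object_Table *table);
void Memory_Profiler_Object_Table_mark(struct Memory_Profiler_Object_Table *table);
void Memory_Profiler_Object_Table_compact(struct Memory_Profiler_Object_Table *table);
void Memory_Profiler_Object_Table_increment_strong(struct Memory_Profiler_Object_Table *table);
void Memory_Profiler_Object_Table_decrement_strong(struct Memory_Profiler_Object_Table *table);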
@@ -54,16 +59,22 @@ static int Memory_Profiler_Capture_tracked_classes_mark(st_data_t key, st_data_t
 static void Memory_Profiler_Capture_mark(void *ptr) {
 	struct Memory_Profiler_Capture *capture = ptr;
 
-	if (capture->tracked_classes) {
-		st_foreach(capture->tracked_classes, Memory_Profiler_Capture_tracked_classes_mark, 0);
+	if (capture->tracked) {
+		st_foreach(capture->tracked, Memory_Profiler_Capture_tracked_mark, 0);
 	}
+
+	Memory_Profiler_Object_Table_mark(capture->states);
 }
 
 static void Memory_Profiler_Capture_free(void *ptr) {
 	struct Memory_Profiler_Capture *capture = ptr;
 
-	if (capture->tracked_classes) {
-		st_free_table(capture->tracked_classes);
+	if (capture->tracked) {
+		st_free_table(capture->tracked);
+	}
+
+	if (capture->states) {
+		Memory_Profiler_Object_Table_free(capture->states);
 	}
 
 	xfree(capture);
@@ -73,20 +84,20 @@ static size_t Memory_Profiler_Capture_memsize(const void *ptr) {
 	const struct Memory_Profiler_Capture *capture = ptr;
 	size_t size = sizeof(struct Memory_Profiler_Capture);
 
-	if (capture->tracked_classes) {
-		size += capture->tracked_classes->num_entries * (sizeof(st_data_t) + sizeof(struct Memory_Profiler_Capture_Allocations));
+	if (capture->tracked) {
+		size += capture->tracked->num_entries * (sizeof(st_data_t) + sizeof(struct Memory_Profiler_Capture_Allocations));
 	}
 
 	return size;
 }
 
 // Foreach callback for st_foreach_with_replace (iteration logic).
-static int Memory_Profiler_Capture_tracked_classes_foreach(st_data_t key, st_data_t value, st_data_t argp, int error) {
+static int Memory_Profiler_Capture_tracked_foreach(st_data_t key, st_data_t value, st_data_t argp, int error) {
 	return ST_REPLACE;
 }
 
 // Replace callback for st_foreach_with_replace (update logic).
-static int Memory_Profiler_Capture_tracked_classes_update(st_data_t *key, st_data_t *value, st_data_t argp, int existing) {
+static int Memory_Profiler_Capture_tracked_update(st_data_t *key, st_data_t *value, st_data_t argp, int existing) {
 	// Update wrapped Allocations VALUE if it moved:
 	VALUE old_allocations = (VALUE)*value;
 	VALUE new_allocations = rb_gc_location(old_allocations);
@@ -100,12 +111,17 @@ static int Memory_Profiler_Capture_tracked_classes_update(st_data_t *key, st_dat
 static void Memory_Profiler_Capture_compact(void *ptr) {
 	struct Memory_Profiler_Capture *capture = ptr;
 
-	// Update tracked_classes keys and allocations values in-place:
-	if (capture->tracked_classes && capture->tracked_classes->num_entries > 0) {
-		if (st_foreach_with_replace(capture->tracked_classes, Memory_Profiler_Capture_tracked_classes_foreach, Memory_Profiler_Capture_tracked_classes_update, 0)) {
-			rb_raise(rb_eRuntimeError, "tracked_classes modified during GC compaction");
+	// Update tracked keys and allocations values in-place:
+	if (capture->tracked && capture->tracked->num_entries > 0) {
+		if (st_foreach_with_replace(capture->tracked, Memory_Profiler_Capture_tracked_foreach, Memory_Profiler_Capture_tracked_update, 0)) {
+			rb_raise(rb_eRuntimeError, "tracked modified during GC compaction");
 		}
 	}
+
+	// Update custom object table (system malloc, safe during GC)
+	if (capture->states) {
+		Memory_Profiler_Object_Table_compact(capture->states);
+	}
 }
 
 static const rb_data_type_t Memory_Profiler_Capture_type = {
@@ -138,6 +154,9 @@ const char *event_flag_name(rb_event_flag_t event_flag) {
 }
 
 // Process a NEWOBJ event. All allocation tracking logic is here.
+// The object parameter is the actual object being allocated, not its
+// Integer object_id; the event queue retains the object until this
+// handler runs.
 static void Memory_Profiler_Capture_process_newobj(VALUE self, VALUE klass, VALUE object) {
 	struct Memory_Profiler_Capture *capture;
 	TypedData_Get_Struct(self, struct Memory_Profiler_Capture, &Memory_Profiler_Capture_type, capture);
@@ -153,7 +172,7 @@ static void Memory_Profiler_Capture_process_newobj(VALUE self, VALUE klass, VALU
 	VALUE allocations;
 	struct Memory_Profiler_Capture_Allocations *record;
 
-	if (st_lookup(capture->tracked_classes, (st_data_t)klass, &allocations_data)) {
+	if (st_lookup(capture->tracked, (st_data_t)klass, &allocations_data)) {
 		// Existing record
 		allocations = (VALUE)allocations_data;
 		record = Memory_Profiler_Allocations_get(allocations);
@@ -164,65 +183,66 @@ static void Memory_Profiler_Capture_process_newobj(VALUE self, VALUE klass, VALU
 		record->callback = Qnil;
 		record->new_count = 1;
 		record->free_count = 0;
-		record->states = st_init_numtable();
 
 		allocations = Memory_Profiler_Allocations_wrap(record);
-		st_insert(capture->tracked_classes, (st_data_t)klass, (st_data_t)allocations);
+		st_insert(capture->tracked, (st_data_t)klass, (st_data_t)allocations);
 		RB_OBJ_WRITTEN(self, Qnil, klass);
 		RB_OBJ_WRITTEN(self, Qnil, allocations);
 	}
 
-	// Only store state if there's a callback
+	VALUE data = Qnil;
 	if (!NIL_P(record->callback)) {
-		VALUE state = rb_funcall(record->callback, rb_intern("call"), 3, klass, sym_newobj, Qnil);
-
-		// Store state using object as key (works because object is alive):
-		st_insert(record->states, (st_data_t)object, (st_data_t)state);
-	} else {
-		// Store state as nil:
-		st_insert(record->states, (st_data_t)object, (st_data_t)Qnil);
+		data = rb_funcall(record->callback, rb_intern("call"), 3, klass, sym_newobj, Qnil);
 	}
-
+
+	struct Memory_Profiler_Object_Table_Entry *entry = Memory_Profiler_Object_Table_insert(capture->states, object);
+	RB_OBJ_WRITTEN(self, Qnil, object);
+	RB_OBJ_WRITE(self, &entry->klass, klass);
+	RB_OBJ_WRITE(self, &entry->data, data);
+	RB_OBJ_WRITE(self, &entry->allocations, allocations);
+
+	if (DEBUG) fprintf(stderr, "[NEWOBJ] Object inserted into table: %p\n", (void*)object);
+
 	// Resume the capture:
 	capture->paused -= 1;
 }
 
 // Process a FREEOBJ event. All deallocation tracking logic is here.
-static void Memory_Profiler_Capture_process_freeobj(VALUE capture_value, VALUE klass, VALUE object) {
+// The klass parameter is unused; the class is recovered from the object table entry.
+static void Memory_Profiler_Capture_process_freeobj(VALUE capture_value, VALUE unused_klass, VALUE object) {
 	struct Memory_Profiler_Capture *capture;
 	TypedData_Get_Struct(capture_value, struct Memory_Profiler_Capture, &Memory_Profiler_Capture_type, capture);
 
 	// Pause the capture to prevent infinite loop:
 	capture->paused += 1;
 
-	// Look up allocations record for this class
-	st_data_t allocations_data;
-	if (!st_lookup(capture->tracked_classes, (st_data_t)klass, &allocations_data)) {
-		// The class is not tracked, so we are not tracking this object.
+	struct Memory_Profiler_Object_Table_Entry *entry = Memory_Profiler_Object_Table_lookup(capture->states, object);
+
+	if (!entry) {
+		if (DEBUG) fprintf(stderr, "[FREEOBJ] Object not found in table: %p\n", (void*)object);
 		goto done;
+	} else {
+		if (DEBUG) fprintf(stderr, "[FREEOBJ] Object found in table: %p\n", (void*)object);
 	}
 
-	VALUE allocations = (VALUE)allocations_data;
+	VALUE klass = entry->klass;
+	VALUE data = entry->data;
+	VALUE allocations = entry->allocations;
+
+	// Delete by entry pointer (faster - no second lookup!)
+	Memory_Profiler_Object_Table_delete_entry(capture->states, entry);
+
 	struct Memory_Profiler_Capture_Allocations *record = Memory_Profiler_Allocations_get(allocations);
 
-	if (!record->states) {
-		// There is no state table for this class, so we are not tracking it.
-		goto done;
-	}
+	// Increment global free count
	capture->free_count++;
 
-	st_data_t state_data;
-	if (st_delete(record->states, (st_data_t *)&object, &state_data)) {
-		VALUE state = (VALUE)state_data;
-
-		// Increment global free count
-		capture->free_count++;
-
-		// Increment per-class free count
-		record->free_count++;
-
-		if (!NIL_P(record->callback) && !NIL_P(state)) {
-			rb_funcall(record->callback, rb_intern("call"), 3, klass, sym_freeobj, state);
-		}
+	// Increment per-class free count
+	record->free_count++;
+
+	// Call callback if present
+	if (!NIL_P(record->callback) && !NIL_P(data)) {
+		rb_funcall(record->callback, rb_intern("call"), 3, klass, sym_freeobj, data);
 	}
 
done:
@@ -283,11 +303,11 @@ int Memory_Profiler_Capture_trackable_p(VALUE object) {
 
 // Event hook callback with RAW_ARG
 // Signature: (VALUE data, rb_trace_arg_t *trace_arg)
-static void Memory_Profiler_Capture_event_callback(VALUE data, void *ptr) {
+static void Memory_Profiler_Capture_event_callback(VALUE self, void *ptr) {
 	rb_trace_arg_t *trace_arg = (rb_trace_arg_t *)ptr;
 
 	struct Memory_Profiler_Capture *capture;
-	TypedData_Get_Struct(data, struct Memory_Profiler_Capture, &Memory_Profiler_Capture_type, capture);
+	TypedData_Get_Struct(self, struct Memory_Profiler_Capture, &Memory_Profiler_Capture_type, capture);
 
 	VALUE object = rb_tracearg_object(trace_arg);
 
@@ -295,38 +315,23 @@ static void Memory_Profiler_Capture_event_callback(VALUE data, void *ptr) {
 	if (!Memory_Profiler_Capture_trackable_p(object)) return;
 
 	rb_event_flag_t event_flag = rb_tracearg_event_flag(trace_arg);
-	VALUE klass = rb_class_of(object);
-	if (!klass) return;
 
-	if (DEBUG) {
-		const char *klass_name = "(ignored)";
-		if (event_flag == RUBY_INTERNAL_EVENT_NEWOBJ) {
-			klass_name = rb_class2name(klass);
-		}
-		fprintf(stderr, "Memory_Profiler_Capture_event_callback: object=%p, event_flag=%s, klass=%s\n", (void*)object, event_flag_name(event_flag), klass_name);
-	}
-
 	if (event_flag == RUBY_INTERNAL_EVENT_NEWOBJ) {
 		// Skip NEWOBJ if disabled (during callback) to prevent infinite recursion
 		if (capture->paused) return;
 
-		// Check if class is already freed (T_NONE). This can happen when:
-		// It's safe to unconditionally call here:
-		object = rb_obj_id(object);
-
-		Memory_Profiler_Events_enqueue(MEMORY_PROFILER_EVENT_TYPE_NEWOBJ, data, klass, object);
+		VALUE klass = rb_obj_class(object);
+
+		// Skip if klass is not a Class
+		if (rb_type(klass) != RUBY_T_CLASS) return;
+
+		// Enqueue actual object (not object_id) - queue retains it until processed
+		// Ruby 3.5 compatible: no need for FL_SEEN_OBJ_ID or rb_obj_id
+		if (DEBUG) fprintf(stderr, "[NEWOBJ] Enqueuing event for object: %p\n", (void*)object);
+		Memory_Profiler_Events_enqueue(MEMORY_PROFILER_EVENT_TYPE_NEWOBJ, self, klass, object);
 	} else if (event_flag == RUBY_INTERNAL_EVENT_FREEOBJ) {
-		// We only care about objects that have been seen before (i.e. have an object ID):
-		if (RB_FL_TEST(object, FL_SEEN_OBJ_ID)) {
-			// For freeobj, we only care about klasses that we are tracking.
-			// This prevents us from enqueuing klass objects that might be freed.
-			if (!st_lookup(capture->tracked_classes, (st_data_t)klass, NULL)) return;
-
-			// It's only safe to call here if the object already has an object ID.
-			object = rb_obj_id(object);
-
-			Memory_Profiler_Events_enqueue(MEMORY_PROFILER_EVENT_TYPE_FREEOBJ, data, klass, object);
-		}
+		if (DEBUG) fprintf(stderr, "[FREEOBJ] Enqueuing event for object: %p\n", (void*)object);
+		Memory_Profiler_Events_enqueue(MEMORY_PROFILER_EVENT_TYPE_FREEOBJ, self, Qnil, object);
 	}
 }
 
@@ -339,10 +344,17 @@ static VALUE Memory_Profiler_Capture_alloc(VALUE klass) {
 		rb_raise(rb_eRuntimeError, "Failed to allocate Memory::Profiler::Capture");
 	}
 
-	capture->tracked_classes = st_init_numtable();
+	capture->tracked = st_init_numtable();
 
-	if (!capture->tracked_classes) {
-		rb_raise(rb_eRuntimeError, "Failed to initialize hash table");
+	if (!capture->tracked) {
+		rb_raise(rb_eRuntimeError, "Failed to initialize tracked hash table");
+	}
+
+	// Initialize custom object table (uses system malloc, GC-safe)
+	capture->states = Memory_Profiler_Object_Table_new(1024);
+	if (!capture->states) {
+		st_free_table(capture->tracked);
+		rb_raise(rb_eRuntimeError, "Failed to initialize object table");
 	}
 
 	// Initialize allocation tracking counters
@@ -424,7 +436,7 @@ static VALUE Memory_Profiler_Capture_track(int argc, VALUE *argv, VALUE self) {
 	st_data_t allocations_data;
 	VALUE allocations;
 
-	if (st_lookup(capture->tracked_classes, (st_data_t)klass, &allocations_data)) {
+	if (st_lookup(capture->tracked, (st_data_t)klass, &allocations_data)) {
 		allocations = (VALUE)allocations_data;
 		struct Memory_Profiler_Capture_Allocations *record = Memory_Profiler_Allocations_get(allocations);
 		RB_OBJ_WRITE(self, &record->callback, callback);
@@ -433,12 +445,12 @@ static VALUE Memory_Profiler_Capture_track(int argc, VALUE *argv, VALUE self) {
 		RB_OBJ_WRITE(self, &record->callback, callback);
 		record->new_count = 0;
 		record->free_count = 0;
-		record->states = st_init_numtable();
+		// NOTE: States table removed - now at Capture level
 
 		// Wrap the record in a VALUE
 		allocations = Memory_Profiler_Allocations_wrap(record);
 
-		st_insert(capture->tracked_classes, (st_data_t)klass, (st_data_t)allocations);
+		st_insert(capture->tracked, (st_data_t)klass, (st_data_t)allocations);
 		RB_OBJ_WRITTEN(self, Qnil, klass);
 		RB_OBJ_WRITTEN(self, Qnil, allocations);
 	}
@@ -452,7 +464,7 @@ static VALUE Memory_Profiler_Capture_untrack(VALUE self, VALUE klass) {
 	TypedData_Get_Struct(self, struct Memory_Profiler_Capture, &Memory_Profiler_Capture_type, capture);
 
 	st_data_t allocations_data;
-	if (st_delete(capture->tracked_classes, (st_data_t *)&klass, &allocations_data)) {
+	if (st_delete(capture->tracked, (st_data_t *)&klass, &allocations_data)) {
 		// The wrapped Allocations VALUE will be GC'd naturally
 		// No manual cleanup needed
 	}
@@ -465,7 +477,7 @@ static VALUE Memory_Profiler_Capture_tracking_p(VALUE self, VALUE klass) {
 	struct Memory_Profiler_Capture *capture;
 	TypedData_Get_Struct(self, struct Memory_Profiler_Capture, &Memory_Profiler_Capture_type, capture);
 
-	return st_lookup(capture->tracked_classes, (st_data_t)klass, NULL) ? Qtrue : Qfalse;
+	return st_lookup(capture->tracked, (st_data_t)klass, NULL) ? Qtrue : Qfalse;
 }
 
 // Get count of live objects for a specific class (O(1) lookup!)
@@ -474,7 +486,7 @@ static VALUE Memory_Profiler_Capture_retained_count_of(VALUE self, VALUE klass)
 	TypedData_Get_Struct(self, struct Memory_Profiler_Capture, &Memory_Profiler_Capture_type, capture);
 
 	st_data_t allocations_data;
-	if (st_lookup(capture->tracked_classes, (st_data_t)klass, &allocations_data)) {
+	if (st_lookup(capture->tracked, (st_data_t)klass, &allocations_data)) {
 		VALUE allocations = (VALUE)allocations_data;
 		struct Memory_Profiler_Capture_Allocations *record = Memory_Profiler_Allocations_get(allocations);
 		if (record->free_count <= record->new_count) {
@@ -486,7 +498,7 @@ static VALUE Memory_Profiler_Capture_retained_count_of(VALUE self, VALUE klass)
 	}
 
 // Iterator to reset each class record
-static int Memory_Profiler_Capture_tracked_classes_clear(st_data_t key, st_data_t value, st_data_t arg) {
+static int Memory_Profiler_Capture_tracked_clear(st_data_t key, st_data_t value, st_data_t arg) {
 	VALUE allocations = (VALUE)value;
 
 	Memory_Profiler_Allocations_clear(allocations);
@@ -507,8 +519,14 @@ static VALUE Memory_Profiler_Capture_clear(VALUE self) {
 		rb_raise(rb_eRuntimeError, "Cannot clear while capture is running - call stop() first!");
 	}
 
-	// Reset all counts to 0 (don't free, just reset) - pass self for write barriers:
-	st_foreach(capture->tracked_classes, Memory_Profiler_Capture_tracked_classes_clear, 0);
+	// Reset all counts to 0 (don't free, just reset):
+	st_foreach(capture->tracked, Memory_Profiler_Capture_tracked_clear, 0);
+
+	// Clear custom object table by recreating it
+	if (capture->states) {
+		Memory_Profiler_Object_Table_free(capture->states);
+		capture->states = Memory_Profiler_Object_Table_new(1024);
+	}
 
 	// Reset allocation tracking counters
 	capture->new_count = 0;
@@ -535,18 +553,118 @@ static VALUE Memory_Profiler_Capture_each(VALUE self) {
 
 	RETURN_ENUMERATOR(self, 0, 0);
 
-	st_foreach(capture->tracked_classes, Memory_Profiler_Capture_each_allocation, 0);
+	st_foreach(capture->tracked, Memory_Profiler_Capture_each_allocation, 0);
 
 	return self;
 }
 
+// Struct for filtering states during each_object iteration
+struct Memory_Profiler_Each_Object_Arguments {
+	VALUE self;
+
+	// The allocations wrapper to filter by (Qnil = no filter).
+	VALUE allocations;
+};
+
+// Cleanup function to ensure table is made weak again
+static VALUE Memory_Profiler_Capture_each_object_ensure(VALUE arg) {
+	struct Memory_Profiler_Each_Object_Arguments *arguments = (struct Memory_Profiler_Each_Object_Arguments *)arg;
+	struct Memory_Profiler_Capture *capture;
+	TypedData_Get_Struct(arguments->self, struct Memory_Profiler_Capture, &Memory_Profiler_Capture_type, capture);
+
+	// Make table weak again
+	Memory_Profiler_Object_Table_decrement_strong(capture->states);
+
+	return Qnil;
+}
+
+// Main iteration function
+static VALUE Memory_Profiler_Capture_each_object_body(VALUE arg) {
+	struct Memory_Profiler_Each_Object_Arguments *arguments = (struct Memory_Profiler_Each_Object_Arguments *)arg;
+	struct Memory_Profiler_Capture *capture;
+	TypedData_Get_Struct(arguments->self, struct Memory_Profiler_Capture, &Memory_Profiler_Capture_type, capture);
+
+	// Iterate custom object table entries
+	if (capture->states) {
+		if (DEBUG) fprintf(stderr, "[ITER] Iterating table, capacity=%zu, count=%zu\n", capture->states->capacity, capture->states->count);
+
+		for (size_t i = 0; i < capture->states->capacity; i++) {
+			struct Memory_Profiler_Object_Table_Entry *entry = &capture->states->entries[i];
+
+			// Skip empty or deleted slots (0 = not set)
+			if (entry->object == 0) {
+				continue;
+			}
+
+			// Filter by allocations if specified
+			if (!NIL_P(arguments->allocations)) {
+				if (entry->allocations != arguments->allocations) continue;
+			}
+
+			rb_yield_values(2, entry->object, entry->allocations);
+		}
+	}
+
+	return arguments->self;
+}
+
+// Iterate over tracked objects, optionally filtered by class.
+// Called as:
+//   capture.each_object(String) { |object, allocations| ... } # Specific class
+//   capture.each_object { |object, allocations| ... }         # All objects
+//
+// Yields each live tracked object and its allocations wrapper. The table
+// is held strong for the duration of the iteration, so yielded objects
+// cannot be collected mid-iteration. Use Memory::Profiler.address_of to
+// format an object's address as a hex string.
+static VALUE Memory_Profiler_Capture_each_object(int argc, VALUE *argv, VALUE self) {
+	struct Memory_Profiler_Capture *capture;
+	TypedData_Get_Struct(self, struct Memory_Profiler_Capture, &Memory_Profiler_Capture_type, capture);
+
+	VALUE klass;
+	rb_scan_args(argc, argv, "01", &klass);
+
+	RETURN_ENUMERATOR(self, argc, argv);
+
+	// Make table strong so objects won't be collected during iteration
+	Memory_Profiler_Object_Table_increment_strong(capture->states);
+
+	// Process all pending events and run GC to clean up stale objects:
+	Memory_Profiler_Events_process_all();
+
+	// If class provided, look up its allocations wrapper
+	VALUE allocations = Qnil;
+	if (!NIL_P(klass)) {
+		st_data_t allocations_data;
+		if (st_lookup(capture->tracked, (st_data_t)klass, &allocations_data)) {
+			allocations = (VALUE)allocations_data;
+		} else {
+			// Class not tracked - nothing to iterate
+			Memory_Profiler_Object_Table_decrement_strong(capture->states);
+			return self;
+		}
+	}
+
+	// Setup arguments for iteration
+	struct Memory_Profiler_Each_Object_Arguments arguments = {
+		.self = self,
+		.allocations = allocations
+	};
+
+	// Use rb_ensure to guarantee cleanup even if exception is raised
+	return rb_ensure(
+		Memory_Profiler_Capture_each_object_body, (VALUE)&arguments,
+		Memory_Profiler_Capture_each_object_ensure, (VALUE)&arguments
+	);
+}
+
 // Get allocations for a specific class
 static VALUE Memory_Profiler_Capture_aref(VALUE self, VALUE klass) {
 	struct Memory_Profiler_Capture *capture;
 	TypedData_Get_Struct(self, struct Memory_Profiler_Capture, &Memory_Profiler_Capture_type, capture);
 
 	st_data_t allocations_data;
-	if (st_lookup(capture->tracked_classes, (st_data_t)klass, &allocations_data)) {
+	if (st_lookup(capture->tracked, (st_data_t)klass, &allocations_data)) {
 		return (VALUE)allocations_data;
 	}
 
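The increment/decrement "strong" calls above imply the object table is normally weak (entries do not keep their objects alive; dead entries are removed as FREEOBJ events are processed) and is temporarily pinned strong while each_object runs. A hypothetical sketch of how the table's mark function might honor such a counter — the real logic lives in the omitted table.c, and the `strong` field is an assumption:

// Hypothetical sketch: conditional marking driven by an assumed strong counter.
void Memory_Profiler_Object_Table_mark(struct Memory_Profiler_Object_Table *table) {
	if (!table) return;

	for (size_t i = 0; i < table->capacity; i++) {
		struct Memory_Profiler_Object_Table_Entry *entry = &table->entries[i];
		if (entry->object == 0) continue;

		// Associated metadata must always survive:
		rb_gc_mark_movable(entry->klass);
		rb_gc_mark_movable(entry->data);
		rb_gc_mark_movable(entry->allocations);

		// The object itself is only marked while pinned strong, so that
		// each_object can safely yield it; otherwise the table stays weak:
		if (table->strong > 0) {
			rb_gc_mark_movable(entry->object);
		}
	}
}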
@@ -559,20 +677,6 @@ struct Memory_Profiler_Allocations_Statistics {
 	VALUE per_class_counts;
 };
 
-// Iterator callback to count states per class
-static int Memory_Profiler_Capture_count_states(st_data_t key, st_data_t value, st_data_t argument) {
-	struct Memory_Profiler_Allocations_Statistics *statistics = (struct Memory_Profiler_Allocations_Statistics *)argument;
-	VALUE klass = (VALUE)key;
-	VALUE allocations = (VALUE)value;
-	struct Memory_Profiler_Capture_Allocations *record = Memory_Profiler_Allocations_get(allocations);
-
-	size_t states_count = record->states ? record->states->num_entries : 0;
-	statistics->total_tracked_objects += states_count;
-
-	rb_hash_aset(statistics->per_class_counts, klass, SIZET2NUM(states_count));
-	return ST_CONTINUE;
-}
-
 // Get internal statistics for debugging
 // Returns hash with internal state sizes
 static VALUE Memory_Profiler_Capture_statistics(VALUE self) {
@@ -582,18 +686,11 @@ static VALUE Memory_Profiler_Capture_statistics(VALUE self) {
 	VALUE statistics = rb_hash_new();
 
 	// Tracked classes count
-	rb_hash_aset(statistics, ID2SYM(rb_intern("tracked_classes_count")), SIZET2NUM(capture->tracked_classes->num_entries));
-
-	// Count states entries for each tracked class
-	struct Memory_Profiler_Allocations_Statistics allocations_statistics = {
-		.total_tracked_objects = 0,
-		.per_class_counts = rb_hash_new()
-	};
-
-	st_foreach(capture->tracked_classes, Memory_Profiler_Capture_count_states, (st_data_t)&allocations_statistics);
+	rb_hash_aset(statistics, ID2SYM(rb_intern("tracked_count")), SIZET2NUM(capture->tracked->num_entries));
 
-	rb_hash_aset(statistics, ID2SYM(rb_intern("total_tracked_objects")), SIZET2NUM(allocations_statistics.total_tracked_objects));
-	rb_hash_aset(statistics, ID2SYM(rb_intern("tracked_objects_per_class")), allocations_statistics.per_class_counts);
+	// Custom object table size
+	size_t states_size = capture->states ? Memory_Profiler_Object_Table_size(capture->states) : 0;
+	rb_hash_aset(statistics, ID2SYM(rb_intern("object_table_size")), SIZET2NUM(states_size));
 
 	return statistics;
 }
@@ -643,6 +740,7 @@ void Init_Memory_Profiler_Capture(VALUE Memory_Profiler)
 	rb_define_method(Memory_Profiler_Capture, "tracking?", Memory_Profiler_Capture_tracking_p, 1);
 	rb_define_method(Memory_Profiler_Capture, "retained_count_of", Memory_Profiler_Capture_retained_count_of, 1);
 	rb_define_method(Memory_Profiler_Capture, "each", Memory_Profiler_Capture_each, 0);
+	rb_define_method(Memory_Profiler_Capture, "each_object", Memory_Profiler_Capture_each_object, -1); // -1 = variable args
 	rb_define_method(Memory_Profiler_Capture, "[]", Memory_Profiler_Capture_aref, 1);
 	rb_define_method(Memory_Profiler_Capture, "clear", Memory_Profiler_Capture_clear, 0);
 	rb_define_method(Memory_Profiler_Capture, "statistics", Memory_Profiler_Capture_statistics, 0);
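
Because the object table lives in system-malloc memory rather than the GC heap, it is safe to update from the compaction hook shown earlier. A hedged sketch of what Memory_Profiler_Object_Table_compact presumably does, following the "updates addresses during compaction" comment above (the actual implementation is in the omitted table.c):

// Hypothetical sketch: chase moved objects after GC compaction.
void Memory_Profiler_Object_Table_compact(struct Memory_Profiler_Object_Table *table) {
	for (size_t i = 0; i < table->capacity; i++) {
		struct Memory_Profiler_Object_Table_Entry *entry = &table->entries[i];
		if (entry->object == 0) continue;

		// rb_gc_location returns the current address of a possibly-moved object:
		entry->object = rb_gc_location(entry->object);
		entry->klass = rb_gc_location(entry->klass);
		entry->data = rb_gc_location(entry->data);
		entry->allocations = rb_gc_location(entry->allocations);
	}

	// If entries are hashed by object address, the table would also need
	// re-indexing here, since keys may have moved.
}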
@@ -1,11 +1,10 @@
 // Released under the MIT License.
 // Copyright, 2025, by Samuel Williams.
 
-#include "ruby.h"
-#include "ruby/debug.h"
-
 #include "events.h"
 #include "capture.h"
+
+#include <ruby/debug.h>
 #include <stdio.h>
 
 enum {
@@ -105,7 +104,10 @@ static void Memory_Profiler_Events_mark_queue(struct Memory_Profiler_Queue *queu
 
 		rb_gc_mark_movable(event->capture);
 		rb_gc_mark_movable(event->klass);
-		rb_gc_mark_movable(event->object);
+
+		if (event->type == MEMORY_PROFILER_EVENT_TYPE_NEWOBJ) {
+			rb_gc_mark_movable(event->object);
+		}
 	}
 }
 
@@ -130,7 +132,10 @@ static void Memory_Profiler_Events_compact_queue(struct Memory_Profiler_Queue *q
 
 		event->capture = rb_gc_location(event->capture);
 		event->klass = rb_gc_location(event->klass);
-		event->object = rb_gc_location(event->object);
+
+		if (event->type == MEMORY_PROFILER_EVENT_TYPE_NEWOBJ) {
+			event->object = rb_gc_location(event->object);
+		}
 	}
 }
 
@@ -179,7 +184,7 @@ int Memory_Profiler_Events_enqueue(
 	VALUE object
 ) {
 	struct Memory_Profiler_Events *events = Memory_Profiler_Events_instance();
-
+
 	// Always enqueue to the available queue - it won't be touched during processing:
 	struct Memory_Profiler_Event *event = Memory_Profiler_Queue_push(events->available);
 	if (event) {
@@ -20,10 +20,10 @@ struct Memory_Profiler_Event {
 	// Which Capture instance this event belongs to:
 	VALUE capture;
 
-	// The class of the object:
+	// The class of the allocated object (Qnil for FREEOBJ):
 	VALUE klass;
 
-	// The object itself (for NEWOBJ and FREEOBJ):
+	// The object pointer being allocated or freed.
 	VALUE object;
 };
 
@@ -32,7 +32,11 @@
 struct Memory_Profiler_Events* Memory_Profiler_Events_instance(void);
 
 // Enqueue an event to the global queue.
+// object parameter semantics:
+// - NEWOBJ: the actual object being allocated (queue retains it)
+// - FREEOBJ: the object being freed (its class and state are recovered from the capture's object table)
 // Returns non-zero on success, zero on failure.
+// Ruby 3.5 compatible: no FL_SEEN_OBJ_ID or object_id needed
int Memory_Profiler_Events_enqueue(
 	enum Memory_Profiler_Event_Type type,
 	VALUE capture,
@@ -3,15 +3,26 @@
 
 #include "capture.h"
 
+// Return the memory address of an object as a hex string
+// This matches the format used by ObjectSpace.dump_all
+static VALUE Memory_Profiler_address_of(VALUE module, VALUE object) {
+	char buffer[32];
+	snprintf(buffer, sizeof(buffer), "0x%lx", (unsigned long)object);
+	return rb_str_new_cstr(buffer);
+}
+
 void Init_Memory_Profiler(void)
 {
 #ifdef HAVE_RB_EXT_RACTOR_SAFE
-	rb_ext_ractor_safe(true);
+	rb_ext_ractor_safe(true);
 #endif
-
-	VALUE Memory = rb_const_get(rb_cObject, rb_intern("Memory"));
-	VALUE Memory_Profiler = rb_define_module_under(Memory, "Profiler");
-
-	Init_Memory_Profiler_Capture(Memory_Profiler);
+
+	VALUE Memory = rb_const_get(rb_cObject, rb_intern("Memory"));
+	VALUE Memory_Profiler = rb_define_module_under(Memory, "Profiler");
+
+	// Add Memory::Profiler.address_of(object) module function:
+	rb_define_module_function(Memory_Profiler, "address_of", Memory_Profiler_address_of, 1);
+
+	Init_Memory_Profiler_Capture(Memory_Profiler);
 }
 