memory-profiler 1.3.0 → 1.4.0

This diff shows the changes between publicly released package versions as they appear in their respective public registries. It is provided for informational purposes only.
capture.c:

@@ -5,9 +5,8 @@
  #include "allocations.h"
  #include "events.h"
  
- #include "ruby.h"
- #include "ruby/debug.h"
- #include "ruby/st.h"
+ #include <ruby/debug.h>
+ #include <ruby/st.h>
  #include <stdatomic.h>
  #include <stdio.h>
  
@@ -16,10 +15,20 @@ enum {
  };
  
  static VALUE Memory_Profiler_Capture = Qnil;
+ static VALUE rb_mObjectSpace = Qnil;
+ static ID id_id2ref = Qnil;
  
  // Event symbols:
  static VALUE sym_newobj, sym_freeobj;
  
+ // Per-object state stored in the central states table.
+ // Maps object_id => state information.
+ struct Memory_Profiler_Capture_State {
+     VALUE klass; // The class of the allocated object
+     VALUE data; // User-defined state from callback
+     VALUE allocations; // The Allocations wrapper for this class
+ };
+ 
  // Main capture state (per-instance).
  struct Memory_Profiler_Capture {
      // Master switch - is tracking active? (set by start/stop).
@@ -29,15 +38,20 @@ struct Memory_Profiler_Capture {
      int paused;
  
      // Tracked classes: class => VALUE (wrapped Memory_Profiler_Capture_Allocations).
-     st_table *tracked_classes;
+     st_table *tracked;
+ 
+     // Central states table: object_id (Integer) => struct Memory_Profiler_Capture_State*
+     // This is the single source of truth for which objects are being tracked.
+     // FREEOBJ can have class=NULL, so we can't use per-class states tables.
+     st_table *states;
  
      // Total number of allocations and frees seen since tracking started.
      size_t new_count;
      size_t free_count;
  };
  
- // GC mark callback for tracked_classes table.
- static int Memory_Profiler_Capture_tracked_classes_mark(st_data_t key, st_data_t value, st_data_t arg) {
+ // GC mark callback for the tracked table.
+ static int Memory_Profiler_Capture_tracked_mark(st_data_t key, st_data_t value, st_data_t arg) {
      // Mark class as un-movable:
      // - We don't want to re-index the table if the class moves.
      // - We don't want objects in `freeobj` to have invalid class pointers (maybe helps).
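The comment above describes the key design change in this release: one capture-wide table keyed by Integer object_id rather than a states table per class, so FREEOBJ events (which may arrive with a NULL class) can still be resolved. A minimal sketch of that pattern, with illustrative names (the `example_*` identifiers are not the gem's code):

#include <ruby.h>
#include <ruby/st.h>

struct example_state {
    VALUE klass;
    VALUE data;
};

// Created once at startup, e.g. example_states = st_init_numtable();
static st_table *example_states;

static void example_remember(VALUE object, VALUE data) {
    struct example_state *state = ALLOC(struct example_state);
    state->klass = rb_obj_class(object);
    state->data = data;
    // rb_obj_id returns an Integer VALUE that remains a valid key even after
    // the underlying object is freed - which is exactly what FREEOBJ needs:
    st_insert(example_states, (st_data_t)rb_obj_id(object), (st_data_t)state);
}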
@@ -51,19 +65,50 @@ static int Memory_Profiler_Capture_tracked_classes_mark(st_data_t key, st_data_t value, st_data_t arg) {
      return ST_CONTINUE;
  }
  
+ // GC mark callback for the states table.
+ static int Memory_Profiler_Capture_states_mark(st_data_t key, st_data_t value, st_data_t arg) {
+     // Key is the object_id (Integer VALUE) - mark it as un-movable:
+     rb_gc_mark((VALUE)key);
+ 
+     // Value is a struct Memory_Profiler_Capture_State* - mark its VALUEs:
+     struct Memory_Profiler_Capture_State *state = (struct Memory_Profiler_Capture_State *)value;
+     rb_gc_mark(state->klass); // Mark class as un-movable
+     rb_gc_mark_movable(state->data); // State can move
+     rb_gc_mark_movable(state->allocations); // Allocations wrapper can move
+ 
+     return ST_CONTINUE;
+ }
+ 
  static void Memory_Profiler_Capture_mark(void *ptr) {
      struct Memory_Profiler_Capture *capture = ptr;
  
-     if (capture->tracked_classes) {
-         st_foreach(capture->tracked_classes, Memory_Profiler_Capture_tracked_classes_mark, 0);
+     if (capture->tracked) {
+         st_foreach(capture->tracked, Memory_Profiler_Capture_tracked_mark, 0);
+     }
+ 
+     if (capture->states) {
+         st_foreach(capture->states, Memory_Profiler_Capture_states_mark, 0);
      }
  }
  
+ // Helper to free the state structs stored in the states table.
+ static int Memory_Profiler_Capture_states_free_callback(st_data_t key, st_data_t value, st_data_t arg) {
+     struct Memory_Profiler_Capture_State *state = (struct Memory_Profiler_Capture_State *)value;
+     xfree(state);
+     return ST_CONTINUE;
+ }
+ 
  static void Memory_Profiler_Capture_free(void *ptr) {
      struct Memory_Profiler_Capture *capture = ptr;
  
-     if (capture->tracked_classes) {
-         st_free_table(capture->tracked_classes);
+     if (capture->tracked) {
+         st_free_table(capture->tracked);
+     }
+ 
+     if (capture->states) {
+         // Free all states first:
+         st_foreach(capture->states, Memory_Profiler_Capture_states_free_callback, 0);
+         st_free_table(capture->states);
      }
  
      xfree(capture);
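Two details worth noting here. The table keys are pinned with plain rb_gc_mark rather than rb_gc_mark_movable: an object_id too large for a Fixnum is a heap-allocated Bignum, and pinning keeps the table's hashed key bits valid across compaction. And these callbacks only take effect once wired into the type's rb_data_type_t; the registration is outside the hunks shown, so the following pairing is a sketch under assumed (conventional) layout, not code from this diff:

static const rb_data_type_t Example_Capture_type = {
    .wrap_struct_name = "Memory::Profiler::Capture",
    .function = {
        .dmark = Memory_Profiler_Capture_mark,       // traverse VALUEs on GC mark
        .dfree = Memory_Profiler_Capture_free,       // release native memory
        .dsize = Memory_Profiler_Capture_memsize,    // report size to ObjectSpace
        .dcompact = Memory_Profiler_Capture_compact, // fix moved VALUEs after compaction
    },
    .flags = RUBY_TYPED_FREE_IMMEDIATELY | RUBY_TYPED_WB_PROTECTED,
};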
@@ -73,20 +118,20 @@ static size_t Memory_Profiler_Capture_memsize(const void *ptr) {
      const struct Memory_Profiler_Capture *capture = ptr;
      size_t size = sizeof(struct Memory_Profiler_Capture);
  
-     if (capture->tracked_classes) {
-         size += capture->tracked_classes->num_entries * (sizeof(st_data_t) + sizeof(struct Memory_Profiler_Capture_Allocations));
+     if (capture->tracked) {
+         size += capture->tracked->num_entries * (sizeof(st_data_t) + sizeof(struct Memory_Profiler_Capture_Allocations));
      }
  
      return size;
  }
  
  // Foreach callback for st_foreach_with_replace (iteration logic).
- static int Memory_Profiler_Capture_tracked_classes_foreach(st_data_t key, st_data_t value, st_data_t argp, int error) {
+ static int Memory_Profiler_Capture_tracked_foreach(st_data_t key, st_data_t value, st_data_t argp, int error) {
      return ST_REPLACE;
  }
  
  // Replace callback for st_foreach_with_replace (update logic).
- static int Memory_Profiler_Capture_tracked_classes_update(st_data_t *key, st_data_t *value, st_data_t argp, int existing) {
+ static int Memory_Profiler_Capture_tracked_update(st_data_t *key, st_data_t *value, st_data_t argp, int existing) {
      // Update wrapped Allocations VALUE if it moved:
      VALUE old_allocations = (VALUE)*value;
      VALUE new_allocations = rb_gc_location(old_allocations);
@@ -97,13 +142,45 @@ static int Memory_Profiler_Capture_tracked_classes_update(st_data_t *key, st_data_t *value, st_data_t argp, int existing) {
      return ST_CONTINUE;
  }
  
+ // Foreach callback for states table compaction (iteration logic).
+ static int Memory_Profiler_Capture_states_foreach(st_data_t key, st_data_t value, st_data_t argp, int error) {
+     return ST_REPLACE;
+ }
+ 
+ // Replace callback for states table compaction (update logic).
+ static int Memory_Profiler_Capture_states_update(st_data_t *key, st_data_t *value, st_data_t argp, int existing) {
+     // Update VALUEs in the state if they moved:
+     struct Memory_Profiler_Capture_State *state = (struct Memory_Profiler_Capture_State *)*value;
+ 
+     VALUE old_state = state->data;
+     VALUE new_state = rb_gc_location(old_state);
+     if (old_state != new_state) {
+         state->data = new_state;
+     }
+ 
+     VALUE old_allocations = state->allocations;
+     VALUE new_allocations = rb_gc_location(old_allocations);
+     if (old_allocations != new_allocations) {
+         state->allocations = new_allocations;
+     }
+ 
+     return ST_CONTINUE;
+ }
+ 
  static void Memory_Profiler_Capture_compact(void *ptr) {
      struct Memory_Profiler_Capture *capture = ptr;
  
-     // Update tracked_classes keys and allocations values in-place:
-     if (capture->tracked_classes && capture->tracked_classes->num_entries > 0) {
-         if (st_foreach_with_replace(capture->tracked_classes, Memory_Profiler_Capture_tracked_classes_foreach, Memory_Profiler_Capture_tracked_classes_update, 0)) {
-             rb_raise(rb_eRuntimeError, "tracked_classes modified during GC compaction");
+     // Update tracked keys and allocations values in-place:
+     if (capture->tracked && capture->tracked->num_entries > 0) {
+         if (st_foreach_with_replace(capture->tracked, Memory_Profiler_Capture_tracked_foreach, Memory_Profiler_Capture_tracked_update, 0)) {
+             rb_raise(rb_eRuntimeError, "tracked modified during GC compaction");
+         }
+     }
+ 
+     // Update states table state VALUEs in-place:
+     if (capture->states && capture->states->num_entries > 0) {
+         if (st_foreach_with_replace(capture->states, Memory_Profiler_Capture_states_foreach, Memory_Profiler_Capture_states_update, 0)) {
+             rb_raise(rb_eRuntimeError, "states modified during GC compaction");
          }
      }
  }
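The contract being satisfied here: every VALUE marked with rb_gc_mark_movable in the mark callbacks must be re-read via rb_gc_location during compaction, while pinned VALUEs (plain rb_gc_mark) need no fix-up. Reduced to its core, the st_foreach_with_replace idiom looks like this (generic sketch, not from this diff):

// The foreach callback opts every entry in for replacement...
static int example_foreach(st_data_t key, st_data_t value, st_data_t argp, int error) {
    return ST_REPLACE;
}

// ...and the replace callback rewrites the stored VALUE to its new address:
static int example_update(st_data_t *key, st_data_t *value, st_data_t argp, int existing) {
    *value = (st_data_t)rb_gc_location((VALUE)*value);
    return ST_CONTINUE;
}

// Usage: st_foreach_with_replace(table, example_foreach, example_update, 0);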
@@ -138,7 +215,8 @@ const char *event_flag_name(rb_event_flag_t event_flag) {
  }
  
  // Process a NEWOBJ event. All allocation tracking logic is here.
- static void Memory_Profiler_Capture_process_newobj(VALUE self, VALUE klass, VALUE object) {
+ // The object_id parameter is the Integer object_id, NOT the raw object.
+ static void Memory_Profiler_Capture_process_newobj(VALUE self, VALUE klass, VALUE object_id) {
      struct Memory_Profiler_Capture *capture;
      TypedData_Get_Struct(self, struct Memory_Profiler_Capture, &Memory_Profiler_Capture_type, capture);
  
@@ -153,7 +231,7 @@ static void Memory_Profiler_Capture_process_newobj(VALUE self, VALUE klass, VALUE object) {
      VALUE allocations;
      struct Memory_Profiler_Capture_Allocations *record;
  
-     if (st_lookup(capture->tracked_classes, (st_data_t)klass, &allocations_data)) {
+     if (st_lookup(capture->tracked, (st_data_t)klass, &allocations_data)) {
          // Existing record
          allocations = (VALUE)allocations_data;
          record = Memory_Profiler_Allocations_get(allocations);
@@ -164,67 +242,75 @@ static void Memory_Profiler_Capture_process_newobj(VALUE self, VALUE klass, VALUE object) {
          record->callback = Qnil;
          record->new_count = 1;
          record->free_count = 0;
-         record->states = st_init_numtable();
  
          allocations = Memory_Profiler_Allocations_wrap(record);
-         st_insert(capture->tracked_classes, (st_data_t)klass, (st_data_t)allocations);
+         st_insert(capture->tracked, (st_data_t)klass, (st_data_t)allocations);
          RB_OBJ_WRITTEN(self, Qnil, klass);
          RB_OBJ_WRITTEN(self, Qnil, allocations);
      }
  
-     // Only store state if there's a callback
+     // Get state from callback (if present):
+     VALUE data = Qnil;
      if (!NIL_P(record->callback)) {
-         VALUE state = rb_funcall(record->callback, rb_intern("call"), 3, klass, sym_newobj, Qnil);
- 
-         // Store state using object as key (works because object is alive):
-         st_insert(record->states, (st_data_t)object, (st_data_t)state);
-     } else {
-         // Store state as nil:
-         st_insert(record->states, (st_data_t)object, (st_data_t)Qnil);
+         data = rb_funcall(record->callback, rb_intern("call"), 3, klass, sym_newobj, Qnil);
      }
- 
+ 
+     // Create and store the per-object state in the central states table:
+     struct Memory_Profiler_Capture_State *state = ALLOC(struct Memory_Profiler_Capture_State);
+     state->klass = klass;
+     state->data = data;
+     state->allocations = allocations;
+ 
+     st_insert(capture->states, (st_data_t)object_id, (st_data_t)state);
+ 
+     // Write barriers for the VALUEs we're storing:
+     RB_OBJ_WRITTEN(self, Qnil, klass);
+     RB_OBJ_WRITTEN(self, Qnil, data);
+     RB_OBJ_WRITTEN(self, Qnil, allocations);
+ 
      // Resume the capture:
      capture->paused -= 1;
  }
  
  // Process a FREEOBJ event. All deallocation tracking logic is here.
- static void Memory_Profiler_Capture_process_freeobj(VALUE capture_value, VALUE klass, VALUE object) {
+ // The object_id parameter is the Integer object_id, NOT the raw object.
+ // NOTE: the klass parameter can be NULL for FREEOBJ events, so we don't use it!
+ static void Memory_Profiler_Capture_process_freeobj(VALUE capture_value, VALUE klass, VALUE object_id) {
      struct Memory_Profiler_Capture *capture;
      TypedData_Get_Struct(capture_value, struct Memory_Profiler_Capture, &Memory_Profiler_Capture_type, capture);
  
      // Pause the capture to prevent infinite loop:
      capture->paused += 1;
  
-     // Look up allocations record for this class
-     st_data_t allocations_data;
-     if (!st_lookup(capture->tracked_classes, (st_data_t)klass, &allocations_data)) {
-         // The class is not tracked, so we are not tracking this object.
+     // Look up the object_id in the central states table.
+     // We use object_id because klass can be NULL in FREEOBJ events!
+     st_data_t state_data;
+     if (!st_delete(capture->states, (st_data_t *)&object_id, &state_data)) {
+         // This object_id is not in our states table, so we're not tracking it:
         goto done;
      }
  
-     VALUE allocations = (VALUE)allocations_data;
+     // Extract information from the state:
+     struct Memory_Profiler_Capture_State *state = (struct Memory_Profiler_Capture_State *)state_data;
+     klass = state->klass; // Use the class from the state, not the parameter.
+     VALUE data = state->data;
+     VALUE allocations = state->allocations;
      struct Memory_Profiler_Capture_Allocations *record = Memory_Profiler_Allocations_get(allocations);
  
-     if (!record->states) {
-         // There is no state table for this class, so we are not tracking it.
-         goto done;
-     }
+     // Increment global free count:
+     capture->free_count++;
  
-     st_data_t state_data;
-     if (st_delete(record->states, (st_data_t *)&object, &state_data)) {
-         VALUE state = (VALUE)state_data;
- 
-         // Increment global free count
-         capture->free_count++;
- 
-         // Increment per-class free count
-         record->free_count++;
- 
-         if (!NIL_P(record->callback) && !NIL_P(state)) {
-             rb_funcall(record->callback, rb_intern("call"), 3, klass, sym_freeobj, state);
-         }
+     // Increment per-class free count:
+     record->free_count++;
+ 
+     // Call the callback if present:
+     if (!NIL_P(record->callback) && !NIL_P(data)) {
+         rb_funcall(record->callback, rb_intern("call"), 3, klass, sym_freeobj, data);
      }
- 
+ 
+     // Free the state struct:
+     xfree(state);
+ 
  done:
      // Resume the capture:
      capture->paused -= 1;
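Both handlers bracket their work with the same re-entrancy guard: rb_funcall back into Ruby can allocate, which would raise nested NEWOBJ events, so `paused` is incremented around the call and the event callback drops NEWOBJ while it is non-zero. Distilled into a hypothetical helper (not part of this diff):

static void example_call_paused(struct Memory_Profiler_Capture *capture, VALUE callback, VALUE klass, VALUE event, VALUE data) {
    capture->paused += 1;
    // Any allocation the callback performs is now ignored by the NEWOBJ handler:
    rb_funcall(callback, rb_intern("call"), 3, klass, event, data);
    capture->paused -= 1;
}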
@@ -283,11 +369,11 @@ int Memory_Profiler_Capture_trackable_p(VALUE object) {
  
  // Event hook callback with RAW_ARG.
  // Signature: (VALUE data, rb_trace_arg_t *trace_arg)
- static void Memory_Profiler_Capture_event_callback(VALUE data, void *ptr) {
+ static void Memory_Profiler_Capture_event_callback(VALUE self, void *ptr) {
      rb_trace_arg_t *trace_arg = (rb_trace_arg_t *)ptr;
  
      struct Memory_Profiler_Capture *capture;
-     TypedData_Get_Struct(data, struct Memory_Profiler_Capture, &Memory_Profiler_Capture_type, capture);
+     TypedData_Get_Struct(self, struct Memory_Profiler_Capture, &Memory_Profiler_Capture_type, capture);
  
      VALUE object = rb_tracearg_object(trace_arg);
  
@@ -295,37 +381,27 @@ static void Memory_Profiler_Capture_event_callback(VALUE data, void *ptr) {
      if (!Memory_Profiler_Capture_trackable_p(object)) return;
  
      rb_event_flag_t event_flag = rb_tracearg_event_flag(trace_arg);
-     VALUE klass = rb_class_of(object);
-     if (!klass) return;
  
-     if (DEBUG) {
-         const char *klass_name = "(ignored)";
-         if (event_flag == RUBY_INTERNAL_EVENT_NEWOBJ) {
-             klass_name = rb_class2name(klass);
-         }
-         fprintf(stderr, "Memory_Profiler_Capture_event_callback: object=%p, event_flag=%s, klass=%s\n", (void*)object, event_flag_name(event_flag), klass_name);
-     }
- 
      if (event_flag == RUBY_INTERNAL_EVENT_NEWOBJ) {
          // Skip NEWOBJ if disabled (during callback) to prevent infinite recursion:
          if (capture->paused) return;
  
-         // Check if class is already freed (T_NONE). This can happen when:
-         // It's safe to unconditionally call here:
-         object = rb_obj_id(object);
- 
-         Memory_Profiler_Events_enqueue(MEMORY_PROFILER_EVENT_TYPE_NEWOBJ, data, klass, object);
+         VALUE klass = rb_obj_class(object);
+         if (!klass) return;
+ 
+         // Convert to object_id for storage (Integer VALUE).
+         // It's safe to unconditionally call rb_obj_id during NEWOBJ.
+         VALUE object_id = rb_obj_id(object);
+ 
+         Memory_Profiler_Events_enqueue(MEMORY_PROFILER_EVENT_TYPE_NEWOBJ, self, klass, object_id);
      } else if (event_flag == RUBY_INTERNAL_EVENT_FREEOBJ) {
          // We only care about objects that have been seen before (i.e. have an object ID):
          if (RB_FL_TEST(object, FL_SEEN_OBJ_ID)) {
-             // For freeobj, we only care about klasses that we are tracking.
-             // This prevents us from enqueuing klass objects that might be freed.
-             if (!st_lookup(capture->tracked_classes, (st_data_t)klass, NULL)) return;
+             // Convert to object_id for storage (Integer VALUE).
+             // It's only safe to call rb_obj_id if the object already has an object ID.
+             VALUE object_id = rb_obj_id(object);
  
-             // It's only safe to call here if the object already has an object ID.
-             object = rb_obj_id(object);
- 
-             Memory_Profiler_Events_enqueue(MEMORY_PROFILER_EVENT_TYPE_FREEOBJ, data, klass, object);
+             Memory_Profiler_Events_enqueue(MEMORY_PROFILER_EVENT_TYPE_FREEOBJ, self, Qnil, object_id);
          }
      }
  }
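For context, a RAW_ARG hook with this shape is what rb_add_event_hook2 installs when given RUBY_EVENT_HOOK_FLAG_RAW_ARG; the actual registration happens in start/stop, which is outside the hunks shown here, so the following is only a sketch:

rb_add_event_hook2(
    (rb_event_hook_func_t)Memory_Profiler_Capture_event_callback,
    RUBY_INTERNAL_EVENT_NEWOBJ | RUBY_INTERNAL_EVENT_FREEOBJ,
    self,
    RUBY_EVENT_HOOK_FLAG_SAFE | RUBY_EVENT_HOOK_FLAG_RAW_ARG
);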
@@ -339,10 +415,17 @@ static VALUE Memory_Profiler_Capture_alloc(VALUE klass) {
          rb_raise(rb_eRuntimeError, "Failed to allocate Memory::Profiler::Capture");
      }
  
-     capture->tracked_classes = st_init_numtable();
+     capture->tracked = st_init_numtable();
+ 
+     if (!capture->tracked) {
+         rb_raise(rb_eRuntimeError, "Failed to initialize tracked hash table");
+     }
+ 
+     // Initialize the central states table (object_id => struct Memory_Profiler_Capture_State*):
+     capture->states = st_init_numtable();
  
-     if (!capture->tracked_classes) {
-         rb_raise(rb_eRuntimeError, "Failed to initialize hash table");
+     if (!capture->states) {
+         rb_raise(rb_eRuntimeError, "Failed to initialize states hash table");
      }
  
      // Initialize allocation tracking counters:
@@ -424,7 +507,7 @@ static VALUE Memory_Profiler_Capture_track(int argc, VALUE *argv, VALUE self) {
      st_data_t allocations_data;
      VALUE allocations;
  
-     if (st_lookup(capture->tracked_classes, (st_data_t)klass, &allocations_data)) {
+     if (st_lookup(capture->tracked, (st_data_t)klass, &allocations_data)) {
          allocations = (VALUE)allocations_data;
          struct Memory_Profiler_Capture_Allocations *record = Memory_Profiler_Allocations_get(allocations);
          RB_OBJ_WRITE(self, &record->callback, callback);
@@ -433,12 +516,12 @@ static VALUE Memory_Profiler_Capture_track(int argc, VALUE *argv, VALUE self) {
          RB_OBJ_WRITE(self, &record->callback, callback);
          record->new_count = 0;
          record->free_count = 0;
-         record->states = st_init_numtable();
+         // NOTE: States table removed - now at Capture level.
  
          // Wrap the record in a VALUE:
          allocations = Memory_Profiler_Allocations_wrap(record);
  
-         st_insert(capture->tracked_classes, (st_data_t)klass, (st_data_t)allocations);
+         st_insert(capture->tracked, (st_data_t)klass, (st_data_t)allocations);
          RB_OBJ_WRITTEN(self, Qnil, klass);
          RB_OBJ_WRITTEN(self, Qnil, allocations);
      }
@@ -452,7 +535,7 @@ static VALUE Memory_Profiler_Capture_untrack(VALUE self, VALUE klass) {
      TypedData_Get_Struct(self, struct Memory_Profiler_Capture, &Memory_Profiler_Capture_type, capture);
  
      st_data_t allocations_data;
-     if (st_delete(capture->tracked_classes, (st_data_t *)&klass, &allocations_data)) {
+     if (st_delete(capture->tracked, (st_data_t *)&klass, &allocations_data)) {
          // The wrapped Allocations VALUE will be GC'd naturally.
          // No manual cleanup needed.
      }
@@ -465,7 +548,7 @@ static VALUE Memory_Profiler_Capture_tracking_p(VALUE self, VALUE klass) {
      struct Memory_Profiler_Capture *capture;
      TypedData_Get_Struct(self, struct Memory_Profiler_Capture, &Memory_Profiler_Capture_type, capture);
  
-     return st_lookup(capture->tracked_classes, (st_data_t)klass, NULL) ? Qtrue : Qfalse;
+     return st_lookup(capture->tracked, (st_data_t)klass, NULL) ? Qtrue : Qfalse;
  }
  
  // Get count of live objects for a specific class (O(1) lookup!)
@@ -474,7 +557,7 @@ static VALUE Memory_Profiler_Capture_retained_count_of(VALUE self, VALUE klass) {
      TypedData_Get_Struct(self, struct Memory_Profiler_Capture, &Memory_Profiler_Capture_type, capture);
  
      st_data_t allocations_data;
-     if (st_lookup(capture->tracked_classes, (st_data_t)klass, &allocations_data)) {
+     if (st_lookup(capture->tracked, (st_data_t)klass, &allocations_data)) {
          VALUE allocations = (VALUE)allocations_data;
          struct Memory_Profiler_Capture_Allocations *record = Memory_Profiler_Allocations_get(allocations);
          if (record->free_count <= record->new_count) {
@@ -486,7 +569,7 @@ static VALUE Memory_Profiler_Capture_retained_count_of(VALUE self, VALUE klass) {
  }
  
  // Iterator to reset each class record.
- static int Memory_Profiler_Capture_tracked_classes_clear(st_data_t key, st_data_t value, st_data_t arg) {
+ static int Memory_Profiler_Capture_tracked_clear(st_data_t key, st_data_t value, st_data_t arg) {
      VALUE allocations = (VALUE)value;
  
      Memory_Profiler_Allocations_clear(allocations);
@@ -508,7 +591,13 @@ static VALUE Memory_Profiler_Capture_clear(VALUE self) {
      }
  
      // Reset all counts to 0 (don't free, just reset) - pass self for write barriers:
-     st_foreach(capture->tracked_classes, Memory_Profiler_Capture_tracked_classes_clear, 0);
+     st_foreach(capture->tracked, Memory_Profiler_Capture_tracked_clear, 0);
+ 
+     // Clear the central states table - free all states and clear the table:
+     if (capture->states) {
+         st_foreach(capture->states, Memory_Profiler_Capture_states_free_callback, 0);
+         st_clear(capture->states);
+     }
  
      // Reset allocation tracking counters:
      capture->new_count = 0;
@@ -535,7 +624,80 @@ static VALUE Memory_Profiler_Capture_each(VALUE self) {
  
      RETURN_ENUMERATOR(self, 0, 0);
  
-     st_foreach(capture->tracked_classes, Memory_Profiler_Capture_each_allocation, 0);
+     st_foreach(capture->tracked, Memory_Profiler_Capture_each_allocation, 0);
+ 
+     return self;
+ }
+ 
+ // Struct for filtering states by allocations wrapper during iteration.
+ struct Memory_Profiler_Each_Object_Args {
+     VALUE allocations; // The allocations wrapper to filter by (Qnil = no filter).
+ };
+ 
+ // Iterator callback for each_object - yields object and state.
+ static int Memory_Profiler_Capture_each_object_callback(st_data_t key, st_data_t value, st_data_t arg) {
+     VALUE object_id = (VALUE)key;
+     struct Memory_Profiler_Capture_State *state = (struct Memory_Profiler_Capture_State *)value;
+     struct Memory_Profiler_Each_Object_Args *args = (struct Memory_Profiler_Each_Object_Args *)arg;
+ 
+     // Filter by allocations if specified:
+     if (!NIL_P(args->allocations) && state->allocations != args->allocations) {
+         return ST_CONTINUE;
+     }
+ 
+     // Convert object_id to object using _id2ref:
+     VALUE object = rb_funcall(rb_mObjectSpace, id_id2ref, 1, object_id);
+ 
+     // Yield object and state:
+     rb_yield_values(2, object, state->data);
+ 
+     return ST_CONTINUE;
+ }
+ 
+ // Iterate over tracked objects, optionally filtered by class.
+ // Called as:
+ //   capture.each_object(String) { |object, state| ... } # Specific class
+ //   capture.each_object { |object, state| ... }         # All objects
+ static VALUE Memory_Profiler_Capture_each_object(int argc, VALUE *argv, VALUE self) {
+     struct Memory_Profiler_Capture *capture;
+     TypedData_Get_Struct(self, struct Memory_Profiler_Capture, &Memory_Profiler_Capture_type, capture);
+ 
+     VALUE klass;
+     rb_scan_args(argc, argv, "01", &klass);
+ 
+     RETURN_ENUMERATOR(self, argc, argv);
+ 
+     // Process all pending events and run GC to clean up stale object_ids:
+     Memory_Profiler_Events_process_all();
+     rb_gc_start();
+     VALUE was_enabled = rb_gc_disable();
+     Memory_Profiler_Events_process_all();
+ 
+     // If a class was provided, look up its allocations wrapper:
+     VALUE allocations = Qnil;
+     if (!NIL_P(klass)) {
+         st_data_t allocations_data;
+         if (st_lookup(capture->tracked, (st_data_t)klass, &allocations_data)) {
+             allocations = (VALUE)allocations_data;
+         } else {
+             // Class not tracked - nothing to iterate:
+             if (RTEST(was_enabled)) {
+                 rb_gc_enable();
+             }
+             return self;
+         }
+     }
+ 
+     // Iterate the states table, optionally filtering by allocations wrapper:
+     struct Memory_Profiler_Each_Object_Args args = { .allocations = allocations };
+ 
+     if (capture->states) {
+         st_foreach(capture->states, Memory_Profiler_Capture_each_object_callback, (st_data_t)&args);
+     }
+ 
+     if (RTEST(was_enabled)) {
+         rb_gc_enable();
+     }
  
      return self;
  }
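ObjectSpace._id2ref raises RangeError for an id whose object has been recycled; that is why the method above drains pending FREEOBJ events and runs GC before iterating, and keeps GC disabled during the walk. A defensive variant could wrap the lookup like this (hypothetical helper, not part of this diff):

static VALUE example_id2ref_body(VALUE object_id) {
    return rb_funcall(rb_mObjectSpace, id_id2ref, 1, object_id);
}

static VALUE example_id2ref_rescue(VALUE arg, VALUE exception) {
    return Qnil; // The id was recycled; treat the object as already gone.
}

static VALUE example_id2ref(VALUE object_id) {
    return rb_rescue2(example_id2ref_body, object_id, example_id2ref_rescue, Qnil, rb_eRangeError, (VALUE)0);
}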
@@ -546,7 +708,7 @@ static VALUE Memory_Profiler_Capture_aref(VALUE self, VALUE klass) {
      TypedData_Get_Struct(self, struct Memory_Profiler_Capture, &Memory_Profiler_Capture_type, capture);
  
      st_data_t allocations_data;
-     if (st_lookup(capture->tracked_classes, (st_data_t)klass, &allocations_data)) {
+     if (st_lookup(capture->tracked, (st_data_t)klass, &allocations_data)) {
          return (VALUE)allocations_data;
      }
  
@@ -559,17 +721,20 @@ struct Memory_Profiler_Allocations_Statistics {
      VALUE per_class_counts;
  };
  
- // Iterator callback to count states per class
+ // Iterator callback to count states per class (from the central states table)
  static int Memory_Profiler_Capture_count_states(st_data_t key, st_data_t value, st_data_t argument) {
      struct Memory_Profiler_Allocations_Statistics *statistics = (struct Memory_Profiler_Allocations_Statistics *)argument;
-     VALUE klass = (VALUE)key;
-     VALUE allocations = (VALUE)value;
-     struct Memory_Profiler_Capture_Allocations *record = Memory_Profiler_Allocations_get(allocations);
+     struct Memory_Profiler_Capture_State *state = (struct Memory_Profiler_Capture_State *)value;
+     VALUE klass = state->klass;
+ 
+     // Increment total count:
+     statistics->total_tracked_objects++;
  
-     size_t states_count = record->states ? record->states->num_entries : 0;
-     statistics->total_tracked_objects += states_count;
+     // Increment per-class count:
+     VALUE current_count = rb_hash_lookup(statistics->per_class_counts, klass);
+     size_t count = NIL_P(current_count) ? 1 : NUM2SIZET(current_count) + 1;
+     rb_hash_aset(statistics->per_class_counts, klass, SIZET2NUM(count));
  
-     rb_hash_aset(statistics->per_class_counts, klass, SIZET2NUM(states_count));
      return ST_CONTINUE;
  }
  
@@ -582,15 +747,21 @@ static VALUE Memory_Profiler_Capture_statistics(VALUE self) {
      VALUE statistics = rb_hash_new();
  
      // Tracked classes count:
-     rb_hash_aset(statistics, ID2SYM(rb_intern("tracked_classes_count")), SIZET2NUM(capture->tracked_classes->num_entries));
+     rb_hash_aset(statistics, ID2SYM(rb_intern("tracked_count")), SIZET2NUM(capture->tracked->num_entries));
+ 
+     // Total states table size:
+     size_t states_table_size = capture->states ? capture->states->num_entries : 0;
+     rb_hash_aset(statistics, ID2SYM(rb_intern("states_table_size")), SIZET2NUM(states_table_size));
  
-     // Count states entries for each tracked class
+     // Count states entries for each class (iterate the central states table):
      struct Memory_Profiler_Allocations_Statistics allocations_statistics = {
          .total_tracked_objects = 0,
          .per_class_counts = rb_hash_new()
      };
  
-     st_foreach(capture->tracked_classes, Memory_Profiler_Capture_count_states, (st_data_t)&allocations_statistics);
+     if (capture->states) {
+         st_foreach(capture->states, Memory_Profiler_Capture_count_states, (st_data_t)&allocations_statistics);
+     }
  
      rb_hash_aset(statistics, ID2SYM(rb_intern("total_tracked_objects")), SIZET2NUM(allocations_statistics.total_tracked_objects));
      rb_hash_aset(statistics, ID2SYM(rb_intern("tracked_objects_per_class")), allocations_statistics.per_class_counts);
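Net effect on the statistics hash: the tracked_classes_count key is renamed to tracked_count, a new states_table_size key reports the size of the central states table, and per-class counts are now accumulated by walking that one table instead of summing per-class state tables. Based on the keys set above, the returned hash has this shape:

// :tracked_count              => number of tracked classes
// :states_table_size          => entries in the central states table (new in 1.4.0)
// :total_tracked_objects      => live tracked objects across all classes
// :tracked_objects_per_class  => { Class => live count }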
@@ -643,6 +814,7 @@ void Init_Memory_Profiler_Capture(VALUE Memory_Profiler)
      rb_define_method(Memory_Profiler_Capture, "tracking?", Memory_Profiler_Capture_tracking_p, 1);
      rb_define_method(Memory_Profiler_Capture, "retained_count_of", Memory_Profiler_Capture_retained_count_of, 1);
      rb_define_method(Memory_Profiler_Capture, "each", Memory_Profiler_Capture_each, 0);
+     rb_define_method(Memory_Profiler_Capture, "each_object", Memory_Profiler_Capture_each_object, -1); // -1 = variable args
      rb_define_method(Memory_Profiler_Capture, "[]", Memory_Profiler_Capture_aref, 1);
      rb_define_method(Memory_Profiler_Capture, "clear", Memory_Profiler_Capture_clear, 0);
      rb_define_method(Memory_Profiler_Capture, "statistics", Memory_Profiler_Capture_statistics, 0);
@@ -650,6 +822,11 @@ void Init_Memory_Profiler_Capture(VALUE Memory_Profiler)
      rb_define_method(Memory_Profiler_Capture, "free_count", Memory_Profiler_Capture_free_count, 0);
      rb_define_method(Memory_Profiler_Capture, "retained_count", Memory_Profiler_Capture_retained_count, 0);
  
+     // Cache the ObjectSpace module for _id2ref calls:
+     rb_mObjectSpace = rb_const_get(rb_cObject, rb_intern("ObjectSpace"));
+     rb_gc_register_mark_object(rb_mObjectSpace);
+     id_id2ref = rb_intern("_id2ref");
+ 
      // Initialize Allocations class:
      Init_Memory_Profiler_Allocations(Memory_Profiler);
  }
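A note on the registration above: a plain C global like rb_mObjectSpace is invisible to the GC, so rb_gc_register_mark_object is what keeps the cached module reachable, and registered objects are marked un-movably, which protects the cached pointer across compaction. (IDs returned by rb_intern need no such protection.)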
events.c:

@@ -1,11 +1,10 @@
  // Released under the MIT License.
  // Copyright, 2025, by Samuel Williams.
  
- #include "ruby.h"
- #include "ruby/debug.h"
- 
  #include "events.h"
  #include "capture.h"
+ 
+ #include <ruby/debug.h>
  #include <stdio.h>
  
  enum {
@@ -176,10 +175,10 @@ int Memory_Profiler_Events_enqueue(
      enum Memory_Profiler_Event_Type type,
      VALUE capture,
      VALUE klass,
-     VALUE object
+     VALUE object_id
  ) {
      struct Memory_Profiler_Events *events = Memory_Profiler_Events_instance();
- 
+ 
      // Always enqueue to the available queue - it won't be touched during processing:
      struct Memory_Profiler_Event *event = Memory_Profiler_Queue_push(events->available);
      if (event) {
@@ -188,7 +187,7 @@ int Memory_Profiler_Events_enqueue(
          // Use write barriers when storing VALUEs (required for RUBY_TYPED_WB_PROTECTED):
          RB_OBJ_WRITE(events->self, &event->capture, capture);
          RB_OBJ_WRITE(events->self, &event->klass, klass);
-         RB_OBJ_WRITE(events->self, &event->object, object);
+         RB_OBJ_WRITE(events->self, &event->object, object_id);
  
          if (DEBUG) fprintf(stderr, "Queued %s to available queue, size: %zu\n",
              Memory_Profiler_Event_Type_name(type), events->available->count);