memory-profiler 1.1.1 → 1.1.3

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -2,6 +2,8 @@
  // Copyright, 2025, by Samuel Williams.
  
  #include "capture.h"
+ #include "allocations.h"
+ #include "queue.h"
  
  #include "ruby.h"
  #include "ruby/debug.h"
@@ -14,57 +16,47 @@ enum {
  };
  
  static VALUE Memory_Profiler_Capture = Qnil;
- static VALUE Memory_Profiler_Allocations = Qnil;
  
  // Event symbols
  static VALUE sym_newobj;
  static VALUE sym_freeobj;
  
- // Per-class allocation tracking record
- struct Memory_Profiler_Capture_Allocations {
- 	VALUE callback; // Optional Ruby proc/lambda to call on allocation
- 	size_t new_count; // Total allocations seen since tracking started
- 	size_t free_count; // Total frees seen since tracking started
- 	// Live count = new_count - free_count
-
- 	// For detailed tracking: map object (VALUE) => state (VALUE)
- 	// State is returned from callback on :newobj and passed back on :freeobj
- 	st_table *object_states;
+ // Queue item - freed object data to be processed after GC
+ struct Memory_Profiler_Queue_Item {
+ 	// The class of the freed object:
+ 	VALUE klass;
+
+ 	// The Allocations wrapper:
+ 	VALUE allocations;
+
+ 	// The state returned from callback on newobj:
+ 	VALUE state;
  };
  
  // Main capture state
  struct Memory_Profiler_Capture {
- 	// class => Memory_Profiler_Capture_Allocations.
+ 	// class => VALUE (wrapped Memory_Profiler_Capture_Allocations).
  	st_table *tracked_classes;
  
  	// Is tracking enabled (via start/stop):
  	int enabled;
+
+ 	// Queue for freed objects (processed after GC via postponed job)
+ 	struct Memory_Profiler_Queue freed_queue;
+
+ 	// Handle for the postponed job
+ 	rb_postponed_job_handle_t postponed_job_handle;
  };
  
- // Helper to mark object_states table values
- static int Memory_Profiler_Capture_mark_state(st_data_t key, st_data_t value, st_data_t arg) {
- 	// key is VALUE (object) - don't mark it, we're just using it as a key
- 	// value is VALUE (state) - mark it as movable
- 	rb_gc_mark_movable((VALUE)value);
- 	return ST_CONTINUE;
- }
-
  // GC mark callback for tracked_classes table
- static int Memory_Profiler_Capture_mark_class(st_data_t key, st_data_t value, st_data_t arg) {
- 	VALUE klass = (VALUE)key;
-
+ static int Memory_Profiler_Capture_tracked_classes_mark(st_data_t key, st_data_t value, st_data_t arg) {
  	// Mark class as un-movable as we don't want it moving in freeobj.
+ 	VALUE klass = (VALUE)key;
  	rb_gc_mark(klass);
  
- 	struct Memory_Profiler_Capture_Allocations *record = (struct Memory_Profiler_Capture_Allocations *)value;
- 	if (!NIL_P(record->callback)) {
- 		rb_gc_mark_movable(record->callback); // Mark callback as movable
- 	}
-
- 	// Mark object_states table if it exists
- 	if (record->object_states) {
- 		st_foreach(record->object_states, Memory_Profiler_Capture_mark_state, 0);
- 	}
+ 	// Mark the wrapped Allocations VALUE (its own mark function will handle internal refs)
+ 	VALUE allocations = (VALUE)value;
+ 	rb_gc_mark_movable(allocations);
  
  	return ST_CONTINUE;
  }
@@ -78,32 +70,32 @@ static void Memory_Profiler_Capture_mark(void *ptr) {
  	}
  
  	if (capture->tracked_classes) {
- 		st_foreach(capture->tracked_classes, Memory_Profiler_Capture_mark_class, 0);
+ 		st_foreach(capture->tracked_classes, Memory_Profiler_Capture_tracked_classes_mark, 0);
  	}
- }
+
+ 	// Mark freed objects in the queue:
+ 	for (size_t i = 0; i < capture->freed_queue.count; i++) {
+ 		struct Memory_Profiler_Queue_Item *freed = Memory_Profiler_Queue_at(&capture->freed_queue, i);
+ 		rb_gc_mark_movable(freed->klass);
+ 		rb_gc_mark_movable(freed->allocations);
  
- // Iterator to free each class record
- static int Memory_Profiler_Capture_free_class_record(st_data_t key, st_data_t value, st_data_t arg) {
- 	struct Memory_Profiler_Capture_Allocations *record = (struct Memory_Profiler_Capture_Allocations *)value;
- 	if (record->object_states) {
- 		st_free_table(record->object_states);
+ 		if (freed->state) {
+ 			rb_gc_mark_movable(freed->state);
+ 		}
  	}
- 	xfree(record);
- 	return ST_CONTINUE;
  }
  
  // GC free function
  static void Memory_Profiler_Capture_free(void *ptr) {
  	struct Memory_Profiler_Capture *capture = ptr;
-
- 	// Event hooks must be removed via stop() before object is freed
- 	// Ruby will automatically remove hooks when the VALUE is GC'd
-
+
  	if (capture->tracked_classes) {
- 		st_foreach(capture->tracked_classes, Memory_Profiler_Capture_free_class_record, 0);
  		st_free_table(capture->tracked_classes);
  	}
  
+ 	// Free the queue (elements are stored directly, just free the queue)
+ 	Memory_Profiler_Queue_free(&capture->freed_queue);
+
  	xfree(capture);
  }
  
@@ -116,12 +108,20 @@ static size_t Memory_Profiler_Capture_memsize(const void *ptr) {
  		size += capture->tracked_classes->num_entries * (sizeof(st_data_t) + sizeof(struct Memory_Profiler_Capture_Allocations));
  	}
  
+ 	// Add size of freed queue (elements stored directly)
+ 	size += capture->freed_queue.capacity * capture->freed_queue.element_size;
+
  	return size;
  }
  
+ // Foreach callback for st_foreach_with_replace (iteration logic)
+ static int Memory_Profiler_Capture_tracked_classes_foreach(st_data_t key, st_data_t value, st_data_t argp, int error) {
+ 	// Return ST_REPLACE to trigger the replace callback for each entry
+ 	return ST_REPLACE;
+ }
  
- // Callback for st_foreach_with_replace to update class keys and callback values
- static int Memory_Profiler_Capture_update_refs(st_data_t *key, st_data_t *value, st_data_t argp, int existing) {
+ // Replace callback for st_foreach_with_replace (update logic)
+ static int Memory_Profiler_Capture_tracked_classes_update(st_data_t *key, st_data_t *value, st_data_t argp, int existing) {
  	// Update class key if it moved
  	VALUE old_klass = (VALUE)*key;
  	VALUE new_klass = rb_gc_location(old_klass);
@@ -129,14 +129,11 @@ static int Memory_Profiler_Capture_update_refs(st_data_t *key, st_data_t *value,
  		*key = (st_data_t)new_klass;
  	}
  
- 	// Update callback if it moved
- 	struct Memory_Profiler_Capture_Allocations *record = (struct Memory_Profiler_Capture_Allocations *)*value;
- 	if (!NIL_P(record->callback)) {
- 		VALUE old_callback = record->callback;
- 		VALUE new_callback = rb_gc_location(old_callback);
- 		if (old_callback != new_callback) {
- 			record->callback = new_callback;
- 		}
+ 	// Update wrapped Allocations VALUE if it moved (its own compact function handles internal refs)
+ 	VALUE old_allocations = (VALUE)*value;
+ 	VALUE new_allocations = rb_gc_location(old_allocations);
+ 	if (old_allocations != new_allocations) {
+ 		*value = (st_data_t)new_allocations;
  	}
  
  	return ST_CONTINUE;
@@ -148,10 +145,20 @@ static void Memory_Profiler_Capture_compact(void *ptr) {
  
  	// Update tracked_classes keys and callback values in-place
  	if (capture->tracked_classes && capture->tracked_classes->num_entries > 0) {
- 		if (st_foreach_with_replace(capture->tracked_classes, NULL, Memory_Profiler_Capture_update_refs, 0)) {
+ 		if (st_foreach_with_replace(capture->tracked_classes, Memory_Profiler_Capture_tracked_classes_foreach, Memory_Profiler_Capture_tracked_classes_update, 0)) {
  			rb_raise(rb_eRuntimeError, "tracked_classes modified during GC compaction");
  		}
  	}
+
+ 	// Update freed objects in the queue
+ 	for (size_t i = 0; i < capture->freed_queue.count; i++) {
+ 		struct Memory_Profiler_Queue_Item *freed = Memory_Profiler_Queue_at(&capture->freed_queue, i);
+
+ 		// Update all VALUEs if they moved during compaction
+ 		freed->klass = rb_gc_location(freed->klass);
+ 		freed->allocations = rb_gc_location(freed->allocations);
+ 		freed->state = rb_gc_location(freed->state);
+ 	}
  }
  
  static const rb_data_type_t Memory_Profiler_Capture_type = {
@@ -183,11 +190,42 @@ const char *event_flag_name(rb_event_flag_t event_flag) {
  	}
  }
  
+ // Postponed job callback - processes queued freed objects
+ // This runs after GC completes, when it's safe to call Ruby code
+ static void Memory_Profiler_Capture_process_freed_queue(void *arg) {
+ 	VALUE self = (VALUE)arg;
+ 	struct Memory_Profiler_Capture *capture;
+ 	TypedData_Get_Struct(self, struct Memory_Profiler_Capture, &Memory_Profiler_Capture_type, capture);
+
+ 	if (DEBUG) {
+ 		fprintf(stderr, "Processing freed queue with %zu entries\n", capture->freed_queue.count);
+ 	}
+
+ 	// Process all freed objects in the queue
+ 	for (size_t i = 0; i < capture->freed_queue.count; i++) {
+ 		struct Memory_Profiler_Queue_Item *freed = Memory_Profiler_Queue_at(&capture->freed_queue, i);
+ 		VALUE klass = freed->klass;
+ 		VALUE allocations = freed->allocations;
+ 		VALUE state = freed->state;
+
+ 		struct Memory_Profiler_Capture_Allocations *record = Memory_Profiler_Allocations_get(allocations);
+
+ 		// Call the Ruby callback with (klass, :freeobj, state)
+ 		if (!NIL_P(record->callback)) {
+ 			rb_funcall(record->callback, rb_intern("call"), 3, klass, sym_freeobj, state);
+ 		}
+ 	}
+
+ 	// Clear the queue (elements are reused on next cycle)
+ 	Memory_Profiler_Queue_clear(&capture->freed_queue);
+ }
+
  // Handler for NEWOBJ event
- static void Memory_Profiler_Capture_newobj_handler(struct Memory_Profiler_Capture *capture, VALUE klass, VALUE object) {
- 	st_data_t record_data;
- 	if (st_lookup(capture->tracked_classes, (st_data_t)klass, &record_data)) {
- 		struct Memory_Profiler_Capture_Allocations *record = (struct Memory_Profiler_Capture_Allocations *)record_data;
+ static void Memory_Profiler_Capture_newobj_handler(VALUE self, struct Memory_Profiler_Capture *capture, VALUE klass, VALUE object) {
+ 	st_data_t allocations_data;
+ 	if (st_lookup(capture->tracked_classes, (st_data_t)klass, &allocations_data)) {
+ 		VALUE allocations = (VALUE)allocations_data;
+ 		struct Memory_Profiler_Capture_Allocations *record = Memory_Profiler_Allocations_get(allocations);
  		record->new_count++;
  		if (!NIL_P(record->callback)) {
  			// Invoke callback - runs during NEWOBJ with GC disabled
@@ -207,6 +245,8 @@ static void Memory_Profiler_Capture_newobj_handler(struct Memory_Profiler_Captur
  					record->object_states = st_init_numtable();
  				}
  				st_insert(record->object_states, (st_data_t)object, (st_data_t)state);
+ 				// Notify GC about the state VALUE stored in the table
+ 				RB_OBJ_WRITTEN(self, Qnil, state);
  			}
  		}
  	} else {
@@ -216,23 +256,48 @@ static void Memory_Profiler_Capture_newobj_handler(struct Memory_Profiler_Captur
  		record->new_count = 1; // This is the first allocation
  		record->free_count = 0;
  		record->object_states = NULL;
- 		st_insert(capture->tracked_classes, (st_data_t)klass, (st_data_t)record);
+
+ 		// Wrap the record in a VALUE
+ 		VALUE allocations = Memory_Profiler_Allocations_wrap(record);
+
+ 		st_insert(capture->tracked_classes, (st_data_t)klass, (st_data_t)allocations);
+ 		// Notify GC about the class VALUE stored as key in the table
+ 		RB_OBJ_WRITTEN(self, Qnil, klass);
+ 		// Notify GC about the allocations VALUE stored as value in the table
+ 		RB_OBJ_WRITTEN(self, Qnil, allocations);
  	}
  }
  
  // Handler for FREEOBJ event
- static void Memory_Profiler_Capture_freeobj_handler(struct Memory_Profiler_Capture *capture, VALUE klass, VALUE object) {
- 	st_data_t record_data;
- 	if (st_lookup(capture->tracked_classes, (st_data_t)klass, &record_data)) {
- 		struct Memory_Profiler_Capture_Allocations *record = (struct Memory_Profiler_Capture_Allocations *)record_data;
+ // CRITICAL: This runs during GC when no Ruby code can be executed!
+ // We MUST NOT call rb_funcall or any Ruby code here - just queue the work.
+ static void Memory_Profiler_Capture_freeobj_handler(VALUE self, struct Memory_Profiler_Capture *capture, VALUE klass, VALUE object) {
+ 	st_data_t allocations_data;
+ 	if (st_lookup(capture->tracked_classes, (st_data_t)klass, &allocations_data)) {
+ 		VALUE allocations = (VALUE)allocations_data;
+ 		struct Memory_Profiler_Capture_Allocations *record = Memory_Profiler_Allocations_get(allocations);
  		record->free_count++;
+
+ 		// If we have a callback and detailed tracking, queue the freeobj for later processing
  		if (!NIL_P(record->callback) && record->object_states) {
  			// Look up state stored during NEWOBJ
  			st_data_t state_data;
  			if (st_delete(record->object_states, (st_data_t *)&object, &state_data)) {
  				VALUE state = (VALUE)state_data;
- 				// Call with (klass, :freeobj, state)
- 				rb_funcall(record->callback, rb_intern("call"), 3, klass, sym_freeobj, state);
+
+ 				// Push a new item onto the queue (returns pointer to write to)
+ 				// NOTE: realloc is safe during GC (doesn't trigger Ruby allocation)
+ 				struct Memory_Profiler_Queue_Item *freed = Memory_Profiler_Queue_push(&capture->freed_queue);
+ 				if (freed) {
+ 					// Write directly to the allocated space
+ 					freed->klass = klass;
+ 					freed->allocations = allocations;
+ 					freed->state = state;
+
+ 					// Trigger postponed job to process the queue after GC
+ 					rb_postponed_job_trigger(capture->postponed_job_handle);
+ 				}
+ 				// If push failed (out of memory), silently drop this freeobj event
  			}
  		}
  	}
@@ -299,10 +364,10 @@ static void Memory_Profiler_Capture_event_callback(VALUE data, void *ptr) {
  
  	if (event_flag == RUBY_INTERNAL_EVENT_NEWOBJ) {
  		// self is the newly allocated object
- 		Memory_Profiler_Capture_newobj_handler(capture, klass, object);
+ 		Memory_Profiler_Capture_newobj_handler(data, capture, klass, object);
  	} else if (event_flag == RUBY_INTERNAL_EVENT_FREEOBJ) {
  		// self is the object being freed
- 		Memory_Profiler_Capture_freeobj_handler(capture, klass, object);
+ 		Memory_Profiler_Capture_freeobj_handler(data, capture, klass, object);
  	}
  }
  
@@ -323,6 +388,21 @@ static VALUE Memory_Profiler_Capture_alloc(VALUE klass) {
  
  	capture->enabled = 0;
  
+ 	// Initialize the freed object queue
+ 	Memory_Profiler_Queue_initialize(&capture->freed_queue, sizeof(struct Memory_Profiler_Queue_Item));
+
+ 	// Pre-register the postponed job for processing freed objects
+ 	// The job will be triggered whenever we queue freed objects during GC
+ 	capture->postponed_job_handle = rb_postponed_job_preregister(
+ 		0, // flags
+ 		Memory_Profiler_Capture_process_freed_queue,
+ 		(void *)obj
+ 	);
+
+ 	if (capture->postponed_job_handle == POSTPONED_JOB_HANDLE_INVALID) {
+ 		rb_raise(rb_eRuntimeError, "Failed to register postponed job!");
+ 	}
+
  	return obj;
  }
  
@@ -376,9 +456,10 @@ static VALUE Memory_Profiler_Capture_track(int argc, VALUE *argv, VALUE self) {
  	VALUE klass, callback;
  	rb_scan_args(argc, argv, "1&", &klass, &callback);
  
- 	st_data_t record_data;
- 	if (st_lookup(capture->tracked_classes, (st_data_t)klass, &record_data)) {
- 		struct Memory_Profiler_Capture_Allocations *record = (struct Memory_Profiler_Capture_Allocations *)record_data;
+ 	st_data_t allocations_data;
+ 	if (st_lookup(capture->tracked_classes, (st_data_t)klass, &allocations_data)) {
+ 		VALUE allocations = (VALUE)allocations_data;
+ 		struct Memory_Profiler_Capture_Allocations *record = Memory_Profiler_Allocations_get(allocations);
  		RB_OBJ_WRITE(self, &record->callback, callback);
  	} else {
  		struct Memory_Profiler_Capture_Allocations *record = ALLOC(struct Memory_Profiler_Capture_Allocations);
@@ -386,7 +467,15 @@ static VALUE Memory_Profiler_Capture_track(int argc, VALUE *argv, VALUE self) {
  		record->new_count = 0;
  		record->free_count = 0;
  		record->object_states = NULL;
- 		st_insert(capture->tracked_classes, (st_data_t)klass, (st_data_t)record);
+
+ 		// Wrap the record in a VALUE
+ 		VALUE allocations = Memory_Profiler_Allocations_wrap(record);
+
+ 		st_insert(capture->tracked_classes, (st_data_t)klass, (st_data_t)allocations);
+ 		// Notify GC about the class VALUE stored as key in the table
+ 		RB_OBJ_WRITTEN(self, Qnil, klass);
+ 		// Notify GC about the allocations VALUE stored as value in the table
+ 		RB_OBJ_WRITTEN(self, Qnil, allocations);
  		// Now inform GC about the callback reference
  		if (!NIL_P(callback)) {
  			RB_OBJ_WRITTEN(self, Qnil, callback);
@@ -401,10 +490,10 @@ static VALUE Memory_Profiler_Capture_untrack(VALUE self, VALUE klass) {
  	struct Memory_Profiler_Capture *capture;
  	TypedData_Get_Struct(self, struct Memory_Profiler_Capture, &Memory_Profiler_Capture_type, capture);
  
- 	st_data_t record_data;
- 	if (st_delete(capture->tracked_classes, (st_data_t *)&klass, &record_data)) {
- 		struct Memory_Profiler_Capture_Allocations *record = (struct Memory_Profiler_Capture_Allocations *)record_data;
- 		xfree(record);
+ 	st_data_t allocations_data;
+ 	if (st_delete(capture->tracked_classes, (st_data_t *)&klass, &allocations_data)) {
+ 		// The wrapped Allocations VALUE will be GC'd naturally
+ 		// No manual cleanup needed
  	}
  
  	return self;
@@ -424,9 +513,10 @@ static VALUE Memory_Profiler_Capture_count_for(VALUE self, VALUE klass) {
  	struct Memory_Profiler_Capture *capture;
  	TypedData_Get_Struct(self, struct Memory_Profiler_Capture, &Memory_Profiler_Capture_type, capture);
  
- 	st_data_t record_data;
- 	if (st_lookup(capture->tracked_classes, (st_data_t)klass, &record_data)) {
- 		struct Memory_Profiler_Capture_Allocations *record = (struct Memory_Profiler_Capture_Allocations *)record_data;
+ 	st_data_t allocations_data;
+ 	if (st_lookup(capture->tracked_classes, (st_data_t)klass, &allocations_data)) {
+ 		VALUE allocations = (VALUE)allocations_data;
+ 		struct Memory_Profiler_Capture_Allocations *record = Memory_Profiler_Allocations_get(allocations);
  		// Return net live count (new_count - free_count)
  		// Handle case where more objects freed than allocated (allocated before tracking started)
  		if (record->free_count > record->new_count) {
@@ -439,18 +529,11 @@ static VALUE Memory_Profiler_Capture_count_for(VALUE self, VALUE klass) {
  	return INT2FIX(0);
  }
  
- // Iterator to free and reset each class record
- static int Memory_Profiler_Capture_reset_class(st_data_t key, st_data_t value, st_data_t arg) {
- 	struct Memory_Profiler_Capture_Allocations *record = (struct Memory_Profiler_Capture_Allocations *)value;
- 	record->new_count = 0; // Reset allocation count
- 	record->free_count = 0; // Reset free count
- 	record->callback = Qnil; // Clear callback
+ // Iterator to reset each class record
+ static int Memory_Profiler_Capture_tracked_classes_clear(st_data_t key, st_data_t value, st_data_t arg) {
+ 	VALUE allocations = (VALUE)value;
  
- 	// Clear object states
- 	if (record->object_states) {
- 		st_free_table(record->object_states);
- 		record->object_states = NULL;
- 	}
+ 	Memory_Profiler_Allocations_clear(allocations);
  
  	return ST_CONTINUE;
  }
@@ -460,60 +543,8 @@ static VALUE Memory_Profiler_Capture_clear(VALUE self) {
  	struct Memory_Profiler_Capture *capture;
  	TypedData_Get_Struct(self, struct Memory_Profiler_Capture, &Memory_Profiler_Capture_type, capture);
  
- 	// Reset all counts to 0 (don't free, just reset)
- 	st_foreach(capture->tracked_classes, Memory_Profiler_Capture_reset_class, 0);
-
- 	return self;
- }
-
- // TypedData for Allocations wrapper - just wraps the record pointer
- static const rb_data_type_t Memory_Profiler_Allocations_type = {
- 	"Memory::Profiler::Allocations",
- 	{NULL, NULL, NULL}, // No mark/free needed - record is owned by Capture
- 	0, 0, 0
- };
-
- // Wrap an allocations record
- static VALUE Memory_Profiler_Allocations_wrap(struct Memory_Profiler_Capture_Allocations *record) {
- 	return TypedData_Wrap_Struct(Memory_Profiler_Allocations, &Memory_Profiler_Allocations_type, record);
- }
-
- // Get allocations record from wrapper
- static struct Memory_Profiler_Capture_Allocations* Memory_Profiler_Allocations_get(VALUE self) {
- 	struct Memory_Profiler_Capture_Allocations *record;
- 	TypedData_Get_Struct(self, struct Memory_Profiler_Capture_Allocations, &Memory_Profiler_Allocations_type, record);
- 	return record;
- }
-
- // Allocations#new_count
- static VALUE Memory_Profiler_Allocations_new_count(VALUE self) {
- 	struct Memory_Profiler_Capture_Allocations *record = Memory_Profiler_Allocations_get(self);
- 	return SIZET2NUM(record->new_count);
- }
-
- // Allocations#free_count
- static VALUE Memory_Profiler_Allocations_free_count(VALUE self) {
- 	struct Memory_Profiler_Capture_Allocations *record = Memory_Profiler_Allocations_get(self);
- 	return SIZET2NUM(record->free_count);
- }
-
- // Allocations#retained_count
- static VALUE Memory_Profiler_Allocations_retained_count(VALUE self) {
- 	struct Memory_Profiler_Capture_Allocations *record = Memory_Profiler_Allocations_get(self);
- 	// Handle underflow when free_count > new_count
- 	size_t retained = record->free_count > record->new_count ? 0 : record->new_count - record->free_count;
- 	return SIZET2NUM(retained);
- }
-
- // Allocations#track { |klass| ... }
- static VALUE Memory_Profiler_Allocations_track(int argc, VALUE *argv, VALUE self) {
- 	struct Memory_Profiler_Capture_Allocations *record = Memory_Profiler_Allocations_get(self);
-
- 	VALUE callback;
- 	rb_scan_args(argc, argv, "&", &callback);
-
- 	// Use write barrier - self (Allocations wrapper) keeps Capture alive, which keeps callback alive
- 	RB_OBJ_WRITE(self, &record->callback, callback);
+ 	// Reset all counts to 0 (don't free, just reset) - pass self for write barriers
+ 	st_foreach(capture->tracked_classes, Memory_Profiler_Capture_tracked_classes_clear, 0);
  
  	return self;
  }
@@ -521,10 +552,7 @@ static VALUE Memory_Profiler_Allocations_track(int argc, VALUE *argv, VALUE self
  // Iterator callback for each
  static int Memory_Profiler_Capture_each_allocation(st_data_t key, st_data_t value, st_data_t arg) {
  	VALUE klass = (VALUE)key;
- 	struct Memory_Profiler_Capture_Allocations *record = (struct Memory_Profiler_Capture_Allocations *)value;
-
- 	// Wrap the allocations record
- 	VALUE allocations = Memory_Profiler_Allocations_wrap(record);
+ 	VALUE allocations = (VALUE)value; // Already a wrapped VALUE
  
  	// Yield class and allocations wrapper
  	rb_yield_values(2, klass, allocations);
@@ -565,12 +593,6 @@ void Init_Memory_Profiler_Capture(VALUE Memory_Profiler)
  	rb_define_method(Memory_Profiler_Capture, "each", Memory_Profiler_Capture_each, 0);
  	rb_define_method(Memory_Profiler_Capture, "clear", Memory_Profiler_Capture_clear, 0);
  
- 	// Allocations class - wraps allocation data for a specific class
- 	Memory_Profiler_Allocations = rb_define_class_under(Memory_Profiler, "Allocations", rb_cObject);
-
- 	rb_define_method(Memory_Profiler_Allocations, "new_count", Memory_Profiler_Allocations_new_count, 0);
- 	rb_define_method(Memory_Profiler_Allocations, "free_count", Memory_Profiler_Allocations_free_count, 0);
- 	rb_define_method(Memory_Profiler_Allocations, "retained_count", Memory_Profiler_Allocations_retained_count, 0);
- 	rb_define_method(Memory_Profiler_Allocations, "track", Memory_Profiler_Allocations_track, -1); // -1 to accept block
+ 	// Initialize Allocations class
+ 	Init_Memory_Profiler_Allocations(Memory_Profiler);
  }
-
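The capture.c changes above all serve one deferral pattern: the FREEOBJ handler runs inside GC, where no Ruby code may be executed, so it only copies the freed object's class, Allocations wrapper, and state into a plain C queue and triggers a pre-registered postponed job; that job then invokes the Ruby callback once GC has finished. Below is a minimal standalone sketch of the same pattern, using the Ruby 3.3+ postponed-job API the diff relies on (`rb_postponed_job_preregister` / `rb_postponed_job_trigger`); the function names are illustrative, not the gem's.

```c
#include <stdio.h>
#include "ruby.h"
#include "ruby/debug.h"

static rb_postponed_job_handle_t deferred_job;
static size_t pending_count;

// Runs after GC, on a normal VM context - calling Ruby (rb_funcall, allocation) is safe here.
static void process_pending(void *arg)
{
	(void)arg;
	fprintf(stderr, "processing %zu deferred events\n", pending_count);
	pending_count = 0;
}

// Something like a FREEOBJ hook would call this during GC: no Ruby calls,
// just record the event in plain C state and ask the VM to run the job later.
static void record_event_during_gc(void)
{
	pending_count++;
	rb_postponed_job_trigger(deferred_job);
}

void Init_deferral_example(void)
{
	// Pre-register once, up front; triggering the handle is then safe from GC context.
	deferred_job = rb_postponed_job_preregister(0, process_pending, NULL);
	if (deferred_job == POSTPONED_JOB_HANDLE_INVALID) {
		rb_raise(rb_eRuntimeError, "Failed to register postponed job!");
	}
	(void)record_event_during_gc; // Referenced only to keep this sketch self-contained.
}
```

Pre-registering in the allocator and only triggering from the event hook, as the diff does in `Memory_Profiler_Capture_alloc` and `Memory_Profiler_Capture_freeobj_handler`, keeps the work done during GC down to a counter bump, a `realloc`-backed queue push, and a trigger call.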
@@ -0,0 +1,122 @@
+ // Released under the MIT License.
+ // Copyright, 2025, by Samuel Williams.
+
+ // Provides a simple queue for storing elements directly (not as pointers).
+ // Elements are enqueued during GC and batch-processed afterward.
+
+ #pragma once
+
+ #include <stdlib.h>
+ #include <string.h>
+ #include <assert.h>
+
+ static const size_t MEMORY_PROFILER_QUEUE_DEFAULT_COUNT = 128;
+
+ struct Memory_Profiler_Queue {
+ 	// The queue storage (elements stored directly, not as pointers):
+ 	void *base;
+
+ 	// The allocated capacity (number of elements):
+ 	size_t capacity;
+
+ 	// The number of used elements:
+ 	size_t count;
+
+ 	// The size of each element in bytes:
+ 	size_t element_size;
+ };
+
+ // Initialize an empty queue
+ inline static void Memory_Profiler_Queue_initialize(struct Memory_Profiler_Queue *queue, size_t element_size)
+ {
+ 	queue->base = NULL;
+ 	queue->capacity = 0;
+ 	queue->count = 0;
+ 	queue->element_size = element_size;
+ }
+
+ // Free the queue and its contents
+ inline static void Memory_Profiler_Queue_free(struct Memory_Profiler_Queue *queue)
+ {
+ 	if (queue->base) {
+ 		free(queue->base);
+ 		queue->base = NULL;
+ 	}
+
+ 	queue->capacity = 0;
+ 	queue->count = 0;
+ }
+
+ // Resize the queue to have at least the given capacity
+ inline static int Memory_Profiler_Queue_resize(struct Memory_Profiler_Queue *queue, size_t required_capacity)
+ {
+ 	if (required_capacity <= queue->capacity) {
+ 		// Already big enough:
+ 		return 0;
+ 	}
+
+ 	size_t new_capacity = queue->capacity;
+
+ 	// If the queue is empty, we need to set the initial size:
+ 	if (new_capacity == 0) {
+ 		new_capacity = MEMORY_PROFILER_QUEUE_DEFAULT_COUNT;
+ 	}
+
+ 	// Double until we reach required capacity
+ 	while (new_capacity < required_capacity) {
+ 		// Check for overflow
+ 		if (new_capacity > (SIZE_MAX / (2 * queue->element_size))) {
+ 			return -1; // Would overflow
+ 		}
+ 		new_capacity *= 2;
+ 	}
+
+ 	// Check final size doesn't overflow
+ 	if (new_capacity > (SIZE_MAX / queue->element_size)) {
+ 		return -1; // Too large
+ 	}
+
+ 	// Reallocate
+ 	void *new_base = realloc(queue->base, new_capacity * queue->element_size);
+ 	if (new_base == NULL) {
+ 		return -1; // Allocation failed
+ 	}
+
+ 	queue->base = new_base;
+ 	queue->capacity = new_capacity;
+
+ 	return 1; // Success
+ }
+
+ // Push a new element onto the end of the queue, returning pointer to the allocated space
+ // WARNING: The returned pointer is only valid until the next push operation
+ inline static void* Memory_Profiler_Queue_push(struct Memory_Profiler_Queue *queue)
+ {
+ 	// Ensure we have capacity
+ 	size_t new_count = queue->count + 1;
+ 	if (new_count > queue->capacity) {
+ 		if (Memory_Profiler_Queue_resize(queue, new_count) == -1) {
+ 			return NULL;
+ 		}
+ 	}
+
+ 	// Calculate pointer to the new element
+ 	void *element = (char*)queue->base + (queue->count * queue->element_size);
+ 	queue->count++;
+
+ 	return element;
+ }
+
+ // Clear the queue (reset count to 0, reusing allocated memory)
+ inline static void Memory_Profiler_Queue_clear(struct Memory_Profiler_Queue *queue)
+ {
+ 	queue->count = 0;
+ }
+
+ // Get element at index (for iteration)
+ // WARNING: Do not hold these pointers across push operations
+ inline static void* Memory_Profiler_Queue_at(struct Memory_Profiler_Queue *queue, size_t index)
+ {
+ 	assert(index < queue->count);
+ 	return (char*)queue->base + (index * queue->element_size);
+ }
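The queue added above is a plain growable array of fixed-size elements: initialize it with an element size, push to get a pointer to in-place storage, iterate by index, then clear (which keeps the capacity for reuse) or free. A small hypothetical usage sketch follows, assuming the header ships as `queue.h` (the name the capture.c hunk now includes); the `Event` struct and `main` driver are made up for illustration.

```c
#include <stdio.h>
#include <stdint.h> // For SIZE_MAX, which the header uses but does not include itself.
#include "queue.h"

struct Event {
	int id;       // Hypothetical payload fields, purely for illustration.
	double value;
};

int main(void)
{
	struct Memory_Profiler_Queue queue;
	Memory_Profiler_Queue_initialize(&queue, sizeof(struct Event));
	
	// Push returns a pointer into the queue's own storage; write the element in place.
	// The pointer is only valid until the next push, which may realloc the buffer.
	for (int i = 0; i < 4; i++) {
		struct Event *event = Memory_Profiler_Queue_push(&queue);
		if (!event) break; // Allocation failure: the event is simply dropped.
		event->id = i;
		event->value = i * 0.5;
	}
	
	// Batch-process by index, then clear: count resets to 0 but capacity is reused.
	for (size_t i = 0; i < queue.count; i++) {
		struct Event *event = Memory_Profiler_Queue_at(&queue, i);
		printf("event %d: %.1f\n", event->id, event->value);
	}
	Memory_Profiler_Queue_clear(&queue);
	
	Memory_Profiler_Queue_free(&queue);
	return 0;
}
```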
@@ -7,7 +7,7 @@
  module Memory
  	# @namespace
  	module Profiler
- 		VERSION = "1.1.1"
+ 		VERSION = "1.1.3"
  	end
  end
  
data/readme.md CHANGED
@@ -9,19 +9,23 @@ Efficient memory allocation tracking focused on **retained objects only**. Autom
  - **Retained Objects Only**: Uses `RUBY_INTERNAL_EVENT_NEWOBJ` and `RUBY_INTERNAL_EVENT_FREEOBJ` to automatically track only objects that survive GC.
  - **O(1) Live Counts**: Maintains per-class counters updated on alloc/free - no heap enumeration needed\!
  - **Tree-Based Analysis**: Deduplicates common call paths using an efficient tree structure.
- - **Native C Extension**: **Required** - uses Ruby internal events not available in pure Ruby.
- - **Configurable Depth**: Control how deep to capture call stacks.
  
  ## Usage
  
  Please see the [project documentation](https://socketry.github.io/memory-profiler/) for more details.
  
- - [Getting Started](https://socketry.github.io/memory-profiler/guides/getting-started/index) - This guide explains how to use `memory-profiler` to detect and diagnose memory leaks in Ruby applications.
+ - [Getting Started](https://socketry.github.io/memory-profiler/guides/getting-started/index) - This guide explains how to use `memory-profiler` to automatically detect and diagnose memory leaks in Ruby applications.
+
+ - [Rack Integration](https://socketry.github.io/memory-profiler/guides/rack-integration/index) - This guide explains how to integrate `memory-profiler` into Rack applications for automatic memory leak detection.
  
  ## Releases
  
  Please see the [project releases](https://socketry.github.io/memory-profiler/releases/index) for all releases.
  
+ ### v1.1.2
+
+ - Fix handling of GC compaction (I hope).
+
  ### v0.1.0
  
  - Initial implementation.