memory-profiler 1.1.9 → 1.1.10

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
checksums.yaml CHANGED
@@ -1,7 +1,7 @@
 ---
 SHA256:
-  metadata.gz: 85589b15e3229992f9afefb5333efebaa07d7e25da717a46b3db5846e14af63d
-  data.tar.gz: 82986a841e9964aed258e1d7d94f2af7719f350f517423ca49ffb496237e497c
+  metadata.gz: '05249db4d71f31bfdaa1e8eb0814d7c44688aa9b576a6df40e48bb2b0505c802'
+  data.tar.gz: 3c39173cf151e64ca8ae17790dc4453f337091f973ba25e5db54f2917e0b3bf1
 SHA512:
-  metadata.gz: ed056829d3a27ca9b2cc681107a7254e798338c77bbe539f91332d915be126665a19550c5533db8fe751531f12c66963cdfe692232d3f5d3a5f409dd66e31cab
-  data.tar.gz: cd44bca86e0db7a75413492a694dad3d202359f6cbe2d386ff71dd08d74adcc13557181565694679d3da1978c58664beff442b54e3a4b2512ef83d16cf96fce8
+  metadata.gz: a6aa1342bd86d5dd8dfcdb80497bb50c07937182a60dfb9d296d84e88fbd72f0ebdc4dd6c82fb396a4067b2adc129572f556d8545328a4dd04516bd0ffedf613
+  data.tar.gz: 819cb8882b506f4341faa8095469279ec0d053c67392fa1d7d0e6a689b80bf037f196743280e91e4fb70987ea89cfcd492f3f0f39a8a9fcb6bafa7de3c4e4cb1
checksums.yaml.gz.sig CHANGED
Binary file
@@ -10,39 +10,33 @@
 
 static VALUE Memory_Profiler_Allocations = Qnil;
 
-// Helper to mark object_states table values
-static int Memory_Profiler_Allocations_object_states_mark(st_data_t key, st_data_t value, st_data_t arg) {
-	// Don't mark the object, we use it as a key but we never use it as an object, and we don't want to retain it.
-	// VALUE object = (VALUE)key;
-	// rb_gc_mark_movable(object);
-
+// Helper to mark states table (object => state)
+static int Memory_Profiler_Allocations_states_mark(st_data_t key, st_data_t value, st_data_t arg) {
+	// Don't mark the object key (weak reference - don't keep objects alive)
+	// Mark the state value
 	VALUE state = (VALUE)value;
-	if (!NIL_P(state)) {
-		rb_gc_mark_movable(state);
-	}
+	rb_gc_mark_movable(state);
 	return ST_CONTINUE;
 }
 
 // Foreach callback for st_foreach_with_replace (iteration logic)
-static int Memory_Profiler_Allocations_object_states_foreach(st_data_t key, st_data_t value, st_data_t argp, int error) {
+static int Memory_Profiler_Allocations_states_foreach(st_data_t key, st_data_t value, st_data_t argp, int error) {
 	// Return ST_REPLACE to trigger the replace callback for each entry
 	return ST_REPLACE;
 }
 
-// Replace callback for st_foreach_with_replace to update object_states keys and values during compaction
-static int Memory_Profiler_Allocations_object_states_compact(st_data_t *key, st_data_t *value, st_data_t data, int existing) {
-	VALUE old_object = (VALUE)*key;
+// Replace callback for st_foreach_with_replace to update states during compaction
+static int Memory_Profiler_Allocations_states_compact(st_data_t *key, st_data_t *value, st_data_t data, int existing) {
+	// Key is object (VALUE) - we can't update if it moved (would require rehashing/allocation)
 	VALUE old_state = (VALUE)*value;
-
-	VALUE new_object = rb_gc_location(old_object);
 	VALUE new_state = rb_gc_location(old_state);
 
-	// Update key if it moved
-	if (old_object != new_object) {
-		*key = (st_data_t)new_object;
-	}
+	// NOTE: We can't update object keys if they moved (would require rehashing/allocation).
+	// This means lookups may fail after compaction for moved objects.
+	// This is acceptable - FREEOBJ will simply not find the state and skip the callback.
+	// The state will be cleaned up when Allocations is freed/cleared.
 
-	// Update value if it moved
+	// Update state value if it moved (this is safe, doesn't rehash)
 	if (old_state != new_state) {
 		*value = (st_data_t)new_state;
 	}
@@ -50,7 +44,6 @@ static int Memory_Profiler_Allocations_object_states_compact(st_data_t *key, st_
 	return ST_CONTINUE;
 }
 
-// GC mark function for Allocations
 static void Memory_Profiler_Allocations_mark(void *ptr) {
 	struct Memory_Profiler_Capture_Allocations *record = ptr;
 
@@ -58,22 +51,19 @@ static void Memory_Profiler_Allocations_mark(void *ptr) {
 		return;
 	}
 
-	if (!NIL_P(record->callback)) {
-		rb_gc_mark_movable(record->callback);
-	}
+	rb_gc_mark_movable(record->callback);
 
-	// Mark object_states table if it exists
-	if (record->object_states) {
-		st_foreach(record->object_states, Memory_Profiler_Allocations_object_states_mark, 0);
+	// Mark states table if it exists
+	if (record->states) {
+		st_foreach(record->states, Memory_Profiler_Allocations_states_mark, 0);
 	}
 }
 
-// GC free function for Allocations
 static void Memory_Profiler_Allocations_free(void *ptr) {
 	struct Memory_Profiler_Capture_Allocations *record = ptr;
 
-	if (record->object_states) {
-		st_free_table(record->object_states);
+	if (record->states) {
+		st_free_table(record->states);
 	}
 
 	xfree(record);
@@ -84,14 +74,12 @@ static void Memory_Profiler_Allocations_compact(void *ptr) {
 	struct Memory_Profiler_Capture_Allocations *record = ptr;
 
 	// Update callback if it moved
-	if (!NIL_P(record->callback)) {
-		record->callback = rb_gc_location(record->callback);
-	}
+	record->callback = rb_gc_location(record->callback);
 
-	// Update object_states table if it exists
-	if (record->object_states && record->object_states->num_entries > 0) {
-		if (st_foreach_with_replace(record->object_states, Memory_Profiler_Allocations_object_states_foreach, Memory_Profiler_Allocations_object_states_compact, 0)) {
-			rb_raise(rb_eRuntimeError, "object_states modified during GC compaction");
+	// Update states table if it exists
+	if (record->states && record->states->num_entries > 0) {
+		if (st_foreach_with_replace(record->states, Memory_Profiler_Allocations_states_foreach, Memory_Profiler_Allocations_states_compact, 0)) {
+			rb_raise(rb_eRuntimeError, "states modified during GC compaction");
 		}
 	}
 }
@@ -158,10 +146,11 @@ void Memory_Profiler_Allocations_clear(VALUE allocations) {
 	record->free_count = 0; // Reset free count
 	RB_OBJ_WRITE(allocations, &record->callback, Qnil); // Clear callback with write barrier
 
-	// Clear object states
-	if (record->object_states) {
-		st_free_table(record->object_states);
-		record->object_states = NULL;
+	// Clear states - either clear the table or reinitialize
+	if (record->states) {
+		st_clear(record->states);
+	} else {
+		record->states = st_init_numtable();
 	}
 }
 
@@ -17,9 +17,9 @@ struct Memory_Profiler_Capture_Allocations {
 	size_t free_count;
 	// Live count = new_count - free_count.
 
-	// For detailed tracking: map object (VALUE) => state (VALUE).
-	// State is returned from callback on `newobj` and passed back on `freeobj`.
-	st_table *object_states;
+	// Map object (VALUE) => state (VALUE).
+	// Object keys need compaction updates when they move.
+	st_table *states;
 };
 
 // Wrap an allocations record in a VALUE.
@@ -13,7 +13,6 @@
 
 enum {
 	DEBUG = 0,
-	DEBUG_STATE = 0,
 };
 
 static VALUE Memory_Profiler_Capture = Qnil;
@@ -23,19 +22,25 @@ static VALUE sym_newobj, sym_freeobj;
 
 // Main capture state (per-instance).
 struct Memory_Profiler_Capture {
-	// Tracked classes: class => VALUE (wrapped Memory_Profiler_Capture_Allocations).
-	st_table *tracked_classes;
-
 	// Master switch - is tracking active? (set by start/stop).
 	int running;
 
 	// Should we queue callbacks? (temporarily disabled during queue processing).
 	int enabled;
+
+	// Tracked classes: class => VALUE (wrapped Memory_Profiler_Capture_Allocations).
+	st_table *tracked_classes;
+
+	// Total number of allocations and frees seen since tracking started.
+	size_t new_count;
+	size_t free_count;
 };
 
 // GC mark callback for tracked_classes table.
 static int Memory_Profiler_Capture_tracked_classes_mark(st_data_t key, st_data_t value, st_data_t arg) {
 	// Mark class as un-movable:
+	// - We don't want to re-index the table if the class moves.
+	// - We don't want objects in `freeobj` to have invalid class pointers (maybe helps).
 	VALUE klass = (VALUE)key;
 	rb_gc_mark(klass);
 
@@ -82,13 +87,6 @@ static int Memory_Profiler_Capture_tracked_classes_foreach(st_data_t key, st_dat
 
 // Replace callback for st_foreach_with_replace (update logic).
 static int Memory_Profiler_Capture_tracked_classes_update(st_data_t *key, st_data_t *value, st_data_t argp, int existing) {
-	// Update class key if it moved:
-	VALUE old_klass = (VALUE)*key;
-	VALUE new_klass = rb_gc_location(old_klass);
-	if (old_klass != new_klass) {
-		*key = (st_data_t)new_klass;
-	}
-
 	// Update wrapped Allocations VALUE if it moved:
 	VALUE old_allocations = (VALUE)*value;
 	VALUE new_allocations = rb_gc_location(old_allocations);
@@ -139,70 +137,98 @@ const char *event_flag_name(rb_event_flag_t event_flag) {
 	}
 }
 
-// Process a NEWOBJ event. Handles both callback and non-callback cases, stores appropriate state.
-static void Memory_Profiler_Capture_process_newobj(VALUE capture_value, VALUE klass, VALUE allocations, VALUE object) {
+// Process a NEWOBJ event. All allocation tracking logic is here.
+static void Memory_Profiler_Capture_process_newobj(VALUE capture_value, VALUE klass, VALUE object) {
 	struct Memory_Profiler_Capture *capture;
 	TypedData_Get_Struct(capture_value, struct Memory_Profiler_Capture, &Memory_Profiler_Capture_type, capture);
 
-	struct Memory_Profiler_Capture_Allocations *record = Memory_Profiler_Allocations_get(allocations);
+	// Increment global new count:
+	capture->new_count++;
 
-	// Ensure object_states table exists:
-	if (!record->object_states) {
-		record->object_states = st_init_numtable();
-	}
+	// Look up or create allocations record for this class:
+	st_data_t allocations_data;
+	VALUE allocations;
+	struct Memory_Profiler_Capture_Allocations *record;
 
-	VALUE state;
+	if (st_lookup(capture->tracked_classes, (st_data_t)klass, &allocations_data)) {
+		// Existing record
+		allocations = (VALUE)allocations_data;
+		record = Memory_Profiler_Allocations_get(allocations);
+		record->new_count++;
+	} else {
+		// First time seeing this class, create record automatically
+		record = ALLOC(struct Memory_Profiler_Capture_Allocations);
+		record->callback = Qnil;
+		record->new_count = 1;
+		record->free_count = 0;
+		record->states = st_init_numtable();
+
+		allocations = Memory_Profiler_Allocations_wrap(record);
+		st_insert(capture->tracked_classes, (st_data_t)klass, (st_data_t)allocations);
+		RB_OBJ_WRITTEN(capture_value, Qnil, klass);
+		RB_OBJ_WRITTEN(capture_value, Qnil, allocations);
+	}
 
+	// Only store state if there's a callback
 	if (!NIL_P(record->callback)) {
-		// Temporarily disable queueing to prevent infinite loop:
+		// Temporarily disable queueing to prevent infinite loop
 		int was_enabled = capture->enabled;
 		capture->enabled = 0;
 
-		state = rb_funcall(record->callback, rb_intern("call"), 3, klass, sym_newobj, Qnil);
+		VALUE state = rb_funcall(record->callback, rb_intern("call"), 3, klass, sym_newobj, Qnil);
 
 		capture->enabled = was_enabled;
 
-		if (DEBUG_STATE) fprintf(stderr, "Storing callback state for object: %p\n", (void *)object);
+		// Store state using object as key (works because object is alive):
+		st_insert(record->states, (st_data_t)object, (st_data_t)state);
 	} else {
-		// No callback, store sentinel (Qnil):
-		state = Qnil;
-
-		if (DEBUG_STATE) fprintf(stderr, "Storing sentinel (Qnil) for object: %p\n", (void *)object);
+		// Store state as nil:
+		st_insert(record->states, (st_data_t)object, (st_data_t)Qnil);
 	}
-
-	// Always store something to maintain NEWOBJ/FREEOBJ symmetry:
-	st_insert(record->object_states, (st_data_t)object, (st_data_t)state);
 }
 
-// Process a FREEOBJ event. Looks up and deletes state, optionally calls callback.
-static void Memory_Profiler_Capture_process_freeobj(VALUE capture_value, VALUE klass, VALUE allocations, VALUE object) {
+// Process a FREEOBJ event. All deallocation tracking logic is here.
+static void Memory_Profiler_Capture_process_freeobj(VALUE capture_value, VALUE klass, VALUE object) {
 	struct Memory_Profiler_Capture *capture;
 	TypedData_Get_Struct(capture_value, struct Memory_Profiler_Capture, &Memory_Profiler_Capture_type, capture);
 
+	// Look up allocations record for this class
+	st_data_t allocations_data;
+	if (!st_lookup(capture->tracked_classes, (st_data_t)klass, &allocations_data)) {
+		// The class is not tracked, so we are not tracking this object.
+		return;
+	}
+
+	VALUE allocations = (VALUE)allocations_data;
 	struct Memory_Profiler_Capture_Allocations *record = Memory_Profiler_Allocations_get(allocations);
 
-	// Try to look up and delete state:
-	if (record->object_states) {
-		st_data_t state_data;
-		if (st_delete(record->object_states, (st_data_t *)&object, &state_data)) {
-			VALUE state = (VALUE)state_data;
-
-			if (DEBUG_STATE) fprintf(stderr, "Freed tracked object: %p, state=%s\n",
-				(void *)object, NIL_P(state) ? "Qnil (sentinel)" : "real state");
-
-			// Only call callback if we have both callback AND real state (not Qnil sentinel):
-			if (!NIL_P(record->callback) && !NIL_P(state)) {
-				// Temporarily disable queueing to prevent infinite loop:
-				int was_enabled = capture->enabled;
-				capture->enabled = 0;
-
-				rb_funcall(record->callback, rb_intern("call"), 3, klass, sym_freeobj, state);
-
-				capture->enabled = was_enabled;
-			}
-		} else {
-			if (DEBUG_STATE) fprintf(stderr, "Freed pre-existing object: %p (not tracked)\n", (void *)object);
-		}
+	if (!record->states) {
+		// There is no state table for this class, so we are not tracking it.
+		return;
+	}
+
+	st_data_t state_data;
+	if (!st_delete(record->states, (st_data_t *)&object, &state_data)) {
+		// There is no state for this object, so we are not tracking it.
+		return;
+	}
+
+	VALUE state = (VALUE)state_data;
+
+	// Increment global free count
+	capture->free_count++;
+
+	// Increment per-class free count
+	record->free_count++;
+
+	if (!NIL_P(record->callback) && !NIL_P(state)) {
+		// Temporarily disable queueing to prevent infinite loop
+		int was_enabled = capture->enabled;
+		capture->enabled = 0;
+
+		rb_funcall(record->callback, rb_intern("call"), 3, klass, sym_freeobj, state);
+
+		capture->enabled = was_enabled;
 	}
 }
 
@@ -210,58 +236,16 @@ static void Memory_Profiler_Capture_process_freeobj(VALUE capture_value, VALUE k
 void Memory_Profiler_Capture_process_event(struct Memory_Profiler_Event *event) {
 	switch (event->type) {
 	case MEMORY_PROFILER_EVENT_TYPE_NEWOBJ:
-		Memory_Profiler_Capture_process_newobj(event->capture, event->klass, event->allocations, event->object);
+		Memory_Profiler_Capture_process_newobj(event->capture, event->klass, event->object);
 		break;
 	case MEMORY_PROFILER_EVENT_TYPE_FREEOBJ:
-		Memory_Profiler_Capture_process_freeobj(event->capture, event->klass, event->allocations, event->object);
+		Memory_Profiler_Capture_process_freeobj(event->capture, event->klass, event->object);
 		break;
 	}
 }
 
 #pragma mark - Event Handlers
 
-// NEWOBJ event handler. Automatically tracks ALL allocations (creates records on demand).
-static void Memory_Profiler_Capture_newobj_handler(VALUE self, struct Memory_Profiler_Capture *capture, VALUE klass, VALUE object) {
-	st_data_t allocations_data;
-	VALUE allocations;
-
-	if (st_lookup(capture->tracked_classes, (st_data_t)klass, &allocations_data)) {
-		// Existing record, increment count:
-		allocations = (VALUE)allocations_data;
-		struct Memory_Profiler_Capture_Allocations *record = Memory_Profiler_Allocations_get(allocations);
-		record->new_count++;
-	} else {
-		// First time seeing this class, create record automatically:
-		struct Memory_Profiler_Capture_Allocations *record = ALLOC(struct Memory_Profiler_Capture_Allocations);
-		record->callback = Qnil;
-		record->new_count = 1;
-		record->free_count = 0;
-		record->object_states = NULL;
-
-		allocations = Memory_Profiler_Allocations_wrap(record);
-		st_insert(capture->tracked_classes, (st_data_t)klass, (st_data_t)allocations);
-		RB_OBJ_WRITTEN(self, Qnil, klass);
-		RB_OBJ_WRITTEN(self, Qnil, allocations);
-	}
-
-	// Enqueue to global event queue:
-	Memory_Profiler_Events_enqueue(MEMORY_PROFILER_EVENT_TYPE_NEWOBJ, self, klass, allocations, object);
-}
-
-// FREEOBJ event handler. Increments count and enqueues for processing.
-static void Memory_Profiler_Capture_freeobj_handler(VALUE self, struct Memory_Profiler_Capture *capture, VALUE klass, VALUE object) {
-	st_data_t allocations_data;
-	if (st_lookup(capture->tracked_classes, (st_data_t)klass, &allocations_data)) {
-		VALUE allocations = (VALUE)allocations_data;
-		struct Memory_Profiler_Capture_Allocations *record = Memory_Profiler_Allocations_get(allocations);
-
-		record->free_count++;
-
-		// Enqueue to global event queue:
-		Memory_Profiler_Events_enqueue(MEMORY_PROFILER_EVENT_TYPE_FREEOBJ, self, klass, allocations, object);
-	}
-}
-
 // Check if object type is trackable. Excludes internal types (T_IMEMO, T_NODE, T_ICLASS, etc.) that don't have normal classes.
 int Memory_Profiler_Capture_trackable_p(VALUE object) {
 	switch (rb_type(object)) {
@@ -305,30 +289,22 @@ static void Memory_Profiler_Capture_event_callback(VALUE data, void *ptr) {
 	TypedData_Get_Struct(data, struct Memory_Profiler_Capture, &Memory_Profiler_Capture_type, capture);
 
 	VALUE object = rb_tracearg_object(trace_arg);
-
+
 	// We don't want to track internal non-Object allocations:
 	if (!Memory_Profiler_Capture_trackable_p(object)) return;
-
+
 	rb_event_flag_t event_flag = rb_tracearg_event_flag(trace_arg);
 	VALUE klass = rb_class_of(object);
 	if (!klass) return;
 
-	if (DEBUG) {
-		// In events other than NEWOBJ, we are unable to allocate objects (due to GC), so we simply say "ignored":
-		const char *klass_name = "ignored";
-		if (event_flag == RUBY_INTERNAL_EVENT_NEWOBJ) {
-			klass_name = rb_class2name(klass);
-		}
-
-		fprintf(stderr, "Memory_Profiler_Capture_event_callback: %s, Object: %p, Class: %p (%s)\n", event_flag_name(event_flag), (void *)object, (void *)klass, klass_name);
-	}
-
 	if (event_flag == RUBY_INTERNAL_EVENT_NEWOBJ) {
-		// self is the newly allocated object
-		Memory_Profiler_Capture_newobj_handler(data, capture, klass, object);
+		// Skip NEWOBJ if disabled (during callback) to prevent infinite recursion
+		if (!capture->enabled) return;
+
+		Memory_Profiler_Events_enqueue(MEMORY_PROFILER_EVENT_TYPE_NEWOBJ, data, klass, object);
	} else if (event_flag == RUBY_INTERNAL_EVENT_FREEOBJ) {
-		// self is the object being freed
-		Memory_Profiler_Capture_freeobj_handler(data, capture, klass, object);
+		// Always process FREEOBJ to ensure state cleanup
+		Memory_Profiler_Events_enqueue(MEMORY_PROFILER_EVENT_TYPE_FREEOBJ, data, klass, object);
 	}
 }
 
@@ -347,6 +323,10 @@ static VALUE Memory_Profiler_Capture_alloc(VALUE klass) {
 		rb_raise(rb_eRuntimeError, "Failed to initialize hash table");
 	}
 
+	// Initialize allocation tracking counters
+	capture->new_count = 0;
+	capture->free_count = 0;
+
 	// Initialize state flags - not running, callbacks disabled
 	capture->running = 0;
 	capture->enabled = 0;
@@ -431,7 +411,7 @@ static VALUE Memory_Profiler_Capture_track(int argc, VALUE *argv, VALUE self) {
 		record->callback = callback; // Initial assignment, no write barrier needed
 		record->new_count = 0;
 		record->free_count = 0;
-		record->object_states = st_init_numtable();
+		record->states = st_init_numtable();
 
 		// Wrap the record in a VALUE
 		allocations = Memory_Profiler_Allocations_wrap(record);
@@ -481,13 +461,9 @@ static VALUE Memory_Profiler_Capture_count_for(VALUE self, VALUE klass) {
 	if (st_lookup(capture->tracked_classes, (st_data_t)klass, &allocations_data)) {
 		VALUE allocations = (VALUE)allocations_data;
 		struct Memory_Profiler_Capture_Allocations *record = Memory_Profiler_Allocations_get(allocations);
-		// Return net live count (new_count - free_count)
-		// Handle case where more objects freed than allocated (allocated before tracking started)
-		if (record->free_count > record->new_count) {
-			return INT2FIX(0); // Can't have negative live count
+		if (record->free_count <= record->new_count) {
+			return SIZET2NUM(record->new_count - record->free_count);
 		}
-		size_t live_count = record->new_count - record->free_count;
-		return SIZET2NUM(live_count);
 	}
 
 	return INT2FIX(0);
@@ -507,9 +483,21 @@ static VALUE Memory_Profiler_Capture_clear(VALUE self) {
 	struct Memory_Profiler_Capture *capture;
 	TypedData_Get_Struct(self, struct Memory_Profiler_Capture, &Memory_Profiler_Capture_type, capture);
 
-	// Reset all counts to 0 (don't free, just reset) - pass self for write barriers
+	// SAFETY: Don't allow clearing while running - there may be:
+	// - Event handlers firing (adding to object_allocations).
+	// - Events queued that haven't been processed yet.
+	// - Processors trying to access the states table.
+	if (capture->running) {
+		rb_raise(rb_eRuntimeError, "Cannot clear while capture is running - call stop() first!");
+	}
+
+	// Reset all counts to 0 (don't free, just reset) - pass self for write barriers:
 	st_foreach(capture->tracked_classes, Memory_Profiler_Capture_tracked_classes_clear, 0);
 
+	// Reset allocation tracking counters
+	capture->new_count = 0;
+	capture->free_count = 0;
+
 	return self;
 }
 
@@ -555,17 +543,17 @@ struct Memory_Profiler_Allocations_Statistics {
 	VALUE per_class_counts;
 };
 
-// Iterator callback to count object_states per class
-static int Memory_Profiler_Capture_count_object_states(st_data_t key, st_data_t value, st_data_t argument) {
+// Iterator callback to count states per class
+static int Memory_Profiler_Capture_count_states(st_data_t key, st_data_t value, st_data_t argument) {
 	struct Memory_Profiler_Allocations_Statistics *statistics = (struct Memory_Profiler_Allocations_Statistics *)argument;
 	VALUE klass = (VALUE)key;
 	VALUE allocations = (VALUE)value;
 	struct Memory_Profiler_Capture_Allocations *record = Memory_Profiler_Allocations_get(allocations);
 
-	size_t object_states_count = record->object_states ? record->object_states->num_entries : 0;
-	statistics->total_tracked_objects += object_states_count;
+	size_t states_count = record->states ? record->states->num_entries : 0;
+	statistics->total_tracked_objects += states_count;
 
-	rb_hash_aset(statistics->per_class_counts, klass, SIZET2NUM(object_states_count));
+	rb_hash_aset(statistics->per_class_counts, klass, SIZET2NUM(states_count));
 	return ST_CONTINUE;
 }
 
@@ -579,16 +567,14 @@ static VALUE Memory_Profiler_Capture_statistics(VALUE self) {
 
 	// Tracked classes count
 	rb_hash_aset(statistics, ID2SYM(rb_intern("tracked_classes_count")), SIZET2NUM(capture->tracked_classes->num_entries));
-
-	// Note: Global event queue stats are internal to the events module
-
-	// Count object_states entries for each tracked class
+
+	// Count states entries for each tracked class
 	struct Memory_Profiler_Allocations_Statistics allocations_statistics = {
 		.total_tracked_objects = 0,
 		.per_class_counts = rb_hash_new()
 	};
 
-	st_foreach(capture->tracked_classes, Memory_Profiler_Capture_count_object_states, (st_data_t)&allocations_statistics);
+	st_foreach(capture->tracked_classes, Memory_Profiler_Capture_count_states, (st_data_t)&allocations_statistics);
 
 	rb_hash_aset(statistics, ID2SYM(rb_intern("total_tracked_objects")), SIZET2NUM(allocations_statistics.total_tracked_objects));
 	rb_hash_aset(statistics, ID2SYM(rb_intern("tracked_objects_per_class")), allocations_statistics.per_class_counts);
@@ -596,6 +582,32 @@ static VALUE Memory_Profiler_Capture_statistics(VALUE self) {
 	return statistics;
 }
 
+// Get total new count across all classes
+static VALUE Memory_Profiler_Capture_new_count(VALUE self) {
+	struct Memory_Profiler_Capture *capture;
+	TypedData_Get_Struct(self, struct Memory_Profiler_Capture, &Memory_Profiler_Capture_type, capture);
+
+	return SIZET2NUM(capture->new_count);
+}
+
+// Get total free count across all classes
+static VALUE Memory_Profiler_Capture_free_count(VALUE self) {
+	struct Memory_Profiler_Capture *capture;
+	TypedData_Get_Struct(self, struct Memory_Profiler_Capture, &Memory_Profiler_Capture_type, capture);
+
+	return SIZET2NUM(capture->free_count);
+}
+
+// Get total retained count (new - free) across all classes
+static VALUE Memory_Profiler_Capture_retained_count(VALUE self) {
+	struct Memory_Profiler_Capture *capture;
+	TypedData_Get_Struct(self, struct Memory_Profiler_Capture, &Memory_Profiler_Capture_type, capture);
+
+	// Handle underflow if free_count > new_count (shouldn't happen but be safe)
+	size_t retained = capture->free_count > capture->new_count ? 0 : capture->new_count - capture->free_count;
+	return SIZET2NUM(retained);
+}
+
 void Init_Memory_Profiler_Capture(VALUE Memory_Profiler)
 {
 	// Initialize event symbols
@@ -618,6 +630,9 @@ void Init_Memory_Profiler_Capture(VALUE Memory_Profiler)
 	rb_define_method(Memory_Profiler_Capture, "[]", Memory_Profiler_Capture_aref, 1);
 	rb_define_method(Memory_Profiler_Capture, "clear", Memory_Profiler_Capture_clear, 0);
 	rb_define_method(Memory_Profiler_Capture, "statistics", Memory_Profiler_Capture_statistics, 0);
+	rb_define_method(Memory_Profiler_Capture, "new_count", Memory_Profiler_Capture_new_count, 0);
+	rb_define_method(Memory_Profiler_Capture, "free_count", Memory_Profiler_Capture_free_count, 0);
+	rb_define_method(Memory_Profiler_Capture, "retained_count", Memory_Profiler_Capture_retained_count, 0);
 
 	// Initialize Allocations class
 	Init_Memory_Profiler_Allocations(Memory_Profiler);
@@ -96,13 +96,12 @@ static void Memory_Profiler_Events_mark(void *ptr) {
 	for (size_t i = 0; i < events->queue.count; i++) {
 		struct Memory_Profiler_Event *event = Memory_Profiler_Queue_at(&events->queue, i);
 
-		// Mark the Capture instance this event belongs to:
+		// Mark the Capture instance and class:
 		rb_gc_mark_movable(event->capture);
 		rb_gc_mark_movable(event->klass);
-		rb_gc_mark_movable(event->allocations);
 
-		// For NEWOBJ, mark the object (it's alive).
-		// For FREEOBJ, DON'T mark (it's being freed - just used as key for lookup).
+		// For NEWOBJ: mark the object to keep it alive until processor runs
+		// For FREEOBJ: DON'T mark (object is being freed)
 		if (event->type == MEMORY_PROFILER_EVENT_TYPE_NEWOBJ) {
 			rb_gc_mark_movable(event->object);
 		}
@@ -120,10 +119,9 @@ static void Memory_Profiler_Events_compact(void *ptr) {
 		// Update all VALUEs if they moved during compaction:
 		event->capture = rb_gc_location(event->capture);
 		event->klass = rb_gc_location(event->klass);
-		event->allocations = rb_gc_location(event->allocations);
 
-		// For NEWOBJ, update the object pointer.
-		// For FREEOBJ, DON'T update (it's being freed, pointer is stale).
+		// For NEWOBJ: update object pointer if it moved
+		// For FREEOBJ: DON'T update (object is being freed, pointer is stale)
 		if (event->type == MEMORY_PROFILER_EVENT_TYPE_NEWOBJ) {
 			event->object = rb_gc_location(event->object);
 		}
@@ -147,7 +145,6 @@ int Memory_Profiler_Events_enqueue(
 	enum Memory_Profiler_Event_Type type,
 	VALUE capture,
 	VALUE klass,
-	VALUE allocations,
 	VALUE object
 ) {
 	struct Memory_Profiler_Events *events = Memory_Profiler_Events_instance();
@@ -159,7 +156,6 @@ int Memory_Profiler_Events_enqueue(
 	// Use write barriers when storing VALUEs (required for RUBY_TYPED_WB_PROTECTED):
 	RB_OBJ_WRITE(events->self, &event->capture, capture);
 	RB_OBJ_WRITE(events->self, &event->klass, klass);
-	RB_OBJ_WRITE(events->self, &event->allocations, allocations);
 	RB_OBJ_WRITE(events->self, &event->object, object);
 
 	const char *type_name = (type == MEMORY_PROFILER_EVENT_TYPE_NEWOBJ) ? "NEWOBJ" : "FREEOBJ";
@@ -22,10 +22,7 @@ struct Memory_Profiler_Event {
 	// The class of the object:
 	VALUE klass;
 
-	// The Allocations wrapper:
-	VALUE allocations;
-
-	// The object itself:
+	// The object itself (for NEWOBJ and FREEOBJ):
 	VALUE object;
 };
 
@@ -39,7 +36,6 @@ int Memory_Profiler_Events_enqueue(
 	enum Memory_Profiler_Event_Type type,
 	VALUE capture,
 	VALUE klass,
-	VALUE allocations,
 	VALUE object
 );
 
@@ -7,7 +7,7 @@
 module Memory
 	# @namespace
 	module Profiler
-		VERSION = "1.1.9"
+		VERSION = "1.1.10"
 	end
 end
 
data/readme.md CHANGED
@@ -22,6 +22,15 @@ Please see the [project documentation](https://socketry.github.io/memory-profile
 
 Please see the [project releases](https://socketry.github.io/memory-profiler/releases/index) for all releases.
 
+### v1.1.10
+
+- Added `Capture#new_count` - returns total number of allocations tracked across all classes.
+- Added `Capture#free_count` - returns total number of objects freed across all classes.
+- Added `Capture#retained_count` - returns retained object count (new\_count - free\_count).
+- **Critical:** Fixed GC crash during compaction caused by missing write barriers in event queue.
+- Fixed allocation/deallocation counts being inaccurate when objects are allocated during callbacks or freed after compaction.
+- `Capture#clear` now raises `RuntimeError` if called while capture is running. Call `stop()` before `clear()`.
+
 ### v1.1.9
 
 - More write barriers...
data/releases.md CHANGED
@@ -1,5 +1,14 @@
 # Releases
 
+## v1.1.10
+
+- Added `Capture#new_count` - returns total number of allocations tracked across all classes.
+- Added `Capture#free_count` - returns total number of objects freed across all classes.
+- Added `Capture#retained_count` - returns retained object count (new\_count - free\_count).
+- **Critical:** Fixed GC crash during compaction caused by missing write barriers in event queue.
+- Fixed allocation/deallocation counts being inaccurate when objects are allocated during callbacks or freed after compaction.
+- `Capture#clear` now raises `RuntimeError` if called while capture is running. Call `stop()` before `clear()`.
+
 ## v1.1.9
 
 - More write barriers...
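For orientation, a minimal usage sketch of the counters described in the changelog above. This is not part of the package diff; the `require` path and the `start`/`stop` calls are assumptions based on the rest of this diff (the `new_count`, `free_count`, `retained_count` and `clear` methods are the ones registered in the C extension changes shown earlier).

```ruby
require "memory/profiler"

capture = Memory::Profiler::Capture.new
capture.start

1000.times { Object.new }

# Aggregate counters added in v1.1.10; values are updated as queued events are processed:
capture.new_count      # total allocations seen since tracking started
capture.free_count     # total frees seen since tracking started
capture.retained_count # new_count - free_count, clamped at zero

# As of v1.1.10, clear raises RuntimeError while the capture is running, so stop first:
capture.stop
capture.clear
```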
data.tar.gz.sig CHANGED
Binary file
metadata CHANGED
@@ -1,7 +1,7 @@
 --- !ruby/object:Gem::Specification
 name: memory-profiler
 version: !ruby/object:Gem::Version
-  version: 1.1.9
+  version: 1.1.10
 platform: ruby
 authors:
 - Samuel Williams
metadata.gz.sig CHANGED
Binary file