memory-profiler 1.6.1 → 1.6.2

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
checksums.yaml CHANGED
@@ -1,7 +1,7 @@
 ---
 SHA256:
-  metadata.gz: c37b0bd859dce7d49e9bd34e9651f31ba110d94f0547b7a94e1ba03648b1bfd6
-  data.tar.gz: 584d98ee1122ef8cc0f7d463b7dc17bfb73ea49f1855c9bdc846750063c9f5ee
+  metadata.gz: b04c4d645ef5d18343e35171723b10cbfbe76b56d7a5b7555cce748bc125e2b5
+  data.tar.gz: 693243af6627cf2ee0b287fffd0687efede4fd3ee567e9c0e4117335361e1e6d
 SHA512:
-  metadata.gz: 592a2a9663b43e9bff2ed4782bc3daa7b02ef11a8009927be2b810c4f4b441ef76296dd2cf4edfc6c10c68833c0c9e442bfe950b7e29a2537960455378fe9f73
-  data.tar.gz: 3c25a8aaa03262f0665dfc5c31325728f721301891c336848d87e71073285eba9bc318bcad122b1835bec5e29346508ffa449636dbb6a82b200c4efd743db315
+  metadata.gz: 2e9ec2042c00672b9b3698cfe0f5fc2c3e8635e1ead929074a0b3cc291c06807942f12728d3941f70e4cc9a47e823244cad1ec3b0e07a18079036afb3250941e
+  data.tar.gz: 7b6a53f68b17d5121049f0ea307185b8185d9bb858261f64aad14d81a834f4f2f1dc6d04f41473f3af7b116d90578759f8dcac1f8b346d3007e751c1b91e7c0f
checksums.yaml.gz.sig CHANGED
Binary file
@@ -13,6 +13,9 @@
 
 enum {
 	DEBUG = 0,
+
+	// This generates a lot of output:
+	DEBUG_EVENT = 0,
 };
 
 static VALUE Memory_Profiler_Capture = Qnil;
@@ -224,10 +227,7 @@ static void Memory_Profiler_Capture_process_freeobj(VALUE capture_value, VALUE u
 	struct Memory_Profiler_Object_Table_Entry *entry = Memory_Profiler_Object_Table_lookup(capture->states, object);
 	
 	if (!entry) {
-		if (DEBUG) fprintf(stderr, "[FREEOBJ] Object not found in table: %p\n", (void*)object);
 		goto done;
-	} else {
-		if (DEBUG) fprintf(stderr, "[FREEOBJ] Object found in table: %p\n", (void*)object);
 	}
 	
 	VALUE klass = entry->klass;
@@ -246,7 +246,7 @@ static void Memory_Profiler_Capture_process_freeobj(VALUE capture_value, VALUE u
 	Memory_Profiler_Object_Table_delete_entry(capture->states, entry);
 	
 	struct Memory_Profiler_Capture_Allocations *record = Memory_Profiler_Allocations_get(allocations);
-
+
 	// Increment global free count
 	capture->free_count++;
 	
@@ -323,13 +323,12 @@ static void Memory_Profiler_Capture_event_callback(VALUE self, void *ptr) {
 	TypedData_Get_Struct(self, struct Memory_Profiler_Capture, &Memory_Profiler_Capture_type, capture);
 	
 	VALUE object = rb_tracearg_object(trace_arg);
-	
-	// We don't want to track internal non-Object allocations:
-	if (!Memory_Profiler_Capture_trackable_p(object)) return;
-	
 	rb_event_flag_t event_flag = rb_tracearg_event_flag(trace_arg);
 	
 	if (event_flag == RUBY_INTERNAL_EVENT_NEWOBJ) {
+		// We don't want to track internal non-Object allocations:
+		if (!Memory_Profiler_Capture_trackable_p(object)) return;
+		
 		// Skip NEWOBJ if disabled (during callback) to prevent infinite recursion
 		if (capture->paused) return;
 		
@@ -338,12 +337,10 @@ static void Memory_Profiler_Capture_event_callback(VALUE self, void *ptr) {
 		// Skip if klass is not a Class
 		if (rb_type(klass) != RUBY_T_CLASS) return;
 		
-		// Enqueue actual object (not object_id) - queue retains it until processed
-		// Ruby 3.5 compatible: no need for FL_SEEN_OBJ_ID or rb_obj_id
-		if (DEBUG) fprintf(stderr, "[NEWOBJ] Enqueuing event for object: %p\n", (void*)object);
+		if (DEBUG_EVENT) fprintf(stderr, "[NEWOBJ] Enqueuing event for object: %p\n", (void*)object);
 		Memory_Profiler_Events_enqueue(MEMORY_PROFILER_EVENT_TYPE_NEWOBJ, self, klass, object);
 	} else if (event_flag == RUBY_INTERNAL_EVENT_FREEOBJ) {
-		if (DEBUG) fprintf(stderr, "[FREEOBJ] Enqueuing event for object: %p\n", (void*)object);
+		if (DEBUG_EVENT) fprintf(stderr, "[FREEOBJ] Enqueuing event for object: %p\n", (void*)object);
 		Memory_Profiler_Events_enqueue(MEMORY_PROFILER_EVENT_TYPE_FREEOBJ, self, Qnil, object);
 	}
 }
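Note: the two hunks above move the `Memory_Profiler_Capture_trackable_p` check from the top of the callback into the NEWOBJ branch, which appears to be the change behind the v1.6.2 note "Ensure all `FREEOBJ` events are enqueued": free events are no longer filtered, so the free of a previously recorded object cannot be dropped. A schematic sketch of that dispatch shape, using toy types and a stand-in predicate rather than the gem's code:

#include <stdio.h>

enum event_type { EVENT_NEWOBJ, EVENT_FREEOBJ };

// Stand-in for the trackable check; only even "objects" are trackable here.
static int trackable_p(int object) {
	return object % 2 == 0;
}

static void on_event(enum event_type type, int object) {
	if (type == EVENT_NEWOBJ) {
		// Filter allocations only: untrackable objects are never recorded.
		if (!trackable_p(object)) return;
		printf("enqueue NEWOBJ %d\n", object);
	} else if (type == EVENT_FREEOBJ) {
		// Always forward frees, so a previously recorded object is always cleaned up.
		printf("enqueue FREEOBJ %d\n", object);
	}
}

int main(void) {
	on_event(EVENT_NEWOBJ, 4);
	on_event(EVENT_NEWOBJ, 5); // filtered
	on_event(EVENT_FREEOBJ, 4);
	return 0;
}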
@@ -596,12 +593,19 @@ struct Memory_Profiler_Each_Object_Arguments {
 	
 	// The allocations wrapper to filter by (Qnil = no filter).
 	VALUE allocations;
+	
+	// Previous GC state (to restore in ensure handler)
+	int gc_was_enabled;
 };
 
-// Cleanup function to re-enable GC
+// Cleanup function to restore GC state
 static VALUE Memory_Profiler_Capture_each_object_ensure(VALUE arg) {
-	// Re-enable GC (rb_gc_enable returns previous state, but we don't need it)
-	rb_gc_enable();
+	struct Memory_Profiler_Each_Object_Arguments *arguments = (struct Memory_Profiler_Each_Object_Arguments *)arg;
+	
+	// Restore GC state only if it was enabled before
+	if (arguments->gc_was_enabled) {
+		rb_gc_enable();
+	}
 	
 	return Qnil;
 }
@@ -619,8 +623,8 @@ static VALUE Memory_Profiler_Capture_each_object_body(VALUE arg) {
 	for (size_t i = 0; i < capture->states->capacity; i++) {
 		struct Memory_Profiler_Object_Table_Entry *entry = &capture->states->entries[i];
 		
-		// Skip empty or deleted slots (0 = not set)
-		if (entry->object == 0) {
+		// Skip empty or deleted slots (0 = not set, Qnil = deleted)
+		if (entry->object == 0 || entry->object == Qnil) {
 			continue;
 		}
 		
@@ -661,13 +665,6 @@ static VALUE Memory_Profiler_Capture_each_object(int argc, VALUE *argv, VALUE se
 	
 	RETURN_ENUMERATOR(self, argc, argv);
 	
-	// Disable GC to prevent objects from being collected during iteration
-	rb_gc_disable();
-	
-	// Process all pending events to clean up stale entries
-	// At this point, all remaining objects in the table should be valid
-	Memory_Profiler_Events_process_all();
-	
 	// If class provided, look up its allocations wrapper
 	VALUE allocations = Qnil;
 	if (!NIL_P(klass)) {
@@ -675,17 +672,24 @@ static VALUE Memory_Profiler_Capture_each_object(int argc, VALUE *argv, VALUE se
 		if (st_lookup(capture->tracked, (st_data_t)klass, &allocations_data)) {
 			allocations = (VALUE)allocations_data;
 		} else {
-			// Class not tracked - nothing to iterate
-			// Re-enable GC before returning
-			rb_gc_enable();
+			// Class not tracked - nothing to iterate:
 			return self;
 		}
 	}
+	
+	// Disable GC to prevent objects from being collected during iteration
+	// rb_gc_disable returns the previous state (non-zero = was enabled, 0 = was disabled)
+	int gc_was_enabled = RTEST(rb_gc_disable());
+	
+	// Process all pending events to clean up stale entries:
+	Memory_Profiler_Events_process_all();
+	// At this point, all remaining objects in the table should be valid.
 	
 	// Setup arguments for iteration
 	struct Memory_Profiler_Each_Object_Arguments arguments = {
 		.self = self,
-		.allocations = allocations
+		.allocations = allocations,
+		.gc_was_enabled = gc_was_enabled
 	};
 	
 	// Use rb_ensure to guarantee cleanup even if exception is raised
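Note: across the hunks above, `each_object` now records the prior GC state and defers restoration to the `rb_ensure` handler, so GC is re-enabled even if the caller's block raises. A minimal sketch of that cleanup pattern, assuming Ruby 3.x's typed `rb_ensure` signature; the names below (`iteration_body`, `iterate_with_gc_paused`) are illustrative, not the gem's:

#include <ruby.h>

struct iteration_arguments {
	VALUE collection;
	int gc_was_disabled; // Whether GC was already off before we touched it.
};

// Body: runs with GC disabled so entries cannot be reclaimed mid-iteration.
static VALUE iteration_body(VALUE data) {
	struct iteration_arguments *arguments = (struct iteration_arguments *)data;
	// ... yield each object to the caller's block here ...
	return arguments->collection;
}

// Ensure handler: runs even if the block above raises or breaks.
static VALUE iteration_ensure(VALUE data) {
	struct iteration_arguments *arguments = (struct iteration_arguments *)data;
	// Only re-enable GC if this call was the one that turned it off:
	if (!arguments->gc_was_disabled) rb_gc_enable();
	return Qnil;
}

static VALUE iterate_with_gc_paused(VALUE collection) {
	struct iteration_arguments arguments = {
		.collection = collection,
		// rb_gc_disable() returns the previous state (Qtrue when GC was already disabled):
		.gc_was_disabled = RTEST(rb_gc_disable()),
	};
	return rb_ensure(iteration_body, (VALUE)&arguments, iteration_ensure, (VALUE)&arguments);
}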
@@ -19,6 +19,9 @@ struct Memory_Profiler_Events {
 	// Double-buffered event queues (contains events from all Capture instances).
 	struct Memory_Profiler_Queue queues[2];
 	struct Memory_Profiler_Queue *available, *processing;
+	
+	// Guard flag to prevent recursive processing (0 = not processing, 1 = processing)
+	int processing_flag;
 	
 	// Postponed job handle for processing the queue.
 	// Postponed job handles are an extremely limited resource, so we only register one global event queue.
@@ -59,6 +62,7 @@ static VALUE Memory_Profiler_Events_new(void) {
 	// Start with queues[0] available for incoming events, queues[1] for processing (initially empty):
 	events->available = &events->queues[0];
 	events->processing = &events->queues[1];
+	events->processing_flag = 0;
 	
 	// Pre-register the single postponed job for processing the queue:
 	events->postponed_job_handle = rb_postponed_job_preregister(0,
@@ -86,8 +90,6 @@ struct Memory_Profiler_Events* Memory_Profiler_Events_instance(void) {
 		// Pin the global events object so it's never GC'd:
 		rb_gc_register_mark_object(instance);
 		
-		if (DEBUG) fprintf(stderr, "Global event queue system initialized and pinned\n");
-		
 		TypedData_Get_Struct(instance, struct Memory_Profiler_Events, &Memory_Profiler_Events_type, events);
 	}
 	
@@ -195,14 +197,16 @@ int Memory_Profiler_Events_enqueue(
 		RB_OBJ_WRITE(events->self, &event->klass, klass);
 		RB_OBJ_WRITE(events->self, &event->object, object);
 		
-		if (DEBUG) fprintf(stderr, "Queued %s to available queue, size: %zu\n",
-			Memory_Profiler_Event_Type_name(type), events->available->count);
+		if (DEBUG) {
+			fprintf(stderr, "[EVENTS] Enqueued %s: object=%p available_count=%zu processing_flag=%d\n",
+				Memory_Profiler_Event_Type_name(type), (void*)object, events->available->count, events->processing_flag);
+		}
 		
 		rb_postponed_job_trigger(events->postponed_job_handle);
 		// Success:
 		return 1;
 	}
-
+
 	// Queue full:
 	return 0;
@@ -211,6 +215,12 @@ int Memory_Profiler_Events_enqueue(
 // Public API function - called from Capture stop() to ensure all events are processed.
 void Memory_Profiler_Events_process_all(void) {
 	struct Memory_Profiler_Events *events = Memory_Profiler_Events_instance();
+	
+	// Explicitly prevent re-entrancy here:
+	if (events->processing_flag) {
+		rb_raise(rb_eRuntimeError, "Recursive call detected!");
+	}
+	
 	Memory_Profiler_Events_process_queue((void *)events);
 }
 
@@ -228,17 +238,39 @@ static VALUE Memory_Profiler_Events_process_event_protected(VALUE arg) {
 static void Memory_Profiler_Events_process_queue(void *arg) {
 	struct Memory_Profiler_Events *events = (struct Memory_Profiler_Events *)arg;
 	
+	// Check for recursive call - this would break double buffering!
+	if (events->processing_flag) {
+		// Explicitly allow re-entrancy here, as the postponed job could be triggered during `process_all`.
+		return;
+	}
+	
+	// Set processing flag to prevent recursion
+	events->processing_flag = 1;
+	
+	if (DEBUG) {
+		fprintf(stderr, "[EVENTS] process_queue START: available_count=%zu processing_count=%zu\n",
+			events->available->count, events->processing->count);
+	}
+	
 	// Swap the queues: available becomes processing, and the old processing queue (now empty) becomes available. This allows new events to continue enqueueing to the new available queue while we process.
 	struct Memory_Profiler_Queue *queue_to_process = events->available;
 	events->available = events->processing;
 	events->processing = queue_to_process;
 	
-	if (DEBUG) fprintf(stderr, "Processing event queue: %zu events\n", events->processing->count);
+	if (DEBUG) {
+		fprintf(stderr, "[EVENTS] Queues swapped: processing_count=%zu (was available), available_count=%zu (was processing)\n",
+			events->processing->count, events->available->count);
+	}
 	
 	// Process all events in order (maintains NEWOBJ before FREEOBJ for same object):
 	for (size_t i = 0; i < events->processing->count; i++) {
 		struct Memory_Profiler_Event *event = Memory_Profiler_Queue_at(events->processing, i);
 		
+		if (DEBUG) {
+			fprintf(stderr, "[EVENTS] Processing event[%zu]: type=%s object=%p capture=%p\n",
+				i, Memory_Profiler_Event_Type_name(event->type), (void*)event->object, (void*)event->capture);
+		}
+		
 		// Process event with rb_protect to catch any exceptions:
 		int state = 0;
 		rb_protect(Memory_Profiler_Events_process_event_protected, (VALUE)event, &state);
@@ -249,6 +281,11 @@ static void Memory_Profiler_Events_process_queue(void *arg) {
 			rb_set_errinfo(Qnil);
 		}
 		
+		if (DEBUG) {
+			fprintf(stderr, "[EVENTS] Processed event[%zu]: type=%s object=%p (exception=%d)\n",
+				i, Memory_Profiler_Event_Type_name(event->type), (void*)event->object, state);
+		}
+		
 		// Clear this event after processing to prevent marking stale data if GC runs:
 		event->type = MEMORY_PROFILER_EVENT_TYPE_NONE;
 		RB_OBJ_WRITE(events->self, &event->capture, Qnil);
@@ -256,6 +293,16 @@ static void Memory_Profiler_Events_process_queue(void *arg) {
 		RB_OBJ_WRITE(events->self, &event->object, Qnil);
 	}
 	
+	// Save count before clearing for logging
+	size_t processed_count = events->processing->count;
+	
 	// Clear the processing queue (which is now empty logically):
 	Memory_Profiler_Queue_clear(events->processing);
+	
+	// Clear processing flag
+	events->processing_flag = 0;
+	
+	if (DEBUG) {
+		fprintf(stderr, "[EVENTS] process_queue END: processed %zu events\n", processed_count);
+	}
 }
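Note: the comments above describe the two mechanisms at work here: a double-buffered queue swap, so new events keep landing in the `available` buffer while the previously filled buffer is drained, and a guard flag that turns a re-entrant trigger into a no-op. A compact, self-contained sketch of that pattern with plain C types (all names are illustrative, not the gem's):

#include <stddef.h>
#include <stdio.h>

#define QUEUE_CAPACITY 64

struct queue {
	size_t count;
	int items[QUEUE_CAPACITY];
};

struct events {
	struct queue queues[2];
	struct queue *available, *processing;
	int processing_flag; // Non-zero while draining; re-entrant triggers return early.
};

// New events always land in the available buffer, even while a drain is in progress.
static void events_enqueue(struct events *events, int item) {
	if (events->available->count < QUEUE_CAPACITY) {
		events->available->items[events->available->count++] = item;
	}
}

static void events_process_queue(struct events *events) {
	// A re-entrant trigger (e.g. fired while we are already draining) is a no-op:
	if (events->processing_flag) return;
	events->processing_flag = 1;
	
	// Swap buffers: drain the full one while the (now empty) other keeps accepting events.
	struct queue *queue_to_process = events->available;
	events->available = events->processing;
	events->processing = queue_to_process;
	
	for (size_t i = 0; i < events->processing->count; i++) {
		printf("processing item: %d\n", events->processing->items[i]);
	}
	
	events->processing->count = 0;
	events->processing_flag = 0;
}

int main(void) {
	struct events events = {0};
	events.available = &events.queues[0];
	events.processing = &events.queues[1];
	
	events_enqueue(&events, 1);
	events_enqueue(&events, 2);
	events_process_queue(&events);
	return 0;
}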
@@ -365,7 +365,7 @@ void Memory_Profiler_Object_Table_compact(struct Memory_Profiler_Object_Table *t
 	table->count = 0;
 	table->tombstones = 0; // Compaction clears tombstones
 	
-	// Reinsert all entries with new hash values
+	// Reinsert all entries with new hash values:
 	for (size_t i = 0; i < temp_count; i++) {
 		int found;
 		size_t index = find_entry(table->entries, table->capacity, temp_entries[i].object, &found, NULL, "compact");
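Note: the context above shows the compaction strategy for the open-addressed object table: snapshot the live entries, reset the table (which clears tombstones), and reinsert each entry at a freshly probed slot; those tombstones are the same deleted slots the `entry->object == 0 || entry->object == Qnil` check skips during iteration. A toy model of that flow with integer keys and arbitrary sentinel values instead of `VALUE`s:

#include <stddef.h>
#include <stdio.h>
#include <string.h>

#define CAPACITY 8
#define EMPTY    0UL // never-used slot
#define DELETED  1UL // tombstone left behind by a removal

struct table {
	unsigned long entries[CAPACITY];
	size_t count;
	size_t tombstones;
};

// Linear-probe insert into the first empty or tombstoned slot.
static void table_insert(struct table *table, unsigned long key) {
	size_t index = key % CAPACITY;
	while (table->entries[index] != EMPTY && table->entries[index] != DELETED) {
		index = (index + 1) % CAPACITY;
	}
	table->entries[index] = key;
	table->count++;
}

// Compaction: copy out live entries, reset the table (clearing tombstones),
// then reinsert each entry so it lands at a freshly probed slot.
static void table_compact(struct table *table) {
	unsigned long temp_entries[CAPACITY];
	size_t temp_count = 0;
	
	for (size_t i = 0; i < CAPACITY; i++) {
		unsigned long key = table->entries[i];
		if (key != EMPTY && key != DELETED) temp_entries[temp_count++] = key;
	}
	
	memset(table->entries, 0, sizeof(table->entries));
	table->count = 0;
	table->tombstones = 0;
	
	for (size_t i = 0; i < temp_count; i++) {
		table_insert(table, temp_entries[i]);
	}
}

int main(void) {
	struct table table = {0};
	table_insert(&table, 10);
	table_insert(&table, 18); // collides with 10, probes forward
	table.entries[10 % CAPACITY] = DELETED; // simulate a deletion
	table.count--;
	table.tombstones++;
	
	table_compact(&table);
	printf("count=%zu tombstones=%zu\n", table.count, table.tombstones);
	return 0;
}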
@@ -7,7 +7,7 @@
 module Memory
 	# @namespace
 	module Profiler
-		VERSION = "1.6.1"
+		VERSION = "1.6.2"
 	end
 end
 
data/readme.md CHANGED
@@ -22,6 +22,11 @@ Please see the [project documentation](https://socketry.github.io/memory-profile
 
 Please see the [project releases](https://socketry.github.io/memory-profiler/releases/index) for all releases.
 
+### v1.6.2
+
+- Ensure all `FREEOBJ` events are enqueued.
+- Allow limited re-entrancy in queue processing.
+
 ### v1.6.1
 
 - Add `track_all` option to `Sampler`.
@@ -74,11 +79,6 @@ Please see the [project releases](https://socketry.github.io/memory-profiler/rel
 
 - Ignore `freeobj` events for objects with anonymous classes that are not tracked (and thus become `T_NONE`).
 
-### v1.1.13
-
-- Fix sampler loop interval handling.
-- Log capture statistics from sampler run loop.
-
 ## Contributing
 
 We welcome contributions to this project.
data/releases.md CHANGED
@@ -1,5 +1,10 @@
 # Releases
 
+## v1.6.2
+
+- Ensure all `FREEOBJ` events are enqueued.
+- Allow limited re-entrancy in queue processing.
+
 ## v1.6.1
 
 - Add `track_all` option to `Sampler`.
data.tar.gz.sig CHANGED
Binary file
metadata CHANGED
@@ -1,7 +1,7 @@
 --- !ruby/object:Gem::Specification
 name: memory-profiler
 version: !ruby/object:Gem::Version
-  version: 1.6.1
+  version: 1.6.2
 platform: ruby
 authors:
 - Samuel Williams
metadata.gz.sig CHANGED
Binary file