memory-profiler 1.5.1 → 1.6.0
This diff shows the changes between publicly released versions of this package, as they appear in their respective public registries. It is provided for informational purposes only.
- checksums.yaml +4 -4
- checksums.yaml.gz.sig +0 -0
- data/ext/memory/profiler/capture.c +45 -19
- data/ext/memory/profiler/table.c +1 -21
- data/ext/memory/profiler/table.h +1 -11
- data/lib/memory/profiler/sampler.rb +1 -0
- data/lib/memory/profiler/version.rb +1 -1
- data.tar.gz.sig +0 -0
- metadata +1 -1
- metadata.gz.sig +0 -0
checksums.yaml
CHANGED

@@ -1,7 +1,7 @@
 ---
 SHA256:
-  metadata.gz:
-  data.tar.gz:
+  metadata.gz: 8a65991a6612d28ede2c4ab5652bcf410e7edf92a0f98e8bdeebd921b64232d6
+  data.tar.gz: 230a10b0d5ed42f74d19007534dbae1ee49df3c67d0507fcf42a0fc67b1cf5e3
 SHA512:
-  metadata.gz:
-  data.tar.gz:
+  metadata.gz: 7b64a8a6b0a500f84c52689aa5affad466d19e3ddc723db2014af3ef784fe35b7d6704aa2bf363f27c23d8fe75e27bdef5c7e969736a2bedcfc925467dd3e16a
+  data.tar.gz: da48ff77a1915aaa978ce162b6c44d310b6b4a77af59b6df18c3e6fc92b19aa40df99473bb461dafd0a160ded3c5721681cce3e22dece393d484c0b2c51e67b4
checksums.yaml.gz.sig
CHANGED
Binary file

data/ext/memory/profiler/capture.c
CHANGED
@@ -28,6 +28,9 @@ struct Memory_Profiler_Capture {
 	// Should we queue callbacks? (temporarily disabled during queue processing).
 	int paused;
 	
+	// Should we automatically track all classes? (if false, only explicitly tracked classes are tracked).
+	int track_all;
+	
 	// Tracked classes: class => VALUE (wrapped Memory_Profiler_Capture_Allocations).
 	st_table *tracked;
 	
@@ -163,21 +166,18 @@ static void Memory_Profiler_Capture_process_newobj(VALUE self, VALUE klass, VALUE object) {
 	// Pause the capture to prevent infinite loop:
 	capture->paused += 1;
 	
-	//
-	capture->new_count++;
-	
-	// Look up or create allocations record for this class:
+	// Look up allocations record for this class:
 	st_data_t allocations_data;
 	VALUE allocations;
 	struct Memory_Profiler_Capture_Allocations *record;
 	
 	if (st_lookup(capture->tracked, (st_data_t)klass, &allocations_data)) {
-		// Existing record
+		// Existing record - class is explicitly tracked
 		allocations = (VALUE)allocations_data;
 		record = Memory_Profiler_Allocations_get(allocations);
 		record->new_count++;
-	} else {
-		// First time seeing this class, create record automatically
+	} else if (capture->track_all) {
+		// First time seeing this class, create record automatically (if track_all is enabled)
 		record = ALLOC(struct Memory_Profiler_Capture_Allocations);
 		record->callback = Qnil;
 		record->new_count = 1;
@@ -187,8 +187,15 @@ static void Memory_Profiler_Capture_process_newobj(VALUE self, VALUE klass, VALUE object) {
 		st_insert(capture->tracked, (st_data_t)klass, (st_data_t)allocations);
 		RB_OBJ_WRITTEN(self, Qnil, klass);
 		RB_OBJ_WRITTEN(self, Qnil, allocations);
+	} else {
+		// track_all disabled and class not explicitly tracked - skip this allocation entirely
+		capture->paused -= 1;
+		return;
 	}
 	
+	// Increment global new count (only if we're tracking this class):
+	capture->new_count++;
+	
 	VALUE data = Qnil;
 	if (!NIL_P(record->callback)) {
 		data = rb_funcall(record->callback, rb_intern("call"), 3, klass, sym_newobj, Qnil);
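The reordering above is the behavioral core of this hunk: `capture->new_count++` moves below the class lookup so the global counter only advances for tracked classes, and the new early-return path must balance the `paused` guard it incremented on entry. A minimal standalone sketch of that guard pattern, with simplified names that are not the gem's actual API:

#include <stdio.h>

/* Simplified stand-ins for the real struct and class lookup; illustrative only. */
struct handler {
	int paused;     /* re-entrancy guard: > 0 while inside the handler */
	long new_count; /* global count, only advanced for tracked classes */
};

static void process_newobj(struct handler *handler, int tracked, int track_all) {
	if (handler->paused) return; /* ignore allocations made by our own callback */
	handler->paused += 1;

	if (!tracked && !track_all) {
		/* Skip entirely, but balance the guard on every exit path. */
		handler->paused -= 1;
		return;
	}

	handler->new_count++;
	handler->paused -= 1;
}

int main(void) {
	struct handler handler = {0, 0};
	process_newobj(&handler, 0, 0); /* untracked, track_all off: skipped */
	process_newobj(&handler, 1, 0); /* explicitly tracked: counted */
	printf("new_count = %ld\n", handler.new_count); /* prints 1 */
	return 0;
}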
@@ -367,9 +374,10 @@ static VALUE Memory_Profiler_Capture_alloc(VALUE klass) {
 	capture->new_count = 0;
 	capture->free_count = 0;
 	
-	// Initialize state flags - not running, callbacks disabled
+	// Initialize state flags - not running, callbacks disabled, track_all disabled by default
 	capture->running = 0;
 	capture->paused = 0;
+	capture->track_all = 0;
 	
 	// Global event queue system will auto-initialize on first use (lazy initialization)
 	
@@ -381,6 +389,24 @@ static VALUE Memory_Profiler_Capture_initialize(VALUE self) {
 	return self;
 }
 
+// Get track_all setting
+static VALUE Memory_Profiler_Capture_track_all_get(VALUE self) {
+	struct Memory_Profiler_Capture *capture;
+	TypedData_Get_Struct(self, struct Memory_Profiler_Capture, &Memory_Profiler_Capture_type, capture);
+	
+	return capture->track_all ? Qtrue : Qfalse;
+}
+
+// Set track_all setting
+static VALUE Memory_Profiler_Capture_track_all_set(VALUE self, VALUE value) {
+	struct Memory_Profiler_Capture *capture;
+	TypedData_Get_Struct(self, struct Memory_Profiler_Capture, &Memory_Profiler_Capture_type, capture);
+	
+	capture->track_all = RTEST(value) ? 1 : 0;
+	
+	return value;
+}
+
 // Start capturing allocations
 static VALUE Memory_Profiler_Capture_start(VALUE self) {
 	struct Memory_Profiler_Capture *capture;
@@ -572,14 +598,10 @@ struct Memory_Profiler_Each_Object_Arguments {
 	VALUE allocations;
 };
 
-// Cleanup function to
+// Cleanup function to re-enable GC
 static VALUE Memory_Profiler_Capture_each_object_ensure(VALUE arg) {
-	
-	
-	TypedData_Get_Struct(arguments->self, struct Memory_Profiler_Capture, &Memory_Profiler_Capture_type, capture);
-	
-	// Make table weak again
-	Memory_Profiler_Object_Table_decrement_strong(capture->states);
+	// Re-enable GC (rb_gc_enable returns previous state, but we don't need it)
+	rb_gc_enable();
 	
 	return Qnil;
 }
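The new ensure function replaces the strong/weak table toggle with a plain `rb_gc_enable()`. The call site is outside the visible hunks, but Ruby C extensions conventionally pair a body function and an ensure function via `rb_ensure` so cleanup runs even if the iterated block raises. A hedged sketch of that wiring, where only `rb_gc_disable`, `rb_gc_enable`, and `rb_ensure` are real Ruby APIs and the function names are hypothetical:

#include <ruby.h>

static VALUE each_object_body(VALUE self) {
	/* ... yield each tracked object while GC is disabled ... */
	return Qnil;
}

static VALUE each_object_ensure(VALUE self) {
	rb_gc_enable(); /* always runs, even if the yielded block raises */
	return Qnil;
}

static VALUE each_object_protected(VALUE self) {
	rb_gc_disable();
	return rb_ensure(each_object_body, self, each_object_ensure, self);
}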
@@ -639,10 +661,11 @@ static VALUE Memory_Profiler_Capture_each_object(int argc, VALUE *argv, VALUE self) {
 	
 	RETURN_ENUMERATOR(self, argc, argv);
 	
-	//
-	
+	// Disable GC to prevent objects from being collected during iteration
+	rb_gc_disable();
 	
-	// Process all pending events
+	// Process all pending events to clean up stale entries
+	// At this point, all remaining objects in the table should be valid
 	Memory_Profiler_Events_process_all();
 	
 	// If class provided, look up its allocations wrapper
@@ -653,7 +676,8 @@ static VALUE Memory_Profiler_Capture_each_object(int argc, VALUE *argv, VALUE self) {
 		allocations = (VALUE)allocations_data;
 	} else {
 		// Class not tracked - nothing to iterate
-		
+		// Re-enable GC before returning
+		rb_gc_enable();
 		return self;
 	}
 }
@@ -746,6 +770,8 @@ void Init_Memory_Profiler_Capture(VALUE Memory_Profiler)
 	rb_define_alloc_func(Memory_Profiler_Capture, Memory_Profiler_Capture_alloc);
 	
 	rb_define_method(Memory_Profiler_Capture, "initialize", Memory_Profiler_Capture_initialize, 0);
+	rb_define_method(Memory_Profiler_Capture, "track_all", Memory_Profiler_Capture_track_all_get, 0);
+	rb_define_method(Memory_Profiler_Capture, "track_all=", Memory_Profiler_Capture_track_all_set, 1);
 	rb_define_method(Memory_Profiler_Capture, "start", Memory_Profiler_Capture_start, 0);
 	rb_define_method(Memory_Profiler_Capture, "stop", Memory_Profiler_Capture_stop, 0);
 	rb_define_method(Memory_Profiler_Capture, "track", Memory_Profiler_Capture_track, -1); // -1 to accept block
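These registrations expose the flag as a `track_all`/`track_all=` accessor pair on the capture class. A hedged sketch of driving the new methods through the C API (the helper is hypothetical; `rb_funcall` and `rb_intern` are the real entry points):

#include <ruby.h>

/* Hypothetical helper: enables auto-tracking on a capture instance and
   reads the flag back via the methods registered above. */
static void enable_track_all(VALUE capture) {
	rb_funcall(capture, rb_intern("track_all="), 1, Qtrue);
	VALUE enabled = rb_funcall(capture, rb_intern("track_all"), 0);
	(void)enabled; /* Qtrue once set */
}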
data/ext/memory/profiler/table.c
CHANGED

@@ -37,7 +37,6 @@ struct Memory_Profiler_Object_Table* Memory_Profiler_Object_Table_new(size_t initial_capacity) {
 	table->capacity = initial_capacity > 0 ? initial_capacity : INITIAL_CAPACITY;
 	table->count = 0;
 	table->tombstones = 0;
-	table->strong = 0; // Start as weak table (strong == 0 means weak)
 	
 	// Use calloc to zero out entries (0 = empty slot)
 	table->entries = calloc(table->capacity, sizeof(struct Memory_Profiler_Object_Table_Entry));
@@ -303,12 +302,7 @@ void Memory_Profiler_Object_Table_mark(struct Memory_Profiler_Object_Table *table) {
 		struct Memory_Profiler_Object_Table_Entry *entry = &table->entries[i];
 		// Skip empty slots and tombstones
 		if (entry->object != 0 && entry->object != TOMBSTONE) {
-			//
-			// When weak (strong == 0), object keys can be GC'd (that's how we detect frees)
-			if (table->strong > 0) {
-				rb_gc_mark_movable(entry->object);
-			}
-			
+			// Don't mark object keys - table is weak (object keys can be GC'd, that's how we detect frees)
 			// Always mark the other fields (klass, data) - we own these
 			if (entry->klass) rb_gc_mark_movable(entry->klass);
 			if (entry->data) rb_gc_mark_movable(entry->data);
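Not marking the object keys is exactly what makes the table weak: the GC remains free to collect a key, and, per the comment above, that collection is how the profiler detects frees. The values (`klass`, `data`) are still marked movable, which implies a matching compaction pass that refreshes their addresses. A hedged sketch of that mark/compact pairing, with types reduced to essentials (`rb_gc_mark_movable` and `rb_gc_location` are the real GC APIs; the rest is illustrative, not the gem's code):

#include <ruby.h>

struct weak_entry {
	VALUE object; /* weak key: deliberately never marked */
	VALUE klass;  /* strong value */
	VALUE data;   /* strong value */
};

static void weak_table_mark(struct weak_entry *entries, size_t capacity) {
	for (size_t i = 0; i < capacity; i++) {
		struct weak_entry *entry = &entries[i];
		if (entry->object == 0) continue; /* empty slot */
		/* Values are kept alive but allowed to move during compaction: */
		if (entry->klass) rb_gc_mark_movable(entry->klass);
		if (entry->data) rb_gc_mark_movable(entry->data);
	}
}

static void weak_table_compact(struct weak_entry *entries, size_t capacity) {
	for (size_t i = 0; i < capacity; i++) {
		struct weak_entry *entry = &entries[i];
		if (entry->object == 0) continue;
		/* Marked-movable values may have moved; fetch their new addresses: */
		if (entry->klass) entry->klass = rb_gc_location(entry->klass);
		if (entry->data) entry->data = rb_gc_location(entry->data);
	}
}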
@@ -413,17 +407,3 @@ size_t Memory_Profiler_Object_Table_size(struct Memory_Profiler_Object_Table *table) {
 	return table->count;
 }
 
-// Increment strong reference count (makes table strong when > 0)
-void Memory_Profiler_Object_Table_increment_strong(struct Memory_Profiler_Object_Table *table) {
-	if (table) {
-		table->strong++;
-	}
-}
-
-// Decrement strong reference count (makes table weak when == 0)
-void Memory_Profiler_Object_Table_decrement_strong(struct Memory_Profiler_Object_Table *table) {
-	if (table && table->strong > 0) {
-		table->strong--;
-	}
-}
-
data/ext/memory/profiler/table.h
CHANGED

@@ -19,10 +19,8 @@ struct Memory_Profiler_Object_Table_Entry {
 // Custom object table for tracking allocations during GC.
 // Uses system malloc/free (not ruby_xmalloc) to be safe during GC compaction.
 // Keys are object addresses (updated during compaction).
+// Table is always weak - object keys are not marked, allowing GC to collect them.
 struct Memory_Profiler_Object_Table {
-	// Strong reference count: 0 = weak (don't mark keys), >0 = strong (mark keys)
-	int strong;
-	
 	size_t capacity; // Total slots
 	size_t count; // Used slots (occupied entries)
 	size_t tombstones; // Deleted slots (tombstone markers)
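The `count`/`tombstones` split is standard open-addressing bookkeeping: deleting an entry leaves a tombstone so later probe chains stay intact. A sketch of a tombstone-aware lookup consistent with these fields (the probing scheme, hash, and sentinel value are assumptions, not the gem's actual code; `TOMBSTONE` itself does appear in table.c above):

#include <stddef.h>
#include <stdint.h>

#define TOMBSTONE ((uintptr_t)1) /* assumed sentinel; 0 = empty slot */

struct table_entry {
	uintptr_t object; /* key: object address */
};

static struct table_entry *table_find(struct table_entry *entries, size_t capacity, uintptr_t key) {
	size_t index = (key >> 3) % capacity; /* drop alignment bits (assumption) */
	for (size_t probe = 0; probe < capacity; probe++) {
		struct table_entry *entry = &entries[(index + probe) % capacity];
		if (entry->object == 0) return NULL;      /* empty: key is absent */
		if (entry->object == TOMBSTONE) continue; /* deleted: keep probing */
		if (entry->object == key) return entry;   /* found */
	}
	return NULL;
}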
@@ -62,11 +60,3 @@ void Memory_Profiler_Object_Table_compact(struct Memory_Profiler_Object_Table *table);
 // Get current size
 size_t Memory_Profiler_Object_Table_size(struct Memory_Profiler_Object_Table *table);
 
-// Increment strong reference count
-// When strong > 0, table is strong and will mark object keys during GC
-void Memory_Profiler_Object_Table_increment_strong(struct Memory_Profiler_Object_Table *table);
-
-// Decrement strong reference count
-// When strong == 0, table is weak and will not mark object keys during GC
-void Memory_Profiler_Object_Table_decrement_strong(struct Memory_Profiler_Object_Table *table);
-
data.tar.gz.sig
CHANGED
Binary file

metadata
CHANGED
metadata.gz.sig
CHANGED
Binary file