memory-profiler 1.1.5 → 1.1.7
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +4 -4
- checksums.yaml.gz.sig +0 -0
- data/ext/memory/profiler/capture.c +90 -20
- data/lib/memory/profiler/call_tree.rb +75 -1
- data/lib/memory/profiler/sampler.rb +61 -4
- data/lib/memory/profiler/version.rb +1 -1
- data/readme.md +10 -0
- data/releases.md +10 -0
- data.tar.gz.sig +0 -0
- metadata +1 -1
- metadata.gz.sig +0 -0
checksums.yaml
CHANGED
@@ -1,7 +1,7 @@
 ---
 SHA256:
-  metadata.gz:
-  data.tar.gz:
+  metadata.gz: 8bb750e881686089136fbcd20b073e857dc3a7dc7f4784f0a75cf8fbb17444ec
+  data.tar.gz: 75d398699455359a70dafbbd1c71ce8dd529faf492f8fd7f595cc9714101061f
 SHA512:
-  metadata.gz:
-  data.tar.gz:
+  metadata.gz: 83261c47a3365c107239cd626b61afe496e51d17e04ee36ac81235d7493997089fd97648965af42747ff1b3f8074f1fe72df31a1ecf6ce774fe9ac1ae2cf0565
+  data.tar.gz: 5bf3be79ccabbc16f1d753c94a3af7d89affabfc34d4feb9fd001f0573cf516614363f7430dfbe3553166076c349ffe38cee7e39309f9f6eecef867ba4978d78
checksums.yaml.gz.sig
CHANGED
Binary file

data/ext/memory/profiler/capture.c
CHANGED
@@ -52,7 +52,10 @@ struct Memory_Profiler_Capture {
 	// class => VALUE (wrapped Memory_Profiler_Capture_Allocations).
 	st_table *tracked_classes;
 	
-	//
+	// Master switch - is tracking active? (set by start/stop)
+	int running;
+	
+	// Internal - should we queue callbacks? (temporarily disabled during queue processing)
 	int enabled;
 	
 	// Queue for new objects (processed via postponed job):
@@ -300,20 +303,23 @@ static void Memory_Profiler_Capture_newobj_handler(VALUE self, struct Memory_Pr
 	if (st_lookup(capture->tracked_classes, (st_data_t)klass, &allocations_data)) {
 		VALUE allocations = (VALUE)allocations_data;
 		struct Memory_Profiler_Capture_Allocations *record = Memory_Profiler_Allocations_get(allocations);
+		
+		// Always track counts (even during queue processing)
 		record->new_count++;
 		
-		//
-		if (!NIL_P(record->callback)) {
+		// Only queue for callback if tracking is enabled (prevents infinite recursion)
+		if (capture->enabled && !NIL_P(record->callback)) {
 			// Push a new item onto the queue (returns pointer to write to)
 			// NOTE: realloc is safe during allocation (doesn't trigger Ruby allocation)
 			struct Memory_Profiler_Newobj_Queue_Item *newobj = Memory_Profiler_Queue_push(&capture->newobj_queue);
 			if (newobj) {
 				if (DEBUG_EVENT_QUEUES) fprintf(stderr, "Queued newobj, queue size now: %zu/%zu\n",
 					capture->newobj_queue.count, capture->newobj_queue.capacity);
-				
-				
-				newobj->
-				newobj->
+				
+				// Write VALUEs with write barriers (combines write + GC notification)
+				RB_OBJ_WRITE(self, &newobj->klass, klass);
+				RB_OBJ_WRITE(self, &newobj->allocations, allocations);
+				RB_OBJ_WRITE(self, &newobj->object, object);
 				
 				// Trigger postponed job to process the queue
 				if (DEBUG_EVENT_QUEUES) fprintf(stderr, "Triggering postponed job to process queues\n");
@@ -350,10 +356,13 @@ static void Memory_Profiler_Capture_freeobj_handler(VALUE self, struct Memory_Pr
 	if (st_lookup(capture->tracked_classes, (st_data_t)klass, &allocations_data)) {
 		VALUE allocations = (VALUE)allocations_data;
 		struct Memory_Profiler_Capture_Allocations *record = Memory_Profiler_Allocations_get(allocations);
+		
+		// Always track counts (even during queue processing)
 		record->free_count++;
 		
-		//
-		
+		// Only queue for callback if tracking is enabled and we have state
+		// Note: If NEWOBJ didn't queue (enabled=0), there's no state, so this naturally skips
+		if (capture->enabled && !NIL_P(record->callback) && record->object_states) {
 			if (DEBUG_STATE) fprintf(stderr, "Memory_Profiler_Capture_freeobj_handler: Looking up state for object: %p\n", (void *)object);
 			
 			// Look up state stored during NEWOBJ
@@ -364,14 +373,16 @@ static void Memory_Profiler_Capture_freeobj_handler(VALUE self, struct Memory_Pr
 			
 			// Push a new item onto the queue (returns pointer to write to)
 			// NOTE: realloc is safe during GC (doesn't trigger Ruby allocation)
-			struct Memory_Profiler_Freeobj_Queue_Item *
-			if (
+			struct Memory_Profiler_Freeobj_Queue_Item *freeobj = Memory_Profiler_Queue_push(&capture->freeobj_queue);
+			if (freeobj) {
 				if (DEBUG_EVENT_QUEUES) fprintf(stderr, "Queued freed object, queue size now: %zu/%zu\n",
 					capture->freeobj_queue.count, capture->freeobj_queue.capacity);
-				
-				
-				
-				
+				
+				// Write VALUEs with write barriers (combines write + GC notification)
+				// Note: We're during GC/FREEOBJ, but write barriers should be safe
+				RB_OBJ_WRITE(self, &freeobj->klass, klass);
+				RB_OBJ_WRITE(self, &freeobj->allocations, allocations);
+				RB_OBJ_WRITE(self, &freeobj->state, state);
 				
 				// Trigger postponed job to process both queues after GC
 				if (DEBUG_EVENT_QUEUES) fprintf(stderr, "Triggering postponed job to process queues after GC\n");
@@ -428,8 +439,6 @@ static void Memory_Profiler_Capture_event_callback(VALUE data, void *ptr) {
 	struct Memory_Profiler_Capture *capture;
 	TypedData_Get_Struct(data, struct Memory_Profiler_Capture, &Memory_Profiler_Capture_type, capture);
 	
-	if (!capture->enabled) return;
-	
 	VALUE object = rb_tracearg_object(trace_arg);
 	
 	// We don't want to track internal non-Object allocations:
@@ -473,6 +482,8 @@ static VALUE Memory_Profiler_Capture_alloc(VALUE klass) {
 		rb_raise(rb_eRuntimeError, "Failed to initialize hash table");
 	}
 	
+	// Initialize state flags - not running, callbacks disabled
+	capture->running = 0;
 	capture->enabled = 0;
 	
 	// Initialize both queues
@@ -504,7 +515,7 @@ static VALUE Memory_Profiler_Capture_start(VALUE self) {
 	struct Memory_Profiler_Capture *capture;
 	TypedData_Get_Struct(self, struct Memory_Profiler_Capture, &Memory_Profiler_Capture_type, capture);
 	
-	if (capture->
+	if (capture->running) return Qfalse;
 	
 	// Add event hook for NEWOBJ and FREEOBJ with RAW_ARG to get trace_arg
 	rb_add_event_hook2(
@@ -514,6 +525,8 @@ static VALUE Memory_Profiler_Capture_start(VALUE self) {
 		RUBY_EVENT_HOOK_FLAG_SAFE | RUBY_EVENT_HOOK_FLAG_RAW_ARG
 	);
 	
+	// Set both flags - we're now running and callbacks are enabled
+	capture->running = 1;
 	capture->enabled = 1;
 	
 	return Qtrue;
@@ -524,11 +537,16 @@ static VALUE Memory_Profiler_Capture_stop(VALUE self) {
 	struct Memory_Profiler_Capture *capture;
 	TypedData_Get_Struct(self, struct Memory_Profiler_Capture, &Memory_Profiler_Capture_type, capture);
 	
-	if (!capture->
+	if (!capture->running) return Qfalse;
 	
-	// Remove event hook using same data (self) we registered with
+	// Remove event hook using same data (self) we registered with. No more events will be queued after this point:
 	rb_remove_event_hook_with_data((rb_event_hook_func_t)Memory_Profiler_Capture_event_callback, self);
 	
+	// Flush any pending queued events before stopping. This ensures all callbacks are invoked and object_states is properly maintained.
+	Memory_Profiler_Capture_process_queues((void *)self);
+	
+	// Clear both flags - we're no longer running and callbacks are disabled
+	capture->running = 0;
 	capture->enabled = 0;
 	
 	return Qtrue;
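The new `running` flag is what makes `start`/`stop` idempotent from Ruby, while the older `enabled` flag becomes purely internal (toggled while queued callbacks are processed). A rough sketch of the intended behaviour; the require path and class name are assumed from the gem layout and are not part of this diff:

```ruby
require "memory/profiler" # assumed require path

capture = Memory::Profiler::Capture.new

capture.start # => true: event hooks installed, running/enabled set
capture.start # => false: already running, call is a no-op

# ... allocate and free objects ...

capture.stop  # => true: pending queues flushed, hooks removed, flags cleared
capture.stop  # => false: not running
```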
@@ -676,6 +694,57 @@ static VALUE Memory_Profiler_Capture_aref(VALUE self, VALUE klass) {
 	return Qnil;
 }
 
+// Struct to accumulate statistics during iteration
+struct Memory_Profiler_Allocations_Statistics {
+	size_t total_tracked_objects;
+	VALUE per_class_counts;
+};
+
+// Iterator callback to count object_states per class
+static int Memory_Profiler_Capture_count_object_states(st_data_t key, st_data_t value, st_data_t argument) {
+	struct Memory_Profiler_Allocations_Statistics *statistics = (struct Memory_Profiler_Allocations_Statistics *)argument;
+	VALUE klass = (VALUE)key;
+	VALUE allocations = (VALUE)value;
+	struct Memory_Profiler_Capture_Allocations *record = Memory_Profiler_Allocations_get(allocations);
+	
+	size_t object_states_count = record->object_states ? record->object_states->num_entries : 0;
+	statistics->total_tracked_objects += object_states_count;
+	
+	rb_hash_aset(statistics->per_class_counts, klass, SIZET2NUM(object_states_count));
+	return ST_CONTINUE;
+}
+
+// Get internal statistics for debugging
+// Returns hash with internal state sizes
+static VALUE Memory_Profiler_Capture_statistics(VALUE self) {
+	struct Memory_Profiler_Capture *capture;
+	TypedData_Get_Struct(self, struct Memory_Profiler_Capture, &Memory_Profiler_Capture_type, capture);
+	
+	VALUE statistics = rb_hash_new();
+	
+	// Tracked classes count
+	rb_hash_aset(statistics, ID2SYM(rb_intern("tracked_classes_count")), SIZET2NUM(capture->tracked_classes->num_entries));
+	
+	// Queue sizes
+	rb_hash_aset(statistics, ID2SYM(rb_intern("newobj_queue_size")), SIZET2NUM(capture->newobj_queue.count));
+	rb_hash_aset(statistics, ID2SYM(rb_intern("newobj_queue_capacity")), SIZET2NUM(capture->newobj_queue.capacity));
+	rb_hash_aset(statistics, ID2SYM(rb_intern("freeobj_queue_size")), SIZET2NUM(capture->freeobj_queue.count));
+	rb_hash_aset(statistics, ID2SYM(rb_intern("freeobj_queue_capacity")), SIZET2NUM(capture->freeobj_queue.capacity));
+	
+	// Count object_states entries for each tracked class
+	struct Memory_Profiler_Allocations_Statistics allocations_statistics = {
+		.total_tracked_objects = 0,
+		.per_class_counts = rb_hash_new()
+	};
+	
+	st_foreach(capture->tracked_classes, Memory_Profiler_Capture_count_object_states, (st_data_t)&allocations_statistics);
+	
+	rb_hash_aset(statistics, ID2SYM(rb_intern("total_tracked_objects")), SIZET2NUM(allocations_statistics.total_tracked_objects));
+	rb_hash_aset(statistics, ID2SYM(rb_intern("tracked_objects_per_class")), allocations_statistics.per_class_counts);
+	
+	return statistics;
+}
+
 void Init_Memory_Profiler_Capture(VALUE Memory_Profiler)
 {
 	// Initialize event symbols
@@ -697,6 +766,7 @@ void Init_Memory_Profiler_Capture(VALUE Memory_Profiler)
 	rb_define_method(Memory_Profiler_Capture, "each", Memory_Profiler_Capture_each, 0);
 	rb_define_method(Memory_Profiler_Capture, "[]", Memory_Profiler_Capture_aref, 1);
 	rb_define_method(Memory_Profiler_Capture, "clear", Memory_Profiler_Capture_clear, 0);
+	rb_define_method(Memory_Profiler_Capture, "statistics", Memory_Profiler_Capture_statistics, 0);
 	
 	// Initialize Allocations class
 	Init_Memory_Profiler_Allocations(Memory_Profiler);
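The `statistics` method registered above returns a plain Hash built with `rb_hash_aset`; a hedged sketch of reading it from Ruby (key names are taken from the C code above, the surrounding setup is assumed):

```ruby
capture = Memory::Profiler::Capture.new
capture.start

statistics = capture.statistics
statistics[:tracked_classes_count]     # number of classes registered in tracked_classes
statistics[:newobj_queue_size]         # pending NEWOBJ events (and :newobj_queue_capacity for the backing array)
statistics[:freeobj_queue_size]        # pending FREEOBJ events (and :freeobj_queue_capacity likewise)
statistics[:total_tracked_objects]     # sum of object_states entries across all classes
statistics[:tracked_objects_per_class] # Hash of class => object_states count

capture.stop
```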
data/lib/memory/profiler/call_tree.rb
CHANGED

@@ -52,6 +52,63 @@ module Memory
 					end
 				end
 				
+				# Prune this node's children, keeping only the top N by retained count.
+				# Prunes current level first, then recursively prunes retained children (top-down).
+				#
+				# @parameter limit [Integer] Number of children to keep.
+				# @returns [Integer] Total number of nodes pruned (discarded).
+				def prune!(limit)
+					return 0 if @children.nil?
+					
+					pruned_count = 0
+					
+					# Prune at this level first - keep only top N children by retained count
+					if @children.size > limit
+						sorted = @children.sort_by do |_location, child|
+							-child.retained_count # Sort descending
+						end
+						
+						# Detach and count discarded subtrees before we discard them:
+						discarded = sorted.drop(limit)
+						discarded.each do |_location, child|
+							# detach! breaks references to aid GC and returns node count
+							pruned_count += child.detach!
+						end
+						
+						@children = sorted.first(limit).to_h
+					end
+					
+					# Now recursively prune the retained children (avoid pruning nodes we just discarded)
+					@children.each_value {|child| pruned_count += child.prune!(limit)}
+					
+					# Clean up if we ended up with no children
+					@children = nil if @children.empty?
+					
+					pruned_count
+				end
+				
+				# Detach this node from the tree, breaking parent/child relationships.
+				# This helps GC collect pruned nodes that might be retained in object_states.
+				#
+				# Recursively detaches all descendants and returns total nodes detached.
+				#
+				# @returns [Integer] Number of nodes detached (including self).
+				def detach!
+					count = 1 # Self
+					
+					# Recursively detach all children first and sum their counts
+					if @children
+						@children.each_value {|child| count += child.detach!}
+					end
+					
+					# Break all references
+					@parent = nil
+					@children = nil
+					@location = nil
+					
+					return count
+				end
+				
 				# Check if this node is a leaf (end of a call path).
 				#
 				# @returns [Boolean] True if this node has no children.
@@ -95,8 +152,12 @@ module Memory
 			# Create a new call tree for tracking allocation paths.
 			def initialize
 				@root = Node.new
+				@insertion_count = 0
 			end
 			
+			# @attribute [Integer] Number of insertions (allocations) recorded in this tree.
+			attr_accessor :insertion_count
+			
 			# Record an allocation with the given caller locations.
 			#
 			# @parameter caller_locations [Array<Thread::Backtrace::Location>] The call stack.
@@ -114,6 +175,9 @@ module Memory
 				# Increment counts for entire path (from leaf back to root):
 				current.increment_path!
 				
+				# Track total insertions
+				@insertion_count += 1
+				
 				# Return leaf node for object tracking:
 				current
 			end
@@ -122,7 +186,7 @@ module Memory
 			#
 			# @parameter limit [Integer] Maximum number of paths to return.
 			# @parameter by [Symbol] Sort by :total or :retained count.
-			# @returns [Array
+			# @returns [Array(Array)] Array of [locations, total_count, retained_count].
 			def top_paths(limit = 10, by: :retained)
 				paths = []
 				
@@ -169,6 +233,16 @@ module Memory
 			# Clear all tracking data
 			def clear!
 				@root = Node.new
+				@insertion_count = 0
+			end
+			
+			# Prune the tree to keep only the top N children at each level.
+			# This controls memory usage by removing low-retained branches.
+			#
+			# @parameter limit [Integer] Number of children to keep per node (default: 5).
+			# @returns [Integer] Total number of nodes pruned (discarded).
+			def prune!(limit = 5)
+				@root.prune!(limit)
 			end
 			
 			private
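Together, `insertion_count`, `prune!` and `detach!` let a caller bound the size of a call tree between sampling passes. A minimal sketch, assuming the class is `Memory::Profiler::CallTree` (the file path suggests this, but the class definition itself is outside this diff):

```ruby
tree = Memory::Profiler::CallTree.new

# ... record allocation paths; each recorded path increments insertion_count ...

if tree.insertion_count > 10_000
	pruned = tree.prune!(5)   # keep the top 5 children (by retained count) at every level
	tree.insertion_count = 0  # reset the counter, as Sampler#prune_call_trees! does below
end
```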
data/lib/memory/profiler/sampler.rb
CHANGED

@@ -3,6 +3,8 @@
 # Released under the MIT License.
 # Copyright, 2025, by Samuel Williams.
 
+require "console"
+
 require_relative "capture"
 require_relative "call_tree"
 
@@ -83,21 +85,47 @@ module Memory
 				end
 			end
 			
-			attr_reader :depth
-			
 			# Create a new memory sampler.
 			#
 			# @parameter depth [Integer] Number of stack frames to capture for call path analysis.
 			# @parameter filter [Proc] Optional filter to exclude frames from call paths.
 			# @parameter increases_threshold [Integer] Number of increases before enabling detailed tracking.
-
+			# @parameter prune_limit [Integer] Keep only top N children per node during pruning (default: 5).
+			# @parameter prune_threshold [Integer] Number of insertions before auto-pruning (nil = no auto-pruning).
+			def initialize(depth: 4, filter: nil, increases_threshold: 10, prune_limit: 5, prune_threshold: nil)
 				@depth = depth
 				@filter = filter || default_filter
 				@increases_threshold = increases_threshold
+				@prune_limit = prune_limit
+				@prune_threshold = prune_threshold
 				@capture = Capture.new
 				@call_trees = {}
 				@samples = {}
 			end
+			
+			# @attribute [Integer] The depth of the call tree.
+			attr :depth
+			
+			# @attribute [Proc] The filter to exclude frames from call paths.
+			attr :filter
+			
+			# @attribute [Integer] The number of increases before enabling detailed tracking.
+			attr :increases_threshold
+			
+			# @attribute [Integer] The number of children to keep per node during pruning.
+			attr :prune_limit
+			
+			# @attribute [Integer | Nil] The number of insertions before auto-pruning (nil = no auto-pruning).
+			attr :prune_threshold
+			
+			# @attribute [Capture] The capture object.
+			attr :capture
+			
+			# @attribute [Hash] The call trees.
+			attr :call_trees
+			
+			# @attribute [Hash] The samples for each class being tracked.
+			attr :samples
 			
 			# Start capturing allocations.
 			def start
@@ -148,6 +176,9 @@ module Memory
 						yield sample if block_given?
 					end
 				end
+				
+				# Prune call trees to control memory usage
+				prune_call_trees!
 			end
 			
 			# Start tracking with call path analysis.
@@ -256,11 +287,37 @@ module Memory
 				@call_trees.clear
 			end
 			
-
+			private
 			
 			def default_filter
 				->(location) {!location.path.match?(%r{/(gems|ruby)/|\A\(eval\)})}
 			end
+			
+			def prune_call_trees!
+				return if @prune_threshold.nil?
+				
+				@call_trees.each do |klass, tree|
+					# Only prune if insertions exceed threshold:
+					insertions = tree.insertion_count
+					next if insertions < @prune_threshold
+					
+					# Prune the tree
+					pruned_count = tree.prune!(@prune_limit)
+					
+					# Reset insertion counter after pruning
+					tree.insertion_count = 0
+					
+					# Log pruning activity for visibility
+					if pruned_count > 0 && defined?(Console)
+						Console.debug(klass, "Pruned call tree:",
+							pruned_nodes: pruned_count,
+							insertions_since_last_prune: insertions,
+							total: tree.total_allocations,
+							retained: tree.retained_allocations
+						)
+					end
+				end
+			end
 		end
 	end
 end
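To tie the sampler changes together: auto-pruning only happens when `prune_threshold` is given, and it runs at the end of each sampling pass via `prune_call_trees!`. A hedged example of the new constructor options (values are illustrative; `Sampler#stop` is assumed to mirror `Capture#stop`):

```ruby
sampler = Memory::Profiler::Sampler.new(
	depth: 8,               # stack frames captured per allocation
	prune_limit: 5,         # keep the top 5 children per call-tree node when pruning
	prune_threshold: 10_000 # prune a tree once 10,000 insertions accumulate (nil disables pruning)
)

sampler.start
# ... run the workload; each sampling pass ends by calling prune_call_trees! ...
sampler.stop
```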
data/readme.md
CHANGED
@@ -22,6 +22,16 @@ Please see the [project documentation](https://socketry.github.io/memory-profile
 
 Please see the [project releases](https://socketry.github.io/memory-profiler/releases/index) for all releases.
 
+### v1.1.7
+
+  - Expose `Capture#statistics` for debugging internal memory tracking state.
+
+### v1.1.6
+
+  - Write barriers all the things.
+  - Better state handling and object increment/decrement counting.
+  - Better call tree handling - including support for `prune!`.
+
 ### v1.1.5
 
   - Use queue for `newobj` too to avoid invoking user code during object allocation.
data/releases.md
CHANGED
@@ -1,5 +1,15 @@
 # Releases
 
+## v1.1.7
+
+  - Expose `Capture#statistics` for debugging internal memory tracking state.
+
+## v1.1.6
+
+  - Write barriers all the things.
+  - Better state handling and object increment/decrement counting.
+  - Better call tree handling - including support for `prune!`.
+
 ## v1.1.5
 
   - Use queue for `newobj` too to avoid invoking user code during object allocation.
data.tar.gz.sig
CHANGED
Binary file
metadata
CHANGED
metadata.gz.sig
CHANGED
Binary file