memory-profiler 1.1.1 → 1.1.2
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +4 -4
- checksums.yaml.gz.sig +0 -0
- data/ext/extconf.rb +1 -1
- data/ext/memory/profiler/allocations.c +179 -0
- data/ext/memory/profiler/allocations.h +31 -0
- data/ext/memory/profiler/capture.c +75 -160
- data/lib/memory/profiler/version.rb +1 -1
- data/readme.md +4 -0
- data/releases.md +4 -0
- data.tar.gz.sig +0 -0
- metadata +3 -1
- metadata.gz.sig +0 -0
checksums.yaml
CHANGED

@@ -1,7 +1,7 @@
 ---
 SHA256:
-  metadata.gz:
-  data.tar.gz:
+  metadata.gz: 06ea6c4892f9d17ac8fa69ba374d2a6a94ab8b809be840b26cb544b18b529edc
+  data.tar.gz: a1c283a60721f988cac65878551eba4472a4dd889649d42b1409ba9a7b8b6756
 SHA512:
-  metadata.gz:
-  data.tar.gz:
+  metadata.gz: 22a34ac3cd4ad0c0eead78deef38cd79f20f6c96338f0af5eff191a3d2f2d99ea7dc44a40993838ead2678cb52a257fbed2548e08248e5e389277801c5895cae
+  data.tar.gz: 49d267900c65e5481b6d784766e17721ba9919b984dcaf84737022622f4bbb9379773a0c98166a5d2b767192c787dd9fd13a8c43e98db3abd8d3dcb86aec74c5
checksums.yaml.gz.sig
CHANGED

Binary file
data/ext/extconf.rb
CHANGED

@@ -16,7 +16,7 @@ if ENV.key?("RUBY_DEBUG")
 	append_cflags(["-DRUBY_DEBUG", "-O0"])
 end
 
-$srcs = ["memory/profiler/profiler.c", "memory/profiler/capture.c"]
+$srcs = ["memory/profiler/profiler.c", "memory/profiler/capture.c", "memory/profiler/allocations.c"]
 $VPATH << "$(srcdir)/memory/profiler"
 
 # Check for required headers
data/ext/memory/profiler/allocations.c
ADDED

@@ -0,0 +1,179 @@
+// Released under the MIT License.
+// Copyright, 2025, by Samuel Williams.
+
+#include "allocations.h"
+
+#include "ruby.h"
+#include "ruby/debug.h"
+#include "ruby/st.h"
+#include <stdio.h>
+
+static VALUE Memory_Profiler_Allocations = Qnil;
+
+// Helper to mark object_states table values
+static int Memory_Profiler_Allocations_object_states_mark(st_data_t key, st_data_t value, st_data_t arg) {
+	VALUE object = (VALUE)key;
+	rb_gc_mark_movable(object);
+
+	VALUE state = (VALUE)value;
+	if (!NIL_P(state)) {
+		rb_gc_mark_movable(state);
+	}
+	return ST_CONTINUE;
+}
+
+// Foreach callback for st_foreach_with_replace (iteration logic)
+static int Memory_Profiler_Allocations_object_states_foreach(st_data_t key, st_data_t value, st_data_t argp, int error) {
+	// Return ST_REPLACE to trigger the replace callback for each entry
+	return ST_REPLACE;
+}
+
+// Replace callback for st_foreach_with_replace to update object_states keys and values during compaction
+static int Memory_Profiler_Allocations_object_states_compact(st_data_t *key, st_data_t *value, st_data_t data, int existing) {
+	VALUE old_object = (VALUE)*key;
+	VALUE old_state = (VALUE)*value;
+
+	VALUE new_object = rb_gc_location(old_object);
+	VALUE new_state = rb_gc_location(old_state);
+
+	// Update key if it moved
+	if (old_object != new_object) {
+		*key = (st_data_t)new_object;
+	}
+
+	// Update value if it moved
+	if (old_state != new_state) {
+		*value = (st_data_t)new_state;
+	}
+
+	return ST_CONTINUE;
+}
+
+// GC mark function for Allocations
+static void Memory_Profiler_Allocations_mark(void *ptr) {
+	struct Memory_Profiler_Capture_Allocations *record = ptr;
+
+	if (!record) {
+		return;
+	}
+
+	if (!NIL_P(record->callback)) {
+		rb_gc_mark_movable(record->callback);
+	}
+
+	// Mark object_states table if it exists
+	if (record->object_states) {
+		st_foreach(record->object_states, Memory_Profiler_Allocations_object_states_mark, 0);
+	}
+}
+
+// GC free function for Allocations
+static void Memory_Profiler_Allocations_free(void *ptr) {
+	struct Memory_Profiler_Capture_Allocations *record = ptr;
+
+	if (record->object_states) {
+		st_free_table(record->object_states);
+	}
+
+	xfree(record);
+}
+
+// GC compact function for Allocations
+static void Memory_Profiler_Allocations_compact(void *ptr) {
+	struct Memory_Profiler_Capture_Allocations *record = ptr;
+
+	// Update callback if it moved
+	if (!NIL_P(record->callback)) {
+		record->callback = rb_gc_location(record->callback);
+	}
+
+	// Update object_states table if it exists
+	if (record->object_states && record->object_states->num_entries > 0) {
+		if (st_foreach_with_replace(record->object_states, Memory_Profiler_Allocations_object_states_foreach, Memory_Profiler_Allocations_object_states_compact, 0)) {
+			rb_raise(rb_eRuntimeError, "object_states modified during GC compaction");
+		}
+	}
+}
+
+static const rb_data_type_t Memory_Profiler_Allocations_type = {
+	"Memory::Profiler::Allocations",
+	{
+		.dmark = Memory_Profiler_Allocations_mark,
+		.dcompact = Memory_Profiler_Allocations_compact,
+		.dfree = Memory_Profiler_Allocations_free,
+	},
+	0, 0, RUBY_TYPED_FREE_IMMEDIATELY | RUBY_TYPED_WB_PROTECTED
+};
+
+// Wrap an allocations record
+VALUE Memory_Profiler_Allocations_wrap(struct Memory_Profiler_Capture_Allocations *record) {
+	return TypedData_Wrap_Struct(Memory_Profiler_Allocations, &Memory_Profiler_Allocations_type, record);
+}
+
+// Get allocations record from wrapper
+struct Memory_Profiler_Capture_Allocations* Memory_Profiler_Allocations_get(VALUE self) {
+	struct Memory_Profiler_Capture_Allocations *record;
+	TypedData_Get_Struct(self, struct Memory_Profiler_Capture_Allocations, &Memory_Profiler_Allocations_type, record);
+	return record;
+}
+
+// Allocations#new_count
+static VALUE Memory_Profiler_Allocations_new_count(VALUE self) {
+	struct Memory_Profiler_Capture_Allocations *record = Memory_Profiler_Allocations_get(self);
+	return SIZET2NUM(record->new_count);
+}
+
+// Allocations#free_count
+static VALUE Memory_Profiler_Allocations_free_count(VALUE self) {
+	struct Memory_Profiler_Capture_Allocations *record = Memory_Profiler_Allocations_get(self);
+	return SIZET2NUM(record->free_count);
+}
+
+// Allocations#retained_count
+static VALUE Memory_Profiler_Allocations_retained_count(VALUE self) {
+	struct Memory_Profiler_Capture_Allocations *record = Memory_Profiler_Allocations_get(self);
+	// Handle underflow when free_count > new_count
+	size_t retained = record->free_count > record->new_count ? 0 : record->new_count - record->free_count;
+	return SIZET2NUM(retained);
+}
+
+// Allocations#track { |klass| ... }
+static VALUE Memory_Profiler_Allocations_track(int argc, VALUE *argv, VALUE self) {
+	struct Memory_Profiler_Capture_Allocations *record = Memory_Profiler_Allocations_get(self);
+
+	VALUE callback;
+	rb_scan_args(argc, argv, "&", &callback);
+
+	// Use write barrier - self (Allocations wrapper) keeps Capture alive, which keeps callback alive
+	RB_OBJ_WRITE(self, &record->callback, callback);
+
+	return self;
+}
+
+// Clear/reset allocation counts and state for a record
+void Memory_Profiler_Allocations_clear(VALUE allocations) {
+	struct Memory_Profiler_Capture_Allocations *record = Memory_Profiler_Allocations_get(allocations);
+	record->new_count = 0; // Reset allocation count
+	record->free_count = 0; // Reset free count
+	RB_OBJ_WRITE(allocations, &record->callback, Qnil); // Clear callback with write barrier
+
+	// Clear object states
+	if (record->object_states) {
+		st_free_table(record->object_states);
+		record->object_states = NULL;
+	}
+}
+
+void Init_Memory_Profiler_Allocations(VALUE Memory_Profiler)
+{
+	// Allocations class - wraps allocation data for a specific class
+	Memory_Profiler_Allocations = rb_define_class_under(Memory_Profiler, "Allocations", rb_cObject);
+
+	// Allocations objects are only created internally via wrap, never from Ruby:
+	rb_undef_alloc_func(Memory_Profiler_Allocations);
+
+	rb_define_method(Memory_Profiler_Allocations, "new_count", Memory_Profiler_Allocations_new_count, 0);
+	rb_define_method(Memory_Profiler_Allocations, "free_count", Memory_Profiler_Allocations_free_count, 0);
+	rb_define_method(Memory_Profiler_Allocations, "retained_count", Memory_Profiler_Allocations_retained_count, 0);
+	rb_define_method(Memory_Profiler_Allocations, "track", Memory_Profiler_Allocations_track, -1); // -1 to accept block
+}
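The new allocations.c moves the Allocations wrapper into its own compilation unit and gives it a real rb_data_type_t with mark, compact, and free callbacks, so the wrapped record now participates in GC and compaction directly. Note the underflow guard in retained_count: frees of objects allocated before tracking began can push free_count above new_count, so the result is clamped at zero. A rough usage sketch of the resulting Ruby-facing API, using only methods registered by the rb_define_method calls above plus Capture methods shown later in this diff (start/stop are assumptions, referenced in this diff only by the "via start/stop" comment in capture.c):

require "memory/profiler"

capture = Memory::Profiler::Capture.new
capture.track(String)
capture.start # assumed API, not shown in this diff

strings = Array.new(1000) { +"x" }
strings = nil
GC.start

capture.each do |klass, allocations|
	# allocations is a Memory::Profiler::Allocations wrapper:
	puts "#{klass}: new=#{allocations.new_count} free=#{allocations.free_count} retained=#{allocations.retained_count}"
end

capture.stop # assumed API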
data/ext/memory/profiler/allocations.h
ADDED

@@ -0,0 +1,31 @@
+// Released under the MIT License.
+// Copyright, 2025, by Samuel Williams.
+
+#pragma once
+
+#include "ruby.h"
+#include "ruby/st.h"
+
+// Per-class allocation tracking record
+struct Memory_Profiler_Capture_Allocations {
+	VALUE callback; // Optional Ruby proc/lambda to call on allocation
+	size_t new_count; // Total allocations seen since tracking started
+	size_t free_count; // Total frees seen since tracking started
+	// Live count = new_count - free_count
+
+	// For detailed tracking: map object (VALUE) => state (VALUE)
+	// State is returned from callback on :newobj and passed back on :freeobj
+	st_table *object_states;
+};
+
+// Wrap an allocations record in a VALUE
+VALUE Memory_Profiler_Allocations_wrap(struct Memory_Profiler_Capture_Allocations *record);
+
+// Get allocations record from wrapper VALUE
+struct Memory_Profiler_Capture_Allocations* Memory_Profiler_Allocations_get(VALUE self);
+
+// Clear/reset allocation counts and state for a record
+void Memory_Profiler_Allocations_clear(VALUE allocations);
+
+// Initialize the Allocations class
+void Init_Memory_Profiler_Allocations(VALUE Memory_Profiler);
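The header also documents the per-object state protocol: whatever the tracking callback returns on :newobj is stored in object_states keyed by the object, and handed back on :freeobj. A hypothetical sketch of that round trip; the diff never shows the callback's exact arity, so the |klass, event, state| signature below is an assumption inferred from the sym_newobj/sym_freeobj symbols in capture.c:

capture.track(Array) do |klass, event, state|
	case event
	when :newobj
		# The return value is stored as this object's state...
		Time.now
	when :freeobj
		# ...and is passed back when the object is freed:
		puts "#{klass} lived #{Time.now - state}s" if state
	end
end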
data/ext/memory/profiler/capture.c
CHANGED

@@ -2,6 +2,7 @@
 // Copyright, 2025, by Samuel Williams.
 
 #include "capture.h"
+#include "allocations.h"
 
 #include "ruby.h"
 #include "ruby/debug.h"
@@ -14,57 +15,29 @@ enum {
 };
 
 static VALUE Memory_Profiler_Capture = Qnil;
-static VALUE Memory_Profiler_Allocations = Qnil;
 
 // Event symbols
 static VALUE sym_newobj;
 static VALUE sym_freeobj;
 
-// Per-class allocation tracking record
-struct Memory_Profiler_Capture_Allocations {
-	VALUE callback; // Optional Ruby proc/lambda to call on allocation
-	size_t new_count; // Total allocations seen since tracking started
-	size_t free_count; // Total frees seen since tracking started
-	// Live count = new_count - free_count
-
-	// For detailed tracking: map object (VALUE) => state (VALUE)
-	// State is returned from callback on :newobj and passed back on :freeobj
-	st_table *object_states;
-};
-
 // Main capture state
 struct Memory_Profiler_Capture {
-	// class => Memory_Profiler_Capture_Allocations.
+	// class => VALUE (wrapped Memory_Profiler_Capture_Allocations).
 	st_table *tracked_classes;
 
 	// Is tracking enabled (via start/stop):
 	int enabled;
};
 
-// Helper to mark object_states table values
-static int Memory_Profiler_Capture_mark_state(st_data_t key, st_data_t value, st_data_t arg) {
-	// key is VALUE (object) - don't mark it, we're just using it as a key
-	// value is VALUE (state) - mark it as movable
-	rb_gc_mark_movable((VALUE)value);
-	return ST_CONTINUE;
-}
-
 // GC mark callback for tracked_classes table
-static int
-	VALUE klass = (VALUE)key;
-
+static int Memory_Profiler_Capture_tracked_classes_mark(st_data_t key, st_data_t value, st_data_t arg) {
 	// Mark class as un-movable as we don't want it moving in freeobj.
+	VALUE klass = (VALUE)key;
 	rb_gc_mark(klass);
 
-
-
-
-	}
-
-	// Mark object_states table if it exists
-	if (record->object_states) {
-		st_foreach(record->object_states, Memory_Profiler_Capture_mark_state, 0);
-	}
+	// Mark the wrapped Allocations VALUE (its own mark function will handle internal refs)
+	VALUE allocations = (VALUE)value;
+	rb_gc_mark_movable(allocations);
 
 	return ST_CONTINUE;
 }
@@ -78,29 +51,15 @@ static void Memory_Profiler_Capture_mark(void *ptr) {
 	}
 
 	if (capture->tracked_classes) {
-		st_foreach(capture->tracked_classes,
+		st_foreach(capture->tracked_classes, Memory_Profiler_Capture_tracked_classes_mark, 0);
 	}
 }
 
-// Iterator to free each class record
-static int Memory_Profiler_Capture_free_class_record(st_data_t key, st_data_t value, st_data_t arg) {
-	struct Memory_Profiler_Capture_Allocations *record = (struct Memory_Profiler_Capture_Allocations *)value;
-	if (record->object_states) {
-		st_free_table(record->object_states);
-	}
-	xfree(record);
-	return ST_CONTINUE;
-}
-
 // GC free function
 static void Memory_Profiler_Capture_free(void *ptr) {
 	struct Memory_Profiler_Capture *capture = ptr;
-
-	// Event hooks must be removed via stop() before object is freed
-	// Ruby will automatically remove hooks when the VALUE is GC'd
-
+
 	if (capture->tracked_classes) {
-		st_foreach(capture->tracked_classes, Memory_Profiler_Capture_free_class_record, 0);
 		st_free_table(capture->tracked_classes);
 	}
 
@@ -119,9 +78,14 @@ static size_t Memory_Profiler_Capture_memsize(const void *ptr) {
 	return size;
 }
 
+// Foreach callback for st_foreach_with_replace (iteration logic)
+static int Memory_Profiler_Capture_tracked_classes_foreach(st_data_t key, st_data_t value, st_data_t argp, int error) {
+	// Return ST_REPLACE to trigger the replace callback for each entry
+	return ST_REPLACE;
+}
 
-//
-static int
+// Replace callback for st_foreach_with_replace (update logic)
+static int Memory_Profiler_Capture_tracked_classes_update(st_data_t *key, st_data_t *value, st_data_t argp, int existing) {
 	// Update class key if it moved
 	VALUE old_klass = (VALUE)*key;
 	VALUE new_klass = rb_gc_location(old_klass);
@@ -129,14 +93,11 @@ static int Memory_Profiler_Capture_update_refs(st_data_t *key, st_data_t *value,
 		*key = (st_data_t)new_klass;
 	}
 
-	// Update
-
-
-
-
-	if (old_callback != new_callback) {
-		record->callback = new_callback;
-	}
+	// Update wrapped Allocations VALUE if it moved (its own compact function handles internal refs)
+	VALUE old_allocations = (VALUE)*value;
+	VALUE new_allocations = rb_gc_location(old_allocations);
+	if (old_allocations != new_allocations) {
+		*value = (st_data_t)new_allocations;
 	}
 
 	return ST_CONTINUE;
@@ -148,7 +109,7 @@ static void Memory_Profiler_Capture_compact(void *ptr) {
 
 	// Update tracked_classes keys and callback values in-place
 	if (capture->tracked_classes && capture->tracked_classes->num_entries > 0) {
-		if (st_foreach_with_replace(capture->tracked_classes,
+		if (st_foreach_with_replace(capture->tracked_classes, Memory_Profiler_Capture_tracked_classes_foreach, Memory_Profiler_Capture_tracked_classes_update, 0)) {
 			rb_raise(rb_eRuntimeError, "tracked_classes modified during GC compaction");
 		}
 	}
@@ -184,10 +145,11 @@ const char *event_flag_name(rb_event_flag_t event_flag) {
 }
 
 // Handler for NEWOBJ event
-static void Memory_Profiler_Capture_newobj_handler(struct Memory_Profiler_Capture *capture, VALUE klass, VALUE object) {
-	st_data_t
-	if (st_lookup(capture->tracked_classes, (st_data_t)klass, &
-
+static void Memory_Profiler_Capture_newobj_handler(VALUE self, struct Memory_Profiler_Capture *capture, VALUE klass, VALUE object) {
+	st_data_t allocations_data;
+	if (st_lookup(capture->tracked_classes, (st_data_t)klass, &allocations_data)) {
+		VALUE allocations = (VALUE)allocations_data;
+		struct Memory_Profiler_Capture_Allocations *record = Memory_Profiler_Allocations_get(allocations);
 		record->new_count++;
 		if (!NIL_P(record->callback)) {
 			// Invoke callback - runs during NEWOBJ with GC disabled
@@ -207,6 +169,8 @@ static void Memory_Profiler_Capture_newobj_handler(struct Memory_Profiler_Captur
 				record->object_states = st_init_numtable();
 			}
 			st_insert(record->object_states, (st_data_t)object, (st_data_t)state);
+			// Notify GC about the state VALUE stored in the table
+			RB_OBJ_WRITTEN(self, Qnil, state);
 		}
 	}
 	} else {
@@ -216,15 +180,24 @@ static void Memory_Profiler_Capture_newobj_handler(struct Memory_Profiler_Captur
 		record->new_count = 1; // This is the first allocation
 		record->free_count = 0;
 		record->object_states = NULL;
-
+
+		// Wrap the record in a VALUE
+		VALUE allocations = Memory_Profiler_Allocations_wrap(record);
+
+		st_insert(capture->tracked_classes, (st_data_t)klass, (st_data_t)allocations);
+		// Notify GC about the class VALUE stored as key in the table
+		RB_OBJ_WRITTEN(self, Qnil, klass);
+		// Notify GC about the allocations VALUE stored as value in the table
+		RB_OBJ_WRITTEN(self, Qnil, allocations);
 	}
 }
 
 // Handler for FREEOBJ event
-static void Memory_Profiler_Capture_freeobj_handler(struct Memory_Profiler_Capture *capture, VALUE klass, VALUE object) {
-	st_data_t
-	if (st_lookup(capture->tracked_classes, (st_data_t)klass, &
-
+static void Memory_Profiler_Capture_freeobj_handler(VALUE self, struct Memory_Profiler_Capture *capture, VALUE klass, VALUE object) {
+	st_data_t allocations_data;
+	if (st_lookup(capture->tracked_classes, (st_data_t)klass, &allocations_data)) {
+		VALUE allocations = (VALUE)allocations_data;
+		struct Memory_Profiler_Capture_Allocations *record = Memory_Profiler_Allocations_get(allocations);
 		record->free_count++;
 		if (!NIL_P(record->callback) && record->object_states) {
 			// Look up state stored during NEWOBJ
@@ -299,10 +272,10 @@ static void Memory_Profiler_Capture_event_callback(VALUE data, void *ptr) {
 
 	if (event_flag == RUBY_INTERNAL_EVENT_NEWOBJ) {
 		// self is the newly allocated object
-		Memory_Profiler_Capture_newobj_handler(capture, klass, object);
+		Memory_Profiler_Capture_newobj_handler(data, capture, klass, object);
 	} else if (event_flag == RUBY_INTERNAL_EVENT_FREEOBJ) {
 		// self is the object being freed
-		Memory_Profiler_Capture_freeobj_handler(capture, klass, object);
+		Memory_Profiler_Capture_freeobj_handler(data, capture, klass, object);
 	}
 }
 
@@ -376,9 +349,10 @@ static VALUE Memory_Profiler_Capture_track(int argc, VALUE *argv, VALUE self) {
 	VALUE klass, callback;
 	rb_scan_args(argc, argv, "1&", &klass, &callback);
 
-	st_data_t
-	if (st_lookup(capture->tracked_classes, (st_data_t)klass, &
-
+	st_data_t allocations_data;
+	if (st_lookup(capture->tracked_classes, (st_data_t)klass, &allocations_data)) {
+		VALUE allocations = (VALUE)allocations_data;
+		struct Memory_Profiler_Capture_Allocations *record = Memory_Profiler_Allocations_get(allocations);
 		RB_OBJ_WRITE(self, &record->callback, callback);
 	} else {
 		struct Memory_Profiler_Capture_Allocations *record = ALLOC(struct Memory_Profiler_Capture_Allocations);
@@ -386,7 +360,15 @@ static VALUE Memory_Profiler_Capture_track(int argc, VALUE *argv, VALUE self) {
 		record->new_count = 0;
 		record->free_count = 0;
 		record->object_states = NULL;
-
+
+		// Wrap the record in a VALUE
+		VALUE allocations = Memory_Profiler_Allocations_wrap(record);
+
+		st_insert(capture->tracked_classes, (st_data_t)klass, (st_data_t)allocations);
+		// Notify GC about the class VALUE stored as key in the table
+		RB_OBJ_WRITTEN(self, Qnil, klass);
+		// Notify GC about the allocations VALUE stored as value in the table
+		RB_OBJ_WRITTEN(self, Qnil, allocations);
 		// Now inform GC about the callback reference
 		if (!NIL_P(callback)) {
 			RB_OBJ_WRITTEN(self, Qnil, callback);
@@ -401,10 +383,10 @@ static VALUE Memory_Profiler_Capture_untrack(VALUE self, VALUE klass) {
 	struct Memory_Profiler_Capture *capture;
 	TypedData_Get_Struct(self, struct Memory_Profiler_Capture, &Memory_Profiler_Capture_type, capture);
 
-	st_data_t
-	if (st_delete(capture->tracked_classes, (st_data_t *)&klass, &
-
-
+	st_data_t allocations_data;
+	if (st_delete(capture->tracked_classes, (st_data_t *)&klass, &allocations_data)) {
+		// The wrapped Allocations VALUE will be GC'd naturally
+		// No manual cleanup needed
 	}
 
 	return self;
@@ -424,9 +406,10 @@ static VALUE Memory_Profiler_Capture_count_for(VALUE self, VALUE klass) {
 	struct Memory_Profiler_Capture *capture;
 	TypedData_Get_Struct(self, struct Memory_Profiler_Capture, &Memory_Profiler_Capture_type, capture);
 
-	st_data_t
-	if (st_lookup(capture->tracked_classes, (st_data_t)klass, &
-
+	st_data_t allocations_data;
+	if (st_lookup(capture->tracked_classes, (st_data_t)klass, &allocations_data)) {
+		VALUE allocations = (VALUE)allocations_data;
+		struct Memory_Profiler_Capture_Allocations *record = Memory_Profiler_Allocations_get(allocations);
 		// Return net live count (new_count - free_count)
 		// Handle case where more objects freed than allocated (allocated before tracking started)
 		if (record->free_count > record->new_count) {
@@ -439,18 +422,11 @@ static VALUE Memory_Profiler_Capture_count_for(VALUE self, VALUE klass) {
 		return INT2FIX(0);
 	}
 
-	// Iterator to
-	static int
-
-	record->new_count = 0; // Reset allocation count
-	record->free_count = 0; // Reset free count
-	record->callback = Qnil; // Clear callback
+// Iterator to reset each class record
+static int Memory_Profiler_Capture_tracked_classes_clear(st_data_t key, st_data_t value, st_data_t arg) {
+	VALUE allocations = (VALUE)value;
 
-
-	if (record->object_states) {
-		st_free_table(record->object_states);
-		record->object_states = NULL;
-	}
+	Memory_Profiler_Allocations_clear(allocations);
 
 	return ST_CONTINUE;
 }
@@ -460,60 +436,8 @@ static VALUE Memory_Profiler_Capture_clear(VALUE self) {
 	struct Memory_Profiler_Capture *capture;
 	TypedData_Get_Struct(self, struct Memory_Profiler_Capture, &Memory_Profiler_Capture_type, capture);
 
-	// Reset all counts to 0 (don't free, just reset)
-	st_foreach(capture->tracked_classes,
-
-	return self;
-}
-
-// TypedData for Allocations wrapper - just wraps the record pointer
-static const rb_data_type_t Memory_Profiler_Allocations_type = {
-	"Memory::Profiler::Allocations",
-	{NULL, NULL, NULL}, // No mark/free needed - record is owned by Capture
-	0, 0, 0
-};
-
-// Wrap an allocations record
-static VALUE Memory_Profiler_Allocations_wrap(struct Memory_Profiler_Capture_Allocations *record) {
-	return TypedData_Wrap_Struct(Memory_Profiler_Allocations, &Memory_Profiler_Allocations_type, record);
-}
-
-// Get allocations record from wrapper
-static struct Memory_Profiler_Capture_Allocations* Memory_Profiler_Allocations_get(VALUE self) {
-	struct Memory_Profiler_Capture_Allocations *record;
-	TypedData_Get_Struct(self, struct Memory_Profiler_Capture_Allocations, &Memory_Profiler_Allocations_type, record);
-	return record;
-}
-
-// Allocations#new_count
-static VALUE Memory_Profiler_Allocations_new_count(VALUE self) {
-	struct Memory_Profiler_Capture_Allocations *record = Memory_Profiler_Allocations_get(self);
-	return SIZET2NUM(record->new_count);
-}
-
-// Allocations#free_count
-static VALUE Memory_Profiler_Allocations_free_count(VALUE self) {
-	struct Memory_Profiler_Capture_Allocations *record = Memory_Profiler_Allocations_get(self);
-	return SIZET2NUM(record->free_count);
-}
-
-// Allocations#retained_count
-static VALUE Memory_Profiler_Allocations_retained_count(VALUE self) {
-	struct Memory_Profiler_Capture_Allocations *record = Memory_Profiler_Allocations_get(self);
-	// Handle underflow when free_count > new_count
-	size_t retained = record->free_count > record->new_count ? 0 : record->new_count - record->free_count;
-	return SIZET2NUM(retained);
-}
-
-// Allocations#track { |klass| ... }
-static VALUE Memory_Profiler_Allocations_track(int argc, VALUE *argv, VALUE self) {
-	struct Memory_Profiler_Capture_Allocations *record = Memory_Profiler_Allocations_get(self);
-
-	VALUE callback;
-	rb_scan_args(argc, argv, "&", &callback);
-
-	// Use write barrier - self (Allocations wrapper) keeps Capture alive, which keeps callback alive
-	RB_OBJ_WRITE(self, &record->callback, callback);
+	// Reset all counts to 0 (don't free, just reset) - pass self for write barriers
+	st_foreach(capture->tracked_classes, Memory_Profiler_Capture_tracked_classes_clear, 0);
 
 	return self;
 }
@@ -521,10 +445,7 @@ static VALUE Memory_Profiler_Allocations_track(int argc, VALUE *argv, VALUE self
 // Iterator callback for each
 static int Memory_Profiler_Capture_each_allocation(st_data_t key, st_data_t value, st_data_t arg) {
 	VALUE klass = (VALUE)key;
-
-
-	// Wrap the allocations record
-	VALUE allocations = Memory_Profiler_Allocations_wrap(record);
+	VALUE allocations = (VALUE)value; // Already a wrapped VALUE
 
 	// Yield class and allocations wrapper
 	rb_yield_values(2, klass, allocations);
@@ -565,12 +486,6 @@ void Init_Memory_Profiler_Capture(VALUE Memory_Profiler)
 	rb_define_method(Memory_Profiler_Capture, "each", Memory_Profiler_Capture_each, 0);
 	rb_define_method(Memory_Profiler_Capture, "clear", Memory_Profiler_Capture_clear, 0);
 
-	// Allocations class
-
-
-	rb_define_method(Memory_Profiler_Allocations, "new_count", Memory_Profiler_Allocations_new_count, 0);
-	rb_define_method(Memory_Profiler_Allocations, "free_count", Memory_Profiler_Allocations_free_count, 0);
-	rb_define_method(Memory_Profiler_Allocations, "retained_count", Memory_Profiler_Allocations_retained_count, 0);
-	rb_define_method(Memory_Profiler_Allocations, "track", Memory_Profiler_Allocations_track, -1); // -1 to accept block
+	// Initialize Allocations class
+	Init_Memory_Profiler_Allocations(Memory_Profiler);
 }
-
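Taken together, the capture.c changes replace the raw struct pointer stored in tracked_classes with a wrapped Allocations VALUE, pair every st_insert with RB_OBJ_WRITTEN write barriers (the handlers now receive self for exactly this purpose), and update both keys and values through st_foreach_with_replace during compaction. A minimal sketch of the scenario the release notes say this fixes, namely compacting the heap while tracking is active (start/stop assumed as above):

capture = Memory::Profiler::Capture.new
capture.track(Hash)
capture.start # assumed API

objects = Array.new(10_000) { {} }
GC.compact # moves objects; the new dcompact callbacks must relocate stored VALUEs
objects = nil
GC.start

puts capture.count_for(Hash) # net live count, clamped at zero per count_for above
capture.stop # assumed API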
data/readme.md
CHANGED

@@ -22,6 +22,10 @@ Please see the [project documentation](https://socketry.github.io/memory-profile
 
 Please see the [project releases](https://socketry.github.io/memory-profiler/releases/index) for all releases.
 
+### v1.1.2
+
+  - Fix handling of GC compaction (I hope).
+
 ### v0.1.0
 
   - Initial implementation.
data/releases.md
CHANGED
data.tar.gz.sig
CHANGED

Binary file
metadata
CHANGED

@@ -1,7 +1,7 @@
 --- !ruby/object:Gem::Specification
 name: memory-profiler
 version: !ruby/object:Gem::Version
-  version: 1.1.1
+  version: 1.1.2
 platform: ruby
 authors:
 - Samuel Williams
@@ -46,6 +46,8 @@ files:
 - context/getting-started.md
 - context/index.yaml
 - ext/extconf.rb
+- ext/memory/profiler/allocations.c
+- ext/memory/profiler/allocations.h
 - ext/memory/profiler/capture.c
 - ext/memory/profiler/capture.h
 - ext/memory/profiler/profiler.c
metadata.gz.sig
CHANGED

Binary file