memory-profiler 1.1.10 → 1.1.11
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +4 -4
- checksums.yaml.gz.sig +1 -2
- data/ext/memory/profiler/capture.c +33 -32
- data/ext/memory/profiler/events.c +83 -29
- data/ext/memory/profiler/events.h +1 -0
- data/lib/memory/profiler/version.rb +1 -1
- data/readme.md +4 -0
- data/releases.md +4 -0
- data.tar.gz.sig +0 -0
- metadata +1 -1
- metadata.gz.sig +0 -0
checksums.yaml
CHANGED

@@ -1,7 +1,7 @@
 ---
 SHA256:
-  metadata.gz:
-  data.tar.gz:
+  metadata.gz: 1237585b03f1c87b2c4cafb01668200bc33d518c35f832539315a7f6f37dc4f2
+  data.tar.gz: 13a79c36183a794df061076e3e09dd2a8a1e14eadfcdb0dc5e3317a6902f622c
 SHA512:
-  metadata.gz:
-  data.tar.gz:
+  metadata.gz: 98c104bac6001ec5cde21d984a80a69ac992a113677ea672a81d1d1e468e754331f1170ea58c3a6ba8f1fe4a453ec669d143f7af71ab797e0d206a50bd8bbe8a
+  data.tar.gz: 2bf799367a7cd59a760f52c89f8da0ab3ee252867bdcca431ca9340349172bcf0962f626f4cada76fcfd721084fb8de8dbe0d8692eb9593a5b4a56f625587436
checksums.yaml.gz.sig
CHANGED

Binary file
data/ext/memory/profiler/capture.c
CHANGED

@@ -26,7 +26,7 @@ struct Memory_Profiler_Capture {
 	int running;
 	
 	// Should we queue callbacks? (temporarily disabled during queue processing).
-	int enabled;
+	int paused;
 	
 	// Tracked classes: class => VALUE (wrapped Memory_Profiler_Capture_Allocations).
 	st_table *tracked_classes;

@@ -142,6 +142,9 @@ static void Memory_Profiler_Capture_process_newobj(VALUE capture_value, VALUE kl
 	struct Memory_Profiler_Capture *capture;
 	TypedData_Get_Struct(capture_value, struct Memory_Profiler_Capture, &Memory_Profiler_Capture_type, capture);
 	
+	// Pause the capture to prevent infinite loop:
+	capture->paused += 1;
+	
 	// Increment global new count:
 	capture->new_count++;
 	

@@ -171,20 +174,17 @@ static void Memory_Profiler_Capture_process_newobj(VALUE capture_value, VALUE kl
 	
 	// Only store state if there's a callback
 	if (!NIL_P(record->callback)) {
-		// Temporarily disable queueing to prevent infinite loop
-		int was_enabled = capture->enabled;
-		capture->enabled = 0;
-		
 		VALUE state = rb_funcall(record->callback, rb_intern("call"), 3, klass, sym_newobj, Qnil);
 		
-		capture->enabled = was_enabled;
-		
 		// Store state using object as key (works because object is alive):
 		st_insert(record->states, (st_data_t)object, (st_data_t)state);
 	} else {
 		// Store state as nil:
 		st_insert(record->states, (st_data_t)object, (st_data_t)Qnil);
 	}
+	
+	// Resume the capture:
+	capture->paused -= 1;
 }
 
 // Process a FREEOBJ event. All deallocation tracking logic is here.

@@ -192,11 +192,14 @@ static void Memory_Profiler_Capture_process_freeobj(VALUE capture_value, VALUE k
 	struct Memory_Profiler_Capture *capture;
 	TypedData_Get_Struct(capture_value, struct Memory_Profiler_Capture, &Memory_Profiler_Capture_type, capture);
 	
+	// Pause the capture to prevent infinite loop:
+	capture->paused += 1;
+	
 	// Look up allocations record for this class
 	st_data_t allocations_data;
 	if (!st_lookup(capture->tracked_classes, (st_data_t)klass, &allocations_data)) {
 		// The class is not tracked, so we are not tracking this object.
-		return;
+		goto done;
 	}
 	
 	VALUE allocations = (VALUE)allocations_data;

@@ -204,32 +207,27 @@ static void Memory_Profiler_Capture_process_freeobj(VALUE capture_value, VALUE k
 	
 	if (!record->states) {
 		// There is no state table for this class, so we are not tracking it.
-		return;
+		goto done;
 	}
 	
 	st_data_t state_data;
-	if (!st_delete(record->states, (st_data_t *)&object, &state_data)) {
-		return;
-	}
-	
-	VALUE state = (VALUE)state_data;
-	
-	// Increment global free count
-	capture->free_count++;
-	
-	// Increment per-class free count
-	record->free_count++;
-	
-	if (!NIL_P(record->callback) && !NIL_P(state)) {
-		// Temporarily disable queueing to prevent infinite loop
-		int was_enabled = capture->enabled;
-		capture->enabled = 0;
-		
-		rb_funcall(record->callback, rb_intern("call"), 3, klass, sym_freeobj, state);
-		
-		capture->enabled = was_enabled;
-	}
+	if (st_delete(record->states, (st_data_t *)&object, &state_data)) {
+		VALUE state = (VALUE)state_data;
+		
+		// Increment global free count
+		capture->free_count++;
+		
+		// Increment per-class free count
+		record->free_count++;
+		
+		if (!NIL_P(record->callback) && !NIL_P(state)) {
+			rb_funcall(record->callback, rb_intern("call"), 3, klass, sym_freeobj, state);
+		}
+	}
+	
+done:
+	// Resume the capture:
+	capture->paused -= 1;
 }
 
 // Process a single event (NEWOBJ or FREEOBJ). Called from events.c via rb_protect to catch exceptions.

@@ -241,6 +239,9 @@ void Memory_Profiler_Capture_process_event(struct Memory_Profiler_Event *event)
 		case MEMORY_PROFILER_EVENT_TYPE_FREEOBJ:
 			Memory_Profiler_Capture_process_freeobj(event->capture, event->klass, event->object);
 			break;
+		default:
+			// Ignore.
+			break;
 	}
 }
 

@@ -299,7 +300,7 @@ static void Memory_Profiler_Capture_event_callback(VALUE data, void *ptr) {
 	
 	if (event_flag == RUBY_INTERNAL_EVENT_NEWOBJ) {
 		// Skip NEWOBJ if disabled (during callback) to prevent infinite recursion
-		if (!capture->enabled) return;
+		if (capture->paused) return;
 		
 		Memory_Profiler_Events_enqueue(MEMORY_PROFILER_EVENT_TYPE_NEWOBJ, data, klass, object);
 	} else if (event_flag == RUBY_INTERNAL_EVENT_FREEOBJ) {

@@ -329,7 +330,7 @@ static VALUE Memory_Profiler_Capture_alloc(VALUE klass) {
 	
 	// Initialize state flags - not running, callbacks disabled
 	capture->running = 0;
-	capture->enabled = 0;
+	capture->paused = 0;
 	
 	// Global event queue system will auto-initialize on first use (lazy initialization)
 	

@@ -362,7 +363,7 @@ static VALUE Memory_Profiler_Capture_start(VALUE self) {
 	
 	// Set both flags - we're now running and callbacks are enabled
 	capture->running = 1;
-	capture->enabled = 1;
+	capture->paused = 0;
 	
 	return Qtrue;
 }

@@ -383,7 +384,7 @@ static VALUE Memory_Profiler_Capture_stop(VALUE self) {
 	
 	// Clear both flags - we're no longer running and callbacks are disabled
 	capture->running = 0;
-	capture->enabled = 0;
+	capture->paused = 0;
 	
 	return Qtrue;
 }
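In both `process_newobj` and `process_freeobj`, this release replaces the old save/restore of an `enabled` flag with a `paused` counter: it is incremented before any Ruby callback can run and decremented on every exit path (hence the new `goto done`), while the NEWOBJ hook drops events whenever `capture->paused` is non-zero. Allocations made inside a callback are therefore never re-queued, and nested pauses compose. The following is a minimal standalone sketch of that re-entrancy guard only, using hypothetical `tracker`, `on_allocation`, and `process_object` names rather than the gem's real functions:

#include <stdio.h>

// Hypothetical tracker illustrating the counted pause guard.
struct tracker {
	int paused; // > 0 while a callback is running; allocations are ignored.
};

// Stands in for the allocation hook: skip work while paused.
static void on_allocation(struct tracker *t, const char *what) {
	if (t->paused) {
		printf("skipped re-entrant allocation: %s\n", what);
		return;
	}
	printf("recorded allocation: %s\n", what);
}

// Stands in for processing one queued event, with the guard around the callback.
static void process_object(struct tracker *t) {
	t->paused += 1; // Pause: anything the callback allocates is not re-queued.
	
	// The user callback may allocate; those allocations reach on_allocation
	// while paused and are dropped, which breaks the feedback loop.
	on_allocation(t, "string created inside callback");
	
	t->paused -= 1; // Resume on every exit path.
}

int main(void) {
	struct tracker t = {0};
	on_allocation(&t, "tracked object"); // recorded
	process_object(&t);                  // re-entrant allocation skipped
	on_allocation(&t, "another object"); // recorded again
	return 0;
}

Using a counter rather than a saved boolean means the guard still behaves correctly if one paused section triggers another.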
data/ext/memory/profiler/events.c
CHANGED

@@ -17,9 +17,10 @@ struct Memory_Profiler_Events {
 	// The VALUE wrapper for this struct (needed for write barriers).
 	VALUE self;
 	
-	//
-	struct Memory_Profiler_Queue
-	
+	// Double-buffered event queues (contains events from all Capture instances).
+	struct Memory_Profiler_Queue queues[2];
+	struct Memory_Profiler_Queue *available, *processing;
+	
 	// Postponed job handle for processing the queue.
 	// Postponed job handles are an extremely limited resource, so we only register one global event queue.
 	rb_postponed_job_handle_t postponed_job_handle;

@@ -51,8 +52,13 @@ static VALUE Memory_Profiler_Events_new(void) {
 	// Store the VALUE wrapper for write barriers:
 	events->self = self;
 	
-	// Initialize
-	Memory_Profiler_Queue_initialize(&events->
+	// Initialize both queues for double buffering:
+	Memory_Profiler_Queue_initialize(&events->queues[0], sizeof(struct Memory_Profiler_Event));
+	Memory_Profiler_Queue_initialize(&events->queues[1], sizeof(struct Memory_Profiler_Event));
+	
+	// Start with queues[0] available for incoming events, queues[1] for processing (initially empty):
+	events->available = &events->queues[0];
+	events->processing = &events->queues[1];
 	
 	// Pre-register the single postponed job for processing the queue:
 	events->postponed_job_handle = rb_postponed_job_preregister(0,

@@ -88,13 +94,13 @@ struct Memory_Profiler_Events* Memory_Profiler_Events_instance(void) {
 	return events;
 }
 
-//
-static void
-
-
-
-
-
+// Helper to mark events in a queue.
+static void Memory_Profiler_Events_mark_queue(struct Memory_Profiler_Queue *queue, int skip_none) {
+	for (size_t i = 0; i < queue->count; i++) {
+		struct Memory_Profiler_Event *event = Memory_Profiler_Queue_at(queue, i);
+		
+		// Skip already-processed events if requested:
+		if (skip_none && event->type == MEMORY_PROFILER_EVENT_TYPE_NONE) continue;
 		
 		// Mark the Capture instance and class:
 		rb_gc_mark_movable(event->capture);

@@ -108,13 +114,24 @@ static void Memory_Profiler_Events_mark(void *ptr) {
 	}
 }
 
-// GC
-static void
+// GC mark callback - mark all VALUEs in both event queues.
+static void Memory_Profiler_Events_mark(void *ptr) {
 	struct Memory_Profiler_Events *events = ptr;
 	
-	//
-	
-	
+	// Mark all events in the available queue (receiving new events):
+	Memory_Profiler_Events_mark_queue(events->available, 0);
+	
+	// Mark all events in the processing queue (currently being processed):
+	Memory_Profiler_Events_mark_queue(events->processing, 1);
+}
+
+// Helper to compact events in a queue.
+static void Memory_Profiler_Events_compact_queue(struct Memory_Profiler_Queue *queue, int skip_none) {
+	for (size_t i = 0; i < queue->count; i++) {
+		struct Memory_Profiler_Event *event = Memory_Profiler_Queue_at(queue, i);
+		
+		// Skip already-processed events if requested:
+		if (skip_none && event->type == MEMORY_PROFILER_EVENT_TYPE_NONE) continue;
 		
 		// Update all VALUEs if they moved during compaction:
 		event->capture = rb_gc_location(event->capture);

@@ -128,19 +145,44 @@ static void Memory_Profiler_Events_compact(void *ptr) {
 	}
 }
 
+// GC compact callback - update all VALUEs in both event queues.
+static void Memory_Profiler_Events_compact(void *ptr) {
+	struct Memory_Profiler_Events *events = ptr;
+	
+	// Update objects in the available queue:
+	Memory_Profiler_Events_compact_queue(events->available, 0);
+	
+	// Update objects in the processing queue:
+	Memory_Profiler_Events_compact_queue(events->processing, 1);
+}
+
 // GC free callback.
 static void Memory_Profiler_Events_free(void *ptr) {
 	struct Memory_Profiler_Events *events = ptr;
-	Memory_Profiler_Queue_free(&events->
+	Memory_Profiler_Queue_free(&events->queues[0]);
+	Memory_Profiler_Queue_free(&events->queues[1]);
 }
 
 // GC memsize callback.
 static size_t Memory_Profiler_Events_memsize(const void *ptr) {
 	const struct Memory_Profiler_Events *events = ptr;
-	return sizeof(struct Memory_Profiler_Events)
+	return sizeof(struct Memory_Profiler_Events)
+		+ (events->queues[0].capacity * events->queues[0].element_size)
+		+ (events->queues[1].capacity * events->queues[1].element_size);
 }
 
-
+const char *Memory_Profiler_Event_Type_name(enum Memory_Profiler_Event_Type type) {
+	switch (type) {
+		case MEMORY_PROFILER_EVENT_TYPE_NEWOBJ:
+			return "NEWOBJ";
+		case MEMORY_PROFILER_EVENT_TYPE_FREEOBJ:
+			return "FREEOBJ";
+		default:
+			return "NONE";
+	}
+}
+
+// Enqueue an event to the available queue (can be called anytime, even during processing).
 int Memory_Profiler_Events_enqueue(
 	enum Memory_Profiler_Event_Type type,
 	VALUE capture,

@@ -148,8 +190,9 @@ int Memory_Profiler_Events_enqueue(
 	VALUE object
 ) {
 	struct Memory_Profiler_Events *events = Memory_Profiler_Events_instance();
-	
-	
+	
+	// Always enqueue to the available queue - it won't be touched during processing:
+	struct Memory_Profiler_Event *event = Memory_Profiler_Queue_push(events->available);
 	if (event) {
 		event->type = type;
 		

@@ -158,8 +201,8 @@ int Memory_Profiler_Events_enqueue(
 		RB_OBJ_WRITE(events->self, &event->klass, klass);
 		RB_OBJ_WRITE(events->self, &event->object, object);
 		
-		
-		
+		if (DEBUG) fprintf(stderr, "Queued %s to available queue, size: %zu\n",
+			Memory_Profiler_Event_Type_name(type), events->available->count);
 		
 		rb_postponed_job_trigger(events->postponed_job_handle);
 		// Success:

@@ -191,11 +234,16 @@ static VALUE Memory_Profiler_Events_process_event_protected(VALUE arg) {
 static void Memory_Profiler_Events_process_queue(void *arg) {
 	struct Memory_Profiler_Events *events = (struct Memory_Profiler_Events *)arg;
 	
-	
+	// Swap the queues: available becomes processing, and the old processing queue (now empty) becomes available. This allows new events to continue enqueueing to the new available queue while we process.
+	struct Memory_Profiler_Queue *queue_to_process = events->available;
+	events->available = events->processing;
+	events->processing = queue_to_process;
 	
+	if (DEBUG) fprintf(stderr, "Processing event queue: %zu events\n", events->processing->count);
+	
 	// Process all events in order (maintains NEWOBJ before FREEOBJ for same object):
-	for (size_t i = 0; i < events->
-		struct Memory_Profiler_Event *event = Memory_Profiler_Queue_at(
+	for (size_t i = 0; i < events->processing->count; i++) {
+		struct Memory_Profiler_Event *event = Memory_Profiler_Queue_at(events->processing, i);
 		
 		// Process event with rb_protect to catch any exceptions:
 		int state = 0;

@@ -206,8 +254,14 @@ static void Memory_Profiler_Events_process_queue(void *arg) {
 			rb_warning("Exception in event processing callback (caught and suppressed): %"PRIsVALUE, rb_errinfo());
 			rb_set_errinfo(Qnil);
 		}
+		
+		// Clear this event after processing to prevent marking stale data if GC runs:
+		event->type = MEMORY_PROFILER_EVENT_TYPE_NONE;
+		RB_OBJ_WRITE(events->self, &event->capture, Qnil);
+		RB_OBJ_WRITE(events->self, &event->klass, Qnil);
+		RB_OBJ_WRITE(events->self, &event->object, Qnil);
 	}
 	
-	//
-	Memory_Profiler_Queue_clear(
+	// Clear the processing queue (which is now empty logically):
+	Memory_Profiler_Queue_clear(events->processing);
 }
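The events.c change replaces the single shared queue with two queues and a pointer swap: new events always land in the `available` queue, and `process_queue` swaps the pointers before draining, so callbacks that allocate (and therefore enqueue) during processing write into the other buffer instead of mutating the queue being iterated. The following is a minimal sketch of that double-buffering idea only, using a hypothetical fixed-size `queue` type in place of the gem's `Memory_Profiler_Queue` API:

#include <stdio.h>
#include <stddef.h>

// Hypothetical fixed-size queue standing in for Memory_Profiler_Queue.
struct queue {
	int items[16];
	size_t count;
};

struct events {
	struct queue queues[2];
	struct queue *available;  // receives new events
	struct queue *processing; // currently being drained
};

static void enqueue(struct events *e, int value) {
	// Always push to the available queue; the processing queue is never touched here.
	if (e->available->count < 16) {
		e->available->items[e->available->count++] = value;
	}
}

static void process(struct events *e) {
	// Swap: the filled queue becomes the processing queue, and the (empty) other
	// queue becomes available, so enqueue() during processing cannot corrupt iteration.
	struct queue *to_process = e->available;
	e->available = e->processing;
	e->processing = to_process;
	
	for (size_t i = 0; i < e->processing->count; i++) {
		printf("processing event %d\n", e->processing->items[i]);
		// A re-entrant enqueue goes to the other buffer:
		enqueue(e, 100 + e->processing->items[i]);
	}
	
	e->processing->count = 0; // Clear after draining.
}

int main(void) {
	struct events e = {0};
	e.available = &e.queues[0];
	e.processing = &e.queues[1];
	
	enqueue(&e, 1);
	enqueue(&e, 2);
	process(&e); // prints 1, 2; events 101, 102 wait in the other buffer
	process(&e); // prints 101, 102
	return 0;
}

Clearing each processed event to `MEMORY_PROFILER_EVENT_TYPE_NONE` and `Qnil`, as the diff does, additionally keeps the GC mark and compact passes from touching stale slots between the swap and the final clear.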
data/readme.md
CHANGED

@@ -22,6 +22,10 @@ Please see the [project documentation](https://socketry.github.io/memory-profile
 
 Please see the [project releases](https://socketry.github.io/memory-profiler/releases/index) for all releases.
 
+### v1.1.11
+
+  - Double buffer shared events queues to fix queue corruption.
+
 ### v1.1.10
 
   - Added `Capture#new_count` - returns total number of allocations tracked across all classes.
data/releases.md
CHANGED

data.tar.gz.sig
CHANGED

Binary file

metadata
CHANGED

metadata.gz.sig
CHANGED

Binary file