memory-profiler 1.1.4 → 1.1.5
This diff shows the content of publicly available package versions as released to one of the supported registries. It is provided for informational purposes only and reflects the changes between package versions as they appear in their respective public registries.
- checksums.yaml +4 -4
- checksums.yaml.gz.sig +0 -0
- data/ext/memory/profiler/capture.c +127 -54
- data/lib/memory/profiler/version.rb +1 -1
- data/readme.md +5 -1
- data/releases.md +5 -1
- data.tar.gz.sig +0 -0
- metadata +1 -1
- metadata.gz.sig +0 -0
checksums.yaml
CHANGED
@@ -1,7 +1,7 @@
 ---
 SHA256:
-  metadata.gz:
-  data.tar.gz:
+  metadata.gz: 45474399b764a755546268feed4710ae9c647a8265cd933be82e35b1f390e820
+  data.tar.gz: 47d263fc3fa31b8013d7eded37f22e74b8912b3029d6b5d9051f3a35e1b4aa6a
 SHA512:
-  metadata.gz:
-  data.tar.gz:
+  metadata.gz: d4afc54d3841e0517e966ca0e5015320cacdc747a0ed1b2004b3b5ed043345139e3d7125ee79a4ff8bc3781f04d88399f2373553fe870143adf92b7b745fe97c
+  data.tar.gz: e99bbeda7a53e23c30e90981711f88651fd7efd453d86ba9748b11485dd2fcad6562038b91390a1f692694508e71c0aa4221ec25f2ebe61663835f667973292e
checksums.yaml.gz.sig
CHANGED
Binary file
data/ext/memory/profiler/capture.c
CHANGED
@@ -13,7 +13,7 @@
 
 enum {
 	DEBUG = 0,
-
+	DEBUG_EVENT_QUEUES = 0,
 	DEBUG_STATE = 0,
 };
 
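A note on the debug flags: because they are enum constants rather than preprocessor macros, the `if (DEBUG_EVENT_QUEUES) fprintf(...)` statements seen later in this diff always compile (so they cannot rot), yet the branch is constant-folded away when the flag is 0. A standalone illustration of the pattern (not the gem's code):

```c
#include <stdio.h>

enum {
	DEBUG_EVENT_QUEUES = 0, // flip to 1 to enable queue tracing
};

int main(void) {
	// Always type-checked by the compiler, but dead code the optimizer
	// removes entirely while the constant is 0:
	if (DEBUG_EVENT_QUEUES) fprintf(stderr, "tracing enabled\n");
	return 0;
}
```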
@@ -23,8 +23,20 @@ static VALUE Memory_Profiler_Capture = Qnil;
 static VALUE sym_newobj;
 static VALUE sym_freeobj;
 
-// Queue item -
-struct
+// Queue item - new object data to be processed via postponed job
+struct Memory_Profiler_Newobj_Queue_Item {
+	// The class of the new object:
+	VALUE klass;
+
+	// The Allocations wrapper:
+	VALUE allocations;
+
+	// The newly allocated object:
+	VALUE object;
+};
+
+// Queue item - freed object data to be processed via postponed job
+struct Memory_Profiler_Freeobj_Queue_Item {
 	// The class of the freed object:
 	VALUE klass;
 
@@ -43,10 +55,13 @@ struct Memory_Profiler_Capture {
 	// Is tracking enabled (via start/stop):
 	int enabled;
 
-	// Queue for
-	struct Memory_Profiler_Queue
+	// Queue for new objects (processed via postponed job):
+	struct Memory_Profiler_Queue newobj_queue;
+
+	// Queue for freed objects (processed via postponed job):
+	struct Memory_Profiler_Queue freeobj_queue;
 
-	// Handle for the postponed job
+	// Handle for the postponed job (processes both queues)
 	rb_postponed_job_handle_t postponed_job_handle;
 };
 
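The hunks above and below lean on a small inline-storage container, `struct Memory_Profiler_Queue`, defined elsewhere in the gem; its implementation is not part of this diff. The following is a minimal sketch consistent with what the diff does use (`count`, `capacity`, `element_size`, and the `initialize`/`push`/`at`/`clear`/`free` functions appear verbatim; the `base` field is an assumption), and it shows why pushing is safe from allocation/GC hooks: a push is pointer arithmetic plus an occasional plain `realloc`, never a Ruby allocation.

```c
#include <stdlib.h>
#include <stddef.h>

struct Memory_Profiler_Queue {
	void *base;          // contiguous inline element storage (assumed field)
	size_t element_size; // size of one element in bytes
	size_t count;        // elements currently queued
	size_t capacity;     // elements the allocation can hold
};

static void Memory_Profiler_Queue_initialize(struct Memory_Profiler_Queue *queue, size_t element_size) {
	queue->base = NULL;
	queue->element_size = element_size;
	queue->count = queue->capacity = 0;
}

// Returns space for one new element, or NULL if realloc fails:
static void *Memory_Profiler_Queue_push(struct Memory_Profiler_Queue *queue) {
	if (queue->count == queue->capacity) {
		size_t capacity = queue->capacity ? queue->capacity * 2 : 8;
		void *base = realloc(queue->base, capacity * queue->element_size);
		if (!base) return NULL;
		queue->base = base;
		queue->capacity = capacity;
	}
	return (char *)queue->base + queue->count++ * queue->element_size;
}

static void *Memory_Profiler_Queue_at(struct Memory_Profiler_Queue *queue, size_t index) {
	return (char *)queue->base + index * queue->element_size;
}

// Clearing keeps the allocation so the next cycle reuses it:
static void Memory_Profiler_Queue_clear(struct Memory_Profiler_Queue *queue) {
	queue->count = 0;
}

static void Memory_Profiler_Queue_free(struct Memory_Profiler_Queue *queue) {
	free(queue->base);
	queue->base = NULL;
	queue->count = queue->capacity = 0;
}
```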
@@ -75,9 +90,17 @@ static void Memory_Profiler_Capture_mark(void *ptr) {
 		st_foreach(capture->tracked_classes, Memory_Profiler_Capture_tracked_classes_mark, 0);
 	}
 
+	// Mark new objects in the queue:
+	for (size_t i = 0; i < capture->newobj_queue.count; i++) {
+		struct Memory_Profiler_Newobj_Queue_Item *newobj = Memory_Profiler_Queue_at(&capture->newobj_queue, i);
+		rb_gc_mark_movable(newobj->klass);
+		rb_gc_mark_movable(newobj->allocations);
+		rb_gc_mark_movable(newobj->object);
+	}
+
 	// Mark freed objects in the queue:
-	for (size_t i = 0; i < capture->
-	struct
+	for (size_t i = 0; i < capture->freeobj_queue.count; i++) {
+		struct Memory_Profiler_Freeobj_Queue_Item *freed = Memory_Profiler_Queue_at(&capture->freeobj_queue, i);
 		rb_gc_mark_movable(freed->klass);
 		rb_gc_mark_movable(freed->allocations);
 
@@ -95,8 +118,9 @@ static void Memory_Profiler_Capture_free(void *ptr) {
 		st_free_table(capture->tracked_classes);
 	}
 
-	// Free
-	Memory_Profiler_Queue_free(&capture->
+	// Free both queues (elements are stored directly, just free the queues)
+	Memory_Profiler_Queue_free(&capture->newobj_queue);
+	Memory_Profiler_Queue_free(&capture->freeobj_queue);
 
 	xfree(capture);
 }
@@ -110,8 +134,9 @@ static size_t Memory_Profiler_Capture_memsize(const void *ptr) {
 		size += capture->tracked_classes->num_entries * (sizeof(st_data_t) + sizeof(struct Memory_Profiler_Capture_Allocations));
 	}
 
-	// Add size of
-	size += capture->
+	// Add size of both queues (elements stored directly)
+	size += capture->newobj_queue.capacity * capture->newobj_queue.element_size;
+	size += capture->freeobj_queue.capacity * capture->freeobj_queue.element_size;
 
 	return size;
 }
@@ -152,9 +177,19 @@ static void Memory_Profiler_Capture_compact(void *ptr) {
 		}
 	}
 
+	// Update new objects in the queue
+	for (size_t i = 0; i < capture->newobj_queue.count; i++) {
+		struct Memory_Profiler_Newobj_Queue_Item *newobj = Memory_Profiler_Queue_at(&capture->newobj_queue, i);
+
+		// Update all VALUEs if they moved during compaction
+		newobj->klass = rb_gc_location(newobj->klass);
+		newobj->allocations = rb_gc_location(newobj->allocations);
+		newobj->object = rb_gc_location(newobj->object);
+	}
+
 	// Update freed objects in the queue
-	for (size_t i = 0; i < capture->
-	struct
+	for (size_t i = 0; i < capture->freeobj_queue.count; i++) {
+		struct Memory_Profiler_Freeobj_Queue_Item *freed = Memory_Profiler_Queue_at(&capture->freeobj_queue, i);
 
 		// Update all VALUEs if they moved during compaction
 		freed->klass = rb_gc_location(freed->klass);
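The mark, free, memsize, and compact functions touched above are the standard TypedData callback quartet. Their registration is not shown in this diff, but for a compaction-aware extension the `rb_data_type_t` wiring presumably looks like the sketch below; using `rb_gc_mark_movable` in `dmark` is exactly what obliges the matching `rb_gc_location` fix-ups in `dcompact`.

```c
// Sketch of the assumed type registration (not part of this diff):
static const rb_data_type_t Memory_Profiler_Capture_type = {
	.wrap_struct_name = "Memory::Profiler::Capture",
	.function = {
		.dmark = Memory_Profiler_Capture_mark,       // rb_gc_mark_movable(...) every held VALUE
		.dfree = Memory_Profiler_Capture_free,       // release tables and both queues
		.dsize = Memory_Profiler_Capture_memsize,    // feeds ObjectSpace.memsize_of
		.dcompact = Memory_Profiler_Capture_compact, // rb_gc_location(...) every movable VALUE
	},
	.flags = RUBY_TYPED_FREE_IMMEDIATELY,
};
```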
@@ -192,18 +227,52 @@ const char *event_flag_name(rb_event_flag_t event_flag) {
 	}
 }
 
-// Postponed job callback - processes queued freed objects
-// This runs
-static void Memory_Profiler_Capture_process_freed_queue(void *arg) {
+// Postponed job callback - processes queued new and freed objects
+// This runs when it's safe to call Ruby code (not during allocation or GC)
+// IMPORTANT: Process newobj queue first, then freeobj queue to maintain order
+static void Memory_Profiler_Capture_process_queues(void *arg) {
 	VALUE self = (VALUE)arg;
 	struct Memory_Profiler_Capture *capture;
 	TypedData_Get_Struct(self, struct Memory_Profiler_Capture, &Memory_Profiler_Capture_type, capture);
 
-	if (
+	if (DEBUG_EVENT_QUEUES) fprintf(stderr, "Processing queues: %zu newobj, %zu freeobj\n",
+		capture->newobj_queue.count, capture->freeobj_queue.count);
+
+	// Disable tracking during queue processing to prevent infinite loop
+	// (rb_funcall can allocate, which would trigger more NEWOBJ events)
+	int was_enabled = capture->enabled;
+	capture->enabled = 0;
+
+	// First, process all new objects in the queue
+	for (size_t i = 0; i < capture->newobj_queue.count; i++) {
+		struct Memory_Profiler_Newobj_Queue_Item *newobj = Memory_Profiler_Queue_at(&capture->newobj_queue, i);
+		VALUE klass = newobj->klass;
+		VALUE allocations = newobj->allocations;
+		VALUE object = newobj->object;
+
+		struct Memory_Profiler_Capture_Allocations *record = Memory_Profiler_Allocations_get(allocations);
+
+		// Call the Ruby callback with (klass, :newobj, nil) - callback returns state to store
+		if (!NIL_P(record->callback)) {
+			VALUE state = rb_funcall(record->callback, rb_intern("call"), 3, klass, sym_newobj, Qnil);
+
+			// Store the state if callback returned something
+			if (!NIL_P(state)) {
+				if (!record->object_states) {
+					record->object_states = st_init_numtable();
+				}
+
+				if (DEBUG_STATE) fprintf(stderr, "Memory_Profiler_Capture_process_queues: Storing state for object: %p (%s)\n",
+					(void *)object, rb_class2name(klass));
+
+				st_insert(record->object_states, (st_data_t)object, (st_data_t)state);
+			}
+		}
+	}
 
-	//
-	for (size_t i = 0; i < capture->
-	struct
+	// Then, process all freed objects in the queue
+	for (size_t i = 0; i < capture->freeobj_queue.count; i++) {
+		struct Memory_Profiler_Freeobj_Queue_Item *freed = Memory_Profiler_Queue_at(&capture->freeobj_queue, i);
 		VALUE klass = freed->klass;
 		VALUE allocations = freed->allocations;
 		VALUE state = freed->state;
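The preregister/trigger API used here (Ruby 3.3+) is what makes the deferral safe: triggering from inside an event hook only flags the job, and the VM invokes the callback at the next safe point, outside allocation and GC. Multiple triggers before the job runs coalesce into a single invocation, which is why one callback drains both queues. A standalone sketch of the pattern, with illustrative names (`drain_queues`, `Init_sketch`, `on_unsafe_event` are not the gem's):

```c
#include <ruby.h>
#include <ruby/debug.h>

static rb_postponed_job_handle_t job_handle;

// Invoked by the VM at the next safe point, where rb_funcall is legal:
static void drain_queues(void *arg) {
	// ... pop queued work and call back into Ruby ...
}

void Init_sketch(void) {
	// Pre-registration allocates the handle up front, so triggering
	// later performs no allocation at all:
	job_handle = rb_postponed_job_preregister(0, drain_queues, NULL);
}

// Called from a NEWOBJ/FREEOBJ event hook or during GC:
static void on_unsafe_event(void) {
	// Repeated triggers before the job runs coalesce into one invocation:
	rb_postponed_job_trigger(job_handle);
}
```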
@@ -216,41 +285,43 @@ static void Memory_Profiler_Capture_process_freed_queue(void *arg) {
 		}
 	}
 
-	// Clear
-	Memory_Profiler_Queue_clear(&capture->
+	// Clear both queues (elements are reused on next cycle)
+	Memory_Profiler_Queue_clear(&capture->newobj_queue);
+	Memory_Profiler_Queue_clear(&capture->freeobj_queue);
+
+	// Restore tracking state
+	capture->enabled = was_enabled;
 }
 
 // Handler for NEWOBJ event
+// SAFE: No longer calls Ruby code directly - queues for deferred processing
 static void Memory_Profiler_Capture_newobj_handler(VALUE self, struct Memory_Profiler_Capture *capture, VALUE klass, VALUE object) {
 	st_data_t allocations_data;
 	if (st_lookup(capture->tracked_classes, (st_data_t)klass, &allocations_data)) {
 		VALUE allocations = (VALUE)allocations_data;
 		struct Memory_Profiler_Capture_Allocations *record = Memory_Profiler_Allocations_get(allocations);
 		record->new_count++;
+
+		// If we have a callback, queue the newobj for later processing
 		if (!NIL_P(record->callback)) {
-			//
-			//
-
-
-
-
-
-
-
-
-
-			// Store the state if callback returned something
-			if (!NIL_P(state)) {
-				if (!record->object_states) {
-					record->object_states = st_init_numtable();
-				}
+			// Push a new item onto the queue (returns pointer to write to)
+			// NOTE: realloc is safe during allocation (doesn't trigger Ruby allocation)
+			struct Memory_Profiler_Newobj_Queue_Item *newobj = Memory_Profiler_Queue_push(&capture->newobj_queue);
+			if (newobj) {
+				if (DEBUG_EVENT_QUEUES) fprintf(stderr, "Queued newobj, queue size now: %zu/%zu\n",
+					capture->newobj_queue.count, capture->newobj_queue.capacity);
+				// Write directly to the allocated space
+				newobj->klass = klass;
+				newobj->allocations = allocations;
+				newobj->object = object;
 
-
-
-
-
-
+				// Trigger postponed job to process the queue
+				if (DEBUG_EVENT_QUEUES) fprintf(stderr, "Triggering postponed job to process queues\n");
+				rb_postponed_job_trigger(capture->postponed_job_handle);
+			} else {
+				if (DEBUG_EVENT_QUEUES) fprintf(stderr, "Failed to queue newobj, out of memory\n");
 			}
+			// If push failed (out of memory), silently drop this newobj event
 		}
 	} else {
 		// Create record for this class (first time seeing it)
@@ -293,19 +364,20 @@ static void Memory_Profiler_Capture_freeobj_handler(VALUE self, struct Memory_Pr
 
 		// Push a new item onto the queue (returns pointer to write to)
 		// NOTE: realloc is safe during GC (doesn't trigger Ruby allocation)
-		struct
+		struct Memory_Profiler_Freeobj_Queue_Item *freed = Memory_Profiler_Queue_push(&capture->freeobj_queue);
 		if (freed) {
-			if (
+			if (DEBUG_EVENT_QUEUES) fprintf(stderr, "Queued freed object, queue size now: %zu/%zu\n",
+				capture->freeobj_queue.count, capture->freeobj_queue.capacity);
 			// Write directly to the allocated space
 			freed->klass = klass;
 			freed->allocations = allocations;
 			freed->state = state;
 
-			// Trigger postponed job to process
-			if (
+			// Trigger postponed job to process both queues after GC
+			if (DEBUG_EVENT_QUEUES) fprintf(stderr, "Triggering postponed job to process queues after GC\n");
 			rb_postponed_job_trigger(capture->postponed_job_handle);
 		} else {
-			if (
+			if (DEBUG_EVENT_QUEUES) fprintf(stderr, "Failed to queue freed object, out of memory\n");
 		}
 		// If push failed (out of memory), silently drop this freeobj event
 	}
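Note that the freeobj queue item carries `state` rather than the dying object itself: the state has to be looked up (and removed) from `record->object_states` while the object's address is still unique, because the slot can be reused immediately after GC. That lookup is not visible in this hunk; the following is a hypothetical helper illustrating it with the same `st.h` API the diff already uses (`st_delete` is the real function, the surrounding flow is assumed).

```c
#include <ruby.h>
#include <ruby/st.h>

// Hypothetical sketch: remove and return the state stored for `object`,
// or Qnil if none was recorded. st_delete both fetches and erases the
// entry, so the table never holds a key for a recycled address.
static VALUE take_object_state(st_table *object_states, VALUE object) {
	st_data_t key = (st_data_t)object;
	st_data_t state_data = 0;

	if (object_states && st_delete(object_states, &key, &state_data)) {
		return (VALUE)state_data;
	}

	return Qnil;
}
```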
@@ -403,14 +475,15 @@ static VALUE Memory_Profiler_Capture_alloc(VALUE klass) {
 
 	capture->enabled = 0;
 
-	// Initialize
-	Memory_Profiler_Queue_initialize(&capture->
+	// Initialize both queues
+	Memory_Profiler_Queue_initialize(&capture->newobj_queue, sizeof(struct Memory_Profiler_Newobj_Queue_Item));
+	Memory_Profiler_Queue_initialize(&capture->freeobj_queue, sizeof(struct Memory_Profiler_Freeobj_Queue_Item));
 
-	// Pre-register the postponed job for processing
-	// The job will be triggered whenever we queue
+	// Pre-register the postponed job for processing both queues
+	// The job will be triggered whenever we queue newobj or freeobj events
 	capture->postponed_job_handle = rb_postponed_job_preregister(
 		0, // flags
-		Memory_Profiler_Capture_process_freed_queue,
+		Memory_Profiler_Capture_process_queues,
 		(void *)obj
 	);
 
data/readme.md
CHANGED
@@ -22,9 +22,13 @@ Please see the [project documentation](https://socketry.github.io/memory-profile
 
 Please see the [project releases](https://socketry.github.io/memory-profiler/releases/index) for all releases.
 
+### v1.1.5
+
+- Use queue for `newobj` too to avoid invoking user code during object allocation.
+
 ### v1.1.2
 
-
+- Fix handling of GC compaction (I hope).
 
 ### v0.1.0
 
data/releases.md
CHANGED

data.tar.gz.sig
CHANGED
Binary file

metadata
CHANGED

metadata.gz.sig
CHANGED
Binary file