memory-profiler 1.1.3 → 1.1.5
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +4 -4
- checksums.yaml.gz.sig +0 -0
- data/ext/memory/profiler/allocations.c +3 -2
- data/ext/memory/profiler/capture.c +158 -53
- data/lib/memory/profiler/sampler.rb +16 -41
- data/lib/memory/profiler/version.rb +1 -1
- data/readme.md +5 -1
- data/releases.md +5 -1
- data.tar.gz.sig +0 -0
- metadata +2 -2
- metadata.gz.sig +0 -0
checksums.yaml
CHANGED

@@ -1,7 +1,7 @@
 ---
 SHA256:
-  metadata.gz:
-  data.tar.gz:
+  metadata.gz: 45474399b764a755546268feed4710ae9c647a8265cd933be82e35b1f390e820
+  data.tar.gz: 47d263fc3fa31b8013d7eded37f22e74b8912b3029d6b5d9051f3a35e1b4aa6a
 SHA512:
-  metadata.gz:
-  data.tar.gz:
+  metadata.gz: d4afc54d3841e0517e966ca0e5015320cacdc747a0ed1b2004b3b5ed043345139e3d7125ee79a4ff8bc3781f04d88399f2373553fe870143adf92b7b745fe97c
+  data.tar.gz: e99bbeda7a53e23c30e90981711f88651fd7efd453d86ba9748b11485dd2fcad6562038b91390a1f692694508e71c0aa4221ec25f2ebe61663835f667973292e
checksums.yaml.gz.sig
CHANGED

Binary file
data/ext/memory/profiler/allocations.c
CHANGED

@@ -12,8 +12,9 @@ static VALUE Memory_Profiler_Allocations = Qnil;
 
 // Helper to mark object_states table values
 static int Memory_Profiler_Allocations_object_states_mark(st_data_t key, st_data_t value, st_data_t arg) {
-	VALUE object = (VALUE)key;
-	rb_gc_mark_movable(object);
+	// Don't mark the object, we use it as a key but we never use it as an object, and we don't want to retain it.
+	// VALUE object = (VALUE)key;
+	// rb_gc_mark_movable(object);
 	
 	VALUE state = (VALUE)value;
 	if (!NIL_P(state)) {
data/ext/memory/profiler/capture.c
CHANGED

@@ -13,6 +13,8 @@
 
 enum {
 	DEBUG = 0,
+	DEBUG_EVENT_QUEUES = 0,
+	DEBUG_STATE = 0,
 };
 
 static VALUE Memory_Profiler_Capture = Qnil;
@@ -21,8 +23,20 @@ static VALUE Memory_Profiler_Capture = Qnil;
 static VALUE sym_newobj;
 static VALUE sym_freeobj;
 
-// Queue item -
-struct
+// Queue item - new object data to be processed via postponed job
+struct Memory_Profiler_Newobj_Queue_Item {
+	// The class of the new object:
+	VALUE klass;
+	
+	// The Allocations wrapper:
+	VALUE allocations;
+	
+	// The newly allocated object:
+	VALUE object;
+};
+
+// Queue item - freed object data to be processed via postponed job
+struct Memory_Profiler_Freeobj_Queue_Item {
 	// The class of the freed object:
 	VALUE klass;
 	
@@ -41,10 +55,13 @@ struct Memory_Profiler_Capture {
 	// Is tracking enabled (via start/stop):
 	int enabled;
 	
-	// Queue for
-	struct Memory_Profiler_Queue
+	// Queue for new objects (processed via postponed job):
+	struct Memory_Profiler_Queue newobj_queue;
+	
+	// Queue for freed objects (processed via postponed job):
+	struct Memory_Profiler_Queue freeobj_queue;
 	
-	// Handle for the postponed job
+	// Handle for the postponed job (processes both queues)
 	rb_postponed_job_handle_t postponed_job_handle;
 };
 
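The struct above captures the release's core design: the event hooks only append fixed-size items to one of two queues, and a single pre-registered postponed job drains both once arbitrary Ruby code is safe to run. The following is a pure-Ruby model of that deferral pattern, for illustration only; it is not the gem's API and every name in it is invented:

```ruby
# Illustrative model of the two-queue deferral used by the C extension.
class DeferredQueues
	def initialize(&callback)
		@newobj_queue = []
		@freeobj_queue = []
		@object_states = {}
		@callback = callback
		@enabled = true
	end
	
	# Called from the "unsafe" context (the allocation hook): just record.
	def record_newobj(klass, object)
		@newobj_queue << [klass, object] if @enabled
	end
	
	# Called from the "unsafe" context (the GC hook): recover stored state.
	def record_freeobj(klass, object)
		state = @object_states.delete(object)
		@freeobj_queue << [klass, state] if @enabled && state
	end
	
	# Analogous to the postponed job: runs later, when calling user code is
	# safe. Recording is disabled so the callback's own allocations cannot
	# re-enter the queues; newobj drains before freeobj to preserve order.
	def drain
		@enabled = false
		@newobj_queue.each do |klass, object|
			state = @callback.call(klass, :newobj, nil)
			@object_states[object] = state unless state.nil?
		end
		@freeobj_queue.each {|klass, state| @callback.call(klass, :freeobj, state)}
		@newobj_queue.clear
		@freeobj_queue.clear
	ensure
		@enabled = true
	end
end
```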
@@ -73,9 +90,17 @@ static void Memory_Profiler_Capture_mark(void *ptr) {
 		st_foreach(capture->tracked_classes, Memory_Profiler_Capture_tracked_classes_mark, 0);
 	}
 	
+	// Mark new objects in the queue:
+	for (size_t i = 0; i < capture->newobj_queue.count; i++) {
+		struct Memory_Profiler_Newobj_Queue_Item *newobj = Memory_Profiler_Queue_at(&capture->newobj_queue, i);
+		rb_gc_mark_movable(newobj->klass);
+		rb_gc_mark_movable(newobj->allocations);
+		rb_gc_mark_movable(newobj->object);
+	}
+	
 	// Mark freed objects in the queue:
-	for (size_t i = 0; i < capture->
-	struct
+	for (size_t i = 0; i < capture->freeobj_queue.count; i++) {
+		struct Memory_Profiler_Freeobj_Queue_Item *freed = Memory_Profiler_Queue_at(&capture->freeobj_queue, i);
 		rb_gc_mark_movable(freed->klass);
 		rb_gc_mark_movable(freed->allocations);
 		
@@ -93,8 +118,9 @@ static void Memory_Profiler_Capture_free(void *ptr) {
 		st_free_table(capture->tracked_classes);
 	}
 	
-	// Free
-	Memory_Profiler_Queue_free(&capture->
+	// Free both queues (elements are stored directly, just free the queues)
+	Memory_Profiler_Queue_free(&capture->newobj_queue);
+	Memory_Profiler_Queue_free(&capture->freeobj_queue);
 	
 	xfree(capture);
 }
@@ -108,8 +134,9 @@ static size_t Memory_Profiler_Capture_memsize(const void *ptr) {
 		size += capture->tracked_classes->num_entries * (sizeof(st_data_t) + sizeof(struct Memory_Profiler_Capture_Allocations));
 	}
 	
-	// Add size of
-	size += capture->
+	// Add size of both queues (elements stored directly)
+	size += capture->newobj_queue.capacity * capture->newobj_queue.element_size;
+	size += capture->freeobj_queue.capacity * capture->freeobj_queue.element_size;
 	
 	return size;
 }
@@ -150,9 +177,19 @@ static void Memory_Profiler_Capture_compact(void *ptr) {
 		}
 	}
 	
+	// Update new objects in the queue
+	for (size_t i = 0; i < capture->newobj_queue.count; i++) {
+		struct Memory_Profiler_Newobj_Queue_Item *newobj = Memory_Profiler_Queue_at(&capture->newobj_queue, i);
+		
+		// Update all VALUEs if they moved during compaction
+		newobj->klass = rb_gc_location(newobj->klass);
+		newobj->allocations = rb_gc_location(newobj->allocations);
+		newobj->object = rb_gc_location(newobj->object);
+	}
+	
 	// Update freed objects in the queue
-	for (size_t i = 0; i < capture->
-	struct
+	for (size_t i = 0; i < capture->freeobj_queue.count; i++) {
+		struct Memory_Profiler_Freeobj_Queue_Item *freed = Memory_Profiler_Queue_at(&capture->freeobj_queue, i);
 		
 		// Update all VALUEs if they moved during compaction
 		freed->klass = rb_gc_location(freed->klass);
@@ -190,20 +227,52 @@ const char *event_flag_name(rb_event_flag_t event_flag) {
 	}
 }
 
-// Postponed job callback - processes queued freed objects
-// This runs
-
+// Postponed job callback - processes queued new and freed objects
+// This runs when it's safe to call Ruby code (not during allocation or GC)
+// IMPORTANT: Process newobj queue first, then freeobj queue to maintain order
+static void Memory_Profiler_Capture_process_queues(void *arg) {
 	VALUE self = (VALUE)arg;
 	struct Memory_Profiler_Capture *capture;
 	TypedData_Get_Struct(self, struct Memory_Profiler_Capture, &Memory_Profiler_Capture_type, capture);
 	
-	if (
-
+	if (DEBUG_EVENT_QUEUES) fprintf(stderr, "Processing queues: %zu newobj, %zu freeobj\n",
+		capture->newobj_queue.count, capture->freeobj_queue.count);
+	
+	// Disable tracking during queue processing to prevent infinite loop
+	// (rb_funcall can allocate, which would trigger more NEWOBJ events)
+	int was_enabled = capture->enabled;
+	capture->enabled = 0;
+	
+	// First, process all new objects in the queue
+	for (size_t i = 0; i < capture->newobj_queue.count; i++) {
+		struct Memory_Profiler_Newobj_Queue_Item *newobj = Memory_Profiler_Queue_at(&capture->newobj_queue, i);
+		VALUE klass = newobj->klass;
+		VALUE allocations = newobj->allocations;
+		VALUE object = newobj->object;
+		
+		struct Memory_Profiler_Capture_Allocations *record = Memory_Profiler_Allocations_get(allocations);
+		
+		// Call the Ruby callback with (klass, :newobj, nil) - callback returns state to store
+		if (!NIL_P(record->callback)) {
+			VALUE state = rb_funcall(record->callback, rb_intern("call"), 3, klass, sym_newobj, Qnil);
+			
+			// Store the state if callback returned something
+			if (!NIL_P(state)) {
+				if (!record->object_states) {
+					record->object_states = st_init_numtable();
+				}
+				
+				if (DEBUG_STATE) fprintf(stderr, "Memory_Profiler_Capture_process_queues: Storing state for object: %p (%s)\n",
+					(void *)object, rb_class2name(klass));
+				
+				st_insert(record->object_states, (st_data_t)object, (st_data_t)state);
+			}
+		}
 	}
 	
-	//
-	for (size_t i = 0; i < capture->
-	struct
+	// Then, process all freed objects in the queue
+	for (size_t i = 0; i < capture->freeobj_queue.count; i++) {
+		struct Memory_Profiler_Freeobj_Queue_Item *freed = Memory_Profiler_Queue_at(&capture->freeobj_queue, i);
 		VALUE klass = freed->klass;
 		VALUE allocations = freed->allocations;
 		VALUE state = freed->state;
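The loop above fixes the callback protocol in place: for each queued allocation the extension calls `callback.call(klass, :newobj, nil)` and stores a non-nil return value as that object's state, and the freeobj pass hands the state back. A sketch of a callback written against that protocol, using the `Allocations#track` block form that appears in sampler.rb below; the lifetime measurement is purely illustrative, and `start` is assumed from the start/stop API referenced elsewhere in this file:

```ruby
capture = Memory::Profiler::Capture.new

# As of this version, Capture#track returns the Allocations wrapper:
allocations = capture.track(String)

allocations.track do |klass, event, state|
	case event
	when :newobj
		# Runs from the postponed job, not inside the allocation hook.
		# The return value becomes this object's stored state:
		Process.clock_gettime(Process::CLOCK_MONOTONIC)
	when :freeobj
		# The stored state comes back when the object is freed:
		age = Process.clock_gettime(Process::CLOCK_MONOTONIC) - state if state
	end
end

capture.start
```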
@@ -216,38 +285,43 @@ static void Memory_Profiler_Capture_process_freed_queue(void *arg) {
 		}
 	}
 	
-	// Clear
-	Memory_Profiler_Queue_clear(&capture->
+	// Clear both queues (elements are reused on next cycle)
+	Memory_Profiler_Queue_clear(&capture->newobj_queue);
+	Memory_Profiler_Queue_clear(&capture->freeobj_queue);
+	
+	// Restore tracking state
+	capture->enabled = was_enabled;
 }
 
 // Handler for NEWOBJ event
+// SAFE: No longer calls Ruby code directly - queues for deferred processing
 static void Memory_Profiler_Capture_newobj_handler(VALUE self, struct Memory_Profiler_Capture *capture, VALUE klass, VALUE object) {
 	st_data_t allocations_data;
 	if (st_lookup(capture->tracked_classes, (st_data_t)klass, &allocations_data)) {
 		VALUE allocations = (VALUE)allocations_data;
 		struct Memory_Profiler_Capture_Allocations *record = Memory_Profiler_Allocations_get(allocations);
 		record->new_count++;
+		
+		// If we have a callback, queue the newobj for later processing
 		if (!NIL_P(record->callback)) {
-			//
-			//
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-			st_insert(record->object_states, (st_data_t)object, (st_data_t)state);
-			// Notify GC about the state VALUE stored in the table
-			RB_OBJ_WRITTEN(self, Qnil, state);
+			// Push a new item onto the queue (returns pointer to write to)
+			// NOTE: realloc is safe during allocation (doesn't trigger Ruby allocation)
+			struct Memory_Profiler_Newobj_Queue_Item *newobj = Memory_Profiler_Queue_push(&capture->newobj_queue);
+			if (newobj) {
+				if (DEBUG_EVENT_QUEUES) fprintf(stderr, "Queued newobj, queue size now: %zu/%zu\n",
+					capture->newobj_queue.count, capture->newobj_queue.capacity);
+				// Write directly to the allocated space
+				newobj->klass = klass;
+				newobj->allocations = allocations;
+				newobj->object = object;
+				
+				// Trigger postponed job to process the queue
+				if (DEBUG_EVENT_QUEUES) fprintf(stderr, "Triggering postponed job to process queues\n");
+				rb_postponed_job_trigger(capture->postponed_job_handle);
+			} else {
+				if (DEBUG_EVENT_QUEUES) fprintf(stderr, "Failed to queue newobj, out of memory\n");
 			}
+			// If push failed (out of memory), silently drop this newobj event
 		}
 	} else {
 		// Create record for this class (first time seeing it)
@@ -280,22 +354,30 @@ static void Memory_Profiler_Capture_freeobj_handler(VALUE self, struct Memory_Pr
 	
 	// If we have a callback and detailed tracking, queue the freeobj for later processing
 	if (!NIL_P(record->callback) && record->object_states) {
+		if (DEBUG_STATE) fprintf(stderr, "Memory_Profiler_Capture_freeobj_handler: Looking up state for object: %p\n", (void *)object);
+		
 		// Look up state stored during NEWOBJ
 		st_data_t state_data;
 		if (st_delete(record->object_states, (st_data_t *)&object, &state_data)) {
+			if (DEBUG_STATE) fprintf(stderr, "Found state for object: %p\n", (void *)object);
 			VALUE state = (VALUE)state_data;
 			
 			// Push a new item onto the queue (returns pointer to write to)
 			// NOTE: realloc is safe during GC (doesn't trigger Ruby allocation)
-			struct
+			struct Memory_Profiler_Freeobj_Queue_Item *freed = Memory_Profiler_Queue_push(&capture->freeobj_queue);
 			if (freed) {
+				if (DEBUG_EVENT_QUEUES) fprintf(stderr, "Queued freed object, queue size now: %zu/%zu\n",
+					capture->freeobj_queue.count, capture->freeobj_queue.capacity);
 				// Write directly to the allocated space
 				freed->klass = klass;
 				freed->allocations = allocations;
 				freed->state = state;
 				
-				// Trigger postponed job to process
+				// Trigger postponed job to process both queues after GC
+				if (DEBUG_EVENT_QUEUES) fprintf(stderr, "Triggering postponed job to process queues after GC\n");
 				rb_postponed_job_trigger(capture->postponed_job_handle);
+			} else {
+				if (DEBUG_EVENT_QUEUES) fprintf(stderr, "Failed to queue freed object, out of memory\n");
 			}
 			// If push failed (out of memory), silently drop this freeobj event
 		}
@@ -358,7 +440,12 @@ static void Memory_Profiler_Capture_event_callback(VALUE data, void *ptr) {
 	if (!klass) return;
 	
 	if (DEBUG) {
-
+		// In events other than NEWOBJ, we are unable to allocate objects (due to GC), so we simply say "ignored":
+		const char *klass_name = "ignored";
+		if (event_flag == RUBY_INTERNAL_EVENT_NEWOBJ) {
+			klass_name = rb_class2name(klass);
+		}
+		
 		fprintf(stderr, "Memory_Profiler_Capture_event_callback: %s, Object: %p, Class: %p (%s)\n", event_flag_name(event_flag), (void *)object, (void *)klass, klass_name);
 	}
 	
@@ -388,14 +475,15 @@ static VALUE Memory_Profiler_Capture_alloc(VALUE klass) {
 	
 	capture->enabled = 0;
 	
-	// Initialize
-	Memory_Profiler_Queue_initialize(&capture->
+	// Initialize both queues
+	Memory_Profiler_Queue_initialize(&capture->newobj_queue, sizeof(struct Memory_Profiler_Newobj_Queue_Item));
+	Memory_Profiler_Queue_initialize(&capture->freeobj_queue, sizeof(struct Memory_Profiler_Freeobj_Queue_Item));
 	
-	// Pre-register the postponed job for processing
-	// The job will be triggered whenever we queue
+	// Pre-register the postponed job for processing both queues
+	// The job will be triggered whenever we queue newobj or freeobj events
 	capture->postponed_job_handle = rb_postponed_job_preregister(
 		0, // flags
-
+		Memory_Profiler_Capture_process_queues,
 		(void *)obj
 	);
 	
@@ -449,6 +537,7 @@ static VALUE Memory_Profiler_Capture_stop(VALUE self) {
 // Add a class to track with optional callback
 // Usage: track(klass) or track(klass) { |obj, klass| ... }
 // Callback can call caller_locations with desired depth
+// Returns the Allocations object for the tracked class
 static VALUE Memory_Profiler_Capture_track(int argc, VALUE *argv, VALUE self) {
 	struct Memory_Profiler_Capture *capture;
 	TypedData_Get_Struct(self, struct Memory_Profiler_Capture, &Memory_Profiler_Capture_type, capture);
@@ -457,8 +546,10 @@ static VALUE Memory_Profiler_Capture_track(int argc, VALUE *argv, VALUE self) {
 	rb_scan_args(argc, argv, "1&", &klass, &callback);
 	
 	st_data_t allocations_data;
+	VALUE allocations;
+	
 	if (st_lookup(capture->tracked_classes, (st_data_t)klass, &allocations_data)) {
-
+		allocations = (VALUE)allocations_data;
 		struct Memory_Profiler_Capture_Allocations *record = Memory_Profiler_Allocations_get(allocations);
 		RB_OBJ_WRITE(self, &record->callback, callback);
 	} else {
@@ -469,7 +560,7 @@ static VALUE Memory_Profiler_Capture_track(int argc, VALUE *argv, VALUE self) {
 		record->object_states = NULL;
 		
 		// Wrap the record in a VALUE
-
+		allocations = Memory_Profiler_Allocations_wrap(record);
 		
 		st_insert(capture->tracked_classes, (st_data_t)klass, (st_data_t)allocations);
 		// Notify GC about the class VALUE stored as key in the table
@@ -482,7 +573,7 @@ static VALUE Memory_Profiler_Capture_track(int argc, VALUE *argv, VALUE self) {
 		}
 	}
 	
-	return
+	return allocations;
 }
 
 // Stop tracking a class
@@ -572,6 +663,19 @@ static VALUE Memory_Profiler_Capture_each(VALUE self) {
 	return self;
 }
 
+// Get allocations for a specific class
+static VALUE Memory_Profiler_Capture_aref(VALUE self, VALUE klass) {
+	struct Memory_Profiler_Capture *capture;
+	TypedData_Get_Struct(self, struct Memory_Profiler_Capture, &Memory_Profiler_Capture_type, capture);
+	
+	st_data_t allocations_data;
+	if (st_lookup(capture->tracked_classes, (st_data_t)klass, &allocations_data)) {
+		return (VALUE)allocations_data;
+	}
+	
+	return Qnil;
+}
+
 void Init_Memory_Profiler_Capture(VALUE Memory_Profiler)
 {
 	// Initialize event symbols
@@ -591,6 +695,7 @@ void Init_Memory_Profiler_Capture(VALUE Memory_Profiler)
 	rb_define_method(Memory_Profiler_Capture, "tracking?", Memory_Profiler_Capture_tracking_p, 1);
 	rb_define_method(Memory_Profiler_Capture, "count_for", Memory_Profiler_Capture_count_for, 1);
 	rb_define_method(Memory_Profiler_Capture, "each", Memory_Profiler_Capture_each, 0);
+	rb_define_method(Memory_Profiler_Capture, "[]", Memory_Profiler_Capture_aref, 1);
 	rb_define_method(Memory_Profiler_Capture, "clear", Memory_Profiler_Capture_clear, 0);
 	
 	// Initialize Allocations class
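Together with the `rb_define_method` registration above, a tracked class's `Allocations` wrapper can now be looked up directly. A short usage sketch; the workload and the inspect output are illustrative:

```ruby
capture = Memory::Profiler::Capture.new
capture.track(Hash)
capture.start

1000.times {Hash.new}

# New in this version: [] returns the Allocations object for a tracked
# class, or nil for anything that was never tracked:
allocations = capture[Hash]   # => an Allocations instance
capture[Array]                # => nil

puts capture.count_for(Hash)
```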
data/lib/memory/profiler/sampler.rb
CHANGED

@@ -141,7 +141,7 @@ module Memory
 				if sample.sample!(count)
 					# Check if we should enable detailed tracking
 					if sample.increases >= @increases_threshold && !@call_trees.key?(klass)
-
+						track(klass, allocations)
 					end
 					
 					# Notify about growth if block given
@@ -150,13 +150,19 @@ module Memory
 				end
 			end
 			
-			#
-
+			# Start tracking with call path analysis.
+			#
+			# @parameter klass [Class] The class to track with detailed analysis.
+			def track(klass, allocations = nil)
+				# Track the class and get the allocations object
+				allocations ||= @capture.track(klass)
+				
+				# Set up call tree for this class
 				tree = @call_trees[klass] = CallTree.new
 				depth = @depth
 				filter = @filter
 				
-				# Register callback on allocations object
+				# Register callback on allocations object:
 				# - On :newobj - returns state (leaf node) which C extension stores
 				# - On :freeobj - receives state back from C extension
 				allocations.track do |klass, event, state|
@@ -166,43 +172,16 @@ module Memory
 					locations = caller_locations(1, depth)
 					filtered = locations.select(&filter)
 					unless filtered.empty?
-						# Record returns the leaf node - return it so C can store it
+						# Record returns the leaf node - return it so C can store it:
 						tree.record(filtered)
 					end
-					# Return nil or the node - C will store whatever we return
+					# Return nil or the node - C will store whatever we return.
 				when :freeobj
-					# Decrement using the state (leaf node) passed back from
-
-					state.decrement_path!
-				end
+					# Decrement using the state (leaf node) passed back from then native extension:
+					state&.decrement_path!
 				end
 			rescue Exception => error
-				warn "Error in
-				end
-			end
-			
-			# Start tracking allocations for a class (count only).
-			def track(klass)
-				return if @capture.tracking?(klass)
-				
-				@capture.track(klass)
-			end
-			
-			# Start tracking with call path analysis.
-			#
-			# @parameter klass [Class] The class to track with detailed analysis.
-			def track_with_analysis(klass)
-				# Track the class if not already tracked
-				unless @capture.tracking?(klass)
-					@capture.track(klass)
-				end
-				
-				# Enable analysis by setting callback on the allocations object
-				@capture.each do |tracked_klass, allocations|
-					if tracked_klass == klass
-						track_with_analysis_internal(klass, allocations)
-						break
-					end
+				warn "Error in allocation tracking: #{error.message}\n#{error.backtrace.join("\n")}"
 			end
 		end
 		
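This consolidates the old `track`/`track_with_analysis` pair into a single `track(klass, allocations = nil)`: the growth monitor passes in the wrapper it already holds, while external callers can omit it and let `@capture.track` supply one. A hedged sketch of both call sites; the bare `Sampler.new` constructor call is an assumption, not from this diff:

```ruby
sampler = Memory::Profiler::Sampler.new

# External callers: one call both counts allocations and analyzes call paths.
sampler.track(Hash)

# Internally, the monitoring loop reuses the wrapper it already has,
# avoiding a second lookup:
#   track(klass, allocations) if sample.increases >= @increases_threshold
```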
@@ -280,11 +259,7 @@ module Memory
 			private
 			
 			def default_filter
-				->(location) {
-					!path.include?("/gems/") &&
-					!path.include?("/ruby/") &&
-					!path.start_with?("(eval)")
-				}
+				->(location) {!location.path.match?(%r{/(gems|ruby)/|\A\(eval\)})}
 			end
 		end
 	end
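The rewritten `default_filter` collapses three substring checks into one regexp and, in passing, fixes the old body, which called `path` without the `location` receiver. Roughly, for `Thread::Backtrace::Location`-like values:

```ruby
filter = ->(location) {!location.path.match?(%r{/(gems|ruby)/|\A\(eval\)})}

# Stand-in for Thread::Backtrace::Location, just for demonstration:
Location = Struct.new(:path)

filter.call(Location.new("/app/models/user.rb"))         # => true (kept)
filter.call(Location.new("/usr/lib/ruby/3.3.0/set.rb"))   # => false (filtered)
filter.call(Location.new("/bundle/gems/json-2.7/lib.rb")) # => false (filtered)
filter.call(Location.new("(eval)"))                       # => false (filtered)
```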
data/readme.md
CHANGED

@@ -22,9 +22,13 @@ Please see the [project documentation](https://socketry.github.io/memory-profile
 
 Please see the [project releases](https://socketry.github.io/memory-profiler/releases/index) for all releases.
 
+### v1.1.5
+
+- Use queue for `newobj` too to avoid invoking user code during object allocation.
+
 ### v1.1.2
 
-
+- Fix handling of GC compaction (I hope).
 
 ### v0.1.0
 
data/releases.md
CHANGED

data.tar.gz.sig
CHANGED

Binary file
metadata
CHANGED

@@ -1,7 +1,7 @@
 --- !ruby/object:Gem::Specification
 name: memory-profiler
 version: !ruby/object:Gem::Version
-  version: 1.1.3
+  version: 1.1.5
 platform: ruby
 authors:
 - Samuel Williams

@@ -74,7 +74,7 @@ required_ruby_version: !ruby/object:Gem::Requirement
   requirements:
   - - ">="
   - !ruby/object:Gem::Version
-    version: '3.
+    version: '3.3'
 required_rubygems_version: !ruby/object:Gem::Requirement
   requirements:
   - - ">="
metadata.gz.sig
CHANGED

Binary file