memory-profiler 1.1.10 → 1.1.12
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +4 -4
- checksums.yaml.gz.sig +0 -0
- data/ext/memory/profiler/allocations.c +4 -15
- data/ext/memory/profiler/capture.c +43 -34
- data/ext/memory/profiler/events.c +84 -35
- data/ext/memory/profiler/events.h +1 -0
- data/lib/memory/profiler/version.rb +1 -1
- data/readme.md +8 -0
- data/releases.md +8 -0
- data.tar.gz.sig +0 -0
- metadata +1 -1
- metadata.gz.sig +0 -0
checksums.yaml
CHANGED
@@ -1,7 +1,7 @@
 ---
 SHA256:
-  metadata.gz:
-  data.tar.gz:
+  metadata.gz: 4fb4dabaaccdf849f1226fc675a2ffed0893ee3b1298263c40b42667c544f0b4
+  data.tar.gz: de34c582331bcd65fa65bda39e41ea5c9fd07ab44fa9c325729011c47f0a5353
 SHA512:
-  metadata.gz:
-  data.tar.gz:
+  metadata.gz: c8b7a4c1c3d3eff6c2ff9531847544bf7b9c8be7008615162b9442726066df94d48ab77ce34d0af106ac1376e14a8ede49ed90be8b2e29818b781c6ac34ae8f3
+  data.tar.gz: bdf055ac783a19b8662a271de7b5493c4c7cbac6b01c0c1a515809a04058262ec6d2c42c96a8f2cbef558edf0c28e35fdbcc3f818c4e5e9d2fc7fb6a11d57d1c
checksums.yaml.gz.sig
CHANGED
Binary file

data/ext/memory/profiler/allocations.c
CHANGED

@@ -12,8 +12,9 @@ static VALUE Memory_Profiler_Allocations = Qnil;
 
 // Helper to mark states table (object => state)
 static int Memory_Profiler_Allocations_states_mark(st_data_t key, st_data_t value, st_data_t arg) {
-	//
-	
+	// We don't want the key to move - we can't rehash the table if it does.
+	rb_gc_mark((VALUE)key);
+	
 	VALUE state = (VALUE)value;
 	rb_gc_mark_movable(state);
 	return ST_CONTINUE;

@@ -27,19 +28,7 @@ static int Memory_Profiler_Allocations_states_foreach(st_data_t key, st_data_t v
 
 // Replace callback for st_foreach_with_replace to update states during compaction
 static int Memory_Profiler_Allocations_states_compact(st_data_t *key, st_data_t *value, st_data_t data, int existing) {
-	
-	VALUE old_state = (VALUE)*value;
-	VALUE new_state = rb_gc_location(old_state);
-	
-	// NOTE: We can't update object keys if they moved (would require rehashing/allocation).
-	// This means lookups may fail after compaction for moved objects.
-	// This is acceptable - FREEOBJ will simply not find the state and skip the callback.
-	// The state will be cleaned up when Allocations is freed/cleared.
-	
-	// Update state value if it moved (this is safe, doesn't rehash)
-	if (old_state != new_state) {
-		*value = (st_data_t)new_state;
-	}
+	*value = (st_data_t)rb_gc_location((VALUE)*value);
 	
 	return ST_CONTINUE;
 }
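A note on the simplification above: `rb_gc_mark` pins an object, so GC compaction will not move the table keys, while values marked with `rb_gc_mark_movable` may still move and are repaired with `rb_gc_location` in the compaction callback. A minimal sketch of that marking pattern (illustrative names, not the gem's actual identifiers):

    // dmark callback sketch: pin the key, let the value move.
    static int example_states_mark(st_data_t key, st_data_t value, st_data_t arg) {
        rb_gc_mark((VALUE)key);           // pinning: compaction will not move this object, so no rehash is needed
        rb_gc_mark_movable((VALUE)value); // movable: repaired later via rb_gc_location() in the compact callback
        return ST_CONTINUE;
    }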
data/ext/memory/profiler/capture.c
CHANGED

@@ -26,7 +26,7 @@ struct Memory_Profiler_Capture {
 	int running;
 	
 	// Should we queue callbacks? (temporarily disabled during queue processing).
-	int enabled;
+	int paused;
 	
 	// Tracked classes: class => VALUE (wrapped Memory_Profiler_Capture_Allocations).
 	st_table *tracked_classes;
@@ -142,6 +142,9 @@ static void Memory_Profiler_Capture_process_newobj(VALUE capture_value, VALUE kl
 	struct Memory_Profiler_Capture *capture;
 	TypedData_Get_Struct(capture_value, struct Memory_Profiler_Capture, &Memory_Profiler_Capture_type, capture);
 	
+	// Pause the capture to prevent infinite loop:
+	capture->paused += 1;
+	
 	// Increment global new count:
 	capture->new_count++;
 	

@@ -171,20 +174,17 @@ static void Memory_Profiler_Capture_process_newobj(VALUE capture_value, VALUE kl
 	
 	// Only store state if there's a callback
 	if (!NIL_P(record->callback)) {
-		// Temporarily disable queueing to prevent infinite loop
-		int was_enabled = capture->enabled;
-		capture->enabled = 0;
-		
 		VALUE state = rb_funcall(record->callback, rb_intern("call"), 3, klass, sym_newobj, Qnil);
 		
-		capture->enabled = was_enabled;
-		
 		// Store state using object as key (works because object is alive):
 		st_insert(record->states, (st_data_t)object, (st_data_t)state);
 	} else {
 		// Store state as nil:
 		st_insert(record->states, (st_data_t)object, (st_data_t)Qnil);
 	}
+	
+	// Resume the capture:
+	capture->paused -= 1;
 }
 
 // Process a FREEOBJ event. All deallocation tracking logic is here.
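The `paused` counter above replaces the old `enabled` flag: processing an allocation may call back into Ruby via `rb_funcall`, which itself allocates and would otherwise re-enter the NEWOBJ hook recursively. A rough sketch of the guard pattern, with hypothetical names rather than the gem's actual structs:

    struct example_capture {
        int paused; // number of nested sections that must not record new events
    };

    static void example_process(struct example_capture *capture) {
        capture->paused += 1; // allocations made below are ignored by the hook
        // ... call into Ruby, which may allocate ...
        capture->paused -= 1; // resume tracking
    }

    static void example_hook(struct example_capture *capture) {
        if (capture->paused) return; // skip events generated while paused
        // ... record the event ...
    }

Using a counter rather than a boolean also keeps the guard correct if paused sections ever nest.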
@@ -192,11 +192,14 @@ static void Memory_Profiler_Capture_process_freeobj(VALUE capture_value, VALUE k
 	struct Memory_Profiler_Capture *capture;
 	TypedData_Get_Struct(capture_value, struct Memory_Profiler_Capture, &Memory_Profiler_Capture_type, capture);
 	
+	// Pause the capture to prevent infinite loop:
+	capture->paused += 1;
+	
 	// Look up allocations record for this class
 	st_data_t allocations_data;
 	if (!st_lookup(capture->tracked_classes, (st_data_t)klass, &allocations_data)) {
 		// The class is not tracked, so we are not tracking this object.
-		
+		goto done;
 	}
 	
 	VALUE allocations = (VALUE)allocations_data;

@@ -204,32 +207,27 @@ static void Memory_Profiler_Capture_process_freeobj(VALUE capture_value, VALUE k
 	
 	if (!record->states) {
 		// There is no state table for this class, so we are not tracking it.
-		
+		goto done;
 	}
 	
 	st_data_t state_data;
-	if (
-	
-	
-	
-	
-	VALUE state = (VALUE)state_data;
-	
-	// Increment global free count
-	capture->free_count++;
-	
-	// Increment per-class free count
-	record->free_count++;
-	
-	if (!NIL_P(record->callback) && !NIL_P(state)) {
-		// Temporarily disable queueing to prevent infinite loop
-		int was_enabled = capture->enabled;
-		capture->enabled = 0;
-		
-		
+	if (st_delete(record->states, (st_data_t *)&object, &state_data)) {
+		VALUE state = (VALUE)state_data;
+		
+		// Increment global free count
+		capture->free_count++;
+		
+		// Increment per-class free count
+		record->free_count++;
+		
+		if (!NIL_P(record->callback) && !NIL_P(state)) {
+			rb_funcall(record->callback, rb_intern("call"), 3, klass, sym_freeobj, state);
+		}
 	}
+	
+done:
+	// Resume the capture:
+	capture->paused -= 1;
 }
 
 // Process a single event (NEWOBJ or FREEOBJ). Called from events.c via rb_protect to catch exceptions.
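The FREEOBJ path now relies on `st_delete`, which looks the key up and removes the entry in a single step, writing the stored value through its last argument and returning non-zero only when the key was present. Roughly, with a hypothetical table and key:

    st_data_t key = (st_data_t)object_id;
    st_data_t value;

    if (st_delete(table, &key, &value)) {
        // The entry existed: `value` holds the stored state and the entry is gone,
        // so a duplicate FREEOBJ for the same key cannot trigger the callback twice.
    } else {
        // The key was never inserted (or was already deleted) - nothing to do.
    }

Combined with the `done:` label, every early exit still reaches the `capture->paused -= 1` cleanup.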
@@ -241,6 +239,9 @@ void Memory_Profiler_Capture_process_event(struct Memory_Profiler_Event *event)
 		case MEMORY_PROFILER_EVENT_TYPE_FREEOBJ:
 			Memory_Profiler_Capture_process_freeobj(event->capture, event->klass, event->object);
 			break;
+		default:
+			// Ignore.
+			break;
 	}
 }
 
@@ -299,12 +300,20 @@ static void Memory_Profiler_Capture_event_callback(VALUE data, void *ptr) {
 	
 	if (event_flag == RUBY_INTERNAL_EVENT_NEWOBJ) {
 		// Skip NEWOBJ if disabled (during callback) to prevent infinite recursion
-		if (
+		if (capture->paused) return;
 		
+		// It's safe to unconditionally call here:
+		object = rb_obj_id(object);
+		
 		Memory_Profiler_Events_enqueue(MEMORY_PROFILER_EVENT_TYPE_NEWOBJ, data, klass, object);
 	} else if (event_flag == RUBY_INTERNAL_EVENT_FREEOBJ) {
-		//
-		
+		// We only care about objects that have been seen before (i.e. have an object ID):
+		if (RB_FL_TEST(object, FL_SEEN_OBJ_ID)) {
+			// It's only safe to call here if the object already has an object ID.
+			object = rb_obj_id(object);
+			
+			Memory_Profiler_Events_enqueue(MEMORY_PROFILER_EVENT_TYPE_FREEOBJ, data, klass, object);
+		}
 	}
 }
 
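This is the v1.1.12 change from the release notes: the hooks key object state by the result of `rb_obj_id` rather than by the raw `VALUE`. An object's ID stays the same for its whole lifetime even if GC compaction moves the object, so it remains a valid st_table key where a raw pointer would go stale; on FREEOBJ the ID is only requested when the object already has one (the `FL_SEEN_OBJ_ID` test above), both because such an object was never inserted into the table and because, as the diff's own comment notes, it is only safe to ask for the ID of a dying object if it already exists. A small sketch of the idea (hypothetical `states` table and `state` value, not the gem's code):

    // NEWOBJ: derive a stable key before the object can ever move.
    VALUE key = rb_obj_id(object);
    st_insert(states, (st_data_t)key, (st_data_t)state);

    // Later, even if compaction has moved `object`, rb_obj_id(object) yields
    // the same integer, so the FREEOBJ lookup still finds the entry recorded above.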
@@ -329,7 +338,7 @@ static VALUE Memory_Profiler_Capture_alloc(VALUE klass) {
 	
 	// Initialize state flags - not running, callbacks disabled
 	capture->running = 0;
-	capture->
+	capture->paused = 0;
 	
 	// Global event queue system will auto-initialize on first use (lazy initialization)
 	

@@ -362,7 +371,7 @@ static VALUE Memory_Profiler_Capture_start(VALUE self) {
 	
 	// Set both flags - we're now running and callbacks are enabled
 	capture->running = 1;
-	capture->
+	capture->paused = 0;
 	
 	return Qtrue;
 }

@@ -383,7 +392,7 @@ static VALUE Memory_Profiler_Capture_stop(VALUE self) {
 	
 	// Clear both flags - we're no longer running and callbacks are disabled
 	capture->running = 0;
-	capture->
+	capture->paused = 0;
 	
 	return Qtrue;
 }
data/ext/memory/profiler/events.c
CHANGED

@@ -17,9 +17,10 @@ struct Memory_Profiler_Events {
 	// The VALUE wrapper for this struct (needed for write barriers).
 	VALUE self;
 	
-	//
-	struct Memory_Profiler_Queue
-	
+	// Double-buffered event queues (contains events from all Capture instances).
+	struct Memory_Profiler_Queue queues[2];
+	struct Memory_Profiler_Queue *available, *processing;
+	
 	// Postponed job handle for processing the queue.
 	// Postponed job handles are an extremely limited resource, so we only register one global event queue.
 	rb_postponed_job_handle_t postponed_job_handle;

@@ -51,8 +52,13 @@ static VALUE Memory_Profiler_Events_new(void) {
 	// Store the VALUE wrapper for write barriers:
 	events->self = self;
 	
-	// Initialize
-	Memory_Profiler_Queue_initialize(&events->
+	// Initialize both queues for double buffering:
+	Memory_Profiler_Queue_initialize(&events->queues[0], sizeof(struct Memory_Profiler_Event));
+	Memory_Profiler_Queue_initialize(&events->queues[1], sizeof(struct Memory_Profiler_Event));
+	
+	// Start with queues[0] available for incoming events, queues[1] for processing (initially empty):
+	events->available = &events->queues[0];
+	events->processing = &events->queues[1];
 	
 	// Pre-register the single postponed job for processing the queue:
 	events->postponed_job_handle = rb_postponed_job_preregister(0,
@@ -88,13 +94,13 @@ struct Memory_Profiler_Events* Memory_Profiler_Events_instance(void) {
 	return events;
 }
 
-//
-static void
-
-
-
-
-
+// Helper to mark events in a queue.
+static void Memory_Profiler_Events_mark_queue(struct Memory_Profiler_Queue *queue, int skip_none) {
+	for (size_t i = 0; i < queue->count; i++) {
+		struct Memory_Profiler_Event *event = Memory_Profiler_Queue_at(queue, i);
+		
+		// Skip already-processed events if requested:
+		if (skip_none && event->type == MEMORY_PROFILER_EVENT_TYPE_NONE) continue;
 		
 		// Mark the Capture instance and class:
 		rb_gc_mark_movable(event->capture);

@@ -108,39 +114,70 @@ static void Memory_Profiler_Events_mark(void *ptr) {
 	}
 }
 
-// GC
-static void
+// GC mark callback - mark all VALUEs in both event queues.
+static void Memory_Profiler_Events_mark(void *ptr) {
 	struct Memory_Profiler_Events *events = ptr;
 	
-	//
-	
-	
+	// Mark all events in the available queue (receiving new events):
+	Memory_Profiler_Events_mark_queue(events->available, 0);
+	
+	// Mark all events in the processing queue (currently being processed):
+	Memory_Profiler_Events_mark_queue(events->processing, 1);
+}
+
+// Helper to compact events in a queue.
+static void Memory_Profiler_Events_compact_queue(struct Memory_Profiler_Queue *queue, int skip_none) {
+	for (size_t i = 0; i < queue->count; i++) {
+		struct Memory_Profiler_Event *event = Memory_Profiler_Queue_at(queue, i);
+		
+		// Skip already-processed events if requested:
+		if (skip_none && event->type == MEMORY_PROFILER_EVENT_TYPE_NONE) continue;
 		
 		// Update all VALUEs if they moved during compaction:
 		event->capture = rb_gc_location(event->capture);
 		event->klass = rb_gc_location(event->klass);
-		
-		// For NEWOBJ: update object pointer if it moved
-		// For FREEOBJ: DON'T update (object is being freed, pointer is stale)
-		if (event->type == MEMORY_PROFILER_EVENT_TYPE_NEWOBJ) {
-			event->object = rb_gc_location(event->object);
-		}
+		event->object = rb_gc_location(event->object);
 	}
 }
 
+// GC compact callback - update all VALUEs in both event queues.
+static void Memory_Profiler_Events_compact(void *ptr) {
+	struct Memory_Profiler_Events *events = ptr;
+	
+	// Update objects in the available queue:
+	Memory_Profiler_Events_compact_queue(events->available, 0);
+	
+	// Update objects in the processing queue:
+	Memory_Profiler_Events_compact_queue(events->processing, 1);
+}
+
 // GC free callback.
 static void Memory_Profiler_Events_free(void *ptr) {
 	struct Memory_Profiler_Events *events = ptr;
-	Memory_Profiler_Queue_free(&events->
+	Memory_Profiler_Queue_free(&events->queues[0]);
+	Memory_Profiler_Queue_free(&events->queues[1]);
 }
 
 // GC memsize callback.
 static size_t Memory_Profiler_Events_memsize(const void *ptr) {
 	const struct Memory_Profiler_Events *events = ptr;
-	return sizeof(struct Memory_Profiler_Events)
+	return sizeof(struct Memory_Profiler_Events)
+		+ (events->queues[0].capacity * events->queues[0].element_size)
+		+ (events->queues[1].capacity * events->queues[1].element_size);
 }
 
-
+const char *Memory_Profiler_Event_Type_name(enum Memory_Profiler_Event_Type type) {
+	switch (type) {
+		case MEMORY_PROFILER_EVENT_TYPE_NEWOBJ:
+			return "NEWOBJ";
+		case MEMORY_PROFILER_EVENT_TYPE_FREEOBJ:
+			return "FREEOBJ";
+		default:
+			return "NONE";
+	}
+}
+
+// Enqueue an event to the available queue (can be called anytime, even during processing).
 int Memory_Profiler_Events_enqueue(
 	enum Memory_Profiler_Event_Type type,
 	VALUE capture,
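For context, mark and compact helpers like the ones above are normally wired into the wrapping object's `rb_data_type_t` so the GC invokes them automatically. A generic sketch of that registration (field values are illustrative; the gem's actual type definition is not part of this diff):

    static const rb_data_type_t Example_Events_type = {
        .wrap_struct_name = "Memory::Profiler::Events",
        .function = {
            .dmark = Memory_Profiler_Events_mark,       // run during GC marking
            .dfree = Memory_Profiler_Events_free,       // run when the wrapper is swept
            .dsize = Memory_Profiler_Events_memsize,    // reported by ObjectSpace.memsize_of
            .dcompact = Memory_Profiler_Events_compact, // run after compaction to repair moved VALUEs
        },
        .flags = RUBY_TYPED_FREE_IMMEDIATELY,
    };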
@@ -148,8 +185,9 @@ int Memory_Profiler_Events_enqueue(
 	VALUE object
 ) {
 	struct Memory_Profiler_Events *events = Memory_Profiler_Events_instance();
-	
-	
+	
+	// Always enqueue to the available queue - it won't be touched during processing:
+	struct Memory_Profiler_Event *event = Memory_Profiler_Queue_push(events->available);
 	if (event) {
 		event->type = type;
 		

@@ -158,8 +196,8 @@ int Memory_Profiler_Events_enqueue(
 		RB_OBJ_WRITE(events->self, &event->klass, klass);
 		RB_OBJ_WRITE(events->self, &event->object, object);
 		
-		
-		
+		if (DEBUG) fprintf(stderr, "Queued %s to available queue, size: %zu\n",
+			Memory_Profiler_Event_Type_name(type), events->available->count);
 		
 		rb_postponed_job_trigger(events->postponed_job_handle);
 		// Success:

@@ -191,11 +229,16 @@ static VALUE Memory_Profiler_Events_process_event_protected(VALUE arg) {
 static void Memory_Profiler_Events_process_queue(void *arg) {
 	struct Memory_Profiler_Events *events = (struct Memory_Profiler_Events *)arg;
 	
-	
+	// Swap the queues: available becomes processing, and the old processing queue (now empty) becomes available. This allows new events to continue enqueueing to the new available queue while we process.
+	struct Memory_Profiler_Queue *queue_to_process = events->available;
+	events->available = events->processing;
+	events->processing = queue_to_process;
 	
+	if (DEBUG) fprintf(stderr, "Processing event queue: %zu events\n", events->processing->count);
+	
 	// Process all events in order (maintains NEWOBJ before FREEOBJ for same object):
-	for (size_t i = 0; i < events->
-	struct Memory_Profiler_Event *event = Memory_Profiler_Queue_at(
+	for (size_t i = 0; i < events->processing->count; i++) {
+		struct Memory_Profiler_Event *event = Memory_Profiler_Queue_at(events->processing, i);
 		
 		// Process event with rb_protect to catch any exceptions:
 		int state = 0;

@@ -206,8 +249,14 @@ static void Memory_Profiler_Events_process_queue(void *arg) {
 			rb_warning("Exception in event processing callback (caught and suppressed): %"PRIsVALUE, rb_errinfo());
 			rb_set_errinfo(Qnil);
 		}
+		
+		// Clear this event after processing to prevent marking stale data if GC runs:
+		event->type = MEMORY_PROFILER_EVENT_TYPE_NONE;
+		RB_OBJ_WRITE(events->self, &event->capture, Qnil);
+		RB_OBJ_WRITE(events->self, &event->klass, Qnil);
+		RB_OBJ_WRITE(events->self, &event->object, Qnil);
 	}
 	
-	//
-	Memory_Profiler_Queue_clear(
+	// Clear the processing queue (which is now empty logically):
+	Memory_Profiler_Queue_clear(events->processing);
 }
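Taken together, these events.c changes implement the double buffering described in the v1.1.11 notes: event hooks always append to the `available` queue, and the postponed job swaps the two queues before draining, so events that arrive mid-processing land in the other buffer instead of corrupting the one being iterated. A condensed sketch of the swap-and-drain pattern (hypothetical queue type; like the real code, it assumes both sides run on the Ruby VM thread):

    struct example_events {
        struct example_queue queues[2];
        struct example_queue *available;  // hooks push here
        struct example_queue *processing; // the postponed job drains this one
    };

    static void example_process(struct example_events *events) {
        // Swap: the filled queue becomes `processing`; the empty one starts receiving new events.
        struct example_queue *filled = events->available;
        events->available = events->processing;
        events->processing = filled;

        for (size_t i = 0; i < events->processing->count; i++) {
            // ... handle events->processing entries; any new events go to events->available ...
        }

        example_queue_clear(events->processing);
    }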
data/readme.md
CHANGED
@@ -22,6 +22,14 @@ Please see the [project documentation](https://socketry.github.io/memory-profile
 
 Please see the [project releases](https://socketry.github.io/memory-profiler/releases/index) for all releases.
 
+### v1.1.12
+
+- Use `rb_obj_id` for tracking object states to avoid compaction issues.
+
+### v1.1.11
+
+- Double buffer shared events queues to fix queue corruption.
+
 ### v1.1.10
 
 - Added `Capture#new_count` - returns total number of allocations tracked across all classes.
data/releases.md
CHANGED
@@ -1,5 +1,13 @@
 # Releases
 
+## v1.1.12
+
+- Use `rb_obj_id` for tracking object states to avoid compaction issues.
+
+## v1.1.11
+
+- Double buffer shared events queues to fix queue corruption.
+
 ## v1.1.10
 
 - Added `Capture#new_count` - returns total number of allocations tracked across all classes.
data.tar.gz.sig
CHANGED
Binary file
metadata
CHANGED
metadata.gz.sig
CHANGED
Binary file