memory-profiler 1.3.0 → 1.5.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,343 @@
+ // Released under the MIT License.
+ // Copyright, 2025, by Samuel Williams.
+
+ #include "table.h"
+ #include <stdlib.h>
+ #include <string.h>
+
+ // Use the Entry struct from header
+ // (No local definition needed)
+
+ #define INITIAL_CAPACITY 1024
+ #define LOAD_FACTOR 0.75
+
+ // Create a new table
+ struct Memory_Profiler_Object_Table* Memory_Profiler_Object_Table_new(size_t initial_capacity) {
+ 	struct Memory_Profiler_Object_Table *table = malloc(sizeof(struct Memory_Profiler_Object_Table));
+
+ 	if (!table) {
+ 		return NULL;
+ 	}
+
+ 	table->capacity = initial_capacity > 0 ? initial_capacity : INITIAL_CAPACITY;
+ 	table->count = 0;
+ 	table->strong = 0; // Start as weak table (strong == 0 means weak)
+
+ 	// Use calloc to zero out entries (Qnil = 0)
+ 	table->entries = calloc(table->capacity, sizeof(struct Memory_Profiler_Object_Table_Entry));
+
+ 	if (!table->entries) {
+ 		free(table);
+ 		return NULL;
+ 	}
+
+ 	return table;
+ }
+
+ // Free the table
+ void Memory_Profiler_Object_Table_free(struct Memory_Profiler_Object_Table *table) {
+ 	if (table) {
+ 		free(table->entries);
+ 		free(table);
+ 	}
+ }
+
+ // Simple hash function for object addresses
+ static inline size_t hash_object(VALUE object, size_t capacity) {
+ 	// Use address bits, shift right to ignore alignment
+ 	return ((size_t)object >> 3) % capacity;
+ }
+
+ // Find entry index for an object (linear probing)
+ // Returns index if found, or index of empty slot if not found
+ static size_t find_entry(struct Memory_Profiler_Object_Table_Entry *entries, size_t capacity, VALUE object, int *found) {
+ 	size_t index = hash_object(object, capacity);
+ 	size_t start = index;
+
+ 	*found = 0;
+
+ 	do {
+ 		if (entries[index].object == 0) {
+ 			// Empty slot (calloc zeros memory)
+ 			return index;
+ 		}
+
+ 		if (entries[index].object == object) {
+ 			// Found it
+ 			*found = 1;
+ 			return index;
+ 		}
+
+ 		// Linear probe
+ 		index = (index + 1) % capacity;
+ 	} while (index != start);
+
+ 	// Table is full (shouldn't happen with load factor)
+ 	return index;
+ }
+
+ // Resize the table (only called from insert, not during GC)
+ static void resize_table(struct Memory_Profiler_Object_Table *table) {
+ 	size_t old_capacity = table->capacity;
+ 	struct Memory_Profiler_Object_Table_Entry *old_entries = table->entries;
+
+ 	// Double capacity
+ 	table->capacity = old_capacity * 2;
+ 	table->count = 0;
+ 	table->entries = calloc(table->capacity, sizeof(struct Memory_Profiler_Object_Table_Entry));
+
+ 	if (!table->entries) {
+ 		// Resize failed - restore old state
+ 		table->capacity = old_capacity;
+ 		table->entries = old_entries;
+ 		return;
+ 	}
+
+ 	// Rehash all entries
+ 	for (size_t i = 0; i < old_capacity; i++) {
+ 		if (old_entries[i].object != Qnil) {
+ 			int found;
+ 			size_t new_index = find_entry(table->entries, table->capacity, old_entries[i].object, &found);
+ 			table->entries[new_index] = old_entries[i];
+ 			table->count++;
+ 		}
+ 	}
+
+ 	free(old_entries);
+ }
+
+ // Insert object, returns pointer to entry for caller to fill
+ struct Memory_Profiler_Object_Table_Entry* Memory_Profiler_Object_Table_insert(struct Memory_Profiler_Object_Table *table, VALUE object) {
+ 	// Resize if load factor exceeded
+ 	if ((double)table->count / table->capacity > LOAD_FACTOR) {
+ 		resize_table(table);
+ 	}
+
+ 	int found;
+ 	size_t index = find_entry(table->entries, table->capacity, object, &found);
+
+ 	if (!found) {
+ 		table->count++;
+ 		// Zero out the entry
+ 		table->entries[index].object = object;
+ 		table->entries[index].klass = 0;
+ 		table->entries[index].data = 0;
+ 		table->entries[index].allocations = 0;
+ 	}
+
+ 	// Set object (might be updating existing entry)
+ 	table->entries[index].object = object;
+
+ 	// Return pointer for caller to fill fields
+ 	return &table->entries[index];
+ }
+
+ // Lookup entry for object - returns pointer or NULL
+ struct Memory_Profiler_Object_Table_Entry* Memory_Profiler_Object_Table_lookup(struct Memory_Profiler_Object_Table *table, VALUE object) {
+ 	int found;
+ 	size_t index = find_entry(table->entries, table->capacity, object, &found);
+
+ 	if (found) {
+ 		return &table->entries[index];
+ 	}
+
+ 	return NULL;
+ }
+
+ // Delete object from table
+ void Memory_Profiler_Object_Table_delete(struct Memory_Profiler_Object_Table *table, VALUE object) {
+ 	int found;
+ 	size_t index = find_entry(table->entries, table->capacity, object, &found);
+
+ 	if (!found) {
+ 		return;
+ 	}
+
+ 	// Mark as deleted (set to 0/NULL)
+ 	table->entries[index].object = 0;
+ 	table->entries[index].klass = 0;
+ 	table->entries[index].data = 0;
+ 	table->entries[index].allocations = 0;
+ 	table->count--;
+
+ 	// Rehash following entries to fix probe chains
+ 	size_t next = (index + 1) % table->capacity;
+ 	while (table->entries[next].object != 0) {
+ 		// Save entry values
+ 		VALUE obj = table->entries[next].object;
+ 		VALUE k = table->entries[next].klass;
+ 		VALUE d = table->entries[next].data;
+ 		VALUE a = table->entries[next].allocations;
+
+ 		// Remove this entry (set to 0/NULL)
+ 		table->entries[next].object = 0;
+ 		table->entries[next].klass = 0;
+ 		table->entries[next].data = 0;
+ 		table->entries[next].allocations = 0;
+ 		table->count--;
+
+ 		// Reinsert (will find correct spot and fill fields)
+ 		struct Memory_Profiler_Object_Table_Entry *new_entry = Memory_Profiler_Object_Table_insert(table, obj);
+ 		new_entry->klass = k;
+ 		new_entry->data = d;
+ 		new_entry->allocations = a;
+
+ 		next = (next + 1) % table->capacity;
+ 	}
+ }
+
+ // Mark all entries for GC
+ void Memory_Profiler_Object_Table_mark(struct Memory_Profiler_Object_Table *table) {
+ 	if (!table) return;
+
+ 	for (size_t i = 0; i < table->capacity; i++) {
+ 		struct Memory_Profiler_Object_Table_Entry *entry = &table->entries[i];
+ 		if (entry->object != 0) {
+ 			// Mark object key if table is strong (strong > 0)
+ 			// When weak (strong == 0), object keys can be GC'd (that's how we detect frees)
+ 			if (table->strong > 0) {
+ 				rb_gc_mark_movable(entry->object);
+ 			}
+
+ 			// Always mark the other fields (klass, data, allocations) - we own these
+ 			if (entry->klass) rb_gc_mark_movable(entry->klass);
+ 			if (entry->data) rb_gc_mark_movable(entry->data);
+ 			if (entry->allocations) rb_gc_mark_movable(entry->allocations);
+ 		}
+ 	}
+ }
+
+ // Update object pointers during compaction
+ void Memory_Profiler_Object_Table_compact(struct Memory_Profiler_Object_Table *table) {
+ 	if (!table || table->count == 0) return;
+
+ 	// First pass: check if any objects moved
+ 	int any_moved = 0;
+ 	for (size_t i = 0; i < table->capacity; i++) {
+ 		if (table->entries[i].object != 0) {
+ 			VALUE new_loc = rb_gc_location(table->entries[i].object);
+ 			if (new_loc != table->entries[i].object) {
+ 				any_moved = 1;
+ 				break;
+ 			}
+ 		}
+ 	}
+
+ 	// If nothing moved, just update VALUE fields and we're done
+ 	if (!any_moved) {
+ 		for (size_t i = 0; i < table->capacity; i++) {
+ 			if (table->entries[i].object != 0) {
+ 				// Update VALUE fields if they moved
+ 				table->entries[i].klass = rb_gc_location(table->entries[i].klass);
+ 				table->entries[i].data = rb_gc_location(table->entries[i].data);
+ 				table->entries[i].allocations = rb_gc_location(table->entries[i].allocations);
+ 			}
+ 		}
+ 		return;
+ 	}
+
+ 	// Something moved - need to rehash entire table
+ 	// Collect all entries into temporary array (use system malloc, not Ruby's)
+ 	struct Memory_Profiler_Object_Table_Entry *temp_entries = malloc(table->count * sizeof(struct Memory_Profiler_Object_Table_Entry));
+ 	if (!temp_entries) {
+ 		// Allocation failed - this is bad, but can't do much during GC
+ 		return;
+ 	}
+
+ 	size_t temp_count = 0;
+ 	for (size_t i = 0; i < table->capacity; i++) {
+ 		if (table->entries[i].object != 0) {
+ 			// Update all pointers first
+ 			temp_entries[temp_count].object = rb_gc_location(table->entries[i].object);
+ 			temp_entries[temp_count].klass = rb_gc_location(table->entries[i].klass);
+ 			temp_entries[temp_count].data = rb_gc_location(table->entries[i].data);
+ 			temp_entries[temp_count].allocations = rb_gc_location(table->entries[i].allocations);
+ 			temp_count++;
+ 		}
+ 	}
+
+ 	// Clear the table (zero out all entries)
+ 	memset(table->entries, 0, table->capacity * sizeof(struct Memory_Profiler_Object_Table_Entry));
+ 	table->count = 0;
+
+ 	// Reinsert all entries with new hash values
+ 	for (size_t i = 0; i < temp_count; i++) {
+ 		int found;
+ 		size_t index = find_entry(table->entries, table->capacity, temp_entries[i].object, &found);
+
+ 		// Insert at new location
+ 		table->entries[index] = temp_entries[i];
+ 		table->count++;
+ 	}
+
+ 	// Free temporary array
+ 	free(temp_entries);
+ }
+
+ // Delete by entry pointer (faster - avoids second lookup)
+ void Memory_Profiler_Object_Table_delete_entry(struct Memory_Profiler_Object_Table *table, struct Memory_Profiler_Object_Table_Entry *entry) {
+ 	// Calculate index from pointer
+ 	size_t index = entry - table->entries;
+
+ 	// Validate it's within our table
+ 	if (index >= table->capacity) {
+ 		return; // Invalid pointer
+ 	}
+
+ 	// Check if entry is actually occupied
+ 	if (entry->object == 0) {
+ 		return; // Already deleted
+ 	}
+
+ 	// Mark as deleted (set to 0/NULL)
+ 	entry->object = 0;
+ 	entry->klass = 0;
+ 	entry->data = 0;
+ 	entry->allocations = 0;
+ 	table->count--;
+
+ 	// Rehash following entries to fix probe chains
+ 	size_t next = (index + 1) % table->capacity;
+ 	while (table->entries[next].object != 0) {
+ 		// Save entry values
+ 		VALUE obj = table->entries[next].object;
+ 		VALUE k = table->entries[next].klass;
+ 		VALUE d = table->entries[next].data;
+ 		VALUE a = table->entries[next].allocations;
+
+ 		// Remove this entry (set to 0/NULL)
+ 		table->entries[next].object = 0;
+ 		table->entries[next].klass = 0;
+ 		table->entries[next].data = 0;
+ 		table->entries[next].allocations = 0;
+ 		table->count--;
+
+ 		// Reinsert (will find correct spot and fill fields)
+ 		struct Memory_Profiler_Object_Table_Entry *new_entry = Memory_Profiler_Object_Table_insert(table, obj);
+ 		new_entry->klass = k;
+ 		new_entry->data = d;
+ 		new_entry->allocations = a;
+
+ 		next = (next + 1) % table->capacity;
+ 	}
+ }
+
+ // Get current size
+ size_t Memory_Profiler_Object_Table_size(struct Memory_Profiler_Object_Table *table) {
+ 	return table->count;
+ }
+
+ // Increment strong reference count (makes table strong when > 0)
+ void Memory_Profiler_Object_Table_increment_strong(struct Memory_Profiler_Object_Table *table) {
+ 	if (table) {
+ 		table->strong++;
+ 	}
+ }
+
+ // Decrement strong reference count (makes table weak when == 0)
+ void Memory_Profiler_Object_Table_decrement_strong(struct Memory_Profiler_Object_Table *table) {
+ 	if (table && table->strong > 0) {
+ 		table->strong--;
+ 	}
+ }
+
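The least obvious part of the implementation above is deletion: the table keeps no tombstones, so `Memory_Profiler_Object_Table_delete` clears the slot and then removes and re-inserts every entry in the probe cluster that follows, which keeps later linear-probe lookups from stopping early at the new hole. The following is a minimal, self-contained sketch of that same pattern, assuming a toy fixed-size table with `uintptr_t` keys; the `toy_*` names are illustrative and not part of the extension.

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

#define CAPACITY 8

typedef struct { uintptr_t key; int value; } slot_t;

// Same idea as hash_object: shift away alignment bits, then wrap.
static size_t toy_hash(uintptr_t key) { return (key >> 3) % CAPACITY; }

// Linear probing: returns the slot holding `key`, or the first empty slot.
// (No full-table guard; fine for a sketch that stays mostly empty.)
static size_t toy_find(slot_t *slots, uintptr_t key, int *found) {
	size_t index = toy_hash(key);
	*found = 0;
	while (slots[index].key != 0) {
		if (slots[index].key == key) { *found = 1; break; }
		index = (index + 1) % CAPACITY;
	}
	return index;
}

static void toy_insert(slot_t *slots, uintptr_t key, int value) {
	int found;
	size_t index = toy_find(slots, key, &found);
	slots[index].key = key;
	slots[index].value = value;
}

static void toy_delete(slot_t *slots, uintptr_t key) {
	int found;
	size_t index = toy_find(slots, key, &found);
	if (!found) return;
	slots[index] = (slot_t){0};
	// Fix the probe chain: re-insert everything up to the next empty slot.
	size_t next = (index + 1) % CAPACITY;
	while (slots[next].key != 0) {
		slot_t moved = slots[next];
		slots[next] = (slot_t){0};
		toy_insert(slots, moved.key, moved.value);
		next = (next + 1) % CAPACITY;
	}
}

int main(void) {
	slot_t slots[CAPACITY] = {0};
	// 0x20 and 0x60 both hash to slot 4, forming a probe cluster.
	toy_insert(slots, 0x20, 1);
	toy_insert(slots, 0x60, 2);
	toy_delete(slots, 0x20);
	int found;
	size_t index = toy_find(slots, 0x60, &found);
	// Still reachable: 0x60 has been shifted back into slot 4.
	printf("0x60 %s at slot %zu\n", found ? "found" : "missing", index);
	return 0;
}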
@@ -0,0 +1,73 @@
+ // Released under the MIT License.
+ // Copyright, 2025, by Samuel Williams.
+
+ #pragma once
+
+ #include <ruby.h>
+ #include <stddef.h>
+
+ // Entry in the object table
+ struct Memory_Profiler_Object_Table_Entry {
+ 	// Object pointer (key):
+ 	VALUE object;
+ 	// The class of the allocated object:
+ 	VALUE klass;
+ 	// User-defined state from callback:
+ 	VALUE data;
+ 	// The Allocations wrapper for this class:
+ 	VALUE allocations;
+ };
+
+ // Custom object table for tracking allocations during GC.
+ // Uses system malloc/free (not ruby_xmalloc) to be safe during GC compaction.
+ // Keys are object addresses (updated during compaction).
+ struct Memory_Profiler_Object_Table {
+ 	// Strong reference count: 0 = weak (don't mark keys), >0 = strong (mark keys)
+ 	int strong;
+
+ 	size_t capacity; // Total slots
+ 	size_t count; // Used slots
+ 	struct Memory_Profiler_Object_Table_Entry *entries; // System malloc'd array
+ };
+
+ // Create a new object table with initial capacity
+ struct Memory_Profiler_Object_Table* Memory_Profiler_Object_Table_new(size_t initial_capacity);
+
+ // Free the table and all its memory
+ void Memory_Profiler_Object_Table_free(struct Memory_Profiler_Object_Table *table);
+
+ // Insert an object, returns pointer to entry for caller to fill fields.
+ // Safe to call from postponed job (not during GC).
+ struct Memory_Profiler_Object_Table_Entry* Memory_Profiler_Object_Table_insert(struct Memory_Profiler_Object_Table *table, VALUE object);
+
+ // Lookup entry for an object. Returns pointer to entry or NULL if not found.
+ // Safe to call during FREEOBJ event handler (no allocation) - READ ONLY!
+ struct Memory_Profiler_Object_Table_Entry* Memory_Profiler_Object_Table_lookup(struct Memory_Profiler_Object_Table *table, VALUE object);
+
+ // Delete an object. Safe to call from postponed job (not during GC).
+ void Memory_Profiler_Object_Table_delete(struct Memory_Profiler_Object_Table *table, VALUE object);
+
+ // Delete by entry pointer (faster - no second lookup needed).
+ // Safe to call from postponed job (not during GC).
+ // entry must be a valid pointer from Object_Table_lookup.
+ void Memory_Profiler_Object_Table_delete_entry(struct Memory_Profiler_Object_Table *table, struct Memory_Profiler_Object_Table_Entry *entry);
+
+ // Mark all entries for GC.
+ // Must be called from dmark callback.
+ void Memory_Profiler_Object_Table_mark(struct Memory_Profiler_Object_Table *table);
+
+ // Update object pointers after compaction.
+ // Must be called from dcompact callback.
+ void Memory_Profiler_Object_Table_compact(struct Memory_Profiler_Object_Table *table);
+
+ // Get current size
+ size_t Memory_Profiler_Object_Table_size(struct Memory_Profiler_Object_Table *table);
+
+ // Increment strong reference count
+ // When strong > 0, table is strong and will mark object keys during GC
+ void Memory_Profiler_Object_Table_increment_strong(struct Memory_Profiler_Object_Table *table);
+
+ // Decrement strong reference count
+ // When strong == 0, table is weak and will not mark object keys during GC
+ void Memory_Profiler_Object_Table_decrement_strong(struct Memory_Profiler_Object_Table *table);
+
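The header comments say that `Memory_Profiler_Object_Table_mark` must be called from a dmark callback and `Memory_Profiler_Object_Table_compact` from a dcompact callback. The sketch below shows one plausible way to wire the table into a TypedData wrapper using the standard `rb_data_type_t` hooks; the `Capture_Wrapper` struct and the callback names here are assumptions for illustration, not the extension's actual wrapper.

#include <stdlib.h>
#include <ruby.h>
#include "table.h"

// Hypothetical wrapper struct owning one object table.
struct Capture_Wrapper {
	struct Memory_Profiler_Object_Table *objects;
};

static void capture_wrapper_mark(void *pointer) {
	struct Capture_Wrapper *wrapper = pointer;
	// Marks klass/data/allocations; marks object keys only while strong > 0.
	Memory_Profiler_Object_Table_mark(wrapper->objects);
}

static void capture_wrapper_compact(void *pointer) {
	struct Capture_Wrapper *wrapper = pointer;
	// Updates moved VALUEs and rehashes if any object key moved.
	Memory_Profiler_Object_Table_compact(wrapper->objects);
}

static void capture_wrapper_free(void *pointer) {
	struct Capture_Wrapper *wrapper = pointer;
	Memory_Profiler_Object_Table_free(wrapper->objects);
	free(wrapper);
}

static const rb_data_type_t Capture_Wrapper_type = {
	.wrap_struct_name = "Memory::Profiler::CaptureWrapper",
	.function = {
		.dmark = capture_wrapper_mark,
		.dfree = capture_wrapper_free,
		.dcompact = capture_wrapper_compact,
	},
	.flags = RUBY_TYPED_FREE_IMMEDIATELY,
};

// Recording an allocation outside of GC (for example, from a postponed job):
static void capture_wrapper_record(struct Capture_Wrapper *wrapper, VALUE object, VALUE klass) {
	struct Memory_Profiler_Object_Table_Entry *entry =
		Memory_Profiler_Object_Table_insert(wrapper->objects, object);
	// Insert zeroes klass/data/allocations for new entries; fill what we track.
	entry->klass = klass;
}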
@@ -3,4 +3,4 @@
  # Released under the MIT License.
  # Copyright, 2025, by Samuel Williams.
 
- require "Memory_Profiler"
+ require_relative "native"
@@ -0,0 +1,9 @@
+ # frozen_string_literal: true
+
+ # Released under the MIT License.
+ # Copyright, 2025, by Samuel Williams.
+
+ # Load objspace first so InternalObjectWrapper is available
+ require "objspace"
+
+ require "Memory_Profiler"
@@ -142,6 +142,28 @@ module Memory
  @capture.stop
  end
 
+ # Clear tracking data for a class.
+ def clear(klass)
+ tree = @call_trees[klass]
+ tree&.clear!
+ end
+
+ # Clear all tracking data.
+ def clear_all!
+ @call_trees.each_value(&:clear!)
+ @capture.clear
+ end
+
+ # Stop all tracking and clean up.
+ def stop!
+ @capture.stop
+ @call_trees.each_key do |klass|
+ @capture.untrack(klass)
+ end
+ @capture.clear
+ @call_trees.clear
+ end
+
  # Run periodic sampling in a loop.
  #
  # Samples allocation counts at the specified interval and reports when
@@ -210,9 +232,9 @@ module Memory
  tree = @call_trees[klass] = CallTree.new
 
  # Register callback on allocations object:
- # - On :newobj - returns state (leaf node) which C extension stores
- # - On :freeobj - receives state back from C extension
- allocations.track do |klass, event, state|
+ # - On :newobj - returns data (leaf node) which C extension stores
+ # - On :freeobj - receives data back from C extension
+ allocations.track do |klass, event, data|
  case event
  when :newobj
  # Capture call stack and record in tree
@@ -224,8 +246,8 @@ module Memory
  end
  # Return nil or the node - C will store whatever we return.
  when :freeobj
- # Decrement using the state (leaf node) passed back from then native extension:
- state&.decrement_path!
+ # Decrement using the data (leaf node) passed back from then native extension:
+ data&.decrement_path!
  end
  rescue Exception => error
  warn "Error in allocation tracking: #{error.message}\n#{error.backtrace.join("\n")}"
@@ -256,42 +278,40 @@ module Memory
  # Get allocation statistics for a tracked class.
  #
  # @parameter klass [Class] The class to get statistics for.
- # @returns [Hash] Statistics including total, retained, paths, and hotspots.
- def analyze(klass)
- call_tree = @call_trees[klass]
- allocations = @capture[klass]
+ # @parameter allocation_roots [Boolean] Include call tree showing where allocations occurred (default: true if available).
+ # @parameter retained_roots [Boolean] Compute object graph showing what's retaining allocations (default: false, can be slow for large graphs).
+ # @parameter retained_addresses [Boolean | Integer] Include memory addresses of retained objects for correlation with heap dumps (default: 1000).
+ # @returns [Hash] Statistics including allocations, allocation_roots (call tree), retained_roots (object graph), and retained_addresses (array of memory addresses) .
+ def analyze(klass, allocation_roots: true, retained_addresses: 1000, retained_minimum: 100)
+ unless allocations = @capture[klass]
+ return nil
+ end
 
- return nil unless call_tree or allocations
+ if retained_minimum && allocations.retained_count < retained_minimum
+ return nil
+ end
 
- {
+ result = {
  allocations: allocations&.as_json,
- call_tree: call_tree&.as_json
  }
- end
-
- # @deprecated Use {analyze} instead.
- alias statistics analyze
-
- # Clear tracking data for a class.
- def clear(klass)
- tree = @call_trees[klass]
- tree&.clear!
- end
-
- # Clear all tracking data.
- def clear_all!
- @call_trees.each_value(&:clear!)
- @capture.clear
- end
-
- # Stop all tracking and clean up.
- def stop!
- @capture.stop
- @call_trees.each_key do |klass|
- @capture.untrack(klass)
+
+ if allocation_roots
+ if call_tree_data = @call_trees[klass]
+ result[:allocation_roots] = call_tree_data.as_json
+ end
  end
- @capture.clear
- @call_trees.clear
+
+ if retained_addresses
+ addresses = []
+ @capture.each_object(klass) do |object, state|
+ addresses << Memory::Profiler.address_of(object)
+ break if retained_addresses.is_a?(Integer) && addresses.size >= retained_addresses
+ end
+
+ result[:retained_addresses] = addresses
+ end
+
+ result
  end
 
  private
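Taken together, the new `analyze` signature and the lifecycle methods added above suggest usage along the following lines. This is only a sketch: it assumes `sampler` is an already-constructed `Memory::Profiler::Sampler` that is tracking `MyClass` (construction and tracking setup elided), and `MyClass` is a placeholder name.

# `sampler` is assumed to be a Memory::Profiler::Sampler already tracking MyClass.
report = sampler.analyze(MyClass,
	allocation_roots: true,     # include the call tree of allocation sites
	retained_addresses: 1000,   # collect at most 1000 retained-object addresses
	retained_minimum: 100       # returns nil if fewer than 100 objects are retained
)

if report
	pp report[:allocations]         # counts, as produced by Allocations#as_json
	pp report[:allocation_roots]    # call tree, if one was recorded for MyClass
	pp report[:retained_addresses]  # array of raw object addresses
end

# When finished, untrack every class and clear captured state:
sampler.stop!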
@@ -7,7 +7,7 @@
  module Memory
  # @namespace
  module Profiler
- VERSION = "1.3.0"
+ VERSION = "1.5.0"
  end
  end
 
data/readme.md CHANGED
@@ -22,6 +22,19 @@ Please see the [project documentation](https://socketry.github.io/memory-profile
 
  Please see the [project releases](https://socketry.github.io/memory-profiler/releases/index) for all releases.
 
+ ### v1.5.0
+
+ - Add `Capture#each_object` for getting all retained objects.
+ - Add `retained_addresses:` option to `Sampler#analyze` to capture addresses.
+ - Add `Sampler#analyze(retained_minimum: 100)` - if the retained\_size is less than this, the analyse won't proceed.
+ - Remove `Memory::Profiler::Graph` - it's too slow for practical use.
+ - Add `Memory::Profiler.address_of(object)` to get the memory address of an object.
+
+ ### v1.4.0
+
+ - Implement [Cooper-Harvey-Kennedy](https://www.cs.tufts.edu/~nr/cs257/archive/keith-cooper/dom14.pdf) algorithm for finding root objects in memory leaks.
+ - Rework capture to track objects by `object_id` exclusively.
+
  ### v1.3.0
 
  - **Breaking**: Renamed `Capture#count_for` to `Capture#retained_count_of` for better clarity and consistency.
@@ -71,14 +84,6 @@ Please see the [project releases](https://socketry.github.io/memory-profiler/rel
  - Fixed allocation/deallocation counts being inaccurate when objects are allocated during callbacks or freed after compaction.
  - `Capture#clear` now raises `RuntimeError` if called while capture is running. Call `stop()` before `clear()`.
 
- ### v1.1.9
-
- - More write barriers...
-
- ### v1.1.8
-
- - Use single global queue for event handling to avoid incorrect ordering.
-
  ## Contributing
 
  We welcome contributions to this project.
data/releases.md CHANGED
@@ -1,5 +1,18 @@
  # Releases
 
+ ## v1.5.0
+
+ - Add `Capture#each_object` for getting all retained objects.
+ - Add `retained_addresses:` option to `Sampler#analyze` to capture addresses.
+ - Add `Sampler#analyze(retained_minimum: 100)` - if the retained\_size is less than this, the analyse won't proceed.
+ - Remove `Memory::Profiler::Graph` - it's too slow for practical use.
+ - Add `Memory::Profiler.address_of(object)` to get the memory address of an object.
+
+ ## v1.4.0
+
+ - Implement [Cooper-Harvey-Kennedy](https://www.cs.tufts.edu/~nr/cs257/archive/keith-cooper/dom14.pdf) algorithm for finding root objects in memory leaks.
+ - Rework capture to track objects by `object_id` exclusively.
+
  ## v1.3.0
 
  - **Breaking**: Renamed `Capture#count_for` to `Capture#retained_count_of` for better clarity and consistency.
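Since the release notes mention both `Capture#each_object` and `Memory::Profiler.address_of`, here is a hedged sketch of how they might be combined to correlate retained objects with an `ObjectSpace` heap dump. It assumes `capture` is a `Memory::Profiler::Capture` already tracking `MyClass`; `MyClass` and the output path are placeholders.

require "objspace"

# `capture` is assumed to be a Memory::Profiler::Capture tracking MyClass.
addresses = []
capture.each_object(MyClass) do |object, state|
	addresses << Memory::Profiler.address_of(object)
end

# Dump the heap; each record in the dump has an "address" field that can be
# correlated with the addresses collected above (formats may need normalizing).
File.open("heap.json", "w") do |file|
	ObjectSpace.dump_all(output: file)
end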
data.tar.gz.sig CHANGED
Binary file
metadata CHANGED
@@ -1,7 +1,7 @@
  --- !ruby/object:Gem::Specification
  name: memory-profiler
  version: !ruby/object:Gem::Version
- version: 1.3.0
+ version: 1.5.0
  platform: ruby
  authors:
  - Samuel Williams
@@ -55,10 +55,13 @@ files:
  - ext/memory/profiler/events.h
  - ext/memory/profiler/profiler.c
  - ext/memory/profiler/queue.h
+ - ext/memory/profiler/table.c
+ - ext/memory/profiler/table.h
  - lib/memory/profiler.rb
  - lib/memory/profiler/allocations.rb
  - lib/memory/profiler/call_tree.rb
  - lib/memory/profiler/capture.rb
+ - lib/memory/profiler/native.rb
  - lib/memory/profiler/sampler.rb
  - lib/memory/profiler/version.rb
  - license.md
metadata.gz.sig CHANGED
Binary file