memory-profiler 1.5.0 → 1.5.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
checksums.yaml CHANGED
@@ -1,7 +1,7 @@
 ---
 SHA256:
-  metadata.gz: b936617787e483bcf790aee25bb224bf10a199f431dbb8ac51eff005504a44d4
-  data.tar.gz: 587f38cef39c639c9d9e2efbffdd325cffdfbd3000dddbca46f5aba895a66a21
+  metadata.gz: c1f0d854f80d796037192a81dfe2912291e0653510755e06d0a7e85e8b84a4ea
+  data.tar.gz: 11e5bbe54085baa3cc2c085c947b2ef9c74f3e85a44a59249ea2c5e41bc4a2bd
 SHA512:
-  metadata.gz: c483112cf1dd1c82ea679afe3b86007bf20b2674558ac676fc2c66cd07f651787a09198b9f49fb3dd28f1f9b64865dfac9cc249d91b6b819e3849e0cc9c9ab0b
-  data.tar.gz: 0de0b5d28eb0f5af2b72ec0aa44ceaf8d0401822949ee4512f922b6caa730e1d69a6b2997fac633140168b3b8de93bdbf4c76223ae6ef789520ae5566ef23f08
+  metadata.gz: '05866071c2c1a20e868d8ca0c11f9951595bac35b86e051863ecde8c11e581d7c92780826c4816c087764f9cdc078859402e6d718e272f6a337dc59ab69d2b63'
+  data.tar.gz: e070faca1aa563faff377a56cb7287e1dbf369b65dc534c623425ad33c83e7d938925a25f470821e2134848ed2db7adce94408736e0fead52bba6cbc07ec5a9d
checksums.yaml.gz.sig CHANGED
Binary file
@@ -20,7 +20,6 @@ static VALUE Memory_Profiler_Capture = Qnil;
 // Event symbols:
 static VALUE sym_newobj, sym_freeobj;
 
-
 // Main capture state (per-instance).
 struct Memory_Profiler_Capture {
 	// Master switch - is tracking active? (set by start/stop).
@@ -199,7 +198,6 @@ static void Memory_Profiler_Capture_process_newobj(VALUE self, VALUE klass, VALU
 	RB_OBJ_WRITTEN(self, Qnil, object);
 	RB_OBJ_WRITE(self, &entry->klass, klass);
 	RB_OBJ_WRITE(self, &entry->data, data);
-	RB_OBJ_WRITE(self, &entry->allocations, allocations);
 
 	if (DEBUG) fprintf(stderr, "[NEWOBJ] Object inserted into table: %p\n", (void*)object);
 
@@ -227,7 +225,15 @@ static void Memory_Profiler_Capture_process_freeobj(VALUE capture_value, VALUE u
 
 	VALUE klass = entry->klass;
 	VALUE data = entry->data;
-	VALUE allocations = entry->allocations;
+
+	// Look up allocations from tracked table:
+	st_data_t allocations_data;
+	if (!st_lookup(capture->tracked, (st_data_t)klass, &allocations_data)) {
+		// Class not tracked - shouldn't happen, but be defensive:
+		if (DEBUG) fprintf(stderr, "[FREEOBJ] Class not found in tracked: %p\n", (void*)klass);
+		goto done;
+	}
+	VALUE allocations = (VALUE)allocations_data;
 
 	// Delete by entry pointer (faster - no second lookup!)
 	Memory_Profiler_Object_Table_delete_entry(capture->states, entry);
@@ -596,12 +602,19 @@ static VALUE Memory_Profiler_Capture_each_object_body(VALUE arg) {
 			continue;
 		}
 
+		// Look up allocations from klass
+		st_data_t allocations_data;
+		VALUE allocations = Qnil;
+		if (st_lookup(capture->tracked, (st_data_t)entry->klass, &allocations_data)) {
+			allocations = (VALUE)allocations_data;
+		}
+
 		// Filter by allocations if specified
 		if (!NIL_P(arguments->allocations)) {
-			if (entry->allocations != arguments->allocations) continue;
+			if (allocations != arguments->allocations) continue;
 		}
 
-		rb_yield_values(2, entry->object, entry->allocations);
+		rb_yield_values(2, entry->object, allocations);
 	}
 }
 
@@ -4,12 +4,27 @@
 #include "table.h"
 #include <stdlib.h>
 #include <string.h>
+#include <stdio.h>
+
+enum {
+	DEBUG = 1,
+
+	// Performance monitoring thresholds
+
+	// Log warning if probe chain exceeds this
+	WARN_PROBE_LENGTH = 100,
+
+	// Safety limit - abort search if exceeded
+	MAX_PROBE_LENGTH = 10000,
+};
 
 // Use the Entry struct from header
 // (No local definition needed)
+const size_t INITIAL_CAPACITY = 1024;
+const float LOAD_FACTOR = 0.50; // Reduced from 0.75 to avoid clustering
 
-#define INITIAL_CAPACITY 1024
-#define LOAD_FACTOR 0.75
+
+VALUE TOMBSTONE = Qnil;
 
 // Create a new table
 struct Memory_Profiler_Object_Table* Memory_Profiler_Object_Table_new(size_t initial_capacity) {
@@ -21,9 +36,10 @@ struct Memory_Profiler_Object_Table* Memory_Profiler_Object_Table_new(size_t ini
 
 	table->capacity = initial_capacity > 0 ? initial_capacity : INITIAL_CAPACITY;
 	table->count = 0;
+	table->tombstones = 0;
 	table->strong = 0; // Start as weak table (strong == 0 means weak)
 
-	// Use calloc to zero out entries (Qnil = 0)
+	// Use calloc to zero out entries (0 = empty slot)
 	table->entries = calloc(table->capacity, sizeof(struct Memory_Profiler_Object_Table_Entry));
 
 	if (!table->entries) {
@@ -42,41 +58,153 @@ void Memory_Profiler_Object_Table_free(struct Memory_Profiler_Object_Table *tabl
 	}
 }
 
-// Simple hash function for object addresses
+// Hash function for object addresses
+// Uses multiplicative hashing with bit mixing to reduce clustering
 static inline size_t hash_object(VALUE object, size_t capacity) {
-	// Use address bits, shift right to ignore alignment
-	return ((size_t)object >> 3) % capacity;
+	size_t hash = (size_t)object;
+
+	// Remove alignment bits (objects are typically 8-byte aligned)
+	hash >>= 3;
+
+	// Multiplicative hashing (Knuth's golden ratio method)
+	// This helps distribute consecutive addresses across the table
+	hash *= 2654435761UL; // 2^32 / phi (golden ratio)
+
+	// Mix high bits into low bits for better distribution
+	hash ^= (hash >> 16);
+	hash *= 0x85ebca6b;
+	hash ^= (hash >> 13);
+	hash *= 0xc2b2ae35;
+	hash ^= (hash >> 16);
+
+	return hash % capacity;
 }
 
 // Find entry index for an object (linear probing)
 // Returns index if found, or index of empty slot if not found
-static size_t find_entry(struct Memory_Profiler_Object_Table_Entry *entries, size_t capacity, VALUE object, int *found) {
+// If table is provided (not NULL), logs statistics when probe length is excessive
+static size_t find_entry(struct Memory_Profiler_Object_Table_Entry *entries, size_t capacity, VALUE object, int *found, struct Memory_Profiler_Object_Table *table, const char *operation) {
 	size_t index = hash_object(object, capacity);
 	size_t start = index;
+	size_t probe_count = 0;
 
 	*found = 0;
 
 	do {
+		probe_count++;
+
+		// Safety check - prevent infinite loops
+		if (probe_count > MAX_PROBE_LENGTH) {
+			if (DEBUG && table) {
+				double load = (double)table->count / capacity;
+				double tomb_ratio = (double)table->tombstones / capacity;
+				fprintf(stderr, "{\"subject\":\"Memory::Profiler::ObjectTable\",\"level\":\"critical\",\"operation\":\"%s\",\"event\":\"max_probes_exceeded\",\"probe_count\":%zu,\"capacity\":%zu,\"count\":%zu,\"tombstones\":%zu,\"load_factor\":%.3f,\"tombstone_ratio\":%.3f}\n",
+					operation, probe_count, capacity, table->count, table->tombstones, load, tomb_ratio);
+			} else if (DEBUG) {
+				fprintf(stderr, "{\"subject\":\"Memory::Profiler::ObjectTable\",\"level\":\"critical\",\"operation\":\"%s\",\"event\":\"max_probes_exceeded\",\"probe_count\":%zu,\"capacity\":%zu}\n",
+					operation, probe_count, capacity);
+			}
+			return index;
+		}
+
+		// Log warning for excessive probing
+		if (DEBUG && probe_count == WARN_PROBE_LENGTH && table) {
+			double load = (double)table->count / capacity;
+			double tomb_ratio = (double)table->tombstones / capacity;
+			fprintf(stderr, "{\"subject\":\"Memory::Profiler::ObjectTable\",\"level\":\"warning\",\"operation\":\"%s\",\"event\":\"long_probe_chain\",\"probe_count\":%zu,\"capacity\":%zu,\"count\":%zu,\"tombstones\":%zu,\"load_factor\":%.3f,\"tombstone_ratio\":%.3f}\n",
+				operation, probe_count, capacity, table->count, table->tombstones, load, tomb_ratio);
+		}
+
 		if (entries[index].object == 0) {
-			// Empty slot (calloc zeros memory)
 			return index;
 		}
 
-		if (entries[index].object == object) {
-			// Found it
+		if (entries[index].object != TOMBSTONE && entries[index].object == object) {
 			*found = 1;
 			return index;
 		}
 
-		// Linear probe
 		index = (index + 1) % capacity;
 	} while (index != start);
 
-	// Table is full (shouldn't happen with load factor)
+	// Table is full
+	if (DEBUG && table) {
+		double load = (double)table->count / capacity;
+		double tomb_ratio = (double)table->tombstones / capacity;
+		fprintf(stderr, "{\"subject\":\"Memory::Profiler::ObjectTable\",\"level\":\"error\",\"operation\":\"%s\",\"event\":\"table_full\",\"probe_count\":%zu,\"capacity\":%zu,\"count\":%zu,\"tombstones\":%zu,\"load_factor\":%.3f,\"tombstone_ratio\":%.3f}\n",
+			operation, probe_count, capacity, table->count, table->tombstones, load, tomb_ratio);
+	}
 	return index;
 }
 
+// Find slot for inserting an object (linear probing)
+// Returns index to insert at - reuses tombstone slots if found
+// If object exists, returns its index with found=1
+static size_t find_insert_slot(struct Memory_Profiler_Object_Table *table, VALUE object, int *found) {
+	struct Memory_Profiler_Object_Table_Entry *entries = table->entries;
+	size_t capacity = table->capacity;
+	size_t index = hash_object(object, capacity);
+	size_t start = index;
+	size_t first_tombstone = SIZE_MAX; // Track first tombstone we encounter
+	size_t probe_count = 0;
+
+	*found = 0;
+
+	do {
+		probe_count++;
+
+		// Safety check - prevent infinite loops
+		if (probe_count > MAX_PROBE_LENGTH) {
+			if (DEBUG) {
+				double load = (double)table->count / capacity;
+				double tomb_ratio = (double)table->tombstones / capacity;
+				fprintf(stderr, "{\"subject\":\"Memory::Profiler::ObjectTable\",\"level\":\"critical\",\"operation\":\"insert\",\"event\":\"max_probes_exceeded\",\"probe_count\":%zu,\"capacity\":%zu,\"count\":%zu,\"tombstones\":%zu,\"load_factor\":%.3f,\"tombstone_ratio\":%.3f}\n",
+					probe_count, capacity, table->count, table->tombstones, load, tomb_ratio);
+			}
+			// Return tombstone if we found one, otherwise current position
+			return (first_tombstone != SIZE_MAX) ? first_tombstone : index;
+		}
+
+		// Log warning for excessive probing
+		if (DEBUG && probe_count == WARN_PROBE_LENGTH) {
+			double load = (double)table->count / capacity;
+			double tomb_ratio = (double)table->tombstones / capacity;
+			fprintf(stderr, "{\"subject\":\"Memory::Profiler::ObjectTable\",\"level\":\"warning\",\"operation\":\"insert\",\"event\":\"long_probe_chain\",\"probe_count\":%zu,\"capacity\":%zu,\"count\":%zu,\"tombstones\":%zu,\"load_factor\":%.3f,\"tombstone_ratio\":%.3f}\n",
+				probe_count, capacity, table->count, table->tombstones, load, tomb_ratio);
+		}
+
+		if (entries[index].object == 0) {
+			// Empty slot - use tombstone if we found one, otherwise this slot
+			return (first_tombstone != SIZE_MAX) ? first_tombstone : index;
+		}
+
+		if (entries[index].object == TOMBSTONE) {
+			// Remember first tombstone (but keep searching for existing object)
+			if (first_tombstone == SIZE_MAX) {
+				first_tombstone = index;
+			}
+		} else if (entries[index].object == object) {
+			// Found existing entry
+			*found = 1;
+			return index;
+		}
+
+		index = (index + 1) % capacity;
+	} while (index != start);
+
+	// Table is full
+	if (DEBUG) {
+		double load = (double)table->count / capacity;
+		double tomb_ratio = (double)table->tombstones / capacity;
+		fprintf(stderr, "{\"subject\":\"Memory::Profiler::ObjectTable\",\"level\":\"error\",\"operation\":\"insert\",\"event\":\"table_full\",\"probe_count\":%zu,\"capacity\":%zu,\"count\":%zu,\"tombstones\":%zu,\"load_factor\":%.3f,\"tombstone_ratio\":%.3f}\n",
+			probe_count, capacity, table->count, table->tombstones, load, tomb_ratio);
+	}
+	// Use tombstone slot if we found one
+	return (first_tombstone != SIZE_MAX) ? first_tombstone : index;
+}
+
 // Resize the table (only called from insert, not during GC)
+// This clears all tombstones
 static void resize_table(struct Memory_Profiler_Object_Table *table) {
 	size_t old_capacity = table->capacity;
 	struct Memory_Profiler_Object_Table_Entry *old_entries = table->entries;
@@ -84,6 +212,7 @@ static void resize_table(struct Memory_Profiler_Object_Table *table) {
 	// Double capacity
 	table->capacity = old_capacity * 2;
 	table->count = 0;
+	table->tombstones = 0; // Reset tombstones
 	table->entries = calloc(table->capacity, sizeof(struct Memory_Profiler_Object_Table_Entry));
 
 	if (!table->entries) {
@@ -93,11 +222,12 @@ static void resize_table(struct Memory_Profiler_Object_Table *table) {
 		return;
 	}
 
-	// Rehash all entries
+	// Rehash all non-tombstone entries
 	for (size_t i = 0; i < old_capacity; i++) {
-		if (old_entries[i].object != Qnil) {
+		// Skip empty slots and tombstones
+		if (old_entries[i].object != 0 && old_entries[i].object != TOMBSTONE) {
 			int found;
-			size_t new_index = find_entry(table->entries, table->capacity, old_entries[i].object, &found);
+			size_t new_index = find_entry(table->entries, table->capacity, old_entries[i].object, &found, NULL, "resize");
 			table->entries[new_index] = old_entries[i];
 			table->count++;
 		}
@@ -108,26 +238,30 @@ static void resize_table(struct Memory_Profiler_Object_Table *table) {
 
 // Insert object, returns pointer to entry for caller to fill
 struct Memory_Profiler_Object_Table_Entry* Memory_Profiler_Object_Table_insert(struct Memory_Profiler_Object_Table *table, VALUE object) {
-	// Resize if load factor exceeded
-	if ((double)table->count / table->capacity > LOAD_FACTOR) {
+	// Resize if load factor exceeded (count + tombstones)
+	// This clears tombstones and gives us fresh space
+	if ((double)(table->count + table->tombstones) / table->capacity > LOAD_FACTOR) {
 		resize_table(table);
 	}
 
 	int found;
-	size_t index = find_entry(table->entries, table->capacity, object, &found);
+	size_t index = find_insert_slot(table, object, &found);
 
 	if (!found) {
+		// New entry - check if we're reusing a tombstone slot
+		if (table->entries[index].object == TOMBSTONE) {
+			table->tombstones--; // Reusing tombstone
+		}
 		table->count++;
 		// Zero out the entry
 		table->entries[index].object = object;
 		table->entries[index].klass = 0;
 		table->entries[index].data = 0;
-		table->entries[index].allocations = 0;
+	} else {
+		// Updating existing entry
+		table->entries[index].object = object;
 	}
 
-	// Set object (might be updating existing entry)
-	table->entries[index].object = object;
-
 	// Return pointer for caller to fill fields
 	return &table->entries[index];
 }
@@ -135,7 +269,7 @@ struct Memory_Profiler_Object_Table_Entry* Memory_Profiler_Object_Table_insert(s
 // Lookup entry for object - returns pointer or NULL
 struct Memory_Profiler_Object_Table_Entry* Memory_Profiler_Object_Table_lookup(struct Memory_Profiler_Object_Table *table, VALUE object) {
 	int found;
-	size_t index = find_entry(table->entries, table->capacity, object, &found);
+	size_t index = find_entry(table->entries, table->capacity, object, &found, table, "lookup");
 
 	if (found) {
 		return &table->entries[index];
@@ -147,43 +281,18 @@ struct Memory_Profiler_Object_Table_Entry* Memory_Profiler_Object_Table_lookup(s
 // Delete object from table
 void Memory_Profiler_Object_Table_delete(struct Memory_Profiler_Object_Table *table, VALUE object) {
 	int found;
-	size_t index = find_entry(table->entries, table->capacity, object, &found);
+	size_t index = find_entry(table->entries, table->capacity, object, &found, table, "delete");
 
 	if (!found) {
 		return;
 	}
 
-	// Mark as deleted (set to 0/NULL)
-	table->entries[index].object = 0;
+	// Mark as tombstone - no rehashing needed!
+	table->entries[index].object = TOMBSTONE;
 	table->entries[index].klass = 0;
 	table->entries[index].data = 0;
-	table->entries[index].allocations = 0;
 	table->count--;
-
-	// Rehash following entries to fix probe chains
-	size_t next = (index + 1) % table->capacity;
-	while (table->entries[next].object != 0) {
-		// Save entry values
-		VALUE obj = table->entries[next].object;
-		VALUE k = table->entries[next].klass;
-		VALUE d = table->entries[next].data;
-		VALUE a = table->entries[next].allocations;
-
-		// Remove this entry (set to 0/NULL)
-		table->entries[next].object = 0;
-		table->entries[next].klass = 0;
-		table->entries[next].data = 0;
-		table->entries[next].allocations = 0;
-		table->count--;
-
-		// Reinsert (will find correct spot and fill fields)
-		struct Memory_Profiler_Object_Table_Entry *new_entry = Memory_Profiler_Object_Table_insert(table, obj);
-		new_entry->klass = k;
-		new_entry->data = d;
-		new_entry->allocations = a;
-
-		next = (next + 1) % table->capacity;
-	}
+	table->tombstones++;
 }
 
 // Mark all entries for GC
@@ -192,17 +301,17 @@ void Memory_Profiler_Object_Table_mark(struct Memory_Profiler_Object_Table *tabl
 
 	for (size_t i = 0; i < table->capacity; i++) {
 		struct Memory_Profiler_Object_Table_Entry *entry = &table->entries[i];
-		if (entry->object != 0) {
+		// Skip empty slots and tombstones
+		if (entry->object != 0 && entry->object != TOMBSTONE) {
 			// Mark object key if table is strong (strong > 0)
 			// When weak (strong == 0), object keys can be GC'd (that's how we detect frees)
 			if (table->strong > 0) {
 				rb_gc_mark_movable(entry->object);
 			}
 
-			// Always mark the other fields (klass, data, allocations) - we own these
+			// Always mark the other fields (klass, data) - we own these
 			if (entry->klass) rb_gc_mark_movable(entry->klass);
 			if (entry->data) rb_gc_mark_movable(entry->data);
-			if (entry->allocations) rb_gc_mark_movable(entry->allocations);
 		}
 	}
 }
@@ -214,7 +323,8 @@ void Memory_Profiler_Object_Table_compact(struct Memory_Profiler_Object_Table *t
 	// First pass: check if any objects moved
 	int any_moved = 0;
 	for (size_t i = 0; i < table->capacity; i++) {
-		if (table->entries[i].object != 0) {
+		// Skip empty slots and tombstones
+		if (table->entries[i].object != 0 && table->entries[i].object != TOMBSTONE) {
 			VALUE new_loc = rb_gc_location(table->entries[i].object);
 			if (new_loc != table->entries[i].object) {
 				any_moved = 1;
@@ -226,11 +336,11 @@ void Memory_Profiler_Object_Table_compact(struct Memory_Profiler_Object_Table *t
 	// If nothing moved, just update VALUE fields and we're done
 	if (!any_moved) {
 		for (size_t i = 0; i < table->capacity; i++) {
-			if (table->entries[i].object != 0) {
+			// Skip empty slots and tombstones
+			if (table->entries[i].object != 0 && table->entries[i].object != TOMBSTONE) {
 				// Update VALUE fields if they moved
 				table->entries[i].klass = rb_gc_location(table->entries[i].klass);
 				table->entries[i].data = rb_gc_location(table->entries[i].data);
-				table->entries[i].allocations = rb_gc_location(table->entries[i].allocations);
 			}
 		}
 		return;
@@ -246,24 +356,25 @@ void Memory_Profiler_Object_Table_compact(struct Memory_Profiler_Object_Table *t
 
 	size_t temp_count = 0;
 	for (size_t i = 0; i < table->capacity; i++) {
-		if (table->entries[i].object != 0) {
+		// Skip empty slots and tombstones
+		if (table->entries[i].object != 0 && table->entries[i].object != TOMBSTONE) {
 			// Update all pointers first
 			temp_entries[temp_count].object = rb_gc_location(table->entries[i].object);
 			temp_entries[temp_count].klass = rb_gc_location(table->entries[i].klass);
 			temp_entries[temp_count].data = rb_gc_location(table->entries[i].data);
-			temp_entries[temp_count].allocations = rb_gc_location(table->entries[i].allocations);
 			temp_count++;
 		}
 	}
 
-	// Clear the table (zero out all entries)
+	// Clear the table (zero out all entries, clears tombstones too)
 	memset(table->entries, 0, table->capacity * sizeof(struct Memory_Profiler_Object_Table_Entry));
 	table->count = 0;
+	table->tombstones = 0; // Compaction clears tombstones
 
 	// Reinsert all entries with new hash values
 	for (size_t i = 0; i < temp_count; i++) {
 		int found;
-		size_t index = find_entry(table->entries, table->capacity, temp_entries[i].object, &found);
+		size_t index = find_entry(table->entries, table->capacity, temp_entries[i].object, &found, NULL, "compact");
 
 		// Insert at new location
 		table->entries[index] = temp_entries[i];
@@ -284,42 +395,17 @@ void Memory_Profiler_Object_Table_delete_entry(struct Memory_Profiler_Object_Tab
 		return; // Invalid pointer
 	}
 
-	// Check if entry is actually occupied
-	if (entry->object == 0) {
-		return; // Already deleted
+	// Check if entry is actually occupied (not empty or tombstone)
+	if (entry->object == 0 || entry->object == TOMBSTONE) {
+		return; // Already deleted or empty
 	}
 
-	// Mark as deleted (set to 0/NULL)
-	entry->object = 0;
+	// Mark as tombstone - no rehashing needed!
+	entry->object = TOMBSTONE;
 	entry->klass = 0;
 	entry->data = 0;
-	entry->allocations = 0;
 	table->count--;
-
-	// Rehash following entries to fix probe chains
-	size_t next = (index + 1) % table->capacity;
-	while (table->entries[next].object != 0) {
-		// Save entry values
-		VALUE obj = table->entries[next].object;
-		VALUE k = table->entries[next].klass;
-		VALUE d = table->entries[next].data;
-		VALUE a = table->entries[next].allocations;
-
-		// Remove this entry (set to 0/NULL)
-		table->entries[next].object = 0;
-		table->entries[next].klass = 0;
-		table->entries[next].data = 0;
-		table->entries[next].allocations = 0;
-		table->count--;
-
-		// Reinsert (will find correct spot and fill fields)
-		struct Memory_Profiler_Object_Table_Entry *new_entry = Memory_Profiler_Object_Table_insert(table, obj);
-		new_entry->klass = k;
-		new_entry->data = d;
-		new_entry->allocations = a;
-
-		next = (next + 1) % table->capacity;
-	}
+	table->tombstones++;
 }
 
 // Get current size
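The delete paths above swap the old backward-rehash (clearing every entry after the deleted slot and reinserting it) for a tombstone marker, so deletion becomes a constant-time write and existing probe chains stay intact. A minimal, self-contained sketch of the same open-addressing idea, with illustrative sentinels and types rather than the gem's actual ones:

#include <stddef.h>

#define EMPTY 0
#define DEAD  ((size_t)-1)	// tombstone sentinel; real keys are neither EMPTY nor DEAD

struct slot { size_t key; };

// Tombstone delete: mark the slot dead instead of clearing it, so later
// lookups keep probing past it and probe chains are never broken.
static void table_delete(struct slot *slots, size_t capacity, size_t key) {
	size_t index = key % capacity;	// stand-in for the real hash function
	for (size_t i = 0; i < capacity; i++) {
		size_t probe = (index + i) % capacity;
		if (slots[probe].key == EMPTY) return;	// hit empty: key is not present
		if (slots[probe].key != DEAD && slots[probe].key == key) {
			slots[probe].key = DEAD;	// leave a tombstone; no rehashing needed
			return;
		}
	}
}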
@@ -14,8 +14,6 @@ struct Memory_Profiler_Object_Table_Entry {
 	VALUE klass;
 	// User-defined state from callback:
 	VALUE data;
-	// The Allocations wrapper for this class:
-	VALUE allocations;
 };
 
 // Custom object table for tracking allocations during GC.
@@ -25,8 +23,9 @@ struct Memory_Profiler_Object_Table {
 	// Strong reference count: 0 = weak (don't mark keys), >0 = strong (mark keys)
 	int strong;
 
-	size_t capacity; // Total slots
-	size_t count; // Used slots
+	size_t capacity; // Total slots
+	size_t count; // Used slots (occupied entries)
+	size_t tombstones; // Deleted slots (tombstone markers)
 	struct Memory_Profiler_Object_Table_Entry *entries; // System malloc'd array
 };
 
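Because tombstones keep occupying slots until the next rebuild, the insert path counts them toward the load factor. A small illustrative check of that trigger (the helper name is hypothetical; the constants mirror the ones introduced in this release): with the default 1024-slot table and LOAD_FACTOR 0.50, a resize fires once live entries plus tombstones exceed 512, and the rebuild rehashes only live entries, reclaiming every tombstone slot.

#include <stddef.h>

// Illustrative only: mirrors the resize condition shown in the diff above.
static int needs_resize(size_t count, size_t tombstones, size_t capacity) {
	const double LOAD_FACTOR = 0.50;
	return (double)(count + tombstones) / capacity > LOAD_FACTOR;
}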
@@ -282,7 +282,7 @@ module Memory
 		# @parameter retained_roots [Boolean] Compute object graph showing what's retaining allocations (default: false, can be slow for large graphs).
 		# @parameter retained_addresses [Boolean | Integer] Include memory addresses of retained objects for correlation with heap dumps (default: 1000).
 		# @returns [Hash] Statistics including allocations, allocation_roots (call tree), retained_roots (object graph), and retained_addresses (array of memory addresses).
-		def analyze(klass, allocation_roots: true, retained_addresses: 1000, retained_minimum: 100)
+		def analyze(klass, allocation_roots: true, retained_addresses: 100, retained_minimum: 100)
 			unless allocations = @capture[klass]
 				return nil
 			end
@@ -318,7 +318,7 @@ module Memory
 
 		# Default filter to include all locations.
 		def default_filter
-			->(location) {true}
+			->(location){true}
 		end
 
 		def prune_call_trees!
@@ -7,7 +7,7 @@
 module Memory
 	# @namespace
 	module Profiler
-		VERSION = "1.5.0"
+		VERSION = "1.5.1"
 	end
 end
 
data/readme.md CHANGED
@@ -22,6 +22,10 @@ Please see the [project documentation](https://socketry.github.io/memory-profile
 
 Please see the [project releases](https://socketry.github.io/memory-profiler/releases/index) for all releases.
 
+### v1.5.1
+
+- Improve performance of object table.
+
 ### v1.5.0
 
 - Add `Capture#each_object` for getting all retained objects.
@@ -75,15 +79,6 @@ Please see the [project releases](https://socketry.github.io/memory-profiler/rel
 
 - Double buffer shared events queues to fix queue corruption.
 
-### v1.1.10
-
-- Added `Capture#new_count` - returns total number of allocations tracked across all classes.
-- Added `Capture#free_count` - returns total number of objects freed across all classes.
-- Added `Capture#retained_count` - returns retained object count (new\_count - free\_count).
-- **Critical:** Fixed GC crash during compaction caused by missing write barriers in event queue.
-- Fixed allocation/deallocation counts being inaccurate when objects are allocated during callbacks or freed after compaction.
-- `Capture#clear` now raises `RuntimeError` if called while capture is running. Call `stop()` before `clear()`.
-
 ## Contributing
 
 We welcome contributions to this project.
data/releases.md CHANGED
@@ -1,5 +1,9 @@
 # Releases
 
+## v1.5.1
+
+- Improve performance of object table.
+
 ## v1.5.0
 
 - Add `Capture#each_object` for getting all retained objects.
data.tar.gz.sig CHANGED
Binary file
metadata CHANGED
@@ -1,7 +1,7 @@
 --- !ruby/object:Gem::Specification
 name: memory-profiler
 version: !ruby/object:Gem::Version
-  version: 1.5.0
+  version: 1.5.1
 platform: ruby
 authors:
 - Samuel Williams
metadata.gz.sig CHANGED
Binary file