thread_safety 0.1.2 → 0.1.3

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -15,11 +15,8 @@
  # include <sys/user.h>
  #endif
 
- #ifdef BUILDING_MODULAR_GC
- # define nlz_int64(x) (x == 0 ? 64 : (unsigned int)__builtin_clzll((unsigned long long)x))
- #else
- # include "internal/bits.h"
- #endif
+ #include "internal/bits.h"
+ #include "internal/hash.h"
 
  #include "ruby/ruby.h"
  #include "ruby/atomic.h"
@@ -37,26 +34,8 @@
  # include "probes.h"
  #endif
 
- #ifdef BUILDING_MODULAR_GC
- # define RB_DEBUG_COUNTER_INC(_name) ((void)0)
- # define RB_DEBUG_COUNTER_INC_IF(_name, cond) (!!(cond))
- #else
- # include "debug_counter.h"
- #endif
-
- #ifdef BUILDING_MODULAR_GC
- # define rb_asan_poison_object(_obj) (0)
- # define rb_asan_unpoison_object(_obj, _newobj_p) (0)
- # define asan_unpoisoning_object(_obj) if (true)
- # define asan_poison_memory_region(_ptr, _size) (0)
- # define asan_unpoison_memory_region(_ptr, _size, _malloc_p) (0)
- # define asan_unpoisoning_memory_region(_ptr, _size) if (true)
-
- # define VALGRIND_MAKE_MEM_DEFINED(_ptr, _size) (0)
- # define VALGRIND_MAKE_MEM_UNDEFINED(_ptr, _size) (0)
- #else
- # include "internal/sanitizers.h"
- #endif
+ #include "debug_counter.h"
+ #include "internal/sanitizers.h"
 
  /* MALLOC_HEADERS_BEGIN */
  #ifndef HAVE_MALLOC_USABLE_SIZE
@@ -182,7 +161,6 @@
  typedef struct ractor_newobj_heap_cache {
  struct free_slot *freelist;
  struct heap_page *using_page;
- size_t allocated_objects_count;
  } rb_ractor_newobj_heap_cache_t;
 
  typedef struct ractor_newobj_cache {
@@ -305,7 +283,7 @@ int ruby_rgengc_debug;
  #endif
 
  #ifndef GC_DEBUG_STRESS_TO_CLASS
- # define GC_DEBUG_STRESS_TO_CLASS 1
+ # define GC_DEBUG_STRESS_TO_CLASS RUBY_DEBUG
  #endif
 
  typedef enum {
@@ -494,6 +472,7 @@ typedef struct rb_objspace {
  } flags;
 
  rb_event_flag_t hook_events;
+ unsigned long long next_object_id;
 
  rb_heap_t heaps[HEAP_COUNT];
  size_t empty_pages_count;
@@ -612,6 +591,9 @@ typedef struct rb_objspace {
  size_t step_slots;
  } rincgc;
 
+ st_table *id_to_obj_tbl;
+ st_table *obj_to_id_tbl;
+
  #if GC_DEBUG_STRESS_TO_CLASS
  VALUE stress_to_class;
  #endif
@@ -648,9 +630,7 @@ struct rvalue_overhead {
  size_t rb_gc_impl_obj_slot_size(VALUE obj);
  # define GET_RVALUE_OVERHEAD(obj) ((struct rvalue_overhead *)((uintptr_t)obj + rb_gc_impl_obj_slot_size(obj)))
  #else
- # ifndef RVALUE_OVERHEAD
- # define RVALUE_OVERHEAD 0
- # endif
+ # define RVALUE_OVERHEAD 0
  #endif
 
  #define BASE_SLOT_SIZE (sizeof(struct RBasic) + sizeof(VALUE[RBIMPL_RVALUE_EMBED_LEN_MAX]) + RVALUE_OVERHEAD)
@@ -824,6 +804,8 @@ heap_page_in_global_empty_pages_pool(rb_objspace_t *objspace, struct heap_page *
  #define GET_HEAP_WB_UNPROTECTED_BITS(x) (&GET_HEAP_PAGE(x)->wb_unprotected_bits[0])
  #define GET_HEAP_MARKING_BITS(x) (&GET_HEAP_PAGE(x)->marking_bits[0])
 
+ #define GC_SWEEP_PAGES_FREEABLE_PER_STEP 3
+
  #define RVALUE_AGE_BITMAP_INDEX(n) (NUM_IN_PAGE(n) / (BITS_BITLENGTH / RVALUE_AGE_BIT_COUNT))
  #define RVALUE_AGE_BITMAP_OFFSET(n) ((NUM_IN_PAGE(n) % (BITS_BITLENGTH / RVALUE_AGE_BIT_COUNT)) * RVALUE_AGE_BIT_COUNT)
 
@@ -868,8 +850,8 @@ RVALUE_AGE_SET(VALUE obj, int age)
  #define stress_to_class objspace->stress_to_class
  #define set_stress_to_class(c) (stress_to_class = (c))
  #else
- #define stress_to_class ((void)objspace, 0)
- #define set_stress_to_class(c) ((void)objspace, (c))
+ #define stress_to_class (objspace, 0)
+ #define set_stress_to_class(c) (objspace, (c))
  #endif
 
  #if 0
@@ -1001,9 +983,9 @@ struct RZombie {
 
  #define RZOMBIE(o) ((struct RZombie *)(o))
 
- static bool ruby_enable_autocompact = false;
+ int ruby_enable_autocompact = 0;
  #if RGENGC_CHECK_MODE
- static gc_compact_compare_func ruby_autocompact_compare_func;
+ gc_compact_compare_func ruby_autocompact_compare_func;
  #endif
 
  static void init_mark_stack(mark_stack_t *stack);
@@ -1532,6 +1514,31 @@ minimum_slots_for_heap(rb_objspace_t *objspace, rb_heap_t *heap)
  return gc_params.heap_init_slots[heap_idx];
  }
 
+ static int
+ object_id_cmp(st_data_t x, st_data_t y)
+ {
+ if (RB_TYPE_P(x, T_BIGNUM)) {
+ return !rb_big_eql(x, y);
+ }
+ else {
+ return x != y;
+ }
+ }
+
+ static st_index_t
+ object_id_hash(st_data_t n)
+ {
+ return FIX2LONG(rb_hash((VALUE)n));
+ }
+
+ #define OBJ_ID_INCREMENT (RUBY_IMMEDIATE_MASK + 1)
+ #define OBJ_ID_INITIAL (OBJ_ID_INCREMENT)
+
+ static const struct st_hash_type object_id_hash_type = {
+ object_id_cmp,
+ object_id_hash,
+ };
+
  /* garbage objects will be collected soon. */
  bool
  rb_gc_impl_garbage_object_p(void *objspace_ptr, VALUE ptr)
@@ -1557,6 +1564,56 @@ rb_gc_impl_garbage_object_p(void *objspace_ptr, VALUE ptr)
  !RVALUE_MARKED(objspace, ptr);
  }
 
+ VALUE
+ rb_gc_impl_object_id_to_ref(void *objspace_ptr, VALUE object_id)
+ {
+ rb_objspace_t *objspace = objspace_ptr;
+
+ VALUE obj;
+ if (st_lookup(objspace->id_to_obj_tbl, object_id, &obj) &&
+ !rb_gc_impl_garbage_object_p(objspace, obj)) {
+ return obj;
+ }
+
+ if (rb_funcall(object_id, rb_intern(">="), 1, ULL2NUM(objspace->next_object_id))) {
+ rb_raise(rb_eRangeError, "%+"PRIsVALUE" is not id value", rb_funcall(object_id, rb_intern("to_s"), 1, INT2FIX(10)));
+ }
+ else {
+ rb_raise(rb_eRangeError, "%+"PRIsVALUE" is recycled object", rb_funcall(object_id, rb_intern("to_s"), 1, INT2FIX(10)));
+ }
+ }
+
+ VALUE
+ rb_gc_impl_object_id(void *objspace_ptr, VALUE obj)
+ {
+ VALUE id;
+ rb_objspace_t *objspace = objspace_ptr;
+
+ unsigned int lev = rb_gc_vm_lock();
+ if (FL_TEST(obj, FL_SEEN_OBJ_ID)) {
+ st_data_t val;
+ if (st_lookup(objspace->obj_to_id_tbl, (st_data_t)obj, &val)) {
+ id = (VALUE)val;
+ }
+ else {
+ rb_bug("rb_gc_impl_object_id: FL_SEEN_OBJ_ID flag set but not found in table");
+ }
+ }
+ else {
+ GC_ASSERT(!st_lookup(objspace->obj_to_id_tbl, (st_data_t)obj, NULL));
+
+ id = ULL2NUM(objspace->next_object_id);
+ objspace->next_object_id += OBJ_ID_INCREMENT;
+
+ st_insert(objspace->obj_to_id_tbl, (st_data_t)obj, (st_data_t)id);
+ st_insert(objspace->id_to_obj_tbl, (st_data_t)id, (st_data_t)obj);
+ FL_SET(obj, FL_SEEN_OBJ_ID);
+ }
+ rb_gc_vm_unlock(lev);
+
+ return id;
+ }
+
  static void free_stack_chunks(mark_stack_t *);
  static void mark_stack_free_cache(mark_stack_t *);
  static void heap_page_free(rb_objspace_t *objspace, struct heap_page *page);
@@ -1714,7 +1771,13 @@ heap_page_free(rb_objspace_t *objspace, struct heap_page *page)
  static void
  heap_pages_free_unused_pages(rb_objspace_t *objspace)
  {
- if (objspace->empty_pages != NULL && heap_pages_freeable_pages > 0) {
+ size_t pages_to_keep_count =
+ // Get number of pages estimated for the smallest size pool
+ CEILDIV(objspace->heap_pages.allocatable_slots, HEAP_PAGE_OBJ_LIMIT) *
+ // Estimate the average slot size multiple
+ (1 << (HEAP_COUNT / 2));
+
+ if (objspace->empty_pages != NULL && objspace->empty_pages_count > pages_to_keep_count) {
  GC_ASSERT(objspace->empty_pages_count > 0);
  objspace->empty_pages = NULL;
  objspace->empty_pages_count = 0;
@@ -1723,15 +1786,15 @@ heap_pages_free_unused_pages(rb_objspace_t *objspace)
  for (i = j = 0; i < rb_darray_size(objspace->heap_pages.sorted); i++) {
  struct heap_page *page = rb_darray_get(objspace->heap_pages.sorted, i);
 
- if (heap_page_in_global_empty_pages_pool(objspace, page) && heap_pages_freeable_pages > 0) {
+ if (heap_page_in_global_empty_pages_pool(objspace, page) && pages_to_keep_count == 0) {
  heap_page_free(objspace, page);
- heap_pages_freeable_pages--;
  }
  else {
- if (heap_page_in_global_empty_pages_pool(objspace, page)) {
+ if (heap_page_in_global_empty_pages_pool(objspace, page) && pages_to_keep_count > 0) {
  page->free_next = objspace->empty_pages;
  objspace->empty_pages = page;
  objspace->empty_pages_count++;
+ pages_to_keep_count--;
  }
 
  if (i != j) {
@@ -1898,7 +1961,7 @@ heap_page_allocate(rb_objspace_t *objspace)
  }
  }
 
- rb_darray_insert_without_gc(&objspace->heap_pages.sorted, hi, page);
+ rb_darray_insert(&objspace->heap_pages.sorted, hi, page);
 
  if (heap_pages_lomem == 0 || heap_pages_lomem > start) heap_pages_lomem = start;
  if (heap_pages_himem < end) heap_pages_himem = end;
@@ -1963,33 +2026,29 @@ heap_add_page(rb_objspace_t *objspace, rb_heap_t *heap, struct heap_page *page)
  static int
  heap_page_allocate_and_initialize(rb_objspace_t *objspace, rb_heap_t *heap)
  {
- gc_report(1, objspace, "heap_page_allocate_and_initialize: rb_darray_size(objspace->heap_pages.sorted): %"PRIdSIZE", "
+ if (objspace->heap_pages.allocatable_slots > 0) {
+ gc_report(1, objspace, "heap_page_allocate_and_initialize: rb_darray_size(objspace->heap_pages.sorted): %"PRIdSIZE", "
  "allocatable_slots: %"PRIdSIZE", heap->total_pages: %"PRIdSIZE"\n",
  rb_darray_size(objspace->heap_pages.sorted), objspace->heap_pages.allocatable_slots, heap->total_pages);
 
- bool allocated = false;
- struct heap_page *page = heap_page_resurrect(objspace);
-
- if (page == NULL && objspace->heap_pages.allocatable_slots > 0) {
- page = heap_page_allocate(objspace);
- allocated = true;
- }
-
- if (page != NULL) {
+ struct heap_page *page = heap_page_resurrect(objspace);
+ if (page == NULL) {
+ page = heap_page_allocate(objspace);
+ }
  heap_add_page(objspace, heap, page);
  heap_add_freepage(heap, page);
 
- if (allocated) {
- if (objspace->heap_pages.allocatable_slots > (size_t)page->total_slots) {
- objspace->heap_pages.allocatable_slots -= page->total_slots;
- }
- else {
- objspace->heap_pages.allocatable_slots = 0;
- }
+ if (objspace->heap_pages.allocatable_slots > (size_t)page->total_slots) {
+ objspace->heap_pages.allocatable_slots -= page->total_slots;
  }
+ else {
+ objspace->heap_pages.allocatable_slots = 0;
+ }
+
+ return true;
  }
 
- return page != NULL;
+ return false;
  }
 
  static void
@@ -2091,10 +2150,10 @@ heap_prepare(rb_objspace_t *objspace, rb_heap_t *heap)
  static inline VALUE
  newobj_fill(VALUE obj, VALUE v1, VALUE v2, VALUE v3)
  {
- VALUE *p = (VALUE *)(obj + sizeof(struct RBasic));
- p[0] = v1;
- p[1] = v2;
- p[2] = v3;
+ VALUE *p = (VALUE *)obj;
+ p[2] = v1;
+ p[3] = v2;
+ p[4] = v3;
  return obj;
  }
 
@@ -2119,13 +2178,12 @@ rb_gc_impl_source_location_cstr(int *ptr)
  static inline VALUE
  newobj_init(VALUE klass, VALUE flags, int wb_protected, rb_objspace_t *objspace, VALUE obj)
  {
+ #if !__has_feature(memory_sanitizer)
  GC_ASSERT(BUILTIN_TYPE(obj) == T_NONE);
  GC_ASSERT((flags & FL_WB_PROTECTED) == 0);
+ #endif
  RBASIC(obj)->flags = flags;
  *((VALUE *)&RBASIC(obj)->klass) = klass;
- #if RBASIC_SHAPE_ID_FIELD
- RBASIC(obj)->shape_id = 0;
- #endif
 
  int t = flags & RUBY_T_MASK;
  if (t == T_CLASS || t == T_MODULE || t == T_ICLASS) {
@@ -2180,7 +2238,7 @@ newobj_init(VALUE klass, VALUE flags, int wb_protected, rb_objspace_t *objspace,
 
  gc_report(5, objspace, "newobj: %s\n", rb_obj_info(obj));
 
- // RUBY_DEBUG_LOG("obj:%p (%s)", (void *)obj, rb_obj_info(obj));
+ RUBY_DEBUG_LOG("obj:%p (%s)", (void *)obj, rb_obj_info(obj));
  return obj;
  }
 
@@ -2213,8 +2271,6 @@ rb_gc_impl_size_allocatable_p(size_t size)
  return size <= heap_slot_size(HEAP_COUNT - 1);
  }
 
- static const size_t ALLOCATED_COUNT_STEP = 1024;
-
  static inline VALUE
  ractor_cache_allocate_slot(rb_objspace_t *objspace, rb_ractor_newobj_cache_t *cache,
  size_t heap_idx)
@@ -2237,22 +2293,6 @@ ractor_cache_allocate_slot(rb_objspace_t *objspace, rb_ractor_newobj_cache_t *ca
  VALUE obj = (VALUE)p;
  rb_asan_unpoison_object(obj, true);
  heap_cache->freelist = p->next;
-
- if (rb_gc_multi_ractor_p()) {
- heap_cache->allocated_objects_count++;
- rb_heap_t *heap = &heaps[heap_idx];
- if (heap_cache->allocated_objects_count >= ALLOCATED_COUNT_STEP) {
- RUBY_ATOMIC_SIZE_ADD(heap->total_allocated_objects, heap_cache->allocated_objects_count);
- heap_cache->allocated_objects_count = 0;
- }
- }
- else {
- rb_heap_t *heap = &heaps[heap_idx];
- heap->total_allocated_objects++;
- GC_ASSERT(heap->total_slots >=
- (heap->total_allocated_objects - heap->total_freed_objects - heap->final_slots_count));
- }
-
  #if RGENGC_CHECK_MODE
  GC_ASSERT(rb_gc_impl_obj_slot_size(obj) == heap_slot_size(heap_idx));
  // zero clear
@@ -2364,6 +2404,7 @@ newobj_cache_miss(rb_objspace_t *objspace, rb_ractor_newobj_cache_t *cache, size
 
  if (!vm_locked) {
  lev = rb_gc_cr_lock();
+ vm_locked = true;
  unlock_vm = true;
  }
 
@@ -2405,6 +2446,12 @@ newobj_alloc(rb_objspace_t *objspace, rb_ractor_newobj_cache_t *cache, size_t he
  obj = newobj_cache_miss(objspace, cache, heap_idx, vm_locked);
  }
 
+ rb_heap_t *heap = &heaps[heap_idx];
+ heap->total_allocated_objects++;
+ GC_ASSERT(rb_gc_multi_ractor_p() ||
+ heap->total_slots >=
+ (heap->total_allocated_objects - heap->total_freed_objects - heap->final_slots_count));
+
  return obj;
  }
 
@@ -2470,8 +2517,9 @@ rb_gc_impl_new_obj(void *objspace_ptr, void *cache_ptr, VALUE klass, VALUE flags
  (void)RB_DEBUG_COUNTER_INC_IF(obj_newobj_wb_unprotected, !wb_protected);
 
  if (RB_UNLIKELY(stress_to_class)) {
- if (rb_hash_lookup2(stress_to_class, klass, Qundef) != Qundef) {
- rb_memerror();
+ long cnt = RARRAY_LEN(stress_to_class);
+ for (long i = 0; i < cnt; i++) {
+ if (klass == RARRAY_AREF(stress_to_class, i)) rb_memerror();
  }
  }
 
@@ -2570,7 +2618,7 @@ rb_gc_impl_pointer_to_heap_p(void *objspace_ptr, const void *ptr)
  return is_pointer_to_heap(objspace_ptr, ptr);
  }
 
- #define ZOMBIE_OBJ_KEPT_FLAGS (FL_FINALIZE)
+ #define ZOMBIE_OBJ_KEPT_FLAGS (FL_SEEN_OBJ_ID | FL_FINALIZE)
 
  void
  rb_gc_impl_make_zombie(void *objspace_ptr, VALUE obj, void (*dfree)(void *), void *data)
@@ -2592,6 +2640,23 @@ rb_gc_impl_make_zombie(void *objspace_ptr, VALUE obj, void (*dfree)(void *), voi
  page->heap->final_slots_count++;
  }
 
+ static void
+ obj_free_object_id(rb_objspace_t *objspace, VALUE obj)
+ {
+ st_data_t o = (st_data_t)obj, id;
+
+ GC_ASSERT(BUILTIN_TYPE(obj) == T_NONE || FL_TEST(obj, FL_SEEN_OBJ_ID));
+ FL_UNSET(obj, FL_SEEN_OBJ_ID);
+
+ if (st_delete(objspace->obj_to_id_tbl, &o, &id)) {
+ GC_ASSERT(id);
+ st_delete(objspace->id_to_obj_tbl, &id, NULL);
+ }
+ else {
+ rb_bug("Object ID seen, but not in mapping table: %s", rb_obj_info(obj));
+ }
+ }
+
  typedef int each_obj_callback(void *, void *, size_t, void *);
  typedef int each_page_callback(struct heap_page *, void *);
 
@@ -2753,8 +2818,6 @@ rb_gc_impl_define_finalizer(void *objspace_ptr, VALUE obj, VALUE block)
 
  RBASIC(obj)->flags |= FL_FINALIZE;
 
- int lev = rb_gc_vm_lock();
-
  if (st_lookup(finalizer_table, obj, &data)) {
  table = (VALUE)data;
 
@@ -2766,7 +2829,6 @@ rb_gc_impl_define_finalizer(void *objspace_ptr, VALUE obj, VALUE block)
  for (i = 0; i < len; i++) {
  VALUE recv = RARRAY_AREF(table, i);
  if (rb_equal(recv, block)) {
- rb_gc_vm_unlock(lev);
  return recv;
  }
  }
@@ -2775,13 +2837,11 @@ rb_gc_impl_define_finalizer(void *objspace_ptr, VALUE obj, VALUE block)
  rb_ary_push(table, block);
  }
  else {
- table = rb_ary_new3(2, rb_obj_id(obj), block);
+ table = rb_ary_new3(1, block);
  rb_obj_hide(table);
  st_add_direct(finalizer_table, obj, table);
  }
 
- rb_gc_vm_unlock(lev);
-
  return block;
  }
 
@@ -2793,11 +2853,7 @@ rb_gc_impl_undefine_finalizer(void *objspace_ptr, VALUE obj)
  GC_ASSERT(!OBJ_FROZEN(obj));
 
  st_data_t data = obj;
-
- int lev = rb_gc_vm_lock();
  st_delete(finalizer_table, &data, 0);
- rb_gc_vm_unlock(lev);
-
  FL_UNSET(obj, FL_FINALIZE);
  }
 
@@ -2810,17 +2866,27 @@ rb_gc_impl_copy_finalizer(void *objspace_ptr, VALUE dest, VALUE obj)
 
  if (!FL_TEST(obj, FL_FINALIZE)) return;
 
- int lev = rb_gc_vm_lock();
  if (RB_LIKELY(st_lookup(finalizer_table, obj, &data))) {
- table = rb_ary_dup((VALUE)data);
- RARRAY_ASET(table, 0, rb_obj_id(dest));
+ table = (VALUE)data;
  st_insert(finalizer_table, dest, table);
  FL_SET(dest, FL_FINALIZE);
  }
  else {
  rb_bug("rb_gc_copy_finalizer: FL_FINALIZE set but not found in finalizer_table: %s", rb_obj_info(obj));
  }
- rb_gc_vm_unlock(lev);
+ }
+
+ static VALUE
+ get_object_id_in_finalizer(rb_objspace_t *objspace, VALUE obj)
+ {
+ if (FL_TEST(obj, FL_SEEN_OBJ_ID)) {
+ return rb_gc_impl_object_id(objspace, obj);
+ }
+ else {
+ VALUE id = ULL2NUM(objspace->next_object_id);
+ objspace->next_object_id += OBJ_ID_INCREMENT;
+ return id;
+ }
  }
 
  static VALUE
@@ -2828,7 +2894,7 @@ get_final(long i, void *data)
  {
  VALUE table = (VALUE)data;
 
- return RARRAY_AREF(table, i + 1);
+ return RARRAY_AREF(table, i);
  }
 
  static void
@@ -2843,7 +2909,7 @@ run_final(rb_objspace_t *objspace, VALUE zombie)
  FL_UNSET(zombie, FL_FINALIZE);
  st_data_t table;
  if (st_delete(finalizer_table, &key, &table)) {
- rb_gc_run_obj_finalizer(RARRAY_AREF(table, 0), RARRAY_LEN(table) - 1, get_final, (void *)table);
+ rb_gc_run_obj_finalizer(get_object_id_in_finalizer(objspace, zombie), RARRAY_LEN(table), get_final, (void *)table);
  }
  else {
  rb_bug("FL_FINALIZE flag is set, but finalizers are not found");
@@ -2864,11 +2930,15 @@ finalize_list(rb_objspace_t *objspace, VALUE zombie)
  next_zombie = RZOMBIE(zombie)->next;
  page = GET_HEAP_PAGE(zombie);
 
- int lev = rb_gc_vm_lock();
-
  run_final(objspace, zombie);
+
+ int lev = rb_gc_vm_lock();
  {
  GC_ASSERT(BUILTIN_TYPE(zombie) == T_ZOMBIE);
+ if (FL_TEST(zombie, FL_SEEN_OBJ_ID)) {
+ obj_free_object_id(objspace, zombie);
+ }
+
  GC_ASSERT(page->heap->final_slots_count > 0);
  GC_ASSERT(page->final_slots > 0);
 
@@ -2980,15 +3050,16 @@ rb_gc_impl_shutdown_free_objects(void *objspace_ptr)
  }
 
  static int
- rb_gc_impl_shutdown_call_finalizer_i(st_data_t key, st_data_t val, st_data_t _data)
+ rb_gc_impl_shutdown_call_finalizer_i(st_data_t key, st_data_t val, st_data_t data)
  {
+ rb_objspace_t *objspace = (rb_objspace_t *)data;
  VALUE obj = (VALUE)key;
  VALUE table = (VALUE)val;
 
  GC_ASSERT(RB_FL_TEST(obj, FL_FINALIZE));
  GC_ASSERT(RB_BUILTIN_TYPE(val) == T_ARRAY);
 
- rb_gc_run_obj_finalizer(RARRAY_AREF(table, 0), RARRAY_LEN(table) - 1, get_final, (void *)table);
+ rb_gc_run_obj_finalizer(rb_gc_impl_object_id(objspace, obj), RARRAY_LEN(table), get_final, (void *)table);
 
  FL_UNSET(obj, FL_FINALIZE);
 
@@ -3015,7 +3086,7 @@ rb_gc_impl_shutdown_call_finalizer(void *objspace_ptr)
  }
 
  while (finalizer_table->num_entries) {
- st_foreach(finalizer_table, rb_gc_impl_shutdown_call_finalizer_i, 0);
+ st_foreach(finalizer_table, rb_gc_impl_shutdown_call_finalizer_i, (st_data_t)objspace);
  }
 
  /* run finalizers */
@@ -3130,10 +3201,6 @@ protect_page_body(struct heap_page_body *body, DWORD protect)
  DWORD old_protect;
  return VirtualProtect(body, HEAP_PAGE_SIZE, protect, &old_protect) != 0;
  }
- #elif defined(__wasi__)
- // wasi-libc's mprotect emulation does not support PROT_NONE
- enum {HEAP_PAGE_LOCK, HEAP_PAGE_UNLOCK};
- #define protect_page_body(body, protect) 1
  #else
  enum {HEAP_PAGE_LOCK = PROT_NONE, HEAP_PAGE_UNLOCK = PROT_READ | PROT_WRITE};
  #define protect_page_body(body, protect) !mprotect((body), HEAP_PAGE_SIZE, (protect))
@@ -3472,8 +3539,12 @@ gc_sweep_plane(rb_objspace_t *objspace, rb_heap_t *heap, uintptr_t p, bits_t bit
 
  rb_gc_event_hook(vp, RUBY_INTERNAL_EVENT_FREEOBJ);
 
+ bool has_object_id = FL_TEST(vp, FL_SEEN_OBJ_ID);
  rb_gc_obj_free_vm_weak_references(vp);
  if (rb_gc_obj_free(objspace, vp)) {
+ if (has_object_id) {
+ obj_free_object_id(objspace, vp);
+ }
  // always add free slots back to the swept pages freelist,
  // so that if we're compacting, we can re-use the slots
  (void)VALGRIND_MAKE_MEM_UNDEFINED((void*)p, BASE_SLOT_SIZE);
@@ -3685,19 +3756,13 @@ static int compare_pinned_slots(const void *left, const void *right, void *d);
  static void
  gc_ractor_newobj_cache_clear(void *c, void *data)
  {
- rb_objspace_t *objspace = rb_gc_get_objspace();
  rb_ractor_newobj_cache_t *newobj_cache = c;
 
  newobj_cache->incremental_mark_step_allocated_slots = 0;
 
  for (size_t heap_idx = 0; heap_idx < HEAP_COUNT; heap_idx++) {
-
  rb_ractor_newobj_heap_cache_t *cache = &newobj_cache->heap_caches[heap_idx];
 
- rb_heap_t *heap = &heaps[heap_idx];
- RUBY_ATOMIC_SIZE_ADD(heap->total_allocated_objects, cache->allocated_objects_count);
- cache->allocated_objects_count = 0;
-
  struct heap_page *page = cache->using_page;
  struct free_slot *freelist = cache->freelist;
  RUBY_DEBUG_LOG("ractor using_page:%p freelist:%p", (void *)page, (void *)freelist);
@@ -3714,6 +3779,7 @@ gc_sweep_start(rb_objspace_t *objspace)
  {
  gc_mode_transition(objspace, gc_mode_sweeping);
  objspace->rincgc.pooled_slots = 0;
+ objspace->heap_pages.allocatable_slots = 0;
 
  #if GC_CAN_COMPILE_COMPACTION
  if (objspace->flags.during_compacting) {
@@ -3750,7 +3816,7 @@ gc_sweep_finish_heap(rb_objspace_t *objspace, rb_heap_t *heap)
 
  if (swept_slots < min_free_slots &&
  /* The heap is a growth heap if it freed more slots than had empty slots. */
- ((heap->empty_slots == 0 && total_slots > 0) || heap->freed_slots > heap->empty_slots)) {
+ (heap->empty_slots == 0 || heap->freed_slots > heap->empty_slots)) {
  /* If we don't have enough slots and we have pages on the tomb heap, move
  * pages from the tomb heap to the eden heap. This may prevent page
  * creation thrashing (frequently allocating and deallocting pages) and
@@ -3766,12 +3832,10 @@ gc_sweep_finish_heap(rb_objspace_t *objspace, rb_heap_t *heap)
 
  if (swept_slots < min_free_slots) {
  /* Grow this heap if we are in a major GC or if we haven't run at least
- * RVALUE_OLD_AGE minor GC since the last major GC. */
+ * RVALUE_OLD_AGE minor GC since the last major GC. */
  if (is_full_marking(objspace) ||
  objspace->profile.count - objspace->rgengc.last_major_gc < RVALUE_OLD_AGE) {
- if (objspace->heap_pages.allocatable_slots < min_free_slots) {
- heap_allocatable_slots_expand(objspace, heap, swept_slots, heap->total_slots);
- }
+ heap_allocatable_slots_expand(objspace, heap, swept_slots, heap->total_slots);
  }
  else {
  gc_needs_major_flags |= GPR_FLAG_MAJOR_BY_NOFREE;
@@ -3821,6 +3885,7 @@ static int
  gc_sweep_step(rb_objspace_t *objspace, rb_heap_t *heap)
  {
  struct heap_page *sweep_page = heap->sweeping_page;
+ int unlink_limit = GC_SWEEP_PAGES_FREEABLE_PER_STEP;
  int swept_slots = 0;
  int pooled_slots = 0;
 
@@ -3844,7 +3909,11 @@ gc_sweep_step(rb_objspace_t *objspace, rb_heap_t *heap)
 
  heap->sweeping_page = ccan_list_next(&heap->pages, sweep_page, page_node);
 
- if (free_slots == sweep_page->total_slots) {
+ if (free_slots == sweep_page->total_slots &&
+ heap_pages_freeable_pages > 0 &&
+ unlink_limit > 0) {
+ heap_pages_freeable_pages--;
+ unlink_limit--;
  /* There are no living objects, so move this page to the global empty pages. */
  heap_unlink_page(objspace, heap, sweep_page);
 
@@ -3923,7 +3992,9 @@ gc_sweep_continue(rb_objspace_t *objspace, rb_heap_t *sweep_heap)
  for (int i = 0; i < HEAP_COUNT; i++) {
  rb_heap_t *heap = &heaps[i];
  if (!gc_sweep_step(objspace, heap)) {
- if (heap == sweep_heap && objspace->empty_pages_count == 0 && objspace->heap_pages.allocatable_slots == 0) {
+ /* sweep_heap requires a free slot but sweeping did not yield any
+ * and we cannot allocate a new page. */
+ if (heap == sweep_heap && objspace->heap_pages.allocatable_slots == 0) {
  /* Not allowed to create a new page so finish sweeping. */
  gc_sweep_rest(objspace);
  break;
@@ -4491,7 +4562,11 @@ rb_gc_impl_mark_weak(void *objspace_ptr, VALUE *ptr)
 
  rgengc_check_relation(objspace, obj);
 
- rb_darray_append_without_gc(&objspace->weak_references, ptr);
+ DURING_GC_COULD_MALLOC_REGION_START();
+ {
+ rb_darray_append(&objspace->weak_references, ptr);
+ }
+ DURING_GC_COULD_MALLOC_REGION_END();
 
  objspace->profile.weak_references_count++;
  }
@@ -4539,6 +4614,8 @@ mark_roots(rb_objspace_t *objspace, const char **categoryp)
  st_foreach(finalizer_table, pin_value, (st_data_t)objspace);
  }
 
+ st_foreach(objspace->obj_to_id_tbl, gc_mark_tbl_no_pin_i, (st_data_t)objspace);
+
  if (stress_to_class) rb_gc_mark(stress_to_class);
 
  rb_gc_save_machine_context();
@@ -5304,7 +5381,11 @@ gc_update_weak_references(rb_objspace_t *objspace)
  objspace->profile.retained_weak_references_count = retained_weak_references_count;
 
  rb_darray_clear(objspace->weak_references);
- rb_darray_resize_capa_without_gc(&objspace->weak_references, retained_weak_references_count);
+ DURING_GC_COULD_MALLOC_REGION_START();
+ {
+ rb_darray_resize_capa(&objspace->weak_references, retained_weak_references_count);
+ }
+ DURING_GC_COULD_MALLOC_REGION_END();
  }
 
  static void
@@ -5387,10 +5468,6 @@ gc_marks_finish(rb_objspace_t *objspace)
  gc_needs_major_flags |= GPR_FLAG_MAJOR_BY_NOFREE;
  }
  }
-
- if (full_marking) {
- heap_allocatable_slots_expand(objspace, NULL, sweep_slots, total_slots);
- }
  }
 
  if (full_marking) {
@@ -6006,10 +6083,9 @@ rb_gc_impl_writebarrier(void *objspace_ptr, VALUE a, VALUE b)
 
  if (RGENGC_CHECK_MODE) {
  if (SPECIAL_CONST_P(a)) rb_bug("rb_gc_writebarrier: a is special const: %"PRIxVALUE, a);
+ if (SPECIAL_CONST_P(b)) rb_bug("rb_gc_writebarrier: b is special const: %"PRIxVALUE, b);
  }
 
- if (SPECIAL_CONST_P(b)) return;
-
  GC_ASSERT(RB_BUILTIN_TYPE(a) != T_NONE);
  GC_ASSERT(RB_BUILTIN_TYPE(a) != T_MOVED);
  GC_ASSERT(RB_BUILTIN_TYPE(a) != T_ZOMBIE);
@@ -6119,58 +6195,33 @@ rb_gc_impl_writebarrier_remember(void *objspace_ptr, VALUE obj)
  }
  }
 
- struct rb_gc_object_metadata_names {
- // Must be ID only
- ID ID_wb_protected, ID_age, ID_old, ID_uncollectible, ID_marking,
- ID_marked, ID_pinned, ID_object_id, ID_shareable;
- };
-
- #define RB_GC_OBJECT_METADATA_ENTRY_COUNT (sizeof(struct rb_gc_object_metadata_names) / sizeof(ID))
- static struct rb_gc_object_metadata_entry object_metadata_entries[RB_GC_OBJECT_METADATA_ENTRY_COUNT + 1];
-
- struct rb_gc_object_metadata_entry *
- rb_gc_impl_object_metadata(void *objspace_ptr, VALUE obj)
+ // TODO: rearchitect this function to work for a generic GC
+ size_t
+ rb_gc_impl_obj_flags(void *objspace_ptr, VALUE obj, ID* flags, size_t max)
  {
  rb_objspace_t *objspace = objspace_ptr;
  size_t n = 0;
- static struct rb_gc_object_metadata_names names;
+ static ID ID_marked;
+ static ID ID_wb_protected, ID_old, ID_marking, ID_uncollectible, ID_pinned;
 
- if (!names.ID_marked) {
- #define I(s) names.ID_##s = rb_intern(#s)
+ if (!ID_marked) {
+ #define I(s) ID_##s = rb_intern(#s);
+ I(marked);
  I(wb_protected);
- I(age);
  I(old);
- I(uncollectible);
  I(marking);
- I(marked);
+ I(uncollectible);
  I(pinned);
- I(object_id);
- I(shareable);
  #undef I
  }
 
- #define SET_ENTRY(na, v) do { \
- GC_ASSERT(n <= RB_GC_OBJECT_METADATA_ENTRY_COUNT); \
- object_metadata_entries[n].name = names.ID_##na; \
- object_metadata_entries[n].val = v; \
- n++; \
- } while (0)
-
- if (!RVALUE_WB_UNPROTECTED(objspace, obj)) SET_ENTRY(wb_protected, Qtrue);
- SET_ENTRY(age, INT2FIX(RVALUE_AGE_GET(obj)));
- if (RVALUE_OLD_P(objspace, obj)) SET_ENTRY(old, Qtrue);
- if (RVALUE_UNCOLLECTIBLE(objspace, obj)) SET_ENTRY(uncollectible, Qtrue);
- if (RVALUE_MARKING(objspace, obj)) SET_ENTRY(marking, Qtrue);
- if (RVALUE_MARKED(objspace, obj)) SET_ENTRY(marked, Qtrue);
- if (RVALUE_PINNED(objspace, obj)) SET_ENTRY(pinned, Qtrue);
- if (rb_obj_id_p(obj)) SET_ENTRY(object_id, rb_obj_id(obj));
- if (FL_TEST(obj, FL_SHAREABLE)) SET_ENTRY(shareable, Qtrue);
-
- object_metadata_entries[n].name = 0;
- object_metadata_entries[n].val = 0;
- #undef SET_ENTRY
-
- return object_metadata_entries;
+ if (RVALUE_WB_UNPROTECTED(objspace, obj) == 0 && n < max) flags[n++] = ID_wb_protected;
+ if (RVALUE_OLD_P(objspace, obj) && n < max) flags[n++] = ID_old;
+ if (RVALUE_UNCOLLECTIBLE(objspace, obj) && n < max) flags[n++] = ID_uncollectible;
+ if (RVALUE_MARKING(objspace, obj) && n < max) flags[n++] = ID_marking;
+ if (RVALUE_MARKED(objspace, obj) && n < max) flags[n++] = ID_marked;
+ if (RVALUE_PINNED(objspace, obj) && n < max) flags[n++] = ID_pinned;
+ return n;
  }
 
  void *
@@ -6189,6 +6240,7 @@ rb_gc_impl_ractor_cache_free(void *objspace_ptr, void *cache)
  rb_objspace_t *objspace = objspace_ptr;
 
  objspace->live_ractor_cache_count--;
+
  gc_ractor_newobj_cache_clear(cache, NULL);
  free(cache);
  }
@@ -6388,6 +6440,7 @@ gc_start(rb_objspace_t *objspace, unsigned int reason)
  reason,
  do_full_mark, !is_incremental_marking(objspace), objspace->flags.immediate_sweep);
 
+ #if USE_DEBUG_COUNTER
  RB_DEBUG_COUNTER_INC(gc_count);
 
  if (reason & GPR_FLAG_MAJOR_MASK) {
@@ -6406,6 +6459,7 @@ gc_start(rb_objspace_t *objspace, unsigned int reason)
  (void)RB_DEBUG_COUNTER_INC_IF(gc_minor_capi, reason & GPR_FLAG_CAPI);
  (void)RB_DEBUG_COUNTER_INC_IF(gc_minor_stress, reason & GPR_FLAG_STRESS);
  }
+ #endif
 
  objspace->profile.count++;
  objspace->profile.latest_gc_info = reason;
@@ -6779,9 +6833,7 @@ rb_gc_impl_prepare_heap(void *objspace_ptr)
  gc_params.heap_free_slots_max_ratio = orig_max_free_slots;
 
  objspace->heap_pages.allocatable_slots = 0;
- heap_pages_freeable_pages = objspace->empty_pages_count;
  heap_pages_free_unused_pages(objspace_ptr);
- GC_ASSERT(heap_pages_freeable_pages == 0);
  GC_ASSERT(objspace->empty_pages_count == 0);
  objspace->heap_pages.allocatable_slots = orig_allocatable_slots;
 
@@ -6830,7 +6882,7 @@ gc_is_moveable_obj(rb_objspace_t *objspace, VALUE obj)
  case T_RATIONAL:
  case T_NODE:
  case T_CLASS:
- if (FL_TEST_RAW(obj, FL_FINALIZE)) {
+ if (FL_TEST(obj, FL_FINALIZE)) {
  /* The finalizer table is a numtable. It looks up objects by address.
  * We can't mark the keys in the finalizer table because that would
  * prevent the objects from being collected. This check prevents
@@ -6883,6 +6935,36 @@ gc_move(rb_objspace_t *objspace, VALUE src, VALUE dest, size_t src_slot_size, si
  CLEAR_IN_BITMAP(GET_HEAP_UNCOLLECTIBLE_BITS(src), src);
  CLEAR_IN_BITMAP(GET_HEAP_PAGE(src)->remembered_bits, src);
 
+ if (FL_TEST(src, FL_EXIVAR)) {
+ /* Resizing the st table could cause a malloc */
+ DURING_GC_COULD_MALLOC_REGION_START();
+ {
+ rb_mv_generic_ivar(src, dest);
+ }
+ DURING_GC_COULD_MALLOC_REGION_END();
+ }
+
+ if (FL_TEST(src, FL_SEEN_OBJ_ID)) {
+ /* If the source object's object_id has been seen, we need to update
+ * the object to object id mapping. */
+ st_data_t srcid = (st_data_t)src, id;
+
+ gc_report(4, objspace, "Moving object with seen id: %p -> %p\n", (void *)src, (void *)dest);
+ /* Resizing the st table could cause a malloc */
+ DURING_GC_COULD_MALLOC_REGION_START();
+ {
+ if (!st_delete(objspace->obj_to_id_tbl, &srcid, &id)) {
+ rb_bug("gc_move: object ID seen, but not in mapping table: %s", rb_obj_info((VALUE)src));
+ }
+
+ st_insert(objspace->obj_to_id_tbl, (st_data_t)dest, id);
+ }
+ DURING_GC_COULD_MALLOC_REGION_END();
+ }
+ else {
+ GC_ASSERT(!st_lookup(objspace->obj_to_id_tbl, (st_data_t)src, NULL));
+ }
+
  /* Move the object */
  memcpy((void *)dest, (void *)src, MIN(src_slot_size, slot_size));
 
@@ -7045,24 +7127,6 @@ gc_ref_update(void *vstart, void *vend, size_t stride, rb_objspace_t *objspace,
  return 0;
  }
 
- static int
- gc_update_references_weak_table_i(VALUE obj, void *data)
- {
- int ret;
- asan_unpoisoning_object(obj) {
- ret = BUILTIN_TYPE(obj) == T_MOVED ? ST_REPLACE : ST_CONTINUE;
- }
- return ret;
- }
-
- static int
- gc_update_references_weak_table_replace_i(VALUE *obj, void *data)
- {
- *obj = rb_gc_location(*obj);
-
- return ST_CONTINUE;
- }
-
  static void
  gc_update_references(rb_objspace_t *objspace)
  {
@@ -7087,21 +7151,12 @@ gc_update_references(rb_objspace_t *objspace)
  }
  }
  }
-
+ gc_ref_update_table_values_only(objspace->obj_to_id_tbl);
+ gc_update_table_refs(objspace->id_to_obj_tbl);
  gc_update_table_refs(finalizer_table);
 
  rb_gc_update_vm_references((void *)objspace);
 
- for (int table = 0; table < RB_GC_VM_WEAK_TABLE_COUNT; table++) {
- rb_gc_vm_weak_table_foreach(
- gc_update_references_weak_table_i,
- gc_update_references_weak_table_replace_i,
- NULL,
- false,
- table
- );
- }
-
  objspace->flags.during_reference_updating = false;
  }
 
@@ -7662,7 +7717,7 @@ rb_gc_impl_config_get(void *objspace_ptr)
  }
 
  static int
- gc_config_set_key(VALUE key, VALUE value, VALUE data)
+ gc_config_set_key(st_data_t key, st_data_t value, st_data_t data)
  {
  rb_objspace_t *objspace = (rb_objspace_t *)data;
  if (rb_sym2id(key) == rb_intern("rgengc_allow_full_mark")) {
@@ -7681,7 +7736,7 @@ rb_gc_impl_config_set(void *objspace_ptr, VALUE hash)
  rb_raise(rb_eArgError, "expected keyword arguments");
  }
 
- rb_hash_foreach(hash, gc_config_set_key, (st_data_t)objspace);
+ rb_hash_stlike_foreach(hash, gc_config_set_key, (st_data_t)objspace);
  }
 
  VALUE
@@ -7889,11 +7944,10 @@ static inline size_t
  objspace_malloc_size(rb_objspace_t *objspace, void *ptr, size_t hint)
  {
  #ifdef HAVE_MALLOC_USABLE_SIZE
- if (!hint) {
- hint = malloc_usable_size(ptr);
- }
- #endif
+ return malloc_usable_size(ptr);
+ #else
  return hint;
+ #endif
  }
 
  enum memop_type {
@@ -9191,7 +9245,7 @@ rb_gc_impl_objspace_free(void *objspace_ptr)
  for (size_t i = 0; i < rb_darray_size(objspace->heap_pages.sorted); i++) {
  heap_page_free(objspace, rb_darray_get(objspace->heap_pages.sorted, i));
  }
- rb_darray_free_without_gc(objspace->heap_pages.sorted);
+ rb_darray_free(objspace->heap_pages.sorted);
  heap_pages_lomem = 0;
  heap_pages_himem = 0;
 
@@ -9201,10 +9255,13 @@ rb_gc_impl_objspace_free(void *objspace_ptr)
  heap->total_slots = 0;
  }
 
+ st_free_table(objspace->id_to_obj_tbl);
+ st_free_table(objspace->obj_to_id_tbl);
+
  free_stack_chunks(&objspace->mark_stack);
  mark_stack_free_cache(&objspace->mark_stack);
 
- rb_darray_free_without_gc(objspace->weak_references);
+ rb_darray_free(objspace->weak_references);
 
  free(objspace);
  }
@@ -9244,63 +9301,7 @@ gc_malloc_allocations(VALUE self)
  #endif
 
  void rb_gc_impl_before_fork(void *objspace_ptr) { /* no-op */ }
- void rb_gc_impl_after_fork(void *objspace_ptr, rb_pid_t pid) {
- if (pid == 0) { /* child process */
- rb_gc_ractor_newobj_cache_foreach(gc_ractor_newobj_cache_clear, NULL);
- }
- }
-
- VALUE rb_ident_hash_new_with_size(st_index_t size);
-
- /*
- * call-seq:
- * GC.add_stress_to_class(class[, ...])
- *
- * Raises NoMemoryError when allocating an instance of the given classes.
- *
- */
- static VALUE
- rb_gcdebug_add_stress_to_class(int argc, VALUE *argv, VALUE self)
- {
- rb_objspace_t *objspace = rb_gc_get_objspace();
-
- if (!stress_to_class) {
- set_stress_to_class(rb_ident_hash_new_with_size(argc));
- }
-
- for (int i = 0; i < argc; i++) {
- VALUE klass = argv[i];
- rb_hash_aset(stress_to_class, klass, Qtrue);
- }
-
- return self;
- }
-
- /*
- * call-seq:
- * GC.remove_stress_to_class(class[, ...])
- *
- * No longer raises NoMemoryError when allocating an instance of the
- * given classes.
- *
- */
- static VALUE
- rb_gcdebug_remove_stress_to_class(int argc, VALUE *argv, VALUE self)
- {
- rb_objspace_t *objspace = rb_gc_get_objspace();
-
- if (stress_to_class) {
- for (int i = 0; i < argc; ++i) {
- rb_hash_delete(stress_to_class, argv[i]);
- }
-
- if (rb_hash_size(stress_to_class) == 0) {
- stress_to_class = 0;
- }
- }
-
- return Qnil;
- }
+ void rb_gc_impl_after_fork(void *objspace_ptr, rb_pid_t pid) { /* no-op */ }
 
  void *
  rb_gc_impl_objspace_alloc(void)
@@ -9332,8 +9333,8 @@ rb_gc_impl_objspace_init(void *objspace_ptr)
  ccan_list_head_init(&heap->pages);
  }
 
- rb_darray_make_without_gc(&objspace->heap_pages.sorted, 0);
- rb_darray_make_without_gc(&objspace->weak_references, 0);
+ rb_darray_make(&objspace->heap_pages.sorted, 0);
+ rb_darray_make(&objspace->weak_references, 0);
 
  // TODO: debug why on Windows Ruby crashes on boot when GC is on.
  #ifdef _WIN32
@@ -9344,6 +9345,9 @@ rb_gc_impl_objspace_init(void *objspace_ptr)
  /* Need to determine if we can use mmap at runtime. */
  heap_page_alloc_use_mmap = INIT_HEAP_PAGE_ALLOC_USE_MMAP;
  #endif
+ objspace->next_object_id = OBJ_ID_INITIAL;
+ objspace->id_to_obj_tbl = st_init_table(&object_id_hash_type);
+ objspace->obj_to_id_tbl = st_init_numtable();
  #if RGENGC_ESTIMATE_OLDMALLOC
  objspace->rgengc.oldmalloc_increase_limit = gc_params.oldmalloc_limit_min;
  #endif
@@ -9365,7 +9369,6 @@ rb_gc_impl_init(void)
  VALUE gc_constants = rb_hash_new();
  rb_hash_aset(gc_constants, ID2SYM(rb_intern("DEBUG")), GC_DEBUG ? Qtrue : Qfalse);
  rb_hash_aset(gc_constants, ID2SYM(rb_intern("BASE_SLOT_SIZE")), SIZET2NUM(BASE_SLOT_SIZE - RVALUE_OVERHEAD));
- rb_hash_aset(gc_constants, ID2SYM(rb_intern("RBASIC_SIZE")), SIZET2NUM(sizeof(struct RBasic)));
  rb_hash_aset(gc_constants, ID2SYM(rb_intern("RVALUE_OVERHEAD")), SIZET2NUM(RVALUE_OVERHEAD));
  rb_hash_aset(gc_constants, ID2SYM(rb_intern("HEAP_PAGE_OBJ_LIMIT")), SIZET2NUM(HEAP_PAGE_OBJ_LIMIT));
  rb_hash_aset(gc_constants, ID2SYM(rb_intern("HEAP_PAGE_BITMAP_SIZE")), SIZET2NUM(HEAP_PAGE_BITMAP_SIZE));
@@ -9395,11 +9398,6 @@ rb_gc_impl_init(void)
  rb_define_singleton_method(rb_mGC, "verify_compaction_references", rb_f_notimplement, -1);
  }
 
- if (GC_DEBUG_STRESS_TO_CLASS) {
- rb_define_singleton_method(rb_mGC, "add_stress_to_class", rb_gcdebug_add_stress_to_class, -1);
- rb_define_singleton_method(rb_mGC, "remove_stress_to_class", rb_gcdebug_remove_stress_to_class, -1);
- }
-
  /* internal methods */
  rb_define_singleton_method(rb_mGC, "verify_internal_consistency", gc_verify_internal_consistency_m, 0);