thread_safety 0.1.2 → 0.1.3
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +4 -4
- data/.rubocop.yml +7 -6
- data/README.md +69 -2
- data/Rakefile +15 -3
- data/exe/thread_safety +1 -0
- data/ext/extconf.rb +18 -3
- data/ext/gc-3.4/darray.h +220 -0
- data/ext/gc-3.4/debug_counter.h +442 -0
- data/ext/{gc → gc-3.4/gc}/default/default.c +276 -278
- data/ext/{gc → gc-3.4/gc}/gc.h +4 -5
- data/ext/{gc → gc-3.4/gc}/gc_impl.h +3 -11
- data/ext/gc-3.4/internal/bits.h +667 -0
- data/ext/gc-3.4/internal/compilers.h +116 -0
- data/ext/gc-3.4/internal/hash.h +201 -0
- data/ext/gc-3.4/internal/sanitizers.h +327 -0
- data/ext/gc-3.4/internal/static_assert.h +25 -0
- data/ext/gc-3.4/internal/warnings.h +25 -0
- data/ext/gc-3.4/patches/0001-allow-rvalue-overhead-override.patch +18 -0
- data/ext/gc-4.0/gc/default/default.c +9629 -0
- data/ext/gc-4.0/gc/gc.h +262 -0
- data/ext/gc-4.0/gc/gc_impl.h +124 -0
- data/ext/thread_safety.c +127 -28
- data/lib/thread_safety/offense.rb +10 -18
- data/lib/thread_safety/patches.rb +12 -0
- data/lib/thread_safety/version.rb +1 -1
- data/lib/thread_safety.rb +8 -8
- metadata +78 -8
- /data/ext/{darray.h → gc-4.0/darray.h} +0 -0
data/ext/gc-4.0/gc/gc.h
ADDED
@@ -0,0 +1,262 @@
+#ifndef GC_GC_H
+#define GC_GC_H
+/**
+ * @author Ruby developers <ruby-core@ruby-lang.org>
+ * @copyright This file is a part of the programming language Ruby.
+ *            Permission is hereby granted, to either redistribute and/or
+ *            modify this file, provided that the conditions mentioned in the
+ *            file COPYING are met. Consult the file for details.
+ * @brief Private header for the default GC and other GC implementations
+ *        first introduced for [Feature #20470].
+ */
+#include "ruby/ruby.h"
+
+#if USE_MODULAR_GC
+#include "ruby/thread_native.h"
+
+struct rb_gc_vm_context {
+    rb_nativethread_lock_t lock;
+
+    struct rb_execution_context_struct *ec;
+};
+#endif
+
+typedef int (*vm_table_foreach_callback_func)(VALUE value, void *data);
+typedef int (*vm_table_update_callback_func)(VALUE *value, void *data);
+
+enum rb_gc_vm_weak_tables {
+    RB_GC_VM_CI_TABLE,
+    RB_GC_VM_OVERLOADED_CME_TABLE,
+    RB_GC_VM_GLOBAL_SYMBOLS_TABLE,
+    RB_GC_VM_ID2REF_TABLE,
+    RB_GC_VM_GENERIC_FIELDS_TABLE,
+    RB_GC_VM_FROZEN_STRINGS_TABLE,
+    RB_GC_VM_CC_REFINEMENT_TABLE,
+    RB_GC_VM_WEAK_TABLE_COUNT
+};
+
+#define RB_GC_VM_LOCK() rb_gc_vm_lock(__FILE__, __LINE__)
+#define RB_GC_VM_UNLOCK(lev) rb_gc_vm_unlock(lev, __FILE__, __LINE__)
+#define RB_GC_CR_LOCK() rb_gc_cr_lock(__FILE__, __LINE__)
+#define RB_GC_CR_UNLOCK(lev) rb_gc_cr_unlock(lev, __FILE__, __LINE__)
+#define RB_GC_VM_LOCK_NO_BARRIER() rb_gc_vm_lock_no_barrier(__FILE__, __LINE__)
+#define RB_GC_VM_UNLOCK_NO_BARRIER(lev) rb_gc_vm_unlock_no_barrier(lev, __FILE__, __LINE__)
+
+#if USE_MODULAR_GC
+# define MODULAR_GC_FN
+#else
+// This takes advantage of internal linkage winning when appearing first.
+// See C99 6.2.2p4.
+# define MODULAR_GC_FN static
+#endif
+
+#if USE_MODULAR_GC
+RUBY_SYMBOL_EXPORT_BEGIN
+#endif
+
+// These functions cannot be defined as static because they are used by other
+// files in Ruby.
+size_t rb_size_mul_or_raise(size_t x, size_t y, VALUE exc);
+void rb_objspace_reachable_objects_from(VALUE obj, void (func)(VALUE, void *), void *data);
+const char *rb_raw_obj_info(char *const buff, const size_t buff_size, VALUE obj);
+const char *rb_obj_info(VALUE obj);
+size_t rb_obj_memsize_of(VALUE obj);
+bool ruby_free_at_exit_p(void);
+void rb_objspace_reachable_objects_from_root(void (func)(const char *category, VALUE, void *), void *passing_data);
+void rb_gc_verify_shareable(VALUE);
+
+MODULAR_GC_FN unsigned int rb_gc_vm_lock(const char *file, int line);
+MODULAR_GC_FN void rb_gc_vm_unlock(unsigned int lev, const char *file, int line);
+MODULAR_GC_FN unsigned int rb_gc_cr_lock(const char *file, int line);
+MODULAR_GC_FN void rb_gc_cr_unlock(unsigned int lev, const char *file, int line);
+MODULAR_GC_FN unsigned int rb_gc_vm_lock_no_barrier(const char *file, int line);
+MODULAR_GC_FN void rb_gc_vm_unlock_no_barrier(unsigned int lev, const char *file, int line);
+MODULAR_GC_FN void rb_gc_vm_barrier(void);
+MODULAR_GC_FN size_t rb_gc_obj_optimal_size(VALUE obj);
+MODULAR_GC_FN void rb_gc_mark_children(void *objspace, VALUE obj);
+MODULAR_GC_FN void rb_gc_vm_weak_table_foreach(vm_table_foreach_callback_func callback, vm_table_update_callback_func update_callback, void *data, bool weak_only, enum rb_gc_vm_weak_tables table);
+MODULAR_GC_FN void rb_gc_update_object_references(void *objspace, VALUE obj);
+MODULAR_GC_FN void rb_gc_update_vm_references(void *objspace);
+MODULAR_GC_FN void rb_gc_event_hook(VALUE obj, rb_event_flag_t event);
+MODULAR_GC_FN void *rb_gc_get_objspace(void);
+MODULAR_GC_FN void rb_gc_run_obj_finalizer(VALUE objid, long count, VALUE (*callback)(long i, void *data), void *data);
+MODULAR_GC_FN void rb_gc_set_pending_interrupt(void);
+MODULAR_GC_FN void rb_gc_unset_pending_interrupt(void);
+MODULAR_GC_FN void rb_gc_obj_free_vm_weak_references(VALUE obj);
+MODULAR_GC_FN bool rb_gc_obj_free(void *objspace, VALUE obj);
+MODULAR_GC_FN void rb_gc_save_machine_context(void);
+MODULAR_GC_FN void rb_gc_mark_roots(void *objspace, const char **categoryp);
+MODULAR_GC_FN void rb_gc_ractor_newobj_cache_foreach(void (*func)(void *cache, void *data), void *data);
+MODULAR_GC_FN bool rb_gc_multi_ractor_p(void);
+MODULAR_GC_FN bool rb_gc_shutdown_call_finalizer_p(VALUE obj);
+MODULAR_GC_FN uint32_t rb_gc_get_shape(VALUE obj);
+MODULAR_GC_FN void rb_gc_set_shape(VALUE obj, uint32_t shape_id);
+MODULAR_GC_FN uint32_t rb_gc_rebuild_shape(VALUE obj, size_t heap_id);
+MODULAR_GC_FN void rb_gc_prepare_heap_process_object(VALUE obj);
+MODULAR_GC_FN bool rb_memerror_reentered(void);
+MODULAR_GC_FN bool rb_obj_id_p(VALUE);
+MODULAR_GC_FN void rb_gc_before_updating_jit_code(void);
+MODULAR_GC_FN void rb_gc_after_updating_jit_code(void);
+MODULAR_GC_FN bool rb_gc_obj_shareable_p(VALUE);
+MODULAR_GC_FN void rb_gc_rp(VALUE);
+
+#if USE_MODULAR_GC
+MODULAR_GC_FN bool rb_gc_event_hook_required_p(rb_event_flag_t event);
+MODULAR_GC_FN void *rb_gc_get_ractor_newobj_cache(void);
+MODULAR_GC_FN void rb_gc_initialize_vm_context(struct rb_gc_vm_context *context);
+MODULAR_GC_FN void rb_gc_worker_thread_set_vm_context(struct rb_gc_vm_context *context);
+MODULAR_GC_FN void rb_gc_worker_thread_unset_vm_context(struct rb_gc_vm_context *context);
+#endif
+
+#if USE_MODULAR_GC
+RUBY_SYMBOL_EXPORT_END
+#endif
+
+void rb_ractor_finish_marking(void);
+
+// -------------------Private section begin------------------------
+// Functions in this section are private to the default GC and gc.c
+
+#ifdef BUILDING_MODULAR_GC
+RBIMPL_WARNING_PUSH()
+RBIMPL_WARNING_IGNORED(-Wunused-function)
+#endif
+
+/* RGENGC_CHECK_MODE
+ * 0: disable all assertions
+ * 1: enable assertions (to debug RGenGC)
+ * 2: enable internal consistency check at each GC (for debugging)
+ * 3: enable internal consistency check at each GC steps (for debugging)
+ * 4: enable liveness check
+ * 5: show all references
+ */
+#ifndef RGENGC_CHECK_MODE
+# define RGENGC_CHECK_MODE 0
+#endif
+
+#ifndef GC_ASSERT
+# define GC_ASSERT(expr, ...) RUBY_ASSERT_MESG_WHEN(RGENGC_CHECK_MODE > 0, expr, #expr RBIMPL_VA_OPT_ARGS(__VA_ARGS__))
+#endif
+
+static int
+hash_foreach_replace_value(st_data_t key, st_data_t value, st_data_t argp, int error)
+{
+    if (rb_gc_location((VALUE)value) != (VALUE)value) {
+        return ST_REPLACE;
+    }
+    return ST_CONTINUE;
+}
+
+static int
+hash_replace_ref_value(st_data_t *key, st_data_t *value, st_data_t argp, int existing)
+{
+    *value = rb_gc_location((VALUE)*value);
+
+    return ST_CONTINUE;
+}
+
+static void
+gc_ref_update_table_values_only(st_table *tbl)
+{
+    if (!tbl || tbl->num_entries == 0) return;
+
+    if (st_foreach_with_replace(tbl, hash_foreach_replace_value, hash_replace_ref_value, 0)) {
+        rb_raise(rb_eRuntimeError, "hash modified during iteration");
+    }
+}
+
+static int
+gc_mark_tbl_no_pin_i(st_data_t key, st_data_t value, st_data_t data)
+{
+    rb_gc_mark_movable((VALUE)value);
+
+    return ST_CONTINUE;
+}
+
+static int
+hash_foreach_replace(st_data_t key, st_data_t value, st_data_t argp, int error)
+{
+    if (rb_gc_location((VALUE)key) != (VALUE)key) {
+        return ST_REPLACE;
+    }
+
+    if (rb_gc_location((VALUE)value) != (VALUE)value) {
+        return ST_REPLACE;
+    }
+
+    return ST_CONTINUE;
+}
+
+static int
+hash_replace_ref(st_data_t *key, st_data_t *value, st_data_t argp, int existing)
+{
+    if (rb_gc_location((VALUE)*key) != (VALUE)*key) {
+        *key = rb_gc_location((VALUE)*key);
+    }
+
+    if (rb_gc_location((VALUE)*value) != (VALUE)*value) {
+        *value = rb_gc_location((VALUE)*value);
+    }
+
+    return ST_CONTINUE;
+}
+
+static void
+gc_update_table_refs(st_table *tbl)
+{
+    if (!tbl || tbl->num_entries == 0) return;
+
+    if (st_foreach_with_replace(tbl, hash_foreach_replace, hash_replace_ref, 0)) {
+        rb_raise(rb_eRuntimeError, "hash modified during iteration");
+    }
+}
+
+static inline size_t
+xmalloc2_size(const size_t count, const size_t elsize)
+{
+    return rb_size_mul_or_raise(count, elsize, rb_eArgError);
+}
+
+static VALUE
+type_sym(size_t type)
+{
+    switch (type) {
+#define COUNT_TYPE(t) case (t): return ID2SYM(rb_intern(#t)); break;
+        COUNT_TYPE(T_NONE);
+        COUNT_TYPE(T_OBJECT);
+        COUNT_TYPE(T_CLASS);
+        COUNT_TYPE(T_MODULE);
+        COUNT_TYPE(T_FLOAT);
+        COUNT_TYPE(T_STRING);
+        COUNT_TYPE(T_REGEXP);
+        COUNT_TYPE(T_ARRAY);
+        COUNT_TYPE(T_HASH);
+        COUNT_TYPE(T_STRUCT);
+        COUNT_TYPE(T_BIGNUM);
+        COUNT_TYPE(T_FILE);
+        COUNT_TYPE(T_DATA);
+        COUNT_TYPE(T_MATCH);
+        COUNT_TYPE(T_COMPLEX);
+        COUNT_TYPE(T_RATIONAL);
+        COUNT_TYPE(T_NIL);
+        COUNT_TYPE(T_TRUE);
+        COUNT_TYPE(T_FALSE);
+        COUNT_TYPE(T_SYMBOL);
+        COUNT_TYPE(T_FIXNUM);
+        COUNT_TYPE(T_IMEMO);
+        COUNT_TYPE(T_UNDEF);
+        COUNT_TYPE(T_NODE);
+        COUNT_TYPE(T_ICLASS);
+        COUNT_TYPE(T_ZOMBIE);
+        COUNT_TYPE(T_MOVED);
+#undef COUNT_TYPE
+        default: return SIZET2NUM(type); break;
+    }
+}
+
+#ifdef BUILDING_MODULAR_GC
+RBIMPL_WARNING_POP()
+#endif
+// -------------------Private section end------------------------
+
+#endif
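A note on the MODULAR_GC_FN comment above ("internal linkage winning when appearing first", C99 6.2.2p4): when the first file-scope declaration of an identifier is static, a later declaration of the same identifier without static inherits the internal linkage rather than conflicting with it. The snippet below is a standalone illustration of that rule, not code from this package:

    #include <stdio.h>

    static int helper(void); /* first declaration: internal linkage, like MODULAR_GC_FN */
    int helper(void);        /* later declaration inherits internal linkage (C99 6.2.2p4) */

    int
    helper(void) /* the definition is also internal; no symbol is exported */
    {
        return 42;
    }

    int
    main(void)
    {
        printf("%d\n", helper());
        return 0;
    }

This is why, in non-modular builds, the prototypes in this header can be declared static while the definitions in gc.c omit the keyword.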
data/ext/gc-4.0/gc/gc_impl.h
ADDED
@@ -0,0 +1,124 @@
+#ifndef GC_GC_IMPL_H
+#define GC_GC_IMPL_H
+/**
+ * @author Ruby developers <ruby-core@ruby-lang.org>
+ * @copyright This file is a part of the programming language Ruby.
+ *            Permission is hereby granted, to either redistribute and/or
+ *            modify this file, provided that the conditions mentioned in the
+ *            file COPYING are met. Consult the file for details.
+ * @brief Header for GC implementations introduced in [Feature #20470].
+ */
+#include "ruby/ruby.h"
+
+#ifndef RB_GC_OBJECT_METADATA_ENTRY_DEFINED
+# define RB_GC_OBJECT_METADATA_ENTRY_DEFINED
+struct rb_gc_object_metadata_entry {
+    ID name;
+    VALUE val;
+};
+#endif
+
+#ifdef BUILDING_MODULAR_GC
+# define GC_IMPL_FN
+#else
+// `GC_IMPL_FN` is an implementation detail of `!USE_MODULAR_GC` builds
+// to have the default GC in the same translation unit as gc.c for
+// the sake of optimizer visibility. It expands to nothing unless
+// you're the default GC.
+//
+// For the default GC, do not copy-paste this when implementing
+// these functions. This takes advantage of internal linkage winning
+// when appearing first. See C99 6.2.2p4.
+# define GC_IMPL_FN static
+#endif
+
+// Bootup
+GC_IMPL_FN void *rb_gc_impl_objspace_alloc(void);
+GC_IMPL_FN void rb_gc_impl_objspace_init(void *objspace_ptr);
+GC_IMPL_FN void *rb_gc_impl_ractor_cache_alloc(void *objspace_ptr, void *ractor);
+GC_IMPL_FN void rb_gc_impl_set_params(void *objspace_ptr);
+GC_IMPL_FN void rb_gc_impl_init(void);
+GC_IMPL_FN size_t *rb_gc_impl_heap_sizes(void *objspace_ptr);
+// Shutdown
+GC_IMPL_FN void rb_gc_impl_shutdown_free_objects(void *objspace_ptr);
+GC_IMPL_FN void rb_gc_impl_objspace_free(void *objspace_ptr);
+GC_IMPL_FN void rb_gc_impl_ractor_cache_free(void *objspace_ptr, void *cache);
+// GC
+GC_IMPL_FN void rb_gc_impl_start(void *objspace_ptr, bool full_mark, bool immediate_mark, bool immediate_sweep, bool compact);
+GC_IMPL_FN bool rb_gc_impl_during_gc_p(void *objspace_ptr);
+GC_IMPL_FN void rb_gc_impl_prepare_heap(void *objspace_ptr);
+GC_IMPL_FN void rb_gc_impl_gc_enable(void *objspace_ptr);
+GC_IMPL_FN void rb_gc_impl_gc_disable(void *objspace_ptr, bool finish_current_gc);
+GC_IMPL_FN bool rb_gc_impl_gc_enabled_p(void *objspace_ptr);
+GC_IMPL_FN void rb_gc_impl_stress_set(void *objspace_ptr, VALUE flag);
+GC_IMPL_FN VALUE rb_gc_impl_stress_get(void *objspace_ptr);
+GC_IMPL_FN VALUE rb_gc_impl_config_get(void *objspace_ptr);
+GC_IMPL_FN void rb_gc_impl_config_set(void *objspace_ptr, VALUE hash);
+// Object allocation
+GC_IMPL_FN VALUE rb_gc_impl_new_obj(void *objspace_ptr, void *cache_ptr, VALUE klass, VALUE flags, bool wb_protected, size_t alloc_size);
+GC_IMPL_FN size_t rb_gc_impl_obj_slot_size(VALUE obj);
+GC_IMPL_FN size_t rb_gc_impl_heap_id_for_size(void *objspace_ptr, size_t size);
+GC_IMPL_FN bool rb_gc_impl_size_allocatable_p(size_t size);
+// Malloc
+/*
+ * BEWARE: These functions may or may not run under GVL.
+ *
+ * You might want to make them thread-safe.
+ * Garbage collecting inside is possible if and only if you
+ * already have GVL. Also raising exceptions without one is a
+ * total disaster.
+ *
+ * When you absolutely cannot allocate the requested amount of
+ * memory just return NULL (with appropriate errno set).
+ * The caller side takes care of that situation.
+ */
+GC_IMPL_FN void *rb_gc_impl_malloc(void *objspace_ptr, size_t size, bool gc_allowed);
+GC_IMPL_FN void *rb_gc_impl_calloc(void *objspace_ptr, size_t size, bool gc_allowed);
+GC_IMPL_FN void *rb_gc_impl_realloc(void *objspace_ptr, void *ptr, size_t new_size, size_t old_size, bool gc_allowed);
+GC_IMPL_FN void rb_gc_impl_free(void *objspace_ptr, void *ptr, size_t old_size);
+GC_IMPL_FN void rb_gc_impl_adjust_memory_usage(void *objspace_ptr, ssize_t diff);
+// Marking
+GC_IMPL_FN void rb_gc_impl_mark(void *objspace_ptr, VALUE obj);
+GC_IMPL_FN void rb_gc_impl_mark_and_move(void *objspace_ptr, VALUE *ptr);
+GC_IMPL_FN void rb_gc_impl_mark_and_pin(void *objspace_ptr, VALUE obj);
+GC_IMPL_FN void rb_gc_impl_mark_maybe(void *objspace_ptr, VALUE obj);
+GC_IMPL_FN void rb_gc_impl_mark_weak(void *objspace_ptr, VALUE *ptr);
+GC_IMPL_FN void rb_gc_impl_remove_weak(void *objspace_ptr, VALUE parent_obj, VALUE *ptr);
+// Compaction
+GC_IMPL_FN bool rb_gc_impl_object_moved_p(void *objspace_ptr, VALUE obj);
+GC_IMPL_FN VALUE rb_gc_impl_location(void *objspace_ptr, VALUE value);
+// Write barriers
+GC_IMPL_FN void rb_gc_impl_writebarrier(void *objspace_ptr, VALUE a, VALUE b);
+GC_IMPL_FN void rb_gc_impl_writebarrier_unprotect(void *objspace_ptr, VALUE obj);
+GC_IMPL_FN void rb_gc_impl_writebarrier_remember(void *objspace_ptr, VALUE obj);
+// Heap walking
+GC_IMPL_FN void rb_gc_impl_each_objects(void *objspace_ptr, int (*callback)(void *, void *, size_t, void *), void *data);
+GC_IMPL_FN void rb_gc_impl_each_object(void *objspace_ptr, void (*func)(VALUE obj, void *data), void *data);
+// Finalizers
+GC_IMPL_FN void rb_gc_impl_make_zombie(void *objspace_ptr, VALUE obj, void (*dfree)(void *), void *data);
+GC_IMPL_FN VALUE rb_gc_impl_define_finalizer(void *objspace_ptr, VALUE obj, VALUE block);
+GC_IMPL_FN void rb_gc_impl_undefine_finalizer(void *objspace_ptr, VALUE obj);
+GC_IMPL_FN void rb_gc_impl_copy_finalizer(void *objspace_ptr, VALUE dest, VALUE obj);
+GC_IMPL_FN void rb_gc_impl_shutdown_call_finalizer(void *objspace_ptr);
+// Forking
+GC_IMPL_FN void rb_gc_impl_before_fork(void *objspace_ptr);
+GC_IMPL_FN void rb_gc_impl_after_fork(void *objspace_ptr, rb_pid_t pid);
+// Statistics
+GC_IMPL_FN void rb_gc_impl_set_measure_total_time(void *objspace_ptr, VALUE flag);
+GC_IMPL_FN bool rb_gc_impl_get_measure_total_time(void *objspace_ptr);
+GC_IMPL_FN unsigned long long rb_gc_impl_get_total_time(void *objspace_ptr);
+GC_IMPL_FN size_t rb_gc_impl_gc_count(void *objspace_ptr);
+GC_IMPL_FN VALUE rb_gc_impl_latest_gc_info(void *objspace_ptr, VALUE key);
+GC_IMPL_FN VALUE rb_gc_impl_stat(void *objspace_ptr, VALUE hash_or_sym);
+GC_IMPL_FN VALUE rb_gc_impl_stat_heap(void *objspace_ptr, VALUE heap_name, VALUE hash_or_sym);
+GC_IMPL_FN const char *rb_gc_impl_active_gc_name(void);
+// Miscellaneous
+GC_IMPL_FN struct rb_gc_object_metadata_entry *rb_gc_impl_object_metadata(void *objspace_ptr, VALUE obj);
+GC_IMPL_FN bool rb_gc_impl_pointer_to_heap_p(void *objspace_ptr, const void *ptr);
+GC_IMPL_FN bool rb_gc_impl_garbage_object_p(void *objspace_ptr, VALUE obj);
+GC_IMPL_FN void rb_gc_impl_set_event_hook(void *objspace_ptr, const rb_event_flag_t event);
+GC_IMPL_FN void rb_gc_impl_copy_attributes(void *objspace_ptr, VALUE dest, VALUE obj);
+
+#undef GC_IMPL_FN
+
+#endif
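The BEWARE comment in the Malloc section above states the contract for rb_gc_impl_malloc and friends: they may run without the GVL, so they must not raise, and on failure they return NULL with errno set for the caller to handle. Below is a minimal sketch of a conforming allocator; the function name is hypothetical, and a real implementation would also use objspace_ptr and honor gc_allowed (for example, retrying a failed allocation after a GC when it holds the GVL):

    #include <errno.h>
    #include <stdbool.h>
    #include <stdlib.h>

    void *
    gc_impl_malloc_sketch(void *objspace_ptr, size_t size, bool gc_allowed)
    {
        (void)objspace_ptr; /* a real GC would track this allocation */
        (void)gc_allowed;   /* and maybe trigger GC before giving up */

        void *mem = malloc(size);
        if (mem == NULL) errno = ENOMEM; /* never raise here; caller handles it */
        return mem;
    }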
data/ext/thread_safety.c
CHANGED
@@ -1,43 +1,94 @@
 #include "ruby/ruby.h"
+#include "ruby/version.h"
 #include "ruby/debug.h"
 #include "gc/gc.h"
 #include "gc/gc_impl.h"
 
-
+// RVALUE_OVERHEAD must account for both ractor checking (when enabled) and our
+// fiber/thread tracking. The ractor system uses index 0 (as uint32_t, but aligned
+// to sizeof(VALUE)), so we use indices 1 and 2 for our data.
+#define RVALUE_OVERHEAD (sizeof(VALUE) * 3)
 
-
+#define OVERHEAD_FIBER_INDEX 1
+#define OVERHEAD_THREAD_INDEX 2
+
+static VALUE root_fiber = Qnil;
+static VALUE rb_cFiber;
+static bool ruby_vm_booted = false, thread_safety_globally_enabled = false;
 static VALUE mThreadSafety;
-static ID id_report_offense,
+static ID id_report_offense, id_thread_safety_enabled;
 
 size_t rb_gc_impl_obj_slot_size(VALUE obj);
 
 static VALUE
-
+get_owner_fiber(VALUE obj)
 {
-
+    VALUE fiber = ((VALUE *)(obj + rb_gc_impl_obj_slot_size(obj)))[OVERHEAD_FIBER_INDEX];
+    return fiber == Qnil ? root_fiber : fiber;
 }
 
 static VALUE
-
+get_owner_thread(VALUE obj)
 {
-    VALUE
+    return ((VALUE *)(obj + rb_gc_impl_obj_slot_size(obj)))[OVERHEAD_THREAD_INDEX];
+}
 
-
-
+static VALUE
+get_current_fiber(void)
+{
+    if (!ruby_vm_booted) {
+        return Qnil;
     }
 
-    return
+    return rb_fiber_current();
+}
+
+static VALUE
+get_current_thread(void)
+{
+    VALUE thread = rb_thread_current();
+    return thread == 0 ? Qnil : thread;
 }
 
+static void
+rb_gc_mark_children_patched(void *objspace, VALUE obj)
+{
+    rb_gc_mark(get_owner_fiber(obj));
+    rb_gc_mark(get_owner_thread(obj));
+
+    rb_gc_mark_children(objspace, obj);
+}
+#define rb_gc_mark_children rb_gc_mark_children_patched
+
 #define PATCH_GC_FUNC(ret_type, name, ...) \
     ret_type rb_gc_impl_##name##_original(__VA_ARGS__); \
     ret_type rb_gc_impl_##name(__VA_ARGS__)
 
+// Ruby 4.0 changed the modular GC interface - removed v1, v2, v3 from new_obj
+#if RUBY_API_VERSION_MAJOR >= 4
+PATCH_GC_FUNC(VALUE, new_obj, void *objspace_ptr, void *cache_ptr, VALUE klass, VALUE flags, bool wb_protected, size_t alloc_size)
+{
+    VALUE obj = rb_gc_impl_new_obj_original(objspace_ptr, cache_ptr, klass, flags, wb_protected, alloc_size);
+#else
 PATCH_GC_FUNC(VALUE, new_obj, void *objspace_ptr, void *cache_ptr, VALUE klass, VALUE flags, VALUE v1, VALUE v2, VALUE v3, bool wb_protected, size_t alloc_size)
 {
     VALUE obj = rb_gc_impl_new_obj_original(objspace_ptr, cache_ptr, klass, flags, v1, v2, v3, wb_protected, alloc_size);
-
-
+#endif
+
+    if (// Skip imemo types like call caches.
+        (flags & RUBY_T_MASK) != T_IMEMO &&
+        // klass of 0 means that it is a hidden object.
+        klass != 0 &&
+        // First time calling rb_fiber_current will allocate a Fiber object,
+        // which would cause an infinite loop here because it calls rb_fiber_current.
+        klass != rb_cFiber) {
+        ((VALUE *)(obj + rb_gc_impl_obj_slot_size(obj)))[OVERHEAD_FIBER_INDEX] = get_current_fiber();
+        ((VALUE *)(obj + rb_gc_impl_obj_slot_size(obj)))[OVERHEAD_THREAD_INDEX] = get_current_thread();
+    }
+    else {
+        ((VALUE *)(obj + rb_gc_impl_obj_slot_size(obj)))[OVERHEAD_FIBER_INDEX] = Qfalse;
+        ((VALUE *)(obj + rb_gc_impl_obj_slot_size(obj)))[OVERHEAD_THREAD_INDEX] = Qfalse;
+    }
 
     return obj;
 }
@@ -45,43 +96,91 @@ PATCH_GC_FUNC(VALUE, new_obj, void *objspace_ptr, void *cache_ptr, VALUE klass,
 
 PATCH_GC_FUNC(void, writebarrier, void *objspace_ptr, VALUE a, VALUE b)
 {
-
-
-    VALUE
-
-
-
+    if (!ruby_vm_booted) goto skip;
+
+    VALUE current_fiber = rb_fiber_current();
+    int type_a = RB_BUILTIN_TYPE(a);
+    if (// Skip imemo types like call caches.
+        type_a != T_IMEMO &&
+        // Skip Class and Module objects (internal Ruby operations).
+        type_a != T_CLASS &&
+        type_a != T_MODULE &&
+        type_a != T_ICLASS &&
         // Skip imemo types like call caches.
-        (
+        (RB_SPECIAL_CONST_P(b) || RB_BUILTIN_TYPE(b) != T_IMEMO) &&
         // Class of 0 means that it is a hidden object.
         CLASS_OF(a) != 0 &&
         // When we create a new thread, the owner is the parent thread but
        // may be written to during execution.
-        CLASS_OF(a) != rb_cThread
-
-    bool prev_gc_enabled = !rb_gc_impl_gc_enabled_p(objspace_ptr);
-    if (prev_gc_enabled) rb_gc_impl_gc_disable(objspace_ptr, false);
+        CLASS_OF(a) != rb_cThread) {
+        VALUE current_thread = rb_thread_current();
 
-
+        VALUE fiber_enabled = rb_ivar_get(current_fiber, id_thread_safety_enabled);
+        VALUE thread_enabled = rb_ivar_get(current_thread, id_thread_safety_enabled);
 
-
+        // Skip if thread_safety is not explicitly enabled on this Fiber or Thread and not enabled globally.
+        if (fiber_enabled == Qnil && thread_enabled == Qnil && !thread_safety_globally_enabled) goto skip;
+        // Skip if thread_safety is explicitly turned off on this Fiber or Thread.
+        if (fiber_enabled == Qfalse || thread_enabled == Qfalse) goto skip;
 
-
+        VALUE owner_fiber = get_owner_fiber(a);
+        VALUE owner_thread = get_owner_thread(a);
 
-    if (
+        if (owner_fiber != current_fiber) {
+            bool prev_gc_enabled = !rb_gc_impl_gc_enabled_p(objspace_ptr);
+            if (prev_gc_enabled) rb_gc_impl_gc_disable(objspace_ptr, false);
+
+            rb_ivar_set(current_fiber, id_thread_safety_enabled, Qfalse);
+
+            rb_funcall(mThreadSafety, id_report_offense, 3, a, owner_fiber, owner_thread);
+
+            rb_ivar_set(current_fiber, id_thread_safety_enabled, fiber_enabled);
+
+            if (prev_gc_enabled) rb_gc_impl_gc_enable(objspace_ptr);
+        }
     }
 
+skip:
     rb_gc_impl_writebarrier_original(objspace_ptr, a, b);
 }
 #define rb_gc_impl_writebarrier rb_gc_impl_writebarrier_original
 
+PATCH_GC_FUNC(const char *, active_gc_name, void)
+{
+    return "thread_safety";
+}
+#define rb_gc_impl_active_gc_name rb_gc_impl_active_gc_name_original
+
+static VALUE
+thread_safety_vm_booted_bang(VALUE self)
+{
+    root_fiber = rb_fiber_current();
+    rb_cFiber = rb_const_get(rb_cObject, rb_intern("Fiber"));
+    ruby_vm_booted = true;
+
+    return Qnil;
+}
+
+static VALUE
+thread_safety_enabled_set(VALUE self, VALUE val)
+{
+    thread_safety_globally_enabled = RTEST(val);
+
+    return Qnil;
+}
+
 PATCH_GC_FUNC(void, init, void)
 {
     rb_gc_register_address(&mThreadSafety);
     mThreadSafety = rb_define_module("ThreadSafety");
+    rb_define_private_method(rb_singleton_class(mThreadSafety), "vm_booted!", thread_safety_vm_booted_bang, 0);
+    rb_define_singleton_method(mThreadSafety, "enabled=", thread_safety_enabled_set, 1);
 
     id_report_offense = rb_intern("report_offense");
-
+    id_thread_safety_enabled = rb_intern("@thread_safety_enabled");
+
+    rb_global_variable(&root_fiber);
+    rb_global_variable(&rb_cFiber);
 
     rb_gc_impl_init_original();
 }
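The RVALUE_OVERHEAD comment in thread_safety.c above describes the layout this release depends on: each object slot is followed by three VALUE-sized words, with index 0 reserved for Ruby's ractor checking and indices 1 and 2 holding the allocating fiber and thread. The standalone sketch below (plain C, no Ruby headers, illustrative names and values only) mimics that pointer arithmetic, with a malloc'd buffer standing in for a heap slot:

    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>

    #define OVERHEAD_SLOTS 3 /* RVALUE_OVERHEAD is sizeof(VALUE) * 3 in the extension */
    #define OVERHEAD_FIBER_INDEX 1
    #define OVERHEAD_THREAD_INDEX 2

    int
    main(void)
    {
        size_t slot_size = 40; /* stand-in for rb_gc_impl_obj_slot_size(obj) */
        unsigned char *slot = calloc(1, slot_size + OVERHEAD_SLOTS * sizeof(uintptr_t));
        if (slot == NULL) return 1;

        /* write the "owners" just past the slot, like the patched new_obj does */
        uintptr_t *overhead = (uintptr_t *)(slot + slot_size);
        overhead[OVERHEAD_FIBER_INDEX] = 0xf1be4;  /* pretend fiber VALUE */
        overhead[OVERHEAD_THREAD_INDEX] = 0x7ead;  /* pretend thread VALUE */

        /* read them back the way get_owner_fiber/get_owner_thread do */
        printf("fiber=%#lx thread=%#lx\n",
               (unsigned long)overhead[OVERHEAD_FIBER_INDEX],
               (unsigned long)overhead[OVERHEAD_THREAD_INDEX]);

        free(slot);
        return 0;
    }

The patched write barrier then compares the stored owner against Fiber.current on every tracked write, which is what lets it flag cross-fiber and cross-thread mutations.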
data/lib/thread_safety/offense.rb
CHANGED
@@ -2,30 +2,22 @@
 
 module ThreadSafety
   class Offense
-    attr_reader :object, :backtrace, :access_thread
+    attr_reader :object, :backtrace, :created_fiber, :created_thread, :access_fiber, :access_thread
 
-
-
-      @created_thread_obj_id = created_thread_obj_id
-      @access_thread = access_thread
-      @backtrace = caller_locations(2).freeze
-    end
+    # Ruby 3.4 includes an extra `Class#new` frame in the call stack compared to Ruby 4.0
+    CALLER_SKIP = RUBY_VERSION >= "4.0" ? 2 : 3
 
-    def created_thread
-      @
+    def initialize(object, created_fiber, created_thread)
+      @object = object
+      @created_fiber = created_fiber
+      @created_thread = created_thread
+      @access_fiber = Fiber.current
+      @access_thread = Thread.current
+      @backtrace = caller_locations(CALLER_SKIP).freeze
     end
 
     def to_s
       "#<ThreadSafety::Offense object=#{object} created_thread=#{created_thread} access_thread=#{access_thread} backtrace=#{backtrace[0]}>"
     end
-
-    private
-
-    def find_thread_from_obj_id(obj_id)
-      # -1 is the main_thread_id
-      return Thread.main if obj_id == -1
-
-      Thread.list.find { |t| t.object_id == obj_id }
-    end
   end
 end