bootsnap 1.10.3 → 1.18.6
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +4 -4
- data/CHANGELOG.md +106 -0
- data/README.md +21 -12
- data/ext/bootsnap/bootsnap.c +260 -120
- data/ext/bootsnap/extconf.rb +20 -13
- data/lib/bootsnap/bundler.rb +1 -1
- data/lib/bootsnap/cli/worker_pool.rb +72 -0
- data/lib/bootsnap/cli.rb +30 -26
- data/lib/bootsnap/compile_cache/iseq.rb +15 -9
- data/lib/bootsnap/compile_cache/json.rb +18 -17
- data/lib/bootsnap/compile_cache/yaml.rb +46 -60
- data/lib/bootsnap/compile_cache.rb +11 -17
- data/lib/bootsnap/explicit_require.rb +5 -0
- data/lib/bootsnap/load_path_cache/cache.rb +24 -21
- data/lib/bootsnap/load_path_cache/change_observer.rb +19 -2
- data/lib/bootsnap/load_path_cache/core_ext/kernel_require.rb +8 -45
- data/lib/bootsnap/load_path_cache/loaded_features_index.rb +3 -3
- data/lib/bootsnap/load_path_cache/path.rb +37 -17
- data/lib/bootsnap/load_path_cache/path_scanner.rb +7 -1
- data/lib/bootsnap/load_path_cache/store.rb +34 -17
- data/lib/bootsnap/load_path_cache.rb +36 -15
- data/lib/bootsnap/setup.rb +1 -1
- data/lib/bootsnap/version.rb +1 -1
- data/lib/bootsnap.rb +63 -36
- metadata +4 -8
- data/lib/bootsnap/load_path_cache/realpath_cache.rb +0 -33
data/ext/bootsnap/bootsnap.c
CHANGED
@@ -2,7 +2,7 @@
  * Suggested reading order:
  * 1. Skim Init_bootsnap
  * 2. Skim bs_fetch
- * 3. The rest of
+ * 3. The rest of everything
  *
  * Init_bootsnap sets up the ruby objects and binds bs_fetch to
  * Bootsnap::CompileCache::Native.fetch.
@@ -18,12 +18,17 @@
 #include <sys/types.h>
 #include <errno.h>
 #include <fcntl.h>
+#include <unistd.h>
 #include <sys/stat.h>
-
-#
+
+#ifdef __APPLE__
+// The symbol is present, however not in the headers
+// See: https://github.com/Shopify/bootsnap/issues/470
+extern int fdatasync(int);
 #endif
-
-#
+
+#ifndef O_NOATIME
+#define O_NOATIME 0
 #endif

 /* 1000 is an arbitrary limit; FNV64 plus some slashes brings the cap down to
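Aside: the `#ifndef O_NOATIME` shim above is what lets the `bs_open_noatime` helper added later in this file OR the flag into every `open(2)` call unconditionally; on platforms that never define it, the macro expands to `0` and the call degrades to a plain open. A minimal standalone sketch of the idea (not code from the gem; `/etc/hosts` is just an arbitrary readable file):

```c
#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

#ifndef O_NOATIME
#define O_NOATIME 0 /* flag missing on this platform: OR-ing it in is a no-op */
#endif

int main(void) {
  /* Same call shape everywhere; only Linux actually skips the atime update. */
  int fd = open("/etc/hosts", O_RDONLY | O_NOATIME);
  if (fd < 0 && errno == EPERM) {
    /* Not the file's owner: retry without the flag, the same fallback the
       gem's bs_open_noatime (added later in this diff) performs. */
    fd = open("/etc/hosts", O_RDONLY);
  }
  if (fd < 0) {
    perror("open");
    return 1;
  }
  printf("opened fd %d\n", fd);
  close(fd);
  return 0;
}
```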
@@ -36,7 +41,7 @@
 #define MAX_CREATE_TEMPFILE_ATTEMPT 3

 #ifndef RB_UNLIKELY
-
+#define RB_UNLIKELY(x) (x)
 #endif

 /*
@@ -60,8 +65,10 @@ struct bs_cache_key {
   uint32_t ruby_revision;
   uint64_t size;
   uint64_t mtime;
-  uint64_t data_size;
-
+  uint64_t data_size; //
+  uint64_t digest;
+  uint8_t digest_set;
+  uint8_t pad[15];
 } __attribute__((packed));

 /*
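Aside: the widened key makes room for a content digest while keeping a fixed on-disk header size. A rough sketch of the resulting layout; the fields not shown in this hunk (`version`, `ruby_platform`, `compile_option`) are assumed to stay `uint32_t`, so the byte arithmetic is only illustrative:

```c
#include <stdint.h>

/* Illustrative layout only; the unchanged members' types are assumed from the
 * field-by-field comparisons made elsewhere in this diff. */
struct example_cache_key {
  uint32_t version;        /*  4 bytes */
  uint32_t ruby_platform;  /*  4 bytes */
  uint32_t compile_option; /*  4 bytes */
  uint32_t ruby_revision;  /*  4 bytes */
  uint64_t size;           /*  8 bytes */
  uint64_t mtime;          /*  8 bytes */
  uint64_t data_size;      /*  8 bytes */
  uint64_t digest;         /*  8 bytes: FNV-1a 64 of the source contents */
  uint8_t  digest_set;     /*  1 byte: digest is filled in lazily */
  uint8_t  pad[15];        /* 15 bytes: rounds the packed header up to 64 */
} __attribute__((packed));

/* 4*4 + 3*8 + 8 + 1 + 15 = 64 */
_Static_assert(sizeof(struct example_cache_key) == 64, "64-byte header");
```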
@@ -75,7 +82,7 @@ struct bs_cache_key {
 STATIC_ASSERT(sizeof(struct bs_cache_key) == KEY_SIZE);

 /* Effectively a schema version. Bumping invalidates all previous caches */
-static const uint32_t current_version =
+static const uint32_t current_version = 6;

 /* hash of e.g. "x86_64-darwin17", invalidating when ruby is recompiled on a
  * new OS ABI, etc. */
@@ -93,23 +100,36 @@ static VALUE rb_mBootsnap_CompileCache;
 static VALUE rb_mBootsnap_CompileCache_Native;
 static VALUE rb_cBootsnap_CompileCache_UNCOMPILABLE;
 static ID instrumentation_method;
-static VALUE sym_miss;
-static VALUE sym_stale;
+static VALUE sym_hit, sym_miss, sym_stale, sym_revalidated;
 static bool instrumentation_enabled = false;
+static bool readonly = false;
+static bool revalidation = false;
+static bool perm_issue = false;

 /* Functions exposed as module functions on Bootsnap::CompileCache::Native */
 static VALUE bs_instrumentation_enabled_set(VALUE self, VALUE enabled);
+static VALUE bs_readonly_set(VALUE self, VALUE enabled);
+static VALUE bs_revalidation_set(VALUE self, VALUE enabled);
 static VALUE bs_compile_option_crc32_set(VALUE self, VALUE crc32_v);
 static VALUE bs_rb_fetch(VALUE self, VALUE cachedir_v, VALUE path_v, VALUE handler, VALUE args);
 static VALUE bs_rb_precompile(VALUE self, VALUE cachedir_v, VALUE path_v, VALUE handler);

 /* Helpers */
+enum cache_status {
+  miss,
+  hit,
+  stale,
+};
 static void bs_cache_path(const char * cachedir, const VALUE path, char (* cache_path)[MAX_CACHEPATH_SIZE]);
 static int bs_read_key(int fd, struct bs_cache_key * key);
-static
+static enum cache_status cache_key_equal_fast_path(struct bs_cache_key * k1, struct bs_cache_key * k2);
+static int cache_key_equal_slow_path(struct bs_cache_key * current_key, struct bs_cache_key * cached_key, const VALUE input_data);
+static int update_cache_key(struct bs_cache_key *current_key, struct bs_cache_key *old_key, int cache_fd, const char ** errno_provenance);
+
+static void bs_cache_key_digest(struct bs_cache_key * key, const VALUE input_data);
 static VALUE bs_fetch(char * path, VALUE path_v, char * cache_path, VALUE handler, VALUE args);
 static VALUE bs_precompile(char * path, VALUE path_v, char * cache_path, VALUE handler);
-static int open_current_file(char * path, struct bs_cache_key * key, const char ** errno_provenance);
+static int open_current_file(const char * path, struct bs_cache_key * key, const char ** errno_provenance);
 static int fetch_cached_data(int fd, ssize_t data_size, VALUE handler, VALUE args, VALUE * output_data, int * exception_tag, const char ** errno_provenance);
 static uint32_t get_ruby_revision(void);
 static uint32_t get_ruby_platform(void);
@@ -126,13 +146,10 @@ struct s2o_data;
 struct i2o_data;
 struct i2s_data;

-/* https://bugs.ruby-lang.org/issues/13667 */
-extern VALUE rb_get_coverages(void);
 static VALUE
-
+bs_rb_get_path(VALUE self, VALUE fname)
 {
-
-  return RTEST(cov) ? Qtrue : Qfalse;
+  return rb_get_path(fname);
 }

 /*
@@ -146,6 +163,9 @@ void
 Init_bootsnap(void)
 {
   rb_mBootsnap = rb_define_module("Bootsnap");
+
+  rb_define_singleton_method(rb_mBootsnap, "rb_get_path", bs_rb_get_path, 1);
+
   rb_mBootsnap_CompileCache = rb_define_module_under(rb_mBootsnap, "CompileCache");
   rb_mBootsnap_CompileCache_Native = rb_define_module_under(rb_mBootsnap_CompileCache, "Native");
   rb_cBootsnap_CompileCache_UNCOMPILABLE = rb_const_get(rb_mBootsnap_CompileCache, rb_intern("UNCOMPILABLE"));
@@ -156,14 +176,14 @@ Init_bootsnap(void)

   instrumentation_method = rb_intern("_instrument");

+  sym_hit = ID2SYM(rb_intern("hit"));
   sym_miss = ID2SYM(rb_intern("miss"));
-  rb_global_variable(&sym_miss);
-
   sym_stale = ID2SYM(rb_intern("stale"));
-
+  sym_revalidated = ID2SYM(rb_intern("revalidated"));

   rb_define_module_function(rb_mBootsnap, "instrumentation_enabled=", bs_instrumentation_enabled_set, 1);
-  rb_define_module_function(rb_mBootsnap_CompileCache_Native, "
+  rb_define_module_function(rb_mBootsnap_CompileCache_Native, "readonly=", bs_readonly_set, 1);
+  rb_define_module_function(rb_mBootsnap_CompileCache_Native, "revalidation=", bs_revalidation_set, 1);
   rb_define_module_function(rb_mBootsnap_CompileCache_Native, "fetch", bs_rb_fetch, 4);
   rb_define_module_function(rb_mBootsnap_CompileCache_Native, "precompile", bs_rb_precompile, 3);
   rb_define_module_function(rb_mBootsnap_CompileCache_Native, "compile_option_crc32=", bs_compile_option_crc32_set, 1);
@@ -179,6 +199,28 @@ bs_instrumentation_enabled_set(VALUE self, VALUE enabled)
   return enabled;
 }

+static inline void
+bs_instrumentation(VALUE event, VALUE path)
+{
+  if (RB_UNLIKELY(instrumentation_enabled)) {
+    rb_funcall(rb_mBootsnap, instrumentation_method, 2, event, path);
+  }
+}
+
+static VALUE
+bs_readonly_set(VALUE self, VALUE enabled)
+{
+  readonly = RTEST(enabled);
+  return enabled;
+}
+
+static VALUE
+bs_revalidation_set(VALUE self, VALUE enabled)
+{
+  revalidation = RTEST(enabled);
+  return enabled;
+}
+
 /*
  * Bootsnap's ruby code registers a hook that notifies us via this function
  * when compile_option changes. These changes invalidate all existing caches.
@@ -196,29 +238,6 @@ bs_compile_option_crc32_set(VALUE self, VALUE crc32_v)
   return Qnil;
 }

-/*
- * We use FNV1a-64 to derive cache paths. The choice is somewhat arbitrary but
- * it has several nice properties:
- *
- * - Tiny implementation
- * - No external dependency
- * - Solid performance
- * - Solid randomness
- * - 32 bits doesn't feel collision-resistant enough; 64 is nice.
- */
-static uint64_t
-fnv1a_64_iter_cstr(uint64_t h, const char *str)
-{
-  unsigned char *s = (unsigned char *)str;
-
-  while (*s) {
-    h ^= (uint64_t)*s++;
-    h += (h << 1) + (h << 4) + (h << 5) + (h << 7) + (h << 8) + (h << 40);
-  }
-
-  return h;
-}
-
 static uint64_t
 fnv1a_64_iter(uint64_t h, const VALUE str)
 {
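Aside: only the per-C-string FNV loop is deleted here; the `VALUE`-based `fnv1a_64_iter` stays and, per the hunks below, also feeds the new revalidation digest via `bs_cache_key_digest`. For reference, a standalone sketch of the same FNV-1a-64 loop over a plain byte buffer, using the standard offset basis and prime (illustrative, not the gem's code):

```c
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* FNV-1a 64: XOR each byte into the hash, then multiply by the FNV prime.
 * The removed shift-add expression above is the same multiplication by
 * 0x100000001b3 written without a multiply. */
static uint64_t fnv1a_64_bytes(const uint8_t *data, size_t len) {
  uint64_t h = UINT64_C(0xcbf29ce484222325); /* FNV offset basis */
  for (size_t i = 0; i < len; i++) {
    h ^= data[i];
    h *= UINT64_C(0x100000001b3); /* FNV prime */
  }
  return h;
}

int main(void) {
  const char source[] = "puts 'hello'\n";
  printf("%016llx\n",
         (unsigned long long)fnv1a_64_bytes((const uint8_t *)source,
                                            sizeof(source) - 1));
  return 0;
}
```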
@@ -263,10 +282,6 @@ get_ruby_revision(void)
 /*
  * When ruby's version doesn't change, but it's recompiled on a different OS
  * (or OS version), we need to invalidate the cache.
- *
- * We actually factor in some extra information here, to be extra confident
- * that we don't try to re-use caches that will not be compatible, by factoring
- * in utsname.version.
  */
 static uint32_t
 get_ruby_platform(void)
@@ -276,22 +291,7 @@ get_ruby_platform(void)

   ruby_platform = rb_const_get(rb_cObject, rb_intern("RUBY_PLATFORM"));
   hash = fnv1a_64(ruby_platform);
-
-#ifdef _WIN32
-  return (uint32_t)(hash >> 32) ^ (uint32_t)GetVersion();
-#elif defined(__GLIBC__)
-  hash = fnv1a_64_iter_cstr(hash, gnu_get_libc_version());
-  return (uint32_t)(hash >> 32);
-#else
-  struct utsname utsname;
-
-  /* Not worth crashing if this fails; lose extra cache invalidation potential */
-  if (uname(&utsname) >= 0) {
-    hash = fnv1a_64_iter_cstr(hash, utsname.version);
-  }
-
   return (uint32_t)(hash >> 32);
-#endif
 }

 /*
@@ -319,17 +319,59 @@ bs_cache_path(const char * cachedir, const VALUE path, char (* cache_path)[MAX_CACHEPATH_SIZE])
  * The data_size member is not compared, as it serves more of a "header"
  * function.
  */
-static
-
+static enum cache_status cache_key_equal_fast_path(struct bs_cache_key *k1,
+                                                   struct bs_cache_key *k2) {
+  if (k1->version == k2->version &&
+      k1->ruby_platform == k2->ruby_platform &&
+      k1->compile_option == k2->compile_option &&
+      k1->ruby_revision == k2->ruby_revision && k1->size == k2->size) {
+    if (k1->mtime == k2->mtime) {
+      return hit;
+    }
+    if (revalidation) {
+      return stale;
+    }
+  }
+  return miss;
+}
+
+static int cache_key_equal_slow_path(struct bs_cache_key *current_key,
+                                     struct bs_cache_key *cached_key,
+                                     const VALUE input_data)
+{
+  bs_cache_key_digest(current_key, input_data);
+  return current_key->digest == cached_key->digest;
+}
+
+static int update_cache_key(struct bs_cache_key *current_key, struct bs_cache_key *old_key, int cache_fd, const char ** errno_provenance)
 {
-
-
-
-
-
-
-
-
+  old_key->mtime = current_key->mtime;
+  lseek(cache_fd, 0, SEEK_SET);
+  ssize_t nwrite = write(cache_fd, old_key, KEY_SIZE);
+  if (nwrite < 0) {
+    *errno_provenance = "update_cache_key:write";
+    return -1;
+  }
+
+#ifdef HAVE_FDATASYNC
+  if (fdatasync(cache_fd) < 0) {
+    *errno_provenance = "update_cache_key:fdatasync";
+    return -1;
+  }
+#endif
+
+  return 0;
+}
+
+/*
+ * Fills the cache key digest.
+ */
+static void bs_cache_key_digest(struct bs_cache_key *key,
+                                const VALUE input_data) {
+  if (key->digest_set)
+    return;
+  key->digest = fnv1a_64(input_data);
+  key->digest_set = 1;
 }

 /*
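Aside: together these helpers make cache validation a two-phase check. The cheap field comparison classifies the entry as hit, miss, or stale-by-mtime; only the stale case pays for reading and hashing the source, and if the digest still matches, the stored mtime is rewritten in place so the next lookup is a plain hit. A condensed, self-contained sketch of that decision flow using plain stand-in types (not the gem's structures):

```c
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

enum status { MISS, HIT, STALE };

struct key { uint64_t size, mtime, digest; };

static enum status fast_path(const struct key *cur, const struct key *cached,
                             bool revalidation) {
  if (cur->size == cached->size) {
    if (cur->mtime == cached->mtime) return HIT;
    if (revalidation) return STALE; /* same size, different mtime: worth hashing */
  }
  return MISS;
}

/* Returns true when the cache can be reused; *refresh is set when the cached
 * header's mtime should be rewritten in place (what update_cache_key does). */
static bool usable(struct key *cur, struct key *cached, bool revalidation,
                   uint64_t content_digest, bool *refresh) {
  *refresh = false;
  switch (fast_path(cur, cached, revalidation)) {
  case HIT:  return true;
  case MISS: return false;
  case STALE:
    cur->digest = content_digest;  /* slow path: hash the source contents */
    if (cur->digest == cached->digest) {
      *refresh = true;             /* only the timestamp went stale */
      return true;
    }
    return false;
  }
  return false;
}

int main(void) {
  struct key cached = { 120, 1000, 0xabcdef };
  struct key cur = { 120, 2000, 0 };
  bool refresh;
  bool ok = usable(&cur, &cached, true, 0xabcdef, &refresh);
  printf("reusable=%d refresh=%d\n", ok, refresh);
  return 0;
}
```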
@@ -385,17 +427,34 @@ bs_rb_precompile(VALUE self, VALUE cachedir_v, VALUE path_v, VALUE handler)

   return bs_precompile(path, path_v, cache_path, handler);
 }
+
+static int bs_open_noatime(const char *path, int flags) {
+  int fd = 1;
+  if (!perm_issue) {
+    fd = open(path, flags | O_NOATIME);
+    if (fd < 0 && errno == EPERM) {
+      errno = 0;
+      perm_issue = true;
+    }
+  }
+
+  if (perm_issue) {
+    fd = open(path, flags);
+  }
+  return fd;
+}
+
 /*
  * Open the file we want to load/cache and generate a cache key for it if it
  * was loaded.
  */
 static int
-open_current_file(char * path, struct bs_cache_key * key, const char ** errno_provenance)
+open_current_file(const char * path, struct bs_cache_key * key, const char ** errno_provenance)
 {
   struct stat statbuf;
   int fd;

-  fd =
+  fd = bs_open_noatime(path, O_RDONLY);
   if (fd < 0) {
     *errno_provenance = "bs_fetch:open_current_file:open";
     return fd;
@@ -406,7 +465,9 @@ open_current_file(const char * path, struct bs_cache_key * key, const char ** errno_provenance)

   if (fstat(fd, &statbuf) < 0) {
     *errno_provenance = "bs_fetch:open_current_file:fstat";
+    int previous_errno = errno;
     close(fd);
+    errno = previous_errno;
     return -1;
   }

@@ -416,6 +477,7 @@ open_current_file(const char * path, struct bs_cache_key * key, const char ** errno_provenance)
   key->ruby_revision = current_ruby_revision;
   key->size = (uint64_t)statbuf.st_size;
   key->mtime = (uint64_t)statbuf.st_mtime;
+  key->digest_set = false;

   return fd;
 }
@@ -459,7 +521,12 @@ open_cache_file(const char * path, struct bs_cache_key * key, const char ** errno_provenance)
 {
   int fd, res;

-
+  if (readonly || !revalidation) {
+    fd = bs_open_noatime(path, O_RDONLY);
+  } else {
+    fd = bs_open_noatime(path, O_RDWR);
+  }
+
   if (fd < 0) {
     *errno_provenance = "bs_fetch:open_cache_file:open";
     return CACHE_MISS;
@@ -496,7 +563,6 @@ open_cache_file(const char * path, struct bs_cache_key * key, const char ** errno_provenance)
 static int
 fetch_cached_data(int fd, ssize_t data_size, VALUE handler, VALUE args, VALUE * output_data, int * exception_tag, const char ** errno_provenance)
 {
-  char * data = NULL;
   ssize_t nread;
   int ret;

@@ -508,8 +574,8 @@ fetch_cached_data(int fd, ssize_t data_size, VALUE handler, VALUE args, VALUE * output_data, int * exception_tag, const char ** errno_provenance)
     ret = ERROR_WITH_ERRNO;
     goto done;
   }
-
-  nread = read(fd,
+  storage_data = rb_str_buf_new(data_size);
+  nread = read(fd, RSTRING_PTR(storage_data), data_size);
   if (nread < 0) {
     *errno_provenance = "bs_fetch:fetch_cached_data:read";
     ret = ERROR_WITH_ERRNO;
@@ -520,7 +586,7 @@
     goto done;
   }

-  storage_data
+  rb_str_set_len(storage_data, nread);

   *exception_tag = bs_storage_to_output(handler, args, storage_data, output_data);
   if (*output_data == rb_cBootsnap_CompileCache_UNCOMPILABLE) {
@@ -529,7 +595,6 @@
   }
   ret = 0;
 done:
-  if (data != NULL) xfree(data);
   return ret;
 }

@@ -636,17 +701,22 @@ atomic_write_cache_file(char * path, struct bs_cache_key * key, VALUE data, const char ** errno_provenance)


 /* Read contents from an fd, whose contents are asserted to be +size+ bytes
- * long,
-static
-bs_read_contents(int fd, size_t size,
+ * long, returning a Ruby string on success and Qfalse on failure */
+static VALUE
+bs_read_contents(int fd, size_t size, const char ** errno_provenance)
 {
+  VALUE contents;
   ssize_t nread;
-
-  nread = read(fd,
+  contents = rb_str_buf_new(size);
+  nread = read(fd, RSTRING_PTR(contents), size);
+
   if (nread < 0) {
     *errno_provenance = "bs_fetch:bs_read_contents:read";
+    return Qfalse;
+  } else {
+    rb_str_set_len(contents, nread);
+    return contents;
   }
-  return nread;
 }

 /*
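Aside: `bs_read_contents` (and the matching change in `fetch_cached_data`) now reads straight into a Ruby string buffer instead of a heap `char *` that later had to be copied and freed. A minimal sketch of that pattern as it might appear in any C extension; error handling is trimmed, and the helper name is made up:

```c
#include <ruby.h>
#include <fcntl.h>
#include <unistd.h>

/* Read up to `size` bytes from `path` directly into a Ruby string, using the
 * same rb_str_buf_new / read / rb_str_set_len sequence as above. Returns
 * Qfalse on failure instead of raising, mirroring bs_read_contents. */
static VALUE read_into_rstring(const char *path, size_t size) {
  int fd = open(path, O_RDONLY);
  if (fd < 0) return Qfalse;

  VALUE buf = rb_str_buf_new((long)size);      /* capacity; length still 0 */
  ssize_t nread = read(fd, RSTRING_PTR(buf), size);
  close(fd);

  if (nread < 0) return Qfalse;
  rb_str_set_len(buf, nread);                  /* fix up the string length */
  return buf;
}
```

This compiles as part of a C extension (it needs `ruby.h`); the point is only the buffer-handling pattern, not a complete extension.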
@@ -697,38 +767,67 @@ static VALUE
 bs_fetch(char * path, VALUE path_v, char * cache_path, VALUE handler, VALUE args)
 {
   struct bs_cache_key cached_key, current_key;
-  char * contents = NULL;
   int cache_fd = -1, current_fd = -1;
   int res, valid_cache = 0, exception_tag = 0;
   const char * errno_provenance = NULL;

-  VALUE
+  VALUE status = Qfalse;
+  VALUE input_data = Qfalse; /* data read from source file, e.g. YAML or ruby source */
   VALUE storage_data; /* compiled data, e.g. msgpack / binary iseq */
   VALUE output_data; /* return data, e.g. ruby hash or loaded iseq */

   VALUE exception; /* ruby exception object to raise instead of returning */
+  VALUE exception_message; /* ruby exception string to use instead of errno_provenance */

   /* Open the source file and generate a cache key for it */
   current_fd = open_current_file(path, &current_key, &errno_provenance);
-  if (current_fd < 0)
+  if (current_fd < 0) {
+    exception_message = path_v;
+    goto fail_errno;
+  }

   /* Open the cache key if it exists, and read its cache key in */
   cache_fd = open_cache_file(cache_path, &cached_key, &errno_provenance);
   if (cache_fd == CACHE_MISS || cache_fd == CACHE_STALE) {
     /* This is ok: valid_cache remains false, we re-populate it. */
-
-    rb_funcall(rb_mBootsnap, instrumentation_method, 2, cache_fd == CACHE_MISS ? sym_miss : sym_stale, path_v);
-    }
+    bs_instrumentation(cache_fd == CACHE_MISS ? sym_miss : sym_stale, path_v);
   } else if (cache_fd < 0) {
+    exception_message = rb_str_new_cstr(cache_path);
     goto fail_errno;
   } else {
     /* True if the cache existed and no invalidating changes have occurred since
      * it was generated. */
-
-
-
-
+
+    switch(cache_key_equal_fast_path(&current_key, &cached_key)) {
+    case hit:
+      status = sym_hit;
+      valid_cache = true;
+      break;
+    case miss:
+      valid_cache = false;
+      break;
+    case stale:
+      valid_cache = false;
+      if ((input_data = bs_read_contents(current_fd, current_key.size,
+                                         &errno_provenance)) == Qfalse) {
+        exception_message = path_v;
+        goto fail_errno;
+      }
+      valid_cache = cache_key_equal_slow_path(&current_key, &cached_key, input_data);
+      if (valid_cache) {
+        if (!readonly) {
+          if (update_cache_key(&current_key, &cached_key, cache_fd, &errno_provenance)) {
+            exception_message = path_v;
+            goto fail_errno;
+          }
+        }
+        status = sym_revalidated;
       }
+      break;
+    };
+
+    if (!valid_cache) {
+      status = sym_stale;
     }
   }

@@ -742,13 +841,18 @@ bs_fetch(char * path, VALUE path_v, char * cache_path, VALUE handler, VALUE args)
   else if (res == CACHE_UNCOMPILABLE) {
     /* If fetch_cached_data returned `Uncompilable` we fallback to `input_to_output`
        This happens if we have say, an unsafe YAML cache, but try to load it in safe mode */
-    if (bs_read_contents(current_fd, current_key.size, &
-
+    if (input_data == Qfalse && (input_data = bs_read_contents(current_fd, current_key.size, &errno_provenance)) == Qfalse) {
+      exception_message = path_v;
+      goto fail_errno;
+    }
     bs_input_to_output(handler, args, input_data, &output_data, &exception_tag);
     if (exception_tag != 0) goto raise;
     goto succeed;
   } else if (res == CACHE_MISS || res == CACHE_STALE) valid_cache = 0;
-  else if (res == ERROR_WITH_ERRNO)
+  else if (res == ERROR_WITH_ERRNO){
+    exception_message = rb_str_new_cstr(cache_path);
+    goto fail_errno;
+  }
   else if (!NIL_P(output_data)) goto succeed; /* fast-path, goal */
   }
   close(cache_fd);
@@ -756,8 +860,10 @@ bs_fetch(char * path, VALUE path_v, char * cache_path, VALUE handler, VALUE args)
   /* Cache is stale, invalid, or missing. Regenerate and write it out. */

   /* Read the contents of the source file into a buffer */
-  if (bs_read_contents(current_fd, current_key.size, &
-
+  if (input_data == Qfalse && (input_data = bs_read_contents(current_fd, current_key.size, &errno_provenance)) == Qfalse) {
+    exception_message = path_v;
+    goto fail_errno;
+  }

   /* Try to compile the input_data using input_to_storage(input_data) */
   exception_tag = bs_input_to_storage(handler, args, input_data, path_v, &storage_data);
@@ -776,6 +882,7 @@ bs_fetch(char * path, VALUE path_v, char * cache_path, VALUE handler, VALUE args)
    * We do however ignore any failures to persist the cache, as it's better
    * to move along, than to interrupt the process.
    */
+  bs_cache_key_digest(&current_key, input_data);
   atomic_write_cache_file(cache_path, &current_key, storage_data, &errno_provenance);

   /* Having written the cache, now convert storage_data to output_data */
@@ -794,6 +901,7 @@ bs_fetch(char * path, VALUE path_v, char * cache_path, VALUE handler, VALUE args)
      * No point raising an error */
     if (errno != ENOENT) {
       errno_provenance = "bs_fetch:unlink";
+      exception_message = rb_str_new_cstr(cache_path);
       goto fail_errno;
     }
   }
@@ -804,16 +912,22 @@ bs_fetch(char * path, VALUE path_v, char * cache_path, VALUE handler, VALUE args)
   goto succeed; /* output_data is now the correct return. */

 #define CLEANUP \
-  if (contents != NULL) xfree(contents); \
   if (current_fd >= 0) close(current_fd); \
-  if (cache_fd >= 0) close(cache_fd);
+  if (cache_fd >= 0) close(cache_fd); \
+  if (status != Qfalse) bs_instrumentation(status, path_v);

 succeed:
   CLEANUP;
   return output_data;
 fail_errno:
   CLEANUP;
-
+  if (errno_provenance) {
+    exception_message = rb_str_concat(
+      rb_str_new_cstr(errno_provenance),
+      rb_str_concat(rb_str_new_cstr(": "), exception_message)
+    );
+  }
+  exception = rb_syserr_new_str(errno, exception_message);
   rb_exc_raise(exception);
   __builtin_unreachable();
 raise:
@@ -831,13 +945,16 @@ invalid_type_storage_data:
 static VALUE
 bs_precompile(char * path, VALUE path_v, char * cache_path, VALUE handler)
 {
+  if (readonly) {
+    return Qfalse;
+  }
+
   struct bs_cache_key cached_key, current_key;
-  char * contents = NULL;
   int cache_fd = -1, current_fd = -1;
   int res, valid_cache = 0, exception_tag = 0;
   const char * errno_provenance = NULL;

-  VALUE input_data; /* data read from source file, e.g. YAML or ruby source */
+  VALUE input_data = Qfalse; /* data read from source file, e.g. YAML or ruby source */
   VALUE storage_data; /* compiled data, e.g. msgpack / binary iseq */

   /* Open the source file and generate a cache key for it */
@@ -853,7 +970,26 @@ bs_precompile(char * path, VALUE path_v, char * cache_path, VALUE handler)
   } else {
     /* True if the cache existed and no invalidating changes have occurred since
      * it was generated. */
-
+    switch(cache_key_equal_fast_path(&current_key, &cached_key)) {
+    case hit:
+      valid_cache = true;
+      break;
+    case miss:
+      valid_cache = false;
+      break;
+    case stale:
+      valid_cache = false;
+      if ((input_data = bs_read_contents(current_fd, current_key.size, &errno_provenance)) == Qfalse) {
+        goto fail;
+      }
+      valid_cache = cache_key_equal_slow_path(&current_key, &cached_key, input_data);
+      if (valid_cache) {
+        if (update_cache_key(&current_key, &cached_key, cache_fd, &errno_provenance)) {
+          goto fail;
+        }
+      }
+      break;
+    };
   }

   if (valid_cache) {
@@ -865,8 +1001,7 @@ bs_precompile(char * path, VALUE path_v, char * cache_path, VALUE handler)
   /* Cache is stale, invalid, or missing. Regenerate and write it out. */

   /* Read the contents of the source file into a buffer */
-  if (bs_read_contents(current_fd, current_key.size, &
-  input_data = rb_str_new(contents, current_key.size);
+  if ((input_data = bs_read_contents(current_fd, current_key.size, &errno_provenance)) == Qfalse) goto fail;

   /* Try to compile the input_data using input_to_storage(input_data) */
   exception_tag = bs_input_to_storage(handler, Qnil, input_data, path_v, &storage_data);
@@ -881,13 +1016,13 @@ bs_precompile(char * path, VALUE path_v, char * cache_path, VALUE handler)
   if (!RB_TYPE_P(storage_data, T_STRING)) goto fail;

   /* Write the cache key and storage_data to the cache directory */
+  bs_cache_key_digest(&current_key, input_data);
   res = atomic_write_cache_file(cache_path, &current_key, storage_data, &errno_provenance);
   if (res < 0) goto fail;

   goto succeed;

 #define CLEANUP \
-  if (contents != NULL) xfree(contents); \
   if (current_fd >= 0) close(current_fd); \
   if (cache_fd >= 0) close(cache_fd);

@@ -984,12 +1119,17 @@ try_input_to_storage(VALUE arg)
 static int
 bs_input_to_storage(VALUE handler, VALUE args, VALUE input_data, VALUE pathval, VALUE * storage_data)
 {
-
-
-
-
-
-
-
-
+  if (readonly) {
+    *storage_data = rb_cBootsnap_CompileCache_UNCOMPILABLE;
+    return 0;
+  } else {
+    int state;
+    struct i2s_data i2s_data = {
+      .handler = handler,
+      .input_data = input_data,
+      .pathval = pathval,
+    };
+    *storage_data = rb_protect(try_input_to_storage, (VALUE)&i2s_data, &state);
+    return state;
+  }
 }