bootsnap 1.11.1 → 1.18.3
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +4 -4
- data/CHANGELOG.md +76 -0
- data/README.md +19 -10
- data/ext/bootsnap/bootsnap.c +253 -112
- data/ext/bootsnap/extconf.rb +20 -13
- data/lib/bootsnap/bundler.rb +1 -1
- data/lib/bootsnap/cli.rb +18 -16
- data/lib/bootsnap/compile_cache/iseq.rb +14 -8
- data/lib/bootsnap/compile_cache/json.rb +18 -17
- data/lib/bootsnap/compile_cache/yaml.rb +46 -60
- data/lib/bootsnap/compile_cache.rb +11 -15
- data/lib/bootsnap/load_path_cache/cache.rb +20 -21
- data/lib/bootsnap/load_path_cache/change_observer.rb +19 -2
- data/lib/bootsnap/load_path_cache/core_ext/kernel_require.rb +7 -35
- data/lib/bootsnap/load_path_cache/loaded_features_index.rb +2 -2
- data/lib/bootsnap/load_path_cache/path.rb +16 -18
- data/lib/bootsnap/load_path_cache/path_scanner.rb +7 -1
- data/lib/bootsnap/load_path_cache/store.rb +11 -14
- data/lib/bootsnap/load_path_cache.rb +36 -13
- data/lib/bootsnap/setup.rb +1 -1
- data/lib/bootsnap/version.rb +1 -1
- data/lib/bootsnap.rb +36 -32
- metadata +4 -4
data/ext/bootsnap/bootsnap.c
CHANGED
@@ -18,12 +18,17 @@
 #include <sys/types.h>
 #include <errno.h>
 #include <fcntl.h>
+#include <unistd.h>
 #include <sys/stat.h>
-
-#
+
+#ifdef __APPLE__
+// The symbol is present, however not in the headers
+// See: https://github.com/Shopify/bootsnap/issues/470
+extern int fdatasync(int);
 #endif
-
-#
+
+#ifndef O_NOATIME
+#define O_NOATIME 0
 #endif
 
 /* 1000 is an arbitrary limit; FNV64 plus some slashes brings the cap down to
@@ -36,7 +41,7 @@
 #define MAX_CREATE_TEMPFILE_ATTEMPT 3
 
 #ifndef RB_UNLIKELY
-
+#define RB_UNLIKELY(x) (x)
 #endif
 
 /*
@@ -60,8 +65,10 @@ struct bs_cache_key {
   uint32_t ruby_revision;
   uint64_t size;
   uint64_t mtime;
-  uint64_t data_size;
-
+  uint64_t data_size; //
+  uint64_t digest;
+  uint8_t digest_set;
+  uint8_t pad[15];
 } __attribute__((packed));
 
 /*
@@ -75,7 +82,7 @@ struct bs_cache_key {
 STATIC_ASSERT(sizeof(struct bs_cache_key) == KEY_SIZE);
 
 /* Effectively a schema version. Bumping invalidates all previous caches */
-static const uint32_t current_version =
+static const uint32_t current_version = 5;
 
 /* hash of e.g. "x86_64-darwin17", invalidating when ruby is recompiled on a
  * new OS ABI, etc. */
@@ -93,23 +100,36 @@ static VALUE rb_mBootsnap_CompileCache;
 static VALUE rb_mBootsnap_CompileCache_Native;
 static VALUE rb_cBootsnap_CompileCache_UNCOMPILABLE;
 static ID instrumentation_method;
-static VALUE sym_miss;
-static VALUE sym_stale;
+static VALUE sym_hit, sym_miss, sym_stale, sym_revalidated;
 static bool instrumentation_enabled = false;
+static bool readonly = false;
+static bool revalidation = false;
+static bool perm_issue = false;
 
 /* Functions exposed as module functions on Bootsnap::CompileCache::Native */
 static VALUE bs_instrumentation_enabled_set(VALUE self, VALUE enabled);
+static VALUE bs_readonly_set(VALUE self, VALUE enabled);
+static VALUE bs_revalidation_set(VALUE self, VALUE enabled);
 static VALUE bs_compile_option_crc32_set(VALUE self, VALUE crc32_v);
 static VALUE bs_rb_fetch(VALUE self, VALUE cachedir_v, VALUE path_v, VALUE handler, VALUE args);
 static VALUE bs_rb_precompile(VALUE self, VALUE cachedir_v, VALUE path_v, VALUE handler);
 
 /* Helpers */
+enum cache_status {
+  miss,
+  hit,
+  stale,
+};
 static void bs_cache_path(const char * cachedir, const VALUE path, char (* cache_path)[MAX_CACHEPATH_SIZE]);
 static int bs_read_key(int fd, struct bs_cache_key * key);
-static
+static enum cache_status cache_key_equal_fast_path(struct bs_cache_key * k1, struct bs_cache_key * k2);
+static int cache_key_equal_slow_path(struct bs_cache_key * current_key, struct bs_cache_key * cached_key, const VALUE input_data);
+static int update_cache_key(struct bs_cache_key *current_key, struct bs_cache_key *old_key, int cache_fd, const char ** errno_provenance);
+
+static void bs_cache_key_digest(struct bs_cache_key * key, const VALUE input_data);
 static VALUE bs_fetch(char * path, VALUE path_v, char * cache_path, VALUE handler, VALUE args);
 static VALUE bs_precompile(char * path, VALUE path_v, char * cache_path, VALUE handler);
-static int open_current_file(char * path, struct bs_cache_key * key, const char ** errno_provenance);
+static int open_current_file(const char * path, struct bs_cache_key * key, const char ** errno_provenance);
 static int fetch_cached_data(int fd, ssize_t data_size, VALUE handler, VALUE args, VALUE * output_data, int * exception_tag, const char ** errno_provenance);
 static uint32_t get_ruby_revision(void);
 static uint32_t get_ruby_platform(void);
@@ -165,13 +185,14 @@ Init_bootsnap(void)
 
   instrumentation_method = rb_intern("_instrument");
 
+  sym_hit = ID2SYM(rb_intern("hit"));
   sym_miss = ID2SYM(rb_intern("miss"));
-  rb_global_variable(&sym_miss);
-
   sym_stale = ID2SYM(rb_intern("stale"));
-
+  sym_revalidated = ID2SYM(rb_intern("revalidated"));
 
   rb_define_module_function(rb_mBootsnap, "instrumentation_enabled=", bs_instrumentation_enabled_set, 1);
+  rb_define_module_function(rb_mBootsnap_CompileCache_Native, "readonly=", bs_readonly_set, 1);
+  rb_define_module_function(rb_mBootsnap_CompileCache_Native, "revalidation=", bs_revalidation_set, 1);
   rb_define_module_function(rb_mBootsnap_CompileCache_Native, "coverage_running?", bs_rb_coverage_running, 0);
   rb_define_module_function(rb_mBootsnap_CompileCache_Native, "fetch", bs_rb_fetch, 4);
   rb_define_module_function(rb_mBootsnap_CompileCache_Native, "precompile", bs_rb_precompile, 3);
@@ -188,6 +209,28 @@ bs_instrumentation_enabled_set(VALUE self, VALUE enabled)
   return enabled;
 }
 
+static inline void
+bs_instrumentation(VALUE event, VALUE path)
+{
+  if (RB_UNLIKELY(instrumentation_enabled)) {
+    rb_funcall(rb_mBootsnap, instrumentation_method, 2, event, path);
+  }
+}
+
+static VALUE
+bs_readonly_set(VALUE self, VALUE enabled)
+{
+  readonly = RTEST(enabled);
+  return enabled;
+}
+
+static VALUE
+bs_revalidation_set(VALUE self, VALUE enabled)
+{
+  revalidation = RTEST(enabled);
+  return enabled;
+}
+
 /*
  * Bootsnap's ruby code registers a hook that notifies us via this function
  * when compile_option changes. These changes invalidate all existing caches.
@@ -205,29 +248,6 @@ bs_compile_option_crc32_set(VALUE self, VALUE crc32_v)
   return Qnil;
 }
 
-/*
- * We use FNV1a-64 to derive cache paths. The choice is somewhat arbitrary but
- * it has several nice properties:
- *
- *   - Tiny implementation
- *   - No external dependency
- *   - Solid performance
- *   - Solid randomness
- *   - 32 bits doesn't feel collision-resistant enough; 64 is nice.
- */
-static uint64_t
-fnv1a_64_iter_cstr(uint64_t h, const char *str)
-{
-  unsigned char *s = (unsigned char *)str;
-
-  while (*s) {
-    h ^= (uint64_t)*s++;
-    h += (h << 1) + (h << 4) + (h << 5) + (h << 7) + (h << 8) + (h << 40);
-  }
-
-  return h;
-}
-
 static uint64_t
 fnv1a_64_iter(uint64_t h, const VALUE str)
 {
@@ -272,10 +292,6 @@ get_ruby_revision(void)
 /*
  * When ruby's version doesn't change, but it's recompiled on a different OS
  * (or OS version), we need to invalidate the cache.
- *
- * We actually factor in some extra information here, to be extra confident
- * that we don't try to re-use caches that will not be compatible, by factoring
- * in utsname.version.
  */
 static uint32_t
 get_ruby_platform(void)
@@ -285,22 +301,7 @@ get_ruby_platform(void)
 
   ruby_platform = rb_const_get(rb_cObject, rb_intern("RUBY_PLATFORM"));
   hash = fnv1a_64(ruby_platform);
-
-#ifdef _WIN32
-  return (uint32_t)(hash >> 32) ^ (uint32_t)GetVersion();
-#elif defined(__GLIBC__)
-  hash = fnv1a_64_iter_cstr(hash, gnu_get_libc_version());
-  return (uint32_t)(hash >> 32);
-#else
-  struct utsname utsname;
-
-  /* Not worth crashing if this fails; lose extra cache invalidation potential */
-  if (uname(&utsname) >= 0) {
-    hash = fnv1a_64_iter_cstr(hash, utsname.version);
-  }
-
   return (uint32_t)(hash >> 32);
-#endif
 }
 
 /*
@@ -328,17 +329,59 @@ bs_cache_path(const char * cachedir, const VALUE path, char (* cache_path)[MAX_C
  * The data_size member is not compared, as it serves more of a "header"
  * function.
  */
-static
-
+static enum cache_status cache_key_equal_fast_path(struct bs_cache_key *k1,
+                                                   struct bs_cache_key *k2) {
+  if (k1->version == k2->version &&
+      k1->ruby_platform == k2->ruby_platform &&
+      k1->compile_option == k2->compile_option &&
+      k1->ruby_revision == k2->ruby_revision && k1->size == k2->size) {
+    if (k1->mtime == k2->mtime) {
+      return hit;
+    }
+    if (revalidation) {
+      return stale;
+    }
+  }
+  return miss;
+}
+
+static int cache_key_equal_slow_path(struct bs_cache_key *current_key,
+                                     struct bs_cache_key *cached_key,
+                                     const VALUE input_data)
 {
-
-
-
-
-
-
-
-);
+  bs_cache_key_digest(current_key, input_data);
+  return current_key->digest == cached_key->digest;
+}
+
+static int update_cache_key(struct bs_cache_key *current_key, struct bs_cache_key *old_key, int cache_fd, const char ** errno_provenance)
+{
+  old_key->mtime = current_key->mtime;
+  lseek(cache_fd, 0, SEEK_SET);
+  ssize_t nwrite = write(cache_fd, old_key, KEY_SIZE);
+  if (nwrite < 0) {
+    *errno_provenance = "update_cache_key:write";
+    return -1;
+  }
+
+#ifdef HAVE_FDATASYNC
+  if (fdatasync(cache_fd) < 0) {
+    *errno_provenance = "update_cache_key:fdatasync";
+    return -1;
+  }
+#endif
+
+  return 0;
+}
+
+/*
+ * Fills the cache key digest.
+ */
+static void bs_cache_key_digest(struct bs_cache_key *key,
+                                const VALUE input_data) {
+  if (key->digest_set)
+    return;
+  key->digest = fnv1a_64(input_data);
+  key->digest_set = 1;
 }
 
 /*
@@ -394,17 +437,34 @@ bs_rb_precompile(VALUE self, VALUE cachedir_v, VALUE path_v, VALUE handler)
 
   return bs_precompile(path, path_v, cache_path, handler);
 }
+
+static int bs_open_noatime(const char *path, int flags) {
+  int fd = 1;
+  if (!perm_issue) {
+    fd = open(path, flags | O_NOATIME);
+    if (fd < 0 && errno == EPERM) {
+      errno = 0;
+      perm_issue = true;
+    }
+  }
+
+  if (perm_issue) {
+    fd = open(path, flags);
+  }
+  return fd;
+}
+
 /*
  * Open the file we want to load/cache and generate a cache key for it if it
  * was loaded.
  */
 static int
-open_current_file(char * path, struct bs_cache_key * key, const char ** errno_provenance)
+open_current_file(const char * path, struct bs_cache_key * key, const char ** errno_provenance)
 {
   struct stat statbuf;
   int fd;
 
-  fd =
+  fd = bs_open_noatime(path, O_RDONLY);
   if (fd < 0) {
     *errno_provenance = "bs_fetch:open_current_file:open";
     return fd;
@@ -415,7 +475,9 @@ open_current_file(char * path, struct bs_cache_key * key, const char ** errno_pr
 
   if (fstat(fd, &statbuf) < 0) {
     *errno_provenance = "bs_fetch:open_current_file:fstat";
+    int previous_errno = errno;
     close(fd);
+    errno = previous_errno;
     return -1;
   }
 
@@ -425,6 +487,7 @@ open_current_file(char * path, struct bs_cache_key * key, const char ** errno_pr
   key->ruby_revision = current_ruby_revision;
   key->size = (uint64_t)statbuf.st_size;
   key->mtime = (uint64_t)statbuf.st_mtime;
+  key->digest_set = false;
 
   return fd;
 }
@@ -468,7 +531,12 @@ open_cache_file(const char * path, struct bs_cache_key * key, const char ** errn
 {
   int fd, res;
 
-
+  if (readonly || !revalidation) {
+    fd = bs_open_noatime(path, O_RDONLY);
+  } else {
+    fd = bs_open_noatime(path, O_RDWR);
+  }
+
   if (fd < 0) {
     *errno_provenance = "bs_fetch:open_cache_file:open";
     return CACHE_MISS;
@@ -505,7 +573,6 @@ open_cache_file(const char * path, struct bs_cache_key * key, const char ** errn
 static int
 fetch_cached_data(int fd, ssize_t data_size, VALUE handler, VALUE args, VALUE * output_data, int * exception_tag, const char ** errno_provenance)
 {
-  char * data = NULL;
   ssize_t nread;
   int ret;
 
@@ -517,8 +584,8 @@ fetch_cached_data(int fd, ssize_t data_size, VALUE handler, VALUE args, VALUE *
     ret = ERROR_WITH_ERRNO;
     goto done;
   }
-
-  nread = read(fd,
+  storage_data = rb_str_buf_new(data_size);
+  nread = read(fd, RSTRING_PTR(storage_data), data_size);
   if (nread < 0) {
     *errno_provenance = "bs_fetch:fetch_cached_data:read";
    ret = ERROR_WITH_ERRNO;
@@ -529,7 +596,7 @@ fetch_cached_data(int fd, ssize_t data_size, VALUE handler, VALUE args, VALUE *
     goto done;
   }
 
-  storage_data
+  rb_str_set_len(storage_data, nread);
 
   *exception_tag = bs_storage_to_output(handler, args, storage_data, output_data);
   if (*output_data == rb_cBootsnap_CompileCache_UNCOMPILABLE) {
@@ -538,7 +605,6 @@ fetch_cached_data(int fd, ssize_t data_size, VALUE handler, VALUE args, VALUE *
   }
   ret = 0;
 done:
-  if (data != NULL) xfree(data);
   return ret;
 }
 
@@ -645,17 +711,22 @@ atomic_write_cache_file(char * path, struct bs_cache_key * key, VALUE data, cons
 
 
 /* Read contents from an fd, whose contents are asserted to be +size+ bytes
- * long,
-static
-bs_read_contents(int fd, size_t size,
+ * long, returning a Ruby string on success and Qfalse on failure */
+static VALUE
+bs_read_contents(int fd, size_t size, const char ** errno_provenance)
 {
+  VALUE contents;
   ssize_t nread;
-
-  nread = read(fd,
+  contents = rb_str_buf_new(size);
+  nread = read(fd, RSTRING_PTR(contents), size);
+
   if (nread < 0) {
     *errno_provenance = "bs_fetch:bs_read_contents:read";
+    return Qfalse;
+  } else {
+    rb_str_set_len(contents, nread);
+    return contents;
   }
-  return nread;
 }
 
 /*
@@ -706,38 +777,67 @@ static VALUE
 bs_fetch(char * path, VALUE path_v, char * cache_path, VALUE handler, VALUE args)
 {
   struct bs_cache_key cached_key, current_key;
-  char * contents = NULL;
   int cache_fd = -1, current_fd = -1;
   int res, valid_cache = 0, exception_tag = 0;
   const char * errno_provenance = NULL;
 
-  VALUE
+  VALUE status = Qfalse;
+  VALUE input_data = Qfalse; /* data read from source file, e.g. YAML or ruby source */
   VALUE storage_data; /* compiled data, e.g. msgpack / binary iseq */
   VALUE output_data; /* return data, e.g. ruby hash or loaded iseq */
 
  VALUE exception; /* ruby exception object to raise instead of returning */
+  VALUE exception_message; /* ruby exception string to use instead of errno_provenance */
 
   /* Open the source file and generate a cache key for it */
   current_fd = open_current_file(path, &current_key, &errno_provenance);
-  if (current_fd < 0)
+  if (current_fd < 0) {
+    exception_message = path_v;
+    goto fail_errno;
+  }
 
   /* Open the cache key if it exists, and read its cache key in */
   cache_fd = open_cache_file(cache_path, &cached_key, &errno_provenance);
   if (cache_fd == CACHE_MISS || cache_fd == CACHE_STALE) {
     /* This is ok: valid_cache remains false, we re-populate it. */
-
-    rb_funcall(rb_mBootsnap, instrumentation_method, 2, cache_fd == CACHE_MISS ? sym_miss : sym_stale, path_v);
-    }
+    bs_instrumentation(cache_fd == CACHE_MISS ? sym_miss : sym_stale, path_v);
   } else if (cache_fd < 0) {
+    exception_message = rb_str_new_cstr(cache_path);
     goto fail_errno;
   } else {
     /* True if the cache existed and no invalidating changes have occurred since
     * it was generated. */
-
-
-
-
+
+    switch(cache_key_equal_fast_path(&current_key, &cached_key)) {
+    case hit:
+      status = sym_hit;
+      valid_cache = true;
+      break;
+    case miss:
+      valid_cache = false;
+      break;
+    case stale:
+      valid_cache = false;
+      if ((input_data = bs_read_contents(current_fd, current_key.size,
+                                         &errno_provenance)) == Qfalse) {
+        exception_message = path_v;
+        goto fail_errno;
+      }
+      valid_cache = cache_key_equal_slow_path(&current_key, &cached_key, input_data);
+      if (valid_cache) {
+        if (!readonly) {
+          if (update_cache_key(&current_key, &cached_key, cache_fd, &errno_provenance)) {
+            exception_message = path_v;
+            goto fail_errno;
+          }
+        }
+        status = sym_revalidated;
      }
+      break;
+    };
+
+    if (!valid_cache) {
+      status = sym_stale;
    }
  }
 
@@ -751,13 +851,18 @@ bs_fetch(char * path, VALUE path_v, char * cache_path, VALUE handler, VALUE args
   else if (res == CACHE_UNCOMPILABLE) {
     /* If fetch_cached_data returned `Uncompilable` we fallback to `input_to_output`
       This happens if we have say, an unsafe YAML cache, but try to load it in safe mode */
-    if (bs_read_contents(current_fd, current_key.size, &
-
+    if (input_data == Qfalse && (input_data = bs_read_contents(current_fd, current_key.size, &errno_provenance)) == Qfalse) {
+      exception_message = path_v;
+      goto fail_errno;
+    }
     bs_input_to_output(handler, args, input_data, &output_data, &exception_tag);
     if (exception_tag != 0) goto raise;
     goto succeed;
   } else if (res == CACHE_MISS || res == CACHE_STALE) valid_cache = 0;
-  else if (res == ERROR_WITH_ERRNO)
+  else if (res == ERROR_WITH_ERRNO){
+    exception_message = rb_str_new_cstr(cache_path);
+    goto fail_errno;
+  }
   else if (!NIL_P(output_data)) goto succeed; /* fast-path, goal */
   }
   close(cache_fd);
@@ -765,8 +870,10 @@ bs_fetch(char * path, VALUE path_v, char * cache_path, VALUE handler, VALUE args
   /* Cache is stale, invalid, or missing. Regenerate and write it out. */
 
   /* Read the contents of the source file into a buffer */
-  if (bs_read_contents(current_fd, current_key.size, &
-
+  if (input_data == Qfalse && (input_data = bs_read_contents(current_fd, current_key.size, &errno_provenance)) == Qfalse) {
+    exception_message = path_v;
+    goto fail_errno;
+  }
 
   /* Try to compile the input_data using input_to_storage(input_data) */
   exception_tag = bs_input_to_storage(handler, args, input_data, path_v, &storage_data);
@@ -785,6 +892,7 @@ bs_fetch(char * path, VALUE path_v, char * cache_path, VALUE handler, VALUE args
    * We do however ignore any failures to persist the cache, as it's better
    * to move along, than to interrupt the process.
    */
+  bs_cache_key_digest(&current_key, input_data);
   atomic_write_cache_file(cache_path, &current_key, storage_data, &errno_provenance);
 
   /* Having written the cache, now convert storage_data to output_data */
@@ -803,6 +911,7 @@ bs_fetch(char * path, VALUE path_v, char * cache_path, VALUE handler, VALUE args
      * No point raising an error */
     if (errno != ENOENT) {
       errno_provenance = "bs_fetch:unlink";
+      exception_message = rb_str_new_cstr(cache_path);
      goto fail_errno;
    }
  }
@@ -813,7 +922,7 @@ bs_fetch(char * path, VALUE path_v, char * cache_path, VALUE handler, VALUE args
   goto succeed; /* output_data is now the correct return. */
 
 #define CLEANUP \
-  if (
+  if (status != Qfalse) bs_instrumentation(status, path_v); \
   if (current_fd >= 0) close(current_fd); \
   if (cache_fd >= 0) close(cache_fd);
 
@@ -822,7 +931,13 @@ succeed:
   return output_data;
 fail_errno:
   CLEANUP;
-
+  if (errno_provenance) {
+    exception_message = rb_str_concat(
+      rb_str_new_cstr(errno_provenance),
+      rb_str_concat(rb_str_new_cstr(": "), exception_message)
+    );
+  }
+  exception = rb_syserr_new_str(errno, exception_message);
   rb_exc_raise(exception);
   __builtin_unreachable();
 raise:
@@ -840,13 +955,16 @@ invalid_type_storage_data:
 static VALUE
 bs_precompile(char * path, VALUE path_v, char * cache_path, VALUE handler)
 {
+  if (readonly) {
+    return Qfalse;
+  }
+
   struct bs_cache_key cached_key, current_key;
-  char * contents = NULL;
   int cache_fd = -1, current_fd = -1;
   int res, valid_cache = 0, exception_tag = 0;
   const char * errno_provenance = NULL;
 
-  VALUE input_data; /* data read from source file, e.g. YAML or ruby source */
+  VALUE input_data = Qfalse; /* data read from source file, e.g. YAML or ruby source */
   VALUE storage_data; /* compiled data, e.g. msgpack / binary iseq */
 
   /* Open the source file and generate a cache key for it */
@@ -862,7 +980,26 @@ bs_precompile(char * path, VALUE path_v, char * cache_path, VALUE handler)
   } else {
     /* True if the cache existed and no invalidating changes have occurred since
      * it was generated. */
-
+    switch(cache_key_equal_fast_path(&current_key, &cached_key)) {
+    case hit:
+      valid_cache = true;
+      break;
+    case miss:
+      valid_cache = false;
+      break;
+    case stale:
+      valid_cache = false;
+      if ((input_data = bs_read_contents(current_fd, current_key.size, &errno_provenance)) == Qfalse) {
+        goto fail;
+      }
+      valid_cache = cache_key_equal_slow_path(&current_key, &cached_key, input_data);
+      if (valid_cache) {
+        if (update_cache_key(&current_key, &cached_key, cache_fd, &errno_provenance)) {
+          goto fail;
+        }
+      }
+      break;
+    };
  }
 
   if (valid_cache) {
@@ -874,8 +1011,7 @@ bs_precompile(char * path, VALUE path_v, char * cache_path, VALUE handler)
   /* Cache is stale, invalid, or missing. Regenerate and write it out. */
 
   /* Read the contents of the source file into a buffer */
-  if (bs_read_contents(current_fd, current_key.size, &
-  input_data = rb_str_new(contents, current_key.size);
+  if ((input_data = bs_read_contents(current_fd, current_key.size, &errno_provenance)) == Qfalse) goto fail;
 
   /* Try to compile the input_data using input_to_storage(input_data) */
   exception_tag = bs_input_to_storage(handler, Qnil, input_data, path_v, &storage_data);
@@ -890,13 +1026,13 @@ bs_precompile(char * path, VALUE path_v, char * cache_path, VALUE handler)
   if (!RB_TYPE_P(storage_data, T_STRING)) goto fail;
 
   /* Write the cache key and storage_data to the cache directory */
+  bs_cache_key_digest(&current_key, input_data);
   res = atomic_write_cache_file(cache_path, &current_key, storage_data, &errno_provenance);
   if (res < 0) goto fail;
 
   goto succeed;
 
 #define CLEANUP \
-  if (contents != NULL) xfree(contents); \
   if (current_fd >= 0) close(current_fd); \
   if (cache_fd >= 0) close(cache_fd);
 
@@ -993,12 +1129,17 @@ try_input_to_storage(VALUE arg)
 static int
 bs_input_to_storage(VALUE handler, VALUE args, VALUE input_data, VALUE pathval, VALUE * storage_data)
 {
-
-
-
-
-
-
-
-
+  if (readonly) {
+    *storage_data = rb_cBootsnap_CompileCache_UNCOMPILABLE;
+    return 0;
+  } else {
+    int state;
+    struct i2s_data i2s_data = {
+      .handler = handler,
+      .input_data = input_data,
+      .pathval = pathval,
+    };
+    *storage_data = rb_protect(try_input_to_storage, (VALUE)&i2s_data, &state);
+    return state;
+  }
 }