bootsnap 1.11.1 → 1.18.1
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +4 -4
- data/CHANGELOG.md +65 -0
- data/README.md +19 -10
- data/ext/bootsnap/bootsnap.c +225 -113
- data/ext/bootsnap/extconf.rb +20 -13
- data/lib/bootsnap/bundler.rb +1 -1
- data/lib/bootsnap/cli.rb +18 -16
- data/lib/bootsnap/compile_cache/iseq.rb +14 -8
- data/lib/bootsnap/compile_cache/json.rb +18 -17
- data/lib/bootsnap/compile_cache/yaml.rb +46 -60
- data/lib/bootsnap/compile_cache.rb +10 -15
- data/lib/bootsnap/load_path_cache/cache.rb +20 -21
- data/lib/bootsnap/load_path_cache/change_observer.rb +19 -2
- data/lib/bootsnap/load_path_cache/core_ext/kernel_require.rb +7 -35
- data/lib/bootsnap/load_path_cache/loaded_features_index.rb +2 -2
- data/lib/bootsnap/load_path_cache/path.rb +16 -18
- data/lib/bootsnap/load_path_cache/path_scanner.rb +7 -1
- data/lib/bootsnap/load_path_cache/store.rb +11 -14
- data/lib/bootsnap/load_path_cache.rb +36 -13
- data/lib/bootsnap/setup.rb +1 -1
- data/lib/bootsnap/version.rb +1 -1
- data/lib/bootsnap.rb +34 -32
- metadata +4 -4
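
Much of the C diff below is in service of two new behaviors: a read-only cache mode (consult existing caches, never create or rewrite them) and finer-grained instrumentation (:hit and :revalidated events join :miss and :stale). A minimal sketch of flipping the new native switch directly, assuming the compiled extension is loaded; the Ruby-level wiring lives in the lib/ files listed above and is not shown in this excerpt:

    require "bootsnap"
    require "bootsnap/compile_cache" # assumption for this sketch: pulls in Bootsnap::CompileCache::Native

    # New module function defined in bootsnap.c below ("readonly=" -> bs_readonly_set):
    # caches are still read and revalidated, but nothing is written back.
    Bootsnap::CompileCache::Native.readonly = true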
data/ext/bootsnap/bootsnap.c
CHANGED
@@ -19,11 +19,9 @@
 #include <errno.h>
 #include <fcntl.h>
 #include <sys/stat.h>
-
-#
-#
-#ifdef __GLIBC__
-#include <gnu/libc-version.h>
+
+#ifndef O_NOATIME
+#define O_NOATIME 0
 #endif
 
 /* 1000 is an arbitrary limit; FNV64 plus some slashes brings the cap down to
@@ -36,7 +34,7 @@
 #define MAX_CREATE_TEMPFILE_ATTEMPT 3
 
 #ifndef RB_UNLIKELY
-
+#define RB_UNLIKELY(x) (x)
 #endif
 
 /*
@@ -60,8 +58,10 @@ struct bs_cache_key {
   uint32_t ruby_revision;
   uint64_t size;
   uint64_t mtime;
-  uint64_t data_size;
-
+  uint64_t data_size; //
+  uint64_t digest;
+  uint8_t digest_set;
+  uint8_t pad[15];
 } __attribute__((packed));
 
 /*
@@ -75,7 +75,7 @@ struct bs_cache_key {
 STATIC_ASSERT(sizeof(struct bs_cache_key) == KEY_SIZE);
 
 /* Effectively a schema version. Bumping invalidates all previous caches */
-static const uint32_t current_version =
+static const uint32_t current_version = 5;
 
 /* hash of e.g. "x86_64-darwin17", invalidating when ruby is recompiled on a
  * new OS ABI, etc. */
@@ -93,23 +93,34 @@ static VALUE rb_mBootsnap_CompileCache;
 static VALUE rb_mBootsnap_CompileCache_Native;
 static VALUE rb_cBootsnap_CompileCache_UNCOMPILABLE;
 static ID instrumentation_method;
-static VALUE sym_miss;
-static VALUE sym_stale;
+static VALUE sym_hit, sym_miss, sym_stale, sym_revalidated;
 static bool instrumentation_enabled = false;
+static bool readonly = false;
+static bool perm_issue = false;
 
 /* Functions exposed as module functions on Bootsnap::CompileCache::Native */
 static VALUE bs_instrumentation_enabled_set(VALUE self, VALUE enabled);
+static VALUE bs_readonly_set(VALUE self, VALUE enabled);
 static VALUE bs_compile_option_crc32_set(VALUE self, VALUE crc32_v);
 static VALUE bs_rb_fetch(VALUE self, VALUE cachedir_v, VALUE path_v, VALUE handler, VALUE args);
 static VALUE bs_rb_precompile(VALUE self, VALUE cachedir_v, VALUE path_v, VALUE handler);
 
 /* Helpers */
+enum cache_status {
+  miss,
+  hit,
+  stale,
+};
 static void bs_cache_path(const char * cachedir, const VALUE path, char (* cache_path)[MAX_CACHEPATH_SIZE]);
 static int bs_read_key(int fd, struct bs_cache_key * key);
-static
+static enum cache_status cache_key_equal_fast_path(struct bs_cache_key * k1, struct bs_cache_key * k2);
+static int cache_key_equal_slow_path(struct bs_cache_key * current_key, struct bs_cache_key * cached_key, const VALUE input_data);
+static int update_cache_key(struct bs_cache_key *current_key, int cache_fd, const char ** errno_provenance);
+
+static void bs_cache_key_digest(struct bs_cache_key * key, const VALUE input_data);
 static VALUE bs_fetch(char * path, VALUE path_v, char * cache_path, VALUE handler, VALUE args);
 static VALUE bs_precompile(char * path, VALUE path_v, char * cache_path, VALUE handler);
-static int open_current_file(char * path, struct bs_cache_key * key, const char ** errno_provenance);
+static int open_current_file(const char * path, struct bs_cache_key * key, const char ** errno_provenance);
 static int fetch_cached_data(int fd, ssize_t data_size, VALUE handler, VALUE args, VALUE * output_data, int * exception_tag, const char ** errno_provenance);
 static uint32_t get_ruby_revision(void);
 static uint32_t get_ruby_platform(void);
@@ -165,13 +176,13 @@ Init_bootsnap(void)
 
   instrumentation_method = rb_intern("_instrument");
 
+  sym_hit = ID2SYM(rb_intern("hit"));
   sym_miss = ID2SYM(rb_intern("miss"));
-  rb_global_variable(&sym_miss);
-
   sym_stale = ID2SYM(rb_intern("stale"));
-
+  sym_revalidated = ID2SYM(rb_intern("revalidated"));
 
   rb_define_module_function(rb_mBootsnap, "instrumentation_enabled=", bs_instrumentation_enabled_set, 1);
+  rb_define_module_function(rb_mBootsnap_CompileCache_Native, "readonly=", bs_readonly_set, 1);
   rb_define_module_function(rb_mBootsnap_CompileCache_Native, "coverage_running?", bs_rb_coverage_running, 0);
   rb_define_module_function(rb_mBootsnap_CompileCache_Native, "fetch", bs_rb_fetch, 4);
   rb_define_module_function(rb_mBootsnap_CompileCache_Native, "precompile", bs_rb_precompile, 3);
@@ -188,6 +199,21 @@ bs_instrumentation_enabled_set(VALUE self, VALUE enabled)
   return enabled;
 }
 
+static inline void
+bs_instrumentation(VALUE event, VALUE path)
+{
+  if (RB_UNLIKELY(instrumentation_enabled)) {
+    rb_funcall(rb_mBootsnap, instrumentation_method, 2, event, path);
+  }
+}
+
+static VALUE
+bs_readonly_set(VALUE self, VALUE enabled)
+{
+  readonly = RTEST(enabled);
+  return enabled;
+}
+
 /*
  * Bootsnap's ruby code registers a hook that notifies us via this function
  * when compile_option changes. These changes invalidate all existing caches.
@@ -205,29 +231,6 @@ bs_compile_option_crc32_set(VALUE self, VALUE crc32_v)
   return Qnil;
 }
 
-/*
- * We use FNV1a-64 to derive cache paths. The choice is somewhat arbitrary but
- * it has several nice properties:
- *
- *   - Tiny implementation
- *   - No external dependency
- *   - Solid performance
- *   - Solid randomness
- *   - 32 bits doesn't feel collision-resistant enough; 64 is nice.
- */
-static uint64_t
-fnv1a_64_iter_cstr(uint64_t h, const char *str)
-{
-  unsigned char *s = (unsigned char *)str;
-
-  while (*s) {
-    h ^= (uint64_t)*s++;
-    h += (h << 1) + (h << 4) + (h << 5) + (h << 7) + (h << 8) + (h << 40);
-  }
-
-  return h;
-}
-
 static uint64_t
 fnv1a_64_iter(uint64_t h, const VALUE str)
 {
@@ -272,10 +275,6 @@ get_ruby_revision(void)
 /*
  * When ruby's version doesn't change, but it's recompiled on a different OS
  * (or OS version), we need to invalidate the cache.
- *
- * We actually factor in some extra information here, to be extra confident
- * that we don't try to re-use caches that will not be compatible, by factoring
- * in utsname.version.
  */
 static uint32_t
 get_ruby_platform(void)
@@ -285,22 +284,7 @@ get_ruby_platform(void)
 
   ruby_platform = rb_const_get(rb_cObject, rb_intern("RUBY_PLATFORM"));
   hash = fnv1a_64(ruby_platform);
-
-#ifdef _WIN32
-  return (uint32_t)(hash >> 32) ^ (uint32_t)GetVersion();
-#elif defined(__GLIBC__)
-  hash = fnv1a_64_iter_cstr(hash, gnu_get_libc_version());
   return (uint32_t)(hash >> 32);
-#else
-  struct utsname utsname;
-
-  /* Not worth crashing if this fails; lose extra cache invalidation potential */
-  if (uname(&utsname) >= 0) {
-    hash = fnv1a_64_iter_cstr(hash, utsname.version);
-  }
-
-  return (uint32_t)(hash >> 32);
-#endif
 }
 
 /*
@@ -328,17 +312,53 @@ bs_cache_path(const char * cachedir, const VALUE path, char (* cache_path)[MAX_C
  * The data_size member is not compared, as it serves more of a "header"
  * function.
  */
-static
-
+static enum cache_status cache_key_equal_fast_path(struct bs_cache_key *k1,
+                                                   struct bs_cache_key *k2) {
+  if (k1->version == k2->version &&
+      k1->ruby_platform == k2->ruby_platform &&
+      k1->compile_option == k2->compile_option &&
+      k1->ruby_revision == k2->ruby_revision && k1->size == k2->size) {
+    return (k1->mtime == k2->mtime) ? hit : stale;
+  }
+  return miss;
+}
+
+static int cache_key_equal_slow_path(struct bs_cache_key *current_key,
+                                     struct bs_cache_key *cached_key,
+                                     const VALUE input_data)
+{
+  bs_cache_key_digest(current_key, input_data);
+  return current_key->digest == cached_key->digest;
+}
+
+static int update_cache_key(struct bs_cache_key *current_key, int cache_fd, const char ** errno_provenance)
 {
-
-
-
-
-
-
-
-
+  lseek(cache_fd, 0, SEEK_SET);
+  ssize_t nwrite = write(cache_fd, current_key, KEY_SIZE);
+  if (nwrite < 0) {
+    *errno_provenance = "update_cache_key:write";
+    return -1;
+  }
+
+#ifdef HAVE_FDATASYNC
+  if (fdatasync(cache_fd) < 0) {
+    *errno_provenance = "update_cache_key:fdatasync";
+    return -1;
+  }
+#endif
+
+  return 0;
+}
+
+/*
+ * Fills the cache key digest.
+ */
+static void bs_cache_key_digest(struct bs_cache_key *key,
+                                const VALUE input_data) {
+  if (key->digest_set)
+    return;
+  key->digest = fnv1a_64(input_data);
+  key->digest_set = 1;
 }
 
 /*
@@ -394,17 +414,34 @@ bs_rb_precompile(VALUE self, VALUE cachedir_v, VALUE path_v, VALUE handler)
 
   return bs_precompile(path, path_v, cache_path, handler);
 }
+
+static int bs_open_noatime(const char *path, int flags) {
+  int fd = 1;
+  if (!perm_issue) {
+    fd = open(path, flags | O_NOATIME);
+    if (fd < 0 && errno == EPERM) {
+      errno = 0;
+      perm_issue = true;
+    }
+  }
+
+  if (perm_issue) {
+    fd = open(path, flags);
+  }
+  return fd;
+}
+
 /*
  * Open the file we want to load/cache and generate a cache key for it if it
  * was loaded.
 */
 static int
-open_current_file(char * path, struct bs_cache_key * key, const char ** errno_provenance)
+open_current_file(const char * path, struct bs_cache_key * key, const char ** errno_provenance)
 {
   struct stat statbuf;
   int fd;
 
-  fd =
+  fd = bs_open_noatime(path, O_RDONLY);
   if (fd < 0) {
     *errno_provenance = "bs_fetch:open_current_file:open";
     return fd;
@@ -415,7 +452,9 @@ open_current_file(char * path, struct bs_cache_key * key, const char ** errno_pr
 
   if (fstat(fd, &statbuf) < 0) {
     *errno_provenance = "bs_fetch:open_current_file:fstat";
+    int previous_errno = errno;
     close(fd);
+    errno = previous_errno;
     return -1;
   }
 
@@ -425,6 +464,7 @@ open_current_file(char * path, struct bs_cache_key * key, const char ** errno_pr
   key->ruby_revision = current_ruby_revision;
   key->size = (uint64_t)statbuf.st_size;
   key->mtime = (uint64_t)statbuf.st_mtime;
+  key->digest_set = false;
 
   return fd;
 }
@@ -468,7 +508,12 @@ open_cache_file(const char * path, struct bs_cache_key * key, const char ** errn
 {
   int fd, res;
 
-
+  if (readonly) {
+    fd = bs_open_noatime(path, O_RDONLY);
+  } else {
+    fd = bs_open_noatime(path, O_RDWR);
+  }
+
   if (fd < 0) {
     *errno_provenance = "bs_fetch:open_cache_file:open";
     return CACHE_MISS;
@@ -505,7 +550,6 @@
 static int
 fetch_cached_data(int fd, ssize_t data_size, VALUE handler, VALUE args, VALUE * output_data, int * exception_tag, const char ** errno_provenance)
 {
-  char * data = NULL;
   ssize_t nread;
   int ret;
 
@@ -517,8 +561,8 @@ fetch_cached_data(int fd, ssize_t data_size, VALUE handler, VALUE args, VALUE *
     ret = ERROR_WITH_ERRNO;
     goto done;
   }
-
-  nread = read(fd,
+  storage_data = rb_str_buf_new(data_size);
+  nread = read(fd, RSTRING_PTR(storage_data), data_size);
   if (nread < 0) {
     *errno_provenance = "bs_fetch:fetch_cached_data:read";
     ret = ERROR_WITH_ERRNO;
@@ -529,7 +573,7 @@ fetch_cached_data(int fd, ssize_t data_size, VALUE handler, VALUE args, VALUE *
     goto done;
   }
 
-  storage_data
+  rb_str_set_len(storage_data, nread);
 
   *exception_tag = bs_storage_to_output(handler, args, storage_data, output_data);
   if (*output_data == rb_cBootsnap_CompileCache_UNCOMPILABLE) {
@@ -538,7 +582,6 @@ fetch_cached_data(int fd, ssize_t data_size, VALUE handler, VALUE args, VALUE *
   }
   ret = 0;
 done:
-  if (data != NULL) xfree(data);
  return ret;
 }
 
@@ -645,17 +688,22 @@ atomic_write_cache_file(char * path, struct bs_cache_key * key, VALUE data, cons
 
 
 /* Read contents from an fd, whose contents are asserted to be +size+ bytes
- * long,
-static
-bs_read_contents(int fd, size_t size,
+ * long, returning a Ruby string on success and Qfalse on failure */
+static VALUE
+bs_read_contents(int fd, size_t size, const char ** errno_provenance)
 {
+  VALUE contents;
  ssize_t nread;
-
-  nread = read(fd,
+  contents = rb_str_buf_new(size);
+  nread = read(fd, RSTRING_PTR(contents), size);
+
  if (nread < 0) {
    *errno_provenance = "bs_fetch:bs_read_contents:read";
+    return Qfalse;
+  } else {
+    rb_str_set_len(contents, nread);
+    return contents;
  }
-  return nread;
 }
 
 /*
@@ -706,38 +754,67 @@ static VALUE
 bs_fetch(char * path, VALUE path_v, char * cache_path, VALUE handler, VALUE args)
 {
  struct bs_cache_key cached_key, current_key;
-  char * contents = NULL;
  int cache_fd = -1, current_fd = -1;
  int res, valid_cache = 0, exception_tag = 0;
  const char * errno_provenance = NULL;
 
-  VALUE
+  VALUE status = Qfalse;
+  VALUE input_data = Qfalse; /* data read from source file, e.g. YAML or ruby source */
  VALUE storage_data; /* compiled data, e.g. msgpack / binary iseq */
  VALUE output_data; /* return data, e.g. ruby hash or loaded iseq */
 
  VALUE exception; /* ruby exception object to raise instead of returning */
+  VALUE exception_message; /* ruby exception string to use instead of errno_provenance */
 
  /* Open the source file and generate a cache key for it */
  current_fd = open_current_file(path, &current_key, &errno_provenance);
-  if (current_fd < 0)
+  if (current_fd < 0) {
+    exception_message = path_v;
+    goto fail_errno;
+  }
 
  /* Open the cache key if it exists, and read its cache key in */
  cache_fd = open_cache_file(cache_path, &cached_key, &errno_provenance);
  if (cache_fd == CACHE_MISS || cache_fd == CACHE_STALE) {
    /* This is ok: valid_cache remains false, we re-populate it. */
-
-    rb_funcall(rb_mBootsnap, instrumentation_method, 2, cache_fd == CACHE_MISS ? sym_miss : sym_stale, path_v);
-    }
+    bs_instrumentation(cache_fd == CACHE_MISS ? sym_miss : sym_stale, path_v);
  } else if (cache_fd < 0) {
+    exception_message = rb_str_new_cstr(cache_path);
    goto fail_errno;
  } else {
    /* True if the cache existed and no invalidating changes have occurred since
     * it was generated. */
-
-
-
-
+
+    switch(cache_key_equal_fast_path(&current_key, &cached_key)) {
+    case hit:
+      status = sym_hit;
+      valid_cache = true;
+      break;
+    case miss:
+      valid_cache = false;
+      break;
+    case stale:
+      valid_cache = false;
+      if ((input_data = bs_read_contents(current_fd, current_key.size,
+                                         &errno_provenance)) == Qfalse) {
+        exception_message = path_v;
+        goto fail_errno;
+      }
+      valid_cache = cache_key_equal_slow_path(&current_key, &cached_key, input_data);
+      if (valid_cache) {
+        if (!readonly) {
+          if (update_cache_key(&current_key, cache_fd, &errno_provenance)) {
+            exception_message = path_v;
+            goto fail_errno;
+          }
+        }
+        status = sym_revalidated;
      }
+      break;
+    };
+
+    if (!valid_cache) {
+      status = sym_stale;
    }
  }
 
@@ -751,13 +828,18 @@ bs_fetch(char * path, VALUE path_v, char * cache_path, VALUE handler, VALUE args
  else if (res == CACHE_UNCOMPILABLE) {
    /* If fetch_cached_data returned `Uncompilable` we fallback to `input_to_output`
       This happens if we have say, an unsafe YAML cache, but try to load it in safe mode */
-    if (bs_read_contents(current_fd, current_key.size, &
-
+    if (input_data == Qfalse && (input_data = bs_read_contents(current_fd, current_key.size, &errno_provenance)) == Qfalse) {
+      exception_message = path_v;
+      goto fail_errno;
+    }
    bs_input_to_output(handler, args, input_data, &output_data, &exception_tag);
    if (exception_tag != 0) goto raise;
    goto succeed;
  } else if (res == CACHE_MISS || res == CACHE_STALE) valid_cache = 0;
-  else if (res == ERROR_WITH_ERRNO)
+  else if (res == ERROR_WITH_ERRNO){
+    exception_message = rb_str_new_cstr(cache_path);
+    goto fail_errno;
+  }
  else if (!NIL_P(output_data)) goto succeed; /* fast-path, goal */
 }
 close(cache_fd);
@@ -765,8 +847,10 @@ bs_fetch(char * path, VALUE path_v, char * cache_path, VALUE handler, VALUE args
 /* Cache is stale, invalid, or missing. Regenerate and write it out. */
 
 /* Read the contents of the source file into a buffer */
-  if (bs_read_contents(current_fd, current_key.size, &
-
+  if (input_data == Qfalse && (input_data = bs_read_contents(current_fd, current_key.size, &errno_provenance)) == Qfalse) {
+    exception_message = path_v;
+    goto fail_errno;
+  }
 
 /* Try to compile the input_data using input_to_storage(input_data) */
 exception_tag = bs_input_to_storage(handler, args, input_data, path_v, &storage_data);
@@ -785,6 +869,7 @@ bs_fetch(char * path, VALUE path_v, char * cache_path, VALUE handler, VALUE args
  * We do however ignore any failures to persist the cache, as it's better
  * to move along, than to interrupt the process.
  */
+  bs_cache_key_digest(&current_key, input_data);
 atomic_write_cache_file(cache_path, &current_key, storage_data, &errno_provenance);
 
 /* Having written the cache, now convert storage_data to output_data */
@@ -803,6 +888,7 @@ bs_fetch(char * path, VALUE path_v, char * cache_path, VALUE handler, VALUE args
   * No point raising an error */
  if (errno != ENOENT) {
    errno_provenance = "bs_fetch:unlink";
+      exception_message = rb_str_new_cstr(cache_path);
    goto fail_errno;
  }
 }
@@ -813,7 +899,7 @@ bs_fetch(char * path, VALUE path_v, char * cache_path, VALUE handler, VALUE args
 goto succeed; /* output_data is now the correct return. */
 
 #define CLEANUP \
-  if (
+  if (status != Qfalse) bs_instrumentation(status, path_v); \
 if (current_fd >= 0) close(current_fd); \
 if (cache_fd >= 0) close(cache_fd);
 
@@ -822,7 +908,7 @@ succeed:
 return output_data;
 fail_errno:
 CLEANUP;
-  exception =
+  exception = rb_syserr_new_str(errno, exception_message);
 rb_exc_raise(exception);
 __builtin_unreachable();
 raise:
@@ -840,13 +926,16 @@ invalid_type_storage_data:
 static VALUE
 bs_precompile(char * path, VALUE path_v, char * cache_path, VALUE handler)
 {
+  if (readonly) {
+    return Qfalse;
+  }
+
 struct bs_cache_key cached_key, current_key;
-  char * contents = NULL;
 int cache_fd = -1, current_fd = -1;
 int res, valid_cache = 0, exception_tag = 0;
 const char * errno_provenance = NULL;
 
-  VALUE input_data; /* data read from source file, e.g. YAML or ruby source */
+  VALUE input_data = Qfalse; /* data read from source file, e.g. YAML or ruby source */
 VALUE storage_data; /* compiled data, e.g. msgpack / binary iseq */
 
 /* Open the source file and generate a cache key for it */
@@ -862,7 +951,26 @@ bs_precompile(char * path, VALUE path_v, char * cache_path, VALUE handler)
 } else {
   /* True if the cache existed and no invalidating changes have occurred since
    * it was generated. */
-
+    switch(cache_key_equal_fast_path(&current_key, &cached_key)) {
+    case hit:
+      valid_cache = true;
+      break;
+    case miss:
+      valid_cache = false;
+      break;
+    case stale:
+      valid_cache = false;
+      if ((input_data = bs_read_contents(current_fd, current_key.size, &errno_provenance)) == Qfalse) {
+        goto fail;
+      }
+      valid_cache = cache_key_equal_slow_path(&current_key, &cached_key, input_data);
+      if (valid_cache) {
+        if (update_cache_key(&current_key, cache_fd, &errno_provenance)) {
+          goto fail;
+        }
+      }
+      break;
+    };
 }
 
 if (valid_cache) {
@@ -874,8 +982,7 @@ bs_precompile(char * path, VALUE path_v, char * cache_path, VALUE handler)
 /* Cache is stale, invalid, or missing. Regenerate and write it out. */
 
 /* Read the contents of the source file into a buffer */
-  if (bs_read_contents(current_fd, current_key.size, &
-  input_data = rb_str_new(contents, current_key.size);
+  if ((input_data = bs_read_contents(current_fd, current_key.size, &errno_provenance)) == Qfalse) goto fail;
 
 /* Try to compile the input_data using input_to_storage(input_data) */
 exception_tag = bs_input_to_storage(handler, Qnil, input_data, path_v, &storage_data);
@@ -890,13 +997,13 @@ bs_precompile(char * path, VALUE path_v, char * cache_path, VALUE handler)
 if (!RB_TYPE_P(storage_data, T_STRING)) goto fail;
 
 /* Write the cache key and storage_data to the cache directory */
+  bs_cache_key_digest(&current_key, input_data);
 res = atomic_write_cache_file(cache_path, &current_key, storage_data, &errno_provenance);
 if (res < 0) goto fail;
 
 goto succeed;
 
 #define CLEANUP \
-  if (contents != NULL) xfree(contents); \
 if (current_fd >= 0) close(current_fd); \
 if (cache_fd >= 0) close(cache_fd);
 
@@ -993,12 +1100,17 @@ try_input_to_storage(VALUE arg)
 static int
 bs_input_to_storage(VALUE handler, VALUE args, VALUE input_data, VALUE pathval, VALUE * storage_data)
 {
-
-
-
-
-
-
-
-
+  if (readonly) {
+    *storage_data = rb_cBootsnap_CompileCache_UNCOMPILABLE;
+    return 0;
+  } else {
+    int state;
+    struct i2s_data i2s_data = {
+      .handler = handler,
+      .input_data = input_data,
+      .pathval = pathval,
+    };
+    *storage_data = rb_protect(try_input_to_storage, (VALUE)&i2s_data, &state);
+    return state;
+  }
 }
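
The revalidation path above hinges on bs_cache_key_digest, which fills key->digest with fnv1a_64(input_data) and compares it against the digest stored in the cache header when only the mtime differs. fnv1a_64 itself is outside these hunks, but the removed fnv1a_64_iter_cstr shows the iteration: XOR in a byte, then multiply by the FNV-64 prime (the shift-add chain equals 0x100000001b3). A rough standalone Ruby sketch of that digest, assuming the standard FNV-1a offset basis, which this excerpt does not show:

    FNV64_PRIME  = 0x100000001b3          # same multiplier as the shift-add chain in the removed helper
    FNV64_OFFSET = 0xcbf29ce484222325     # assumed standard FNV-1a starting value (not shown in this diff)
    MASK64       = 0xffffffffffffffff

    def fnv1a_64(string)
      string.each_byte.reduce(FNV64_OFFSET) { |h, byte| ((h ^ byte) * FNV64_PRIME) & MASK64 }
    end

    # Revalidation idea: a matching key with only a newer mtime no longer invalidates the
    # cache outright -- the source is re-hashed and the cache is kept when digests match.
    fnv1a_64(File.binread("some_source.rb"))  # "some_source.rb" is a hypothetical path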
data/ext/bootsnap/extconf.rb
CHANGED
@@ -1,26 +1,33 @@
 # frozen_string_literal: true
 
-require
+require "mkmf"
 
-if RUBY_ENGINE
-
-
+if %w[ruby truffleruby].include?(RUBY_ENGINE)
+  have_func "fdatasync", "fcntl.h"
+
+  unless RUBY_PLATFORM.match?(/mswin|mingw|cygwin/)
+    append_cppflags ["_GNU_SOURCE"] # Needed of O_NOATIME
+  end
+
+  append_cflags ["-O3", "-std=c99"]
 
   # ruby.h has some -Wpedantic fails in some cases
   # (e.g. https://github.com/Shopify/bootsnap/issues/15)
   unless ["0", "", nil].include?(ENV["BOOTSNAP_PEDANTIC"])
-
-
-
-
+    append_cflags([
+      "-Wall",
+      "-Werror",
+      "-Wextra",
+      "-Wpedantic",
 
-
-
-
-
+      "-Wno-unused-parameter", # VALUE self has to be there but we don't care what it is.
+      "-Wno-keyword-macro", # hiding return
+      "-Wno-gcc-compat", # ruby.h 2.6.0 on macos 10.14, dunno
+      "-Wno-compound-token-split-by-macro",
+    ])
   end
 
   create_makefile("bootsnap/bootsnap")
 else
-  File.write("Makefile", dummy_makefile($srcdir).join
+  File.write("Makefile", dummy_makefile($srcdir).join)
 end
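
The new have_func "fdatasync", "fcntl.h" check is what backs the #ifdef HAVE_FDATASYNC guard added to update_cache_key in bootsnap.c: mkmf compiles and links a small probe and, when it succeeds, appends the matching HAVE_* define to the compiler flags. A minimal sketch of the same mkmf pattern, with a hypothetical extension name:

    require "mkmf"

    # Defines HAVE_FDATASYNC (i.e. adds -DHAVE_FDATASYNC) when fdatasync(2) links,
    # so the C side can guard the call with #ifdef HAVE_FDATASYNC.
    have_func("fdatasync", "fcntl.h")

    create_makefile("my_ext/my_ext") # hypothetical extension path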