bootsnap 1.4.8 → 1.16.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +4 -4
- data/CHANGELOG.md +204 -0
- data/LICENSE.txt +1 -1
- data/README.md +57 -20
- data/exe/bootsnap +5 -0
- data/ext/bootsnap/bootsnap.c +260 -127
- data/ext/bootsnap/extconf.rb +21 -14
- data/lib/bootsnap/bundler.rb +1 -0
- data/lib/bootsnap/cli/worker_pool.rb +136 -0
- data/lib/bootsnap/cli.rb +281 -0
- data/lib/bootsnap/compile_cache/iseq.rb +63 -19
- data/lib/bootsnap/compile_cache/json.rb +93 -0
- data/lib/bootsnap/compile_cache/yaml.rb +332 -42
- data/lib/bootsnap/compile_cache.rb +25 -8
- data/lib/bootsnap/explicit_require.rb +4 -3
- data/lib/bootsnap/load_path_cache/cache.rb +63 -35
- data/lib/bootsnap/load_path_cache/change_observer.rb +17 -2
- data/lib/bootsnap/load_path_cache/core_ext/kernel_require.rb +27 -95
- data/lib/bootsnap/load_path_cache/core_ext/loaded_features.rb +1 -0
- data/lib/bootsnap/load_path_cache/loaded_features_index.rb +36 -25
- data/lib/bootsnap/load_path_cache/path.rb +40 -18
- data/lib/bootsnap/load_path_cache/path_scanner.rb +25 -7
- data/lib/bootsnap/load_path_cache/store.rb +64 -24
- data/lib/bootsnap/load_path_cache.rb +31 -38
- data/lib/bootsnap/setup.rb +2 -36
- data/lib/bootsnap/version.rb +2 -1
- data/lib/bootsnap.rb +125 -36
- metadata +15 -81
- data/lib/bootsnap/load_path_cache/core_ext/active_support.rb +0 -107
- data/lib/bootsnap/load_path_cache/realpath_cache.rb +0 -32
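The headline change in the C extension diff that follows is that the compile cache can now report misses and stale entries back to Ruby, and can be switched into a read-only mode. Below is a minimal sketch of driving that hook, assuming only the entry points visible in the bootsnap.c diff (`Bootsnap.instrumentation_enabled=` and the `Bootsnap._instrument(event, path)` callback the C code invokes); the warn-based body is illustrative, and bootsnap's own Ruby wrapper in `lib/bootsnap.rb` (not shown in this section) layers its own API on top.

```ruby
require "bootsnap"

# Illustrative only: once Bootsnap.instrumentation_enabled = true, the C
# extension calls Bootsnap._instrument(:miss, path) or
# Bootsnap._instrument(:stale, path) when a compile-cache lookup misses.
# What the Ruby side does with the event is up to you; logging is one option.
module Bootsnap
  def self._instrument(event, path)
    warn("bootsnap #{event}: #{path}")
  end
end

Bootsnap.instrumentation_enabled = true
```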
data/ext/bootsnap/bootsnap.c
CHANGED
@@ -14,16 +14,11 @@
 #include "bootsnap.h"
 #include "ruby.h"
 #include <stdint.h>
+#include <stdbool.h>
 #include <sys/types.h>
 #include <errno.h>
 #include <fcntl.h>
 #include <sys/stat.h>
-#ifndef _WIN32
-#include <sys/utsname.h>
-#endif
-#ifdef __GLIBC__
-#include <gnu/libc-version.h>
-#endif
 
 /* 1000 is an arbitrary limit; FNV64 plus some slashes brings the cap down to
  * 981 for the cache dir */
@@ -34,6 +29,10 @@
 
 #define MAX_CREATE_TEMPFILE_ATTEMPT 3
 
+#ifndef RB_UNLIKELY
+#define RB_UNLIKELY(x) (x)
+#endif
+
 /*
  * An instance of this key is written as the first 64 bytes of each cache file.
  * The mtime and size members track whether the file contents have changed, and
@@ -70,7 +69,7 @@ struct bs_cache_key {
 STATIC_ASSERT(sizeof(struct bs_cache_key) == KEY_SIZE);
 
 /* Effectively a schema version. Bumping invalidates all previous caches */
-static const uint32_t current_version =
+static const uint32_t current_version = 4;
 
 /* hash of e.g. "x86_64-darwin17", invalidating when ruby is recompiled on a
  * new OS ABI, etc. */
@@ -86,21 +85,28 @@ static mode_t current_umask;
 static VALUE rb_mBootsnap;
 static VALUE rb_mBootsnap_CompileCache;
 static VALUE rb_mBootsnap_CompileCache_Native;
-static VALUE
-static ID
+static VALUE rb_cBootsnap_CompileCache_UNCOMPILABLE;
+static ID instrumentation_method;
+static VALUE sym_miss;
+static VALUE sym_stale;
+static bool instrumentation_enabled = false;
+static bool readonly = false;
 
 /* Functions exposed as module functions on Bootsnap::CompileCache::Native */
+static VALUE bs_instrumentation_enabled_set(VALUE self, VALUE enabled);
+static VALUE bs_readonly_set(VALUE self, VALUE enabled);
 static VALUE bs_compile_option_crc32_set(VALUE self, VALUE crc32_v);
-static VALUE bs_rb_fetch(VALUE self, VALUE cachedir_v, VALUE path_v, VALUE handler);
+static VALUE bs_rb_fetch(VALUE self, VALUE cachedir_v, VALUE path_v, VALUE handler, VALUE args);
+static VALUE bs_rb_precompile(VALUE self, VALUE cachedir_v, VALUE path_v, VALUE handler);
 
 /* Helpers */
-static
-static void bs_cache_path(const char * cachedir, const char * path, char (* cache_path)[MAX_CACHEPATH_SIZE]);
+static void bs_cache_path(const char * cachedir, const VALUE path, char (* cache_path)[MAX_CACHEPATH_SIZE]);
 static int bs_read_key(int fd, struct bs_cache_key * key);
 static int cache_key_equal(struct bs_cache_key * k1, struct bs_cache_key * k2);
-static VALUE bs_fetch(char * path, VALUE path_v, char * cache_path, VALUE handler);
+static VALUE bs_fetch(char * path, VALUE path_v, char * cache_path, VALUE handler, VALUE args);
+static VALUE bs_precompile(char * path, VALUE path_v, char * cache_path, VALUE handler);
 static int open_current_file(char * path, struct bs_cache_key * key, const char ** errno_provenance);
-static int fetch_cached_data(int fd, ssize_t data_size, VALUE handler, VALUE * output_data, int * exception_tag, const char ** errno_provenance);
+static int fetch_cached_data(int fd, ssize_t data_size, VALUE handler, VALUE args, VALUE * output_data, int * exception_tag, const char ** errno_provenance);
 static uint32_t get_ruby_revision(void);
 static uint32_t get_ruby_platform(void);
 
@@ -108,12 +114,10 @@ static uint32_t get_ruby_platform(void);
  * Helper functions to call ruby methods on handler object without crashing on
  * exception.
  */
-static int bs_storage_to_output(VALUE handler, VALUE storage_data, VALUE * output_data);
-static VALUE prot_storage_to_output(VALUE arg);
+static int bs_storage_to_output(VALUE handler, VALUE args, VALUE storage_data, VALUE * output_data);
 static VALUE prot_input_to_output(VALUE arg);
-static void bs_input_to_output(VALUE handler, VALUE input_data, VALUE * output_data, int * exception_tag);
-static
-static int bs_input_to_storage(VALUE handler, VALUE input_data, VALUE pathval, VALUE * storage_data);
+static void bs_input_to_output(VALUE handler, VALUE args, VALUE input_data, VALUE * output_data, int * exception_tag);
+static int bs_input_to_storage(VALUE handler, VALUE args, VALUE input_data, VALUE pathval, VALUE * storage_data);
 struct s2o_data;
 struct i2o_data;
 struct i2s_data;
@@ -127,6 +131,12 @@ bs_rb_coverage_running(VALUE self)
   return RTEST(cov) ? Qtrue : Qfalse;
 }
 
+static VALUE
+bs_rb_get_path(VALUE self, VALUE fname)
+{
+  return rb_get_path(fname);
+}
+
 /*
  * Ruby C extensions are initialized by calling Init_<extname>.
  *
@@ -138,23 +148,50 @@ void
 Init_bootsnap(void)
 {
   rb_mBootsnap = rb_define_module("Bootsnap");
+
+  rb_define_singleton_method(rb_mBootsnap, "rb_get_path", bs_rb_get_path, 1);
+
   rb_mBootsnap_CompileCache = rb_define_module_under(rb_mBootsnap, "CompileCache");
   rb_mBootsnap_CompileCache_Native = rb_define_module_under(rb_mBootsnap_CompileCache, "Native");
-
+  rb_cBootsnap_CompileCache_UNCOMPILABLE = rb_const_get(rb_mBootsnap_CompileCache, rb_intern("UNCOMPILABLE"));
+  rb_global_variable(&rb_cBootsnap_CompileCache_UNCOMPILABLE);
 
   current_ruby_revision = get_ruby_revision();
   current_ruby_platform = get_ruby_platform();
 
-
+  instrumentation_method = rb_intern("_instrument");
+
+  sym_miss = ID2SYM(rb_intern("miss"));
+  rb_global_variable(&sym_miss);
 
+  sym_stale = ID2SYM(rb_intern("stale"));
+  rb_global_variable(&sym_stale);
+
+  rb_define_module_function(rb_mBootsnap, "instrumentation_enabled=", bs_instrumentation_enabled_set, 1);
+  rb_define_module_function(rb_mBootsnap_CompileCache_Native, "readonly=", bs_readonly_set, 1);
   rb_define_module_function(rb_mBootsnap_CompileCache_Native, "coverage_running?", bs_rb_coverage_running, 0);
-  rb_define_module_function(rb_mBootsnap_CompileCache_Native, "fetch", bs_rb_fetch,
+  rb_define_module_function(rb_mBootsnap_CompileCache_Native, "fetch", bs_rb_fetch, 4);
+  rb_define_module_function(rb_mBootsnap_CompileCache_Native, "precompile", bs_rb_precompile, 3);
   rb_define_module_function(rb_mBootsnap_CompileCache_Native, "compile_option_crc32=", bs_compile_option_crc32_set, 1);
 
   current_umask = umask(0777);
   umask(current_umask);
 }
 
+static VALUE
+bs_instrumentation_enabled_set(VALUE self, VALUE enabled)
+{
+  instrumentation_enabled = RTEST(enabled);
+  return enabled;
+}
+
+static VALUE
+bs_readonly_set(VALUE self, VALUE enabled)
+{
+  readonly = RTEST(enabled);
+  return enabled;
+}
+
 /*
  * Bootsnap's ruby code registers a hook that notifies us via this function
  * when compile_option changes. These changes invalidate all existing caches.
@@ -172,22 +209,13 @@ bs_compile_option_crc32_set(VALUE self, VALUE crc32_v)
   return Qnil;
 }
 
-/*
- * We use FNV1a-64 to derive cache paths. The choice is somewhat arbitrary but
- * it has several nice properties:
- *
- * - Tiny implementation
- * - No external dependency
- * - Solid performance
- * - Solid randomness
- * - 32 bits doesn't feel collision-resistant enough; 64 is nice.
- */
 static uint64_t
-fnv1a_64_iter(uint64_t h, const
+fnv1a_64_iter(uint64_t h, const VALUE str)
 {
-  unsigned char *s = (unsigned char *)str;
+  unsigned char *s = (unsigned char *)RSTRING_PTR(str);
+  unsigned char *str_end = (unsigned char *)RSTRING_PTR(str) + RSTRING_LEN(str);
 
-  while (
+  while (s < str_end) {
     h ^= (uint64_t)*s++;
     h += (h << 1) + (h << 4) + (h << 5) + (h << 7) + (h << 8) + (h << 40);
   }
@@ -196,7 +224,7 @@ fnv1a_64_iter(uint64_t h, const char *str)
 }
 
 static uint64_t
-fnv1a_64(const
+fnv1a_64(const VALUE str)
 {
   uint64_t h = (uint64_t)0xcbf29ce484222325ULL;
   return fnv1a_64_iter(h, str);
@@ -217,7 +245,7 @@ get_ruby_revision(void)
   } else {
     uint64_t hash;
 
-    hash = fnv1a_64(
+    hash = fnv1a_64(ruby_revision);
     return (uint32_t)(hash >> 32);
   }
 }
@@ -225,10 +253,6 @@ get_ruby_revision(void)
 /*
  * When ruby's version doesn't change, but it's recompiled on a different OS
  * (or OS version), we need to invalidate the cache.
- *
- * We actually factor in some extra information here, to be extra confident
- * that we don't try to re-use caches that will not be compatible, by factoring
- * in utsname.version.
  */
 static uint32_t
 get_ruby_platform(void)
@@ -237,23 +261,8 @@ get_ruby_platform(void)
   VALUE ruby_platform;
 
   ruby_platform = rb_const_get(rb_cObject, rb_intern("RUBY_PLATFORM"));
-  hash = fnv1a_64(
-
-#ifdef _WIN32
-  return (uint32_t)(hash >> 32) ^ (uint32_t)GetVersion();
-#elif defined(__GLIBC__)
-  hash = fnv1a_64_iter(hash, gnu_get_libc_version());
+  hash = fnv1a_64(ruby_platform);
   return (uint32_t)(hash >> 32);
-#else
-  struct utsname utsname;
-
-  /* Not worth crashing if this fails; lose extra cache invalidation potential */
-  if (uname(&utsname) >= 0) {
-    hash = fnv1a_64_iter(hash, utsname.version);
-  }
-
-  return (uint32_t)(hash >> 32);
-#endif
 }
 
 /*
@@ -264,14 +273,13 @@ get_ruby_platform(void)
  * The path will look something like: <cachedir>/12/34567890abcdef
  */
 static void
-bs_cache_path(const char * cachedir, const
+bs_cache_path(const char * cachedir, const VALUE path, char (* cache_path)[MAX_CACHEPATH_SIZE])
 {
   uint64_t hash = fnv1a_64(path);
-
   uint8_t first_byte = (hash >> (64 - 8));
   uint64_t remainder = hash & 0x00ffffffffffffff;
 
-  sprintf(*cache_path, "%s/%
+  sprintf(*cache_path, "%s/%02"PRIx8"/%014"PRIx64, cachedir, first_byte, remainder);
 }
 
 /*
@@ -301,7 +309,7 @@ cache_key_equal(struct bs_cache_key * k1, struct bs_cache_key * k2)
  * conversions on the ruby VALUE arguments before passing them along.
  */
 static VALUE
-bs_rb_fetch(VALUE self, VALUE cachedir_v, VALUE path_v, VALUE handler)
+bs_rb_fetch(VALUE self, VALUE cachedir_v, VALUE path_v, VALUE handler, VALUE args)
 {
   FilePathValue(path_v);
 
@@ -317,11 +325,37 @@ bs_rb_fetch(VALUE self, VALUE cachedir_v, VALUE path_v, VALUE handler)
   char cache_path[MAX_CACHEPATH_SIZE];
 
   /* generate cache path to cache_path */
-  bs_cache_path(cachedir,
+  bs_cache_path(cachedir, path_v, &cache_path);
 
-  return bs_fetch(path, path_v, cache_path, handler);
+  return bs_fetch(path, path_v, cache_path, handler, args);
 }
 
+/*
+ * Entrypoint for Bootsnap::CompileCache::Native.precompile.
+ * Similar to fetch, but it only generate the cache if missing
+ * and doesn't return the content.
+ */
+static VALUE
+bs_rb_precompile(VALUE self, VALUE cachedir_v, VALUE path_v, VALUE handler)
+{
+  FilePathValue(path_v);
+
+  Check_Type(cachedir_v, T_STRING);
+  Check_Type(path_v, T_STRING);
+
+  if (RSTRING_LEN(cachedir_v) > MAX_CACHEDIR_SIZE) {
+    rb_raise(rb_eArgError, "cachedir too long");
+  }
+
+  char * cachedir = RSTRING_PTR(cachedir_v);
+  char * path = RSTRING_PTR(path_v);
+  char cache_path[MAX_CACHEPATH_SIZE];
+
+  /* generate cache path to cache_path */
+  bs_cache_path(cachedir, path_v, &cache_path);
+
+  return bs_precompile(path, path_v, cache_path, handler);
+}
 /*
  * Open the file we want to load/cache and generate a cache key for it if it
  * was loaded.
@@ -358,7 +392,9 @@ open_current_file(char * path, struct bs_cache_key * key, const char ** errno_pr
 }
 
 #define ERROR_WITH_ERRNO -1
-#define
+#define CACHE_MISS -2
+#define CACHE_STALE -3
+#define CACHE_UNCOMPILABLE -4
 
 /*
  * Read the cache key from the given fd, which must have position 0 (e.g.
@@ -366,15 +402,16 @@ open_current_file(char * path, struct bs_cache_key * key, const char ** errno_pr
  *
  * Possible return values:
  * - 0 (OK, key was loaded)
- * - CACHE_MISSING_OR_INVALID (-2)
 * - ERROR_WITH_ERRNO (-1, errno is set)
+ * - CACHE_MISS (-2)
+ * - CACHE_STALE (-3)
  */
 static int
 bs_read_key(int fd, struct bs_cache_key * key)
 {
   ssize_t nread = read(fd, key, KEY_SIZE);
   if (nread < 0) return ERROR_WITH_ERRNO;
-  if (nread < KEY_SIZE) return
+  if (nread < KEY_SIZE) return CACHE_STALE;
   return 0;
 }
 
@@ -384,7 +421,8 @@ bs_read_key(int fd, struct bs_cache_key * key)
 *
 * Possible return values:
 * - 0 (OK, key was loaded)
- * -
+ * - CACHE_MISS (-2)
+ * - CACHE_STALE (-3)
 * - ERROR_WITH_ERRNO (-1, errno is set)
 */
 static int
@@ -395,8 +433,7 @@ open_cache_file(const char * path, struct bs_cache_key * key, const char ** errn
   fd = open(path, O_RDONLY);
   if (fd < 0) {
     *errno_provenance = "bs_fetch:open_cache_file:open";
-
-    return ERROR_WITH_ERRNO;
+    return CACHE_MISS;
   }
 #ifdef _WIN32
   setmode(fd, O_BINARY);
@@ -428,7 +465,7 @@ open_cache_file(const char * path, struct bs_cache_key * key, const char ** errn
 * or exception, will be the final data returnable to the user.
 */
 static int
-fetch_cached_data(int fd, ssize_t data_size, VALUE handler, VALUE * output_data, int * exception_tag, const char ** errno_provenance)
+fetch_cached_data(int fd, ssize_t data_size, VALUE handler, VALUE args, VALUE * output_data, int * exception_tag, const char ** errno_provenance)
 {
   char * data = NULL;
   ssize_t nread;
@@ -439,24 +476,28 @@ fetch_cached_data(int fd, ssize_t data_size, VALUE handler, VALUE * output_data,
   if (data_size > 100000000000) {
     *errno_provenance = "bs_fetch:fetch_cached_data:datasize";
     errno = EINVAL; /* because wtf? */
-    ret =
+    ret = ERROR_WITH_ERRNO;
     goto done;
   }
   data = ALLOC_N(char, data_size);
   nread = read(fd, data, data_size);
   if (nread < 0) {
     *errno_provenance = "bs_fetch:fetch_cached_data:read";
-    ret =
+    ret = ERROR_WITH_ERRNO;
    goto done;
   }
   if (nread != data_size) {
-    ret =
+    ret = CACHE_STALE;
    goto done;
   }
 
-  storage_data =
+  storage_data = rb_str_new(data, data_size);
 
-  *exception_tag = bs_storage_to_output(handler, storage_data, output_data);
+  *exception_tag = bs_storage_to_output(handler, args, storage_data, output_data);
+  if (*output_data == rb_cBootsnap_CompileCache_UNCOMPILABLE) {
+    ret = CACHE_UNCOMPILABLE;
+    goto done;
+  }
   ret = 0;
 done:
   if (data != NULL) xfree(data);
@@ -624,7 +665,7 @@ bs_read_contents(int fd, size_t size, char ** contents, const char ** errno_prov
 * - Return storage_to_output(storage_data)
 */
 static VALUE
-bs_fetch(char * path, VALUE path_v, char * cache_path, VALUE handler)
+bs_fetch(char * path, VALUE path_v, char * cache_path, VALUE handler, VALUE args)
 {
   struct bs_cache_key cached_key, current_key;
   char * contents = NULL;
@@ -644,26 +685,42 @@ bs_fetch(char * path, VALUE path_v, char * cache_path, VALUE handler)
 
   /* Open the cache key if it exists, and read its cache key in */
   cache_fd = open_cache_file(cache_path, &cached_key, &errno_provenance);
-  if (cache_fd ==
+  if (cache_fd == CACHE_MISS || cache_fd == CACHE_STALE) {
     /* This is ok: valid_cache remains false, we re-populate it. */
+    if (RB_UNLIKELY(instrumentation_enabled)) {
+      rb_funcall(rb_mBootsnap, instrumentation_method, 2, cache_fd == CACHE_MISS ? sym_miss : sym_stale, path_v);
+    }
   } else if (cache_fd < 0) {
     goto fail_errno;
   } else {
     /* True if the cache existed and no invalidating changes have occurred since
     * it was generated. */
    valid_cache = cache_key_equal(&current_key, &cached_key);
+    if (RB_UNLIKELY(instrumentation_enabled)) {
+      if (!valid_cache) {
+        rb_funcall(rb_mBootsnap, instrumentation_method, 2, sym_stale, path_v);
+      }
+    }
   }
 
   if (valid_cache) {
     /* Fetch the cache data and return it if we're able to load it successfully */
     res = fetch_cached_data(
-      cache_fd, (ssize_t)cached_key.data_size, handler,
+      cache_fd, (ssize_t)cached_key.data_size, handler, args,
      &output_data, &exception_tag, &errno_provenance
    );
-    if (exception_tag != 0)
-    else if (res ==
-
-
+    if (exception_tag != 0) goto raise;
+    else if (res == CACHE_UNCOMPILABLE) {
+      /* If fetch_cached_data returned `Uncompilable` we fallback to `input_to_output`
+         This happens if we have say, an unsafe YAML cache, but try to load it in safe mode */
+      if (bs_read_contents(current_fd, current_key.size, &contents, &errno_provenance) < 0) goto fail_errno;
+      input_data = rb_str_new(contents, current_key.size);
+      bs_input_to_output(handler, args, input_data, &output_data, &exception_tag);
+      if (exception_tag != 0) goto raise;
+      goto succeed;
+    } else if (res == CACHE_MISS || res == CACHE_STALE) valid_cache = 0;
+    else if (res == ERROR_WITH_ERRNO) goto fail_errno;
+    else if (!NIL_P(output_data)) goto succeed; /* fast-path, goal */
   }
   close(cache_fd);
   cache_fd = -1;
@@ -671,37 +728,47 @@ bs_fetch(char * path, VALUE path_v, char * cache_path, VALUE handler)
 
   /* Read the contents of the source file into a buffer */
   if (bs_read_contents(current_fd, current_key.size, &contents, &errno_provenance) < 0) goto fail_errno;
-  input_data =
+  input_data = rb_str_new(contents, current_key.size);
 
   /* Try to compile the input_data using input_to_storage(input_data) */
-  exception_tag = bs_input_to_storage(handler, input_data, path_v, &storage_data);
+  exception_tag = bs_input_to_storage(handler, args, input_data, path_v, &storage_data);
   if (exception_tag != 0) goto raise;
   /* If input_to_storage raised Bootsnap::CompileCache::Uncompilable, don't try
   * to cache anything; just return input_to_output(input_data) */
-  if (storage_data ==
-    bs_input_to_output(handler, input_data, &output_data, &exception_tag);
+  if (storage_data == rb_cBootsnap_CompileCache_UNCOMPILABLE) {
+    bs_input_to_output(handler, args, input_data, &output_data, &exception_tag);
    if (exception_tag != 0) goto raise;
    goto succeed;
   }
   /* If storage_data isn't a string, we can't cache it */
   if (!RB_TYPE_P(storage_data, T_STRING)) goto invalid_type_storage_data;
 
-  /*
-
-
+  /* Attempt to write the cache key and storage_data to the cache directory.
+   * We do however ignore any failures to persist the cache, as it's better
+   * to move along, than to interrupt the process.
+   */
+  atomic_write_cache_file(cache_path, &current_key, storage_data, &errno_provenance);
 
   /* Having written the cache, now convert storage_data to output_data */
-  exception_tag = bs_storage_to_output(handler, storage_data, &output_data);
+  exception_tag = bs_storage_to_output(handler, args, storage_data, &output_data);
   if (exception_tag != 0) goto raise;
 
-
-
-
+  if (output_data == rb_cBootsnap_CompileCache_UNCOMPILABLE) {
+    /* If storage_to_output returned `Uncompilable` we fallback to `input_to_output` */
+    bs_input_to_output(handler, args, input_data, &output_data, &exception_tag);
+    if (exception_tag != 0) goto raise;
+  } else if (NIL_P(output_data)) {
+    /* If output_data is nil, delete the cache entry and generate the output
+     * using input_to_output */
    if (unlink(cache_path) < 0) {
-
-
+      /* If the cache was already deleted, it might be that another process did it before us.
+       * No point raising an error */
+      if (errno != ENOENT) {
+        errno_provenance = "bs_fetch:unlink";
+        goto fail_errno;
+      }
    }
-    bs_input_to_output(handler, input_data, &output_data, &exception_tag);
+    bs_input_to_output(handler, args, input_data, &output_data, &exception_tag);
    if (exception_tag != 0) goto raise;
   }
 
@@ -732,6 +799,79 @@ invalid_type_storage_data:
 #undef CLEANUP
 }
 
+static VALUE
+bs_precompile(char * path, VALUE path_v, char * cache_path, VALUE handler)
+{
+  struct bs_cache_key cached_key, current_key;
+  char * contents = NULL;
+  int cache_fd = -1, current_fd = -1;
+  int res, valid_cache = 0, exception_tag = 0;
+  const char * errno_provenance = NULL;
+
+  VALUE input_data; /* data read from source file, e.g. YAML or ruby source */
+  VALUE storage_data; /* compiled data, e.g. msgpack / binary iseq */
+
+  /* Open the source file and generate a cache key for it */
+  current_fd = open_current_file(path, &current_key, &errno_provenance);
+  if (current_fd < 0) goto fail;
+
+  /* Open the cache key if it exists, and read its cache key in */
+  cache_fd = open_cache_file(cache_path, &cached_key, &errno_provenance);
+  if (cache_fd == CACHE_MISS || cache_fd == CACHE_STALE) {
+    /* This is ok: valid_cache remains false, we re-populate it. */
+  } else if (cache_fd < 0) {
+    goto fail;
+  } else {
+    /* True if the cache existed and no invalidating changes have occurred since
+     * it was generated. */
+    valid_cache = cache_key_equal(&current_key, &cached_key);
+  }
+
+  if (valid_cache) {
+    goto succeed;
+  }
+
+  close(cache_fd);
+  cache_fd = -1;
+  /* Cache is stale, invalid, or missing. Regenerate and write it out. */
+
+  /* Read the contents of the source file into a buffer */
+  if (bs_read_contents(current_fd, current_key.size, &contents, &errno_provenance) < 0) goto fail;
+  input_data = rb_str_new(contents, current_key.size);
+
+  /* Try to compile the input_data using input_to_storage(input_data) */
+  exception_tag = bs_input_to_storage(handler, Qnil, input_data, path_v, &storage_data);
+  if (exception_tag != 0) goto fail;
+
+  /* If input_to_storage raised Bootsnap::CompileCache::Uncompilable, don't try
+   * to cache anything; just return false */
+  if (storage_data == rb_cBootsnap_CompileCache_UNCOMPILABLE) {
+    goto fail;
+  }
+  /* If storage_data isn't a string, we can't cache it */
+  if (!RB_TYPE_P(storage_data, T_STRING)) goto fail;
+
+  /* Write the cache key and storage_data to the cache directory */
+  res = atomic_write_cache_file(cache_path, &current_key, storage_data, &errno_provenance);
+  if (res < 0) goto fail;
+
+  goto succeed;
+
+#define CLEANUP \
+  if (contents != NULL) xfree(contents); \
+  if (current_fd >= 0) close(current_fd); \
+  if (cache_fd >= 0) close(cache_fd);
+
+succeed:
+  CLEANUP;
+  return Qtrue;
+fail:
+  CLEANUP;
+  return Qfalse;
+#undef CLEANUP
+}
+
+
 /*****************************************************************************/
 /********************* Handler Wrappers **************************************/
 /*****************************************************************************
@@ -751,11 +891,13 @@ invalid_type_storage_data:
 
 struct s2o_data {
   VALUE handler;
+  VALUE args;
   VALUE storage_data;
 };
 
 struct i2o_data {
   VALUE handler;
+  VALUE args;
   VALUE input_data;
 };
 
@@ -766,29 +908,31 @@ struct i2s_data {
 };
 
 static VALUE
-
+try_storage_to_output(VALUE arg)
 {
   struct s2o_data * data = (struct s2o_data *)arg;
-  return rb_funcall(data->handler, rb_intern("storage_to_output"),
+  return rb_funcall(data->handler, rb_intern("storage_to_output"), 2, data->storage_data, data->args);
 }
 
 static int
-bs_storage_to_output(VALUE handler, VALUE storage_data, VALUE * output_data)
+bs_storage_to_output(VALUE handler, VALUE args, VALUE storage_data, VALUE * output_data)
 {
   int state;
   struct s2o_data s2o_data = {
     .handler = handler,
+    .args = args,
    .storage_data = storage_data,
   };
-  *output_data = rb_protect(
+  *output_data = rb_protect(try_storage_to_output, (VALUE)&s2o_data, &state);
   return state;
 }
 
 static void
-bs_input_to_output(VALUE handler, VALUE input_data, VALUE * output_data, int * exception_tag)
+bs_input_to_output(VALUE handler, VALUE args, VALUE input_data, VALUE * output_data, int * exception_tag)
 {
   struct i2o_data i2o_data = {
     .handler = handler,
+    .args = args,
    .input_data = input_data,
   };
   *output_data = rb_protect(prot_input_to_output, (VALUE)&i2o_data, exception_tag);
@@ -798,7 +942,7 @@ static VALUE
 prot_input_to_output(VALUE arg)
 {
   struct i2o_data * data = (struct i2o_data *)arg;
-  return rb_funcall(data->handler, rb_intern("input_to_output"),
+  return rb_funcall(data->handler, rb_intern("input_to_output"), 2, data->input_data, data->args);
 }
 
 static VALUE
@@ -808,31 +952,20 @@ try_input_to_storage(VALUE arg)
   return rb_funcall(data->handler, rb_intern("input_to_storage"), 2, data->input_data, data->pathval);
 }
 
-static VALUE
-rescue_input_to_storage(VALUE arg, VALUE e)
-{
-  return uncompilable;
-}
-
-static VALUE
-prot_input_to_storage(VALUE arg)
-{
-  struct i2s_data * data = (struct i2s_data *)arg;
-  return rb_rescue2(
-    try_input_to_storage, (VALUE)data,
-    rescue_input_to_storage, Qnil,
-    rb_eBootsnap_CompileCache_Uncompilable, 0);
-}
-
 static int
-bs_input_to_storage(VALUE handler, VALUE input_data, VALUE pathval, VALUE * storage_data)
+bs_input_to_storage(VALUE handler, VALUE args, VALUE input_data, VALUE pathval, VALUE * storage_data)
 {
-
-
-
-
-
-
-
-
+  if (readonly) {
+    *storage_data = rb_cBootsnap_CompileCache_UNCOMPILABLE;
+    return 0;
+  } else {
+    int state;
+    struct i2s_data i2s_data = {
+      .handler = handler,
+      .input_data = input_data,
+      .pathval = pathval,
+    };
+    *storage_data = rb_protect(try_input_to_storage, (VALUE)&i2s_data, &state);
+    return state;
+  }
 }
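Taken together, the new signatures above spell out the handler protocol that `Bootsnap::CompileCache::Native.fetch(cache_dir, path, handler, args)` now expects: `input_to_storage(input, path)` compiles source into a cacheable string (or returns `Bootsnap::CompileCache::UNCOMPILABLE` to skip caching), `storage_to_output(data, args)` turns a cache hit back into a usable value, and `input_to_output(input, args)` is the uncached fallback. The sketch below is a rough illustration under those assumptions; `JSONDemoHandler` and the JSON round-trip are hypothetical, not one of bootsnap's own handlers (those live in `compile_cache/{iseq,yaml,json}.rb`), and it assumes the native extension has already been loaded.

```ruby
require "bootsnap"
require "bootsnap/compile_cache" # Bootsnap::CompileCache::UNCOMPILABLE is defined on the Ruby side;
                                 # the C code above looks it up with rb_const_get
require "json"

# Hypothetical handler matching the callback arities used by bs_fetch above.
module JSONDemoHandler
  extend self

  def input_to_storage(input, _path)
    JSON.generate(JSON.parse(input)) # normalized JSON is what gets written to the cache
  rescue JSON::ParserError
    Bootsnap::CompileCache::UNCOMPILABLE # tells bs_fetch not to cache this file
  end

  def storage_to_output(data, _args)
    JSON.parse(data)
  end

  def input_to_output(input, _args)
    JSON.parse(input)
  end
end

# Assumes the bootsnap native extension is loaded (e.g. after Bootsnap::CompileCache.setup).
settings = Bootsnap::CompileCache::Native.fetch(
  "tmp/cache/bootsnap-demo", "config/settings.json", JSONDemoHandler, nil
)
```

The same handler shape is what `Native.precompile(cache_dir, path, handler)` drives: it runs only the `input_to_storage` half, writes the cache entry, and returns true or false instead of the loaded value, which is what makes ahead-of-time cache warming (the new `bootsnap` CLI in this release) possible.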