mini_racer 0.17.0.pre5 → 0.17.0.pre6
- checksums.yaml +4 -4
- data/CHANGELOG +4 -0
- data/ext/mini_racer_extension/extconf.rb +2 -0
- data/ext/mini_racer_extension/mini_racer_extension.c +1564 -0
- data/ext/mini_racer_extension/mini_racer_v8.cc +840 -0
- data/ext/mini_racer_extension/mini_racer_v8.h +56 -0
- data/ext/mini_racer_extension/serde.c +747 -0
- data/lib/mini_racer/truffleruby.rb +31 -4
- data/lib/mini_racer/version.rb +1 -1
- data/lib/mini_racer.rb +14 -387
- metadata +10 -7
- data/ext/mini_racer_extension/mini_racer_extension.cc +0 -1942
@@ -0,0 +1,1564 @@
#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <pthread.h>

#include "ruby.h"
#include "ruby/encoding.h"
#include "ruby/version.h"
#include "ruby/thread.h"
#include "serde.c"
#include "mini_racer_v8.h"

#if RUBY_API_VERSION_CODE < 3*10000+4*100 // 3.4.0
static inline void rb_thread_lock_native_thread(void)
{
    // Without rb_thread_lock_native_thread, V8 in single-threaded mode is
    // prone to crash with debug checks like this...
    //
    // # Fatal error in ../deps/v8/src/base/platform/platform-posix.cc, line 1350
    // # Debug check failed: MainThreadIsCurrentThread().
    //
    // ...because the Ruby runtime clobbers thread-local variables when it
    // context-switches threads. You have been warned.
}
#endif

#define countof(x) (sizeof(x) / sizeof(*(x)))
#define endof(x) ((x) + countof(x))

// mostly RO: assigned once by platform_set_flag1 while holding |flags_mtx|,
// from then on read-only and accessible without holding locks
int single_threaded;

// work around missing pthread_barrier_t on macOS
typedef struct Barrier
{
    pthread_mutex_t mtx;
    pthread_cond_t cv;
    int count, in, out;
} Barrier;

static inline int barrier_init(Barrier *b, int count)
{
    int r;

    if ((r = pthread_mutex_init(&b->mtx, NULL)))
        return r;
    if ((r = pthread_cond_init(&b->cv, NULL))) {
        pthread_mutex_destroy(&b->mtx);
        return r;
    }
    b->count = count;
    b->out = 0;
    b->in = 0;
    return 0;
}

static inline void barrier_destroy(Barrier *b)
{
    pthread_mutex_destroy(&b->mtx);
    pthread_cond_destroy(&b->cv);
}
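
// barrier_wait is a two-phase rendezvous: arrivals first wait for any
// previous round to drain (|out| != 0), then count themselves in via |in|;
// the last arrival flips |in| into |out| and broadcasts, and the last
// thread to leave (the one that takes |out| to zero) returns non-zero,
// mirroring pthread_barrier_wait's PTHREAD_BARRIER_SERIAL_THREAD.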
static inline int barrier_wait(Barrier *b)
{
    int last;

    pthread_mutex_lock(&b->mtx);
    while (b->out)
        pthread_cond_wait(&b->cv, &b->mtx);
    if (++b->in == b->count) {
        b->in = 0;
        b->out = b->count;
        pthread_cond_broadcast(&b->cv);
    } else {
        do
            pthread_cond_wait(&b->cv, &b->mtx);
        while (b->in);
    }
    last = (--b->out == 0);
    if (last)
        pthread_cond_broadcast(&b->cv);
    pthread_mutex_unlock(&b->mtx);
    return last;
}
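
// A Context owns one V8 isolate. In the default multi-threaded mode a
// dedicated native thread runs the isolate and Ruby threads exchange
// serialized request/response buffers with it through |req|/|res| under
// |mtx| and |cv|; in single-threaded mode the calling Ruby thread enters
// the isolate directly (see rendezvous_nogvl below).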
typedef struct Context
{
    int depth; // call depth, protected by |rr_mtx|
    // protected by |mtx|; RW for ruby threads, RO for v8 thread;
    // atomic because context_stop (which can be called from other ruby
    // threads) writes it without holding |mtx|, to avoid deadlocking
    // 1=shut down v8, 2=free memory; note that only the v8 thread
    // frees the memory and it intentionally stays around until
    // the ruby object is gc'd, otherwise lifecycle management
    // gets too complicated
    atomic_int quit;
    int verbose_exceptions;
    int64_t idle_gc, max_memory, timeout;
    struct State *pst; // used by v8 thread
    VALUE procs; // array of js -> ruby callbacks
    VALUE exception; // pending exception or Qnil
    Buf req, res; // ruby->v8 request/response, mediated by |mtx| and |cv|
    Buf snapshot;
    // |rr_mtx| stands for "recursive ruby mutex"; it's used to exclude
    // other ruby threads but allow reentrancy from the same ruby thread
    // (think ruby->js->ruby->js calls)
    pthread_mutex_t rr_mtx;
    pthread_mutex_t mtx;
    pthread_cond_t cv;
    struct {
        pthread_mutex_t mtx;
        pthread_cond_t cv;
        int cancel;
    } wd; // watchdog
    Barrier early_init, late_init;
} Context;

typedef struct Snapshot {
    VALUE blob;
} Snapshot;

static void context_destroy(Context *c);
static void context_free(void *arg);
static void context_mark(void *arg);
static size_t context_size(const void *arg);

static const rb_data_type_t context_type = {
    .wrap_struct_name = "mini_racer/context",
    .function = {
        .dfree = context_free,
        .dmark = context_mark,
        .dsize = context_size,
    },
};

static void snapshot_free(void *arg);
static void snapshot_mark(void *arg);
static size_t snapshot_size(const void *arg);

static const rb_data_type_t snapshot_type = {
    .wrap_struct_name = "mini_racer/snapshot",
    .function = {
        .dfree = snapshot_free,
        .dmark = snapshot_mark,
        .dsize = snapshot_size,
    },
};

static VALUE platform_init_error;
static VALUE context_disposed_error;
static VALUE parse_error;
static VALUE memory_error;
static VALUE runtime_error;
static VALUE internal_error;
static VALUE snapshot_error;
static VALUE terminated_error;
static VALUE context_class;
static VALUE snapshot_class;
static VALUE date_time_class;
static VALUE js_function_class;

static pthread_mutex_t flags_mtx = PTHREAD_MUTEX_INITIALIZER;
static Buf flags; // protected by |flags_mtx|

struct rendezvous_nogvl
{
    Context *context;
    Buf *req, *res;
};

// arg == &(struct rendezvous_nogvl){...}
static void *rendezvous_callback(void *arg);

// note: must be stack-allocated or VALUEs won't be visible to ruby's GC
typedef struct State
{
    VALUE a, b;
} State;
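
// Deserialization builds Ruby values with an explicit stack machine: each
// des_* callback either put()s a finished value into the container on top
// of the stack or push()es/pop()s a new container. The fixed-size stack
// doubles as a recursion limit for deeply nested input.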
// note: must be stack-allocated or VALUEs won't be visible to ruby's GC
typedef struct DesCtx
{
    State *tos;
    VALUE refs; // array
    char err[64];
    State stack[512];
} DesCtx;

static void DesCtx_init(DesCtx *c)
{
    c->tos = c->stack;
    c->refs = rb_ary_new();
    *c->tos = (State){Qundef, Qundef};
    *c->err = '\0';
}

static void put(DesCtx *c, VALUE v)
{
    VALUE *a, *b;

    if (*c->err)
        return;
    a = &c->tos->a;
    b = &c->tos->b;
    switch (TYPE(*a)) {
    case T_ARRAY:
        rb_ary_push(*a, v);
        break;
    case T_HASH:
        if (*b == Qundef) {
            *b = v;
        } else {
            *b = rb_funcall(*b, rb_intern("to_s"), 0);
            rb_hash_aset(*a, *b, v);
            *b = Qundef;
        }
        break;
    case T_UNDEF:
        *a = v;
        break;
    default:
        snprintf(c->err, sizeof(c->err), "bad state");
        return;
    }
}

static void push(DesCtx *c, VALUE v)
{
    if (*c->err)
        return;
    if (c->tos == endof(c->stack)) {
        snprintf(c->err, sizeof(c->err), "stack overflow");
        return;
    }
    *++c->tos = (State){v, Qundef};
    rb_ary_push(c->refs, v);
}

// see also des_named_props_end
static void pop(DesCtx *c)
{
    if (*c->err)
        return;
    if (c->tos == c->stack) {
        snprintf(c->err, sizeof(c->err), "stack underflow");
        return;
    }
    put(c, (*c->tos--).a);
}

static void des_null(void *arg)
{
    put(arg, Qnil);
}

static void des_undefined(void *arg)
{
    put(arg, Qnil);
}

static void des_bool(void *arg, int v)
{
    put(arg, v ? Qtrue : Qfalse);
}

static void des_int(void *arg, int64_t v)
{
    put(arg, LONG2FIX(v));
}

static void des_num(void *arg, double v)
{
    put(arg, DBL2NUM(v));
}

static void des_date(void *arg, double v)
{
    double sec, usec;

    if (!isfinite(v))
        rb_raise(rb_eRangeError, "invalid Date");
    sec = v/1e3;
    usec = 1e3 * fmod(v, 1e3);
    put(arg, rb_time_new(sec, usec));
}
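
// des_bigint reconstructs a Ruby Integer from little-endian magnitude
// words plus a sign flag. Example: a magnitude of 2**127 arrives as two
// words {0, 0x8000000000000000}; the set top bit would make rb_big_unpack
// sign-extend (it assumes 2's complement input), so a zero limb is
// appended first and the sign is applied afterwards by multiplying by -1.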
// note: v8 stores bigints in 1's complement, ruby in 2's complement,
// so we have to take additional steps to ensure correct conversion
static void des_bigint(void *arg, const void *p, size_t n, int sign)
{
    VALUE v;
    size_t i;
    DesCtx *c;
    unsigned long *a, t, limbs[65]; // +1 to suppress sign extension

    c = arg;
    if (*c->err)
        return;
    if (n > sizeof(limbs) - sizeof(*limbs)) {
        snprintf(c->err, sizeof(c->err), "bigint too big");
        return;
    }
    a = limbs;
    t = 0;
    for (i = 0; i < n; a++, i += sizeof(*a)) {
        memcpy(a, (char *)p + i, sizeof(*a));
        t = *a;
    }
    if (t >> 63)
        *a++ = 0; // suppress sign extension
    v = rb_big_unpack(limbs, a-limbs);
    if (sign < 0)
        v = rb_big_mul(v, LONG2FIX(-1));
    put(c, v);
}

static void des_string(void *arg, const char *s, size_t n)
{
    put(arg, rb_utf8_str_new(s, n));
}

static void des_string8(void *arg, const uint8_t *s, size_t n)
{
    put(arg, rb_enc_str_new((char *)s, n, rb_ascii8bit_encoding()));
}

// des_string16: |s| is not word aligned
// des_string16: |n| is in bytes, not code points
static void des_string16(void *arg, const void *s, size_t n)
{
    rb_encoding *e;
    DesCtx *c;

    c = arg;
    if (*c->err)
        return;
    // TODO(bnoordhuis) replace this hack with something more principled
    if (n == sizeof(js_function_marker) && !memcmp(js_function_marker, s, n))
        return put(c, rb_funcall(js_function_class, rb_intern("new"), 0));
    e = rb_enc_find("UTF-16LE"); // TODO cache?
    if (!e) {
        snprintf(c->err, sizeof(c->err), "no UTF16-LE encoding");
        return;
    }
    put(c, rb_enc_str_new((char *)s, n, e));
}

// ruby doesn't really have a concept of a byte array so store it as
// an 8-bit string instead; it's either that or a regular array of
// numbers, but the latter is markedly less efficient, storage-wise
static void des_arraybuffer(void *arg, const void *s, size_t n)
{
    put(arg, rb_enc_str_new((char *)s, n, rb_ascii8bit_encoding()));
}

static void des_array_begin(void *arg)
{
    push(arg, rb_ary_new());
}

static void des_array_end(void *arg)
{
    pop(arg);
}

static void des_named_props_begin(void *arg)
{
    push(arg, rb_hash_new());
}

// see also pop
static void des_named_props_end(void *arg)
{
    DesCtx *c;

    c = arg;
    if (*c->err)
        return;
    if (c->tos == c->stack) {
        snprintf(c->err, sizeof(c->err), "stack underflow");
        return;
    }
    c->tos--; // dropped, no way to represent in ruby
}

static void des_object_begin(void *arg)
{
    push(arg, rb_hash_new());
}

static void des_object_end(void *arg)
{
    pop(arg);
}

static void des_object_ref(void *arg, uint32_t id)
{
    DesCtx *c;
    VALUE v;

    c = arg;
    v = rb_ary_entry(c->refs, id);
    put(c, v);
}

static void des_error_begin(void *arg)
{
    push(arg, rb_class_new_instance(0, NULL, rb_eRuntimeError));
}

static void des_error_end(void *arg)
{
    pop(arg);
}

static int collect(VALUE k, VALUE v, VALUE a)
{
    rb_ary_push(a, k);
    rb_ary_push(a, v);
    return ST_CONTINUE;
}
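
// serialize1 walks a Ruby value graph. |refs| maps already-visited arrays
// and hashes to numeric ids so that shared references and cycles become
// ser_object_ref back-references instead of infinite recursion. A hash is
// encoded as a plain JS object only when every key is a fixnum, string,
// or symbol; anything else would need a JS Map (still a TODO below).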
static int serialize1(Ser *s, VALUE refs, VALUE v)
{
    unsigned long limbs[64];
    VALUE a, t, id;
    size_t i, n;
    int sign;

    if (*s->err)
        return -1;
    switch (TYPE(v)) {
    case T_ARRAY:
        id = rb_hash_lookup(refs, v);
        if (NIL_P(id)) {
            n = RARRAY_LENINT(v);
            i = rb_hash_size_num(refs);
            rb_hash_aset(refs, v, LONG2FIX(i));
            ser_array_begin(s, n);
            for (i = 0; i < n; i++)
                if (serialize1(s, refs, rb_ary_entry(v, i)))
                    return -1;
            ser_array_end(s, n);
        } else {
            ser_object_ref(s, FIX2LONG(id));
        }
        break;
    case T_HASH:
        id = rb_hash_lookup(refs, v);
        if (NIL_P(id)) {
            a = rb_ary_new();
            i = rb_hash_size_num(refs);
            n = rb_hash_size_num(v);
            rb_hash_aset(refs, v, LONG2FIX(i));
            rb_hash_foreach(v, collect, a);
            for (i = 0; i < 2*n; i += 2) {
                t = rb_ary_entry(a, i);
                switch (TYPE(t)) {
                case T_FIXNUM:
                case T_STRING:
                case T_SYMBOL:
                    continue;
                }
                break;
            }
            if (i == 2*n) {
                ser_object_begin(s);
                for (i = 0; i < 2*n; i += 2) {
                    if (serialize1(s, refs, rb_ary_entry(a, i+0)))
                        return -1;
                    if (serialize1(s, refs, rb_ary_entry(a, i+1)))
                        return -1;
                }
                ser_object_end(s, n);
            } else {
                return bail(&s->err, "TODO serialize as Map");
            }
        } else {
            ser_object_ref(s, FIX2LONG(id));
        }
        break;
    case T_DATA:
        if (date_time_class == CLASS_OF(v)) {
            v = rb_funcall(v, rb_intern("to_time"), 0);
        }
        if (rb_cTime == CLASS_OF(v)) {
            struct timeval tv = rb_time_timeval(v);
            ser_date(s, tv.tv_sec*1e3 + tv.tv_usec/1e3);
        } else {
            static const char undefined_conversion[] = "Undefined Conversion";
            ser_string(s, undefined_conversion, sizeof(undefined_conversion)-1);
        }
        break;
    case T_NIL:
        ser_null(s);
        break;
    case T_UNDEF:
        ser_undefined(s);
        break;
    case T_TRUE:
        ser_bool(s, 1);
        break;
    case T_FALSE:
        ser_bool(s, 0);
        break;
    case T_BIGNUM:
        // note: v8 stores bigints in 1's complement, ruby in 2's complement,
        // so we have to take additional steps to ensure correct conversion
        memset(limbs, 0, sizeof(limbs));
        sign = rb_big_sign(v) ? 1 : -1;
        if (sign < 0)
            v = rb_big_mul(v, LONG2FIX(-1));
        rb_big_pack(v, limbs, countof(limbs));
        ser_bigint(s, limbs, countof(limbs), sign);
        break;
    case T_FIXNUM:
        ser_int(s, FIX2LONG(v));
        break;
    case T_FLOAT:
        ser_num(s, NUM2DBL(v));
        break;
    case T_SYMBOL:
        v = rb_sym2str(v);
        // fallthru
    case T_STRING:
        ser_string(s, RSTRING_PTR(v), RSTRING_LENINT(v));
        break;
    default:
        snprintf(s->err, sizeof(s->err), "unsupported type %x", TYPE(v));
        return -1;
    }
    return 0;
}
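
// deadline_ms converts a relative timeout in milliseconds to an absolute
// timespec: deadline_ms(1500) is "now" plus 1 second and 500,000,000
// nanoseconds. macOS has no pthread_condattr_setclock, so waits there
// measure against CLOCK_REALTIME instead of CLOCK_MONOTONIC.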
static struct timespec deadline_ms(int ms)
{
    static const int64_t ns_per_sec = 1000*1000*1000;
    struct timespec t;

#ifdef __APPLE__
    clock_gettime(CLOCK_REALTIME, &t);
#else
    clock_gettime(CLOCK_MONOTONIC, &t);
#endif
    t.tv_sec += ms/1000;
    t.tv_nsec += ms%1000 * ns_per_sec/1000;
    while (t.tv_nsec >= ns_per_sec) {
        t.tv_nsec -= ns_per_sec;
        t.tv_sec++;
    }
    return t;
}

static int timespec_le(struct timespec a, struct timespec b)
{
    if (a.tv_sec < b.tv_sec) return 1;
    return a.tv_sec == b.tv_sec && a.tv_nsec <= b.tv_nsec;
}

static int deadline_exceeded(struct timespec deadline)
{
    return timespec_le(deadline, deadline_ms(0));
}
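
// The watchdog thread sleeps until either the deadline passes, in which
// case it calls v8_terminate_execution on the isolate, or until
// v8_timedwait cancels it because the JS call finished in time.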
static void *v8_watchdog(void *arg)
{
    struct timespec deadline;
    Context *c;

    c = arg;
    deadline = deadline_ms(c->timeout);
    pthread_mutex_lock(&c->wd.mtx);
    for (;;) {
        if (c->wd.cancel)
            break;
        pthread_cond_timedwait(&c->wd.cv, &c->wd.mtx, &deadline);
        if (c->wd.cancel)
            break;
        if (deadline_exceeded(deadline)) {
            v8_terminate_execution(c->pst);
            break;
        }
    }
    pthread_mutex_unlock(&c->wd.mtx);
    return NULL;
}

static void v8_timedwait(Context *c, const uint8_t *p, size_t n,
                         void (*func)(struct State *pst, const uint8_t *p, size_t n))
{
    pthread_t thr;
    int r;

    r = -1;
    if (c->timeout > 0 && (r = pthread_create(&thr, NULL, v8_watchdog, c))) {
        fprintf(stderr, "mini_racer: watchdog: pthread_create: %s\n", strerror(r));
        fflush(stderr);
    }
    func(c->pst, p, n);
    if (r)
        return;
    pthread_mutex_lock(&c->wd.mtx);
    c->wd.cancel = 1;
    pthread_cond_signal(&c->wd.cv);
    pthread_mutex_unlock(&c->wd.mtx);
    pthread_join(thr, NULL);
    c->wd.cancel = 0;
}
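
// Requests from the Ruby side are framed as a single opcode byte followed
// by a serialized payload:
//   'A' attach          'C' call            'E' eval
//   'H' heap snapshot   'I' idle notify     'P' pump message loop
//   'S' heap stats      'T' snapshot        'W' warmup
//   'L' low memory notification (replies before running the GC)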
static void dispatch1(Context *c, const uint8_t *p, size_t n)
{
    uint8_t b;

    assert(n > 0);
    switch (*p) {
    case 'A': return v8_attach(c->pst, p+1, n-1);
    case 'C': return v8_timedwait(c, p+1, n-1, v8_call);
    case 'E': return v8_timedwait(c, p+1, n-1, v8_eval);
    case 'H': return v8_heap_snapshot(c->pst);
    case 'I': return v8_idle_notification(c->pst, p+1, n-1);
    case 'P': return v8_pump_message_loop(c->pst);
    case 'S': return v8_heap_stats(c->pst);
    case 'T': return v8_snapshot(c->pst, p+1, n-1);
    case 'W': return v8_warmup(c->pst, p+1, n-1);
    case 'L':
        b = 0;
        v8_reply(c, &b, 1); // doesn't matter what as long as it's not empty
        return v8_low_memory_notification(c->pst);
    }
    fprintf(stderr, "mini_racer: bad request %02x\n", *p);
    fflush(stderr);
}

static void dispatch(Context *c)
{
    buf_reset(&c->res);
    dispatch1(c, c->req.buf, c->req.len);
    buf_reset(&c->req);
}

// called by v8_isolate_and_context
void v8_thread_main(Context *c, struct State *pst)
{
    struct timespec deadline;

    c->pst = pst;
    barrier_wait(&c->late_init);
    pthread_mutex_lock(&c->mtx);
    while (!c->quit) {
        if (!c->req.len) {
            if (c->idle_gc > 0) {
                deadline = deadline_ms(c->idle_gc);
                pthread_cond_timedwait(&c->cv, &c->mtx, &deadline);
                if (deadline_exceeded(deadline))
                    v8_low_memory_notification(c->pst);
            } else {
                pthread_cond_wait(&c->cv, &c->mtx);
            }
        }
        if (!c->req.len)
            continue; // spurious wakeup or quit signal from other thread
        dispatch(c);
        pthread_cond_signal(&c->cv);
    }
}

// called by v8_thread_main and from mini_racer_v8.cc,
// in all cases with Context.mtx held
void v8_dispatch(Context *c)
{
    dispatch1(c, c->req.buf, c->req.len);
    buf_reset(&c->req);
}

// called from mini_racer_v8.cc with Context.mtx held
// only called when inside v8_call, v8_eval, or v8_pump_message_loop
void v8_roundtrip(Context *c, const uint8_t **p, size_t *n)
{
    struct rendezvous_nogvl *args;

    buf_reset(&c->req);
    if (single_threaded) {
        assert(*c->res.buf == 'c'); // js -> ruby callback
        args = &(struct rendezvous_nogvl){c, &c->req, &c->res};
        rb_thread_call_with_gvl(rendezvous_callback, args);
    } else {
        pthread_cond_signal(&c->cv);
        while (!c->req.len)
            pthread_cond_wait(&c->cv, &c->mtx);
    }
    buf_reset(&c->res);
    *p = c->req.buf;
    *n = c->req.len;
}

// called from mini_racer_v8.cc with Context.mtx held
void v8_reply(Context *c, const uint8_t *p, size_t n)
{
    buf_put(&c->res, p, n);
}

static void v8_once_init(void)
{
    static pthread_once_t once = PTHREAD_ONCE_INIT;
    pthread_once(&once, v8_global_init);
}

static void *v8_thread_start(void *arg)
{
    Context *c;

    c = arg;
    barrier_wait(&c->early_init);
    v8_once_init();
    v8_thread_init(c, c->snapshot.buf, c->snapshot.len, c->max_memory, c->verbose_exceptions);
    while (c->quit < 2)
        pthread_cond_wait(&c->cv, &c->mtx);
    context_destroy(c);
    return NULL;
}

static VALUE deserialize1(const uint8_t *p, size_t n)
{
    char err[64];
    DesCtx d;

    DesCtx_init(&d);
    if (des(&err, p, n, &d))
        rb_raise(runtime_error, "%s", err);
    if (d.tos != d.stack) // should not happen
        rb_raise(runtime_error, "parse stack not empty");
    return d.tos->a;
}

static VALUE deserialize(VALUE arg)
{
    Buf *b;

    b = (void *)arg;
    return deserialize1(b->buf, b->len);
}

// called with |rr_mtx| and GVL held; can raise exception
static VALUE rendezvous_callback_do(VALUE arg)
{
    struct rendezvous_nogvl *a;
    VALUE func, args;
    Context *c;
    Buf *b;

    a = (void *)arg;
    b = a->res;
    c = a->context;
    assert(b->len > 0);
    assert(*b->buf == 'c');
    args = deserialize1(b->buf+1, b->len-1); // skip 'c' marker
    func = rb_ary_pop(args); // callback id
    func = rb_ary_entry(c->procs, FIX2LONG(func));
    return rb_funcall2(func, rb_intern("call"), RARRAY_LENINT(args), RARRAY_PTR(args));
}

// called with |rr_mtx| and GVL held; |mtx| is unlocked
// callback data is in |a->res|, serialized result goes in |a->req|
static void *rendezvous_callback(void *arg)
{
    struct rendezvous_nogvl *a;
    Context *c;
    int exc;
    VALUE r;
    Ser s;

    a = arg;
    c = a->context;
    r = rb_protect(rendezvous_callback_do, (VALUE)a, &exc);
    if (exc) {
        c->exception = rb_errinfo();
        rb_set_errinfo(Qnil);
        goto fail;
    }
    ser_init1(&s, 'c'); // callback reply
    ser_array_begin(&s, 2);
    // either [result, undefined] or [undefined, err]
    if (exc)
        ser_undefined(&s);
    if (serialize1(&s, rb_hash_new(), r)) { // should not happen
        c->exception = rb_exc_new_cstr(internal_error, s.err);
        ser_reset(&s);
        goto fail;
    }
    if (!exc)
        ser_undefined(&s);
    ser_array_end(&s, 2);
out:
    buf_move(&s.b, a->req);
    return NULL;
fail:
    ser_init1(&s, 'e'); // exception pending
    goto out;
}
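
// One request/response round trip with the v8 thread: hand over |req|,
// wake the v8 thread, sleep until |res| is filled. A response starting
// with 'c' is not a result but a js -> ruby callback request; it is
// served with the GVL re-acquired and the loop then waits for the real
// response. |rr_mtx| keeps other Ruby threads out while still letting the
// same thread re-enter for nested ruby->js->ruby calls.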
static inline void *rendezvous_nogvl(void *arg)
{
    struct rendezvous_nogvl *a;
    Context *c;

    a = arg;
    c = a->context;
    pthread_mutex_lock(&c->rr_mtx);
    if (c->depth > 0 && c->depth%50 == 0) { // TODO stop steep recursion
        fprintf(stderr, "mini_racer: deep js->ruby->js recursion, depth=%d\n", c->depth);
        fflush(stderr);
    }
    c->depth++;
next:
    pthread_mutex_lock(&c->mtx);
    assert(c->req.len == 0);
    assert(c->res.len == 0);
    buf_move(a->req, &c->req); // v8 thread takes ownership of req
    if (single_threaded) {
        v8_single_threaded_enter(c->pst, c, dispatch);
    } else {
        pthread_cond_signal(&c->cv);
        do pthread_cond_wait(&c->cv, &c->mtx); while (!c->res.len);
    }
    buf_move(&c->res, a->res);
    pthread_mutex_unlock(&c->mtx);
    if (*a->res->buf == 'c') { // js -> ruby callback?
        rb_thread_call_with_gvl(rendezvous_callback, a);
        goto next;
    }
    c->depth--;
    pthread_mutex_unlock(&c->rr_mtx);
    return NULL;
}

static void rendezvous_no_des(Context *c, Buf *req, Buf *res)
{
    if (atomic_load(&c->quit)) {
        buf_reset(req);
        rb_raise(context_disposed_error, "disposed context");
    }
    rb_nogvl(rendezvous_nogvl, &(struct rendezvous_nogvl){c, req, res},
             NULL, NULL, 0);
}

// send request to & receive reply from v8 thread; takes ownership of |req|
// can raise exceptions and longjmp away but won't leak |req|
static VALUE rendezvous(Context *c, Buf *req)
{
    VALUE r;
    Buf res;
    int exc;

    rendezvous_no_des(c, req, &res); // takes ownership of |req|
    r = rb_protect(deserialize, (VALUE)&res, &exc);
    buf_reset(&res);
    if (exc) {
        r = rb_errinfo();
        rb_set_errinfo(Qnil);
        rb_exc_raise(r);
    }
    if (!NIL_P(c->exception)) {
        r = c->exception;
        c->exception = Qnil;
        rb_exc_raise(r);
    }
    return r;
}

static void handle_exception(VALUE e)
{
    const char *s;
    VALUE klass;

    if (NIL_P(e))
        return;
    StringValue(e);
    s = RSTRING_PTR(e);
    switch (*s) {
    case NO_ERROR:
        return;
    case INTERNAL_ERROR:
        klass = internal_error;
        break;
    case MEMORY_ERROR:
        klass = memory_error;
        break;
    case PARSE_ERROR:
        klass = parse_error;
        break;
    case RUNTIME_ERROR:
        klass = runtime_error;
        break;
    case TERMINATED_ERROR:
        klass = terminated_error;
        break;
    default:
        rb_raise(internal_error, "bad error class %02x", *s);
    }
    rb_raise(klass, "%s", s+1);
}

static VALUE context_alloc(VALUE klass)
{
    pthread_mutexattr_t mattr;
    pthread_condattr_t cattr;
    const char *cause;
    Context *c;
    VALUE f, a;
    int r;

    // Safe to lazy init because we hold the GVL
    if (NIL_P(date_time_class)) {
        f = rb_intern("const_defined?");
        a = rb_str_new_cstr("DateTime");
        if (Qtrue == rb_funcall(rb_cObject, f, 1, a))
            date_time_class = rb_const_get(rb_cObject, rb_intern("DateTime"));
    }
    c = ruby_xmalloc(sizeof(*c));
    memset(c, 0, sizeof(*c));
    c->exception = Qnil;
    c->procs = rb_ary_new();
    buf_init(&c->snapshot);
    buf_init(&c->req);
    buf_init(&c->res);
    cause = "pthread_condattr_init";
    if ((r = pthread_condattr_init(&cattr)))
        goto fail0;
#ifndef __APPLE__
    pthread_condattr_setclock(&cattr, CLOCK_MONOTONIC);
#endif
    cause = "pthread_mutexattr_init";
    if ((r = pthread_mutexattr_init(&mattr)))
        goto fail1;
    pthread_mutexattr_settype(&mattr, PTHREAD_MUTEX_RECURSIVE);
    cause = "pthread_mutex_init";
    r = pthread_mutex_init(&c->rr_mtx, &mattr);
    pthread_mutexattr_destroy(&mattr);
    if (r)
        goto fail1;
    if (pthread_mutex_init(&c->mtx, NULL))
        goto fail2;
    cause = "pthread_cond_init";
    if ((r = pthread_cond_init(&c->cv, &cattr)))
        goto fail3;
    cause = "pthread_mutex_init";
    if ((r = pthread_mutex_init(&c->wd.mtx, NULL)))
        goto fail4;
    cause = "pthread_cond_init";
    if (pthread_cond_init(&c->wd.cv, &cattr))
        goto fail5;
    cause = "barrier_init";
    if ((r = barrier_init(&c->early_init, 2)))
        goto fail6;
    cause = "barrier_init";
    if ((r = barrier_init(&c->late_init, 2)))
        goto fail7;
    pthread_condattr_destroy(&cattr);
    return TypedData_Wrap_Struct(klass, &context_type, c);
fail7:
    barrier_destroy(&c->early_init);
fail6:
    pthread_cond_destroy(&c->wd.cv);
fail5:
    pthread_mutex_destroy(&c->wd.mtx);
fail4:
    pthread_cond_destroy(&c->cv);
fail3:
    pthread_mutex_destroy(&c->mtx);
fail2:
    pthread_mutex_destroy(&c->rr_mtx);
fail1:
    pthread_condattr_destroy(&cattr);
fail0:
    ruby_xfree(c);
    rb_raise(runtime_error, "%s: %s", cause, strerror(r));
    return Qnil; // pacify compiler
}

static void *context_free_thread_do(void *arg)
{
    Context *c;

    c = arg;
    v8_single_threaded_dispose(c->pst);
    context_destroy(c);
    return NULL;
}

static void context_free_thread(Context *c)
{
    pthread_t thr;
    int r;

    // dispose on another thread so we don't block when trying to
    // enter an isolate that's in a stuck state; that *should* be
    // impossible but apparently it happened regularly before the
    // rewrite and I'm carrying it over out of an abundance of caution
    if ((r = pthread_create(&thr, NULL, context_free_thread_do, c))) {
        fprintf(stderr, "mini_racer: pthread_create: %s", strerror(r));
        fflush(stderr);
        context_free_thread_do(c);
    } else {
        pthread_detach(thr);
    }
}

static void context_free(void *arg)
{
    Context *c;

    c = arg;
    if (single_threaded) {
        context_free_thread(c);
    } else {
        pthread_mutex_lock(&c->mtx);
        c->quit = 2; // 2 = v8 thread frees
        pthread_cond_signal(&c->cv);
        pthread_mutex_unlock(&c->mtx);
    }
}

static void context_destroy(Context *c)
{
    pthread_mutex_unlock(&c->mtx);
    pthread_mutex_destroy(&c->mtx);
    pthread_cond_destroy(&c->cv);
    barrier_destroy(&c->early_init);
    barrier_destroy(&c->late_init);
    pthread_mutex_destroy(&c->wd.mtx);
    pthread_cond_destroy(&c->wd.cv);
    buf_reset(&c->snapshot);
    buf_reset(&c->req);
    buf_reset(&c->res);
    ruby_xfree(c);
}

static void context_mark(void *arg)
{
    Context *c;

    c = arg;
    rb_gc_mark(c->procs);
    rb_gc_mark(c->exception);
}

static size_t context_size(const void *arg)
{
    const Context *c = arg;
    return sizeof(*c);
}

static VALUE context_attach(VALUE self, VALUE name, VALUE proc)
{
    Context *c;
    VALUE e;
    Ser s;

    TypedData_Get_Struct(self, Context, &context_type, c);
    // request is (A)ttach, [name, id] array
    ser_init1(&s, 'A');
    ser_array_begin(&s, 2);
    ser_string(&s, RSTRING_PTR(name), RSTRING_LENINT(name));
    ser_int(&s, RARRAY_LENINT(c->procs));
    ser_array_end(&s, 2);
    rb_ary_push(c->procs, proc);
    // response is an exception or undefined
    e = rendezvous(c, &s.b);
    handle_exception(e);
    return Qnil;
}

static void *context_dispose_do(void *arg)
{
    Context *c;

    c = arg;
    if (single_threaded) {
        atomic_store(&c->quit, 1); // disposed
        // intentionally a no-op for now
    } else {
        pthread_mutex_lock(&c->mtx);
        while (c->req.len || c->res.len)
            pthread_cond_wait(&c->cv, &c->mtx);
        atomic_store(&c->quit, 1); // disposed
        pthread_cond_signal(&c->cv); // wake up v8 thread
        pthread_mutex_unlock(&c->mtx);
    }
    return NULL;
}

static VALUE context_dispose(VALUE self)
{
    Context *c;

    TypedData_Get_Struct(self, Context, &context_type, c);
    rb_thread_call_without_gvl(context_dispose_do, c, NULL, NULL);
    return Qnil;
}

static VALUE context_stop(VALUE self)
{
    Context *c;

    // does not grab |mtx| because Context.stop can be called from another
    // thread and then we deadlock if e.g. the V8 thread busy-loops in JS
    TypedData_Get_Struct(self, Context, &context_type, c);
    if (atomic_load(&c->quit))
        rb_raise(context_disposed_error, "disposed context");
    v8_terminate_execution(c->pst);
    return Qnil;
}
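
// Example framing: ctx.call("f", 1, 2) serializes to 'C' ["f", 1, 2] and
// ctx.eval("1+1") to 'E' ["<eval>", "1+1"]; both come back as a
// two-element [result, error] array where a non-empty error string picks
// the Ruby exception class in handle_exception.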
static VALUE context_call(int argc, VALUE *argv, VALUE self)
{
    VALUE a, e, h;
    Context *c;
    int i;
    Ser s;

    TypedData_Get_Struct(self, Context, &context_type, c);
    rb_scan_args(argc, argv, "1*", &a, &e);
    Check_Type(a, T_STRING);
    // request is (C)all, [name, args...] array
    ser_init1(&s, 'C');
    ser_array_begin(&s, argc);
    h = rb_hash_new();
    for (i = 0; i < argc; i++) {
        if (serialize1(&s, h, argv[i])) {
            ser_reset(&s);
            rb_raise(runtime_error, "Context.call: %s", s.err);
        }
    }
    ser_array_end(&s, argc);
    // response is [result, err] array
    a = rendezvous(c, &s.b); // takes ownership of |s.b|
    e = rb_ary_pop(a);
    handle_exception(e);
    return rb_ary_pop(a);
}

static VALUE context_eval(int argc, VALUE *argv, VALUE self)
{
    VALUE a, e, source, filename, kwargs;
    Context *c;
    Ser s;

    TypedData_Get_Struct(self, Context, &context_type, c);
    filename = Qnil;
    rb_scan_args(argc, argv, "1:", &source, &kwargs);
    Check_Type(source, T_STRING);
    if (!NIL_P(kwargs))
        filename = rb_hash_aref(kwargs, rb_id2sym(rb_intern("filename")));
    if (NIL_P(filename))
        filename = rb_str_new_cstr("<eval>");
    Check_Type(filename, T_STRING);
    // request is (E)val, [filename, source] array
    ser_init1(&s, 'E');
    ser_array_begin(&s, 2);
    ser_string(&s, RSTRING_PTR(filename), RSTRING_LENINT(filename));
    ser_string(&s, RSTRING_PTR(source), RSTRING_LENINT(source));
    ser_array_end(&s, 2);
    // response is [result, errname] array
    a = rendezvous(c, &s.b); // takes ownership of |s.b|
    e = rb_ary_pop(a);
    handle_exception(e);
    return rb_ary_pop(a);
}

static VALUE context_heap_stats(VALUE self)
{
    VALUE a, h, k, v;
    Context *c;
    int i, n;
    Buf b;

    TypedData_Get_Struct(self, Context, &context_type, c);
    buf_init(&b);
    buf_putc(&b, 'S'); // (S)tats, returns object
    h = rendezvous(c, &b); // takes ownership of |b|
    a = rb_ary_new();
    rb_hash_foreach(h, collect, a);
    for (i = 0, n = RARRAY_LENINT(a); i < n; i += 2) {
        k = rb_ary_entry(a, i+0);
        v = rb_ary_entry(a, i+1);
        rb_hash_delete(h, k);
        rb_hash_aset(h, rb_str_intern(k), v); // turn "key" into :key
    }
    return h;
}

static VALUE context_heap_snapshot(VALUE self)
{
    Buf req, res;
    Context *c;

    TypedData_Get_Struct(self, Context, &context_type, c);
    buf_init(&req);
    buf_putc(&req, 'H'); // (H)eap snapshot, returns plain bytes
    rendezvous_no_des(c, &req, &res); // takes ownership of |req|
    return rb_utf8_str_new((char *)res.buf, res.len);
}

static VALUE context_pump_message_loop(VALUE self)
{
    Context *c;
    Buf b;

    TypedData_Get_Struct(self, Context, &context_type, c);
    buf_init(&b);
    buf_putc(&b, 'P'); // (P)ump, returns bool
    return rendezvous(c, &b); // takes ownership of |b|
}

static VALUE context_idle_notification(VALUE self, VALUE arg)
{
    Context *c;
    Ser s;

    Check_Type(arg, T_FIXNUM);
    TypedData_Get_Struct(self, Context, &context_type, c);
    // request is (I)dle notification, idle_time_in_seconds
    ser_init1(&s, 'I');
    ser_num(&s, FIX2LONG(arg) / 1e3); // fixnum milliseconds to seconds
    // response is |undefined|
    return rendezvous(c, &s.b); // takes ownership of |s.b|
}

static VALUE context_low_memory_notification(VALUE self)
{
    Buf req, res;
    Context *c;

    TypedData_Get_Struct(self, Context, &context_type, c);
    buf_init(&req);
    buf_putc(&req, 'L'); // (L)ow memory notification, returns nothing
    rendezvous_no_des(c, &req, &res); // takes ownership of |req|
    return Qnil;
}
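
// Flags are buffered until v8_global_init consumes them; after that the
// buffer starts with a 1-byte marker and further set_flags! calls fail.
// Dashes and underscores are stripped before matching so that
// "--single_threaded", "--single-threaded", and "singlethreaded" all
// toggle the same switch.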
static int platform_set_flag1(VALUE k, VALUE v)
{
    char *p, *q, buf[256];
    int ok;

    k = rb_funcall(k, rb_intern("to_s"), 0);
    Check_Type(k, T_STRING);
    if (!NIL_P(v)) {
        v = rb_funcall(v, rb_intern("to_s"), 0);
        Check_Type(v, T_STRING);
    }
    p = RSTRING_PTR(k);
    if (!strncmp(p, "--", 2))
        p += 2;
    if (NIL_P(v)) {
        snprintf(buf, sizeof(buf), "--%s", p);
    } else {
        snprintf(buf, sizeof(buf), "--%s=%s", p, RSTRING_PTR(v));
    }
    p = buf;
    pthread_mutex_lock(&flags_mtx);
    if (!flags.buf)
        buf_init(&flags);
    ok = (*flags.buf != 1);
    if (ok) {
        buf_put(&flags, p, 1+strlen(p)); // include trailing \0
        // strip dashes and underscores to reduce the number of variant
        // spellings (--no-single-threaded, --nosingle-threaded,
        // --no_single_threaded, etc.)
        p = q = buf;
        for (;;) {
            if (*p != '-')
                if (*p != '_')
                    *q++ = *p;
            if (!*p++)
                break;
        }
        if (!strcmp(buf, "singlethreaded")) {
            single_threaded = 1;
        } else if (!strcmp(buf, "nosinglethreaded")) {
            single_threaded = 0;
        }
    }
    pthread_mutex_unlock(&flags_mtx);
    return ok;
}

static VALUE platform_set_flags(int argc, VALUE *argv, VALUE klass)
{
    VALUE args, kwargs, k, v;
    int i, n;

    (void)&klass;
    rb_scan_args(argc, argv, "*:", &args, &kwargs);
    Check_Type(args, T_ARRAY);
    for (i = 0, n = RARRAY_LENINT(args); i < n; i++) {
        k = rb_ary_entry(args, i);
        v = Qnil;
        if (!platform_set_flag1(k, v))
            goto fail;
    }
    if (NIL_P(kwargs))
        return Qnil;
    Check_Type(kwargs, T_HASH);
    args = rb_ary_new();
    rb_hash_foreach(kwargs, collect, args);
    for (i = 0, n = RARRAY_LENINT(args); i < n; i += 2) {
        k = rb_ary_entry(args, i+0);
        v = rb_ary_entry(args, i+1);
        if (!platform_set_flag1(k, v))
            goto fail;
    }
    return Qnil;
fail:
    rb_raise(platform_init_error, "platform already initialized");
}

// called by v8_global_init; caller must free |*p| with free()
void v8_get_flags(char **p, size_t *n)
{
    *p = NULL;
    *n = 0;
    pthread_mutex_lock(&flags_mtx);
    if (!flags.len)
        goto out;
    *p = malloc(flags.len);
    if (!*p)
        goto out;
    *n = flags.len;
    memcpy(*p, flags.buf, *n);
    buf_reset(&flags);
out:
    buf_init(&flags);
    buf_putc(&flags, 1); // marker to indicate it's been cleared
    pthread_mutex_unlock(&flags_mtx);
    if (single_threaded)
        rb_thread_lock_native_thread();
}

static VALUE context_initialize(int argc, VALUE *argv, VALUE self)
{
    VALUE kwargs, a, k, v;
    pthread_attr_t attr;
    const char *cause;
    pthread_t thr;
    Snapshot *ss;
    Context *c;
    char *s;
    int r;

    TypedData_Get_Struct(self, Context, &context_type, c);
    rb_scan_args(argc, argv, ":", &kwargs);
    if (NIL_P(kwargs))
        goto init;
    a = rb_ary_new();
    rb_hash_foreach(kwargs, collect, a);
    while (RARRAY_LENINT(a)) {
        v = rb_ary_pop(a);
        k = rb_ary_pop(a);
        k = rb_sym2str(k);
        s = RSTRING_PTR(k);
        if (!strcmp(s, "ensure_gc_after_idle")) {
            Check_Type(v, T_FIXNUM);
            c->idle_gc = FIX2LONG(v);
            if (c->idle_gc < 0 || c->idle_gc > INT32_MAX)
                rb_raise(rb_eArgError, "bad ensure_gc_after_idle");
        } else if (!strcmp(s, "max_memory")) {
            Check_Type(v, T_FIXNUM);
            c->max_memory = FIX2LONG(v);
            if (c->max_memory < 0 || c->max_memory >= UINT32_MAX)
                rb_raise(rb_eArgError, "bad max_memory");
        } else if (!strcmp(s, "marshal_stack_depth")) { // backcompat, ignored
            Check_Type(v, T_FIXNUM);
        } else if (!strcmp(s, "timeout")) {
            Check_Type(v, T_FIXNUM);
            c->timeout = FIX2LONG(v);
            if (c->timeout < 0 || c->timeout > INT32_MAX)
                rb_raise(rb_eArgError, "bad timeout");
        } else if (!strcmp(s, "snapshot")) {
            if (NIL_P(v))
                continue;
            TypedData_Get_Struct(v, Snapshot, &snapshot_type, ss);
            if (buf_put(&c->snapshot, RSTRING_PTR(ss->blob), RSTRING_LENINT(ss->blob)))
                rb_raise(runtime_error, "out of memory");
        } else if (!strcmp(s, "verbose_exceptions")) {
            c->verbose_exceptions = !(v == Qfalse || v == Qnil);
        } else {
            rb_raise(runtime_error, "bad keyword: %s", s);
        }
    }
init:
    if (single_threaded) {
        v8_once_init();
        c->pst = v8_thread_init(c, c->snapshot.buf, c->snapshot.len, c->max_memory, c->verbose_exceptions);
    } else {
        cause = "pthread_attr_init";
        if ((r = pthread_attr_init(&attr)))
            goto fail;
        pthread_attr_setstacksize(&attr, 2<<20); // 2 MiB
        pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);
        // v8 thread takes ownership of |c|
        cause = "pthread_create";
        r = pthread_create(&thr, &attr, v8_thread_start, c);
        pthread_attr_destroy(&attr);
        if (r)
            goto fail;
        barrier_wait(&c->early_init);
        barrier_wait(&c->late_init);
    }
    return Qnil;
fail:
    rb_raise(runtime_error, "Context.initialize: %s: %s", cause, strerror(r));
    return Qnil; // pacify compiler
}

static VALUE snapshot_alloc(VALUE klass)
{
    Snapshot *ss;

    ss = ruby_xmalloc(sizeof(*ss));
    ss->blob = rb_enc_str_new("", 0, rb_ascii8bit_encoding());
    return TypedData_Wrap_Struct(klass, &snapshot_type, ss);
}

static void snapshot_free(void *arg)
{
    ruby_xfree(arg);
}

static void snapshot_mark(void *arg)
{
    Snapshot *ss;

    ss = arg;
    rb_gc_mark(ss->blob);
}

static size_t snapshot_size(const void *arg)
{
    const Snapshot *ss;

    ss = arg;
    return sizeof(*ss) + RSTRING_LENINT(ss->blob);
}

static VALUE snapshot_initialize(int argc, VALUE *argv, VALUE self)
{
    VALUE a, e, code, cv;
    Snapshot *ss;
    Context *c;
    Ser s;

    TypedData_Get_Struct(self, Snapshot, &snapshot_type, ss);
    rb_scan_args(argc, argv, "01", &code);
    if (NIL_P(code))
        code = rb_str_new_cstr("");
    Check_Type(code, T_STRING);
    cv = context_alloc(context_class);
    context_initialize(0, NULL, cv);
    TypedData_Get_Struct(cv, Context, &context_type, c);
    // request is snapsho(T), "code"
    ser_init1(&s, 'T');
    ser_string(&s, RSTRING_PTR(code), RSTRING_LENINT(code));
    // response is [arraybuffer, error]
    a = rendezvous(c, &s.b);
    e = rb_ary_pop(a);
    context_dispose(cv);
    if (*RSTRING_PTR(e))
        rb_raise(snapshot_error, "%s", RSTRING_PTR(e)+1);
    ss->blob = rb_ary_pop(a);
    return Qnil;
}

static VALUE snapshot_warmup(VALUE self, VALUE arg)
{
    VALUE a, e, cv;
    Snapshot *ss;
    Context *c;
    Ser s;

    TypedData_Get_Struct(self, Snapshot, &snapshot_type, ss);
    Check_Type(arg, T_STRING);
    cv = context_alloc(context_class);
    context_initialize(0, NULL, cv);
    TypedData_Get_Struct(cv, Context, &context_type, c);
    // request is (W)armup, [snapshot, "warmup code"]
    ser_init1(&s, 'W');
    ser_array_begin(&s, 2);
    ser_string8(&s, (const uint8_t *)RSTRING_PTR(ss->blob), RSTRING_LENINT(ss->blob));
    ser_string(&s, RSTRING_PTR(arg), RSTRING_LENINT(arg));
    ser_array_end(&s, 2);
    // response is [arraybuffer, error]
    a = rendezvous(c, &s.b);
    e = rb_ary_pop(a);
    context_dispose(cv);
    if (*RSTRING_PTR(e))
        rb_raise(snapshot_error, "%s", RSTRING_PTR(e)+1);
    ss->blob = rb_ary_pop(a);
    return self;
}

static VALUE snapshot_dump(VALUE self)
{
    Snapshot *ss;

    TypedData_Get_Struct(self, Snapshot, &snapshot_type, ss);
    return ss->blob;
}

static VALUE snapshot_size0(VALUE self)
{
    Snapshot *ss;

    TypedData_Get_Struct(self, Snapshot, &snapshot_type, ss);
    return LONG2FIX(RSTRING_LENINT(ss->blob));
}

__attribute__((visibility("default")))
void Init_mini_racer_extension(void)
{
    VALUE c, m;

    m = rb_define_module("MiniRacer");
    c = rb_define_class_under(m, "Error", rb_eStandardError);
    snapshot_error = rb_define_class_under(m, "SnapshotError", c);
    platform_init_error = rb_define_class_under(m, "PlatformAlreadyInitialized", c);
    context_disposed_error = rb_define_class_under(m, "ContextDisposedError", c);

    c = rb_define_class_under(m, "EvalError", c);
    parse_error = rb_define_class_under(m, "ParseError", c);
    memory_error = rb_define_class_under(m, "V8OutOfMemoryError", c);
    runtime_error = rb_define_class_under(m, "RuntimeError", c);
    internal_error = rb_define_class_under(m, "InternalError", c);
    terminated_error = rb_define_class_under(m, "ScriptTerminatedError", c);

    c = context_class = rb_define_class_under(m, "Context", rb_cObject);
    rb_define_method(c, "initialize", context_initialize, -1);
    rb_define_method(c, "attach", context_attach, 2);
    rb_define_method(c, "dispose", context_dispose, 0);
    rb_define_method(c, "stop", context_stop, 0);
    rb_define_method(c, "call", context_call, -1);
    rb_define_method(c, "eval", context_eval, -1);
    rb_define_method(c, "heap_stats", context_heap_stats, 0);
    rb_define_method(c, "heap_snapshot", context_heap_snapshot, 0);
    rb_define_method(c, "pump_message_loop", context_pump_message_loop, 0);
    rb_define_method(c, "idle_notification", context_idle_notification, 1);
    rb_define_method(c, "low_memory_notification", context_low_memory_notification, 0);
    rb_define_alloc_func(c, context_alloc);

    c = snapshot_class = rb_define_class_under(m, "Snapshot", rb_cObject);
    rb_define_method(c, "initialize", snapshot_initialize, -1);
    rb_define_method(c, "warmup!", snapshot_warmup, 1);
    rb_define_method(c, "dump", snapshot_dump, 0);
    rb_define_method(c, "size", snapshot_size0, 0);
    rb_define_alloc_func(c, snapshot_alloc);

    c = rb_define_class_under(m, "Platform", rb_cObject);
    rb_define_singleton_method(c, "set_flags!", platform_set_flags, -1);

    date_time_class = Qnil; // lazy init
    js_function_class = rb_define_class_under(m, "JavaScriptFunction", rb_cObject);
}
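
// A minimal sketch of the Ruby API defined above (outputs illustrative):
//
//   ctx = MiniRacer::Context.new(timeout: 1000)
//   ctx.eval("1 + 1")                       # => 2
//   ctx.attach("twice", proc { |x| 2 * x }) # js -> ruby callback
//   ctx.eval("twice(21)")                   # => 42
//   ctx.dispose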