libv8 8.4.255.0-universal-darwin-19

Files changed (53)
  1. checksums.yaml +7 -0
  2. data/ext/libv8/.location.yml +1 -0
  3. data/ext/libv8/location.rb +89 -0
  4. data/ext/libv8/paths.rb +28 -0
  5. data/lib/libv8.rb +9 -0
  6. data/lib/libv8/version.rb +3 -0
  7. data/vendor/v8/include/cppgc/allocation.h +124 -0
  8. data/vendor/v8/include/cppgc/garbage-collected.h +192 -0
  9. data/vendor/v8/include/cppgc/heap.h +50 -0
  10. data/vendor/v8/include/cppgc/internal/accessors.h +26 -0
  11. data/vendor/v8/include/cppgc/internal/api-constants.h +44 -0
  12. data/vendor/v8/include/cppgc/internal/compiler-specific.h +26 -0
  13. data/vendor/v8/include/cppgc/internal/finalizer-trait.h +90 -0
  14. data/vendor/v8/include/cppgc/internal/gc-info.h +43 -0
  15. data/vendor/v8/include/cppgc/internal/logging.h +50 -0
  16. data/vendor/v8/include/cppgc/internal/persistent-node.h +109 -0
  17. data/vendor/v8/include/cppgc/internal/pointer-policies.h +133 -0
  18. data/vendor/v8/include/cppgc/internal/prefinalizer-handler.h +31 -0
  19. data/vendor/v8/include/cppgc/liveness-broker.h +50 -0
  20. data/vendor/v8/include/cppgc/macros.h +26 -0
  21. data/vendor/v8/include/cppgc/member.h +206 -0
  22. data/vendor/v8/include/cppgc/persistent.h +304 -0
  23. data/vendor/v8/include/cppgc/platform.h +31 -0
  24. data/vendor/v8/include/cppgc/prefinalizer.h +54 -0
  25. data/vendor/v8/include/cppgc/source-location.h +59 -0
  26. data/vendor/v8/include/cppgc/trace-trait.h +67 -0
  27. data/vendor/v8/include/cppgc/type-traits.h +109 -0
  28. data/vendor/v8/include/cppgc/visitor.h +137 -0
  29. data/vendor/v8/include/libplatform/libplatform-export.h +29 -0
  30. data/vendor/v8/include/libplatform/libplatform.h +85 -0
  31. data/vendor/v8/include/libplatform/v8-tracing.h +332 -0
  32. data/vendor/v8/include/v8-fast-api-calls.h +412 -0
  33. data/vendor/v8/include/v8-inspector-protocol.h +13 -0
  34. data/vendor/v8/include/v8-inspector.h +327 -0
  35. data/vendor/v8/include/v8-internal.h +389 -0
  36. data/vendor/v8/include/v8-platform.h +577 -0
  37. data/vendor/v8/include/v8-profiler.h +1059 -0
  38. data/vendor/v8/include/v8-util.h +652 -0
  39. data/vendor/v8/include/v8-value-serializer-version.h +24 -0
  40. data/vendor/v8/include/v8-version-string.h +38 -0
  41. data/vendor/v8/include/v8-version.h +20 -0
  42. data/vendor/v8/include/v8-wasm-trap-handler-posix.h +31 -0
  43. data/vendor/v8/include/v8-wasm-trap-handler-win.h +28 -0
  44. data/vendor/v8/include/v8.h +12018 -0
  45. data/vendor/v8/include/v8config.h +465 -0
  46. data/vendor/v8/out.gn/libv8/obj/libv8_libbase.a +0 -0
  47. data/vendor/v8/out.gn/libv8/obj/libv8_libplatform.a +0 -0
  48. data/vendor/v8/out.gn/libv8/obj/libv8_monolith.a +0 -0
  49. data/vendor/v8/out.gn/libv8/obj/third_party/icu/libicui18n.a +0 -0
  50. data/vendor/v8/out.gn/libv8/obj/third_party/icu/libicuuc.a +0 -0
  51. data/vendor/v8/out.gn/libv8/obj/third_party/zlib/google/libcompression_utils_portable.a +0 -0
  52. data/vendor/v8/out.gn/libv8/obj/third_party/zlib/libchrome_zlib.a +0 -0
  53. metadata +138 -0
data/vendor/v8/include/v8-internal.h
@@ -0,0 +1,389 @@
+ // Copyright 2018 the V8 project authors. All rights reserved.
+ // Use of this source code is governed by a BSD-style license that can be
+ // found in the LICENSE file.
+
+ #ifndef INCLUDE_V8_INTERNAL_H_
+ #define INCLUDE_V8_INTERNAL_H_
+
+ #include <stddef.h>
+ #include <stdint.h>
+ #include <string.h>
+ #include <type_traits>
+
+ #include "v8-version.h" // NOLINT(build/include_directory)
+ #include "v8config.h" // NOLINT(build/include_directory)
+
+ namespace v8 {
+
+ class Context;
+ class Data;
+ class Isolate;
+
+ namespace internal {
+
+ class Isolate;
+
+ typedef uintptr_t Address;
+ static const Address kNullAddress = 0;
+
+ /**
+ * Configuration of tagging scheme.
+ */
+ const int kApiSystemPointerSize = sizeof(void*);
+ const int kApiDoubleSize = sizeof(double);
+ const int kApiInt32Size = sizeof(int32_t);
+ const int kApiInt64Size = sizeof(int64_t);
+
+ // Tag information for HeapObject.
+ const int kHeapObjectTag = 1;
+ const int kWeakHeapObjectTag = 3;
+ const int kHeapObjectTagSize = 2;
+ const intptr_t kHeapObjectTagMask = (1 << kHeapObjectTagSize) - 1;
+
+ // Tag information for Smi.
+ const int kSmiTag = 0;
+ const int kSmiTagSize = 1;
+ const intptr_t kSmiTagMask = (1 << kSmiTagSize) - 1;
+
+ template <size_t tagged_ptr_size>
+ struct SmiTagging;
+
+ constexpr intptr_t kIntptrAllBitsSet = intptr_t{-1};
+ constexpr uintptr_t kUintptrAllBitsSet =
+ static_cast<uintptr_t>(kIntptrAllBitsSet);
+
+ // Smi constants for systems where tagged pointer is a 32-bit value.
+ template <>
+ struct SmiTagging<4> {
+ enum { kSmiShiftSize = 0, kSmiValueSize = 31 };
+
+ static constexpr intptr_t kSmiMinValue =
+ static_cast<intptr_t>(kUintptrAllBitsSet << (kSmiValueSize - 1));
+ static constexpr intptr_t kSmiMaxValue = -(kSmiMinValue + 1);
+
+ V8_INLINE static int SmiToInt(const internal::Address value) {
+ int shift_bits = kSmiTagSize + kSmiShiftSize;
+ // Truncate and shift down (requires >> to be sign extending).
+ return static_cast<int32_t>(static_cast<uint32_t>(value)) >> shift_bits;
+ }
+ V8_INLINE static constexpr bool IsValidSmi(intptr_t value) {
+ // Is value in range [kSmiMinValue, kSmiMaxValue].
+ // Use unsigned operations in order to avoid undefined behaviour in case of
+ // signed integer overflow.
+ return (static_cast<uintptr_t>(value) -
+ static_cast<uintptr_t>(kSmiMinValue)) <=
+ (static_cast<uintptr_t>(kSmiMaxValue) -
+ static_cast<uintptr_t>(kSmiMinValue));
+ }
+ };
+
+ // Smi constants for systems where tagged pointer is a 64-bit value.
+ template <>
+ struct SmiTagging<8> {
+ enum { kSmiShiftSize = 31, kSmiValueSize = 32 };
+
+ static constexpr intptr_t kSmiMinValue =
+ static_cast<intptr_t>(kUintptrAllBitsSet << (kSmiValueSize - 1));
+ static constexpr intptr_t kSmiMaxValue = -(kSmiMinValue + 1);
+
+ V8_INLINE static int SmiToInt(const internal::Address value) {
+ int shift_bits = kSmiTagSize + kSmiShiftSize;
+ // Shift down and throw away top 32 bits.
+ return static_cast<int>(static_cast<intptr_t>(value) >> shift_bits);
+ }
+ V8_INLINE static constexpr bool IsValidSmi(intptr_t value) {
+ // To be representable as a long smi, the value must be a 32-bit integer.
+ return (value == static_cast<int32_t>(value));
+ }
+ };
+
+ #ifdef V8_COMPRESS_POINTERS
+ static_assert(
+ kApiSystemPointerSize == kApiInt64Size,
+ "Pointer compression can be enabled only for 64-bit architectures");
+ const int kApiTaggedSize = kApiInt32Size;
+ #else
+ const int kApiTaggedSize = kApiSystemPointerSize;
+ #endif
+
+ constexpr bool PointerCompressionIsEnabled() {
+ return kApiTaggedSize != kApiSystemPointerSize;
+ }
+
+ #ifdef V8_31BIT_SMIS_ON_64BIT_ARCH
+ using PlatformSmiTagging = SmiTagging<kApiInt32Size>;
+ #else
+ using PlatformSmiTagging = SmiTagging<kApiTaggedSize>;
+ #endif
+
+ // TODO(ishell): Consider adding kSmiShiftBits = kSmiShiftSize + kSmiTagSize
+ // since it's used much more often than the individual constants.
+ const int kSmiShiftSize = PlatformSmiTagging::kSmiShiftSize;
+ const int kSmiValueSize = PlatformSmiTagging::kSmiValueSize;
+ const int kSmiMinValue = static_cast<int>(PlatformSmiTagging::kSmiMinValue);
+ const int kSmiMaxValue = static_cast<int>(PlatformSmiTagging::kSmiMaxValue);
+ constexpr bool SmiValuesAre31Bits() { return kSmiValueSize == 31; }
+ constexpr bool SmiValuesAre32Bits() { return kSmiValueSize == 32; }
+
+ V8_INLINE static constexpr internal::Address IntToSmi(int value) {
+ return (static_cast<Address>(value) << (kSmiTagSize + kSmiShiftSize)) |
+ kSmiTag;
+ }
+
+ /**
+ * This class exports constants and functionality from within v8 that
+ * is necessary to implement inline functions in the v8 api. Don't
+ * depend on functions and constants defined here.
+ */
+ class Internals {
+ public:
+ // These values match non-compiler-dependent values defined within
+ // the implementation of v8.
+ static const int kHeapObjectMapOffset = 0;
+ static const int kMapInstanceTypeOffset = 1 * kApiTaggedSize + kApiInt32Size;
+ static const int kStringResourceOffset =
+ 1 * kApiTaggedSize + 2 * kApiInt32Size;
+
+ static const int kOddballKindOffset = 4 * kApiTaggedSize + kApiDoubleSize;
+ static const int kJSObjectHeaderSize = 3 * kApiTaggedSize;
+ static const int kFixedArrayHeaderSize = 2 * kApiTaggedSize;
+ static const int kEmbedderDataArrayHeaderSize = 2 * kApiTaggedSize;
+ static const int kEmbedderDataSlotSize = kApiSystemPointerSize;
+ static const int kNativeContextEmbedderDataOffset = 6 * kApiTaggedSize;
+ static const int kFullStringRepresentationMask = 0x0f;
+ static const int kStringEncodingMask = 0x8;
+ static const int kExternalTwoByteRepresentationTag = 0x02;
+ static const int kExternalOneByteRepresentationTag = 0x0a;
+
+ static const uint32_t kNumIsolateDataSlots = 4;
+
+ // IsolateData layout guarantees.
+ static const int kIsolateEmbedderDataOffset = 0;
+ static const int kExternalMemoryOffset =
+ kNumIsolateDataSlots * kApiSystemPointerSize;
+ static const int kExternalMemoryLimitOffset =
+ kExternalMemoryOffset + kApiInt64Size;
+ static const int kExternalMemoryLowSinceMarkCompactOffset =
+ kExternalMemoryLimitOffset + kApiInt64Size;
+ static const int kIsolateFastCCallCallerFpOffset =
+ kExternalMemoryLowSinceMarkCompactOffset + kApiInt64Size;
+ static const int kIsolateFastCCallCallerPcOffset =
+ kIsolateFastCCallCallerFpOffset + kApiSystemPointerSize;
+ static const int kIsolateStackGuardOffset =
+ kIsolateFastCCallCallerPcOffset + kApiSystemPointerSize;
+ static const int kIsolateRootsOffset =
+ kIsolateStackGuardOffset + 7 * kApiSystemPointerSize;
+
+ static const int kUndefinedValueRootIndex = 4;
+ static const int kTheHoleValueRootIndex = 5;
+ static const int kNullValueRootIndex = 6;
+ static const int kTrueValueRootIndex = 7;
+ static const int kFalseValueRootIndex = 8;
+ static const int kEmptyStringRootIndex = 9;
+
+ static const int kNodeClassIdOffset = 1 * kApiSystemPointerSize;
+ static const int kNodeFlagsOffset = 1 * kApiSystemPointerSize + 3;
+ static const int kNodeStateMask = 0x7;
+ static const int kNodeStateIsWeakValue = 2;
+ static const int kNodeStateIsPendingValue = 3;
+
+ static const int kFirstNonstringType = 0x40;
+ static const int kOddballType = 0x43;
+ static const int kForeignType = 0x46;
+ static const int kJSSpecialApiObjectType = 0x410;
+ static const int kJSApiObjectType = 0x420;
+ static const int kJSObjectType = 0x421;
+
+ static const int kUndefinedOddballKind = 5;
+ static const int kNullOddballKind = 3;
+
+ // Constants used by PropertyCallbackInfo to check if we should throw when an
+ // error occurs.
+ static const int kThrowOnError = 0;
+ static const int kDontThrow = 1;
+ static const int kInferShouldThrowMode = 2;
+
+ // Soft limit for AdjustAmountofExternalAllocatedMemory. Trigger an
+ // incremental GC once the external memory reaches this limit.
+ static constexpr int kExternalAllocationSoftLimit = 64 * 1024 * 1024;
+
+ V8_EXPORT static void CheckInitializedImpl(v8::Isolate* isolate);
+ V8_INLINE static void CheckInitialized(v8::Isolate* isolate) {
+ #ifdef V8_ENABLE_CHECKS
+ CheckInitializedImpl(isolate);
+ #endif
+ }
+
+ V8_INLINE static bool HasHeapObjectTag(const internal::Address value) {
+ return (value & kHeapObjectTagMask) == static_cast<Address>(kHeapObjectTag);
+ }
+
+ V8_INLINE static int SmiValue(const internal::Address value) {
+ return PlatformSmiTagging::SmiToInt(value);
+ }
+
+ V8_INLINE static constexpr internal::Address IntToSmi(int value) {
+ return internal::IntToSmi(value);
+ }
+
+ V8_INLINE static constexpr bool IsValidSmi(intptr_t value) {
+ return PlatformSmiTagging::IsValidSmi(value);
+ }
+
+ V8_INLINE static int GetInstanceType(const internal::Address obj) {
+ typedef internal::Address A;
+ A map = ReadTaggedPointerField(obj, kHeapObjectMapOffset);
+ return ReadRawField<uint16_t>(map, kMapInstanceTypeOffset);
+ }
+
+ V8_INLINE static int GetOddballKind(const internal::Address obj) {
+ return SmiValue(ReadTaggedSignedField(obj, kOddballKindOffset));
+ }
+
+ V8_INLINE static bool IsExternalTwoByteString(int instance_type) {
+ int representation = (instance_type & kFullStringRepresentationMask);
+ return representation == kExternalTwoByteRepresentationTag;
+ }
+
+ V8_INLINE static uint8_t GetNodeFlag(internal::Address* obj, int shift) {
+ uint8_t* addr = reinterpret_cast<uint8_t*>(obj) + kNodeFlagsOffset;
+ return *addr & static_cast<uint8_t>(1U << shift);
+ }
+
+ V8_INLINE static void UpdateNodeFlag(internal::Address* obj, bool value,
+ int shift) {
+ uint8_t* addr = reinterpret_cast<uint8_t*>(obj) + kNodeFlagsOffset;
+ uint8_t mask = static_cast<uint8_t>(1U << shift);
+ *addr = static_cast<uint8_t>((*addr & ~mask) | (value << shift));
+ }
+
+ V8_INLINE static uint8_t GetNodeState(internal::Address* obj) {
+ uint8_t* addr = reinterpret_cast<uint8_t*>(obj) + kNodeFlagsOffset;
+ return *addr & kNodeStateMask;
+ }
+
+ V8_INLINE static void UpdateNodeState(internal::Address* obj, uint8_t value) {
+ uint8_t* addr = reinterpret_cast<uint8_t*>(obj) + kNodeFlagsOffset;
+ *addr = static_cast<uint8_t>((*addr & ~kNodeStateMask) | value);
+ }
+
+ V8_INLINE static void SetEmbedderData(v8::Isolate* isolate, uint32_t slot,
+ void* data) {
+ internal::Address addr = reinterpret_cast<internal::Address>(isolate) +
+ kIsolateEmbedderDataOffset +
+ slot * kApiSystemPointerSize;
+ *reinterpret_cast<void**>(addr) = data;
+ }
+
+ V8_INLINE static void* GetEmbedderData(const v8::Isolate* isolate,
+ uint32_t slot) {
+ internal::Address addr = reinterpret_cast<internal::Address>(isolate) +
+ kIsolateEmbedderDataOffset +
+ slot * kApiSystemPointerSize;
+ return *reinterpret_cast<void* const*>(addr);
+ }
+
+ V8_INLINE static internal::Address* GetRoot(v8::Isolate* isolate, int index) {
+ internal::Address addr = reinterpret_cast<internal::Address>(isolate) +
+ kIsolateRootsOffset +
+ index * kApiSystemPointerSize;
+ return reinterpret_cast<internal::Address*>(addr);
+ }
+
+ template <typename T>
+ V8_INLINE static T ReadRawField(internal::Address heap_object_ptr,
+ int offset) {
+ internal::Address addr = heap_object_ptr + offset - kHeapObjectTag;
+ #ifdef V8_COMPRESS_POINTERS
+ if (sizeof(T) > kApiTaggedSize) {
+ // TODO(ishell, v8:8875): When pointer compression is enabled 8-byte size
+ // fields (external pointers, doubles and BigInt data) are only
+ // kTaggedSize aligned so we have to use unaligned pointer friendly way of
+ // accessing them in order to avoid undefined behavior in C++ code.
+ T r;
+ memcpy(&r, reinterpret_cast<void*>(addr), sizeof(T));
+ return r;
+ }
+ #endif
+ return *reinterpret_cast<const T*>(addr);
+ }
+
+ V8_INLINE static internal::Address ReadTaggedPointerField(
+ internal::Address heap_object_ptr, int offset) {
+ #ifdef V8_COMPRESS_POINTERS
+ uint32_t value = ReadRawField<uint32_t>(heap_object_ptr, offset);
+ internal::Address root = GetRootFromOnHeapAddress(heap_object_ptr);
+ return root + static_cast<internal::Address>(static_cast<uintptr_t>(value));
+ #else
+ return ReadRawField<internal::Address>(heap_object_ptr, offset);
+ #endif
+ }
+
+ V8_INLINE static internal::Address ReadTaggedSignedField(
+ internal::Address heap_object_ptr, int offset) {
+ #ifdef V8_COMPRESS_POINTERS
+ uint32_t value = ReadRawField<uint32_t>(heap_object_ptr, offset);
+ return static_cast<internal::Address>(static_cast<uintptr_t>(value));
+ #else
+ return ReadRawField<internal::Address>(heap_object_ptr, offset);
+ #endif
+ }
+
+ #ifdef V8_COMPRESS_POINTERS
+ // See v8:7703 or src/ptr-compr.* for details about pointer compression.
+ static constexpr size_t kPtrComprHeapReservationSize = size_t{1} << 32;
+ static constexpr size_t kPtrComprIsolateRootAlignment = size_t{1} << 32;
+
+ V8_INLINE static internal::Address GetRootFromOnHeapAddress(
+ internal::Address addr) {
+ return addr & -static_cast<intptr_t>(kPtrComprIsolateRootAlignment);
+ }
+
+ V8_INLINE static internal::Address DecompressTaggedAnyField(
+ internal::Address heap_object_ptr, uint32_t value) {
+ internal::Address root = GetRootFromOnHeapAddress(heap_object_ptr);
+ return root + static_cast<internal::Address>(static_cast<uintptr_t>(value));
+ }
+ #endif // V8_COMPRESS_POINTERS
+ };
+
+ // Only perform cast check for types derived from v8::Data since
+ // other types do not implement the Cast method.
+ template <bool PerformCheck>
+ struct CastCheck {
+ template <class T>
+ static void Perform(T* data);
+ };
+
+ template <>
+ template <class T>
+ void CastCheck<true>::Perform(T* data) {
+ T::Cast(data);
+ }
+
+ template <>
+ template <class T>
+ void CastCheck<false>::Perform(T* data) {}
+
+ template <class T>
+ V8_INLINE void PerformCastCheck(T* data) {
+ CastCheck<std::is_base_of<Data, T>::value>::Perform(data);
+ }
+
+ // {obj} must be the raw tagged pointer representation of a HeapObject
+ // that's guaranteed to never be in ReadOnlySpace.
+ V8_EXPORT internal::Isolate* IsolateFromNeverReadOnlySpaceObject(Address obj);
+
+ // Returns if we need to throw when an error occurs. This infers the language
+ // mode based on the current context and the closure. This returns true if the
+ // language mode is strict.
+ V8_EXPORT bool ShouldThrowOnError(v8::internal::Isolate* isolate);
+
+ // A base class for backing stores, which is needed due to vagaries of
+ // how static casts work with std::shared_ptr.
+ class BackingStoreBase {};
+
+ } // namespace internal
+ } // namespace v8
+
+ #endif // INCLUDE_V8_INTERNAL_H_
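
To make the tagging scheme above concrete, here is a small standalone sketch, not part of the gem sources, of the Smi round trip. It hard-codes the SmiTagging<8> parameters used on 64-bit builds without pointer compression and assumes an LP64 platform, so it compiles on its own without including v8-internal.h:

#include <cassert>
#include <cstdint>

// Mirrors kSmiTag/kSmiTagSize and SmiTagging<8>::kSmiShiftSize from the header above.
constexpr int kSmiTag = 0;
constexpr int kSmiTagSize = 1;
constexpr int kSmiShiftSize = 31;

using Address = uintptr_t;

// Same arithmetic as internal::IntToSmi(): the payload lands in the upper
// 32 bits and the low bit is left as the (zero) Smi tag.
constexpr Address IntToSmi(int value) {
  return (static_cast<Address>(value) << (kSmiTagSize + kSmiShiftSize)) | kSmiTag;
}

// Same arithmetic as SmiTagging<8>::SmiToInt(): arithmetic shift back down.
int SmiToInt(Address value) {
  return static_cast<int>(static_cast<intptr_t>(value) >> (kSmiTagSize + kSmiShiftSize));
}

int main() {
  const Address smi = IntToSmi(-42);
  assert((smi & ((1 << kSmiTagSize) - 1)) == kSmiTag);  // tag bit stays 0
  assert(SmiToInt(smi) == -42);                         // round trip is lossless
  return 0;
}

The point of the encoding is that a small integer and a heap pointer can share one machine word: the low bit distinguishes a Smi (0) from a tagged HeapObject pointer (1), which is exactly what HasHeapObjectTag() above tests.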
data/vendor/v8/include/v8-platform.h
@@ -0,0 +1,577 @@
+ // Copyright 2013 the V8 project authors. All rights reserved.
+ // Use of this source code is governed by a BSD-style license that can be
+ // found in the LICENSE file.
+
+ #ifndef V8_V8_PLATFORM_H_
+ #define V8_V8_PLATFORM_H_
+
+ #include <stddef.h>
+ #include <stdint.h>
+ #include <stdlib.h> // For abort.
+ #include <memory>
+ #include <string>
+
+ #include "v8config.h" // NOLINT(build/include_directory)
+
+ namespace v8 {
+
+ class Isolate;
+
+ // Valid priorities supported by the task scheduling infrastructure.
+ enum class TaskPriority : uint8_t {
+ /**
+ * Best effort tasks are not critical for performance of the application. The
+ * platform implementation should preempt such tasks if higher priority tasks
+ * arrive.
+ */
+ kBestEffort,
+ /**
+ * User visible tasks are long running background tasks that will
+ * improve performance and memory usage of the application upon completion.
+ * Example: background compilation and garbage collection.
+ */
+ kUserVisible,
+ /**
+ * User blocking tasks are highest priority tasks that block the execution
+ * thread (e.g. major garbage collection). They must be finished as soon as
+ * possible.
+ */
+ kUserBlocking,
+ };
+
+ /**
+ * A Task represents a unit of work.
+ */
+ class Task {
+ public:
+ virtual ~Task() = default;
+
+ virtual void Run() = 0;
+ };
+
+ /**
+ * An IdleTask represents a unit of work to be performed in idle time.
+ * The Run method is invoked with an argument that specifies the deadline in
+ * seconds returned by MonotonicallyIncreasingTime().
+ * The idle task is expected to complete by this deadline.
+ */
+ class IdleTask {
+ public:
+ virtual ~IdleTask() = default;
+ virtual void Run(double deadline_in_seconds) = 0;
+ };
+
+ /**
+ * A TaskRunner allows scheduling of tasks. The TaskRunner may still be used to
+ * post tasks after the isolate gets destructed, but these tasks may not get
+ * executed anymore. All tasks posted to a given TaskRunner will be invoked in
+ * sequence. Tasks can be posted from any thread.
+ */
+ class TaskRunner {
+ public:
+ /**
+ * Schedules a task to be invoked by this TaskRunner. The TaskRunner
+ * implementation takes ownership of |task|.
+ */
+ virtual void PostTask(std::unique_ptr<Task> task) = 0;
+
+ /**
+ * Schedules a task to be invoked by this TaskRunner. The TaskRunner
+ * implementation takes ownership of |task|. The |task| cannot be nested
+ * within other task executions.
+ *
+ * Requires that |TaskRunner::NonNestableTasksEnabled()| is true.
+ */
+ virtual void PostNonNestableTask(std::unique_ptr<Task> task) {}
+
+ /**
+ * Schedules a task to be invoked by this TaskRunner. The task is scheduled
+ * after the given number of seconds |delay_in_seconds|. The TaskRunner
+ * implementation takes ownership of |task|.
+ */
+ virtual void PostDelayedTask(std::unique_ptr<Task> task,
+ double delay_in_seconds) = 0;
+
+ /**
+ * Schedules a task to be invoked by this TaskRunner. The task is scheduled
+ * after the given number of seconds |delay_in_seconds|. The TaskRunner
+ * implementation takes ownership of |task|. The |task| cannot be nested
+ * within other task executions.
+ *
+ * Requires that |TaskRunner::NonNestableDelayedTasksEnabled()| is true.
+ */
+ virtual void PostNonNestableDelayedTask(std::unique_ptr<Task> task,
+ double delay_in_seconds) {}
+
+ /**
+ * Schedules an idle task to be invoked by this TaskRunner. The task is
+ * scheduled when the embedder is idle. Requires that
+ * |TaskRunner::IdleTasksEnabled()| is true. Idle tasks may be reordered
+ * relative to other task types and may be starved for an arbitrarily long
+ * time if no idle time is available. The TaskRunner implementation takes
+ * ownership of |task|.
+ */
+ virtual void PostIdleTask(std::unique_ptr<IdleTask> task) = 0;
+
+ /**
+ * Returns true if idle tasks are enabled for this TaskRunner.
+ */
+ virtual bool IdleTasksEnabled() = 0;
+
+ /**
+ * Returns true if non-nestable tasks are enabled for this TaskRunner.
+ */
+ virtual bool NonNestableTasksEnabled() const { return false; }
+
+ /**
+ * Returns true if non-nestable delayed tasks are enabled for this TaskRunner.
+ */
+ virtual bool NonNestableDelayedTasksEnabled() const { return false; }
+
+ TaskRunner() = default;
+ virtual ~TaskRunner() = default;
+
+ TaskRunner(const TaskRunner&) = delete;
+ TaskRunner& operator=(const TaskRunner&) = delete;
+ };
+
+ /**
+ * Delegate that's passed to Job's worker task, providing an entry point to
+ * communicate with the scheduler.
+ */
+ class JobDelegate {
+ public:
+ /**
+ * Returns true if this thread should return from the worker task on the
+ * current thread ASAP. Workers should periodically invoke ShouldYield (or
+ * YieldIfNeeded()) as often as is reasonable.
+ */
+ virtual bool ShouldYield() = 0;
+
+ /**
+ * Notifies the scheduler that max concurrency was increased, and the number
+ * of worker should be adjusted accordingly. See Platform::PostJob() for more
+ * details.
+ */
+ virtual void NotifyConcurrencyIncrease() = 0;
+ };
+
+ /**
+ * Handle returned when posting a Job. Provides methods to control execution of
+ * the posted Job.
+ */
+ class JobHandle {
+ public:
+ virtual ~JobHandle() = default;
+
+ /**
+ * Notifies the scheduler that max concurrency was increased, and the number
+ * of worker should be adjusted accordingly. See Platform::PostJob() for more
+ * details.
+ */
+ virtual void NotifyConcurrencyIncrease() = 0;
+
+ /**
+ * Contributes to the job on this thread. Doesn't return until all tasks have
+ * completed and max concurrency becomes 0. When Join() is called and max
+ * concurrency reaches 0, it should not increase again. This also promotes
+ * this Job's priority to be at least as high as the calling thread's
+ * priority.
+ */
+ virtual void Join() = 0;
+
+ /**
+ * Forces all existing workers to yield ASAP. Waits until they have all
+ * returned from the Job's callback before returning.
+ */
+ virtual void Cancel() = 0;
+
+ /**
+ * Returns true if associated with a Job and other methods may be called.
+ * Returns false after Join() or Cancel() was called.
+ */
+ virtual bool IsRunning() = 0;
+ };
+
+ /**
+ * A JobTask represents work to run in parallel from Platform::PostJob().
+ */
+ class JobTask {
+ public:
+ virtual ~JobTask() = default;
+
+ virtual void Run(JobDelegate* delegate) = 0;
+
+ /**
+ * Controls the maximum number of threads calling Run() concurrently. Run() is
+ * only invoked if the number of threads previously running Run() was less
+ * than the value returned. Since GetMaxConcurrency() is a leaf function, it
+ * must not call back any JobHandle methods.
+ */
+ virtual size_t GetMaxConcurrency() const = 0;
+ };
+
+ /**
+ * The interface represents complex arguments to trace events.
+ */
+ class ConvertableToTraceFormat {
+ public:
+ virtual ~ConvertableToTraceFormat() = default;
+
+ /**
+ * Append the class info to the provided |out| string. The appended
+ * data must be a valid JSON object. Strings must be properly quoted, and
+ * escaped. There is no processing applied to the content after it is
+ * appended.
+ */
+ virtual void AppendAsTraceFormat(std::string* out) const = 0;
+ };
+
+ /**
+ * V8 Tracing controller.
+ *
+ * Can be implemented by an embedder to record trace events from V8.
+ */
+ class TracingController {
+ public:
+ virtual ~TracingController() = default;
+
+ // In Perfetto mode, trace events are written using Perfetto's Track Event
+ // API directly without going through the embedder. However, it is still
+ // possible to observe tracing being enabled and disabled.
+ #if !defined(V8_USE_PERFETTO)
+ /**
+ * Called by TRACE_EVENT* macros, don't call this directly.
+ * The name parameter is a category group for example:
+ * TRACE_EVENT0("v8,parse", "V8.Parse")
+ * The pointer returned points to a value with zero or more of the bits
+ * defined in CategoryGroupEnabledFlags.
+ **/
+ virtual const uint8_t* GetCategoryGroupEnabled(const char* name) {
+ static uint8_t no = 0;
+ return &no;
+ }
+
+ /**
+ * Adds a trace event to the platform tracing system. These function calls are
+ * usually the result of a TRACE_* macro from trace_event_common.h when
+ * tracing and the category of the particular trace are enabled. It is not
+ * advisable to call these functions on their own; they are really only meant
+ * to be used by the trace macros. The returned handle can be used by
+ * UpdateTraceEventDuration to update the duration of COMPLETE events.
+ */
+ virtual uint64_t AddTraceEvent(
+ char phase, const uint8_t* category_enabled_flag, const char* name,
+ const char* scope, uint64_t id, uint64_t bind_id, int32_t num_args,
+ const char** arg_names, const uint8_t* arg_types,
+ const uint64_t* arg_values,
+ std::unique_ptr<ConvertableToTraceFormat>* arg_convertables,
+ unsigned int flags) {
+ return 0;
+ }
+ virtual uint64_t AddTraceEventWithTimestamp(
+ char phase, const uint8_t* category_enabled_flag, const char* name,
+ const char* scope, uint64_t id, uint64_t bind_id, int32_t num_args,
+ const char** arg_names, const uint8_t* arg_types,
+ const uint64_t* arg_values,
+ std::unique_ptr<ConvertableToTraceFormat>* arg_convertables,
+ unsigned int flags, int64_t timestamp) {
+ return 0;
+ }
+
+ /**
+ * Sets the duration field of a COMPLETE trace event. It must be called with
+ * the handle returned from AddTraceEvent().
+ **/
+ virtual void UpdateTraceEventDuration(const uint8_t* category_enabled_flag,
+ const char* name, uint64_t handle) {}
+ #endif // !defined(V8_USE_PERFETTO)
+
+ class TraceStateObserver {
+ public:
+ virtual ~TraceStateObserver() = default;
+ virtual void OnTraceEnabled() = 0;
+ virtual void OnTraceDisabled() = 0;
+ };
+
+ /** Adds tracing state change observer. */
+ virtual void AddTraceStateObserver(TraceStateObserver*) {}
+
+ /** Removes tracing state change observer. */
+ virtual void RemoveTraceStateObserver(TraceStateObserver*) {}
+ };
+
+ /**
+ * A V8 memory page allocator.
+ *
+ * Can be implemented by an embedder to manage large host OS allocations.
+ */
+ class PageAllocator {
+ public:
+ virtual ~PageAllocator() = default;
+
+ /**
+ * Gets the page granularity for AllocatePages and FreePages. Addresses and
+ * lengths for those calls should be multiples of AllocatePageSize().
+ */
+ virtual size_t AllocatePageSize() = 0;
+
+ /**
+ * Gets the page granularity for SetPermissions and ReleasePages. Addresses
+ * and lengths for those calls should be multiples of CommitPageSize().
+ */
+ virtual size_t CommitPageSize() = 0;
+
+ /**
+ * Sets the random seed so that GetRandomMmapAddr() will generate repeatable
+ * sequences of random mmap addresses.
+ */
+ virtual void SetRandomMmapSeed(int64_t seed) = 0;
+
+ /**
+ * Returns a randomized address, suitable for memory allocation under ASLR.
+ * The address will be aligned to AllocatePageSize.
+ */
+ virtual void* GetRandomMmapAddr() = 0;
+
+ /**
+ * Memory permissions.
+ */
+ enum Permission {
+ kNoAccess,
+ kRead,
+ kReadWrite,
+ // TODO(hpayer): Remove this flag. Memory should never be rwx.
+ kReadWriteExecute,
+ kReadExecute
+ };
+
+ /**
+ * Allocates memory in range with the given alignment and permission.
+ */
+ virtual void* AllocatePages(void* address, size_t length, size_t alignment,
+ Permission permissions) = 0;
+
+ /**
+ * Frees memory in a range that was allocated by a call to AllocatePages.
+ */
+ virtual bool FreePages(void* address, size_t length) = 0;
+
+ /**
+ * Releases memory in a range that was allocated by a call to AllocatePages.
+ */
+ virtual bool ReleasePages(void* address, size_t length,
+ size_t new_length) = 0;
+
+ /**
+ * Sets permissions on pages in an allocated range.
+ */
+ virtual bool SetPermissions(void* address, size_t length,
+ Permission permissions) = 0;
+
+ /**
+ * Frees memory in the given [address, address + size) range. address and size
+ * should be operating system page-aligned. The next write to this
+ * memory area brings the memory transparently back.
+ */
+ virtual bool DiscardSystemPages(void* address, size_t size) { return true; }
+ };
+
+ /**
+ * V8 Platform abstraction layer.
+ *
+ * The embedder has to provide an implementation of this interface before
+ * initializing the rest of V8.
+ */
+ class Platform {
+ public:
+ virtual ~Platform() = default;
+
+ /**
+ * Allows the embedder to manage memory page allocations.
+ */
+ virtual PageAllocator* GetPageAllocator() {
+ // TODO(bbudge) Make this abstract after all embedders implement this.
+ return nullptr;
+ }
+
+ /**
+ * Enables the embedder to respond in cases where V8 can't allocate large
+ * blocks of memory. V8 retries the failed allocation once after calling this
+ * method. On success, execution continues; otherwise V8 exits with a fatal
+ * error.
+ * Embedder overrides of this function must NOT call back into V8.
+ */
+ virtual void OnCriticalMemoryPressure() {
+ // TODO(bbudge) Remove this when embedders override the following method.
+ // See crbug.com/634547.
+ }
+
+ /**
+ * Enables the embedder to respond in cases where V8 can't allocate large
+ * memory regions. The |length| parameter is the amount of memory needed.
+ * Returns true if memory is now available. Returns false if no memory could
+ * be made available. V8 will retry allocations until this method returns
+ * false.
+ *
+ * Embedder overrides of this function must NOT call back into V8.
+ */
+ virtual bool OnCriticalMemoryPressure(size_t length) { return false; }
+
+ /**
+ * Gets the number of worker threads used by
+ * Call(BlockingTask)OnWorkerThread(). This can be used to estimate the number
+ * of tasks a work package should be split into. A return value of 0 means
+ * that there are no worker threads available. Note that a value of 0 won't
+ * prohibit V8 from posting tasks using |CallOnWorkerThread|.
+ */
+ virtual int NumberOfWorkerThreads() = 0;
+
+ /**
+ * Returns a TaskRunner which can be used to post a task on the foreground.
+ * The TaskRunner's NonNestableTasksEnabled() must be true. This function
+ * should only be called from a foreground thread.
+ */
+ virtual std::shared_ptr<v8::TaskRunner> GetForegroundTaskRunner(
+ Isolate* isolate) = 0;
+
+ /**
+ * Schedules a task to be invoked on a worker thread.
+ */
+ virtual void CallOnWorkerThread(std::unique_ptr<Task> task) = 0;
+
+ /**
+ * Schedules a task that blocks the main thread to be invoked with
+ * high-priority on a worker thread.
+ */
+ virtual void CallBlockingTaskOnWorkerThread(std::unique_ptr<Task> task) {
+ // Embedders may optionally override this to process these tasks in a high
+ // priority pool.
+ CallOnWorkerThread(std::move(task));
+ }
+
+ /**
+ * Schedules a task to be invoked with low-priority on a worker thread.
+ */
+ virtual void CallLowPriorityTaskOnWorkerThread(std::unique_ptr<Task> task) {
+ // Embedders may optionally override this to process these tasks in a low
+ // priority pool.
+ CallOnWorkerThread(std::move(task));
+ }
+
+ /**
+ * Schedules a task to be invoked on a worker thread after |delay_in_seconds|
+ * expires.
+ */
+ virtual void CallDelayedOnWorkerThread(std::unique_ptr<Task> task,
+ double delay_in_seconds) = 0;
+
+ /**
+ * Returns true if idle tasks are enabled for the given |isolate|.
+ */
+ virtual bool IdleTasksEnabled(Isolate* isolate) { return false; }
+
+ /**
+ * Posts |job_task| to run in parallel. Returns a JobHandle associated with
+ * the Job, which can be joined or canceled.
+ * This avoids degenerate cases:
+ * - Calling CallOnWorkerThread() for each work item, causing significant
+ * overhead.
+ * - Fixed number of CallOnWorkerThread() calls that split the work and might
+ * run for a long time. This is problematic when many components post
+ * "num cores" tasks and all expect to use all the cores. In these cases,
+ * the scheduler lacks context to be fair to multiple same-priority requests
+ * and/or ability to request lower priority work to yield when high priority
+ * work comes in.
+ * A canonical implementation of |job_task| looks like:
+ * class MyJobTask : public JobTask {
+ * public:
+ * MyJobTask(...) : worker_queue_(...) {}
+ * // JobTask:
+ * void Run(JobDelegate* delegate) override {
+ * while (!delegate->ShouldYield()) {
+ * // Smallest unit of work.
+ * auto work_item = worker_queue_.TakeWorkItem(); // Thread safe.
+ * if (!work_item) return;
+ * ProcessWork(work_item);
+ * }
+ * }
+ *
+ * size_t GetMaxConcurrency() const override {
+ * return worker_queue_.GetSize(); // Thread safe.
+ * }
+ * };
+ * auto handle = PostJob(TaskPriority::kUserVisible,
+ * std::make_unique<MyJobTask>(...));
+ * handle->Join();
+ *
+ * PostJob() and methods of the returned JobHandle/JobDelegate, must never be
+ * called while holding a lock that could be acquired by JobTask::Run or
+ * JobTask::GetMaxConcurrency -- that could result in a deadlock. This is
+ * because [1] JobTask::GetMaxConcurrency may be invoked while holding
+ * internal lock (A), hence JobTask::GetMaxConcurrency can only use a lock (B)
+ * if that lock is *never* held while calling back into JobHandle from any
+ * thread (A=>B/B=>A deadlock) and [2] JobTask::Run or
+ * JobTask::GetMaxConcurrency may be invoked synchronously from JobHandle
+ * (B=>JobHandle::foo=>B deadlock).
+ *
+ * A sufficient PostJob() implementation that uses the default Job provided in
+ * libplatform looks like:
+ * std::unique_ptr<JobHandle> PostJob(
+ * TaskPriority priority, std::unique_ptr<JobTask> job_task) override {
+ * return std::make_unique<DefaultJobHandle>(
+ * std::make_shared<DefaultJobState>(
+ * this, std::move(job_task), kNumThreads));
+ * }
+ */
+ virtual std::unique_ptr<JobHandle> PostJob(
+ TaskPriority priority, std::unique_ptr<JobTask> job_task) {
+ return nullptr;
+ }
+
+ /**
+ * Monotonically increasing time in seconds from an arbitrary fixed point in
+ * the past. This function is expected to return at least
+ * millisecond-precision values. For this reason,
+ * it is recommended that the fixed point be no further in the past than
+ * the epoch.
+ **/
+ virtual double MonotonicallyIncreasingTime() = 0;
+
+ /**
+ * Current wall-clock time in milliseconds since epoch.
+ * This function is expected to return at least millisecond-precision values.
+ */
+ virtual double CurrentClockTimeMillis() = 0;
+
+ typedef void (*StackTracePrinter)();
+
+ /**
+ * Returns a function pointer that print a stack trace of the current stack
+ * on invocation. Disables printing of the stack trace if nullptr.
+ */
+ virtual StackTracePrinter GetStackTracePrinter() { return nullptr; }
+
+ /**
+ * Returns an instance of a v8::TracingController. This must be non-nullptr.
+ */
+ virtual TracingController* GetTracingController() = 0;
+
+ /**
+ * Tells the embedder to generate and upload a crashdump during an unexpected
+ * but non-critical scenario.
+ */
+ virtual void DumpWithoutCrashing() {}
+
+ protected:
+ /**
+ * Default implementation of current wall-clock time in milliseconds
+ * since epoch. Useful for implementing |CurrentClockTimeMillis| if
+ * nothing special needed.
+ */
+ V8_EXPORT static double SystemClockTimeMillis();
+ };
+
+ } // namespace v8
+
+ #endif // V8_V8_PLATFORM_H_
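
The Platform, Task, and TaskRunner interfaces above are what an embedder must supply before initializing V8. In practice, Ruby bindings built on top of this gem do not implement them by hand; the bundled libplatform (see data/vendor/v8/include/libplatform/libplatform.h and libv8_libplatform.a in the file list) provides a default implementation. The following is a hedged sketch of the usual wiring, assuming the standard v8::platform::NewDefaultPlatform() helper and a hypothetical main(); it is illustrative only and not code shipped in this gem:

#include <memory>

#include "libplatform/libplatform.h"
#include "v8.h"

int main() {
  // The Platform must be registered before the rest of V8 is initialized.
  std::unique_ptr<v8::Platform> platform = v8::platform::NewDefaultPlatform();
  v8::V8::InitializePlatform(platform.get());
  v8::V8::Initialize();

  // Foreground tasks for this isolate are dispatched through the TaskRunner
  // returned by Platform::GetForegroundTaskRunner(); worker work goes through
  // CallOnWorkerThread()/PostJob() on the platform installed above.
  v8::Isolate::CreateParams create_params;
  create_params.array_buffer_allocator =
      v8::ArrayBuffer::Allocator::NewDefaultAllocator();
  v8::Isolate* isolate = v8::Isolate::New(create_params);

  // ... compile and run scripts here ...

  isolate->Dispose();
  v8::V8::Dispose();
  v8::V8::ShutdownPlatform();
  delete create_params.array_buffer_allocator;
  return 0;
}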