libv8-node 16.10.0.0-x86_64-darwin

Files changed (68)
  1. checksums.yaml +7 -0
  2. data/ext/libv8-node/.location.yml +1 -0
  3. data/ext/libv8-node/location.rb +76 -0
  4. data/ext/libv8-node/paths.rb +34 -0
  5. data/lib/libv8/node/version.rb +7 -0
  6. data/lib/libv8/node.rb +11 -0
  7. data/lib/libv8-node.rb +1 -0
  8. data/vendor/v8/include/cppgc/allocation.h +232 -0
  9. data/vendor/v8/include/cppgc/common.h +29 -0
  10. data/vendor/v8/include/cppgc/cross-thread-persistent.h +384 -0
  11. data/vendor/v8/include/cppgc/custom-space.h +97 -0
  12. data/vendor/v8/include/cppgc/default-platform.h +75 -0
  13. data/vendor/v8/include/cppgc/ephemeron-pair.h +30 -0
  14. data/vendor/v8/include/cppgc/explicit-management.h +82 -0
  15. data/vendor/v8/include/cppgc/garbage-collected.h +117 -0
  16. data/vendor/v8/include/cppgc/heap-consistency.h +236 -0
  17. data/vendor/v8/include/cppgc/heap-state.h +70 -0
  18. data/vendor/v8/include/cppgc/heap-statistics.h +120 -0
  19. data/vendor/v8/include/cppgc/heap.h +201 -0
  20. data/vendor/v8/include/cppgc/internal/api-constants.h +47 -0
  21. data/vendor/v8/include/cppgc/internal/atomic-entry-flag.h +48 -0
  22. data/vendor/v8/include/cppgc/internal/caged-heap-local-data.h +68 -0
  23. data/vendor/v8/include/cppgc/internal/compiler-specific.h +38 -0
  24. data/vendor/v8/include/cppgc/internal/finalizer-trait.h +90 -0
  25. data/vendor/v8/include/cppgc/internal/gc-info.h +76 -0
  26. data/vendor/v8/include/cppgc/internal/logging.h +50 -0
  27. data/vendor/v8/include/cppgc/internal/name-trait.h +111 -0
  28. data/vendor/v8/include/cppgc/internal/persistent-node.h +172 -0
  29. data/vendor/v8/include/cppgc/internal/pointer-policies.h +175 -0
  30. data/vendor/v8/include/cppgc/internal/prefinalizer-handler.h +30 -0
  31. data/vendor/v8/include/cppgc/internal/write-barrier.h +396 -0
  32. data/vendor/v8/include/cppgc/liveness-broker.h +74 -0
  33. data/vendor/v8/include/cppgc/macros.h +26 -0
  34. data/vendor/v8/include/cppgc/member.h +286 -0
  35. data/vendor/v8/include/cppgc/name-provider.h +65 -0
  36. data/vendor/v8/include/cppgc/object-size-trait.h +58 -0
  37. data/vendor/v8/include/cppgc/persistent.h +365 -0
  38. data/vendor/v8/include/cppgc/platform.h +153 -0
  39. data/vendor/v8/include/cppgc/prefinalizer.h +52 -0
  40. data/vendor/v8/include/cppgc/process-heap-statistics.h +36 -0
  41. data/vendor/v8/include/cppgc/sentinel-pointer.h +32 -0
  42. data/vendor/v8/include/cppgc/source-location.h +92 -0
  43. data/vendor/v8/include/cppgc/testing.h +99 -0
  44. data/vendor/v8/include/cppgc/trace-trait.h +116 -0
  45. data/vendor/v8/include/cppgc/type-traits.h +247 -0
  46. data/vendor/v8/include/cppgc/visitor.h +377 -0
  47. data/vendor/v8/include/libplatform/libplatform-export.h +29 -0
  48. data/vendor/v8/include/libplatform/libplatform.h +117 -0
  49. data/vendor/v8/include/libplatform/v8-tracing.h +334 -0
  50. data/vendor/v8/include/v8-cppgc.h +325 -0
  51. data/vendor/v8/include/v8-fast-api-calls.h +791 -0
  52. data/vendor/v8/include/v8-inspector-protocol.h +13 -0
  53. data/vendor/v8/include/v8-inspector.h +348 -0
  54. data/vendor/v8/include/v8-internal.h +499 -0
  55. data/vendor/v8/include/v8-metrics.h +202 -0
  56. data/vendor/v8/include/v8-platform.h +709 -0
  57. data/vendor/v8/include/v8-profiler.h +1123 -0
  58. data/vendor/v8/include/v8-unwinder-state.h +30 -0
  59. data/vendor/v8/include/v8-util.h +652 -0
  60. data/vendor/v8/include/v8-value-serializer-version.h +24 -0
  61. data/vendor/v8/include/v8-version-string.h +38 -0
  62. data/vendor/v8/include/v8-version.h +20 -0
  63. data/vendor/v8/include/v8-wasm-trap-handler-posix.h +31 -0
  64. data/vendor/v8/include/v8-wasm-trap-handler-win.h +28 -0
  65. data/vendor/v8/include/v8.h +12648 -0
  66. data/vendor/v8/include/v8config.h +515 -0
  67. data/vendor/v8/x86_64-darwin/libv8/obj/libv8_monolith.a +0 -0
  68. metadata +138 -0
data/vendor/v8/include/v8-internal.h ADDED
@@ -0,0 +1,499 @@
+ // Copyright 2018 the V8 project authors. All rights reserved.
+ // Use of this source code is governed by a BSD-style license that can be
+ // found in the LICENSE file.
+
+ #ifndef INCLUDE_V8_INTERNAL_H_
+ #define INCLUDE_V8_INTERNAL_H_
+
+ #include <stddef.h>
+ #include <stdint.h>
+ #include <string.h>
+ #include <type_traits>
+
+ #include "v8-version.h"  // NOLINT(build/include_directory)
+ #include "v8config.h"    // NOLINT(build/include_directory)
+
+ namespace v8 {
+
+ class Context;
+ class Data;
+ class Isolate;
+
+ namespace internal {
+
+ class Isolate;
+
+ typedef uintptr_t Address;
+ static const Address kNullAddress = 0;
+
+ /**
+  * Configuration of tagging scheme.
+  */
+ const int kApiSystemPointerSize = sizeof(void*);
+ const int kApiDoubleSize = sizeof(double);
+ const int kApiInt32Size = sizeof(int32_t);
+ const int kApiInt64Size = sizeof(int64_t);
+
+ // Tag information for HeapObject.
+ const int kHeapObjectTag = 1;
+ const int kWeakHeapObjectTag = 3;
+ const int kHeapObjectTagSize = 2;
+ const intptr_t kHeapObjectTagMask = (1 << kHeapObjectTagSize) - 1;
+
+ // Tag information for forwarding pointers stored in object headers.
+ // 0b00 at the lowest 2 bits in the header indicates that the map word is a
+ // forwarding pointer.
+ const int kForwardingTag = 0;
+ const int kForwardingTagSize = 2;
+ const intptr_t kForwardingTagMask = (1 << kForwardingTagSize) - 1;
+
+ // Tag information for Smi.
+ const int kSmiTag = 0;
+ const int kSmiTagSize = 1;
+ const intptr_t kSmiTagMask = (1 << kSmiTagSize) - 1;
+
+ template <size_t tagged_ptr_size>
+ struct SmiTagging;
+
+ constexpr intptr_t kIntptrAllBitsSet = intptr_t{-1};
+ constexpr uintptr_t kUintptrAllBitsSet =
+     static_cast<uintptr_t>(kIntptrAllBitsSet);
+
+ // Smi constants for systems where tagged pointer is a 32-bit value.
+ template <>
+ struct SmiTagging<4> {
+   enum { kSmiShiftSize = 0, kSmiValueSize = 31 };
+
+   static constexpr intptr_t kSmiMinValue =
+       static_cast<intptr_t>(kUintptrAllBitsSet << (kSmiValueSize - 1));
+   static constexpr intptr_t kSmiMaxValue = -(kSmiMinValue + 1);
+
+   V8_INLINE static int SmiToInt(const internal::Address value) {
+     int shift_bits = kSmiTagSize + kSmiShiftSize;
+     // Truncate and shift down (requires >> to be sign extending).
+     return static_cast<int32_t>(static_cast<uint32_t>(value)) >> shift_bits;
+   }
+   V8_INLINE static constexpr bool IsValidSmi(intptr_t value) {
+     // Is value in range [kSmiMinValue, kSmiMaxValue].
+     // Use unsigned operations in order to avoid undefined behaviour in case of
+     // signed integer overflow.
+     return (static_cast<uintptr_t>(value) -
+             static_cast<uintptr_t>(kSmiMinValue)) <=
+            (static_cast<uintptr_t>(kSmiMaxValue) -
+             static_cast<uintptr_t>(kSmiMinValue));
+   }
+ };
+
+ // Smi constants for systems where tagged pointer is a 64-bit value.
+ template <>
+ struct SmiTagging<8> {
+   enum { kSmiShiftSize = 31, kSmiValueSize = 32 };
+
+   static constexpr intptr_t kSmiMinValue =
+       static_cast<intptr_t>(kUintptrAllBitsSet << (kSmiValueSize - 1));
+   static constexpr intptr_t kSmiMaxValue = -(kSmiMinValue + 1);
+
+   V8_INLINE static int SmiToInt(const internal::Address value) {
+     int shift_bits = kSmiTagSize + kSmiShiftSize;
+     // Shift down and throw away top 32 bits.
+     return static_cast<int>(static_cast<intptr_t>(value) >> shift_bits);
+   }
+   V8_INLINE static constexpr bool IsValidSmi(intptr_t value) {
+     // To be representable as a long smi, the value must be a 32-bit integer.
+     return (value == static_cast<int32_t>(value));
+   }
+ };
+
+ #ifdef V8_COMPRESS_POINTERS
+ static_assert(
+     kApiSystemPointerSize == kApiInt64Size,
+     "Pointer compression can be enabled only for 64-bit architectures");
+ const int kApiTaggedSize = kApiInt32Size;
+ #else
+ const int kApiTaggedSize = kApiSystemPointerSize;
+ #endif
+
+ constexpr bool PointerCompressionIsEnabled() {
+   return kApiTaggedSize != kApiSystemPointerSize;
+ }
+
+ constexpr bool HeapSandboxIsEnabled() {
+ #ifdef V8_HEAP_SANDBOX
+   return true;
+ #else
+   return false;
+ #endif
+ }
+
+ using ExternalPointer_t = Address;
+
+ // If the heap sandbox is enabled, these tag values will be ORed with the
+ // external pointers in the external pointer table to prevent use of pointers of
+ // the wrong type. When a pointer is loaded, it is ANDed with the inverse of the
+ // expected type's tag. The tags are constructed in a way that guarantees that a
+ // failed type check will result in one or more of the top bits of the pointer
+ // being set, rendering the pointer inaccessible. This construction allows
+ // performing the type check and removing GC marking bits from the pointer at
+ // the same time.
+ enum ExternalPointerTag : uint64_t {
+   kExternalPointerNullTag = 0x0000000000000000,
+   kArrayBufferBackingStoreTag = 0x00ff000000000000,      // 0b000000011111111
+   kTypedArrayExternalPointerTag = 0x017f000000000000,    // 0b000000101111111
+   kDataViewDataPointerTag = 0x01bf000000000000,          // 0b000000110111111
+   kExternalStringResourceTag = 0x01df000000000000,       // 0b000000111011111
+   kExternalStringResourceDataTag = 0x01ef000000000000,   // 0b000000111101111
+   kForeignForeignAddressTag = 0x01f7000000000000,        // 0b000000111110111
+   kNativeContextMicrotaskQueueTag = 0x01fb000000000000,  // 0b000000111111011
+   kEmbedderDataSlotPayloadTag = 0x01fd000000000000,      // 0b000000111111101
+   kCodeEntryPointTag = 0x01fe000000000000,               // 0b000000111111110
+ };
+
+ constexpr uint64_t kExternalPointerTagMask = 0xffff000000000000;
+
+ #ifdef V8_31BIT_SMIS_ON_64BIT_ARCH
+ using PlatformSmiTagging = SmiTagging<kApiInt32Size>;
+ #else
+ using PlatformSmiTagging = SmiTagging<kApiTaggedSize>;
+ #endif
+
+ // TODO(ishell): Consider adding kSmiShiftBits = kSmiShiftSize + kSmiTagSize
+ // since it's used much more often than the individual constants.
+ const int kSmiShiftSize = PlatformSmiTagging::kSmiShiftSize;
+ const int kSmiValueSize = PlatformSmiTagging::kSmiValueSize;
+ const int kSmiMinValue = static_cast<int>(PlatformSmiTagging::kSmiMinValue);
+ const int kSmiMaxValue = static_cast<int>(PlatformSmiTagging::kSmiMaxValue);
+ constexpr bool SmiValuesAre31Bits() { return kSmiValueSize == 31; }
+ constexpr bool SmiValuesAre32Bits() { return kSmiValueSize == 32; }
+
+ V8_INLINE static constexpr internal::Address IntToSmi(int value) {
+   return (static_cast<Address>(value) << (kSmiTagSize + kSmiShiftSize)) |
+          kSmiTag;
+ }
+
+ // Converts encoded external pointer to address.
+ V8_EXPORT Address DecodeExternalPointerImpl(const Isolate* isolate,
+                                             ExternalPointer_t pointer,
+                                             ExternalPointerTag tag);
+
+ // {obj} must be the raw tagged pointer representation of a HeapObject
+ // that's guaranteed to never be in ReadOnlySpace.
+ V8_EXPORT internal::Isolate* IsolateFromNeverReadOnlySpaceObject(Address obj);
+
+ // Returns whether we need to throw when an error occurs. This infers the
+ // language mode based on the current context and the closure. This returns
+ // true if the language mode is strict.
+ V8_EXPORT bool ShouldThrowOnError(v8::internal::Isolate* isolate);
+
+ /**
+  * This class exports constants and functionality from within v8 that
+  * is necessary to implement inline functions in the v8 api. Don't
+  * depend on functions and constants defined here.
+  */
+ class Internals {
+ #ifdef V8_MAP_PACKING
+   V8_INLINE static constexpr internal::Address UnpackMapWord(
+       internal::Address mapword) {
+     // TODO(wenyuzhao): Clear header metadata.
+     return mapword ^ kMapWordXorMask;
+   }
+ #endif
+
+  public:
+   // These values match non-compiler-dependent values defined within
+   // the implementation of v8.
+   static const int kHeapObjectMapOffset = 0;
+   static const int kMapInstanceTypeOffset = 1 * kApiTaggedSize + kApiInt32Size;
+   static const int kStringResourceOffset =
+       1 * kApiTaggedSize + 2 * kApiInt32Size;
+
+   static const int kOddballKindOffset = 4 * kApiTaggedSize + kApiDoubleSize;
+   static const int kJSObjectHeaderSize = 3 * kApiTaggedSize;
+   static const int kFixedArrayHeaderSize = 2 * kApiTaggedSize;
+   static const int kEmbedderDataArrayHeaderSize = 2 * kApiTaggedSize;
+   static const int kEmbedderDataSlotSize = kApiSystemPointerSize;
+ #ifdef V8_HEAP_SANDBOX
+   static const int kEmbedderDataSlotRawPayloadOffset = kApiTaggedSize;
+ #endif
+   static const int kNativeContextEmbedderDataOffset = 6 * kApiTaggedSize;
+   static const int kFullStringRepresentationMask = 0x0f;
+   static const int kStringEncodingMask = 0x8;
+   static const int kExternalTwoByteRepresentationTag = 0x02;
+   static const int kExternalOneByteRepresentationTag = 0x0a;
+
+   static const uint32_t kNumIsolateDataSlots = 4;
+
+   // IsolateData layout guarantees.
+   static const int kIsolateEmbedderDataOffset = 0;
+   static const int kIsolateFastCCallCallerFpOffset =
+       kNumIsolateDataSlots * kApiSystemPointerSize;
+   static const int kIsolateFastCCallCallerPcOffset =
+       kIsolateFastCCallCallerFpOffset + kApiSystemPointerSize;
+   static const int kIsolateFastApiCallTargetOffset =
+       kIsolateFastCCallCallerPcOffset + kApiSystemPointerSize;
+   static const int kIsolateStackGuardOffset =
+       kIsolateFastApiCallTargetOffset + kApiSystemPointerSize;
+   static const int kIsolateRootsOffset =
+       kIsolateStackGuardOffset + 7 * kApiSystemPointerSize;
+
+   static const int kExternalPointerTableBufferOffset = 0;
+   static const int kExternalPointerTableLengthOffset =
+       kExternalPointerTableBufferOffset + kApiSystemPointerSize;
+   static const int kExternalPointerTableCapacityOffset =
+       kExternalPointerTableLengthOffset + kApiInt32Size;
+
+   static const int kUndefinedValueRootIndex = 4;
+   static const int kTheHoleValueRootIndex = 5;
+   static const int kNullValueRootIndex = 6;
+   static const int kTrueValueRootIndex = 7;
+   static const int kFalseValueRootIndex = 8;
+   static const int kEmptyStringRootIndex = 9;
+
+   static const int kNodeClassIdOffset = 1 * kApiSystemPointerSize;
+   static const int kNodeFlagsOffset = 1 * kApiSystemPointerSize + 3;
+   static const int kNodeStateMask = 0x7;
+   static const int kNodeStateIsWeakValue = 2;
+   static const int kNodeStateIsPendingValue = 3;
+
+   static const int kFirstNonstringType = 0x40;
+   static const int kOddballType = 0x43;
+   static const int kForeignType = 0x46;
+   static const int kJSSpecialApiObjectType = 0x410;
+   static const int kJSApiObjectType = 0x420;
+   static const int kJSObjectType = 0x421;
+
+   static const int kUndefinedOddballKind = 5;
+   static const int kNullOddballKind = 3;
+
+   // Constants used by PropertyCallbackInfo to check if we should throw when an
+   // error occurs.
+   static const int kThrowOnError = 0;
+   static const int kDontThrow = 1;
+   static const int kInferShouldThrowMode = 2;
+
+   // Soft limit for AdjustAmountOfExternalAllocatedMemory. Trigger an
+   // incremental GC once the external memory reaches this limit.
+   static constexpr int kExternalAllocationSoftLimit = 64 * 1024 * 1024;
+
+ #ifdef V8_MAP_PACKING
+   static const uintptr_t kMapWordMetadataMask = 0xffffULL << 48;
+   // The lowest two bits of mapwords are always `0b10`
+   static const uintptr_t kMapWordSignature = 0b10;
+   // XORing a (non-compressed) map with this mask ensures that the two
+   // low-order bits are 0b10. The 0 at the end makes this look like a Smi,
+   // although real Smis have all lower 32 bits unset. We only rely on these
+   // values passing as Smis in very few places.
+   static const int kMapWordXorMask = 0b11;
+ #endif
+
+   V8_EXPORT static void CheckInitializedImpl(v8::Isolate* isolate);
+   V8_INLINE static void CheckInitialized(v8::Isolate* isolate) {
+ #ifdef V8_ENABLE_CHECKS
+     CheckInitializedImpl(isolate);
+ #endif
+   }
+
+   V8_INLINE static bool HasHeapObjectTag(const internal::Address value) {
+     return (value & kHeapObjectTagMask) == static_cast<Address>(kHeapObjectTag);
+   }
+
+   V8_INLINE static int SmiValue(const internal::Address value) {
+     return PlatformSmiTagging::SmiToInt(value);
+   }
+
+   V8_INLINE static constexpr internal::Address IntToSmi(int value) {
+     return internal::IntToSmi(value);
+   }
+
+   V8_INLINE static constexpr bool IsValidSmi(intptr_t value) {
+     return PlatformSmiTagging::IsValidSmi(value);
+   }
+
+   V8_INLINE static int GetInstanceType(const internal::Address obj) {
+     typedef internal::Address A;
+     A map = ReadTaggedPointerField(obj, kHeapObjectMapOffset);
+ #ifdef V8_MAP_PACKING
+     map = UnpackMapWord(map);
+ #endif
+     return ReadRawField<uint16_t>(map, kMapInstanceTypeOffset);
+   }
+
+   V8_INLINE static int GetOddballKind(const internal::Address obj) {
+     return SmiValue(ReadTaggedSignedField(obj, kOddballKindOffset));
+   }
+
+   V8_INLINE static bool IsExternalTwoByteString(int instance_type) {
+     int representation = (instance_type & kFullStringRepresentationMask);
+     return representation == kExternalTwoByteRepresentationTag;
+   }
+
+   V8_INLINE static uint8_t GetNodeFlag(internal::Address* obj, int shift) {
+     uint8_t* addr = reinterpret_cast<uint8_t*>(obj) + kNodeFlagsOffset;
+     return *addr & static_cast<uint8_t>(1U << shift);
+   }
+
+   V8_INLINE static void UpdateNodeFlag(internal::Address* obj, bool value,
+                                        int shift) {
+     uint8_t* addr = reinterpret_cast<uint8_t*>(obj) + kNodeFlagsOffset;
+     uint8_t mask = static_cast<uint8_t>(1U << shift);
+     *addr = static_cast<uint8_t>((*addr & ~mask) | (value << shift));
+   }
+
+   V8_INLINE static uint8_t GetNodeState(internal::Address* obj) {
+     uint8_t* addr = reinterpret_cast<uint8_t*>(obj) + kNodeFlagsOffset;
+     return *addr & kNodeStateMask;
+   }
+
+   V8_INLINE static void UpdateNodeState(internal::Address* obj, uint8_t value) {
+     uint8_t* addr = reinterpret_cast<uint8_t*>(obj) + kNodeFlagsOffset;
+     *addr = static_cast<uint8_t>((*addr & ~kNodeStateMask) | value);
+   }
+
+   V8_INLINE static void SetEmbedderData(v8::Isolate* isolate, uint32_t slot,
+                                         void* data) {
+     internal::Address addr = reinterpret_cast<internal::Address>(isolate) +
+                              kIsolateEmbedderDataOffset +
+                              slot * kApiSystemPointerSize;
+     *reinterpret_cast<void**>(addr) = data;
+   }
+
+   V8_INLINE static void* GetEmbedderData(const v8::Isolate* isolate,
+                                          uint32_t slot) {
+     internal::Address addr = reinterpret_cast<internal::Address>(isolate) +
+                              kIsolateEmbedderDataOffset +
+                              slot * kApiSystemPointerSize;
+     return *reinterpret_cast<void* const*>(addr);
+   }
+
+   V8_INLINE static internal::Address* GetRoot(v8::Isolate* isolate, int index) {
+     internal::Address addr = reinterpret_cast<internal::Address>(isolate) +
+                              kIsolateRootsOffset +
+                              index * kApiSystemPointerSize;
+     return reinterpret_cast<internal::Address*>(addr);
+   }
+
+   template <typename T>
+   V8_INLINE static T ReadRawField(internal::Address heap_object_ptr,
+                                   int offset) {
+     internal::Address addr = heap_object_ptr + offset - kHeapObjectTag;
+ #ifdef V8_COMPRESS_POINTERS
+     if (sizeof(T) > kApiTaggedSize) {
+       // TODO(ishell, v8:8875): When pointer compression is enabled 8-byte size
+       // fields (external pointers, doubles and BigInt data) are only
+       // kTaggedSize aligned so we have to use unaligned pointer friendly way of
+       // accessing them in order to avoid undefined behavior in C++ code.
+       T r;
+       memcpy(&r, reinterpret_cast<void*>(addr), sizeof(T));
+       return r;
+     }
+ #endif
+     return *reinterpret_cast<const T*>(addr);
+   }
+
+   V8_INLINE static internal::Address ReadTaggedPointerField(
+       internal::Address heap_object_ptr, int offset) {
+ #ifdef V8_COMPRESS_POINTERS
+     uint32_t value = ReadRawField<uint32_t>(heap_object_ptr, offset);
+     internal::Address base =
+         GetPtrComprCageBaseFromOnHeapAddress(heap_object_ptr);
+     return base + static_cast<internal::Address>(static_cast<uintptr_t>(value));
+ #else
+     return ReadRawField<internal::Address>(heap_object_ptr, offset);
+ #endif
+   }
+
+   V8_INLINE static internal::Address ReadTaggedSignedField(
+       internal::Address heap_object_ptr, int offset) {
+ #ifdef V8_COMPRESS_POINTERS
+     uint32_t value = ReadRawField<uint32_t>(heap_object_ptr, offset);
+     return static_cast<internal::Address>(static_cast<uintptr_t>(value));
+ #else
+     return ReadRawField<internal::Address>(heap_object_ptr, offset);
+ #endif
+   }
+
+   V8_INLINE static internal::Isolate* GetIsolateForHeapSandbox(
+       internal::Address obj) {
+ #ifdef V8_HEAP_SANDBOX
+     return internal::IsolateFromNeverReadOnlySpaceObject(obj);
+ #else
+     // Not used in non-sandbox mode.
+     return nullptr;
+ #endif
+   }
+
+   V8_INLINE static Address DecodeExternalPointer(
+       const Isolate* isolate, ExternalPointer_t encoded_pointer,
+       ExternalPointerTag tag) {
+ #ifdef V8_HEAP_SANDBOX
+     return internal::DecodeExternalPointerImpl(isolate, encoded_pointer, tag);
+ #else
+     return encoded_pointer;
+ #endif
+   }
+
+   V8_INLINE static internal::Address ReadExternalPointerField(
+       internal::Isolate* isolate, internal::Address heap_object_ptr, int offset,
+       ExternalPointerTag tag) {
+ #ifdef V8_HEAP_SANDBOX
+     internal::ExternalPointer_t encoded_value =
+         ReadRawField<uint32_t>(heap_object_ptr, offset);
+     // We currently have to treat zero as nullptr in embedder slots.
+     return encoded_value ? DecodeExternalPointer(isolate, encoded_value, tag)
+                          : 0;
+ #else
+     return ReadRawField<Address>(heap_object_ptr, offset);
+ #endif
+   }
+
+ #ifdef V8_COMPRESS_POINTERS
+   // See v8:7703 or src/ptr-compr.* for details about pointer compression.
+   static constexpr size_t kPtrComprCageReservationSize = size_t{1} << 32;
+   static constexpr size_t kPtrComprCageBaseAlignment = size_t{1} << 32;
+
+   V8_INLINE static internal::Address GetPtrComprCageBaseFromOnHeapAddress(
+       internal::Address addr) {
+     return addr & -static_cast<intptr_t>(kPtrComprCageBaseAlignment);
+   }
+
+   V8_INLINE static internal::Address DecompressTaggedAnyField(
+       internal::Address heap_object_ptr, uint32_t value) {
+     internal::Address base =
+         GetPtrComprCageBaseFromOnHeapAddress(heap_object_ptr);
+     return base + static_cast<internal::Address>(static_cast<uintptr_t>(value));
+   }
+
+ #endif  // V8_COMPRESS_POINTERS
+ };
+
+ // Only perform cast check for types derived from v8::Data since
+ // other types do not implement the Cast method.
+ template <bool PerformCheck>
+ struct CastCheck {
+   template <class T>
+   static void Perform(T* data);
+ };
+
+ template <>
+ template <class T>
+ void CastCheck<true>::Perform(T* data) {
+   T::Cast(data);
+ }
+
+ template <>
+ template <class T>
+ void CastCheck<false>::Perform(T* data) {}
+
+ template <class T>
+ V8_INLINE void PerformCastCheck(T* data) {
+   CastCheck<std::is_base_of<Data, T>::value &&
+             !std::is_same<Data, std::remove_cv_t<T>>::value>::Perform(data);
+ }
+
+ // A base class for backing stores, which is needed due to vagaries of
+ // how static casts work with std::shared_ptr.
+ class BackingStoreBase {};
+
+ }  // namespace internal
+ }  // namespace v8
+
+ #endif  // INCLUDE_V8_INTERNAL_H_
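
Two of the mechanisms in v8-internal.h above reward a worked example. First, the Smi (small integer) scheme: on 64-bit builds without pointer compression, SmiTagging<8> keeps the 32-bit payload in the upper half of the word, so the low tag bit stays 0 and an arithmetic right shift recovers the signed value. A minimal standalone sketch (plain C++, independent of V8; the constants mirror SmiTagging<8> and kSmiTagSize above, and a 64-bit uintptr_t is assumed):

// Standalone sketch of the Smi round-trip performed by SmiTagging<8>,
// IntToSmi and SmiToInt above. Assumes a 64-bit platform.
#include <cassert>
#include <cstdint>

using Address = uintptr_t;

constexpr int kSmiTag = 0;         // Low bit 0 marks a Smi...
constexpr int kSmiTagSize = 1;     // ...vs. kHeapObjectTag == 1 for objects.
constexpr int kSmiShiftSize = 31;  // SmiTagging<8>: payload in the top 32 bits.

constexpr Address IntToSmi(int value) {
  return (static_cast<Address>(value) << (kSmiTagSize + kSmiShiftSize)) |
         kSmiTag;
}

int SmiToInt(Address value) {
  // Arithmetic shift right sign-extends, recovering the signed payload.
  return static_cast<int>(static_cast<intptr_t>(value) >>
                          (kSmiTagSize + kSmiShiftSize));
}

int main() {
  for (int v : {0, 1, -1, 42, -12345, INT32_MAX, INT32_MIN}) {
    Address smi = IntToSmi(v);
    assert((smi & 1) == 0);      // HeapObject tag bit is clear.
    assert(SmiToInt(smi) == v);  // Round-trip is lossless for any int32.
  }
  return 0;
}

Second, the ExternalPointerTag scheme: tags are ORed in when an external pointer is stored and ANDed out with the inverse of the expected tag on load, so a mismatched tag leaves one or more high bits set and the pointer unusable. A sketch of just that arithmetic (the tag values are copied from the enum above; EncodeTagged and DecodeTagged are illustrative names, not V8 API):

// Sketch of the heap-sandbox type check described in v8-internal.h above.
#include <cassert>
#include <cstdint>

constexpr uint64_t kForeignForeignAddressTag = 0x01f7000000000000;
constexpr uint64_t kCodeEntryPointTag = 0x01fe000000000000;

constexpr uint64_t EncodeTagged(uint64_t raw, uint64_t tag) {
  return raw | tag;  // Store: OR the type tag into the table entry.
}
constexpr uint64_t DecodeTagged(uint64_t entry, uint64_t tag) {
  return entry & ~tag;  // Load: AND with the inverse of the expected tag.
}

int main() {
  const uint64_t raw = 0x00007f1234567890;  // Top 16 bits clear.
  const uint64_t entry = EncodeTagged(raw, kForeignForeignAddressTag);
  // Matching tag: the original pointer comes back unchanged.
  assert(DecodeTagged(entry, kForeignForeignAddressTag) == raw);
  // Mismatched tag: leftover high bits make the pointer non-canonical.
  assert(DecodeTagged(entry, kCodeEntryPointTag) >> 48 != 0);
  return 0;
}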
data/vendor/v8/include/v8-metrics.h ADDED
@@ -0,0 +1,202 @@
+ // Copyright 2020 the V8 project authors. All rights reserved.
+ // Use of this source code is governed by a BSD-style license that can be
+ // found in the LICENSE file.
+
+ #ifndef V8_METRICS_H_
+ #define V8_METRICS_H_
+
+ #include "v8.h"  // NOLINT(build/include_directory)
+
+ namespace v8 {
+ namespace metrics {
+
+ struct GarbageCollectionPhases {
+   int64_t compact_wall_clock_duration_in_us = -1;
+   int64_t mark_wall_clock_duration_in_us = -1;
+   int64_t sweep_wall_clock_duration_in_us = -1;
+   int64_t weak_wall_clock_duration_in_us = -1;
+ };
+
+ struct GarbageCollectionSizes {
+   int64_t bytes_before = -1;
+   int64_t bytes_after = -1;
+   int64_t bytes_freed = -1;
+ };
+
+ struct GarbageCollectionFullCycle {
+   GarbageCollectionPhases total;
+   GarbageCollectionPhases total_cpp;
+   GarbageCollectionPhases main_thread;
+   GarbageCollectionPhases main_thread_cpp;
+   GarbageCollectionPhases main_thread_atomic;
+   GarbageCollectionPhases main_thread_atomic_cpp;
+   GarbageCollectionPhases main_thread_incremental;
+   GarbageCollectionPhases main_thread_incremental_cpp;
+   GarbageCollectionSizes objects;
+   GarbageCollectionSizes objects_cpp;
+   GarbageCollectionSizes memory;
+   GarbageCollectionSizes memory_cpp;
+   double collection_rate_in_percent;
+   double collection_rate_cpp_in_percent;
+   double efficiency_in_bytes_per_us;
+   double efficiency_cpp_in_bytes_per_us;
+   double main_thread_efficiency_in_bytes_per_us;
+   double main_thread_efficiency_cpp_in_bytes_per_us;
+ };
+
+ struct GarbageCollectionFullMainThreadIncrementalMark {
+   int64_t wall_clock_duration_in_us = -1;
+   int64_t cpp_wall_clock_duration_in_us = -1;
+ };
+
+ struct GarbageCollectionFullMainThreadBatchedIncrementalMark {
+   std::vector<GarbageCollectionFullMainThreadIncrementalMark> events;
+ };
+
+ struct GarbageCollectionFullMainThreadIncrementalSweep {
+   int64_t wall_clock_duration_in_us = -1;
+   int64_t cpp_wall_clock_duration_in_us = -1;
+ };
+
+ struct GarbageCollectionFullMainThreadBatchedIncrementalSweep {
+   std::vector<GarbageCollectionFullMainThreadIncrementalSweep> events;
+ };
+
+ struct GarbageCollectionYoungCycle {
+   int64_t total_wall_clock_duration_in_us = -1;
+   int64_t main_thread_wall_clock_duration_in_us = -1;
+   double collection_rate_in_percent;
+   double efficiency_in_bytes_per_us;
+   double main_thread_efficiency_in_bytes_per_us;
+ };
+
+ struct WasmModuleDecoded {
+   bool async = false;
+   bool streamed = false;
+   bool success = false;
+   size_t module_size_in_bytes = 0;
+   size_t function_count = 0;
+   int64_t wall_clock_duration_in_us = -1;
+   int64_t cpu_duration_in_us = -1;
+ };
+
+ struct WasmModuleCompiled {
+   bool async = false;
+   bool streamed = false;
+   bool cached = false;
+   bool deserialized = false;
+   bool lazy = false;
+   bool success = false;
+   size_t code_size_in_bytes = 0;
+   size_t liftoff_bailout_count = 0;
+   int64_t wall_clock_duration_in_us = -1;
+   int64_t cpu_duration_in_us = -1;
+ };
+
+ struct WasmModuleInstantiated {
+   bool async = false;
+   bool success = false;
+   size_t imported_function_count = 0;
+   int64_t wall_clock_duration_in_us = -1;
+ };
+
+ struct WasmModuleTieredUp {
+   bool lazy = false;
+   size_t code_size_in_bytes = 0;
+   int64_t wall_clock_duration_in_us = -1;
+   int64_t cpu_duration_in_us = -1;
+ };
+
+ struct WasmModulesPerIsolate {
+   size_t count = 0;
+ };
+
+ #define V8_MAIN_THREAD_METRICS_EVENTS(V)                    \
+   V(GarbageCollectionFullCycle)                             \
+   V(GarbageCollectionFullMainThreadIncrementalMark)         \
+   V(GarbageCollectionFullMainThreadBatchedIncrementalMark)  \
+   V(GarbageCollectionFullMainThreadIncrementalSweep)        \
+   V(GarbageCollectionFullMainThreadBatchedIncrementalSweep) \
+   V(GarbageCollectionYoungCycle)                            \
+   V(WasmModuleDecoded)                                      \
+   V(WasmModuleCompiled)                                     \
+   V(WasmModuleInstantiated)                                 \
+   V(WasmModuleTieredUp)
+
+ #define V8_THREAD_SAFE_METRICS_EVENTS(V) V(WasmModulesPerIsolate)
+
+ /**
+  * This class serves as a base class for recording event-based metrics in V8.
+  * There are two kinds of metrics: those that are expected to be thread-safe,
+  * whose implementation is required to fulfill this requirement, and those
+  * whose implementation does not have that requirement and only needs to be
+  * executable on the main thread. If such an event is triggered from a
+  * background thread, it will be delayed and executed by the foreground task
+  * runner.
+  *
+  * The thread-safe events are listed in the V8_THREAD_SAFE_METRICS_EVENTS
+  * macro above while the main thread events are listed in
+  * V8_MAIN_THREAD_METRICS_EVENTS above. For the latter, a virtual method
+  * AddMainThreadEvent(const E& event, ContextId context_id) will be
+  * generated, and for the former AddThreadSafeEvent(const E& event).
+  *
+  * Thread-safe events are not allowed to access the context and therefore do
+  * not carry a context ID with them. These IDs can be generated using
+  * Recorder::GetContextId() and the ID will be valid throughout the lifetime
+  * of the isolate. It is not guaranteed that the ID will still resolve to
+  * a valid context using Recorder::GetContext() at the time the metric is
+  * recorded. In this case, an empty handle will be returned.
+  *
+  * The embedder is expected to call v8::Isolate::SetMetricsRecorder()
+  * providing its implementation and have the virtual methods overridden
+  * for the events it cares about.
+  */
+ class V8_EXPORT Recorder {
+  public:
+   // A unique identifier for a context in this Isolate.
+   // It is guaranteed to not be reused throughout the lifetime of the Isolate.
+   class ContextId {
+    public:
+     ContextId() : id_(kEmptyId) {}
+
+     bool IsEmpty() const { return id_ == kEmptyId; }
+     static const ContextId Empty() { return ContextId{kEmptyId}; }
+
+     bool operator==(const ContextId& other) const { return id_ == other.id_; }
+     bool operator!=(const ContextId& other) const { return id_ != other.id_; }
+
+    private:
+     friend class ::v8::Context;
+     friend class ::v8::internal::Isolate;
+
+     explicit ContextId(uintptr_t id) : id_(id) {}
+
+     static constexpr uintptr_t kEmptyId = 0;
+     uintptr_t id_;
+   };
+
+   virtual ~Recorder() = default;
+
+ #define ADD_MAIN_THREAD_EVENT(E) \
+   virtual void AddMainThreadEvent(const E& event, ContextId context_id) {}
+   V8_MAIN_THREAD_METRICS_EVENTS(ADD_MAIN_THREAD_EVENT)
+ #undef ADD_MAIN_THREAD_EVENT
+
+ #define ADD_THREAD_SAFE_EVENT(E) \
+   virtual void AddThreadSafeEvent(const E& event) {}
+   V8_THREAD_SAFE_METRICS_EVENTS(ADD_THREAD_SAFE_EVENT)
+ #undef ADD_THREAD_SAFE_EVENT
+
+   virtual void NotifyIsolateDisposal() {}
+
+   // Return the context with the given id or an empty handle if the context
+   // was already garbage collected.
+   static MaybeLocal<Context> GetContext(Isolate* isolate, ContextId id);
+   // Return the unique id corresponding to the given context.
+   static ContextId GetContextId(Local<Context> context);
+ };
+
+ }  // namespace metrics
+ }  // namespace v8
+
+ #endif  // V8_METRICS_H_
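
The Recorder interface above is meant to be subclassed by the embedder. A minimal sketch of such a recorder, assuming the V8 headers shipped in this gem are on the include path; it overrides only two of the macro-generated virtuals and logs to stdout. The registration call at the end follows the class comment's pointer to v8::Isolate::SetMetricsRecorder(); the shared_ptr form shown there is an assumption:

// Minimal embedder-side recorder sketch built on v8::metrics::Recorder above.
#include <iostream>

#include "v8-metrics.h"

class StdoutMetricsRecorder final : public v8::metrics::Recorder {
 public:
  // Main-thread event: carries a ContextId (which may no longer resolve).
  void AddMainThreadEvent(const v8::metrics::WasmModuleCompiled& event,
                          ContextId context_id) override {
    std::cout << "wasm compiled: " << event.code_size_in_bytes << " bytes in "
              << event.wall_clock_duration_in_us
              << " us, success=" << event.success << "\n";
  }
  // Thread-safe event: may be called from any thread, so keep it reentrant.
  void AddThreadSafeEvent(
      const v8::metrics::WasmModulesPerIsolate& event) override {
    std::cout << "wasm modules per isolate: " << event.count << "\n";
  }
};

// Registration, inside embedder setup code ('isolate' assumed initialized):
//   isolate->SetMetricsRecorder(std::make_shared<StdoutMetricsRecorder>());

All other events keep their empty default bodies, so a recorder only pays for the events it actually overrides.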