libv8-node 18.13.0.1-arm64-darwin → 20.2.0.0-arm64-darwin

Files changed (70)
  1. checksums.yaml +4 -4
  2. data/lib/libv8/node/version.rb +3 -3
  3. data/vendor/v8/arm64-darwin/libv8/obj/libv8_monolith.a +0 -0
  4. data/vendor/v8/include/cppgc/common.h +0 -1
  5. data/vendor/v8/include/cppgc/cross-thread-persistent.h +11 -10
  6. data/vendor/v8/include/cppgc/heap-consistency.h +46 -3
  7. data/vendor/v8/include/cppgc/heap-handle.h +48 -0
  8. data/vendor/v8/include/cppgc/heap-statistics.h +2 -2
  9. data/vendor/v8/include/cppgc/heap.h +3 -7
  10. data/vendor/v8/include/cppgc/internal/api-constants.h +14 -1
  11. data/vendor/v8/include/cppgc/internal/base-page-handle.h +45 -0
  12. data/vendor/v8/include/cppgc/internal/caged-heap-local-data.h +40 -8
  13. data/vendor/v8/include/cppgc/internal/caged-heap.h +61 -0
  14. data/vendor/v8/include/cppgc/internal/gc-info.h +35 -34
  15. data/vendor/v8/include/cppgc/internal/member-storage.h +248 -0
  16. data/vendor/v8/include/cppgc/internal/name-trait.h +21 -6
  17. data/vendor/v8/include/cppgc/internal/persistent-node.h +11 -13
  18. data/vendor/v8/include/cppgc/internal/pointer-policies.h +65 -8
  19. data/vendor/v8/include/cppgc/internal/write-barrier.h +153 -101
  20. data/vendor/v8/include/cppgc/liveness-broker.h +8 -7
  21. data/vendor/v8/include/cppgc/macros.h +10 -1
  22. data/vendor/v8/include/cppgc/member.h +424 -111
  23. data/vendor/v8/include/cppgc/name-provider.h +4 -4
  24. data/vendor/v8/include/cppgc/persistent.h +27 -24
  25. data/vendor/v8/include/cppgc/platform.h +7 -5
  26. data/vendor/v8/include/cppgc/sentinel-pointer.h +1 -1
  27. data/vendor/v8/include/cppgc/trace-trait.h +4 -0
  28. data/vendor/v8/include/cppgc/type-traits.h +13 -3
  29. data/vendor/v8/include/cppgc/visitor.h +104 -57
  30. data/vendor/v8/include/libplatform/v8-tracing.h +2 -2
  31. data/vendor/v8/include/v8-array-buffer.h +59 -0
  32. data/vendor/v8/include/v8-callbacks.h +32 -5
  33. data/vendor/v8/include/v8-context.h +63 -11
  34. data/vendor/v8/include/v8-cppgc.h +22 -0
  35. data/vendor/v8/include/v8-data.h +1 -1
  36. data/vendor/v8/include/v8-date.h +5 -0
  37. data/vendor/v8/include/v8-embedder-heap.h +0 -164
  38. data/vendor/v8/include/v8-exception.h +1 -1
  39. data/vendor/v8/include/v8-fast-api-calls.h +49 -31
  40. data/vendor/v8/include/v8-function-callback.h +69 -42
  41. data/vendor/v8/include/v8-function.h +9 -0
  42. data/vendor/v8/include/v8-initialization.h +23 -49
  43. data/vendor/v8/include/v8-inspector.h +32 -11
  44. data/vendor/v8/include/v8-internal.h +480 -183
  45. data/vendor/v8/include/v8-isolate.h +52 -77
  46. data/vendor/v8/include/v8-local-handle.h +86 -53
  47. data/vendor/v8/include/v8-locker.h +0 -11
  48. data/vendor/v8/include/v8-maybe.h +24 -1
  49. data/vendor/v8/include/v8-message.h +2 -4
  50. data/vendor/v8/include/v8-metrics.h +48 -40
  51. data/vendor/v8/include/v8-microtask-queue.h +6 -1
  52. data/vendor/v8/include/v8-object.h +29 -18
  53. data/vendor/v8/include/v8-persistent-handle.h +25 -18
  54. data/vendor/v8/include/v8-platform.h +133 -35
  55. data/vendor/v8/include/v8-primitive.h +27 -20
  56. data/vendor/v8/include/v8-profiler.h +133 -53
  57. data/vendor/v8/include/v8-regexp.h +2 -1
  58. data/vendor/v8/include/v8-script.h +91 -7
  59. data/vendor/v8/include/v8-snapshot.h +4 -8
  60. data/vendor/v8/include/v8-template.h +16 -77
  61. data/vendor/v8/include/v8-traced-handle.h +22 -28
  62. data/vendor/v8/include/v8-unwinder-state.h +4 -4
  63. data/vendor/v8/include/v8-util.h +11 -7
  64. data/vendor/v8/include/v8-value-serializer.h +46 -23
  65. data/vendor/v8/include/v8-value.h +31 -4
  66. data/vendor/v8/include/v8-version.h +4 -4
  67. data/vendor/v8/include/v8-wasm.h +7 -63
  68. data/vendor/v8/include/v8-weak-callback-info.h +0 -7
  69. data/vendor/v8/include/v8config.h +353 -15
  70. metadata +5 -1
--- a/data/vendor/v8/include/v8-internal.h
+++ b/data/vendor/v8/include/v8-internal.h
@@ -8,6 +8,8 @@
 #include <stddef.h>
 #include <stdint.h>
 #include <string.h>
+
+#include <atomic>
 #include <type_traits>
 
 #include "v8-version.h"  // NOLINT(build/include_directory)
@@ -19,15 +21,13 @@ class Array;
 class Context;
 class Data;
 class Isolate;
-template <typename T>
-class Local;
 
 namespace internal {
 
 class Isolate;
 
 typedef uintptr_t Address;
-static const Address kNullAddress = 0;
+static constexpr Address kNullAddress = 0;
 
 constexpr int KB = 1024;
 constexpr int MB = KB * 1024;
@@ -50,6 +50,7 @@ const int kHeapObjectTag = 1;
 const int kWeakHeapObjectTag = 3;
 const int kHeapObjectTagSize = 2;
 const intptr_t kHeapObjectTagMask = (1 << kHeapObjectTagSize) - 1;
+const intptr_t kHeapObjectReferenceTagMask = 1 << (kHeapObjectTagSize - 1);
 
 // Tag information for forwarding pointers stored in object headers.
 // 0b00 at the lowest 2 bits in the header indicates that the map word is a
@@ -79,7 +80,7 @@ struct SmiTagging<4> {
       static_cast<intptr_t>(kUintptrAllBitsSet << (kSmiValueSize - 1));
   static constexpr intptr_t kSmiMaxValue = -(kSmiMinValue + 1);
 
-  V8_INLINE static int SmiToInt(const internal::Address value) {
+  V8_INLINE static int SmiToInt(Address value) {
     int shift_bits = kSmiTagSize + kSmiShiftSize;
     // Truncate and shift down (requires >> to be sign extending).
     return static_cast<int32_t>(static_cast<uint32_t>(value)) >> shift_bits;
@@ -104,7 +105,7 @@ struct SmiTagging<8> {
       static_cast<intptr_t>(kUintptrAllBitsSet << (kSmiValueSize - 1));
   static constexpr intptr_t kSmiMaxValue = -(kSmiMinValue + 1);
 
-  V8_INLINE static int SmiToInt(const internal::Address value) {
+  V8_INLINE static int SmiToInt(Address value) {
     int shift_bits = kSmiTagSize + kSmiShiftSize;
     // Shift down and throw away top 32 bits.
     return static_cast<int>(static_cast<intptr_t>(value) >> shift_bits);
@@ -147,8 +148,9 @@ const int kSmiMinValue = static_cast<int>(PlatformSmiTagging::kSmiMinValue);
 const int kSmiMaxValue = static_cast<int>(PlatformSmiTagging::kSmiMaxValue);
 constexpr bool SmiValuesAre31Bits() { return kSmiValueSize == 31; }
 constexpr bool SmiValuesAre32Bits() { return kSmiValueSize == 32; }
+constexpr bool Is64() { return kApiSystemPointerSize == sizeof(int64_t); }
 
-V8_INLINE static constexpr internal::Address IntToSmi(int value) {
+V8_INLINE static constexpr Address IntToSmi(int value) {
   return (static_cast<Address>(value) << (kSmiTagSize + kSmiShiftSize)) |
          kSmiTag;
 }
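For orientation, here is a minimal standalone sketch (not part of the header) of the 64-bit Smi scheme that `SmiTagging<8>` and `IntToSmi` above implement, assuming the 64-bit constants kSmiTagSize = 1, kSmiShiftSize = 31 and kSmiTag = 0:

```cpp
// Standalone sketch of the 64-bit Smi encoding; assumes a 64-bit platform.
#include <cassert>
#include <cstdint>

using Address = uintptr_t;
constexpr int kSmiTagSize = 1, kSmiShiftSize = 31, kSmiTag = 0;

constexpr Address IntToSmi(int value) {
  // The payload lives in the upper 32 bits; the low tag bit stays 0 for Smis.
  return (static_cast<Address>(value) << (kSmiTagSize + kSmiShiftSize)) |
         kSmiTag;
}

constexpr int SmiToInt(Address value) {
  // Arithmetic shift down recovers the signed payload.
  return static_cast<int>(static_cast<intptr_t>(value) >>
                          (kSmiTagSize + kSmiShiftSize));
}

int main() {
  assert(SmiToInt(IntToSmi(-42)) == -42);  // round-trips, including negatives
  assert((IntToSmi(7) & 1) == 0);          // tag bit marks the value as a Smi
}
```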
@@ -157,15 +159,7 @@ V8_INLINE static constexpr internal::Address IntToSmi(int value) {
  * Sandbox related types, constants, and functions.
  */
 constexpr bool SandboxIsEnabled() {
-#ifdef V8_SANDBOX
-  return true;
-#else
-  return false;
-#endif
-}
-
-constexpr bool SandboxedExternalPointersAreEnabled() {
-#ifdef V8_SANDBOXED_EXTERNAL_POINTERS
+#ifdef V8_ENABLE_SANDBOX
   return true;
 #else
   return false;
@@ -176,19 +170,18 @@ constexpr bool SandboxedExternalPointersAreEnabled() {
 // for example by storing them as offset rather than as raw pointers.
 using SandboxedPointer_t = Address;
 
-// ExternalPointers point to objects located outside the sandbox. When sandboxed
-// external pointers are enabled, these are stored in an external pointer table
-// and referenced from HeapObjects through indices.
-#ifdef V8_SANDBOXED_EXTERNAL_POINTERS
-using ExternalPointer_t = uint32_t;
-#else
-using ExternalPointer_t = Address;
-#endif
-
-#ifdef V8_SANDBOX_IS_AVAILABLE
+#ifdef V8_ENABLE_SANDBOX
 
 // Size of the sandbox, excluding the guard regions surrounding it.
+#ifdef V8_TARGET_OS_ANDROID
+// On Android, most 64-bit devices seem to be configured with only 39 bits of
+// virtual address space for userspace. As such, limit the sandbox to 128GB (a
+// quarter of the total available address space).
+constexpr size_t kSandboxSizeLog2 = 37;  // 128 GB
+#else
+// Everywhere else use a 1TB sandbox.
 constexpr size_t kSandboxSizeLog2 = 40;  // 1 TB
+#endif  // V8_TARGET_OS_ANDROID
 constexpr size_t kSandboxSize = 1ULL << kSandboxSizeLog2;
 
 // Required alignment of the sandbox. For simplicity, we require the
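The context above notes that sandboxed pointers stay inside the sandbox by being stored as offsets rather than raw pointers. A hedged sketch of that idea (V8's actual SandboxedPointer encoding differs in detail, e.g. it also shifts the offset; names here are illustrative):

```cpp
// Illustrative only: a pointer into the sandbox stored as an offset from the
// sandbox base. Even an attacker-controlled stored value can then only
// resolve to an address inside the [base, base + kSandboxSize) reservation.
#include <cstddef>
#include <cstdint>

using Address = uintptr_t;
constexpr size_t kSandboxSizeLog2 = 40;  // the 1 TB non-Android value above
constexpr size_t kSandboxSize = 1ULL << kSandboxSizeLog2;

Address EncodeSandboxedPointer(Address base, Address ptr) {
  return ptr - base;  // stored form: offset into the sandbox
}

Address DecodeSandboxedPointer(Address base, Address stored) {
  // Masking bounds the offset to the sandbox before adding the base.
  return base + (stored & (kSandboxSize - 1));
}
```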
@@ -213,20 +206,6 @@ static_assert((kSandboxGuardRegionSize % kSandboxAlignment) == 0,
               "The size of the guard regions around the sandbox must be a "
               "multiple of its required alignment.");
 
-// Minimum size of the sandbox, excluding the guard regions surrounding it. If
-// the virtual memory reservation for the sandbox fails, its size is currently
-// halved until either the reservation succeeds or the minimum size is reached.
-// A minimum of 32GB allows the 4GB pointer compression region as well as the
-// ArrayBuffer partition and two 10GB Wasm memory cages to fit into the
-// sandbox. 32GB should also be the minimum possible size of the userspace
-// address space as there are some machine configurations with only 36 virtual
-// address bits.
-constexpr size_t kSandboxMinimumSize = 32ULL * GB;
-
-static_assert(kSandboxMinimumSize <= kSandboxSize,
-              "The minimal size of the sandbox must be smaller or equal to the "
-              "regular size.");
-
 // On OSes where reserving virtual memory is too expensive to reserve the
 // entire address space backing the sandbox, notably Windows pre 8.1, we create
 // a partially reserved sandbox that doesn't actually reserve most of the
@@ -239,82 +218,258 @@ static_assert(kSandboxMinimumSize <= kSandboxSize,
 // well as the ArrayBuffer partition.
 constexpr size_t kSandboxMinimumReservationSize = 8ULL * GB;
 
-static_assert(kSandboxMinimumSize > kPtrComprCageReservationSize,
-              "The sandbox must be larger than the pointer compression cage "
-              "contained within it.");
 static_assert(kSandboxMinimumReservationSize > kPtrComprCageReservationSize,
               "The minimum reservation size for a sandbox must be larger than "
               "the pointer compression cage contained within it.");
 
-// For now, even if the sandbox is enabled, we still allow backing stores to be
-// allocated outside of it as fallback. This will simplify the initial rollout.
-// However, if sandboxed pointers are also enabled, we must always place
-// backing stores inside the sandbox as they will be referenced through them.
-#ifdef V8_SANDBOXED_POINTERS
-constexpr bool kAllowBackingStoresOutsideSandbox = false;
-#else
-constexpr bool kAllowBackingStoresOutsideSandbox = true;
-#endif  // V8_SANDBOXED_POINTERS
+// The maximum buffer size allowed inside the sandbox. This is mostly dependent
+// on the size of the guard regions around the sandbox: an attacker must not be
+// able to construct a buffer that appears larger than the guard regions and
+// thereby "reach out of" the sandbox.
+constexpr size_t kMaxSafeBufferSizeForSandbox = 32ULL * GB - 1;
+static_assert(kMaxSafeBufferSizeForSandbox <= kSandboxGuardRegionSize,
+              "The maximum allowed buffer size must not be larger than the "
+              "sandbox's guard regions");
+
+constexpr size_t kBoundedSizeShift = 29;
+static_assert(1ULL << (64 - kBoundedSizeShift) ==
+                  kMaxSafeBufferSizeForSandbox + 1,
+              "The maximum size of a BoundedSize must be synchronized with the "
+              "kMaxSafeBufferSizeForSandbox");
 
+#endif  // V8_ENABLE_SANDBOX
+
+#ifdef V8_COMPRESS_POINTERS
+
+#ifdef V8_TARGET_OS_ANDROID
 // The size of the virtual memory reservation for an external pointer table.
 // This determines the maximum number of entries in a table. Using a maximum
 // size allows omitting bounds checks on table accesses if the indices are
 // guaranteed (e.g. through shifting) to be below the maximum index. This
 // value must be a power of two.
-static const size_t kExternalPointerTableReservationSize = 128 * MB;
-
-// The maximum number of entries in an external pointer table.
-static const size_t kMaxSandboxedExternalPointers =
-    kExternalPointerTableReservationSize / kApiSystemPointerSize;
+static const size_t kExternalPointerTableReservationSize = 512 * MB;
 
 // The external pointer table indices stored in HeapObjects as external
 // pointers are shifted to the left by this amount to guarantee that they are
 // smaller than the maximum table size.
-static const uint32_t kExternalPointerIndexShift = 8;
-static_assert((1 << (32 - kExternalPointerIndexShift)) ==
-                  kMaxSandboxedExternalPointers,
+static const uint32_t kExternalPointerIndexShift = 6;
+#else
+static const size_t kExternalPointerTableReservationSize = 1024 * MB;
+static const uint32_t kExternalPointerIndexShift = 5;
+#endif  // V8_TARGET_OS_ANDROID
+
+// The maximum number of entries in an external pointer table.
+static const size_t kMaxExternalPointers =
+    kExternalPointerTableReservationSize / kApiSystemPointerSize;
+static_assert((1 << (32 - kExternalPointerIndexShift)) == kMaxExternalPointers,
              "kExternalPointerTableReservationSize and "
              "kExternalPointerIndexShift don't match");
 
-#endif  // V8_SANDBOX_IS_AVAILABLE
-
-// If sandboxed external pointers are enabled, these tag values will be ORed
-// with the external pointers in the external pointer table to prevent use of
-// pointers of the wrong type. When a pointer is loaded, it is ANDed with the
-// inverse of the expected type's tag. The tags are constructed in a way that
-// guarantees that a failed type check will result in one or more of the top
-// bits of the pointer to be set, rendering the pointer inaccessible. Besides
-// the type tag bits (48 through 62), the tags also have the GC mark bit (63)
-// set, so that the mark bit is automatically set when a pointer is written
-// into the external pointer table (in which case it is clearly alive) and is
-// cleared when the pointer is loaded. The exception to this is the free entry
-// tag, which doesn't have the mark bit set, as the entry is not alive. This
+#else  // !V8_COMPRESS_POINTERS
+
+// Needed for the V8.SandboxedExternalPointersCount histogram.
+static const size_t kMaxExternalPointers = 0;
+
+#endif  // V8_COMPRESS_POINTERS
+
+// An ExternalPointerHandle represents an (opaque) reference to an external
+// pointer that can be stored inside the sandbox. An ExternalPointerHandle has
+// meaning only in combination with an (active) Isolate as it references an
+// external pointer stored in the currently active Isolate's
+// ExternalPointerTable. Internally, an ExternalPointerHandle is simply an
+// index into an ExternalPointerTable that is shifted to the left to guarantee
+// that it is smaller than the size of the table.
+using ExternalPointerHandle = uint32_t;
+
+// ExternalPointers point to objects located outside the sandbox. When
+// sandboxed external pointers are enabled, these are stored on heap as
+// ExternalPointerHandles, otherwise they are simply raw pointers.
+#ifdef V8_ENABLE_SANDBOX
+using ExternalPointer_t = ExternalPointerHandle;
+#else
+using ExternalPointer_t = Address;
+#endif
+
+// When the sandbox is enabled, external pointers are stored in an external
+// pointer table and are referenced from HeapObjects through an index (a
+// "handle"). When stored in the table, the pointers are tagged with per-type
+// tags to prevent type confusion attacks between different external objects.
+// Besides type information bits, these tags also contain the GC marking bit
+// which indicates whether the pointer table entry is currently alive. When a
+// pointer is written into the table, the tag is ORed into the top bits. When
+// that pointer is later loaded from the table, it is ANDed with the inverse of
+// the expected tag. If the expected and actual type differ, this will leave
+// some of the top bits of the pointer set, rendering the pointer inaccessible.
+// The AND operation also removes the GC marking bit from the pointer.
+//
+// The tags are constructed such that UNTAG(TAG(0, T1), T2) != 0 for any two
+// (distinct) tags T1 and T2. In practice, this is achieved by generating tags
+// that all have the same number of zeroes and ones but different bit patterns.
+// With N type tag bits, this allows for (N choose N/2) possible type tags.
+// Besides the type tag bits, the tags also have the GC marking bit set so that
+// the marking bit is automatically set when a pointer is written into the
+// external pointer table (in which case it is clearly alive) and is cleared
+// when the pointer is loaded. The exception to this is the free entry tag,
+// which doesn't have the mark bit set, as the entry is not alive. This
 // construction allows performing the type check and removing GC marking bits
-// (the MSB) from the pointer at the same time.
-// Note: this scheme assumes a 48-bit address space and will likely break if
-// more virtual address bits are used.
-constexpr uint64_t kExternalPointerTagMask = 0xffff000000000000;
+// from the pointer in one efficient operation (bitwise AND). The number of
+// available bits is limited in the following way: on x64, bits [47, 64) are
+// generally available for tagging (userspace has 47 address bits available).
+// On Arm64, userspace typically has a 40 or 48 bit address space. However, due
+// to top-byte ignore (TBI) and memory tagging (MTE), the top byte is unusable
+// for type checks as type-check failures would go unnoticed or collide with
+// MTE bits. Some bits of the top byte can, however, still be used for the GC
+// marking bit. The bits available for the type tags are therefore limited to
+// [48, 56), i.e. (8 choose 4) = 70 different types.
+// The following options exist to increase the number of possible types:
+// - Using multiple ExternalPointerTables since tags can safely be reused
+//   across different tables
+// - Using "extended" type checks, where additional type information is stored
+//   either in an adjacent pointer table entry or at the pointed-to location
+// - Using a different tagging scheme, for example based on XOR which would
+//   allow for 2**8 different tags but require a separate operation to remove
+//   the marking bit
+//
+// The external pointer sandboxing mechanism ensures that every access to an
+// external pointer field will result in a valid pointer of the expected type
+// even in the presence of an attacker able to corrupt memory inside the
+// sandbox. However, if any data related to the external object is stored
+// inside the sandbox it may still be corrupted and so must be validated before
+// use or moved into the external object. Further, an attacker will always be
+// able to substitute different external pointers of the same type for each
+// other. Therefore, code using external pointers must be written in a
+// "substitution-safe" way, i.e. it must always be possible to substitute
+// external pointers of the same type without causing memory corruption outside
+// of the sandbox. Generally this is achieved by referencing any group of
+// related external objects through a single external pointer.
+//
+// Currently we use bit 62 for the marking bit which should always be unused as
+// it's part of the non-canonical address range. When Arm's top-byte ignore
+// (TBI) is enabled, this bit will be part of the ignored byte, and we assume
+// that the Embedder is not using this byte (really only this one bit) for any
+// other purpose. This bit also does not collide with the memory tagging
+// extension (MTE) which would use bits [56, 60).
+//
+// External pointer tables are also available even when the sandbox is off but
+// pointer compression is on. In that case, the mechanism can be used to ease
+// alignment requirements as it turns unaligned 64-bit raw pointers into
+// aligned 32-bit indices. To "opt-in" to the external pointer table mechanism
+// for this purpose, instead of using the ExternalPointer accessors one needs to
+// use ExternalPointerHandles directly and use them to access the pointers in an
+// ExternalPointerTable.
+constexpr uint64_t kExternalPointerMarkBit = 1ULL << 62;
+constexpr uint64_t kExternalPointerTagMask = 0x40ff000000000000;
 constexpr uint64_t kExternalPointerTagShift = 48;
-#define MAKE_TAG(v) (static_cast<uint64_t>(v) << kExternalPointerTagShift)
+
+// All possible 8-bit type tags.
+// These are sorted so that tags can be grouped together and it can efficiently
+// be checked if a tag belongs to a given group. See for example the
+// IsSharedExternalPointerType routine.
+constexpr uint64_t kAllExternalPointerTypeTags[] = {
+    0b00001111, 0b00010111, 0b00011011, 0b00011101, 0b00011110, 0b00100111,
+    0b00101011, 0b00101101, 0b00101110, 0b00110011, 0b00110101, 0b00110110,
+    0b00111001, 0b00111010, 0b00111100, 0b01000111, 0b01001011, 0b01001101,
+    0b01001110, 0b01010011, 0b01010101, 0b01010110, 0b01011001, 0b01011010,
+    0b01011100, 0b01100011, 0b01100101, 0b01100110, 0b01101001, 0b01101010,
+    0b01101100, 0b01110001, 0b01110010, 0b01110100, 0b01111000, 0b10000111,
+    0b10001011, 0b10001101, 0b10001110, 0b10010011, 0b10010101, 0b10010110,
+    0b10011001, 0b10011010, 0b10011100, 0b10100011, 0b10100101, 0b10100110,
+    0b10101001, 0b10101010, 0b10101100, 0b10110001, 0b10110010, 0b10110100,
+    0b10111000, 0b11000011, 0b11000101, 0b11000110, 0b11001001, 0b11001010,
+    0b11001100, 0b11010001, 0b11010010, 0b11010100, 0b11011000, 0b11100001,
+    0b11100010, 0b11100100, 0b11101000, 0b11110000};
+
+#define TAG(i)                                                    \
+  ((kAllExternalPointerTypeTags[i] << kExternalPointerTagShift) | \
+   kExternalPointerMarkBit)
+
 // clang-format off
+
+// When adding new tags, please ensure that the code using these tags is
+// "substitution-safe", i.e. still operates safely if external pointers of the
+// same type are swapped by an attacker. See comment above for more details.
+
+// Shared external pointers are owned by the shared Isolate and stored in the
+// shared external pointer table associated with that Isolate, where they can
+// be accessed from multiple threads at the same time. The objects referenced
+// in this way must therefore always be thread-safe.
+#define SHARED_EXTERNAL_POINTER_TAGS(V)     \
+  V(kFirstSharedTag, TAG(0))                \
+  V(kWaiterQueueNodeTag, TAG(0))            \
+  V(kExternalStringResourceTag, TAG(1))     \
+  V(kExternalStringResourceDataTag, TAG(2)) \
+  V(kLastSharedTag, TAG(2))
+
+// External pointers using these tags are kept in a per-Isolate external
+// pointer table and can only be accessed when this Isolate is active.
+#define PER_ISOLATE_EXTERNAL_POINTER_TAGS(V)                                 \
+  V(kForeignForeignAddressTag, TAG(10))                                      \
+  V(kNativeContextMicrotaskQueueTag, TAG(11))                                \
+  V(kEmbedderDataSlotPayloadTag, TAG(12))                                    \
+  /* This tag essentially stands for a `void*` pointer in the V8 API, and */ \
+  /* it is the Embedder's responsibility to ensure type safety (against */   \
+  /* substitution) and lifetime validity of these objects. */                \
+  V(kExternalObjectValueTag, TAG(13))                                        \
+  V(kCallHandlerInfoCallbackTag, TAG(14))                                    \
+  V(kAccessorInfoGetterTag, TAG(15))                                         \
+  V(kAccessorInfoSetterTag, TAG(16))                                         \
+  V(kWasmInternalFunctionCallTargetTag, TAG(17))                             \
+  V(kWasmTypeInfoNativeTypeTag, TAG(18))                                     \
+  V(kWasmExportedFunctionDataSignatureTag, TAG(19))                          \
+  V(kWasmContinuationJmpbufTag, TAG(20))                                     \
+  V(kArrayBufferExtensionTag, TAG(21))
+
+// All external pointer tags.
+#define ALL_EXTERNAL_POINTER_TAGS(V) \
+  SHARED_EXTERNAL_POINTER_TAGS(V)    \
+  PER_ISOLATE_EXTERNAL_POINTER_TAGS(V)
+
+#define EXTERNAL_POINTER_TAG_ENUM(Name, Tag) Name = Tag,
+#define MAKE_TAG(HasMarkBit, TypeTag)                             \
+  ((static_cast<uint64_t>(TypeTag) << kExternalPointerTagShift) | \
+   (HasMarkBit ? kExternalPointerMarkBit : 0))
 enum ExternalPointerTag : uint64_t {
-  kExternalPointerNullTag = MAKE_TAG(0b0000000000000000),
-  kExternalPointerFreeEntryTag = MAKE_TAG(0b0111111110000000),
-  kExternalStringResourceTag = MAKE_TAG(0b1000000011111111),
-  kExternalStringResourceDataTag = MAKE_TAG(0b1000000101111111),
-  kForeignForeignAddressTag = MAKE_TAG(0b1000000110111111),
-  kNativeContextMicrotaskQueueTag = MAKE_TAG(0b1000000111011111),
-  kEmbedderDataSlotPayloadTag = MAKE_TAG(0b1000000111101111),
-  kCodeEntryPointTag = MAKE_TAG(0b1000000111110111),
-  kExternalObjectValueTag = MAKE_TAG(0b1000000111111011),
+  // Empty tag value. Mostly used as placeholder.
+  kExternalPointerNullTag = MAKE_TAG(0, 0b00000000),
+  // External pointer tag that will match any external pointer. Use with care!
+  kAnyExternalPointerTag = MAKE_TAG(1, 0b11111111),
+  // The free entry tag has all type bits set so every type check with a
+  // different type fails. It also doesn't have the mark bit set as free
+  // entries are (by definition) not alive.
+  kExternalPointerFreeEntryTag = MAKE_TAG(0, 0b11111111),
+  // Evacuation entries are used during external pointer table compaction.
+  kExternalPointerEvacuationEntryTag = MAKE_TAG(1, 0b11100111),
+
+  ALL_EXTERNAL_POINTER_TAGS(EXTERNAL_POINTER_TAG_ENUM)
 };
-// clang-format on
+
 #undef MAKE_TAG
+#undef TAG
+#undef EXTERNAL_POINTER_TAG_ENUM
 
-// Converts encoded external pointer to address.
-V8_EXPORT Address DecodeExternalPointerImpl(const Isolate* isolate,
-                                            ExternalPointer_t pointer,
-                                            ExternalPointerTag tag);
+// clang-format on
+
+// True if the external pointer must be accessed from the shared isolate's
+// external pointer table.
+V8_INLINE static constexpr bool IsSharedExternalPointerType(
+    ExternalPointerTag tag) {
+  return tag >= kFirstSharedTag && tag <= kLastSharedTag;
+}
+
+// Sanity checks.
+#define CHECK_SHARED_EXTERNAL_POINTER_TAGS(Tag, ...) \
+  static_assert(IsSharedExternalPointerType(Tag));
+#define CHECK_NON_SHARED_EXTERNAL_POINTER_TAGS(Tag, ...) \
+  static_assert(!IsSharedExternalPointerType(Tag));
+
+SHARED_EXTERNAL_POINTER_TAGS(CHECK_SHARED_EXTERNAL_POINTER_TAGS)
+PER_ISOLATE_EXTERNAL_POINTER_TAGS(CHECK_NON_SHARED_EXTERNAL_POINTER_TAGS)
+
+#undef CHECK_NON_SHARED_EXTERNAL_POINTER_TAGS
+#undef CHECK_SHARED_EXTERNAL_POINTER_TAGS
+
+#undef SHARED_EXTERNAL_POINTER_TAGS
+#undef EXTERNAL_POINTER_TAGS
 
 // {obj} must be the raw tagged pointer representation of a HeapObject
 // that's guaranteed to never be in ReadOnlySpace.
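The tagging scheme introduced in this hunk can be demonstrated in isolation. The sketch below uses two tag patterns from kAllExternalPointerTypeTags and the bit positions documented in the new comments (type bits in [48, 56), mark bit 62); it is a standalone illustration, not V8 code:

```cpp
// Standalone sketch: stores OR the tag in, loads AND with the inverted tag.
#include <cassert>
#include <cstdint>

constexpr uint64_t kMarkBit = 1ULL << 62;
constexpr uint64_t kTagShift = 48;
// Two entries from the tag table above; equal popcount, different patterns.
constexpr uint64_t kTagA = (0b00001111ULL << kTagShift) | kMarkBit;
constexpr uint64_t kTagB = (0b00010111ULL << kTagShift) | kMarkBit;

uint64_t Store(uint64_t ptr, uint64_t tag) { return ptr | tag; }    // tag + mark
uint64_t Load(uint64_t entry, uint64_t tag) { return entry & ~tag; }  // check + unmark

int main() {
  uint64_t ptr = 0x00007f1234567890;  // canonical 47-bit userspace pointer
  // Matching tag: the pointer round-trips and the mark bit is stripped.
  assert(Load(Store(ptr, kTagA), kTagA) == ptr);
  // Mismatched tag: surviving tag bits make the result non-canonical,
  // i.e. UNTAG(TAG(ptr, T1), T2) != ptr for distinct T1, T2.
  assert(Load(Store(ptr, kTagA), kTagB) != ptr);
}
```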
@@ -323,10 +478,7 @@ V8_EXPORT internal::Isolate* IsolateFromNeverReadOnlySpaceObject(Address obj);
 // Returns if we need to throw when an error occurs. This infers the language
 // mode based on the current context and the closure. This returns true if the
 // language mode is strict.
-V8_EXPORT bool ShouldThrowOnError(v8::internal::Isolate* isolate);
-
-V8_EXPORT bool CanHaveInternalField(int instance_type);
-
+V8_EXPORT bool ShouldThrowOnError(internal::Isolate* isolate);
 /**
  * This class exports constants and functionality from within v8 that
  * is necessary to implement inline functions in the v8 api. Don't
@@ -334,8 +486,7 @@ V8_EXPORT bool CanHaveInternalField(int instance_type);
  */
 class Internals {
 #ifdef V8_MAP_PACKING
-  V8_INLINE static constexpr internal::Address UnpackMapWord(
-      internal::Address mapword) {
+  V8_INLINE static constexpr Address UnpackMapWord(Address mapword) {
     // TODO(wenyuzhao): Clear header metadata.
     return mapword ^ kMapWordXorMask;
   }
@@ -354,8 +505,10 @@ class Internals {
   static const int kFixedArrayHeaderSize = 2 * kApiTaggedSize;
   static const int kEmbedderDataArrayHeaderSize = 2 * kApiTaggedSize;
   static const int kEmbedderDataSlotSize = kApiSystemPointerSize;
-#ifdef V8_SANDBOXED_EXTERNAL_POINTERS
-  static const int kEmbedderDataSlotRawPayloadOffset = kApiTaggedSize;
+#ifdef V8_ENABLE_SANDBOX
+  static const int kEmbedderDataSlotExternalPointerOffset = kApiTaggedSize;
+#else
+  static const int kEmbedderDataSlotExternalPointerOffset = 0;
 #endif
   static const int kNativeContextEmbedderDataOffset = 6 * kApiTaggedSize;
   static const int kStringRepresentationAndEncodingMask = 0x0f;
@@ -365,35 +518,75 @@ class Internals {
 
   static const uint32_t kNumIsolateDataSlots = 4;
   static const int kStackGuardSize = 7 * kApiSystemPointerSize;
-  static const int kBuiltinTier0EntryTableSize = 10 * kApiSystemPointerSize;
-  static const int kBuiltinTier0TableSize = 10 * kApiSystemPointerSize;
+  static const int kBuiltinTier0EntryTableSize = 7 * kApiSystemPointerSize;
+  static const int kBuiltinTier0TableSize = 7 * kApiSystemPointerSize;
+  static const int kLinearAllocationAreaSize = 3 * kApiSystemPointerSize;
+  static const int kThreadLocalTopSize = 25 * kApiSystemPointerSize;
+
+  // ExternalPointerTable layout guarantees.
+  static const int kExternalPointerTableBufferOffset = 0;
+  static const int kExternalPointerTableSize = 4 * kApiSystemPointerSize;
 
   // IsolateData layout guarantees.
   static const int kIsolateCageBaseOffset = 0;
   static const int kIsolateStackGuardOffset =
       kIsolateCageBaseOffset + kApiSystemPointerSize;
-  static const int kBuiltinTier0EntryTableOffset =
+  static const int kVariousBooleanFlagsOffset =
       kIsolateStackGuardOffset + kStackGuardSize;
+  static const int kBuiltinTier0EntryTableOffset =
+      kVariousBooleanFlagsOffset + 8;
   static const int kBuiltinTier0TableOffset =
       kBuiltinTier0EntryTableOffset + kBuiltinTier0EntryTableSize;
-  static const int kIsolateEmbedderDataOffset =
+  static const int kNewAllocationInfoOffset =
      kBuiltinTier0TableOffset + kBuiltinTier0TableSize;
+  static const int kOldAllocationInfoOffset =
+      kNewAllocationInfoOffset + kLinearAllocationAreaSize;
   static const int kIsolateFastCCallCallerFpOffset =
-      kIsolateEmbedderDataOffset + kNumIsolateDataSlots * kApiSystemPointerSize;
+      kOldAllocationInfoOffset + kLinearAllocationAreaSize;
   static const int kIsolateFastCCallCallerPcOffset =
       kIsolateFastCCallCallerFpOffset + kApiSystemPointerSize;
   static const int kIsolateFastApiCallTargetOffset =
      kIsolateFastCCallCallerPcOffset + kApiSystemPointerSize;
   static const int kIsolateLongTaskStatsCounterOffset =
       kIsolateFastApiCallTargetOffset + kApiSystemPointerSize;
-  static const int kIsolateRootsOffset =
+  static const int kIsolateThreadLocalTopOffset =
       kIsolateLongTaskStatsCounterOffset + kApiSizetSize;
+  static const int kIsolateEmbedderDataOffset =
+      kIsolateThreadLocalTopOffset + kThreadLocalTopSize;
+#ifdef V8_COMPRESS_POINTERS
+  static const int kIsolateExternalPointerTableOffset =
+      kIsolateEmbedderDataOffset + kNumIsolateDataSlots * kApiSystemPointerSize;
+  static const int kIsolateSharedExternalPointerTableAddressOffset =
+      kIsolateExternalPointerTableOffset + kExternalPointerTableSize;
+  static const int kIsolateRootsOffset =
+      kIsolateSharedExternalPointerTableAddressOffset + kApiSystemPointerSize;
+#else
+  static const int kIsolateRootsOffset =
+      kIsolateEmbedderDataOffset + kNumIsolateDataSlots * kApiSystemPointerSize;
+#endif
 
-  static const int kExternalPointerTableBufferOffset = 0;
-  static const int kExternalPointerTableCapacityOffset =
-      kExternalPointerTableBufferOffset + kApiSystemPointerSize;
-  static const int kExternalPointerTableFreelistHeadOffset =
-      kExternalPointerTableCapacityOffset + kApiInt32Size;
+#if V8_STATIC_ROOTS_BOOL
+
+  // These constants need to be initialized in api.cc.
+#define EXPORTED_STATIC_ROOTS_PTR_LIST(V) \
+  V(UndefinedValue)                       \
+  V(NullValue)                            \
+  V(TrueValue)                            \
+  V(FalseValue)                           \
+  V(EmptyString)                          \
+  V(TheHoleValue)
+
+  using Tagged_t = uint32_t;
+  struct StaticReadOnlyRoot {
+#define DEF_ROOT(name) V8_EXPORT static const Tagged_t k##name;
+    EXPORTED_STATIC_ROOTS_PTR_LIST(DEF_ROOT)
+#undef DEF_ROOT
+
+    V8_EXPORT static const Tagged_t kFirstStringMap;
+    V8_EXPORT static const Tagged_t kLastStringMap;
+  };
+
+#endif  // V8_STATIC_ROOTS_BOOL
 
   static const int kUndefinedValueRootIndex = 4;
   static const int kTheHoleValueRootIndex = 5;
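EXPORTED_STATIC_ROOTS_PTR_LIST and DEF_ROOT above follow the X-macro pattern: one list macro is expanded with different per-entry macros to generate declarations here and the matching definitions elsewhere (in V8's case, api.cc). A minimal self-contained sketch of the pattern, with names invented for the example:

```cpp
// Illustrative X-macro: the same list drives declarations and definitions.
#include <cstdint>

#define MY_ROOT_LIST(V) \
  V(UndefinedValue)     \
  V(NullValue)

struct Roots {
#define DECLARE_ROOT(name) static const uint32_t k##name;
  MY_ROOT_LIST(DECLARE_ROOT)
#undef DECLARE_ROOT
};

// A second expansion supplies the values (placeholder values here).
#define DEFINE_ROOT(name) const uint32_t Roots::k##name = 0;
MY_ROOT_LIST(DEFINE_ROOT)
#undef DEFINE_ROOT
```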
@@ -404,9 +597,10 @@ class Internals {
 
   static const int kNodeClassIdOffset = 1 * kApiSystemPointerSize;
   static const int kNodeFlagsOffset = 1 * kApiSystemPointerSize + 3;
-  static const int kNodeStateMask = 0x7;
+  static const int kNodeStateMask = 0x3;
   static const int kNodeStateIsWeakValue = 2;
-  static const int kNodeStateIsPendingValue = 3;
+
+  static const int kTracedNodeClassIdOffset = kApiSystemPointerSize;
 
   static const int kFirstNonstringType = 0x80;
   static const int kOddballType = 0x83;
@@ -447,15 +641,15 @@ class Internals {
 #endif
   }
 
-  V8_INLINE static bool HasHeapObjectTag(const internal::Address value) {
+  V8_INLINE static bool HasHeapObjectTag(Address value) {
     return (value & kHeapObjectTagMask) == static_cast<Address>(kHeapObjectTag);
   }
 
-  V8_INLINE static int SmiValue(const internal::Address value) {
+  V8_INLINE static int SmiValue(Address value) {
     return PlatformSmiTagging::SmiToInt(value);
   }
 
-  V8_INLINE static constexpr internal::Address IntToSmi(int value) {
+  V8_INLINE static constexpr Address IntToSmi(int value) {
     return internal::IntToSmi(value);
   }
 
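For reference, a standalone sketch of the low-bit check performed by HasHeapObjectTag above, using kHeapObjectTag = 1 and kHeapObjectTagMask = 0b11 from earlier in this header:

```cpp
// Standalone sketch: tagged Addresses with low bits 01 are heap objects
// (11 marks a weak reference; a low bit of 0 marks a Smi).
#include <cassert>
#include <cstdint>

using Address = uintptr_t;
constexpr int kHeapObjectTag = 1;
constexpr intptr_t kHeapObjectTagMask = 0b11;

bool HasHeapObjectTag(Address value) {
  return (value & kHeapObjectTagMask) == static_cast<Address>(kHeapObjectTag);
}

int main() {
  Address heap_ptr = 0x1000 | kHeapObjectTag;  // aligned pointer, tagged 01
  assert(HasHeapObjectTag(heap_ptr));
  assert(!HasHeapObjectTag(0x2a << 1));        // Smi-tagged values end in 0
}
```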
@@ -463,16 +657,30 @@ class Internals {
     return PlatformSmiTagging::IsValidSmi(value);
   }
 
-  V8_INLINE static int GetInstanceType(const internal::Address obj) {
-    typedef internal::Address A;
-    A map = ReadTaggedPointerField(obj, kHeapObjectMapOffset);
+#if V8_STATIC_ROOTS_BOOL
+  V8_INLINE static bool is_identical(Address obj, Tagged_t constant) {
+    return static_cast<Tagged_t>(obj) == constant;
+  }
+
+  V8_INLINE static bool CheckInstanceMapRange(Address obj, Tagged_t first_map,
+                                              Tagged_t last_map) {
+    auto map = ReadRawField<Tagged_t>(obj, kHeapObjectMapOffset);
+#ifdef V8_MAP_PACKING
+    map = UnpackMapWord(map);
+#endif
+    return map >= first_map && map <= last_map;
+  }
+#endif
+
+  V8_INLINE static int GetInstanceType(Address obj) {
+    Address map = ReadTaggedPointerField(obj, kHeapObjectMapOffset);
 #ifdef V8_MAP_PACKING
     map = UnpackMapWord(map);
 #endif
     return ReadRawField<uint16_t>(map, kMapInstanceTypeOffset);
   }
 
-  V8_INLINE static int GetOddballKind(const internal::Address obj) {
+  V8_INLINE static int GetOddballKind(Address obj) {
     return SmiValue(ReadTaggedSignedField(obj, kOddballKindOffset));
   }
 
@@ -481,61 +689,104 @@
     return representation == kExternalTwoByteRepresentationTag;
   }
 
-  V8_INLINE static uint8_t GetNodeFlag(internal::Address* obj, int shift) {
+  V8_INLINE static constexpr bool CanHaveInternalField(int instance_type) {
+    static_assert(kJSObjectType + 1 == kFirstJSApiObjectType);
+    static_assert(kJSObjectType < kLastJSApiObjectType);
+    static_assert(kFirstJSApiObjectType < kLastJSApiObjectType);
+    // Check for IsJSObject() || IsJSSpecialApiObject() || IsJSApiObject()
+    return instance_type == kJSSpecialApiObjectType ||
+           // inlined version of base::IsInRange
+           (static_cast<unsigned>(static_cast<unsigned>(instance_type) -
+                                  static_cast<unsigned>(kJSObjectType)) <=
+            static_cast<unsigned>(kLastJSApiObjectType - kJSObjectType));
+  }
+
+  V8_INLINE static uint8_t GetNodeFlag(Address* obj, int shift) {
     uint8_t* addr = reinterpret_cast<uint8_t*>(obj) + kNodeFlagsOffset;
     return *addr & static_cast<uint8_t>(1U << shift);
   }
 
-  V8_INLINE static void UpdateNodeFlag(internal::Address* obj, bool value,
-                                       int shift) {
+  V8_INLINE static void UpdateNodeFlag(Address* obj, bool value, int shift) {
     uint8_t* addr = reinterpret_cast<uint8_t*>(obj) + kNodeFlagsOffset;
     uint8_t mask = static_cast<uint8_t>(1U << shift);
     *addr = static_cast<uint8_t>((*addr & ~mask) | (value << shift));
   }
 
-  V8_INLINE static uint8_t GetNodeState(internal::Address* obj) {
+  V8_INLINE static uint8_t GetNodeState(Address* obj) {
     uint8_t* addr = reinterpret_cast<uint8_t*>(obj) + kNodeFlagsOffset;
     return *addr & kNodeStateMask;
   }
 
-  V8_INLINE static void UpdateNodeState(internal::Address* obj, uint8_t value) {
+  V8_INLINE static void UpdateNodeState(Address* obj, uint8_t value) {
     uint8_t* addr = reinterpret_cast<uint8_t*>(obj) + kNodeFlagsOffset;
     *addr = static_cast<uint8_t>((*addr & ~kNodeStateMask) | value);
   }
 
   V8_INLINE static void SetEmbedderData(v8::Isolate* isolate, uint32_t slot,
                                         void* data) {
-    internal::Address addr = reinterpret_cast<internal::Address>(isolate) +
-                             kIsolateEmbedderDataOffset +
-                             slot * kApiSystemPointerSize;
+    Address addr = reinterpret_cast<Address>(isolate) +
+                   kIsolateEmbedderDataOffset + slot * kApiSystemPointerSize;
     *reinterpret_cast<void**>(addr) = data;
   }
 
   V8_INLINE static void* GetEmbedderData(const v8::Isolate* isolate,
                                          uint32_t slot) {
-    internal::Address addr = reinterpret_cast<internal::Address>(isolate) +
-                             kIsolateEmbedderDataOffset +
-                             slot * kApiSystemPointerSize;
+    Address addr = reinterpret_cast<Address>(isolate) +
+                   kIsolateEmbedderDataOffset + slot * kApiSystemPointerSize;
     return *reinterpret_cast<void* const*>(addr);
   }
 
   V8_INLINE static void IncrementLongTasksStatsCounter(v8::Isolate* isolate) {
-    internal::Address addr = reinterpret_cast<internal::Address>(isolate) +
-                             kIsolateLongTaskStatsCounterOffset;
+    Address addr =
+        reinterpret_cast<Address>(isolate) + kIsolateLongTaskStatsCounterOffset;
     ++(*reinterpret_cast<size_t*>(addr));
   }
 
-  V8_INLINE static internal::Address* GetRoot(v8::Isolate* isolate, int index) {
-    internal::Address addr = reinterpret_cast<internal::Address>(isolate) +
-                             kIsolateRootsOffset +
-                             index * kApiSystemPointerSize;
-    return reinterpret_cast<internal::Address*>(addr);
+  V8_INLINE static Address* GetRootSlot(v8::Isolate* isolate, int index) {
+    Address addr = reinterpret_cast<Address>(isolate) + kIsolateRootsOffset +
+                   index * kApiSystemPointerSize;
+    return reinterpret_cast<Address*>(addr);
+  }
+
+  V8_INLINE static Address GetRoot(v8::Isolate* isolate, int index) {
+#if V8_STATIC_ROOTS_BOOL
+    Address base = *reinterpret_cast<Address*>(
+        reinterpret_cast<uintptr_t>(isolate) + kIsolateCageBaseOffset);
+    switch (index) {
+#define DECOMPRESS_ROOT(name) \
+  case k##name##RootIndex:    \
+    return base + StaticReadOnlyRoot::k##name;
+      EXPORTED_STATIC_ROOTS_PTR_LIST(DECOMPRESS_ROOT)
+#undef DECOMPRESS_ROOT
+      default:
+        break;
+    }
+#undef EXPORTED_STATIC_ROOTS_PTR_LIST
+#endif  // V8_STATIC_ROOTS_BOOL
+    return *GetRootSlot(isolate, index);
+  }
+
+#ifdef V8_ENABLE_SANDBOX
+  V8_INLINE static Address* GetExternalPointerTableBase(v8::Isolate* isolate) {
+    Address addr = reinterpret_cast<Address>(isolate) +
+                   kIsolateExternalPointerTableOffset +
+                   kExternalPointerTableBufferOffset;
+    return *reinterpret_cast<Address**>(addr);
   }
 
+  V8_INLINE static Address* GetSharedExternalPointerTableBase(
+      v8::Isolate* isolate) {
+    Address addr = reinterpret_cast<Address>(isolate) +
+                   kIsolateSharedExternalPointerTableAddressOffset;
+    addr = *reinterpret_cast<Address*>(addr);
+    addr += kExternalPointerTableBufferOffset;
+    return *reinterpret_cast<Address**>(addr);
+  }
+#endif
+
   template <typename T>
-  V8_INLINE static T ReadRawField(internal::Address heap_object_ptr,
-                                  int offset) {
-    internal::Address addr = heap_object_ptr + offset - kHeapObjectTag;
+  V8_INLINE static T ReadRawField(Address heap_object_ptr, int offset) {
+    Address addr = heap_object_ptr + offset - kHeapObjectTag;
 #ifdef V8_COMPRESS_POINTERS
     if (sizeof(T) > kApiTaggedSize) {
       // TODO(ishell, v8:8875): When pointer compression is enabled 8-byte size
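The new CanHaveInternalField in the hunk above uses the classic single-comparison range check (the "inlined version of base::IsInRange" named in its comment). A standalone sketch of the trick:

```cpp
// Standalone sketch: one unsigned comparison replaces lo <= x && x <= hi,
// because x - lo wraps to a huge unsigned value whenever x < lo.
#include <cassert>

constexpr bool IsInRange(int x, int lo, int hi) {
  return static_cast<unsigned>(static_cast<unsigned>(x) -
                               static_cast<unsigned>(lo)) <=
         static_cast<unsigned>(hi - lo);
}

int main() {
  assert(IsInRange(5, 3, 7));   // inside the range
  assert(!IsInRange(2, 3, 7));  // below: 2 - 3 wraps around
  assert(!IsInRange(8, 3, 7));  // above
}
```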
@@ -550,73 +801,69 @@ class Internals {
     return *reinterpret_cast<const T*>(addr);
   }
 
-  V8_INLINE static internal::Address ReadTaggedPointerField(
-      internal::Address heap_object_ptr, int offset) {
+  V8_INLINE static Address ReadTaggedPointerField(Address heap_object_ptr,
+                                                  int offset) {
 #ifdef V8_COMPRESS_POINTERS
     uint32_t value = ReadRawField<uint32_t>(heap_object_ptr, offset);
-    internal::Address base =
-        GetPtrComprCageBaseFromOnHeapAddress(heap_object_ptr);
-    return base + static_cast<internal::Address>(static_cast<uintptr_t>(value));
+    Address base = GetPtrComprCageBaseFromOnHeapAddress(heap_object_ptr);
+    return base + static_cast<Address>(static_cast<uintptr_t>(value));
 #else
-    return ReadRawField<internal::Address>(heap_object_ptr, offset);
+    return ReadRawField<Address>(heap_object_ptr, offset);
 #endif
   }
 
-  V8_INLINE static internal::Address ReadTaggedSignedField(
-      internal::Address heap_object_ptr, int offset) {
+  V8_INLINE static Address ReadTaggedSignedField(Address heap_object_ptr,
+                                                 int offset) {
 #ifdef V8_COMPRESS_POINTERS
     uint32_t value = ReadRawField<uint32_t>(heap_object_ptr, offset);
-    return static_cast<internal::Address>(static_cast<uintptr_t>(value));
+    return static_cast<Address>(static_cast<uintptr_t>(value));
 #else
-    return ReadRawField<internal::Address>(heap_object_ptr, offset);
+    return ReadRawField<Address>(heap_object_ptr, offset);
 #endif
   }
 
-  V8_INLINE static internal::Isolate* GetIsolateForSandbox(
-      internal::Address obj) {
-#ifdef V8_SANDBOXED_EXTERNAL_POINTERS
-    return internal::IsolateFromNeverReadOnlySpaceObject(obj);
+  V8_INLINE static v8::Isolate* GetIsolateForSandbox(Address obj) {
+#ifdef V8_ENABLE_SANDBOX
+    return reinterpret_cast<v8::Isolate*>(
+        internal::IsolateFromNeverReadOnlySpaceObject(obj));
 #else
     // Not used in non-sandbox mode.
     return nullptr;
 #endif
   }
 
-  V8_INLINE static Address DecodeExternalPointer(
-      const Isolate* isolate, ExternalPointer_t encoded_pointer,
-      ExternalPointerTag tag) {
-#ifdef V8_SANDBOXED_EXTERNAL_POINTERS
-    return internal::DecodeExternalPointerImpl(isolate, encoded_pointer, tag);
-#else
-    return encoded_pointer;
-#endif
-  }
-
-  V8_INLINE static internal::Address ReadExternalPointerField(
-      internal::Isolate* isolate, internal::Address heap_object_ptr, int offset,
-      ExternalPointerTag tag) {
-#ifdef V8_SANDBOXED_EXTERNAL_POINTERS
-    internal::ExternalPointer_t encoded_value =
-        ReadRawField<uint32_t>(heap_object_ptr, offset);
-    // We currently have to treat zero as nullptr in embedder slots.
-    return encoded_value ? DecodeExternalPointer(isolate, encoded_value, tag)
-                         : 0;
+  template <ExternalPointerTag tag>
+  V8_INLINE static Address ReadExternalPointerField(v8::Isolate* isolate,
+                                                    Address heap_object_ptr,
+                                                    int offset) {
+#ifdef V8_ENABLE_SANDBOX
+    static_assert(tag != kExternalPointerNullTag);
+    // See src/sandbox/external-pointer-table-inl.h. Logic duplicated here so
+    // it can be inlined and doesn't require an additional call.
+    Address* table = IsSharedExternalPointerType(tag)
+                         ? GetSharedExternalPointerTableBase(isolate)
+                         : GetExternalPointerTableBase(isolate);
+    internal::ExternalPointerHandle handle =
+        ReadRawField<ExternalPointerHandle>(heap_object_ptr, offset);
+    uint32_t index = handle >> kExternalPointerIndexShift;
+    std::atomic<Address>* ptr =
+        reinterpret_cast<std::atomic<Address>*>(&table[index]);
+    Address entry = std::atomic_load_explicit(ptr, std::memory_order_relaxed);
+    return entry & ~tag;
 #else
     return ReadRawField<Address>(heap_object_ptr, offset);
-#endif
+#endif  // V8_ENABLE_SANDBOX
   }
 
 #ifdef V8_COMPRESS_POINTERS
-  V8_INLINE static internal::Address GetPtrComprCageBaseFromOnHeapAddress(
-      internal::Address addr) {
+  V8_INLINE static Address GetPtrComprCageBaseFromOnHeapAddress(Address addr) {
    return addr & -static_cast<intptr_t>(kPtrComprCageBaseAlignment);
   }
 
-  V8_INLINE static internal::Address DecompressTaggedAnyField(
-      internal::Address heap_object_ptr, uint32_t value) {
-    internal::Address base =
-        GetPtrComprCageBaseFromOnHeapAddress(heap_object_ptr);
-    return base + static_cast<internal::Address>(static_cast<uintptr_t>(value));
+  V8_INLINE static Address DecompressTaggedField(Address heap_object_ptr,
+                                                 uint32_t value) {
+    Address base = GetPtrComprCageBaseFromOnHeapAddress(heap_object_ptr);
+    return base + static_cast<Address>(static_cast<uintptr_t>(value));
   }
 
 #endif  // V8_COMPRESS_POINTERS
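GetPtrComprCageBaseFromOnHeapAddress and DecompressTaggedField above implement pointer-compression decoding: mask an on-heap address down to the cage base, then add the 32-bit compressed value. A standalone sketch, assuming a 4GB-aligned cage on a 64-bit platform (the alignment constant and addresses here are illustrative):

```cpp
// Standalone sketch of pointer-compression decompression.
#include <cassert>
#include <cstdint>

using Address = uintptr_t;
constexpr Address kCageBaseAlignment = 1ULL << 32;  // assumed 4 GB cage

Address CageBaseFromOnHeapAddress(Address addr) {
  // Clearing the low 32 bits recovers the cage base from any address
  // inside the cage (equivalent to addr & -alignment for a power of two).
  return addr & ~(kCageBaseAlignment - 1);
}

Address DecompressTagged(Address any_on_heap_addr, uint32_t compressed) {
  return CageBaseFromOnHeapAddress(any_on_heap_addr) + compressed;
}

int main() {
  Address base = 0x0000700000000000;  // hypothetical cage base
  Address obj = base + 0x12345678;    // some object inside the cage
  uint32_t field = 0x00420000;        // compressed on-heap reference
  assert(DecompressTagged(obj, field) == base + field);
}
```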
@@ -652,10 +899,60 @@ class BackingStoreBase {};
 
 // The maximum value in enum GarbageCollectionReason, defined in heap.h.
 // This is needed for histograms sampling garbage collection reasons.
-constexpr int kGarbageCollectionReasonMaxValue = 25;
+constexpr int kGarbageCollectionReasonMaxValue = 27;
 
-}  // namespace internal
+// Helper functions about values contained in handles.
+class ValueHelper final {
+ public:
+#ifdef V8_ENABLE_CONSERVATIVE_STACK_SCANNING
+  static constexpr Address kLocalTaggedNullAddress = 1;
+
+  template <typename T>
+  static constexpr T* EmptyValue() {
+    return reinterpret_cast<T*>(kLocalTaggedNullAddress);
+  }
+
+  template <typename T>
+  V8_INLINE static Address ValueAsAddress(const T* value) {
+    return reinterpret_cast<Address>(value);
+  }
+
+  template <typename T, typename S>
+  V8_INLINE static T* SlotAsValue(S* slot) {
+    return *reinterpret_cast<T**>(slot);
+  }
+
+  template <typename T>
+  V8_INLINE static T* ValueAsSlot(T* const& value) {
+    return reinterpret_cast<T*>(const_cast<T**>(&value));
+  }
+
+#else  // !V8_ENABLE_CONSERVATIVE_STACK_SCANNING
 
+  template <typename T>
+  static constexpr T* EmptyValue() {
+    return nullptr;
+  }
+
+  template <typename T>
+  V8_INLINE static Address ValueAsAddress(const T* value) {
+    return *reinterpret_cast<const Address*>(value);
+  }
+
+  template <typename T, typename S>
+  V8_INLINE static T* SlotAsValue(S* slot) {
+    return reinterpret_cast<T*>(slot);
+  }
+
+  template <typename T>
+  V8_INLINE static T* ValueAsSlot(T* const& value) {
+    return value;
+  }
+
+#endif  // V8_ENABLE_CONSERVATIVE_STACK_SCANNING
+};
+
+}  // namespace internal
 }  // namespace v8
 
 #endif  // INCLUDE_V8_INTERNAL_H_