libv8-node 18.16.0.0-aarch64-linux-musl → 19.9.0.0-aarch64-linux-musl

Files changed (62)
  1. checksums.yaml +4 -4
  2. data/lib/libv8/node/version.rb +3 -3
  3. data/vendor/v8/aarch64-linux-musl/libv8/obj/libv8_monolith.a +0 -0
  4. data/vendor/v8/include/cppgc/common.h +0 -1
  5. data/vendor/v8/include/cppgc/cross-thread-persistent.h +7 -8
  6. data/vendor/v8/include/cppgc/heap-consistency.h +46 -3
  7. data/vendor/v8/include/cppgc/heap-handle.h +43 -0
  8. data/vendor/v8/include/cppgc/heap-statistics.h +2 -2
  9. data/vendor/v8/include/cppgc/heap.h +3 -7
  10. data/vendor/v8/include/cppgc/internal/api-constants.h +11 -1
  11. data/vendor/v8/include/cppgc/internal/base-page-handle.h +45 -0
  12. data/vendor/v8/include/cppgc/internal/caged-heap-local-data.h +40 -8
  13. data/vendor/v8/include/cppgc/internal/caged-heap.h +61 -0
  14. data/vendor/v8/include/cppgc/internal/gc-info.h +0 -1
  15. data/vendor/v8/include/cppgc/internal/member-storage.h +236 -0
  16. data/vendor/v8/include/cppgc/internal/name-trait.h +21 -6
  17. data/vendor/v8/include/cppgc/internal/persistent-node.h +11 -13
  18. data/vendor/v8/include/cppgc/internal/pointer-policies.h +28 -7
  19. data/vendor/v8/include/cppgc/internal/write-barrier.h +143 -101
  20. data/vendor/v8/include/cppgc/liveness-broker.h +8 -7
  21. data/vendor/v8/include/cppgc/member.h +364 -89
  22. data/vendor/v8/include/cppgc/name-provider.h +4 -4
  23. data/vendor/v8/include/cppgc/persistent.h +5 -9
  24. data/vendor/v8/include/cppgc/platform.h +2 -2
  25. data/vendor/v8/include/cppgc/sentinel-pointer.h +1 -1
  26. data/vendor/v8/include/cppgc/trace-trait.h +4 -0
  27. data/vendor/v8/include/cppgc/type-traits.h +9 -0
  28. data/vendor/v8/include/cppgc/visitor.h +89 -57
  29. data/vendor/v8/include/v8-callbacks.h +19 -5
  30. data/vendor/v8/include/v8-context.h +13 -8
  31. data/vendor/v8/include/v8-cppgc.h +12 -0
  32. data/vendor/v8/include/v8-date.h +5 -0
  33. data/vendor/v8/include/v8-embedder-heap.h +8 -3
  34. data/vendor/v8/include/v8-exception.h +1 -1
  35. data/vendor/v8/include/v8-fast-api-calls.h +46 -32
  36. data/vendor/v8/include/v8-function.h +8 -0
  37. data/vendor/v8/include/v8-initialization.h +23 -49
  38. data/vendor/v8/include/v8-inspector.h +13 -7
  39. data/vendor/v8/include/v8-internal.h +328 -123
  40. data/vendor/v8/include/v8-isolate.h +27 -42
  41. data/vendor/v8/include/v8-local-handle.h +5 -5
  42. data/vendor/v8/include/v8-locker.h +0 -11
  43. data/vendor/v8/include/v8-maybe.h +24 -1
  44. data/vendor/v8/include/v8-message.h +2 -4
  45. data/vendor/v8/include/v8-metrics.h +20 -38
  46. data/vendor/v8/include/v8-microtask-queue.h +1 -1
  47. data/vendor/v8/include/v8-object.h +8 -15
  48. data/vendor/v8/include/v8-persistent-handle.h +0 -2
  49. data/vendor/v8/include/v8-platform.h +54 -25
  50. data/vendor/v8/include/v8-primitive.h +8 -8
  51. data/vendor/v8/include/v8-profiler.h +84 -22
  52. data/vendor/v8/include/v8-regexp.h +2 -1
  53. data/vendor/v8/include/v8-script.h +62 -6
  54. data/vendor/v8/include/v8-template.h +13 -76
  55. data/vendor/v8/include/v8-unwinder-state.h +4 -4
  56. data/vendor/v8/include/v8-util.h +2 -4
  57. data/vendor/v8/include/v8-value-serializer.h +46 -23
  58. data/vendor/v8/include/v8-version.h +3 -3
  59. data/vendor/v8/include/v8-wasm.h +5 -62
  60. data/vendor/v8/include/v8-weak-callback-info.h +0 -7
  61. data/vendor/v8/include/v8config.h +280 -13
  62. metadata +6 -2
data/vendor/v8/include/v8-internal.h CHANGED
@@ -8,6 +8,8 @@
  #include <stddef.h>
  #include <stdint.h>
  #include <string.h>
+
+ #include <atomic>
  #include <type_traits>
 
  #include "v8-version.h" // NOLINT(build/include_directory)
@@ -50,6 +52,7 @@ const int kHeapObjectTag = 1;
  const int kWeakHeapObjectTag = 3;
  const int kHeapObjectTagSize = 2;
  const intptr_t kHeapObjectTagMask = (1 << kHeapObjectTagSize) - 1;
+ const intptr_t kHeapObjectReferenceTagMask = 1 << (kHeapObjectTagSize - 1);
 
  // Tag information for fowarding pointers stored in object headers.
  // 0b00 at the lowest 2 bits in the header indicates that the map word is a
@@ -157,15 +160,7 @@ V8_INLINE static constexpr internal::Address IntToSmi(int value) {
  * Sandbox related types, constants, and functions.
  */
  constexpr bool SandboxIsEnabled() {
- #ifdef V8_SANDBOX
- return true;
- #else
- return false;
- #endif
- }
-
- constexpr bool SandboxedExternalPointersAreEnabled() {
- #ifdef V8_SANDBOXED_EXTERNAL_POINTERS
+ #ifdef V8_ENABLE_SANDBOX
  return true;
  #else
  return false;
@@ -176,19 +171,18 @@ constexpr bool SandboxedExternalPointersAreEnabled() {
  // for example by storing them as offset rather than as raw pointers.
  using SandboxedPointer_t = Address;
 
- // ExternalPointers point to objects located outside the sandbox. When sandboxed
- // external pointers are enabled, these are stored in an external pointer table
- // and referenced from HeapObjects through indices.
- #ifdef V8_SANDBOXED_EXTERNAL_POINTERS
- using ExternalPointer_t = uint32_t;
- #else
- using ExternalPointer_t = Address;
- #endif
-
- #ifdef V8_SANDBOX_IS_AVAILABLE
+ #ifdef V8_ENABLE_SANDBOX
 
  // Size of the sandbox, excluding the guard regions surrounding it.
+ #ifdef V8_TARGET_OS_ANDROID
+ // On Android, most 64-bit devices seem to be configured with only 39 bits of
+ // virtual address space for userspace. As such, limit the sandbox to 128GB (a
+ // quarter of the total available address space).
+ constexpr size_t kSandboxSizeLog2 = 37; // 128 GB
+ #else
+ // Everywhere else use a 1TB sandbox.
  constexpr size_t kSandboxSizeLog2 = 40; // 1 TB
+ #endif // V8_TARGET_OS_ANDROID
  constexpr size_t kSandboxSize = 1ULL << kSandboxSizeLog2;
 
  // Required alignment of the sandbox. For simplicity, we require the
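Side note on the new size constants: the snippet below is a standalone sketch (not part of the header) that only checks the arithmetic behind the Android-specific sandbox size — 2^37 bytes is 128 GB, a quarter of a 39-bit userspace address space, versus the default 2^40 bytes (1 TB). The local GB constant is defined here purely for the example.

    #include <cstdint>

    constexpr uint64_t GB = uint64_t{1} << 30;
    constexpr uint64_t kAndroidSandboxSize = uint64_t{1} << 37;  // kSandboxSizeLog2 = 37
    constexpr uint64_t kDefaultSandboxSize = uint64_t{1} << 40;  // kSandboxSizeLog2 = 40
    constexpr uint64_t k39BitAddressSpace = uint64_t{1} << 39;   // typical Android userspace

    static_assert(kAndroidSandboxSize == 128 * GB, "2^37 bytes == 128 GB");
    static_assert(kDefaultSandboxSize == 1024 * GB, "2^40 bytes == 1 TB");
    static_assert(4 * kAndroidSandboxSize == k39BitAddressSpace,
                  "128 GB is a quarter of a 39-bit address space");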
@@ -213,20 +207,6 @@ static_assert((kSandboxGuardRegionSize % kSandboxAlignment) == 0,
  "The size of the guard regions around the sandbox must be a "
  "multiple of its required alignment.");
 
- // Minimum size of the sandbox, excluding the guard regions surrounding it. If
- // the virtual memory reservation for the sandbox fails, its size is currently
- // halved until either the reservation succeeds or the minimum size is reached.
- // A minimum of 32GB allows the 4GB pointer compression region as well as the
- // ArrayBuffer partition and two 10GB Wasm memory cages to fit into the
- // sandbox. 32GB should also be the minimum possible size of the userspace
- // address space as there are some machine configurations with only 36 virtual
- // address bits.
- constexpr size_t kSandboxMinimumSize = 32ULL * GB;
-
- static_assert(kSandboxMinimumSize <= kSandboxSize,
- "The minimal size of the sandbox must be smaller or equal to the "
- "regular size.");
-
  // On OSes where reserving virtual memory is too expensive to reserve the
  // entire address space backing the sandbox, notably Windows pre 8.1, we create
  // a partially reserved sandbox that doesn't actually reserve most of the
@@ -239,82 +219,269 @@ static_assert(kSandboxMinimumSize <= kSandboxSize,
  // well as the ArrayBuffer partition.
  constexpr size_t kSandboxMinimumReservationSize = 8ULL * GB;
 
- static_assert(kSandboxMinimumSize > kPtrComprCageReservationSize,
- "The sandbox must be larger than the pointer compression cage "
- "contained within it.");
  static_assert(kSandboxMinimumReservationSize > kPtrComprCageReservationSize,
  "The minimum reservation size for a sandbox must be larger than "
  "the pointer compression cage contained within it.");
 
- // For now, even if the sandbox is enabled, we still allow backing stores to be
- // allocated outside of it as fallback. This will simplify the initial rollout.
- // However, if sandboxed pointers are also enabled, we must always place
- // backing stores inside the sandbox as they will be referenced though them.
- #ifdef V8_SANDBOXED_POINTERS
- constexpr bool kAllowBackingStoresOutsideSandbox = false;
- #else
- constexpr bool kAllowBackingStoresOutsideSandbox = true;
- #endif // V8_SANDBOXED_POINTERS
+ // The maximum buffer size allowed inside the sandbox. This is mostly dependent
+ // on the size of the guard regions around the sandbox: an attacker must not be
+ // able to construct a buffer that appears larger than the guard regions and
+ // thereby "reach out of" the sandbox.
+ constexpr size_t kMaxSafeBufferSizeForSandbox = 32ULL * GB - 1;
+ static_assert(kMaxSafeBufferSizeForSandbox <= kSandboxGuardRegionSize,
+ "The maximum allowed buffer size must not be larger than the "
+ "sandbox's guard regions");
+
+ constexpr size_t kBoundedSizeShift = 29;
+ static_assert(1ULL << (64 - kBoundedSizeShift) ==
+ kMaxSafeBufferSizeForSandbox + 1,
+ "The maximum size of a BoundedSize must be synchronized with the "
+ "kMaxSafeBufferSizeForSandbox");
+
+ #endif // V8_ENABLE_SANDBOX
+
+ #ifdef V8_COMPRESS_POINTERS
 
  // The size of the virtual memory reservation for an external pointer table.
  // This determines the maximum number of entries in a table. Using a maximum
  // size allows omitting bounds checks on table accesses if the indices are
  // guaranteed (e.g. through shifting) to be below the maximum index. This
  // value must be a power of two.
- static const size_t kExternalPointerTableReservationSize = 128 * MB;
+ static const size_t kExternalPointerTableReservationSize = 512 * MB;
 
  // The maximum number of entries in an external pointer table.
- static const size_t kMaxSandboxedExternalPointers =
+ static const size_t kMaxExternalPointers =
  kExternalPointerTableReservationSize / kApiSystemPointerSize;
 
  // The external pointer table indices stored in HeapObjects as external
  // pointers are shifted to the left by this amount to guarantee that they are
  // smaller than the maximum table size.
- static const uint32_t kExternalPointerIndexShift = 8;
- static_assert((1 << (32 - kExternalPointerIndexShift)) ==
- kMaxSandboxedExternalPointers,
+ static const uint32_t kExternalPointerIndexShift = 6;
+ static_assert((1 << (32 - kExternalPointerIndexShift)) == kMaxExternalPointers,
  "kExternalPointerTableReservationSize and "
  "kExternalPointerIndexShift don't match");
 
- #endif // V8_SANDBOX_IS_AVAILABLE
-
- // If sandboxed external pointers are enabled, these tag values will be ORed
- // with the external pointers in the external pointer table to prevent use of
- // pointers of the wrong type. When a pointer is loaded, it is ANDed with the
- // inverse of the expected type's tag. The tags are constructed in a way that
- // guarantees that a failed type check will result in one or more of the top
- // bits of the pointer to be set, rendering the pointer inacessible. Besides
- // the type tag bits (48 through 62), the tags also have the GC mark bit (63)
- // set, so that the mark bit is automatically set when a pointer is written
- // into the external pointer table (in which case it is clearly alive) and is
- // cleared when the pointer is loaded. The exception to this is the free entry
- // tag, which doesn't have the mark bit set, as the entry is not alive. This
+ #else // !V8_COMPRESS_POINTERS
+
+ // Needed for the V8.SandboxedExternalPointersCount histogram.
+ static const size_t kMaxExternalPointers = 0;
+
+ #endif // V8_COMPRESS_POINTERS
+
+ // A ExternalPointerHandle represents a (opaque) reference to an external
+ // pointer that can be stored inside the sandbox. A ExternalPointerHandle has
+ // meaning only in combination with an (active) Isolate as it references an
+ // external pointer stored in the currently active Isolate's
+ // ExternalPointerTable. Internally, an ExternalPointerHandles is simply an
+ // index into an ExternalPointerTable that is shifted to the left to guarantee
+ // that it is smaller than the size of the table.
+ using ExternalPointerHandle = uint32_t;
+
+ // ExternalPointers point to objects located outside the sandbox. When
+ // sandboxed external pointers are enabled, these are stored on heap as
+ // ExternalPointerHandles, otherwise they are simply raw pointers.
+ #ifdef V8_ENABLE_SANDBOX
+ using ExternalPointer_t = ExternalPointerHandle;
+ #else
+ using ExternalPointer_t = Address;
+ #endif
+
+ // When the sandbox is enabled, external pointers are stored in an external
+ // pointer table and are referenced from HeapObjects through an index (a
+ // "handle"). When stored in the table, the pointers are tagged with per-type
+ // tags to prevent type confusion attacks between different external objects.
+ // Besides type information bits, these tags also contain the GC marking bit
+ // which indicates whether the pointer table entry is currently alive. When a
+ // pointer is written into the table, the tag is ORed into the top bits. When
+ // that pointer is later loaded from the table, it is ANDed with the inverse of
+ // the expected tag. If the expected and actual type differ, this will leave
+ // some of the top bits of the pointer set, rendering the pointer inaccessible.
+ // The AND operation also removes the GC marking bit from the pointer.
+ //
+ // The tags are constructed such that UNTAG(TAG(0, T1), T2) != 0 for any two
+ // (distinct) tags T1 and T2. In practice, this is achieved by generating tags
+ // that all have the same number of zeroes and ones but different bit patterns.
+ // With N type tag bits, this allows for (N choose N/2) possible type tags.
+ // Besides the type tag bits, the tags also have the GC marking bit set so that
+ // the marking bit is automatically set when a pointer is written into the
+ // external pointer table (in which case it is clearly alive) and is cleared
+ // when the pointer is loaded. The exception to this is the free entry tag,
+ // which doesn't have the mark bit set, as the entry is not alive. This
  // construction allows performing the type check and removing GC marking bits
- // (the MSB) from the pointer at the same time.
- // Note: this scheme assumes a 48-bit address space and will likely break if
- // more virtual address bits are used.
- constexpr uint64_t kExternalPointerTagMask = 0xffff000000000000;
+ // from the pointer in one efficient operation (bitwise AND). The number of
+ // available bits is limited in the following way: on x64, bits [47, 64) are
+ // generally available for tagging (userspace has 47 address bits available).
+ // On Arm64, userspace typically has a 40 or 48 bit address space. However, due
+ // to top-byte ignore (TBI) and memory tagging (MTE), the top byte is unusable
+ // for type checks as type-check failures would go unnoticed or collide with
+ // MTE bits. Some bits of the top byte can, however, still be used for the GC
+ // marking bit. The bits available for the type tags are therefore limited to
+ // [48, 56), i.e. (8 choose 4) = 70 different types.
+ // The following options exist to increase the number of possible types:
+ // - Using multiple ExternalPointerTables since tags can safely be reused
+ // across different tables
+ // - Using "extended" type checks, where additional type information is stored
+ // either in an adjacent pointer table entry or at the pointed-to location
+ // - Using a different tagging scheme, for example based on XOR which would
+ // allow for 2**8 different tags but require a separate operation to remove
+ // the marking bit
+ //
+ // The external pointer sandboxing mechanism ensures that every access to an
+ // external pointer field will result in a valid pointer of the expected type
+ // even in the presence of an attacker able to corrupt memory inside the
+ // sandbox. However, if any data related to the external object is stored
+ // inside the sandbox it may still be corrupted and so must be validated before
+ // use or moved into the external object. Further, an attacker will always be
+ // able to substitute different external pointers of the same type for each
+ // other. Therefore, code using external pointers must be written in a
+ // "substitution-safe" way, i.e. it must always be possible to substitute
+ // external pointers of the same type without causing memory corruption outside
+ // of the sandbox. Generally this is achieved by referencing any group of
+ // related external objects through a single external pointer.
+ //
+ // Currently we use bit 62 for the marking bit which should always be unused as
+ // it's part of the non-canonical address range. When Arm's top-byte ignore
+ // (TBI) is enabled, this bit will be part of the ignored byte, and we assume
+ // that the Embedder is not using this byte (really only this one bit) for any
+ // other purpose. This bit also does not collide with the memory tagging
+ // extension (MTE) which would use bits [56, 60).
+ constexpr uint64_t kExternalPointerMarkBit = 1ULL << 62;
+ constexpr uint64_t kExternalPointerTagMask = 0x40ff000000000000;
  constexpr uint64_t kExternalPointerTagShift = 48;
- #define MAKE_TAG(v) (static_cast<uint64_t>(v) << kExternalPointerTagShift)
+
+ // All possible 8-bit type tags.
+ // These are sorted so that tags can be grouped together and it can efficiently
+ // be checked if a tag belongs to a given group. See for example the
+ // IsSharedExternalPointerType routine.
+ constexpr uint64_t kAllExternalPointerTypeTags[] = {
+ 0b00001111, 0b00010111, 0b00011011, 0b00011101, 0b00011110, 0b00100111,
+ 0b00101011, 0b00101101, 0b00101110, 0b00110011, 0b00110101, 0b00110110,
+ 0b00111001, 0b00111010, 0b00111100, 0b01000111, 0b01001011, 0b01001101,
+ 0b01001110, 0b01010011, 0b01010101, 0b01010110, 0b01011001, 0b01011010,
+ 0b01011100, 0b01100011, 0b01100101, 0b01100110, 0b01101001, 0b01101010,
+ 0b01101100, 0b01110001, 0b01110010, 0b01110100, 0b01111000, 0b10000111,
+ 0b10001011, 0b10001101, 0b10001110, 0b10010011, 0b10010101, 0b10010110,
+ 0b10011001, 0b10011010, 0b10011100, 0b10100011, 0b10100101, 0b10100110,
+ 0b10101001, 0b10101010, 0b10101100, 0b10110001, 0b10110010, 0b10110100,
+ 0b10111000, 0b11000011, 0b11000101, 0b11000110, 0b11001001, 0b11001010,
+ 0b11001100, 0b11010001, 0b11010010, 0b11010100, 0b11011000, 0b11100001,
+ 0b11100010, 0b11100100, 0b11101000, 0b11110000};
+
  // clang-format off
+ // New entries should be added with state "sandboxed".
+ // When adding new tags, please ensure that the code using these tags is
+ // "substitution-safe", i.e. still operate safely if external pointers of the
+ // same type are swapped by an attacker. See comment above for more details.
+ #define TAG(i) (kAllExternalPointerTypeTags[i])
+
+ // Shared external pointers are owned by the shared Isolate and stored in the
+ // shared external pointer table associated with that Isolate, where they can
+ // be accessed from multiple threads at the same time. The objects referenced
+ // in this way must therefore always be thread-safe.
+ #define SHARED_EXTERNAL_POINTER_TAGS(V) \
+ V(kFirstSharedTag, sandboxed, TAG(0)) \
+ V(kWaiterQueueNodeTag, sandboxed, TAG(0)) \
+ V(kExternalStringResourceTag, sandboxed, TAG(1)) \
+ V(kExternalStringResourceDataTag, sandboxed, TAG(2)) \
+ V(kLastSharedTag, sandboxed, TAG(2))
+
+ // External pointers using these tags are kept in a per-Isolate external
+ // pointer table and can only be accessed when this Isolate is active.
+ #define PER_ISOLATE_EXTERNAL_POINTER_TAGS(V) \
+ V(kForeignForeignAddressTag, sandboxed, TAG(10)) \
+ V(kNativeContextMicrotaskQueueTag, sandboxed, TAG(11)) \
+ V(kEmbedderDataSlotPayloadTag, sandboxed, TAG(12)) \
+ V(kExternalObjectValueTag, sandboxed, TAG(13)) \
+ V(kCallHandlerInfoCallbackTag, sandboxed, TAG(14)) \
+ V(kAccessorInfoGetterTag, sandboxed, TAG(15)) \
+ V(kAccessorInfoSetterTag, sandboxed, TAG(16)) \
+ V(kWasmInternalFunctionCallTargetTag, sandboxed, TAG(17)) \
+ V(kWasmTypeInfoNativeTypeTag, sandboxed, TAG(18)) \
+ V(kWasmExportedFunctionDataSignatureTag, sandboxed, TAG(19)) \
+ V(kWasmContinuationJmpbufTag, sandboxed, TAG(20)) \
+ V(kArrayBufferExtensionTag, sandboxed, TAG(21))
+
+ // All external pointer tags.
+ #define ALL_EXTERNAL_POINTER_TAGS(V) \
+ SHARED_EXTERNAL_POINTER_TAGS(V) \
+ PER_ISOLATE_EXTERNAL_POINTER_TAGS(V)
+
+ // When the sandbox is enabled, external pointers marked as "sandboxed" above
+ // use the external pointer table (i.e. are sandboxed). This allows a gradual
+ // rollout of external pointer sandboxing. If the sandbox is off, no external
+ // pointers are sandboxed.
+ //
+ // Sandboxed external pointer tags are available when compressing pointers even
+ // when the sandbox is off. Some tags (e.g. kWaiterQueueNodeTag) are used
+ // manually with the external pointer table even when the sandbox is off to ease
+ // alignment requirements.
+ #define sandboxed(X) (X << kExternalPointerTagShift) | kExternalPointerMarkBit
+ #define unsandboxed(X) kUnsandboxedExternalPointerTag
+ #if defined(V8_COMPRESS_POINTERS)
+ #define EXTERNAL_POINTER_TAG_ENUM(Name, State, Bits) Name = State(Bits),
+ #else
+ #define EXTERNAL_POINTER_TAG_ENUM(Name, State, Bits) Name = unsandboxed(Bits),
+ #endif
+
+ #define MAKE_TAG(HasMarkBit, TypeTag) \
+ ((static_cast<uint64_t>(TypeTag) << kExternalPointerTagShift) | \
+ (HasMarkBit ? kExternalPointerMarkBit : 0))
  enum ExternalPointerTag : uint64_t {
- kExternalPointerNullTag = MAKE_TAG(0b0000000000000000),
- kExternalPointerFreeEntryTag = MAKE_TAG(0b0111111110000000),
- kExternalStringResourceTag = MAKE_TAG(0b1000000011111111),
- kExternalStringResourceDataTag = MAKE_TAG(0b1000000101111111),
- kForeignForeignAddressTag = MAKE_TAG(0b1000000110111111),
- kNativeContextMicrotaskQueueTag = MAKE_TAG(0b1000000111011111),
- kEmbedderDataSlotPayloadTag = MAKE_TAG(0b1000000111101111),
- kCodeEntryPointTag = MAKE_TAG(0b1000000111110111),
- kExternalObjectValueTag = MAKE_TAG(0b1000000111111011),
+ // Empty tag value. Mostly used as placeholder.
+ kExternalPointerNullTag = MAKE_TAG(0, 0b00000000),
+ // Tag to use for unsandboxed external pointers, which are still stored as
+ // raw pointers on the heap.
+ kUnsandboxedExternalPointerTag = MAKE_TAG(0, 0b00000000),
+ // External pointer tag that will match any external pointer. Use with care!
+ kAnyExternalPointerTag = MAKE_TAG(1, 0b11111111),
+ // The free entry tag has all type bits set so every type check with a
+ // different type fails. It also doesn't have the mark bit set as free
+ // entries are (by definition) not alive.
+ kExternalPointerFreeEntryTag = MAKE_TAG(0, 0b11111111),
+ // Evacuation entries are used during external pointer table compaction.
+ kExternalPointerEvacuationEntryTag = MAKE_TAG(1, 0b11100111),
+
+ ALL_EXTERNAL_POINTER_TAGS(EXTERNAL_POINTER_TAG_ENUM)
  };
- // clang-format on
+
  #undef MAKE_TAG
+ #undef unsandboxed
+ #undef sandboxed
+ #undef TAG
+ #undef EXTERNAL_POINTER_TAG_ENUM
+
+ // clang-format on
+
+ // True if the external pointer is sandboxed and so must be referenced through
+ // an external pointer table.
+ V8_INLINE static constexpr bool IsSandboxedExternalPointerType(
+ ExternalPointerTag tag) {
+ return tag != kUnsandboxedExternalPointerTag;
+ }
+
+ // True if the external pointer must be accessed from the shared isolate's
+ // external pointer table.
+ V8_INLINE static constexpr bool IsSharedExternalPointerType(
+ ExternalPointerTag tag) {
+ return tag >= kFirstSharedTag && tag <= kLastSharedTag;
+ }
+
+ // Sanity checks.
+ #define CHECK_SHARED_EXTERNAL_POINTER_TAGS(Tag, ...) \
+ static_assert(!IsSandboxedExternalPointerType(Tag) || \
+ IsSharedExternalPointerType(Tag));
+ #define CHECK_NON_SHARED_EXTERNAL_POINTER_TAGS(Tag, ...) \
+ static_assert(!IsSandboxedExternalPointerType(Tag) || \
+ !IsSharedExternalPointerType(Tag));
+
+ SHARED_EXTERNAL_POINTER_TAGS(CHECK_SHARED_EXTERNAL_POINTER_TAGS)
+ PER_ISOLATE_EXTERNAL_POINTER_TAGS(CHECK_NON_SHARED_EXTERNAL_POINTER_TAGS)
 
- // Converts encoded external pointer to address.
- V8_EXPORT Address DecodeExternalPointerImpl(const Isolate* isolate,
- ExternalPointer_t pointer,
- ExternalPointerTag tag);
+ #undef CHECK_NON_SHARED_EXTERNAL_POINTER_TAGS
+ #undef CHECK_SHARED_EXTERNAL_POINTER_TAGS
+
+ #undef SHARED_EXTERNAL_POINTER_TAGS
+ #undef EXTERNAL_POINTER_TAGS
 
  // {obj} must be the raw tagged pointer representation of a HeapObject
  // that's guaranteed to never be in ReadOnlySpace.
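To make the tagging scheme described above concrete, here is a standalone sketch (hypothetical tag values and pointer, not part of the header): the tag is ORed into the entry on write and ANDed out with its inverse on read, so a matching read recovers the raw pointer while a mismatched read leaves high bits set and yields a non-canonical, unusable address.

    #include <cstdint>

    // Two made-up tags built the same way as MAKE_TAG(1, ...): type bits at 48-55
    // plus the mark bit at 62.
    constexpr uint64_t kMarkBit = uint64_t{1} << 62;
    constexpr uint64_t kTagA = (uint64_t{0b00001111} << 48) | kMarkBit;
    constexpr uint64_t kTagB = (uint64_t{0b00010111} << 48) | kMarkBit;

    constexpr uint64_t Tag(uint64_t raw, uint64_t tag) { return raw | tag; }        // on write
    constexpr uint64_t Untag(uint64_t entry, uint64_t tag) { return entry & ~tag; } // on read

    constexpr uint64_t kRawPtr = 0x00007f1234567890;  // a canonical 47-bit user pointer

    // Matching tag: type bits and mark bit are stripped, the raw pointer comes back.
    static_assert(Untag(Tag(kRawPtr, kTagA), kTagA) == kRawPtr, "round trip");
    // Mismatched tag: at least one top bit survives, so the result is non-canonical.
    static_assert((Untag(Tag(kRawPtr, kTagA), kTagB) >> 48) != 0, "type check fails");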
@@ -324,9 +491,6 @@ V8_EXPORT internal::Isolate* IsolateFromNeverReadOnlySpaceObject(Address obj);
  // mode based on the current context and the closure. This returns true if the
  // language mode is strict.
  V8_EXPORT bool ShouldThrowOnError(v8::internal::Isolate* isolate);
-
- V8_EXPORT bool CanHaveInternalField(int instance_type);
-
  /**
  * This class exports constants and functionality from within v8 that
  * is necessary to implement inline functions in the v8 api. Don't
@@ -354,8 +518,10 @@ class Internals {
  static const int kFixedArrayHeaderSize = 2 * kApiTaggedSize;
  static const int kEmbedderDataArrayHeaderSize = 2 * kApiTaggedSize;
  static const int kEmbedderDataSlotSize = kApiSystemPointerSize;
- #ifdef V8_SANDBOXED_EXTERNAL_POINTERS
- static const int kEmbedderDataSlotRawPayloadOffset = kApiTaggedSize;
+ #ifdef V8_ENABLE_SANDBOX
+ static const int kEmbedderDataSlotExternalPointerOffset = kApiTaggedSize;
+ #else
+ static const int kEmbedderDataSlotExternalPointerOffset = 0;
  #endif
  static const int kNativeContextEmbedderDataOffset = 6 * kApiTaggedSize;
  static const int kStringRepresentationAndEncodingMask = 0x0f;
@@ -365,15 +531,21 @@ class Internals {
 
  static const uint32_t kNumIsolateDataSlots = 4;
  static const int kStackGuardSize = 7 * kApiSystemPointerSize;
- static const int kBuiltinTier0EntryTableSize = 10 * kApiSystemPointerSize;
- static const int kBuiltinTier0TableSize = 10 * kApiSystemPointerSize;
+ static const int kBuiltinTier0EntryTableSize = 7 * kApiSystemPointerSize;
+ static const int kBuiltinTier0TableSize = 7 * kApiSystemPointerSize;
+
+ // ExternalPointerTable layout guarantees.
+ static const int kExternalPointerTableBufferOffset = 0;
+ static const int kExternalPointerTableSize = 4 * kApiSystemPointerSize;
 
  // IsolateData layout guarantees.
  static const int kIsolateCageBaseOffset = 0;
  static const int kIsolateStackGuardOffset =
  kIsolateCageBaseOffset + kApiSystemPointerSize;
- static const int kBuiltinTier0EntryTableOffset =
+ static const int kVariousBooleanFlagsOffset =
  kIsolateStackGuardOffset + kStackGuardSize;
+ static const int kBuiltinTier0EntryTableOffset =
+ kVariousBooleanFlagsOffset + kApiSystemPointerSize;
  static const int kBuiltinTier0TableOffset =
  kBuiltinTier0EntryTableOffset + kBuiltinTier0EntryTableSize;
  static const int kIsolateEmbedderDataOffset =
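For orientation, the following standalone sketch (assuming a 64-bit build, i.e. kApiSystemPointerSize == 8) walks the new IsolateData offset chain once kVariousBooleanFlagsOffset is inserted between the stack guard and the tier-0 builtin tables; the constants are redefined locally only to check the arithmetic.

    constexpr int kApiSystemPointerSize = 8;  // assumption: 64-bit target
    constexpr int kStackGuardSize = 7 * kApiSystemPointerSize;              // 56
    constexpr int kBuiltinTier0EntryTableSize = 7 * kApiSystemPointerSize;  // 56

    constexpr int kIsolateCageBaseOffset = 0;
    constexpr int kIsolateStackGuardOffset =
        kIsolateCageBaseOffset + kApiSystemPointerSize;                     // 8
    constexpr int kVariousBooleanFlagsOffset =
        kIsolateStackGuardOffset + kStackGuardSize;                         // 64
    constexpr int kBuiltinTier0EntryTableOffset =
        kVariousBooleanFlagsOffset + kApiSystemPointerSize;                 // 72
    constexpr int kBuiltinTier0TableOffset =
        kBuiltinTier0EntryTableOffset + kBuiltinTier0EntryTableSize;        // 128

    static_assert(kBuiltinTier0TableOffset == 128, "offset chain on a 64-bit build");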
@@ -386,14 +558,17 @@ class Internals {
  kIsolateFastCCallCallerPcOffset + kApiSystemPointerSize;
  static const int kIsolateLongTaskStatsCounterOffset =
  kIsolateFastApiCallTargetOffset + kApiSystemPointerSize;
+ #ifdef V8_COMPRESS_POINTERS
+ static const int kIsolateExternalPointerTableOffset =
+ kIsolateLongTaskStatsCounterOffset + kApiSizetSize;
+ static const int kIsolateSharedExternalPointerTableAddressOffset =
+ kIsolateExternalPointerTableOffset + kExternalPointerTableSize;
+ static const int kIsolateRootsOffset =
+ kIsolateSharedExternalPointerTableAddressOffset + kApiSystemPointerSize;
+ #else
  static const int kIsolateRootsOffset =
  kIsolateLongTaskStatsCounterOffset + kApiSizetSize;
-
- static const int kExternalPointerTableBufferOffset = 0;
- static const int kExternalPointerTableCapacityOffset =
- kExternalPointerTableBufferOffset + kApiSystemPointerSize;
- static const int kExternalPointerTableFreelistHeadOffset =
- kExternalPointerTableCapacityOffset + kApiInt32Size;
+ #endif
 
  static const int kUndefinedValueRootIndex = 4;
  static const int kTheHoleValueRootIndex = 5;
@@ -404,9 +579,8 @@ class Internals {
 
  static const int kNodeClassIdOffset = 1 * kApiSystemPointerSize;
  static const int kNodeFlagsOffset = 1 * kApiSystemPointerSize + 3;
- static const int kNodeStateMask = 0x7;
+ static const int kNodeStateMask = 0x3;
  static const int kNodeStateIsWeakValue = 2;
- static const int kNodeStateIsPendingValue = 3;
 
  static const int kFirstNonstringType = 0x80;
  static const int kOddballType = 0x83;
@@ -481,6 +655,18 @@ class Internals {
  return representation == kExternalTwoByteRepresentationTag;
  }
 
+ V8_INLINE static constexpr bool CanHaveInternalField(int instance_type) {
+ static_assert(kJSObjectType + 1 == kFirstJSApiObjectType);
+ static_assert(kJSObjectType < kLastJSApiObjectType);
+ static_assert(kFirstJSApiObjectType < kLastJSApiObjectType);
+ // Check for IsJSObject() || IsJSSpecialApiObject() || IsJSApiObject()
+ return instance_type == kJSSpecialApiObjectType ||
+ // inlined version of base::IsInRange
+ (static_cast<unsigned>(static_cast<unsigned>(instance_type) -
+ static_cast<unsigned>(kJSObjectType)) <=
+ static_cast<unsigned>(kLastJSApiObjectType - kJSObjectType));
+ }
+
  V8_INLINE static uint8_t GetNodeFlag(internal::Address* obj, int shift) {
  uint8_t* addr = reinterpret_cast<uint8_t*>(obj) + kNodeFlagsOffset;
  return *addr & static_cast<uint8_t>(1U << shift);
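The new inline CanHaveInternalField relies on the classic single-comparison range check (the "inlined version of base::IsInRange" noted in the diff): subtracting the lower bound and comparing as unsigned makes any value below the range wrap around to a huge number. A standalone sketch with made-up instance-type values:

    constexpr bool IsInRange(int x, int lo, int hi) {
      // (x - lo) viewed as unsigned is <= (hi - lo) exactly when lo <= x <= hi.
      return static_cast<unsigned>(x - lo) <= static_cast<unsigned>(hi - lo);
    }

    constexpr int kLo = 0x421;  // hypothetical kJSObjectType
    constexpr int kHi = 0x423;  // hypothetical kLastJSApiObjectType

    static_assert(IsInRange(0x421, kLo, kHi), "lower bound is in range");
    static_assert(IsInRange(0x423, kLo, kHi), "upper bound is in range");
    static_assert(!IsInRange(0x420, kLo, kHi), "below range wraps to a huge unsigned value");
    static_assert(!IsInRange(0x424, kLo, kHi), "above range");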
@@ -532,6 +718,25 @@ class Internals {
  return reinterpret_cast<internal::Address*>(addr);
  }
 
+ #ifdef V8_ENABLE_SANDBOX
+ V8_INLINE static internal::Address* GetExternalPointerTableBase(
+ v8::Isolate* isolate) {
+ internal::Address addr = reinterpret_cast<internal::Address>(isolate) +
+ kIsolateExternalPointerTableOffset +
+ kExternalPointerTableBufferOffset;
+ return *reinterpret_cast<internal::Address**>(addr);
+ }
+
+ V8_INLINE static internal::Address* GetSharedExternalPointerTableBase(
+ v8::Isolate* isolate) {
+ internal::Address addr = reinterpret_cast<internal::Address>(isolate) +
+ kIsolateSharedExternalPointerTableAddressOffset;
+ addr = *reinterpret_cast<internal::Address*>(addr);
+ addr += kExternalPointerTableBufferOffset;
+ return *reinterpret_cast<internal::Address**>(addr);
+ }
+ #endif
+
  template <typename T>
  V8_INLINE static T ReadRawField(internal::Address heap_object_ptr,
  int offset) {
@@ -572,38 +777,38 @@ class Internals {
  #endif
  }
 
- V8_INLINE static internal::Isolate* GetIsolateForSandbox(
- internal::Address obj) {
- #ifdef V8_SANDBOXED_EXTERNAL_POINTERS
- return internal::IsolateFromNeverReadOnlySpaceObject(obj);
+ V8_INLINE static v8::Isolate* GetIsolateForSandbox(internal::Address obj) {
+ #ifdef V8_ENABLE_SANDBOX
+ return reinterpret_cast<v8::Isolate*>(
+ internal::IsolateFromNeverReadOnlySpaceObject(obj));
  #else
  // Not used in non-sandbox mode.
  return nullptr;
  #endif
  }
 
- V8_INLINE static Address DecodeExternalPointer(
- const Isolate* isolate, ExternalPointer_t encoded_pointer,
- ExternalPointerTag tag) {
- #ifdef V8_SANDBOXED_EXTERNAL_POINTERS
- return internal::DecodeExternalPointerImpl(isolate, encoded_pointer, tag);
- #else
- return encoded_pointer;
- #endif
- }
-
+ template <ExternalPointerTag tag>
  V8_INLINE static internal::Address ReadExternalPointerField(
- internal::Isolate* isolate, internal::Address heap_object_ptr, int offset,
- ExternalPointerTag tag) {
- #ifdef V8_SANDBOXED_EXTERNAL_POINTERS
- internal::ExternalPointer_t encoded_value =
- ReadRawField<uint32_t>(heap_object_ptr, offset);
- // We currently have to treat zero as nullptr in embedder slots.
- return encoded_value ? DecodeExternalPointer(isolate, encoded_value, tag)
- : 0;
- #else
- return ReadRawField<Address>(heap_object_ptr, offset);
+ v8::Isolate* isolate, internal::Address heap_object_ptr, int offset) {
+ #ifdef V8_ENABLE_SANDBOX
+ if (IsSandboxedExternalPointerType(tag)) {
+ // See src/sandbox/external-pointer-table-inl.h. Logic duplicated here so
+ // it can be inlined and doesn't require an additional call.
+ internal::Address* table =
+ IsSharedExternalPointerType(tag)
+ ? GetSharedExternalPointerTableBase(isolate)
+ : GetExternalPointerTableBase(isolate);
+ internal::ExternalPointerHandle handle =
+ ReadRawField<ExternalPointerHandle>(heap_object_ptr, offset);
+ uint32_t index = handle >> kExternalPointerIndexShift;
+ std::atomic<internal::Address>* ptr =
+ reinterpret_cast<std::atomic<internal::Address>*>(&table[index]);
+ internal::Address entry =
+ std::atomic_load_explicit(ptr, std::memory_order_relaxed);
+ return entry & ~tag;
+ }
  #endif
+ return ReadRawField<Address>(heap_object_ptr, offset);
  }
 
  #ifdef V8_COMPRESS_POINTERS
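Reading the change above end to end: on the sandbox path the on-heap slot no longer holds a raw pointer but an ExternalPointerHandle, which is a pre-shifted table index, so decoding is one shift, one (relaxed atomic) table load, and one AND with the inverted tag. A standalone sketch with hypothetical values, mirroring that logic without the atomics:

    #include <cstdint>

    using Address = uint64_t;
    using ExternalPointerHandle = uint32_t;

    constexpr uint32_t kExternalPointerIndexShift = 6;                      // as above
    constexpr uint64_t kMarkBit = uint64_t{1} << 62;
    constexpr uint64_t kSomeTag = (uint64_t{0b00011011} << 48) | kMarkBit;  // hypothetical tag

    Address DecodeEntry(const Address* table, ExternalPointerHandle handle) {
      uint32_t index = handle >> kExternalPointerIndexShift;  // handle is a pre-shifted index
      Address entry = table[index];                           // real code loads this atomically
      return entry & ~kSomeTag;                               // strip type bits and mark bit
    }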
@@ -652,7 +857,7 @@ class BackingStoreBase {};
 
  // The maximum value in enum GarbageCollectionReason, defined in heap.h.
  // This is needed for histograms sampling garbage collection reasons.
- constexpr int kGarbageCollectionReasonMaxValue = 25;
+ constexpr int kGarbageCollectionReasonMaxValue = 27;
 
  } // namespace internal