libv8-node 17.9.1.1-aarch64-linux → 18.8.0.0-aarch64-linux

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (64)
  1. checksums.yaml +4 -4
  2. data/ext/libv8-node/location.rb +1 -1
  3. data/lib/libv8/node/version.rb +3 -3
  4. data/vendor/v8/aarch64-linux/libv8/obj/libv8_monolith.a +0 -0
  5. data/vendor/v8/include/cppgc/allocation.h +88 -17
  6. data/vendor/v8/include/cppgc/default-platform.h +2 -10
  7. data/vendor/v8/include/cppgc/explicit-management.h +22 -4
  8. data/vendor/v8/include/cppgc/garbage-collected.h +15 -26
  9. data/vendor/v8/include/cppgc/heap-consistency.h +13 -0
  10. data/vendor/v8/include/cppgc/heap-state.h +12 -0
  11. data/vendor/v8/include/cppgc/heap.h +7 -2
  12. data/vendor/v8/include/cppgc/internal/api-constants.h +8 -0
  13. data/vendor/v8/include/cppgc/internal/caged-heap-local-data.h +23 -12
  14. data/vendor/v8/include/cppgc/internal/finalizer-trait.h +2 -1
  15. data/vendor/v8/include/cppgc/internal/logging.h +3 -3
  16. data/vendor/v8/include/cppgc/internal/persistent-node.h +39 -27
  17. data/vendor/v8/include/cppgc/internal/pointer-policies.h +4 -4
  18. data/vendor/v8/include/cppgc/internal/write-barrier.h +26 -32
  19. data/vendor/v8/include/cppgc/member.h +5 -2
  20. data/vendor/v8/include/cppgc/persistent.h +30 -31
  21. data/vendor/v8/include/cppgc/platform.h +3 -1
  22. data/vendor/v8/include/cppgc/prefinalizer.h +34 -11
  23. data/vendor/v8/include/cppgc/testing.h +9 -2
  24. data/vendor/v8/include/cppgc/type-traits.h +6 -13
  25. data/vendor/v8/include/libplatform/libplatform.h +0 -11
  26. data/vendor/v8/include/libplatform/v8-tracing.h +0 -1
  27. data/vendor/v8/include/v8-array-buffer.h +14 -2
  28. data/vendor/v8/include/v8-callbacks.h +26 -6
  29. data/vendor/v8/include/v8-context.h +3 -14
  30. data/vendor/v8/include/v8-cppgc.h +16 -126
  31. data/vendor/v8/include/v8-data.h +15 -0
  32. data/vendor/v8/include/v8-debug.h +21 -4
  33. data/vendor/v8/include/v8-embedder-heap.h +10 -30
  34. data/vendor/v8/include/v8-embedder-state-scope.h +51 -0
  35. data/vendor/v8/include/v8-exception.h +0 -7
  36. data/vendor/v8/include/v8-fast-api-calls.h +82 -31
  37. data/vendor/v8/include/v8-function.h +3 -0
  38. data/vendor/v8/include/v8-initialization.h +64 -31
  39. data/vendor/v8/include/v8-inspector.h +45 -4
  40. data/vendor/v8/include/v8-internal.h +189 -102
  41. data/vendor/v8/include/v8-isolate.h +49 -2
  42. data/vendor/v8/include/v8-local-handle.h +0 -4
  43. data/vendor/v8/include/v8-locker.h +2 -1
  44. data/vendor/v8/include/v8-message.h +19 -44
  45. data/vendor/v8/include/v8-metrics.h +32 -15
  46. data/vendor/v8/include/v8-object.h +11 -6
  47. data/vendor/v8/include/v8-platform.h +365 -6
  48. data/vendor/v8/include/v8-primitive.h +14 -6
  49. data/vendor/v8/include/v8-profiler.h +78 -2
  50. data/vendor/v8/include/v8-script.h +27 -51
  51. data/vendor/v8/include/v8-snapshot.h +0 -2
  52. data/vendor/v8/include/v8-statistics.h +2 -0
  53. data/vendor/v8/include/v8-template.h +31 -4
  54. data/vendor/v8/include/v8-traced-handle.h +39 -224
  55. data/vendor/v8/include/v8-unwinder.h +10 -7
  56. data/vendor/v8/include/v8-value-serializer-version.h +1 -1
  57. data/vendor/v8/include/v8-value-serializer.h +32 -2
  58. data/vendor/v8/include/v8-version.h +4 -4
  59. data/vendor/v8/include/v8-wasm.h +13 -1
  60. data/vendor/v8/include/v8-weak-callback-info.h +20 -6
  61. data/vendor/v8/include/v8.h +0 -1
  62. data/vendor/v8/include/v8config.h +56 -11
  63. metadata +6 -6
  64. data/vendor/v8/include/cppgc/internal/prefinalizer-handler.h +0 -30
data/vendor/v8/include/v8-initialization.h

@@ -8,10 +8,11 @@
 #include <stddef.h>
 #include <stdint.h>
 
-#include "v8-internal.h"   // NOLINT(build/include_directory)
-#include "v8-isolate.h"    // NOLINT(build/include_directory)
-#include "v8-platform.h"   // NOLINT(build/include_directory)
-#include "v8config.h"      // NOLINT(build/include_directory)
+#include "v8-callbacks.h"  // NOLINT(build/include_directory)
+#include "v8-internal.h"   // NOLINT(build/include_directory)
+#include "v8-isolate.h"    // NOLINT(build/include_directory)
+#include "v8-platform.h"   // NOLINT(build/include_directory)
+#include "v8config.h"      // NOLINT(build/include_directory)
 
 // We reserve the V8_* prefix for macros defined in V8 public API and
 // assume there are no name conflicts with the embedder's code.
@@ -99,8 +100,10 @@ class V8_EXPORT V8 {
     const int kBuildConfiguration =
         (internal::PointerCompressionIsEnabled() ? kPointerCompression : 0) |
         (internal::SmiValuesAre31Bits() ? k31BitSmis : 0) |
-        (internal::HeapSandboxIsEnabled() ? kHeapSandbox : 0) |
-        (internal::VirtualMemoryCageIsEnabled() ? kVirtualMemoryCage : 0);
+        (internal::SandboxedExternalPointersAreEnabled()
+             ? kSandboxedExternalPointers
+             : 0) |
+        (internal::SandboxIsEnabled() ? kSandbox : 0);
     return Initialize(kBuildConfiguration);
   }
 
@@ -180,53 +183,75 @@ class V8_EXPORT V8 {
    * Clears all references to the v8::Platform. This should be invoked after
    * V8 was disposed.
    */
-  static void ShutdownPlatform();
+  static void DisposePlatform();
+  V8_DEPRECATED("Use DisposePlatform()")
+  static void ShutdownPlatform() { DisposePlatform(); }
 
-#ifdef V8_VIRTUAL_MEMORY_CAGE
+#ifdef V8_SANDBOX
   //
-  // Virtual Memory Cage related API.
+  // Sandbox related API.
   //
   // This API is not yet stable and subject to changes in the future.
   //
 
   /**
-   * Initializes the virtual memory cage for V8.
+   * Initializes the V8 sandbox.
    *
    * This must be invoked after the platform was initialized but before V8 is
-   * initialized. The virtual memory cage is torn down during platform shutdown.
+   * initialized. The sandbox is torn down during platform shutdown.
    * Returns true on success, false otherwise.
    *
-   * TODO(saelo) Once it is no longer optional to create the virtual memory
-   * cage when compiling with V8_VIRTUAL_MEMORY_CAGE, the cage initialization
-   * will likely happen as part of V8::Initialize, at which point this function
-   * should be removed.
+   * TODO(saelo) Once it is no longer optional to initialize the sandbox when
+   * compiling with V8_SANDBOX, the sandbox initialization will likely happen
+   * as part of V8::Initialize, at which point this function should be removed.
    */
-  static bool InitializeVirtualMemoryCage();
+  static bool InitializeSandbox();
+  V8_DEPRECATE_SOON("Use InitializeSandbox()")
+  static bool InitializeVirtualMemoryCage() { return InitializeSandbox(); }
 
   /**
-   * Provides access to the virtual memory cage page allocator.
+   * Provides access to the virtual address subspace backing the sandbox.
    *
-   * This allocator allocates pages inside the virtual memory cage. It can for
-   * example be used to obtain virtual memory for ArrayBuffer backing stores,
-   * which must be located inside the cage.
+   * This can be used to allocate pages inside the sandbox, for example to
+   * obtain virtual memory for ArrayBuffer backing stores, which must be
+   * located inside the sandbox.
    *
-   * It should be assumed that an attacker can corrupt data inside the cage,
-   * and so in particular the contents of pages returned by this allocator,
-   * arbitrarily and concurrently. Due to this, it is recommended to to only
-   * place pure data buffers in pages obtained through this allocator.
+   * It should be assumed that an attacker can corrupt data inside the sandbox,
+   * and so in particular the contents of pages allocagted in this virtual
+   * address space, arbitrarily and concurrently. Due to this, it is
+   * recommended to to only place pure data buffers in them.
    *
-   * This function must only be called after initializing the virtual memory
-   * cage and V8.
+   * This function must only be called after initializing the sandbox.
    */
+  static VirtualAddressSpace* GetSandboxAddressSpace();
+  V8_DEPRECATE_SOON("Use GetSandboxAddressSpace()")
   static PageAllocator* GetVirtualMemoryCagePageAllocator();
 
   /**
-   * Returns the size of the virtual memory cage in bytes.
+   * Returns the size of the sandbox in bytes.
    *
-   * If the cage has not been initialized, or if the initialization failed,
+   * If the sandbox has not been initialized, or if the initialization failed,
    * this returns zero.
    */
-  static size_t GetVirtualMemoryCageSizeInBytes();
+  static size_t GetSandboxSizeInBytes();
+  V8_DEPRECATE_SOON("Use GetSandboxSizeInBytes()")
+  static size_t GetVirtualMemoryCageSizeInBytes() {
+    return GetSandboxSizeInBytes();
+  }
+
+  /**
+   * Returns whether the sandbox is configured securely.
+   *
+   * If V8 cannot create a proper sandbox, it will fall back to creating a
+   * sandbox that doesn't have the desired security properties but at least
+   * still allows V8 to function. This API can be used to determine if such an
+   * insecure sandbox is being used, in which case it will return false.
+   */
+  static bool IsSandboxConfiguredSecurely();
+  V8_DEPRECATE_SOON("Use IsSandboxConfiguredSecurely()")
+  static bool IsUsingSecureVirtualMemoryCage() {
+    return IsSandboxConfiguredSecurely();
+  }
 #endif
 
   /**
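For embedders, the practical upshot of this hunk is a rename plus forwarding shims: the cage-era names still compile but are deprecated. A minimal setup/teardown sketch against the new names, assuming the ordering stated in the comments (platform, then sandbox, then V8); error handling and isolate creation are elided:

#include <memory>

#include "libplatform/libplatform.h"
#include "v8-initialization.h"

void RunEmbedder() {
  std::unique_ptr<v8::Platform> platform = v8::platform::NewDefaultPlatform();
  v8::V8::InitializePlatform(platform.get());
#ifdef V8_SANDBOX
  // Must run after the platform is initialized but before V8::Initialize();
  // the deprecated InitializeVirtualMemoryCage() now simply forwards here.
  if (!v8::V8::InitializeSandbox()) return;
#endif
  v8::V8::Initialize();

  // ... create isolates and contexts, run scripts ...

  v8::V8::Dispose();
  v8::V8::DisposePlatform();  // formerly ShutdownPlatform()
}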
@@ -251,6 +276,14 @@ class V8_EXPORT V8 {
       UnhandledExceptionCallback unhandled_exception_callback);
 #endif
 
+  /**
+   * Allows the host application to provide a callback that will be called when
+   * v8 has encountered a fatal failure to allocate memory and is about to
+   * terminate.
+   */
+
+  static void SetFatalMemoryErrorCallback(OOMErrorCallback oom_error_callback);
+
   /**
    * Get statistics about the shared memory usage.
    */
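This new hook (together with the v8-callbacks.h include added at the top of the file) lets a host log or flush state before V8 aborts on an allocation failure. A hedged sketch, assuming the OOMErrorCallback signature of this V8 line (a location string plus a heap-OOM flag; verify against the bundled v8-callbacks.h before relying on it):

#include <cstdio>

// Called right before V8 terminates due to a fatal allocation failure.
void OnFatalMemoryError(const char* location, bool is_heap_oom) {
  std::fprintf(stderr, "V8 fatal OOM at %s (heap OOM: %s)\n", location,
               is_heap_oom ? "yes" : "no");
  std::fflush(stderr);
}

// During startup:
//   v8::V8::SetFatalMemoryErrorCallback(OnFatalMemoryError);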
@@ -262,8 +295,8 @@ class V8_EXPORT V8 {
   enum BuildConfigurationFeatures {
     kPointerCompression = 1 << 0,
     k31BitSmis = 1 << 1,
-    kHeapSandbox = 1 << 2,
-    kVirtualMemoryCage = 1 << 3,
+    kSandboxedExternalPointers = 1 << 2,
+    kSandbox = 1 << 3,
   };
 
   /**
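This enum is the embedder-side half of a startup handshake: the flags are computed inline in the embedder's translation unit (see the kBuildConfiguration hunk above) and compared against the flags baked into the V8 library, so a compile-time mismatch is caught at initialization instead of surfacing as heap corruption. A minimal sketch of how such a bit-flag check works; CheckBuildConfig is a hypothetical stand-in for the check inside V8::Initialize(int), and the enum mirrors the one in this file:

#include <cstdio>

enum BuildConfigurationFeatures {
  kPointerCompression = 1 << 0,
  k31BitSmis = 1 << 1,
  kSandboxedExternalPointers = 1 << 2,
  kSandbox = 1 << 3,
};

// Hypothetical: V8's real check lives inside V8::Initialize(int).
bool CheckBuildConfig(int embedder_flags, int library_flags) {
  if (embedder_flags != library_flags) {
    std::fprintf(stderr, "embedder/library build configuration mismatch\n");
    return false;
  }
  return true;
}

int main() {
  const int library = kPointerCompression | k31BitSmis;
  return CheckBuildConfig(kPointerCompression | k31BitSmis, library) ? 0 : 1;
}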
data/vendor/v8/include/v8-inspector.h

@@ -23,6 +23,10 @@ class Value;
 
 namespace v8_inspector {
 
+namespace internal {
+class V8DebuggerId;
+}  // namespace internal
+
 namespace protocol {
 namespace Debugger {
 namespace API {
@@ -106,6 +110,30 @@ class V8_EXPORT V8ContextInfo {
   V8ContextInfo& operator=(const V8ContextInfo&) = delete;
 };
 
+// This debugger id tries to be unique by generating two random
+// numbers, which should most likely avoid collisions.
+// Debugger id has a 1:1 mapping to context group. It is used to
+// attribute stack traces to a particular debugging, when doing any
+// cross-debugger operations (e.g. async step in).
+// See also Runtime.UniqueDebuggerId in the protocol.
+class V8_EXPORT V8DebuggerId {
+ public:
+  V8DebuggerId() = default;
+  V8DebuggerId(const V8DebuggerId&) = default;
+  V8DebuggerId& operator=(const V8DebuggerId&) = default;
+
+  std::unique_ptr<StringBuffer> toString() const;
+  bool isValid() const;
+  std::pair<int64_t, int64_t> pair() const;
+
+ private:
+  friend class internal::V8DebuggerId;
+  explicit V8DebuggerId(std::pair<int64_t, int64_t>);
+
+  int64_t m_first = 0;
+  int64_t m_second = 0;
+};
+
 class V8_EXPORT V8StackTrace {
  public:
   virtual StringView firstNonEmptySourceURL() const = 0;
@@ -114,14 +142,10 @@ class V8_EXPORT V8StackTrace {
   virtual int topLineNumber() const = 0;
   virtual int topColumnNumber() const = 0;
   virtual int topScriptId() const = 0;
-  V8_DEPRECATE_SOON("Use V8::StackTrace::topScriptId() instead.")
-  int topScriptIdAsInteger() const { return topScriptId(); }
   virtual StringView topFunctionName() const = 0;
 
   virtual ~V8StackTrace() = default;
   virtual std::unique_ptr<protocol::Runtime::API::StackTrace>
-      buildInspectorObject() const = 0;
-  virtual std::unique_ptr<protocol::Runtime::API::StackTrace>
       buildInspectorObject(int maxAsyncDepth) const = 0;
   virtual std::unique_ptr<StringBuffer> toString() const = 0;
 
@@ -181,6 +205,15 @@ class V8_EXPORT V8InspectorSession {
   virtual void triggerPreciseCoverageDeltaUpdate(StringView occasion) = 0;
 };
 
+class V8_EXPORT WebDriverValue {
+ public:
+  explicit WebDriverValue(StringView type, v8::MaybeLocal<v8::Value> value = {})
+      : type(type), value(value) {}
+
+  StringView type;
+  v8::MaybeLocal<v8::Value> value;
+};
+
 class V8_EXPORT V8InspectorClient {
  public:
   virtual ~V8InspectorClient() = default;
@@ -195,6 +228,10 @@ class V8_EXPORT V8InspectorClient {
   virtual void beginUserGesture() {}
   virtual void endUserGesture() {}
 
+  virtual std::unique_ptr<WebDriverValue> serializeToWebDriverValue(
+      v8::Local<v8::Value> v8_value, int max_depth) {
+    return nullptr;
+  }
   virtual std::unique_ptr<StringBuffer> valueSubtype(v8::Local<v8::Value>) {
     return nullptr;
   }
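Clients that return nullptr here keep the default serialization; returning a WebDriverValue short-circuits it. A sketch of an override (the class name and the "number" type string are illustrative; a real serializer would recurse up to max_depth):

#include <memory>

#include "v8-inspector.h"

class MyInspectorClient : public v8_inspector::V8InspectorClient {
 public:
  std::unique_ptr<v8_inspector::WebDriverValue> serializeToWebDriverValue(
      v8::Local<v8::Value> v8_value, int max_depth) override {
    if (v8_value->IsNumber()) {
      static const uint8_t kType[] = "number";
      return std::make_unique<v8_inspector::WebDriverValue>(
          v8_inspector::StringView(kType, 6), v8_value);
    }
    return nullptr;  // fall back to the built-in handling
  }
};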
@@ -246,6 +283,9 @@ class V8_EXPORT V8InspectorClient {
   // The caller would defer to generating a random 64 bit integer if
   // this method returns 0.
   virtual int64_t generateUniqueId() { return 0; }
+
+  virtual void dispatchError(v8::Local<v8::Context>, v8::Local<v8::Message>,
+                             v8::Local<v8::Value>) {}
 };
 
 // These stack trace ids are intended to be passed between debuggers and be
@@ -280,6 +320,7 @@ class V8_EXPORT V8Inspector {
   virtual void contextDestroyed(v8::Local<v8::Context>) = 0;
   virtual void resetContextGroup(int contextGroupId) = 0;
   virtual v8::MaybeLocal<v8::Context> contextById(int contextId) = 0;
+  virtual V8DebuggerId uniqueDebuggerId(int contextId) = 0;
 
   // Various instrumentation.
   virtual void idleStarted() = 0;
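Together with the V8DebuggerId class above, this gives embedders a stable way to attribute stack traces across debuggers. A usage sketch, assuming an existing V8Inspector instance and a context id obtained via contextCreated:

// Look up the unique id of a context's group and extract its two halves.
v8_inspector::V8DebuggerId id = inspector->uniqueDebuggerId(context_id);
if (id.isValid()) {
  std::pair<int64_t, int64_t> raw = id.pair();
  std::unique_ptr<v8_inspector::StringBuffer> text = id.toString();
  // raw / text can now be correlated with Runtime.UniqueDebuggerId values.
}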
data/vendor/v8/include/v8-internal.h

@@ -29,6 +29,13 @@ class Isolate;
 typedef uintptr_t Address;
 static const Address kNullAddress = 0;
 
+constexpr int KB = 1024;
+constexpr int MB = KB * 1024;
+constexpr int GB = MB * 1024;
+#ifdef V8_TARGET_ARCH_X64
+constexpr size_t TB = size_t{GB} * 1024;
+#endif
+
 /**
  * Configuration of tagging scheme.
  */
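A quick sanity check on why only TB changes type: GB is 2^30 and still fits in a 32-bit int, while 2^40 does not, so TB is widened to size_t (and only defined on x64, where the address space can represent it):

#include <cstddef>

constexpr int KB = 1024;
constexpr int MB = KB * 1024;
constexpr int GB = MB * 1024;             // 2^30 = 1073741824, fits in int32
constexpr size_t TB = size_t{GB} * 1024;  // 2^40 needs a 64-bit type
static_assert(TB == size_t{1} << 40, "1 TB");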
@@ -109,6 +116,11 @@ struct SmiTagging<8> {
 };
 
 #ifdef V8_COMPRESS_POINTERS
+// See v8:7703 or src/common/ptr-compr-inl.h for details about pointer
+// compression.
+constexpr size_t kPtrComprCageReservationSize = size_t{1} << 32;
+constexpr size_t kPtrComprCageBaseAlignment = size_t{1} << 32;
+
 static_assert(
     kApiSystemPointerSize == kApiInt64Size,
     "Pointer compression can be enabled only for 64-bit architectures");
@@ -121,36 +133,6 @@ constexpr bool PointerCompressionIsEnabled() {
   return kApiTaggedSize != kApiSystemPointerSize;
 }
 
-constexpr bool HeapSandboxIsEnabled() {
-#ifdef V8_HEAP_SANDBOX
-  return true;
-#else
-  return false;
-#endif
-}
-
-using ExternalPointer_t = Address;
-
-// If the heap sandbox is enabled, these tag values will be ORed with the
-// external pointers in the external pointer table to prevent use of pointers of
-// the wrong type. When a pointer is loaded, it is ANDed with the inverse of the
-// expected type's tag. The tags are constructed in a way that guarantees that a
-// failed type check will result in one or more of the top bits of the pointer
-// to be set, rendering the pointer inacessible. This construction allows
-// performing the type check and removing GC marking bits from the pointer at
-// the same time.
-enum ExternalPointerTag : uint64_t {
-  kExternalPointerNullTag = 0x0000000000000000,
-  kExternalStringResourceTag = 0x00ff000000000000,       // 0b000000011111111
-  kExternalStringResourceDataTag = 0x017f000000000000,   // 0b000000101111111
-  kForeignForeignAddressTag = 0x01bf000000000000,        // 0b000000110111111
-  kNativeContextMicrotaskQueueTag = 0x01df000000000000,  // 0b000000111011111
-  kEmbedderDataSlotPayloadTag = 0x01ef000000000000,      // 0b000000111101111
-  kCodeEntryPointTag = 0x01f7000000000000,               // 0b000000111110111
-};
-
-constexpr uint64_t kExternalPointerTagMask = 0xffff000000000000;
-
 #ifdef V8_31BIT_SMIS_ON_64BIT_ARCH
 using PlatformSmiTagging = SmiTagging<kApiInt32Size>;
 #else
@@ -171,6 +153,164 @@ V8_INLINE static constexpr internal::Address IntToSmi(int value) {
          kSmiTag;
 }
 
+/*
+ * Sandbox related types, constants, and functions.
+ */
+constexpr bool SandboxIsEnabled() {
+#ifdef V8_SANDBOX
+  return true;
+#else
+  return false;
+#endif
+}
+
+constexpr bool SandboxedExternalPointersAreEnabled() {
+#ifdef V8_SANDBOXED_EXTERNAL_POINTERS
+  return true;
+#else
+  return false;
+#endif
+}
+
+// SandboxedPointers are guaranteed to point into the sandbox. This is achieved
+// for example by storing them as offset rather than as raw pointers.
+using SandboxedPointer_t = Address;
+
+// ExternalPointers point to objects located outside the sandbox. When sandboxed
+// external pointers are enabled, these are stored in an external pointer table
+// and referenced from HeapObjects through indices.
+#ifdef V8_SANDBOXED_EXTERNAL_POINTERS
+using ExternalPointer_t = uint32_t;
+#else
+using ExternalPointer_t = Address;
+#endif
+
+#ifdef V8_SANDBOX_IS_AVAILABLE
+
+// Size of the sandbox, excluding the guard regions surrounding it.
+constexpr size_t kSandboxSizeLog2 = 40;  // 1 TB
+constexpr size_t kSandboxSize = 1ULL << kSandboxSizeLog2;
+
+// Required alignment of the sandbox. For simplicity, we require the
+// size of the guard regions to be a multiple of this, so that this specifies
+// the alignment of the sandbox including and excluding surrounding guard
+// regions. The alignment requirement is due to the pointer compression cage
+// being located at the start of the sandbox.
+constexpr size_t kSandboxAlignment = kPtrComprCageBaseAlignment;
+
+// Sandboxed pointers are stored inside the heap as offset from the sandbox
+// base shifted to the left. This way, it is guaranteed that the offset is
+// smaller than the sandbox size after shifting it to the right again. This
+// constant specifies the shift amount.
+constexpr uint64_t kSandboxedPointerShift = 64 - kSandboxSizeLog2;
+
+// Size of the guard regions surrounding the sandbox. This assumes a worst-case
+// scenario of a 32-bit unsigned index used to access an array of 64-bit
+// values.
+constexpr size_t kSandboxGuardRegionSize = 32ULL * GB;
+
+static_assert((kSandboxGuardRegionSize % kSandboxAlignment) == 0,
+              "The size of the guard regions around the sandbox must be a "
+              "multiple of its required alignment.");
+
+// Minimum size of the sandbox, excluding the guard regions surrounding it. If
+// the virtual memory reservation for the sandbox fails, its size is currently
+// halved until either the reservation succeeds or the minimum size is reached.
+// A minimum of 32GB allows the 4GB pointer compression region as well as the
+// ArrayBuffer partition and two 10GB Wasm memory cages to fit into the
+// sandbox. 32GB should also be the minimum possible size of the userspace
+// address space as there are some machine configurations with only 36 virtual
+// address bits.
+constexpr size_t kSandboxMinimumSize = 32ULL * GB;
+
+static_assert(kSandboxMinimumSize <= kSandboxSize,
+              "The minimal size of the sandbox must be smaller or equal to the "
+              "regular size.");
+
+// On OSes where reserving virtual memory is too expensive to reserve the
+// entire address space backing the sandbox, notably Windows pre 8.1, we create
+// a partially reserved sandbox that doesn't actually reserve most of the
+// memory, and so doesn't have the desired security properties as unrelated
+// memory allocations could end up inside of it, but which still ensures that
+// objects that should be located inside the sandbox are allocated within
+// kSandboxSize bytes from the start of the sandbox. The minimum size of the
+// region that is actually reserved for such a sandbox is specified by this
+// constant and should be big enough to contain the pointer compression cage as
+// well as the ArrayBuffer partition.
+constexpr size_t kSandboxMinimumReservationSize = 8ULL * GB;
+
+static_assert(kSandboxMinimumSize > kPtrComprCageReservationSize,
+              "The sandbox must be larger than the pointer compression cage "
+              "contained within it.");
+static_assert(kSandboxMinimumReservationSize > kPtrComprCageReservationSize,
+              "The minimum reservation size for a sandbox must be larger than "
+              "the pointer compression cage contained within it.");
+
+// For now, even if the sandbox is enabled, we still allow backing stores to be
+// allocated outside of it as fallback. This will simplify the initial rollout.
+// However, if sandboxed pointers are also enabled, we must always place
+// backing stores inside the sandbox as they will be referenced though them.
+#ifdef V8_SANDBOXED_POINTERS
+constexpr bool kAllowBackingStoresOutsideSandbox = false;
+#else
+constexpr bool kAllowBackingStoresOutsideSandbox = true;
+#endif  // V8_SANDBOXED_POINTERS
+
+// The size of the virtual memory reservation for an external pointer table.
+// This determines the maximum number of entries in a table. Using a maximum
+// size allows omitting bounds checks on table accesses if the indices are
+// guaranteed (e.g. through shifting) to be below the maximum index. This
+// value must be a power of two.
+static const size_t kExternalPointerTableReservationSize = 128 * MB;
+
+// The maximum number of entries in an external pointer table.
+static const size_t kMaxSandboxedExternalPointers =
+    kExternalPointerTableReservationSize / kApiSystemPointerSize;
+
+// The external pointer table indices stored in HeapObjects as external
+// pointers are shifted to the left by this amount to guarantee that they are
+// smaller than the maximum table size.
+static const uint32_t kExternalPointerIndexShift = 8;
+static_assert((1 << (32 - kExternalPointerIndexShift)) ==
+                  kMaxSandboxedExternalPointers,
+              "kExternalPointerTableReservationSize and "
+              "kExternalPointerIndexShift don't match");
+
+#endif  // V8_SANDBOX_IS_AVAILABLE
+
+// If sandboxed external pointers are enabled, these tag values will be ORed
+// with the external pointers in the external pointer table to prevent use of
+// pointers of the wrong type. When a pointer is loaded, it is ANDed with the
+// inverse of the expected type's tag. The tags are constructed in a way that
+// guarantees that a failed type check will result in one or more of the top
+// bits of the pointer to be set, rendering the pointer inacessible. Besides
+// the type tag bits (48 through 62), the tags also have the GC mark bit (63)
+// set, so that the mark bit is automatically set when a pointer is written
+// into the external pointer table (in which case it is clearly alive) and is
+// cleared when the pointer is loaded. The exception to this is the free entry
+// tag, which doesn't have the mark bit set, as the entry is not alive. This
+// construction allows performing the type check and removing GC marking bits
+// (the MSB) from the pointer at the same time.
+// Note: this scheme assumes a 48-bit address space and will likely break if
+// more virtual address bits are used.
+constexpr uint64_t kExternalPointerTagMask = 0xffff000000000000;
+constexpr uint64_t kExternalPointerTagShift = 48;
+#define MAKE_TAG(v) (static_cast<uint64_t>(v) << kExternalPointerTagShift)
+// clang-format off
+enum ExternalPointerTag : uint64_t {
+  kExternalPointerNullTag         = MAKE_TAG(0b0000000000000000),
+  kExternalPointerFreeEntryTag    = MAKE_TAG(0b0111111110000000),
+  kExternalStringResourceTag      = MAKE_TAG(0b1000000011111111),
+  kExternalStringResourceDataTag  = MAKE_TAG(0b1000000101111111),
+  kForeignForeignAddressTag       = MAKE_TAG(0b1000000110111111),
+  kNativeContextMicrotaskQueueTag = MAKE_TAG(0b1000000111011111),
+  kEmbedderDataSlotPayloadTag     = MAKE_TAG(0b1000000111101111),
+  kCodeEntryPointTag              = MAKE_TAG(0b1000000111110111),
+  kExternalObjectValueTag         = MAKE_TAG(0b1000000111111011),
+};
+// clang-format on
+#undef MAKE_TAG
+
 // Converts encoded external pointer to address.
 V8_EXPORT Address DecodeExternalPointerImpl(const Isolate* isolate,
                                             ExternalPointer_t pointer,
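A worked sketch of the tag scheme the comment above describes, reproduced outside of V8 in plain C++ (the two tag values reuse entries from the enum). Storing ORs the tag in; loading ANDs with the inverse of the expected tag, so a matching tag strips both the type bits and the mark bit, while a mismatched tag leaves high bits set and the resulting "pointer" unusable:

#include <cassert>
#include <cstdint>

constexpr uint64_t kTagShift = 48;
constexpr uint64_t MakeTag(uint64_t v) { return v << kTagShift; }
constexpr uint64_t kStringResourceTag = MakeTag(0b1000000011111111);
constexpr uint64_t kForeignAddressTag = MakeTag(0b1000000110111111);

int main() {
  uint64_t ptr = 0x00007f0012340000;          // plausible 48-bit address
  uint64_t entry = ptr | kStringResourceTag;  // stored: tag + mark bit ORed in

  // Load with the matching tag: the original pointer comes back clean.
  assert((entry & ~kStringResourceTag) == ptr);

  // Load with the wrong tag: some top bits survive, so the result is not a
  // canonical address and dereferencing it will fault.
  uint64_t bad = entry & ~kForeignAddressTag;
  assert((bad >> kTagShift) != 0);
  return 0;
}

The table-size arithmetic in this hunk checks out the same way: 128 MB of reservation divided by 8-byte entries gives 2^24 entries, which is exactly the range of a 32-bit index shifted right by kExternalPointerIndexShift = 8, as the static_assert verifies.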
@@ -214,19 +354,19 @@ class Internals {
   static const int kFixedArrayHeaderSize = 2 * kApiTaggedSize;
   static const int kEmbedderDataArrayHeaderSize = 2 * kApiTaggedSize;
   static const int kEmbedderDataSlotSize = kApiSystemPointerSize;
-#ifdef V8_HEAP_SANDBOX
+#ifdef V8_SANDBOXED_EXTERNAL_POINTERS
   static const int kEmbedderDataSlotRawPayloadOffset = kApiTaggedSize;
 #endif
   static const int kNativeContextEmbedderDataOffset = 6 * kApiTaggedSize;
-  static const int kFullStringRepresentationMask = 0x0f;
+  static const int kStringRepresentationAndEncodingMask = 0x0f;
   static const int kStringEncodingMask = 0x8;
   static const int kExternalTwoByteRepresentationTag = 0x02;
   static const int kExternalOneByteRepresentationTag = 0x0a;
 
   static const uint32_t kNumIsolateDataSlots = 4;
   static const int kStackGuardSize = 7 * kApiSystemPointerSize;
-  static const int kBuiltinTier0EntryTableSize = 13 * kApiSystemPointerSize;
-  static const int kBuiltinTier0TableSize = 13 * kApiSystemPointerSize;
+  static const int kBuiltinTier0EntryTableSize = 10 * kApiSystemPointerSize;
+  static const int kBuiltinTier0TableSize = 10 * kApiSystemPointerSize;
 
   // IsolateData layout guarantees.
   static const int kIsolateCageBaseOffset = 0;
@@ -250,10 +390,10 @@ class Internals {
       kIsolateLongTaskStatsCounterOffset + kApiSizetSize;
 
   static const int kExternalPointerTableBufferOffset = 0;
-  static const int kExternalPointerTableLengthOffset =
-      kExternalPointerTableBufferOffset + kApiSystemPointerSize;
   static const int kExternalPointerTableCapacityOffset =
-      kExternalPointerTableLengthOffset + kApiInt32Size;
+      kExternalPointerTableBufferOffset + kApiSystemPointerSize;
+  static const int kExternalPointerTableFreelistHeadOffset =
+      kExternalPointerTableCapacityOffset + kApiInt32Size;
 
   static const int kUndefinedValueRootIndex = 4;
   static const int kTheHoleValueRootIndex = 5;
@@ -268,9 +408,9 @@ class Internals {
   static const int kNodeStateIsWeakValue = 2;
   static const int kNodeStateIsPendingValue = 3;
 
-  static const int kFirstNonstringType = 0x40;
-  static const int kOddballType = 0x43;
-  static const int kForeignType = 0x46;
+  static const int kFirstNonstringType = 0x80;
+  static const int kOddballType = 0x83;
+  static const int kForeignType = 0xcc;
   static const int kJSSpecialApiObjectType = 0x410;
   static const int kJSObjectType = 0x421;
   static const int kFirstJSApiObjectType = 0x422;
@@ -337,7 +477,7 @@ class Internals {
   }
 
   V8_INLINE static bool IsExternalTwoByteString(int instance_type) {
-    int representation = (instance_type & kFullStringRepresentationMask);
+    int representation = (instance_type & kStringRepresentationAndEncodingMask);
     return representation == kExternalTwoByteRepresentationTag;
   }
 
@@ -432,9 +572,9 @@ class Internals {
 #endif
   }
 
-  V8_INLINE static internal::Isolate* GetIsolateForHeapSandbox(
+  V8_INLINE static internal::Isolate* GetIsolateForSandbox(
       internal::Address obj) {
-#ifdef V8_HEAP_SANDBOX
+#ifdef V8_SANDBOXED_EXTERNAL_POINTERS
     return internal::IsolateFromNeverReadOnlySpaceObject(obj);
 #else
     // Not used in non-sandbox mode.
@@ -445,7 +585,7 @@ class Internals {
   V8_INLINE static Address DecodeExternalPointer(
       const Isolate* isolate, ExternalPointer_t encoded_pointer,
       ExternalPointerTag tag) {
-#ifdef V8_HEAP_SANDBOX
+#ifdef V8_SANDBOXED_EXTERNAL_POINTERS
     return internal::DecodeExternalPointerImpl(isolate, encoded_pointer, tag);
 #else
     return encoded_pointer;
@@ -455,7 +595,7 @@ class Internals {
   V8_INLINE static internal::Address ReadExternalPointerField(
       internal::Isolate* isolate, internal::Address heap_object_ptr, int offset,
       ExternalPointerTag tag) {
-#ifdef V8_HEAP_SANDBOX
+#ifdef V8_SANDBOXED_EXTERNAL_POINTERS
     internal::ExternalPointer_t encoded_value =
         ReadRawField<uint32_t>(heap_object_ptr, offset);
     // We currently have to treat zero as nullptr in embedder slots.
@@ -467,10 +607,6 @@ class Internals {
   }
 
 #ifdef V8_COMPRESS_POINTERS
-  // See v8:7703 or src/ptr-compr.* for details about pointer compression.
-  static constexpr size_t kPtrComprCageReservationSize = size_t{1} << 32;
-  static constexpr size_t kPtrComprCageBaseAlignment = size_t{1} << 32;
-
   V8_INLINE static internal::Address GetPtrComprCageBaseFromOnHeapAddress(
       internal::Address addr) {
     return addr & -static_cast<intptr_t>(kPtrComprCageBaseAlignment);
@@ -486,59 +622,6 @@ class Internals {
 #endif  // V8_COMPRESS_POINTERS
 };
 
-constexpr bool VirtualMemoryCageIsEnabled() {
-#ifdef V8_VIRTUAL_MEMORY_CAGE
-  return true;
-#else
-  return false;
-#endif
-}
-
-#ifdef V8_VIRTUAL_MEMORY_CAGE
-// Size of the virtual memory cage, excluding the guard regions surrounding it.
-constexpr size_t kVirtualMemoryCageSize = size_t{1} << 40;  // 1 TB
-
-static_assert(kVirtualMemoryCageSize > Internals::kPtrComprCageReservationSize,
-              "The virtual memory cage must be larger than the pointer "
-              "compression cage contained within it.");
-
-// Required alignment of the virtual memory cage. For simplicity, we require the
-// size of the guard regions to be a multiple of this, so that this specifies
-// the alignment of the cage including and excluding surrounding guard regions.
-// The alignment requirement is due to the pointer compression cage being
-// located at the start of the virtual memory cage.
-constexpr size_t kVirtualMemoryCageAlignment =
-    Internals::kPtrComprCageBaseAlignment;
-
-// Size of the guard regions surrounding the virtual memory cage. This assumes a
-// worst-case scenario of a 32-bit unsigned index being used to access an array
-// of 64-bit values.
-constexpr size_t kVirtualMemoryCageGuardRegionSize = size_t{32} << 30;  // 32 GB
-
-static_assert((kVirtualMemoryCageGuardRegionSize %
-               kVirtualMemoryCageAlignment) == 0,
-              "The size of the virtual memory cage guard region must be a "
-              "multiple of its required alignment.");
-
-// Minimum size of the virtual memory cage, excluding the guard regions
-// surrounding it. If the cage reservation fails, its size is currently halved
-// until either the reservation succeeds or the minimum size is reached. A
-// minimum of 32GB allows the 4GB pointer compression region as well as the
-// ArrayBuffer partition and two 10GB WASM memory cages to fit into the cage.
-constexpr size_t kVirtualMemoryCageMinimumSize = size_t{32} << 30;  // 32 GB
-
-// For now, even if the virtual memory cage is enabled, we still allow backing
-// stores to be allocated outside of it as fallback. This will simplify the
-// initial rollout. However, if the heap sandbox is also enabled, we already use
-// the "enforcing mode" of the virtual memory cage. This is useful for testing.
-#ifdef V8_HEAP_SANDBOX
-constexpr bool kAllowBackingStoresOutsideCage = false;
-#else
-constexpr bool kAllowBackingStoresOutsideCage = true;
-#endif  // V8_HEAP_SANDBOX
-
-#endif  // V8_VIRTUAL_MEMORY_CAGE
-
 // Only perform cast check for types derived from v8::Data since
 // other types do not implement the Cast method.
 template <bool PerformCheck>
@@ -567,6 +650,10 @@ V8_INLINE void PerformCastCheck(T* data) {
 // how static casts work with std::shared_ptr.
 class BackingStoreBase {};
 
+// The maximum value in enum GarbageCollectionReason, defined in heap.h.
+// This is needed for histograms sampling garbage collection reasons.
+constexpr int kGarbageCollectionReasonMaxValue = 25;
+
 }  // namespace internal
 
 }  // namespace v8