libv8-node 23.6.1.0-arm64-darwin → 24.1.0.0-arm64-darwin

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (69)
  1. checksums.yaml +4 -4
  2. data/lib/libv8/node/version.rb +3 -3
  3. data/vendor/v8/arm64-darwin/libv8/obj/libv8_monolith.a +0 -0
  4. data/vendor/v8/include/cppgc/allocation.h +1 -2
  5. data/vendor/v8/include/cppgc/default-platform.h +3 -2
  6. data/vendor/v8/include/cppgc/heap-consistency.h +1 -1
  7. data/vendor/v8/include/cppgc/internal/api-constants.h +0 -17
  8. data/vendor/v8/include/cppgc/internal/base-page-handle.h +2 -4
  9. data/vendor/v8/include/cppgc/internal/caged-heap-local-data.h +0 -4
  10. data/vendor/v8/include/cppgc/internal/caged-heap.h +0 -4
  11. data/vendor/v8/include/cppgc/internal/conditional-stack-allocated.h +41 -0
  12. data/vendor/v8/include/cppgc/internal/logging.h +3 -3
  13. data/vendor/v8/include/cppgc/internal/member-storage.h +63 -20
  14. data/vendor/v8/include/cppgc/internal/persistent-node.h +8 -3
  15. data/vendor/v8/include/cppgc/internal/pointer-policies.h +48 -11
  16. data/vendor/v8/include/cppgc/macros.h +21 -0
  17. data/vendor/v8/include/cppgc/member.h +70 -36
  18. data/vendor/v8/include/cppgc/name-provider.h +3 -0
  19. data/vendor/v8/include/cppgc/platform.h +11 -0
  20. data/vendor/v8/include/cppgc/type-traits.h +1 -0
  21. data/vendor/v8/include/cppgc/visitor.h +25 -1
  22. data/vendor/v8/include/libplatform/libplatform-export.h +2 -2
  23. data/vendor/v8/include/libplatform/v8-tracing.h +0 -1
  24. data/vendor/v8/include/v8-array-buffer.h +111 -34
  25. data/vendor/v8/include/v8-callbacks.h +84 -26
  26. data/vendor/v8/include/v8-context.h +7 -6
  27. data/vendor/v8/include/v8-cppgc.h +2 -1
  28. data/vendor/v8/include/v8-data.h +5 -0
  29. data/vendor/v8/include/v8-debug.h +11 -0
  30. data/vendor/v8/include/v8-embedder-heap.h +1 -32
  31. data/vendor/v8/include/v8-exception.h +2 -0
  32. data/vendor/v8/include/v8-external-memory-accounter.h +60 -0
  33. data/vendor/v8/include/v8-fast-api-calls.h +17 -175
  34. data/vendor/v8/include/v8-function-callback.h +4 -33
  35. data/vendor/v8/include/v8-function.h +7 -0
  36. data/vendor/v8/include/v8-handle-base.h +18 -0
  37. data/vendor/v8/include/v8-initialization.h +9 -1
  38. data/vendor/v8/include/v8-inspector.h +8 -4
  39. data/vendor/v8/include/v8-internal.h +477 -399
  40. data/vendor/v8/include/v8-isolate.h +218 -151
  41. data/vendor/v8/include/v8-local-handle.h +56 -28
  42. data/vendor/v8/include/v8-maybe.h +2 -1
  43. data/vendor/v8/include/v8-memory-span.h +149 -24
  44. data/vendor/v8/include/v8-message.h +9 -1
  45. data/vendor/v8/include/v8-metrics.h +10 -0
  46. data/vendor/v8/include/v8-object.h +7 -2
  47. data/vendor/v8/include/v8-persistent-handle.h +17 -17
  48. data/vendor/v8/include/v8-platform.h +48 -13
  49. data/vendor/v8/include/v8-primitive.h +131 -6
  50. data/vendor/v8/include/v8-profiler.h +13 -1
  51. data/vendor/v8/include/v8-proxy.h +0 -1
  52. data/vendor/v8/include/v8-regexp.h +0 -1
  53. data/vendor/v8/include/v8-sandbox.h +3 -3
  54. data/vendor/v8/include/v8-script.h +21 -3
  55. data/vendor/v8/include/v8-source-location.h +6 -1
  56. data/vendor/v8/include/v8-template.h +8 -2
  57. data/vendor/v8/include/v8-trace-categories.h +23 -0
  58. data/vendor/v8/include/v8-traced-handle.h +16 -17
  59. data/vendor/v8/include/v8-typed-array.h +6 -10
  60. data/vendor/v8/include/v8-unwinder-state.h +2 -3
  61. data/vendor/v8/include/v8-value-serializer-version.h +3 -3
  62. data/vendor/v8/include/v8-value.h +18 -0
  63. data/vendor/v8/include/v8-version.h +4 -4
  64. data/vendor/v8/include/v8-wasm.h +24 -0
  65. data/vendor/v8/include/v8-weak-callback-info.h +20 -12
  66. data/vendor/v8/include/v8.h +3 -3
  67. data/vendor/v8/include/v8config.h +34 -40
  68. metadata +6 -7
  69. data/vendor/v8/include/cppgc/ephemeron-pair.h +0 -30
checksums.yaml CHANGED
@@ -1,7 +1,7 @@
 ---
 SHA256:
-  metadata.gz: 0ecb66b9509c4022e882f978c66fd58864dc1e5413c75fde01d82540efcfc426
-  data.tar.gz: 5ee1e4d9ea7c8eb69eef7904bc5f6cc50ccf2b9ab49b11fc96ebddea2db652a9
+  metadata.gz: ec1dd8c6f19d036ab55fa59d95cfea2a3a62ef0b1d1a0826b534a76236dffa06
+  data.tar.gz: cfef402adc7ed89ec6120f9899e332e3d9ef0aac46100b584cfb455c06725572
 SHA512:
-  metadata.gz: f80230488055479ea203b3632150691e60bd03e7a12be752f3a78f74bfb8eb4f2b6e1b9b2af3ba18c6b9f70d14a0ff3feba65101bf36c1c88c5970b42f9c4930
-  data.tar.gz: e7feefd5ca78f9c539dab694310d44e198576f1477c0f8f24caccf2681e2c8659b7dbc3f3e71c12868c51119245416df4c5ce88591c132c8afefb9c3eca2f3f6
+  metadata.gz: 27f844bd2b8f8739dbd8d525c7068770cc624d6018bbd5caa6f13f9a38d97880f144ecbd2da5bca525d6785ed2c2f6de64ae6934d07965537d239f7d52780175
+  data.tar.gz: 63a4cd73e71c336d1991e392f51f6ed1c574b6b33672e080ed8fb35febe43ca844d2d9f248a313bd6639745f45703093f24853b367b8aa26edeb7e7dd44db618
data/lib/libv8/node/version.rb CHANGED
@@ -4,7 +4,7 @@ module Libv8
 end
 
 module Libv8::Node
-  VERSION = '23.6.1.0'
-  NODE_VERSION = '23.6.1'
-  LIBV8_VERSION = '12.9.202.28' # from src/node-.../deps/v8/include/v8-version.h
+  VERSION = '24.1.0.0'
+  NODE_VERSION = '24.1.0'
+  LIBV8_VERSION = '13.6.233.10' # from src/node-.../deps/v8/include/v8-version.h
 end
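The LIBV8_VERSION constant mirrors the V8_MAJOR_VERSION/V8_MINOR_VERSION/V8_BUILD_NUMBER/V8_PATCH_LEVEL macros in the bundled v8-version.h (file 63 in this diff). A minimal sanity check, assuming the gem's vendored include directory is on the compiler's include path:

    // check_version.cc - prints the V8 version baked into these headers
    #include <cstdio>
    #include "v8-version.h"

    int main() {
      // Expected output for libv8-node 24.1.0.0: 13.6.233.10
      std::printf("%d.%d.%d.%d\n", V8_MAJOR_VERSION, V8_MINOR_VERSION,
                  V8_BUILD_NUMBER, V8_PATCH_LEVEL);
      return 0;
    }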
data/vendor/v8/include/cppgc/allocation.h CHANGED
@@ -44,8 +44,7 @@ class AllocationHandle;
 
 namespace internal {
 
-// Similar to C++17 std::align_val_t;
-enum class AlignVal : size_t {};
+using AlignVal = std::align_val_t;
 
 class MakeGarbageCollectedTraitInternal {
  protected:
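AlignVal is now a true alias of std::align_val_t rather than a lookalike enum, so the allocation path forwards standard alignment values directly. A sketch of the case this type serves, assuming a heap and allocation handle are already set up (MakeGarbageCollected and GarbageCollected are public cppgc API):

    #include "cppgc/allocation.h"
    #include "cppgc/garbage-collected.h"

    // An over-aligned managed type: MakeGarbageCollected() picks up
    // alignof(Vectorized) and carries it internally as AlignVal.
    struct alignas(32) Vectorized final
        : public cppgc::GarbageCollected<Vectorized> {
      void Trace(cppgc::Visitor*) const {}
      float lanes[8];
    };

    // Vectorized* v =
    //     cppgc::MakeGarbageCollected<Vectorized>(allocation_handle);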
data/vendor/v8/include/cppgc/default-platform.h CHANGED
@@ -37,11 +37,12 @@ class V8_EXPORT DefaultPlatform : public Platform {
     return v8_platform_->MonotonicallyIncreasingTime();
   }
 
-  std::shared_ptr<cppgc::TaskRunner> GetForegroundTaskRunner() override {
+  std::shared_ptr<cppgc::TaskRunner> GetForegroundTaskRunner(
+      TaskPriority priority) override {
     // V8's default platform creates a new task runner when passed the
     // `v8::Isolate` pointer the first time. For non-default platforms this will
     // require getting the appropriate task runner.
-    return v8_platform_->GetForegroundTaskRunner(kNoIsolate);
+    return v8_platform_->GetForegroundTaskRunner(kNoIsolate, priority);
   }
 
   std::unique_ptr<cppgc::JobHandle> PostJob(
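GetForegroundTaskRunner() now takes a TaskPriority, matching the wider platform API change in this release (see v8-platform.h, file 48). A caller-side sketch, assuming `platform` is a cppgc::DefaultPlatform and `task` is a cppgc::Task; the priority names are those defined in v8-platform.h:

    // Request a runner for a specific priority instead of the old
    // priority-less overload.
    std::shared_ptr<cppgc::TaskRunner> runner =
        platform->GetForegroundTaskRunner(cppgc::TaskPriority::kUserBlocking);
    runner->PostTask(std::move(task));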
data/vendor/v8/include/cppgc/heap-consistency.h CHANGED
@@ -114,7 +114,7 @@ class HeapConsistency final {
    * has not yet been processed.
    *
    * \param params The parameters retrieved from `GetWriteBarrierType()`.
-   * \param object The pointer to the object. May be an interior pointer to a
+   * \param object The pointer to the object. May be an interior pointer to
    * an interface of the actual object.
    */
   static V8_INLINE void DijkstraWriteBarrier(const WriteBarrierParams& params,
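For context, this is the manual-barrier entry point embedders use when writing managed pointers outside of Member/Persistent. A usage sketch with the public cppgc::subtle API, assuming `slot` and `value` are the written field and the pointee:

    cppgc::subtle::HeapConsistency::WriteBarrierParams params;
    const auto type = cppgc::subtle::HeapConsistency::GetWriteBarrierType(
        slot, value, params);
    if (type == cppgc::subtle::HeapConsistency::WriteBarrierType::kMarking) {
      cppgc::subtle::HeapConsistency::DijkstraWriteBarrier(params, value);
    }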
data/vendor/v8/include/cppgc/internal/api-constants.h CHANGED
@@ -33,16 +33,6 @@ static constexpr uint16_t kFullyConstructedBitMask = uint16_t{1};
 static constexpr size_t kPageSizeBits = 17;
 static constexpr size_t kPageSize = size_t{1} << kPageSizeBits;
 
-#if defined(V8_HOST_ARCH_ARM64) && defined(V8_OS_DARWIN)
-constexpr size_t kGuardPageSize = 0;
-#elif defined(V8_HOST_ARCH_PPC64)
-constexpr size_t kGuardPageSize = 0;
-#elif defined(V8_HOST_ARCH_LOONG64) || defined(V8_HOST_ARCH_MIPS64)
-constexpr size_t kGuardPageSize = 0;
-#else
-constexpr size_t kGuardPageSize = 4096;
-#endif
-
 static constexpr size_t kLargeObjectSizeThreshold = kPageSize / 2;
 
 #if defined(CPPGC_POINTER_COMPRESSION)
@@ -54,12 +44,6 @@ constexpr unsigned kPointerCompressionShift = 1;
 #endif  // !defined(CPPGC_POINTER_COMPRESSION)
 
 #if defined(CPPGC_CAGED_HEAP)
-#if defined(CPPGC_2GB_CAGE)
-constexpr size_t kCagedHeapDefaultReservationSize =
-    static_cast<size_t>(2) * kGB;
-constexpr size_t kCagedHeapMaxReservationSize =
-    kCagedHeapDefaultReservationSize;
-#else  // !defined(CPPGC_2GB_CAGE)
 constexpr size_t kCagedHeapDefaultReservationSize =
     static_cast<size_t>(4) * kGB;
 #if defined(CPPGC_POINTER_COMPRESSION)
@@ -69,7 +53,6 @@ constexpr size_t kCagedHeapMaxReservationSize =
 constexpr size_t kCagedHeapMaxReservationSize =
     kCagedHeapDefaultReservationSize;
 #endif  // !defined(CPPGC_POINTER_COMPRESSION)
-#endif  // !defined(CPPGC_2GB_CAGE)
 constexpr size_t kCagedHeapReservationAlignment = kCagedHeapMaxReservationSize;
 #endif  // defined(CPPGC_CAGED_HEAP)
 
data/vendor/v8/include/cppgc/internal/base-page-handle.h CHANGED
@@ -19,9 +19,7 @@ class BasePageHandle {
  public:
   static V8_INLINE BasePageHandle* FromPayload(void* payload) {
     return reinterpret_cast<BasePageHandle*>(
-        (reinterpret_cast<uintptr_t>(payload) &
-         ~(api_constants::kPageSize - 1)) +
-        api_constants::kGuardPageSize);
+        reinterpret_cast<uintptr_t>(payload) & ~(api_constants::kPageSize - 1));
   }
   static V8_INLINE const BasePageHandle* FromPayload(const void* payload) {
     return FromPayload(const_cast<void*>(payload));
@@ -33,7 +31,7 @@ class BasePageHandle {
  protected:
   explicit BasePageHandle(HeapHandle& heap_handle) : heap_handle_(heap_handle) {
     CPPGC_DCHECK(reinterpret_cast<uintptr_t>(this) % api_constants::kPageSize ==
-                 api_constants::kGuardPageSize);
+                 0);
   }
 
   HeapHandle& heap_handle_;
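With kGuardPageSize removed, the page header sits exactly at the page start and FromPayload() reduces to a power-of-two mask. A worked check of the arithmetic (kPageSize = 1 << 17, i.e. 128 KiB; the payload address is an arbitrary example):

    #include <cstdint>

    constexpr std::uintptr_t kPageSize = std::uintptr_t{1} << 17;
    constexpr std::uintptr_t kPayload = 0x00007f3a456789ab;
    // Clearing the low 17 bits yields the page start, which is now also the
    // BasePageHandle address (previously offset by a 4096-byte guard page on
    // most platforms).
    static_assert((kPayload & ~(kPageSize - 1)) == 0x00007f3a45660000);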
data/vendor/v8/include/cppgc/internal/caged-heap-local-data.h CHANGED
@@ -77,11 +77,7 @@ class V8_EXPORT AgeTable final {
       __builtin_ctz(static_cast<uint32_t>(kCardSizeInBytes));
 #else   //! V8_HAS_BUILTIN_CTZ
       // Hardcode and check with assert.
-#if defined(CPPGC_2GB_CAGE)
-      11;
-#else   // !defined(CPPGC_2GB_CAGE)
       12;
-#endif  // !defined(CPPGC_2GB_CAGE)
 #endif  // !V8_HAS_BUILTIN_CTZ
   static_assert((1 << kGranularityBits) == kCardSizeInBytes);
   const size_t entry = offset >> kGranularityBits;
data/vendor/v8/include/cppgc/internal/caged-heap.h CHANGED
@@ -32,16 +32,12 @@ class V8_EXPORT CagedHeapBase {
   }
 
   V8_INLINE static bool AreWithinCage(const void* addr1, const void* addr2) {
-#if defined(CPPGC_2GB_CAGE)
-    static constexpr size_t kHeapBaseShift = sizeof(uint32_t) * CHAR_BIT - 1;
-#else   //! defined(CPPGC_2GB_CAGE)
 #if defined(CPPGC_POINTER_COMPRESSION)
     static constexpr size_t kHeapBaseShift =
         31 + api_constants::kPointerCompressionShift;
 #else   // !defined(CPPGC_POINTER_COMPRESSION)
     static constexpr size_t kHeapBaseShift = sizeof(uint32_t) * CHAR_BIT;
 #endif  // !defined(CPPGC_POINTER_COMPRESSION)
-#endif  //! defined(CPPGC_2GB_CAGE)
     static_assert((static_cast<size_t>(1) << kHeapBaseShift) ==
                   api_constants::kCagedHeapMaxReservationSize);
     CPPGC_DCHECK(g_heap_base_);
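Dropping the CPPGC_2GB_CAGE branch leaves the shift arithmetic depending only on pointer compression. A worked check using the constants from api-constants.h above (compression shift of 1 when CPPGC_POINTER_COMPRESSION is enabled):

    #include <cstddef>

    constexpr std::size_t kGB = std::size_t{1} << 30;
    constexpr std::size_t kPointerCompressionShift = 1;  // per api-constants.h
    constexpr std::size_t kHeapBaseShift = 31 + kPointerCompressionShift;
    // 1 << 32 == 4 GB, matching kCagedHeapMaxReservationSize, so shifting an
    // address right by kHeapBaseShift isolates the cage-base bits.
    static_assert((std::size_t{1} << kHeapBaseShift) == 4 * kGB);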
data/vendor/v8/include/cppgc/internal/conditional-stack-allocated.h ADDED
@@ -0,0 +1,41 @@
+// Copyright 2025 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef INCLUDE_CPPGC_INTERNAL_CONDITIONAL_STACK_ALLOCATED_H_
+#define INCLUDE_CPPGC_INTERNAL_CONDITIONAL_STACK_ALLOCATED_H_
+
+#include <type_traits>
+
+#include "cppgc/macros.h"       // NOLINT(build/include_directory)
+#include "cppgc/type-traits.h"  // NOLINT(build/include_directory)
+
+namespace cppgc {
+namespace internal {
+
+// Base class that is marked as stack allocated if T is either marked as stack
+// allocated or a traceable type.
+template <typename T>
+class ConditionalStackAllocatedBase;
+
+template <typename T>
+concept RequiresStackAllocated =
+    !std::is_void_v<T> &&
+    (cppgc::IsStackAllocatedType<T> || cppgc::internal::IsTraceableV<T> ||
+     cppgc::IsGarbageCollectedOrMixinTypeV<T>);
+
+template <typename T>
+  requires(RequiresStackAllocated<T>)
+class ConditionalStackAllocatedBase<T> {
+ public:
+  CPPGC_STACK_ALLOCATED();
+};
+
+template <typename T>
+  requires(!RequiresStackAllocated<T>)
+class ConditionalStackAllocatedBase<T> {};
+
+}  // namespace internal
+}  // namespace cppgc
+
+#endif  // INCLUDE_CPPGC_INTERNAL_CONDITIONAL_STACK_ALLOCATED_H_
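This header is new in 24.1.0.0. A hedged illustration of the intended pattern (the container type here is hypothetical, not part of the package): a generic class inherits the base and becomes stack-allocated exactly when its parameter type requires it.

    #include "cppgc/internal/conditional-stack-allocated.h"

    template <typename T>
    class InlinedStorage
        : public cppgc::internal::ConditionalStackAllocatedBase<T> {
      // If T is stack-allocated, traceable, or garbage-collected, the base
      // specialization injects CPPGC_STACK_ALLOCATED() and heap `new` is
      // deleted for InlinedStorage<T>; otherwise the base is empty and the
      // type remains heap-allocatable.
    };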
data/vendor/v8/include/cppgc/internal/logging.h CHANGED
@@ -20,18 +20,18 @@ FatalImpl(const char*, const SourceLocation& = SourceLocation::Current());
 template <typename>
 struct EatParams {};
 
-#if defined(DEBUG)
+#ifdef CPPGC_ENABLE_API_CHECKS
 #define CPPGC_DCHECK_MSG(condition, message)  \
   do {                                        \
     if (V8_UNLIKELY(!(condition))) {          \
       ::cppgc::internal::DCheckImpl(message); \
     }                                         \
   } while (false)
-#else  // !defined(DEBUG)
+#else  // !CPPGC_ENABLE_API_CHECKS
 #define CPPGC_DCHECK_MSG(condition, message)                 \
   (static_cast<void>(::cppgc::internal::EatParams<decltype(  \
       static_cast<void>(condition), message)>{}))
-#endif  // !defined(DEBUG)
+#endif  // !CPPGC_ENABLE_API_CHECKS
 
 #define CPPGC_DCHECK(condition) CPPGC_DCHECK_MSG(condition, #condition)
 
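CPPGC_DCHECK is now keyed on CPPGC_ENABLE_API_CHECKS instead of DEBUG, so API checks can be toggled independently of the embedder's debug configuration. Usage is unchanged; a sketch:

    #include "cppgc/internal/logging.h"

    void StoreField(void* slot) {
      // With CPPGC_ENABLE_API_CHECKS defined, a false condition calls
      // DCheckImpl("slot != nullptr"); otherwise the expression is swallowed
      // unevaluated by EatParams.
      CPPGC_DCHECK(slot != nullptr);
      // ...
    }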
data/vendor/v8/include/cppgc/internal/member-storage.h CHANGED
@@ -10,6 +10,7 @@
 #include <type_traits>
 
 #include "cppgc/internal/api-constants.h"
+#include "cppgc/internal/caged-heap.h"
 #include "cppgc/internal/logging.h"
 #include "cppgc/sentinel-pointer.h"
 #include "v8config.h"  // NOLINT(build/include_directory)
@@ -71,11 +72,17 @@ class V8_EXPORT CageBaseGlobal final {
 
 class V8_TRIVIAL_ABI CompressedPointer final {
  public:
+  struct AtomicInitializerTag {};
+
   using IntegralType = uint32_t;
   static constexpr auto kWriteBarrierSlotType =
       WriteBarrierSlotType::kCompressed;
 
   V8_INLINE CompressedPointer() : value_(0u) {}
+  V8_INLINE explicit CompressedPointer(const void* value,
+                                       AtomicInitializerTag) {
+    StoreAtomic(value);
+  }
   V8_INLINE explicit CompressedPointer(const void* ptr)
       : value_(Compress(ptr)) {}
   V8_INLINE explicit CompressedPointer(std::nullptr_t) : value_(0u) {}
@@ -139,17 +146,12 @@ class V8_TRIVIAL_ABI CompressedPointer final {
     CPPGC_DCHECK(
         (reinterpret_cast<uintptr_t>(ptr) & kPointerCompressionShiftMask) == 0);
 
-#if defined(CPPGC_2GB_CAGE)
-    // Truncate the pointer.
-    auto compressed =
-        static_cast<IntegralType>(reinterpret_cast<uintptr_t>(ptr));
-#else  // !defined(CPPGC_2GB_CAGE)
     const auto uptr = reinterpret_cast<uintptr_t>(ptr);
     // Shift the pointer and truncate.
     auto compressed = static_cast<IntegralType>(
         uptr >> api_constants::kPointerCompressionShift);
-#endif  // !defined(CPPGC_2GB_CAGE)
-    // Normal compressed pointers must have the MSB set.
+    // Normal compressed pointers must have the MSB set. This is guaranteed by
+    // the cage alignment.
     CPPGC_DCHECK((!compressed || compressed == kCompressedSentinel) ||
                  (compressed & (1 << 31)));
     return compressed;
@@ -164,43 +166,77 @@ class V8_TRIVIAL_ABI CompressedPointer final {
   static V8_INLINE void* Decompress(IntegralType ptr, uintptr_t base) {
     CPPGC_DCHECK(CageBaseGlobal::IsSet());
     CPPGC_DCHECK(base == CageBaseGlobal::Get());
-    // Treat compressed pointer as signed and cast it to uint64_t, which will
-    // sign-extend it.
-#if defined(CPPGC_2GB_CAGE)
-    const uint64_t mask = static_cast<uint64_t>(static_cast<int32_t>(ptr));
-#else  // !defined(CPPGC_2GB_CAGE)
-    // Then, shift the result. It's important to shift the unsigned
-    // value, as otherwise it would result in undefined behavior.
+    // Sign-extend compressed pointer to full width. This ensure that normal
+    // pointers have only 1s in the base part of the address. It's also
+    // important to shift the unsigned value, as otherwise it would result in
+    // undefined behavior.
     const uint64_t mask = static_cast<uint64_t>(static_cast<int32_t>(ptr))
                           << api_constants::kPointerCompressionShift;
-#endif  // !defined(CPPGC_2GB_CAGE)
+    // Set the base part of the address for normal compressed pointers. Note
+    // that nullptr and the sentinel value do not have 1s in the base part and
+    // remain as-is in this operation.
     return reinterpret_cast<void*>(mask & base);
   }
 
+  // For a given memory `address`, this method iterates all possible pointers
+  // that can be reasonably recovered with the current compression scheme and
+  // passes them to `callback`.
+  template <typename Callback>
+  static V8_INLINE void VisitPossiblePointers(const void* address,
+                                              Callback callback);
+
  private:
-#if defined(CPPGC_2GB_CAGE)
-  static constexpr IntegralType kCompressedSentinel =
-      SentinelPointer::kSentinelValue;
-#else  // !defined(CPPGC_2GB_CAGE)
   static constexpr IntegralType kCompressedSentinel =
       SentinelPointer::kSentinelValue >>
       api_constants::kPointerCompressionShift;
-#endif  // !defined(CPPGC_2GB_CAGE)
   // All constructors initialize `value_`. Do not add a default value here as it
   // results in a non-atomic write on some builds, even when the atomic version
   // of the constructor is used.
   IntegralType value_;
 };
 
+template <typename Callback>
+// static
+void CompressedPointer::VisitPossiblePointers(const void* address,
+                                              Callback callback) {
+  const uintptr_t base = CageBaseGlobal::Get();
+  CPPGC_DCHECK(base);
+  // We may have random compressed pointers on stack (e.g. due to inlined
+  // collections). These could be present in both halfwords.
+  const uint32_t compressed_low =
+      static_cast<uint32_t>(reinterpret_cast<uintptr_t>(address));
+  callback(CompressedPointer::Decompress(compressed_low, base));
+  const uint32_t compressed_high = static_cast<uint32_t>(
+      reinterpret_cast<uintptr_t>(address) >> (sizeof(uint32_t) * CHAR_BIT));
+  callback(CompressedPointer::Decompress(compressed_high, base));
+  // Iterate possible intermediate values, see `Decompress()`. The intermediate
+  // value of decompressing is a 64-bit value where 35 bits are the offset. We
+  // don't assume sign extension is stored and recover that part.
+  //
+  // Note that this case conveniently also recovers the full pointer.
+  static constexpr uintptr_t kBitForIntermediateValue =
+      (sizeof(uint32_t) * CHAR_BIT) + api_constants::kPointerCompressionShift;
+  static constexpr uintptr_t kSignExtensionMask =
+      ~((uintptr_t{1} << kBitForIntermediateValue) - 1);
+  const uintptr_t intermediate_sign_extended =
+      reinterpret_cast<uintptr_t>(address) | kSignExtensionMask;
+  callback(reinterpret_cast<void*>(intermediate_sign_extended & base));
+}
+
 #endif  // defined(CPPGC_POINTER_COMPRESSION)
 
 class V8_TRIVIAL_ABI RawPointer final {
  public:
+  struct AtomicInitializerTag {};
+
   using IntegralType = uintptr_t;
   static constexpr auto kWriteBarrierSlotType =
       WriteBarrierSlotType::kUncompressed;
 
   V8_INLINE RawPointer() : ptr_(nullptr) {}
+  V8_INLINE explicit RawPointer(const void* ptr, AtomicInitializerTag) {
+    StoreAtomic(ptr);
+  }
   V8_INLINE explicit RawPointer(const void* ptr) : ptr_(ptr) {}
 
   V8_INLINE const void* Load() const { return ptr_; }
@@ -243,6 +279,13 @@ class V8_TRIVIAL_ABI RawPointer final {
     return a.ptr_ >= b.ptr_;
   }
 
+  template <typename Callback>
+  static V8_INLINE void VisitPossiblePointers(const void* address,
+                                              Callback callback) {
+    // Pass along the full pointer.
+    return callback(const_cast<void*>(address));
+  }
+
  private:
  // All constructors initialize `ptr_`. Do not add a default value here as it
  // results in a non-atomic write on some builds, even when the atomic version
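Both storage flavors now expose VisitPossiblePointers, giving conservative stack scanning a single entry point that works with and without pointer compression. A caller sketch (internal API; `ReportCandidate` is a hypothetical callback, not part of this package):

    void ReportCandidate(void*);  // hypothetical downstream filter

    void ScanStackWord(const void* word) {
    #if defined(CPPGC_POINTER_COMPRESSION)
      using Storage = cppgc::internal::CompressedPointer;
    #else
      using Storage = cppgc::internal::RawPointer;
    #endif
      // For compressed storage this tries both halfwords plus the
      // sign-extended intermediate value; for raw storage it simply
      // forwards the word.
      Storage::VisitPossiblePointers(
          word, [](void* candidate) { ReportCandidate(candidate); });
    }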
data/vendor/v8/include/cppgc/internal/persistent-node.h CHANGED
@@ -18,6 +18,7 @@ namespace internal {
 
 class CrossThreadPersistentRegion;
 class FatalOutOfMemoryHandler;
+class HeapBase;
 class RootVisitor;
 
 // PersistentNode represents a variant of two states:
@@ -133,10 +134,14 @@ class V8_EXPORT PersistentRegionBase {
 };
 
 // Variant of PersistentRegionBase that checks whether the allocation and
-// freeing happens only on the thread that created the region.
+// freeing happens only on the thread that created the heap.
 class V8_EXPORT PersistentRegion final : public PersistentRegionBase {
  public:
-  explicit PersistentRegion(const FatalOutOfMemoryHandler&);
+  V8_INLINE PersistentRegion(const HeapBase& heap,
+                             const FatalOutOfMemoryHandler& oom_handler)
+      : PersistentRegionBase(oom_handler), heap_(heap) {
+    CPPGC_DCHECK(IsCreationThread());
+  }
   // Clears Persistent fields to avoid stale pointers after heap teardown.
   ~PersistentRegion() = default;
@@ -161,7 +166,7 @@ class V8_EXPORT PersistentRegion final : public PersistentRegionBase {
  private:
   bool IsCreationThread();
 
-  int creation_thread_id_;
+  const HeapBase& heap_;
 };
 
 // CrossThreadPersistent uses PersistentRegionBase but protects it using this
data/vendor/v8/include/cppgc/internal/pointer-policies.h CHANGED
@@ -28,13 +28,19 @@ class WeakMemberTag;
 class UntracedMemberTag;
 
 struct DijkstraWriteBarrierPolicy {
-  V8_INLINE static void InitializingBarrier(const void*, const void*) {
   // Since in initializing writes the source object is always white, having no
   // barrier doesn't break the tri-color invariant.
-  }
+  V8_INLINE static void InitializingBarrier(const void*, const void*) {}
+  V8_INLINE static void InitializingBarrier(const void*, RawPointer storage) {
+  }
+#if defined(CPPGC_POINTER_COMPRESSION)
+  V8_INLINE static void InitializingBarrier(const void*,
+                                            CompressedPointer storage) {}
+#endif
 
-  template <WriteBarrierSlotType SlotType>
-  V8_INLINE static void AssigningBarrier(const void* slot, const void* value) {
+  template <WriteBarrierSlotType SlotType>
+  V8_INLINE static void AssigningBarrier(const void* slot,
+                                         const void* value) {
 #ifdef CPPGC_SLIM_WRITE_BARRIER
     if (V8_UNLIKELY(WriteBarrier::IsEnabled()))
       WriteBarrier::CombinedWriteBarrierSlow<SlotType>(slot);
@@ -44,7 +50,7 @@ struct DijkstraWriteBarrierPolicy {
     WriteBarrier::GetWriteBarrierType(slot, value, params);
     WriteBarrier(type, params, slot, value);
 #endif  // !CPPGC_SLIM_WRITE_BARRIER
-  }
+  }
 
   template <WriteBarrierSlotType SlotType>
   V8_INLINE static void AssigningBarrier(const void* slot, RawPointer storage) {
@@ -101,6 +107,11 @@ struct DijkstraWriteBarrierPolicy {
 
 struct NoWriteBarrierPolicy {
   V8_INLINE static void InitializingBarrier(const void*, const void*) {}
+  V8_INLINE static void InitializingBarrier(const void*, RawPointer storage) {}
+#if defined(CPPGC_POINTER_COMPRESSION)
+  V8_INLINE static void InitializingBarrier(const void*,
+                                            CompressedPointer storage) {}
+#endif
   template <WriteBarrierSlotType>
   V8_INLINE static void AssigningBarrier(const void*, const void*) {}
   template <WriteBarrierSlotType, typename MemberStorage>
@@ -119,10 +130,29 @@ template <bool kCheckOffHeapAssignments>
 class V8_EXPORT SameThreadEnabledCheckingPolicy
     : private SameThreadEnabledCheckingPolicyBase {
  protected:
+  template <typename T>
+  V8_INLINE void CheckPointer(RawPointer raw_pointer) {
+    if (raw_pointer.IsCleared() || raw_pointer.IsSentinel()) {
+      return;
+    }
+    CheckPointersImplTrampoline<T>::Call(
+        this, static_cast<const T*>(raw_pointer.Load()));
+  }
+#if defined(CPPGC_POINTER_COMPRESSION)
+  template <typename T>
+  V8_INLINE void CheckPointer(CompressedPointer compressed_pointer) {
+    if (compressed_pointer.IsCleared() || compressed_pointer.IsSentinel()) {
+      return;
+    }
+    CheckPointersImplTrampoline<T>::Call(
+        this, static_cast<const T*>(compressed_pointer.Load()));
+  }
+#endif
   template <typename T>
   void CheckPointer(const T* ptr) {
-    if (!ptr || (kSentinelPointer == ptr)) return;
-
+    if (!ptr || (kSentinelPointer == ptr)) {
+      return;
+    }
     CheckPointersImplTrampoline<T>::Call(this, ptr);
   }
 
@@ -145,20 +175,27 @@ class V8_EXPORT SameThreadEnabledCheckingPolicy
 
 class DisabledCheckingPolicy {
  protected:
-  V8_INLINE void CheckPointer(const void*) {}
+  template <typename T>
+  V8_INLINE void CheckPointer(T*) {}
+  template <typename T>
+  V8_INLINE void CheckPointer(RawPointer) {}
+#if defined(CPPGC_POINTER_COMPRESSION)
+  template <typename T>
+  V8_INLINE void CheckPointer(CompressedPointer) {}
+#endif
 };
 
-#ifdef DEBUG
+#ifdef CPPGC_ENABLE_SLOW_API_CHECKS
 // Off heap members are not connected to object graph and thus cannot ressurect
 // dead objects.
 using DefaultMemberCheckingPolicy =
     SameThreadEnabledCheckingPolicy<false /* kCheckOffHeapAssignments*/>;
 using DefaultPersistentCheckingPolicy =
    SameThreadEnabledCheckingPolicy<true /* kCheckOffHeapAssignments*/>;
-#else   // !DEBUG
+#else   // !CPPGC_ENABLE_SLOW_API_CHECKS
 using DefaultMemberCheckingPolicy = DisabledCheckingPolicy;
 using DefaultPersistentCheckingPolicy = DisabledCheckingPolicy;
-#endif  // !DEBUG
+#endif  // !CPPGC_ENABLE_SLOW_API_CHECKS
 // For CT(W)P neither marking information (for value), nor objectstart bitmap
 // (for slot) are guaranteed to be present because there's no synchronization
 // between heaps after marking.
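The checking policies now accept the storage types directly (so cleared/sentinel slots bail out before any decompression) and are gated on CPPGC_ENABLE_SLOW_API_CHECKS rather than DEBUG. For orientation, this is the machinery behind ordinary Member writes; a sketch with hypothetical GC'd types:

    #include "cppgc/garbage-collected.h"
    #include "cppgc/member.h"
    #include "cppgc/visitor.h"

    struct Child : cppgc::GarbageCollected<Child> {
      void Trace(cppgc::Visitor*) const {}
    };

    struct Parent : cppgc::GarbageCollected<Parent> {
      // Assigning to child_ runs DefaultMemberCheckingPolicy::CheckPointer
      // (a no-op unless CPPGC_ENABLE_SLOW_API_CHECKS is defined) and then the
      // Dijkstra write barrier from DijkstraWriteBarrierPolicy.
      void set(Child* c) { child_ = c; }
      void Trace(cppgc::Visitor* v) const { v->Trace(child_); }
      cppgc::Member<Child> child_;
    };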
data/vendor/v8/include/cppgc/macros.h CHANGED
@@ -11,10 +11,18 @@
 
 namespace cppgc {
 
+#define CPPGC_DISALLOW_NEW()                                       \
+ public:                                                           \
+  using IsDisallowNewMarker CPPGC_UNUSED = int;                    \
+  void* operator new(size_t, void* location) { return location; } \
+  void* operator new(size_t) = delete;                             \
+  static_assert(true, "Force semicolon.")
+
 // Use CPPGC_STACK_ALLOCATED if the object is only stack allocated.
 // Add the CPPGC_STACK_ALLOCATED_IGNORE annotation on a case-by-case basis when
 // enforcement of CPPGC_STACK_ALLOCATED should be suppressed.
 #if defined(__clang__)
+
 #define CPPGC_STACK_ALLOCATED()                         \
  public:                                                \
   using IsStackAllocatedTypeMarker CPPGC_UNUSED = int;  \
@@ -23,13 +31,26 @@ namespace cppgc {
   void* operator new(size_t) = delete;                  \
   void* operator new(size_t, void*) = delete;           \
   static_assert(true, "Force semicolon.")
+
 #define CPPGC_STACK_ALLOCATED_IGNORE(bug_or_reason) \
   __attribute__((annotate("stack_allocated_ignore")))
+
+#define CPPGC_PLUGIN_IGNORE(bug_or_reason)           \
+  __attribute__((annotate("blink_gc_plugin_ignore"), \
+                 annotate("stack_allocated_ignore")))
+
 #else  // !defined(__clang__)
+
 #define CPPGC_STACK_ALLOCATED() static_assert(true, "Force semicolon.")
 #define CPPGC_STACK_ALLOCATED_IGNORE(bug_or_reason)
+#define CPPGC_PLUGIN_IGNORE(bug_or_reason)
+
 #endif  // !defined(__clang__)
 
+template <typename T>
+concept IsStackAllocatedType =
+    requires { typename T::IsStackAllocatedTypeMarker; };
+
 }  // namespace cppgc
 
 #endif  // INCLUDE_CPPGC_MACROS_H_
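macros.h gains CPPGC_DISALLOW_NEW() (placement new stays available, heap new is deleted), CPPGC_PLUGIN_IGNORE() for suppressing both clang plugin annotations at once, and the IsStackAllocatedType concept that the new conditional-stack-allocated.h builds on. A usage sketch with hypothetical types:

    #include "cppgc/macros.h"

    class EmbeddedPart {
      CPPGC_DISALLOW_NEW();  // may live inside other objects, not on the heap
    };

    class OnStackOnly {
      CPPGC_STACK_ALLOCATED();  // marker + deleted `new` (enforced on clang)
    };

    #if defined(__clang__)
    static_assert(cppgc::IsStackAllocatedType<OnStackOnly>);
    #endif
    static_assert(!cppgc::IsStackAllocatedType<EmbeddedPart>);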