libv8 7.8.279.23.0beta1-x86_64-linux → 8.4.255.0.1-x86_64-linux

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (51)
  1. checksums.yaml +4 -4
  2. data/ext/libv8/location.rb +1 -1
  3. data/lib/libv8/version.rb +1 -1
  4. data/vendor/v8/include/cppgc/allocation.h +124 -0
  5. data/vendor/v8/include/cppgc/garbage-collected.h +192 -0
  6. data/vendor/v8/include/cppgc/heap.h +50 -0
  7. data/vendor/v8/include/cppgc/internal/accessors.h +26 -0
  8. data/vendor/v8/include/cppgc/internal/api-constants.h +44 -0
  9. data/vendor/v8/include/cppgc/internal/compiler-specific.h +26 -0
  10. data/vendor/v8/include/cppgc/internal/finalizer-trait.h +90 -0
  11. data/vendor/v8/include/cppgc/internal/gc-info.h +43 -0
  12. data/vendor/v8/include/cppgc/internal/logging.h +50 -0
  13. data/vendor/v8/include/cppgc/internal/persistent-node.h +109 -0
  14. data/vendor/v8/include/cppgc/internal/pointer-policies.h +133 -0
  15. data/vendor/v8/include/cppgc/internal/prefinalizer-handler.h +31 -0
  16. data/vendor/v8/include/cppgc/liveness-broker.h +50 -0
  17. data/vendor/v8/include/cppgc/macros.h +26 -0
  18. data/vendor/v8/include/cppgc/member.h +206 -0
  19. data/vendor/v8/include/cppgc/persistent.h +304 -0
  20. data/vendor/v8/include/cppgc/platform.h +31 -0
  21. data/vendor/v8/include/cppgc/prefinalizer.h +54 -0
  22. data/vendor/v8/include/cppgc/source-location.h +59 -0
  23. data/vendor/v8/include/cppgc/trace-trait.h +67 -0
  24. data/vendor/v8/include/cppgc/type-traits.h +109 -0
  25. data/vendor/v8/include/cppgc/visitor.h +137 -0
  26. data/vendor/v8/include/libplatform/libplatform.h +13 -10
  27. data/vendor/v8/include/libplatform/v8-tracing.h +36 -22
  28. data/vendor/v8/include/v8-fast-api-calls.h +412 -0
  29. data/vendor/v8/include/v8-inspector-protocol.h +4 -4
  30. data/vendor/v8/include/v8-inspector.h +57 -27
  31. data/vendor/v8/include/v8-internal.h +23 -21
  32. data/vendor/v8/include/v8-platform.h +164 -40
  33. data/vendor/v8/include/v8-profiler.h +27 -23
  34. data/vendor/v8/include/v8-util.h +1 -1
  35. data/vendor/v8/include/v8-version-string.h +1 -1
  36. data/vendor/v8/include/v8-version.h +4 -4
  37. data/vendor/v8/include/v8-wasm-trap-handler-posix.h +1 -1
  38. data/vendor/v8/include/v8-wasm-trap-handler-win.h +1 -1
  39. data/vendor/v8/include/v8.h +1219 -484
  40. data/vendor/v8/include/v8config.h +105 -51
  41. data/vendor/v8/out.gn/libv8/obj/libv8_libbase.a +0 -0
  42. data/vendor/v8/out.gn/libv8/obj/libv8_libplatform.a +0 -0
  43. data/vendor/v8/out.gn/libv8/obj/libv8_monolith.a +0 -0
  44. data/vendor/v8/out.gn/libv8/obj/third_party/icu/libicui18n.a +0 -0
  45. data/vendor/v8/out.gn/libv8/obj/third_party/icu/libicuuc.a +0 -0
  46. data/vendor/v8/out.gn/libv8/obj/third_party/zlib/google/libcompression_utils_portable.a +0 -0
  47. data/vendor/v8/out.gn/libv8/obj/third_party/zlib/libchrome_zlib.a +0 -0
  48. metadata +31 -9
  49. data/vendor/v8/include/v8-testing.h +0 -48
  50. data/vendor/v8/out.gn/libv8/obj/third_party/inspector_protocol/libbindings.a +0 -0
  51. data/vendor/v8/out.gn/libv8/obj/third_party/inspector_protocol/libencoding.a +0 -0
data/vendor/v8/include/v8-internal.h
@@ -10,8 +10,8 @@
 #include <string.h>
 #include <type_traits>

-#include "v8-version.h"  // NOLINT(build/include)
-#include "v8config.h"  // NOLINT(build/include)
+#include "v8-version.h"  // NOLINT(build/include_directory)
+#include "v8config.h"  // NOLINT(build/include_directory)

 namespace v8 {

@@ -106,12 +106,18 @@ const int kApiTaggedSize = kApiInt32Size;
 const int kApiTaggedSize = kApiSystemPointerSize;
 #endif

+constexpr bool PointerCompressionIsEnabled() {
+  return kApiTaggedSize != kApiSystemPointerSize;
+}
+
 #ifdef V8_31BIT_SMIS_ON_64BIT_ARCH
 using PlatformSmiTagging = SmiTagging<kApiInt32Size>;
 #else
 using PlatformSmiTagging = SmiTagging<kApiTaggedSize>;
 #endif

+// TODO(ishell): Consinder adding kSmiShiftBits = kSmiShiftSize + kSmiTagSize
+// since it's used much more often than the inividual constants.
 const int kSmiShiftSize = PlatformSmiTagging::kSmiShiftSize;
 const int kSmiValueSize = PlatformSmiTagging::kSmiValueSize;
 const int kSmiMinValue = static_cast<int>(PlatformSmiTagging::kSmiMinValue);
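
Note: the new PointerCompressionIsEnabled() helper lets header code branch on compression at compile time instead of repeating V8_COMPRESS_POINTERS checks. A minimal standalone sketch of the same logic (the constants here are stand-ins; the real definitions in v8-internal.h derive from V8_COMPRESS_POINTERS and kApiInt32Size):

    #include <cstdio>

    constexpr int kApiSystemPointerSize = sizeof(void*);
    #ifdef V8_COMPRESS_POINTERS
    constexpr int kApiTaggedSize = 4;  // tagged values squeezed to 32 bits
    #else
    constexpr int kApiTaggedSize = kApiSystemPointerSize;
    #endif

    constexpr bool PointerCompressionIsEnabled() {
      return kApiTaggedSize != kApiSystemPointerSize;
    }

    int main() {
      std::printf("pointer compression: %s\n",
                  PointerCompressionIsEnabled() ? "on" : "off");
    }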
@@ -139,12 +145,11 @@ class Internals {
       1 * kApiTaggedSize + 2 * kApiInt32Size;

   static const int kOddballKindOffset = 4 * kApiTaggedSize + kApiDoubleSize;
-  static const int kForeignAddressOffset = kApiTaggedSize;
   static const int kJSObjectHeaderSize = 3 * kApiTaggedSize;
   static const int kFixedArrayHeaderSize = 2 * kApiTaggedSize;
   static const int kEmbedderDataArrayHeaderSize = 2 * kApiTaggedSize;
   static const int kEmbedderDataSlotSize = kApiSystemPointerSize;
-  static const int kNativeContextEmbedderDataOffset = 7 * kApiTaggedSize;
+  static const int kNativeContextEmbedderDataOffset = 6 * kApiTaggedSize;
   static const int kFullStringRepresentationMask = 0x0f;
   static const int kStringEncodingMask = 0x8;
   static const int kExternalTwoByteRepresentationTag = 0x02;
@@ -158,10 +163,10 @@ class Internals {
       kNumIsolateDataSlots * kApiSystemPointerSize;
   static const int kExternalMemoryLimitOffset =
       kExternalMemoryOffset + kApiInt64Size;
-  static const int kExternalMemoryAtLastMarkCompactOffset =
+  static const int kExternalMemoryLowSinceMarkCompactOffset =
       kExternalMemoryLimitOffset + kApiInt64Size;
   static const int kIsolateFastCCallCallerFpOffset =
-      kExternalMemoryAtLastMarkCompactOffset + kApiInt64Size;
+      kExternalMemoryLowSinceMarkCompactOffset + kApiInt64Size;
   static const int kIsolateFastCCallCallerPcOffset =
       kIsolateFastCCallCallerFpOffset + kApiSystemPointerSize;
   static const int kIsolateStackGuardOffset =
@@ -306,9 +311,9 @@ class Internals {
   V8_INLINE static internal::Address ReadTaggedPointerField(
       internal::Address heap_object_ptr, int offset) {
 #ifdef V8_COMPRESS_POINTERS
-    int32_t value = ReadRawField<int32_t>(heap_object_ptr, offset);
+    uint32_t value = ReadRawField<uint32_t>(heap_object_ptr, offset);
     internal::Address root = GetRootFromOnHeapAddress(heap_object_ptr);
-    return root + static_cast<internal::Address>(static_cast<intptr_t>(value));
+    return root + static_cast<internal::Address>(static_cast<uintptr_t>(value));
 #else
     return ReadRawField<internal::Address>(heap_object_ptr, offset);
 #endif
@@ -317,8 +322,8 @@
   V8_INLINE static internal::Address ReadTaggedSignedField(
       internal::Address heap_object_ptr, int offset) {
 #ifdef V8_COMPRESS_POINTERS
-    int32_t value = ReadRawField<int32_t>(heap_object_ptr, offset);
-    return static_cast<internal::Address>(static_cast<intptr_t>(value));
+    uint32_t value = ReadRawField<uint32_t>(heap_object_ptr, offset);
+    return static_cast<internal::Address>(static_cast<uintptr_t>(value));
 #else
     return ReadRawField<internal::Address>(heap_object_ptr, offset);
 #endif
@@ -327,24 +332,17 @@
 #ifdef V8_COMPRESS_POINTERS
   // See v8:7703 or src/ptr-compr.* for details about pointer compression.
   static constexpr size_t kPtrComprHeapReservationSize = size_t{1} << 32;
-  static constexpr size_t kPtrComprIsolateRootBias =
-      kPtrComprHeapReservationSize / 2;
   static constexpr size_t kPtrComprIsolateRootAlignment = size_t{1} << 32;

   V8_INLINE static internal::Address GetRootFromOnHeapAddress(
       internal::Address addr) {
-    return (addr + kPtrComprIsolateRootBias) &
-           -static_cast<intptr_t>(kPtrComprIsolateRootAlignment);
+    return addr & -static_cast<intptr_t>(kPtrComprIsolateRootAlignment);
   }

   V8_INLINE static internal::Address DecompressTaggedAnyField(
-      internal::Address heap_object_ptr, int32_t value) {
-    internal::Address root_mask = static_cast<internal::Address>(
-        -static_cast<intptr_t>(value & kSmiTagMask));
-    internal::Address root_or_zero =
-        root_mask & GetRootFromOnHeapAddress(heap_object_ptr);
-    return root_or_zero +
-           static_cast<internal::Address>(static_cast<intptr_t>(value));
+      internal::Address heap_object_ptr, uint32_t value) {
+    internal::Address root = GetRootFromOnHeapAddress(heap_object_ptr);
+    return root + static_cast<internal::Address>(static_cast<uintptr_t>(value));
   }
 #endif  // V8_COMPRESS_POINTERS
 };
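
Note: the rewrite above drops the old half-reservation root bias: a compressed field is now a plain zero-extended 32-bit offset from the isolate root, and the root is found by rounding any on-heap address down to the 4 GB alignment boundary. A sketch of the new arithmetic with types simplified to uint64_t (names mirror the hunk; this is not the V8 implementation itself):

    #include <cstdint>

    // Round an on-heap address down to the 4 GB-aligned isolate root.
    uint64_t GetRootFromOnHeapAddress(uint64_t addr) {
      return addr & ~uint64_t{0xFFFFFFFF};
    }

    // New scheme: unsigned offset from the root. The old scheme instead
    // sign-extended |value| and biased the root into the middle of the
    // 4 GB reservation.
    uint64_t DecompressTaggedAnyField(uint64_t heap_object_ptr, uint32_t value) {
      return GetRootFromOnHeapAddress(heap_object_ptr) + value;
    }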
@@ -381,6 +379,10 @@ V8_EXPORT internal::Isolate* IsolateFromNeverReadOnlySpaceObject(Address obj);
 // language mode is strict.
 V8_EXPORT bool ShouldThrowOnError(v8::internal::Isolate* isolate);

+// A base class for backing stores, which is needed due to vagaries of
+// how static casts work with std::shared_ptr.
+class BackingStoreBase {};
+
 }  // namespace internal
 }  // namespace v8

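Note: the comment on BackingStoreBase is terse; the "vagaries" are that std::static_pointer_cast can only convert between shared_ptr types related by inheritance, so the public API needs a common base to hand out. A self-contained sketch of the pattern (MyBackingStore is hypothetical; only BackingStoreBase comes from the diff):

    #include <memory>

    class BackingStoreBase {};

    // Hypothetical internal type; the API can traffic in shared_ptr to the
    // base while the implementation recovers the derived type.
    class MyBackingStore : public BackingStoreBase {
     public:
      explicit MyBackingStore(int id) : id_(id) {}
      int id() const { return id_; }

     private:
      int id_;
    };

    int main() {
      std::shared_ptr<BackingStoreBase> base =
          std::make_shared<MyBackingStore>(42);
      // Legal because base really owns a MyBackingStore.
      auto derived = std::static_pointer_cast<MyBackingStore>(base);
      return derived->id() == 42 ? 0 : 1;
    }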
data/vendor/v8/include/v8-platform.h
@@ -11,12 +11,34 @@
 #include <memory>
 #include <string>

-#include "v8config.h"  // NOLINT(build/include)
+#include "v8config.h"  // NOLINT(build/include_directory)

 namespace v8 {

 class Isolate;

+// Valid priorities supported by the task scheduling infrastructure.
+enum class TaskPriority : uint8_t {
+  /**
+   * Best effort tasks are not critical for performance of the application. The
+   * platform implementation should preempt such tasks if higher priority tasks
+   * arrive.
+   */
+  kBestEffort,
+  /**
+   * User visible tasks are long running background tasks that will
+   * improve performance and memory usage of the application upon completion.
+   * Example: background compilation and garbage collection.
+   */
+  kUserVisible,
+  /**
+   * User blocking tasks are highest priority tasks that block the execution
+   * thread (e.g. major garbage collection). They must be finished as soon as
+   * possible.
+   */
+  kUserBlocking,
+};
+
 /**
  * A Task represents a unit of work.
  */
@@ -113,6 +135,82 @@ class TaskRunner {
   TaskRunner& operator=(const TaskRunner&) = delete;
 };

+/**
+ * Delegate that's passed to Job's worker task, providing an entry point to
+ * communicate with the scheduler.
+ */
+class JobDelegate {
+ public:
+  /**
+   * Returns true if this thread should return from the worker task on the
+   * current thread ASAP. Workers should periodically invoke ShouldYield (or
+   * YieldIfNeeded()) as often as is reasonable.
+   */
+  virtual bool ShouldYield() = 0;
+
+  /**
+   * Notifies the scheduler that max concurrency was increased, and the number
+   * of worker should be adjusted accordingly. See Platform::PostJob() for more
+   * details.
+   */
+  virtual void NotifyConcurrencyIncrease() = 0;
+};
+
+/**
+ * Handle returned when posting a Job. Provides methods to control execution of
+ * the posted Job.
+ */
+class JobHandle {
+ public:
+  virtual ~JobHandle() = default;
+
+  /**
+   * Notifies the scheduler that max concurrency was increased, and the number
+   * of worker should be adjusted accordingly. See Platform::PostJob() for more
+   * details.
+   */
+  virtual void NotifyConcurrencyIncrease() = 0;
+
+  /**
+   * Contributes to the job on this thread. Doesn't return until all tasks have
+   * completed and max concurrency becomes 0. When Join() is called and max
+   * concurrency reaches 0, it should not increase again. This also promotes
+   * this Job's priority to be at least as high as the calling thread's
+   * priority.
+   */
+  virtual void Join() = 0;
+
+  /**
+   * Forces all existing workers to yield ASAP. Waits until they have all
+   * returned from the Job's callback before returning.
+   */
+  virtual void Cancel() = 0;
+
+  /**
+   * Returns true if associated with a Job and other methods may be called.
+   * Returns false after Join() or Cancel() was called.
+   */
+  virtual bool IsRunning() = 0;
+};
+
+/**
+ * A JobTask represents work to run in parallel from Platform::PostJob().
+ */
+class JobTask {
+ public:
+  virtual ~JobTask() = default;
+
+  virtual void Run(JobDelegate* delegate) = 0;
+
+  /**
+   * Controls the maximum number of threads calling Run() concurrently. Run() is
+   * only invoked if the number of threads previously running Run() was less
+   * than the value returned. Since GetMaxConcurrency() is a leaf function, it
+   * must not call back any JobHandle methods.
+   */
+  virtual size_t GetMaxConcurrency() const = 0;
+};
+
 /**
  * The interface represents complex arguments to trace events.
  */
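
Note: in the new Job API, JobTask supplies the work and the concurrency cap, JobDelegate lets workers cooperate with the scheduler, and JobHandle controls the posted job. A minimal JobTask sketch against these interfaces (the atomic counter stands in for a real work queue):

    #include <atomic>

    #include "v8-platform.h"

    class CounterJobTask : public v8::JobTask {
     public:
      explicit CounterJobTask(int items) : remaining_(items) {}

      void Run(v8::JobDelegate* delegate) override {
        // Check ShouldYield() between smallest units of work.
        while (!delegate->ShouldYield()) {
          if (remaining_.fetch_sub(1) <= 0) return;  // queue drained
          // ... process one work item ...
        }
      }

      size_t GetMaxConcurrency() const override {
        int left = remaining_.load();
        return left > 0 ? static_cast<size_t>(left) : 0;
      }

     private:
      std::atomic<int> remaining_;
    };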
@@ -138,6 +236,10 @@ class TracingController {
  public:
   virtual ~TracingController() = default;

+  // In Perfetto mode, trace events are written using Perfetto's Track Event
+  // API directly without going through the embedder. However, it is still
+  // possible to observe tracing being enabled and disabled.
+#if !defined(V8_USE_PERFETTO)
   /**
    * Called by TRACE_EVENT* macros, don't call this directly.
    * The name parameter is a category group for example:
@@ -183,6 +285,7 @@ class TracingController {
    **/
   virtual void UpdateTraceEventDuration(const uint8_t* category_enabled_flag,
                                         const char* name, uint64_t handle) {}
+#endif  // !defined(V8_USE_PERFETTO)

   class TraceStateObserver {
    public:
@@ -326,7 +429,8 @@ class Platform {

   /**
    * Returns a TaskRunner which can be used to post a task on the foreground.
-   * This function should only be called from a foreground thread.
+   * The TaskRunner's NonNestableTasksEnabled() must be true. This function
+   * should only be called from a foreground thread.
    */
   virtual std::shared_ptr<v8::TaskRunner> GetForegroundTaskRunner(
       Isolate* isolate) = 0;
@@ -362,47 +466,67 @@ class Platform {
   virtual void CallDelayedOnWorkerThread(std::unique_ptr<Task> task,
                                          double delay_in_seconds) = 0;

-  /**
-   * Schedules a task to be invoked on a foreground thread wrt a specific
-   * |isolate|. Tasks posted for the same isolate should be execute in order of
-   * scheduling. The definition of "foreground" is opaque to V8.
-   */
-  V8_DEPRECATE_SOON(
-      "Use a taskrunner acquired by GetForegroundTaskRunner instead.",
-      virtual void CallOnForegroundThread(Isolate* isolate, Task* task)) = 0;
-
-  /**
-   * Schedules a task to be invoked on a foreground thread wrt a specific
-   * |isolate| after the given number of seconds |delay_in_seconds|.
-   * Tasks posted for the same isolate should be execute in order of
-   * scheduling. The definition of "foreground" is opaque to V8.
-   */
-  V8_DEPRECATE_SOON(
-      "Use a taskrunner acquired by GetForegroundTaskRunner instead.",
-      virtual void CallDelayedOnForegroundThread(Isolate* isolate, Task* task,
-                                                 double delay_in_seconds)) = 0;
-
-  /**
-   * Schedules a task to be invoked on a foreground thread wrt a specific
-   * |isolate| when the embedder is idle.
-   * Requires that SupportsIdleTasks(isolate) is true.
-   * Idle tasks may be reordered relative to other task types and may be
-   * starved for an arbitrarily long time if no idle time is available.
-   * The definition of "foreground" is opaque to V8.
-   */
-  V8_DEPRECATE_SOON(
-      "Use a taskrunner acquired by GetForegroundTaskRunner instead.",
-      virtual void CallIdleOnForegroundThread(Isolate* isolate,
-                                              IdleTask* task)) {
-    // This must be overriden if |IdleTasksEnabled()|.
-    abort();
-  }
-
   /**
    * Returns true if idle tasks are enabled for the given |isolate|.
    */
-  virtual bool IdleTasksEnabled(Isolate* isolate) {
-    return false;
+  virtual bool IdleTasksEnabled(Isolate* isolate) { return false; }
+
+  /**
+   * Posts |job_task| to run in parallel. Returns a JobHandle associated with
+   * the Job, which can be joined or canceled.
+   * This avoids degenerate cases:
+   * - Calling CallOnWorkerThread() for each work item, causing significant
+   *   overhead.
+   * - Fixed number of CallOnWorkerThread() calls that split the work and might
+   *   run for a long time. This is problematic when many components post
+   *   "num cores" tasks and all expect to use all the cores. In these cases,
+   *   the scheduler lacks context to be fair to multiple same-priority requests
+   *   and/or ability to request lower priority work to yield when high priority
+   *   work comes in.
+   * A canonical implementation of |job_task| looks like:
+   * class MyJobTask : public JobTask {
+   *  public:
+   *   MyJobTask(...) : worker_queue_(...) {}
+   *   // JobTask:
+   *   void Run(JobDelegate* delegate) override {
+   *     while (!delegate->ShouldYield()) {
+   *       // Smallest unit of work.
+   *       auto work_item = worker_queue_.TakeWorkItem();  // Thread safe.
+   *       if (!work_item) return;
+   *       ProcessWork(work_item);
+   *     }
+   *   }
+   *
+   *   size_t GetMaxConcurrency() const override {
+   *     return worker_queue_.GetSize();  // Thread safe.
+   *   }
+   * };
+   * auto handle = PostJob(TaskPriority::kUserVisible,
+   *                       std::make_unique<MyJobTask>(...));
+   * handle->Join();
+   *
+   * PostJob() and methods of the returned JobHandle/JobDelegate, must never be
+   * called while holding a lock that could be acquired by JobTask::Run or
+   * JobTask::GetMaxConcurrency -- that could result in a deadlock. This is
+   * because [1] JobTask::GetMaxConcurrency may be invoked while holding
+   * internal lock (A), hence JobTask::GetMaxConcurrency can only use a lock (B)
+   * if that lock is *never* held while calling back into JobHandle from any
+   * thread (A=>B/B=>A deadlock) and [2] JobTask::Run or
+   * JobTask::GetMaxConcurrency may be invoked synchronously from JobHandle
+   * (B=>JobHandle::foo=>B deadlock).
+   *
+   * A sufficient PostJob() implementation that uses the default Job provided in
+   * libplatform looks like:
+   *  std::unique_ptr<JobHandle> PostJob(
+   *      TaskPriority priority, std::unique_ptr<JobTask> job_task) override {
+   *    return std::make_unique<DefaultJobHandle>(
+   *        std::make_shared<DefaultJobState>(
+   *            this, std::move(job_task), kNumThreads));
+   *  }
+   */
+  virtual std::unique_ptr<JobHandle> PostJob(
+      TaskPriority priority, std::unique_ptr<JobTask> job_task) {
+    return nullptr;
   }

   /**
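
Note: the CallOnForegroundThread family is gone in 8.4 (it was deprecation-only in 7.8); the replacement the deprecation strings point at is the isolate's foreground TaskRunner. A migration sketch (platform, isolate, and the tasks are assumed to exist):

    #include <memory>

    #include "v8-platform.h"

    void PostToForeground(v8::Platform* platform, v8::Isolate* isolate,
                          std::unique_ptr<v8::Task> task,
                          std::unique_ptr<v8::IdleTask> idle_task) {
      std::shared_ptr<v8::TaskRunner> runner =
          platform->GetForegroundTaskRunner(isolate);
      runner->PostTask(std::move(task));  // was CallOnForegroundThread()
      if (runner->IdleTasksEnabled()) {
        // was CallIdleOnForegroundThread()
        runner->PostIdleTask(std::move(idle_task));
      }
    }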
data/vendor/v8/include/v8-profiler.h
@@ -6,9 +6,11 @@
 #define V8_V8_PROFILER_H_

 #include <limits.h>
+#include <memory>
 #include <unordered_set>
 #include <vector>
-#include "v8.h"  // NOLINT(build/include)
+
+#include "v8.h"  // NOLINT(build/include_directory)

 /**
  * Profiler support for the V8 JavaScript engine.
@@ -142,11 +144,6 @@ class V8_EXPORT CpuProfileNode {
    */
   unsigned GetHitCount() const;

-  /** Returns function entry UID. */
-  V8_DEPRECATE_SOON(
-      "Use GetScriptId, GetLineNumber, and GetColumnNumber instead.",
-      unsigned GetCallUid() const);
-
   /** Returns id of the node. The id is unique within the tree */
   unsigned GetNodeId() const;

@@ -370,20 +367,6 @@ class V8_EXPORT CpuProfiler {
    */
   CpuProfile* StopProfiling(Local<String> title);

-  /**
-   * Force collection of a sample. Must be called on the VM thread.
-   * Recording the forced sample does not contribute to the aggregated
-   * profile statistics.
-   */
-  V8_DEPRECATED("Use static CollectSample(Isolate*) instead.",
-                void CollectSample());
-
-  /**
-   * Tells the profiler whether the embedder is idle.
-   */
-  V8_DEPRECATED("Use Isolate::SetIdle(bool) instead.",
-                void SetIdle(bool is_idle));
-
   /**
    * Generate more detailed source positions to code objects. This results in
    * better results when mapping profiling samples to script source.
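
Note: both removed members name their replacements in their deprecation strings. A migration sketch (isolate assumed live and entered):

    #include "v8-profiler.h"

    void OnEmbedderIdleChanged(v8::Isolate* isolate, bool is_idle) {
      isolate->SetIdle(is_idle);  // replaces CpuProfiler::SetIdle(bool)
    }

    void ForceSample(v8::Isolate* isolate) {
      // Replaces the removed instance method CollectSample().
      v8::CpuProfiler::CollectSample(isolate);
    }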
@@ -861,7 +844,8 @@ class V8_EXPORT HeapProfiler {
    */
   const HeapSnapshot* TakeHeapSnapshot(
       ActivityControl* control = nullptr,
-      ObjectNameResolver* global_object_name_resolver = nullptr);
+      ObjectNameResolver* global_object_name_resolver = nullptr,
+      bool treat_global_objects_as_roots = true);

   /**
    * Starts tracking of heap objects population statistics. After calling
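
Note: the new treat_global_objects_as_roots parameter defaults to true, so existing callers keep the old snapshot shape; opting out looks like this (a sketch; isolate assumed live):

    #include "v8-profiler.h"

    const v8::HeapSnapshot* Snapshot(v8::Isolate* isolate) {
      v8::HeapProfiler* profiler = isolate->GetHeapProfiler();
      return profiler->TakeHeapSnapshot(
          /*control=*/nullptr,
          /*global_object_name_resolver=*/nullptr,
          /*treat_global_objects_as_roots=*/false);
    }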
@@ -989,7 +973,8 @@ struct HeapStatsUpdate {
   V(LazyCompile) \
   V(RegExp)      \
   V(Script)      \
-  V(Stub)
+  V(Stub)        \
+  V(Relocation)

 /**
  * Note that this enum may be extended in the future. Please include a default
@@ -1022,10 +1007,12 @@ class V8_EXPORT CodeEvent {
   const char* GetComment();

   static const char* GetCodeEventTypeName(CodeEventType code_event_type);
+
+  uintptr_t GetPreviousCodeStartAddress();
 };

 /**
- * Interface to listen to code creation events.
+ * Interface to listen to code creation and code relocation events.
  */
 class V8_EXPORT CodeEventHandler {
  public:
@@ -1037,9 +1024,26 @@ class V8_EXPORT CodeEventHandler {
   explicit CodeEventHandler(Isolate* isolate);
   virtual ~CodeEventHandler();

+  /**
+   * Handle is called every time a code object is created or moved. Information
+   * about each code event will be available through the `code_event`
+   * parameter.
+   *
+   * When the CodeEventType is kRelocationType, the code for this CodeEvent has
+   * moved from `GetPreviousCodeStartAddress()` to `GetCodeStartAddress()`.
+   */
   virtual void Handle(CodeEvent* code_event) = 0;

+  /**
+   * Call `Enable()` to starts listening to code creation and code relocation
+   * events. These events will be handled by `Handle()`.
+   */
   void Enable();
+
+  /**
+   * Call `Disable()` to stop listening to code creation and code relocation
+   * events.
+   */
   void Disable();

  private:
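
Note: a minimal handler using the relocation support added above; kRelocationType is the enumerator generated from the new V(Relocation) entry, and GetCodeType() is assumed from the existing CodeEvent API (a sketch, not from the diff):

    #include <cstdio>

    #include "v8-profiler.h"

    class CodeMoveLogger : public v8::CodeEventHandler {
     public:
      explicit CodeMoveLogger(v8::Isolate* isolate)
          : v8::CodeEventHandler(isolate) {}

      void Handle(v8::CodeEvent* event) override {
        if (event->GetCodeType() == v8::kRelocationType) {
          std::printf("code moved: %#zx -> %#zx\n",
                      static_cast<size_t>(event->GetPreviousCodeStartAddress()),
                      static_cast<size_t>(event->GetCodeStartAddress()));
        }
      }
    };

    // Usage: CodeMoveLogger logger(isolate); logger.Enable(); ... logger.Disable();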
data/vendor/v8/include/v8-util.h
@@ -5,7 +5,7 @@
 #ifndef V8_UTIL_H_
 #define V8_UTIL_H_

-#include "v8.h"  // NOLINT(build/include)
+#include "v8.h"  // NOLINT(build/include_directory)
 #include <assert.h>
 #include <map>
 #include <vector>
data/vendor/v8/include/v8-version-string.h
@@ -5,7 +5,7 @@
 #ifndef V8_VERSION_STRING_H_
 #define V8_VERSION_STRING_H_

-#include "v8-version.h"  // NOLINT(build/include)
+#include "v8-version.h"  // NOLINT(build/include_directory)

 // This is here rather than v8-version.h to keep that file simple and
 // machine-processable.
data/vendor/v8/include/v8-version.h
@@ -8,10 +8,10 @@
 // These macros define the version number for the current version.
 // NOTE these macros are used by some of the tool scripts and the build
 // system so their names cannot be changed without changing the scripts.
-#define V8_MAJOR_VERSION 7
-#define V8_MINOR_VERSION 8
-#define V8_BUILD_NUMBER 279
-#define V8_PATCH_LEVEL 23
+#define V8_MAJOR_VERSION 8
+#define V8_MINOR_VERSION 4
+#define V8_BUILD_NUMBER 255
+#define V8_PATCH_LEVEL 0

 // Use 1 for candidates and 0 otherwise.
 // (Boolean macro values are not supported by all preprocessors.)
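
Note: embedders that support several V8 releases typically gate on these macros; for example, the Job API above only exists from 8.4 on (EMBEDDER_HAS_V8_JOB_API is a hypothetical feature macro, not part of the diff):

    #include "v8-version.h"

    #if V8_MAJOR_VERSION > 8 || (V8_MAJOR_VERSION == 8 && V8_MINOR_VERSION >= 4)
    #define EMBEDDER_HAS_V8_JOB_API 1
    #else
    #define EMBEDDER_HAS_V8_JOB_API 0
    #endif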
data/vendor/v8/include/v8-wasm-trap-handler-posix.h
@@ -7,7 +7,7 @@

 #include <signal.h>

-#include "v8config.h"  // NOLINT(build/include)
+#include "v8config.h"  // NOLINT(build/include_directory)

 namespace v8 {
 /**
data/vendor/v8/include/v8-wasm-trap-handler-win.h
@@ -7,7 +7,7 @@

 #include <windows.h>

-#include "v8config.h"  // NOLINT(build/include)
+#include "v8config.h"  // NOLINT(build/include_directory)

 namespace v8 {
 /**