libv8 7.3.492.27.1-universal-darwin-16 → 8.4.255.0-universal-darwin-16
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +4 -4
- data/lib/libv8/version.rb +1 -1
- data/vendor/v8/include/cppgc/allocation.h +124 -0
- data/vendor/v8/include/cppgc/garbage-collected.h +192 -0
- data/vendor/v8/include/cppgc/heap.h +50 -0
- data/vendor/v8/include/cppgc/internal/accessors.h +26 -0
- data/vendor/v8/include/cppgc/internal/api-constants.h +44 -0
- data/vendor/v8/include/cppgc/internal/compiler-specific.h +26 -0
- data/vendor/v8/include/cppgc/internal/finalizer-trait.h +90 -0
- data/vendor/v8/include/cppgc/internal/gc-info.h +43 -0
- data/vendor/v8/include/cppgc/internal/logging.h +50 -0
- data/vendor/v8/include/cppgc/internal/persistent-node.h +109 -0
- data/vendor/v8/include/cppgc/internal/pointer-policies.h +133 -0
- data/vendor/v8/include/cppgc/internal/prefinalizer-handler.h +31 -0
- data/vendor/v8/include/cppgc/liveness-broker.h +50 -0
- data/vendor/v8/include/cppgc/macros.h +26 -0
- data/vendor/v8/include/cppgc/member.h +206 -0
- data/vendor/v8/include/cppgc/persistent.h +304 -0
- data/vendor/v8/include/cppgc/platform.h +31 -0
- data/vendor/v8/include/cppgc/prefinalizer.h +54 -0
- data/vendor/v8/include/cppgc/source-location.h +59 -0
- data/vendor/v8/include/cppgc/trace-trait.h +67 -0
- data/vendor/v8/include/cppgc/type-traits.h +109 -0
- data/vendor/v8/include/cppgc/visitor.h +137 -0
- data/vendor/v8/include/libplatform/libplatform.h +13 -19
- data/vendor/v8/include/libplatform/v8-tracing.h +50 -15
- data/vendor/v8/include/v8-fast-api-calls.h +412 -0
- data/vendor/v8/include/v8-inspector-protocol.h +4 -4
- data/vendor/v8/include/v8-inspector.h +60 -29
- data/vendor/v8/include/v8-internal.h +98 -82
- data/vendor/v8/include/v8-platform.h +181 -42
- data/vendor/v8/include/v8-profiler.h +162 -224
- data/vendor/v8/include/v8-util.h +1 -13
- data/vendor/v8/include/v8-version-string.h +1 -1
- data/vendor/v8/include/v8-version.h +4 -4
- data/vendor/v8/include/v8-wasm-trap-handler-posix.h +1 -1
- data/vendor/v8/include/v8-wasm-trap-handler-win.h +1 -1
- data/vendor/v8/include/v8.h +1990 -611
- data/vendor/v8/include/v8config.h +129 -48
- data/vendor/v8/out.gn/libv8/obj/libv8_libbase.a +0 -0
- data/vendor/v8/out.gn/libv8/obj/libv8_libplatform.a +0 -0
- data/vendor/v8/out.gn/libv8/obj/libv8_monolith.a +0 -0
- data/vendor/v8/out.gn/libv8/obj/third_party/icu/libicui18n.a +0 -0
- data/vendor/v8/out.gn/libv8/obj/third_party/icu/libicuuc.a +0 -0
- data/vendor/v8/out.gn/libv8/obj/third_party/zlib/google/libcompression_utils_portable.a +0 -0
- data/vendor/v8/out.gn/libv8/obj/third_party/zlib/libchrome_zlib.a +0 -0
- metadata +34 -8
- data/vendor/v8/include/v8-testing.h +0 -48
data/vendor/v8/include/v8-platform.h

@@ -11,12 +11,34 @@
 #include <memory>
 #include <string>
 
-#include "v8config.h" // NOLINT(build/
+#include "v8config.h" // NOLINT(build/include_directory)
 
 namespace v8 {
 
 class Isolate;
 
+// Valid priorities supported by the task scheduling infrastructure.
+enum class TaskPriority : uint8_t {
+  /**
+   * Best effort tasks are not critical for performance of the application. The
+   * platform implementation should preempt such tasks if higher priority tasks
+   * arrive.
+   */
+  kBestEffort,
+  /**
+   * User visible tasks are long running background tasks that will
+   * improve performance and memory usage of the application upon completion.
+   * Example: background compilation and garbage collection.
+   */
+  kUserVisible,
+  /**
+   * User blocking tasks are highest priority tasks that block the execution
+   * thread (e.g. major garbage collection). They must be finished as soon as
+   * possible.
+   */
+  kUserBlocking,
+};
+
 /**
  * A Task represents a unit of work.
  */
@@ -70,6 +92,17 @@ class TaskRunner {
   virtual void PostDelayedTask(std::unique_ptr<Task> task,
                                double delay_in_seconds) = 0;
 
+  /**
+   * Schedules a task to be invoked by this TaskRunner. The task is scheduled
+   * after the given number of seconds |delay_in_seconds|. The TaskRunner
+   * implementation takes ownership of |task|. The |task| cannot be nested
+   * within other task executions.
+   *
+   * Requires that |TaskRunner::NonNestableDelayedTasksEnabled()| is true.
+   */
+  virtual void PostNonNestableDelayedTask(std::unique_ptr<Task> task,
+                                          double delay_in_seconds) {}
+
   /**
    * Schedules an idle task to be invoked by this TaskRunner. The task is
    * scheduled when the embedder is idle. Requires that
@@ -90,14 +123,94 @@ class TaskRunner {
    */
   virtual bool NonNestableTasksEnabled() const { return false; }
 
+  /**
+   * Returns true if non-nestable delayed tasks are enabled for this TaskRunner.
+   */
+  virtual bool NonNestableDelayedTasksEnabled() const { return false; }
+
   TaskRunner() = default;
   virtual ~TaskRunner() = default;
 
- private:
   TaskRunner(const TaskRunner&) = delete;
   TaskRunner& operator=(const TaskRunner&) = delete;
 };
 
+/**
+ * Delegate that's passed to Job's worker task, providing an entry point to
+ * communicate with the scheduler.
+ */
+class JobDelegate {
+ public:
+  /**
+   * Returns true if this thread should return from the worker task on the
+   * current thread ASAP. Workers should periodically invoke ShouldYield (or
+   * YieldIfNeeded()) as often as is reasonable.
+   */
+  virtual bool ShouldYield() = 0;
+
+  /**
+   * Notifies the scheduler that max concurrency was increased, and the number
+   * of worker should be adjusted accordingly. See Platform::PostJob() for more
+   * details.
+   */
+  virtual void NotifyConcurrencyIncrease() = 0;
+};
+
+/**
+ * Handle returned when posting a Job. Provides methods to control execution of
+ * the posted Job.
+ */
+class JobHandle {
+ public:
+  virtual ~JobHandle() = default;
+
+  /**
+   * Notifies the scheduler that max concurrency was increased, and the number
+   * of worker should be adjusted accordingly. See Platform::PostJob() for more
+   * details.
+   */
+  virtual void NotifyConcurrencyIncrease() = 0;
+
+  /**
+   * Contributes to the job on this thread. Doesn't return until all tasks have
+   * completed and max concurrency becomes 0. When Join() is called and max
+   * concurrency reaches 0, it should not increase again. This also promotes
+   * this Job's priority to be at least as high as the calling thread's
+   * priority.
+   */
+  virtual void Join() = 0;
+
+  /**
+   * Forces all existing workers to yield ASAP. Waits until they have all
+   * returned from the Job's callback before returning.
+   */
+  virtual void Cancel() = 0;
+
+  /**
+   * Returns true if associated with a Job and other methods may be called.
+   * Returns false after Join() or Cancel() was called.
+   */
+  virtual bool IsRunning() = 0;
+};
+
+/**
+ * A JobTask represents work to run in parallel from Platform::PostJob().
+ */
+class JobTask {
+ public:
+  virtual ~JobTask() = default;
+
+  virtual void Run(JobDelegate* delegate) = 0;
+
+  /**
+   * Controls the maximum number of threads calling Run() concurrently. Run() is
+   * only invoked if the number of threads previously running Run() was less
+   * than the value returned. Since GetMaxConcurrency() is a leaf function, it
+   * must not call back any JobHandle methods.
+   */
+  virtual size_t GetMaxConcurrency() const = 0;
+};
+
 /**
  * The interface represents complex arguments to trace events.
  */
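For orientation, a minimal caller-side sketch of the non-nestable delayed-task path added to TaskRunner above; the wrapper function, the two-second delay, and the fallback choice are illustrative assumptions, not part of the header.

    #include <memory>
    #include "v8-platform.h"

    // Illustrative helper (not part of V8): post |task| so that it never runs
    // nested inside another task, falling back to the ordinary delayed path
    // when the runner does not support non-nestable delayed tasks.
    void PostNonNested(v8::Platform* platform, v8::Isolate* isolate,
                       std::unique_ptr<v8::Task> task) {
      std::shared_ptr<v8::TaskRunner> runner =
          platform->GetForegroundTaskRunner(isolate);
      if (runner->NonNestableDelayedTasksEnabled()) {
        runner->PostNonNestableDelayedTask(std::move(task),
                                           /*delay_in_seconds=*/2.0);
      } else {
        runner->PostDelayedTask(std::move(task), 2.0);
      }
    }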
@@ -123,6 +236,10 @@ class TracingController {
  public:
   virtual ~TracingController() = default;
 
+  // In Perfetto mode, trace events are written using Perfetto's Track Event
+  // API directly without going through the embedder. However, it is still
+  // possible to observe tracing being enabled and disabled.
+#if !defined(V8_USE_PERFETTO)
   /**
    * Called by TRACE_EVENT* macros, don't call this directly.
    * The name parameter is a category group for example:
@@ -168,6 +285,7 @@ class TracingController {
    **/
   virtual void UpdateTraceEventDuration(const uint8_t* category_enabled_flag,
                                         const char* name, uint64_t handle) {}
+#endif  // !defined(V8_USE_PERFETTO)
 
   class TraceStateObserver {
    public:
@@ -311,7 +429,8 @@ class Platform {
 
   /**
    * Returns a TaskRunner which can be used to post a task on the foreground.
-   *
+   * The TaskRunner's NonNestableTasksEnabled() must be true. This function
+   * should only be called from a foreground thread.
    */
   virtual std::shared_ptr<v8::TaskRunner> GetForegroundTaskRunner(
       Isolate* isolate) = 0;
@@ -347,47 +466,67 @@ class Platform {
   virtual void CallDelayedOnWorkerThread(std::unique_ptr<Task> task,
                                          double delay_in_seconds) = 0;
 
-  /**
-   * Schedules a task to be invoked on a foreground thread wrt a specific
-   * |isolate|. Tasks posted for the same isolate should be execute in order of
-   * scheduling. The definition of "foreground" is opaque to V8.
-   */
-  V8_DEPRECATE_SOON(
-      "Use a taskrunner acquired by GetForegroundTaskRunner instead.",
-      virtual void CallOnForegroundThread(Isolate* isolate, Task* task)) = 0;
-
-  /**
-   * Schedules a task to be invoked on a foreground thread wrt a specific
-   * |isolate| after the given number of seconds |delay_in_seconds|.
-   * Tasks posted for the same isolate should be execute in order of
-   * scheduling. The definition of "foreground" is opaque to V8.
-   */
-  V8_DEPRECATE_SOON(
-      "Use a taskrunner acquired by GetForegroundTaskRunner instead.",
-      virtual void CallDelayedOnForegroundThread(Isolate* isolate, Task* task,
-                                                 double delay_in_seconds)) = 0;
-
-  /**
-   * Schedules a task to be invoked on a foreground thread wrt a specific
-   * |isolate| when the embedder is idle.
-   * Requires that SupportsIdleTasks(isolate) is true.
-   * Idle tasks may be reordered relative to other task types and may be
-   * starved for an arbitrarily long time if no idle time is available.
-   * The definition of "foreground" is opaque to V8.
-   */
-  V8_DEPRECATE_SOON(
-      "Use a taskrunner acquired by GetForegroundTaskRunner instead.",
-      virtual void CallIdleOnForegroundThread(Isolate* isolate,
-                                              IdleTask* task)) {
-    // This must be overriden if |IdleTasksEnabled()|.
-    abort();
-  }
-
   /**
    * Returns true if idle tasks are enabled for the given |isolate|.
    */
-  virtual bool IdleTasksEnabled(Isolate* isolate) {
-    return false;
+  virtual bool IdleTasksEnabled(Isolate* isolate) { return false; }
+
+  /**
+   * Posts |job_task| to run in parallel. Returns a JobHandle associated with
+   * the Job, which can be joined or canceled.
+   * This avoids degenerate cases:
+   * - Calling CallOnWorkerThread() for each work item, causing significant
+   *   overhead.
+   * - Fixed number of CallOnWorkerThread() calls that split the work and might
+   *   run for a long time. This is problematic when many components post
+   *   "num cores" tasks and all expect to use all the cores. In these cases,
+   *   the scheduler lacks context to be fair to multiple same-priority requests
+   *   and/or ability to request lower priority work to yield when high priority
+   *   work comes in.
+   * A canonical implementation of |job_task| looks like:
+   * class MyJobTask : public JobTask {
+   *  public:
+   *   MyJobTask(...) : worker_queue_(...) {}
+   *   // JobTask:
+   *   void Run(JobDelegate* delegate) override {
+   *     while (!delegate->ShouldYield()) {
+   *       // Smallest unit of work.
+   *       auto work_item = worker_queue_.TakeWorkItem();  // Thread safe.
+   *       if (!work_item) return;
+   *       ProcessWork(work_item);
+   *     }
+   *   }
+   *
+   *   size_t GetMaxConcurrency() const override {
+   *     return worker_queue_.GetSize();  // Thread safe.
+   *   }
+   * };
+   * auto handle = PostJob(TaskPriority::kUserVisible,
+   *                       std::make_unique<MyJobTask>(...));
+   * handle->Join();
+   *
+   * PostJob() and methods of the returned JobHandle/JobDelegate, must never be
+   * called while holding a lock that could be acquired by JobTask::Run or
+   * JobTask::GetMaxConcurrency -- that could result in a deadlock. This is
+   * because [1] JobTask::GetMaxConcurrency may be invoked while holding
+   * internal lock (A), hence JobTask::GetMaxConcurrency can only use a lock (B)
+   * if that lock is *never* held while calling back into JobHandle from any
+   * thread (A=>B/B=>A deadlock) and [2] JobTask::Run or
+   * JobTask::GetMaxConcurrency may be invoked synchronously from JobHandle
+   * (B=>JobHandle::foo=>B deadlock).
+   *
+   * A sufficient PostJob() implementation that uses the default Job provided in
+   * libplatform looks like:
+   *  std::unique_ptr<JobHandle> PostJob(
+   *      TaskPriority priority, std::unique_ptr<JobTask> job_task) override {
+   *    return std::make_unique<DefaultJobHandle>(
+   *        std::make_shared<DefaultJobState>(
+   *            this, std::move(job_task), kNumThreads));
+   *  }
+   */
+  virtual std::unique_ptr<JobHandle> PostJob(
+      TaskPriority priority, std::unique_ptr<JobTask> job_task) {
+    return nullptr;
   }
 
   /**
@@ -430,7 +569,7 @@ class Platform {
   * since epoch. Useful for implementing |CurrentClockTimeMillis| if
   * nothing special needed.
   */
-  static double SystemClockTimeMillis();
+  V8_EXPORT static double SystemClockTimeMillis();
 };
 
 } // namespace v8
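To show how the pieces of the new job API in this header fit together (JobTask, JobDelegate, JobHandle, Platform::PostJob), here is a hedged embedder-side sketch; it assumes the embedder's Platform overrides PostJob(), and the atomic counter stands in for a real work queue.

    #include <atomic>
    #include <memory>
    #include "v8-platform.h"

    // Each Run() invocation drains small work items and yields when asked.
    class CountingJobTask final : public v8::JobTask {
     public:
      explicit CountingJobTask(std::atomic<size_t>* remaining)
          : remaining_(remaining) {}

      void Run(v8::JobDelegate* delegate) override {
        while (!delegate->ShouldYield()) {
          size_t left = remaining_->load();
          if (left == 0) return;  // No work left.
          remaining_->compare_exchange_weak(left, left - 1);
        }
      }

      // Upper bound on useful parallelism: one thread per pending item.
      size_t GetMaxConcurrency() const override { return remaining_->load(); }

     private:
      std::atomic<size_t>* remaining_;
    };

    void ProcessInParallel(v8::Platform* platform) {
      std::atomic<size_t> remaining{1000};
      std::unique_ptr<v8::JobHandle> handle = platform->PostJob(
          v8::TaskPriority::kUserVisible,
          std::make_unique<CountingJobTask>(&remaining));
      handle->Join();  // Contribute on this thread until max concurrency is 0.
    }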
data/vendor/v8/include/v8-profiler.h

@@ -5,9 +5,12 @@
 #ifndef V8_V8_PROFILER_H_
 #define V8_V8_PROFILER_H_
 
+#include <limits.h>
+#include <memory>
 #include <unordered_set>
 #include <vector>
-#include "v8.h" // NOLINT(build/include)
+
+#include "v8.h" // NOLINT(build/include_directory)
 
 /**
  * Profiler support for the V8 JavaScript engine.
@@ -17,14 +20,18 @@ namespace v8 {
 class HeapGraphNode;
 struct HeapStatsUpdate;
 
-typedef uint32_t SnapshotObjectId;
-
+using NativeObject = void*;
+using SnapshotObjectId = uint32_t;
 
 struct CpuProfileDeoptFrame {
   int script_id;
   size_t position;
 };
 
+namespace internal {
+class CpuProfile;
+} // namespace internal
+
 } // namespace v8
 
 #ifdef V8_OS_WIN
@@ -47,75 +54,6 @@ template class V8_EXPORT std::vector
 
 namespace v8 {
 
-// TickSample captures the information collected for each sample.
-struct TickSample {
-  // Internal profiling (with --prof + tools/$OS-tick-processor) wants to
-  // include the runtime function we're calling. Externally exposed tick
-  // samples don't care.
-  enum RecordCEntryFrame { kIncludeCEntryFrame, kSkipCEntryFrame };
-
-  TickSample()
-      : state(OTHER),
-        pc(nullptr),
-        external_callback_entry(nullptr),
-        frames_count(0),
-        has_external_callback(false),
-        update_stats(true) {}
-
-  /**
-   * Initialize a tick sample from the isolate.
-   * \param isolate The isolate.
-   * \param state Execution state.
-   * \param record_c_entry_frame Include or skip the runtime function.
-   * \param update_stats Whether update the sample to the aggregated stats.
-   * \param use_simulator_reg_state When set to true and V8 is running under a
-   *                                simulator, the method will use the simulator
-   *                                register state rather than the one provided
-   *                                with |state| argument. Otherwise the method
-   *                                will use provided register |state| as is.
-   */
-  void Init(Isolate* isolate, const v8::RegisterState& state,
-            RecordCEntryFrame record_c_entry_frame, bool update_stats,
-            bool use_simulator_reg_state = true);
-  /**
-   * Get a call stack sample from the isolate.
-   * \param isolate The isolate.
-   * \param state Register state.
-   * \param record_c_entry_frame Include or skip the runtime function.
-   * \param frames Caller allocated buffer to store stack frames.
-   * \param frames_limit Maximum number of frames to capture. The buffer must
-   *                     be large enough to hold the number of frames.
-   * \param sample_info The sample info is filled up by the function
-   *                    provides number of actual captured stack frames and
-   *                    the current VM state.
-   * \param use_simulator_reg_state When set to true and V8 is running under a
-   *                                simulator, the method will use the simulator
-   *                                register state rather than the one provided
-   *                                with |state| argument. Otherwise the method
-   *                                will use provided register |state| as is.
-   * \note GetStackSample is thread and signal safe and should only be called
-   *       when the JS thread is paused or interrupted.
-   *       Otherwise the behavior is undefined.
-   */
-  static bool GetStackSample(Isolate* isolate, v8::RegisterState* state,
-                             RecordCEntryFrame record_c_entry_frame,
-                             void** frames, size_t frames_limit,
-                             v8::SampleInfo* sample_info,
-                             bool use_simulator_reg_state = true);
-  StateTag state;  // The state of the VM.
-  void* pc;        // Instruction pointer.
-  union {
-    void* tos;  // Top stack value (*sp).
-    void* external_callback_entry;
-  };
-  static const unsigned kMaxFramesCountLog2 = 8;
-  static const unsigned kMaxFramesCount = (1 << kMaxFramesCountLog2) - 1;
-  void* stack[kMaxFramesCount];                 // Call stack.
-  unsigned frames_count : kMaxFramesCountLog2;  // Number of captured frames.
-  bool has_external_callback : 1;
-  bool update_stats : 1;  // Whether the sample should update aggregated stats.
-};
-
 /**
  * CpuProfileNode represents a node in a call graph.
  */
@@ -129,6 +67,20 @@ class V8_EXPORT CpuProfileNode {
     unsigned int hit_count;
   };
 
+  // An annotation hinting at the source of a CpuProfileNode.
+  enum SourceType {
+    // User-supplied script with associated resource information.
+    kScript = 0,
+    // Native scripts and provided builtins.
+    kBuiltin = 1,
+    // Callbacks into native code.
+    kCallback = 2,
+    // VM-internal functions or state.
+    kInternal = 3,
+    // A node that failed to symbolize.
+    kUnresolved = 4,
+  };
+
   /** Returns function name (empty string for anonymous functions.) */
   Local<String> GetFunctionName() const;
 
@@ -152,6 +104,12 @@ class V8_EXPORT CpuProfileNode {
    */
   const char* GetScriptResourceNameStr() const;
 
+  /**
+   * Return true if the script from where the function originates is flagged as
+   * being shared cross-origin.
+   */
+  bool IsScriptSharedCrossOrigin() const;
+
   /**
    * Returns the number, 1-based, of the line where the function originates.
    * kNoLineNumberInfo if no line number information is available.
@@ -186,20 +144,23 @@ class V8_EXPORT CpuProfileNode {
    */
   unsigned GetHitCount() const;
 
-  /** Returns function entry UID. */
-  V8_DEPRECATE_SOON(
-      "Use GetScriptId, GetLineNumber, and GetColumnNumber instead.",
-      unsigned GetCallUid() const);
-
   /** Returns id of the node. The id is unique within the tree */
   unsigned GetNodeId() const;
 
+  /**
+   * Gets the type of the source which the node was captured from.
+   */
+  SourceType GetSourceType() const;
+
   /** Returns child nodes count of the node. */
   int GetChildrenCount() const;
 
   /** Retrieves a child node by index. */
   const CpuProfileNode* GetChild(int index) const;
 
+  /** Retrieves the ancestor node, or null if the root. */
+  const CpuProfileNode* GetParent() const;
+
   /** Retrieves deopt infos for the node. */
   const std::vector<CpuProfileDeoptInfo>& GetDeoptInfos() const;
 
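A small, hedged sketch of traversing a recorded call tree with the CpuProfileNode accessors shown above (GetSourceType, GetChildrenCount, GetChild); the counting logic itself is illustrative.

    #include "v8-profiler.h"

    // Count how many nodes in the subtree were attributed to builtins.
    int CountBuiltinNodes(const v8::CpuProfileNode* node) {
      int count =
          node->GetSourceType() == v8::CpuProfileNode::kBuiltin ? 1 : 0;
      for (int i = 0; i < node->GetChildrenCount(); ++i) {
        count += CountBuiltinNodes(node->GetChild(i));
      }
      return count;
    }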
@@ -269,6 +230,66 @@ enum CpuProfilingMode {
   kCallerLineNumbers,
 };
 
+// Determines how names are derived for functions sampled.
+enum CpuProfilingNamingMode {
+  // Use the immediate name of functions at compilation time.
+  kStandardNaming,
+  // Use more verbose naming for functions without names, inferred from scope
+  // where possible.
+  kDebugNaming,
+};
+
+enum CpuProfilingLoggingMode {
+  // Enables logging when a profile is active, and disables logging when all
+  // profiles are detached.
+  kLazyLogging,
+  // Enables logging for the lifetime of the CpuProfiler. Calls to
+  // StartRecording are faster, at the expense of runtime overhead.
+  kEagerLogging,
+};
+
+/**
+ * Optional profiling attributes.
+ */
+class V8_EXPORT CpuProfilingOptions {
+ public:
+  // Indicates that the sample buffer size should not be explicitly limited.
+  static const unsigned kNoSampleLimit = UINT_MAX;
+
+  /**
+   * \param mode Type of computation of stack frame line numbers.
+   * \param max_samples The maximum number of samples that should be recorded by
+   *                    the profiler. Samples obtained after this limit will be
+   *                    discarded.
+   * \param sampling_interval_us controls the profile-specific target
+   *                             sampling interval. The provided sampling
+   *                             interval will be snapped to the next lowest
+   *                             non-zero multiple of the profiler's sampling
+   *                             interval, set via SetSamplingInterval(). If
+   *                             zero, the sampling interval will be equal to
+   *                             the profiler's sampling interval.
+   */
+  CpuProfilingOptions(
+      CpuProfilingMode mode = kLeafNodeLineNumbers,
+      unsigned max_samples = kNoSampleLimit, int sampling_interval_us = 0,
+      MaybeLocal<Context> filter_context = MaybeLocal<Context>());
+
+  CpuProfilingMode mode() const { return mode_; }
+  unsigned max_samples() const { return max_samples_; }
+  int sampling_interval_us() const { return sampling_interval_us_; }
+
+ private:
+  friend class internal::CpuProfile;
+
+  bool has_filter_context() const { return !filter_context_.IsEmpty(); }
+  void* raw_filter_context() const;
+
+  CpuProfilingMode mode_;
+  unsigned max_samples_;
+  int sampling_interval_us_;
+  CopyablePersistentTraits<Context>::CopyablePersistent filter_context_;
+};
+
 /**
  * Interface for controlling CPU profiling. Instance of the
  * profiler can be created using v8::CpuProfiler::New method.
@@ -280,7 +301,9 @@ class V8_EXPORT CpuProfiler {
    * initialized. The profiler object must be disposed after use by calling
    * |Dispose| method.
    */
-  static CpuProfiler* New(Isolate* isolate);
+  static CpuProfiler* New(Isolate* isolate,
+                          CpuProfilingNamingMode = kDebugNaming,
+                          CpuProfilingLoggingMode = kLazyLogging);
 
   /**
    * Synchronously collect current stack sample in all profilers attached to
@@ -302,18 +325,35 @@ class V8_EXPORT CpuProfiler {
   void SetSamplingInterval(int us);
 
   /**
-   *
-   *
-   *
-   *
-   *
-
+   * Sets whether or not the profiler should prioritize consistency of sample
+   * periodicity on Windows. Disabling this can greatly reduce CPU usage, but
+   * may result in greater variance in sample timings from the platform's
+   * scheduler. Defaults to enabled. This method must be called when there are
+   * no profiles being recorded.
+   */
+  void SetUsePreciseSampling(bool);
+
+  /**
+   * Starts collecting a CPU profile. Title may be an empty string. Several
+   * profiles may be collected at once. Attempts to start collecting several
+   * profiles with the same title are silently ignored.
+   */
+  void StartProfiling(Local<String> title, CpuProfilingOptions options);
+
+  /**
+   * Starts profiling with the same semantics as above, except with expanded
+   * parameters.
    *
    * |record_samples| parameter controls whether individual samples should
    * be recorded in addition to the aggregated tree.
+   *
+   * |max_samples| controls the maximum number of samples that should be
+   * recorded by the profiler. Samples obtained after this limit will be
+   * discarded.
    */
-  void StartProfiling(
-
+  void StartProfiling(
+      Local<String> title, CpuProfilingMode mode, bool record_samples = false,
+      unsigned max_samples = CpuProfilingOptions::kNoSampleLimit);
   /**
    * The same as StartProfiling above, but the CpuProfilingMode defaults to
    * kLeafNodeLineNumbers mode, which was the previous default behavior of the
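A hedged usage sketch showing how the new profiler options fit together; the title handling, the 500-microsecond sampling interval, and the wrapper function are illustrative values, not part of the header.

    #include "v8-profiler.h"

    v8::CpuProfile* ProfileWithOptions(v8::Isolate* isolate,
                                       v8::Local<v8::String> title) {
      // New() now also accepts naming and logging modes (defaulted here).
      v8::CpuProfiler* profiler = v8::CpuProfiler::New(isolate);
      v8::CpuProfilingOptions options(
          v8::kLeafNodeLineNumbers,
          /*max_samples=*/v8::CpuProfilingOptions::kNoSampleLimit,
          /*sampling_interval_us=*/500);
      profiler->StartProfiling(title, options);
      // ... run the code to be profiled ...
      v8::CpuProfile* profile = profiler->StopProfiling(title);
      profiler->Dispose();
      return profile;
    }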
@@ -327,20 +367,6 @@ class V8_EXPORT CpuProfiler {
    */
   CpuProfile* StopProfiling(Local<String> title);
 
-  /**
-   * Force collection of a sample. Must be called on the VM thread.
-   * Recording the forced sample does not contribute to the aggregated
-   * profile statistics.
-   */
-  V8_DEPRECATED("Use static CollectSample(Isolate*) instead.",
-                void CollectSample());
-
-  /**
-   * Tells the profiler whether the embedder is idle.
-   */
-  V8_DEPRECATED("Use Isolate::SetIdle(bool) instead.",
-                void SetIdle(bool is_idle));
-
   /**
    * Generate more detailed source positions to code objects. This results in
    * better results when mapping profiling samples to script source.
@@ -354,7 +380,6 @@ class V8_EXPORT CpuProfiler {
   CpuProfiler& operator=(const CpuProfiler&);
 };
 
-
 /**
  * HeapSnapshotEdge represents a directed connection between heap
  * graph nodes: from retainers to retained nodes.
@@ -705,7 +730,12 @@ class V8_EXPORT EmbedderGraph {
    */
   virtual const char* NamePrefix() { return nullptr; }
 
-
+  /**
+   * Returns the NativeObject that can be used for querying the
+   * |HeapSnapshot|.
+   */
+  virtual NativeObject GetNativeObject() { return nullptr; }
+
   Node(const Node&) = delete;
   Node& operator=(const Node&) = delete;
 };
@@ -746,33 +776,6 @@ class V8_EXPORT HeapProfiler {
     kSamplingForceGC = 1 << 0,
   };
 
-  typedef std::unordered_set<const v8::PersistentBase<v8::Value>*>
-      RetainerChildren;
-  typedef std::vector<std::pair<v8::RetainedObjectInfo*, RetainerChildren>>
-      RetainerGroups;
-  typedef std::vector<std::pair<const v8::PersistentBase<v8::Value>*,
-                                const v8::PersistentBase<v8::Value>*>>
-      RetainerEdges;
-
-  struct RetainerInfos {
-    RetainerGroups groups;
-    RetainerEdges edges;
-  };
-
-  /**
-   * Callback function invoked to retrieve all RetainerInfos from the embedder.
-   */
-  typedef RetainerInfos (*GetRetainerInfosCallback)(v8::Isolate* isolate);
-
-  /**
-   * Callback function invoked for obtaining RetainedObjectInfo for
-   * the given JavaScript wrapper object. It is prohibited to enter V8
-   * while the callback is running: only getters on the handle and
-   * GetPointerFromInternalField on the objects are allowed.
-   */
-  typedef RetainedObjectInfo* (*WrapperInfoCallback)(uint16_t class_id,
-                                                     Local<Value> wrapper);
-
   /**
    * Callback function invoked during heap snapshot generation to retrieve
    * the embedder object graph. The callback should use graph->AddEdge(..) to
@@ -783,10 +786,6 @@ class V8_EXPORT HeapProfiler {
                                              v8::EmbedderGraph* graph,
                                              void* data);
 
-  /** TODO(addaleax): Remove */
-  typedef void (*LegacyBuildEmbedderGraphCallback)(v8::Isolate* isolate,
-                                                   v8::EmbedderGraph* graph);
-
   /** Returns the number of snapshots taken. */
   int GetSnapshotCount();
 
@@ -799,6 +798,12 @@ class V8_EXPORT HeapProfiler {
    */
   SnapshotObjectId GetObjectId(Local<Value> value);
 
+  /**
+   * Returns SnapshotObjectId for a native object referenced by |value| if it
+   * has been seen by the heap profiler, kUnknownObjectId otherwise.
+   */
+  SnapshotObjectId GetObjectId(NativeObject value);
+
   /**
    * Returns heap object with given SnapshotObjectId if the object is alive,
    * otherwise empty handle is returned.
@@ -839,7 +844,8 @@ class V8_EXPORT HeapProfiler {
    */
   const HeapSnapshot* TakeHeapSnapshot(
       ActivityControl* control = nullptr,
-      ObjectNameResolver* global_object_name_resolver = nullptr);
+      ObjectNameResolver* global_object_name_resolver = nullptr,
+      bool treat_global_objects_as_roots = true);
 
   /**
    * Starts tracking of heap objects population statistics. After calling
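A brief, hedged sketch of calling the extended TakeHeapSnapshot(); the wrapper function and the choice of false for the new flag are illustrative only.

    #include "v8-profiler.h"

    const v8::HeapSnapshot* Snapshot(v8::Isolate* isolate) {
      v8::HeapProfiler* profiler = isolate->GetHeapProfiler();
      // The new final parameter controls whether global objects are treated
      // as roots in the resulting snapshot.
      return profiler->TakeHeapSnapshot(
          /*control=*/nullptr,
          /*global_object_name_resolver=*/nullptr,
          /*treat_global_objects_as_roots=*/false);
    }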
@@ -925,20 +931,6 @@ class V8_EXPORT HeapProfiler {
    */
   void DeleteAllHeapSnapshots();
 
-  /** Binds a callback to embedder's class ID. */
-  V8_DEPRECATED(
-      "Use AddBuildEmbedderGraphCallback to provide info about embedder nodes",
-      void SetWrapperClassInfoProvider(uint16_t class_id,
-                                       WrapperInfoCallback callback));
-
-  V8_DEPRECATED(
-      "Use AddBuildEmbedderGraphCallback to provide info about embedder nodes",
-      void SetGetRetainerInfosCallback(GetRetainerInfosCallback callback));
-
-  V8_DEPRECATED(
-      "Use AddBuildEmbedderGraphCallback to provide info about embedder nodes",
-      void SetBuildEmbedderGraphCallback(
-          LegacyBuildEmbedderGraphCallback callback));
   void AddBuildEmbedderGraphCallback(BuildEmbedderGraphCallback callback,
                                      void* data);
   void RemoveBuildEmbedderGraphCallback(BuildEmbedderGraphCallback callback,
@@ -958,80 +950,6 @@ class V8_EXPORT HeapProfiler {
   HeapProfiler& operator=(const HeapProfiler&);
 };
 
-/**
- * Interface for providing information about embedder's objects
- * held by global handles. This information is reported in two ways:
- *
- * 1. When calling AddObjectGroup, an embedder may pass
- *    RetainedObjectInfo instance describing the group. To collect
- *    this information while taking a heap snapshot, V8 calls GC
- *    prologue and epilogue callbacks.
- *
- * 2. When a heap snapshot is collected, V8 additionally
- *    requests RetainedObjectInfos for persistent handles that
- *    were not previously reported via AddObjectGroup.
- *
- * Thus, if an embedder wants to provide information about native
- * objects for heap snapshots, it can do it in a GC prologue
- * handler, and / or by assigning wrapper class ids in the following way:
- *
- * 1. Bind a callback to class id by calling SetWrapperClassInfoProvider.
- * 2. Call SetWrapperClassId on certain persistent handles.
- *
- * V8 takes ownership of RetainedObjectInfo instances passed to it and
- * keeps them alive only during snapshot collection. Afterwards, they
- * are freed by calling the Dispose class function.
- */
-class V8_EXPORT RetainedObjectInfo { // NOLINT
- public:
-  /** Called by V8 when it no longer needs an instance. */
-  virtual void Dispose() = 0;
-
-  /** Returns whether two instances are equivalent. */
-  virtual bool IsEquivalent(RetainedObjectInfo* other) = 0;
-
-  /**
-   * Returns hash value for the instance. Equivalent instances
-   * must have the same hash value.
-   */
-  virtual intptr_t GetHash() = 0;
-
-  /**
-   * Returns human-readable label. It must be a null-terminated UTF-8
-   * encoded string. V8 copies its contents during a call to GetLabel.
-   */
-  virtual const char* GetLabel() = 0;
-
-  /**
-   * Returns human-readable group label. It must be a null-terminated UTF-8
-   * encoded string. V8 copies its contents during a call to GetGroupLabel.
-   * Heap snapshot generator will collect all the group names, create
-   * top level entries with these names and attach the objects to the
-   * corresponding top level group objects. There is a default
-   * implementation which is required because embedders don't have their
-   * own implementation yet.
-   */
-  virtual const char* GetGroupLabel() { return GetLabel(); }
-
-  /**
-   * Returns element count in case if a global handle retains
-   * a subgraph by holding one of its nodes.
-   */
-  virtual intptr_t GetElementCount() { return -1; }
-
-  /** Returns embedder's object size in bytes. */
-  virtual intptr_t GetSizeInBytes() { return -1; }
-
- protected:
-  RetainedObjectInfo() = default;
-  virtual ~RetainedObjectInfo() = default;
-
- private:
-  RetainedObjectInfo(const RetainedObjectInfo&);
-  RetainedObjectInfo& operator=(const RetainedObjectInfo&);
-};
-
-
 /**
  * A struct for exporting HeapStats data from V8, using "push" model.
  * See HeapProfiler::GetHeapStats.
@@ -1055,7 +973,8 @@ struct HeapStatsUpdate {
   V(LazyCompile) \
   V(RegExp) \
   V(Script) \
-  V(Stub)
+  V(Stub) \
+  V(Relocation)
 
 /**
  * Note that this enum may be extended in the future. Please include a default
@@ -1088,10 +1007,12 @@ class V8_EXPORT CodeEvent {
   const char* GetComment();
 
   static const char* GetCodeEventTypeName(CodeEventType code_event_type);
+
+  uintptr_t GetPreviousCodeStartAddress();
 };
 
 /**
- * Interface to listen to code creation events.
+ * Interface to listen to code creation and code relocation events.
  */
 class V8_EXPORT CodeEventHandler {
  public:
@@ -1103,9 +1024,26 @@ class V8_EXPORT CodeEventHandler {
   explicit CodeEventHandler(Isolate* isolate);
   virtual ~CodeEventHandler();
 
+  /**
+   * Handle is called every time a code object is created or moved. Information
+   * about each code event will be available through the `code_event`
+   * parameter.
+   *
+   * When the CodeEventType is kRelocationType, the code for this CodeEvent has
+   * moved from `GetPreviousCodeStartAddress()` to `GetCodeStartAddress()`.
+   */
   virtual void Handle(CodeEvent* code_event) = 0;
 
+  /**
+   * Call `Enable()` to starts listening to code creation and code relocation
+   * events. These events will be handled by `Handle()`.
+   */
   void Enable();
+
+  /**
+   * Call `Disable()` to stop listening to code creation and code relocation
+   * events.
+   */
   void Disable();
 
  private:
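Lastly, a hedged sketch of a CodeEventHandler that observes the relocation events introduced above; the class name and the printf-based logging are illustrative placeholders.

    #include <cstdio>
    #include "v8-profiler.h"

    class RelocationLogger final : public v8::CodeEventHandler {
     public:
      explicit RelocationLogger(v8::Isolate* isolate)
          : v8::CodeEventHandler(isolate) {}

      void Handle(v8::CodeEvent* event) override {
        // For relocation events, the code moved from the previous start
        // address to the current one.
        std::printf("%s: %p -> %p\n",
                    v8::CodeEvent::GetCodeEventTypeName(event->GetCodeEventType()),
                    reinterpret_cast<void*>(event->GetPreviousCodeStartAddress()),
                    reinterpret_cast<void*>(event->GetCodeStartAddress()));
      }
    };

    // Usage: RelocationLogger logger(isolate); logger.Enable();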