libv8 7.3.492.27.1-universal-darwin16

Sign up to get free protection for your applications and to get access to all the features.
@@ -0,0 +1,438 @@
1
+ // Copyright 2013 the V8 project authors. All rights reserved.
2
+ // Use of this source code is governed by a BSD-style license that can be
3
+ // found in the LICENSE file.
4
+
5
+ #ifndef V8_V8_PLATFORM_H_
6
+ #define V8_V8_PLATFORM_H_
7
+
8
+ #include <stddef.h>
9
+ #include <stdint.h>
10
+ #include <stdlib.h> // For abort.
11
+ #include <memory>
12
+ #include <string>
13
+
14
+ #include "v8config.h" // NOLINT(build/include)
15
+
16
+ namespace v8 {
17
+
18
+ class Isolate;
19
+
20
/**
 * A Task represents a unit of work.
 */
class Task {
 public:
  virtual ~Task() = default;

  /** Performs the unit of work. */
  virtual void Run() = 0;
};
29
+
30
/**
 * An IdleTask represents a unit of work to be performed in idle time.
 * The Run method is invoked with an argument that specifies the deadline in
 * seconds returned by MonotonicallyIncreasingTime().
 * The idle task is expected to complete by this deadline.
 */
class IdleTask {
 public:
  virtual ~IdleTask() = default;
  /** Performs the work; |deadline_in_seconds| is when the work should stop. */
  virtual void Run(double deadline_in_seconds) = 0;
};
41
+
42
/**
 * A TaskRunner allows scheduling of tasks. The TaskRunner may still be used to
 * post tasks after the isolate gets destructed, but these tasks may not get
 * executed anymore. All tasks posted to a given TaskRunner will be invoked in
 * sequence. Tasks can be posted from any thread.
 */
class TaskRunner {
 public:
  /**
   * Schedules a task to be invoked by this TaskRunner. The TaskRunner
   * implementation takes ownership of |task|.
   */
  virtual void PostTask(std::unique_ptr<Task> task) = 0;

  /**
   * Schedules a task to be invoked by this TaskRunner. The TaskRunner
   * implementation takes ownership of |task|. The |task| cannot be nested
   * within other task executions.
   *
   * Requires that |TaskRunner::NonNestableTasksEnabled()| is true.
   */
  virtual void PostNonNestableTask(std::unique_ptr<Task> task) {}

  /**
   * Schedules a task to be invoked by this TaskRunner. The task is scheduled
   * after the given number of seconds |delay_in_seconds|. The TaskRunner
   * implementation takes ownership of |task|.
   */
  virtual void PostDelayedTask(std::unique_ptr<Task> task,
                               double delay_in_seconds) = 0;

  /**
   * Schedules an idle task to be invoked by this TaskRunner. The task is
   * scheduled when the embedder is idle. Requires that
   * |TaskRunner::IdleTasksEnabled()| is true. Idle tasks may be reordered
   * relative to other task types and may be starved for an arbitrarily long
   * time if no idle time is available. The TaskRunner implementation takes
   * ownership of |task|.
   */
  virtual void PostIdleTask(std::unique_ptr<IdleTask> task) = 0;

  /**
   * Returns true if idle tasks are enabled for this TaskRunner.
   */
  virtual bool IdleTasksEnabled() = 0;

  /**
   * Returns true if non-nestable tasks are enabled for this TaskRunner.
   */
  virtual bool NonNestableTasksEnabled() const { return false; }

  TaskRunner() = default;
  virtual ~TaskRunner() = default;

 private:
  // TaskRunners are not copyable.
  TaskRunner(const TaskRunner&) = delete;
  TaskRunner& operator=(const TaskRunner&) = delete;
};
100
+
101
/**
 * The interface represents complex arguments to trace events.
 */
class ConvertableToTraceFormat {
 public:
  virtual ~ConvertableToTraceFormat() = default;

  /**
   * Append the class info to the provided |out| string. The appended
   * data must be a valid JSON object. Strings must be properly quoted and
   * escaped. There is no processing applied to the content after it is
   * appended.
   */
  virtual void AppendAsTraceFormat(std::string* out) const = 0;
};
116
+
117
/**
 * V8 Tracing controller.
 *
 * Can be implemented by an embedder to record trace events from V8.
 */
class TracingController {
 public:
  virtual ~TracingController() = default;

  /**
   * Called by TRACE_EVENT* macros; don't call this directly.
   * The name parameter is a category group, for example:
   * TRACE_EVENT0("v8,parse", "V8.Parse")
   * The pointer returned points to a value with zero or more of the bits
   * defined in CategoryGroupEnabledFlags.
   */
  virtual const uint8_t* GetCategoryGroupEnabled(const char* name) {
    // Default implementation reports every category group as disabled.
    static uint8_t no = 0;
    return &no;
  }

  /**
   * Adds a trace event to the platform tracing system. These function calls are
   * usually the result of a TRACE_* macro from trace_event_common.h when
   * tracing and the category of the particular trace are enabled. It is not
   * advisable to call these functions on their own; they are really only meant
   * to be used by the trace macros. The returned handle can be used by
   * UpdateTraceEventDuration to update the duration of COMPLETE events.
   */
  virtual uint64_t AddTraceEvent(
      char phase, const uint8_t* category_enabled_flag, const char* name,
      const char* scope, uint64_t id, uint64_t bind_id, int32_t num_args,
      const char** arg_names, const uint8_t* arg_types,
      const uint64_t* arg_values,
      std::unique_ptr<ConvertableToTraceFormat>* arg_convertables,
      unsigned int flags) {
    return 0;
  }
  /** Same as AddTraceEvent, but with an explicitly supplied |timestamp|. */
  virtual uint64_t AddTraceEventWithTimestamp(
      char phase, const uint8_t* category_enabled_flag, const char* name,
      const char* scope, uint64_t id, uint64_t bind_id, int32_t num_args,
      const char** arg_names, const uint8_t* arg_types,
      const uint64_t* arg_values,
      std::unique_ptr<ConvertableToTraceFormat>* arg_convertables,
      unsigned int flags, int64_t timestamp) {
    return 0;
  }

  /**
   * Sets the duration field of a COMPLETE trace event. It must be called with
   * the handle returned from AddTraceEvent().
   */
  virtual void UpdateTraceEventDuration(const uint8_t* category_enabled_flag,
                                        const char* name, uint64_t handle) {}

  /** Observer notified when tracing is enabled or disabled. */
  class TraceStateObserver {
   public:
    virtual ~TraceStateObserver() = default;
    virtual void OnTraceEnabled() = 0;
    virtual void OnTraceDisabled() = 0;
  };

  /** Adds tracing state change observer. */
  virtual void AddTraceStateObserver(TraceStateObserver*) {}

  /** Removes tracing state change observer. */
  virtual void RemoveTraceStateObserver(TraceStateObserver*) {}
};
185
+
186
/**
 * A V8 memory page allocator.
 *
 * Can be implemented by an embedder to manage large host OS allocations.
 */
class PageAllocator {
 public:
  virtual ~PageAllocator() = default;

  /**
   * Gets the page granularity for AllocatePages and FreePages. Addresses and
   * lengths for those calls should be multiples of AllocatePageSize().
   */
  virtual size_t AllocatePageSize() = 0;

  /**
   * Gets the page granularity for SetPermissions and ReleasePages. Addresses
   * and lengths for those calls should be multiples of CommitPageSize().
   */
  virtual size_t CommitPageSize() = 0;

  /**
   * Sets the random seed so that GetRandomMmapAddr() will generate repeatable
   * sequences of random mmap addresses.
   */
  virtual void SetRandomMmapSeed(int64_t seed) = 0;

  /**
   * Returns a randomized address, suitable for memory allocation under ASLR.
   * The address will be aligned to AllocatePageSize.
   */
  virtual void* GetRandomMmapAddr() = 0;

  /**
   * Memory permissions.
   */
  enum Permission {
    kNoAccess,
    kRead,
    kReadWrite,
    // TODO(hpayer): Remove this flag. Memory should never be rwx.
    kReadWriteExecute,
    kReadExecute
  };

  /**
   * Allocates memory in range with the given alignment and permission.
   */
  virtual void* AllocatePages(void* address, size_t length, size_t alignment,
                              Permission permissions) = 0;

  /**
   * Frees memory in a range that was allocated by a call to AllocatePages.
   */
  virtual bool FreePages(void* address, size_t length) = 0;

  /**
   * Releases memory in a range that was allocated by a call to AllocatePages.
   */
  virtual bool ReleasePages(void* address, size_t length,
                            size_t new_length) = 0;

  /**
   * Sets permissions on pages in an allocated range.
   */
  virtual bool SetPermissions(void* address, size_t length,
                              Permission permissions) = 0;

  /**
   * Frees memory in the given [address, address + size) range. address and size
   * should be operating system page-aligned. The next write to this
   * memory area brings the memory transparently back.
   */
  virtual bool DiscardSystemPages(void* address, size_t size) { return true; }
};
261
+
262
/**
 * V8 Platform abstraction layer.
 *
 * The embedder has to provide an implementation of this interface before
 * initializing the rest of V8.
 */
class Platform {
 public:
  virtual ~Platform() = default;

  /**
   * Allows the embedder to manage memory page allocations.
   */
  virtual PageAllocator* GetPageAllocator() {
    // TODO(bbudge) Make this abstract after all embedders implement this.
    return nullptr;
  }

  /**
   * Enables the embedder to respond in cases where V8 can't allocate large
   * blocks of memory. V8 retries the failed allocation once after calling this
   * method. On success, execution continues; otherwise V8 exits with a fatal
   * error.
   * Embedder overrides of this function must NOT call back into V8.
   */
  virtual void OnCriticalMemoryPressure() {
    // TODO(bbudge) Remove this when embedders override the following method.
    // See crbug.com/634547.
  }

  /**
   * Enables the embedder to respond in cases where V8 can't allocate large
   * memory regions. The |length| parameter is the amount of memory needed.
   * Returns true if memory is now available. Returns false if no memory could
   * be made available. V8 will retry allocations until this method returns
   * false.
   *
   * Embedder overrides of this function must NOT call back into V8.
   */
  virtual bool OnCriticalMemoryPressure(size_t length) { return false; }

  /**
   * Gets the number of worker threads used by
   * Call(BlockingTask)OnWorkerThread(). This can be used to estimate the number
   * of tasks a work package should be split into. A return value of 0 means
   * that there are no worker threads available. Note that a value of 0 won't
   * prohibit V8 from posting tasks using |CallOnWorkerThread|.
   */
  virtual int NumberOfWorkerThreads() = 0;

  /**
   * Returns a TaskRunner which can be used to post a task on the foreground.
   * This function should only be called from a foreground thread.
   */
  virtual std::shared_ptr<v8::TaskRunner> GetForegroundTaskRunner(
      Isolate* isolate) = 0;

  /**
   * Schedules a task to be invoked on a worker thread.
   */
  virtual void CallOnWorkerThread(std::unique_ptr<Task> task) = 0;

  /**
   * Schedules a task that blocks the main thread to be invoked with
   * high-priority on a worker thread.
   */
  virtual void CallBlockingTaskOnWorkerThread(std::unique_ptr<Task> task) {
    // Embedders may optionally override this to process these tasks in a high
    // priority pool.
    CallOnWorkerThread(std::move(task));
  }

  /**
   * Schedules a task to be invoked with low-priority on a worker thread.
   */
  virtual void CallLowPriorityTaskOnWorkerThread(std::unique_ptr<Task> task) {
    // Embedders may optionally override this to process these tasks in a low
    // priority pool.
    CallOnWorkerThread(std::move(task));
  }

  /**
   * Schedules a task to be invoked on a worker thread after |delay_in_seconds|
   * expires.
   */
  virtual void CallDelayedOnWorkerThread(std::unique_ptr<Task> task,
                                         double delay_in_seconds) = 0;

  /**
   * Schedules a task to be invoked on a foreground thread wrt a specific
   * |isolate|. Tasks posted for the same isolate should be executed in order
   * of scheduling. The definition of "foreground" is opaque to V8.
   */
  V8_DEPRECATE_SOON(
      "Use a taskrunner acquired by GetForegroundTaskRunner instead.",
      virtual void CallOnForegroundThread(Isolate* isolate, Task* task)) = 0;

  /**
   * Schedules a task to be invoked on a foreground thread wrt a specific
   * |isolate| after the given number of seconds |delay_in_seconds|.
   * Tasks posted for the same isolate should be executed in order of
   * scheduling. The definition of "foreground" is opaque to V8.
   */
  V8_DEPRECATE_SOON(
      "Use a taskrunner acquired by GetForegroundTaskRunner instead.",
      virtual void CallDelayedOnForegroundThread(Isolate* isolate, Task* task,
                                                 double delay_in_seconds)) = 0;

  /**
   * Schedules a task to be invoked on a foreground thread wrt a specific
   * |isolate| when the embedder is idle.
   * Requires that SupportsIdleTasks(isolate) is true.
   * Idle tasks may be reordered relative to other task types and may be
   * starved for an arbitrarily long time if no idle time is available.
   * The definition of "foreground" is opaque to V8.
   */
  V8_DEPRECATE_SOON(
      "Use a taskrunner acquired by GetForegroundTaskRunner instead.",
      virtual void CallIdleOnForegroundThread(Isolate* isolate,
                                              IdleTask* task)) {
    // This must be overridden if |IdleTasksEnabled()|.
    abort();
  }

  /**
   * Returns true if idle tasks are enabled for the given |isolate|.
   */
  virtual bool IdleTasksEnabled(Isolate* isolate) {
    return false;
  }

  /**
   * Monotonically increasing time in seconds from an arbitrary fixed point in
   * the past. This function is expected to return at least
   * millisecond-precision values. For this reason,
   * it is recommended that the fixed point be no further in the past than
   * the epoch.
   */
  virtual double MonotonicallyIncreasingTime() = 0;

  /**
   * Current wall-clock time in milliseconds since epoch.
   * This function is expected to return at least millisecond-precision values.
   */
  virtual double CurrentClockTimeMillis() = 0;

  typedef void (*StackTracePrinter)();

  /**
   * Returns a function pointer that prints a stack trace of the current stack
   * on invocation. Disables printing of the stack trace if nullptr.
   */
  virtual StackTracePrinter GetStackTracePrinter() { return nullptr; }

  /**
   * Returns an instance of a v8::TracingController. This must be non-nullptr.
   */
  virtual TracingController* GetTracingController() = 0;

  /**
   * Tells the embedder to generate and upload a crashdump during an unexpected
   * but non-critical scenario.
   */
  virtual void DumpWithoutCrashing() {}

 protected:
  /**
   * Default implementation of current wall-clock time in milliseconds
   * since epoch. Useful for implementing |CurrentClockTimeMillis| if
   * nothing special needed.
   */
  static double SystemClockTimeMillis();
};
435
+
436
+ } // namespace v8
437
+
438
+ #endif // V8_V8_PLATFORM_H_
@@ -0,0 +1,1121 @@
1
+ // Copyright 2010 the V8 project authors. All rights reserved.
2
+ // Use of this source code is governed by a BSD-style license that can be
3
+ // found in the LICENSE file.
4
+
5
+ #ifndef V8_V8_PROFILER_H_
6
+ #define V8_V8_PROFILER_H_
7
+
8
+ #include <unordered_set>
9
+ #include <vector>
10
+ #include "v8.h" // NOLINT(build/include)
11
+
12
+ /**
13
+ * Profiler support for the V8 JavaScript engine.
14
+ */
15
+ namespace v8 {
16
+
17
+ class HeapGraphNode;
18
+ struct HeapStatsUpdate;
19
+
20
+ typedef uint32_t SnapshotObjectId;
21
+
22
+
23
/** One stack frame of a deoptimization event recorded by the CPU profiler. */
struct CpuProfileDeoptFrame {
  // Id of the script containing the frame.
  int script_id;
  // Offset of the frame within the script source.
  size_t position;
};
27
+
28
+ } // namespace v8
29
+
30
+ #ifdef V8_OS_WIN
31
+ template class V8_EXPORT std::vector<v8::CpuProfileDeoptFrame>;
32
+ #endif
33
+
34
+ namespace v8 {
35
+
36
/** Describes one deoptimization event: its reason and the stack where it hit. */
struct V8_EXPORT CpuProfileDeoptInfo {
  /** A pointer to a static string owned by v8. */
  const char* deopt_reason;
  std::vector<CpuProfileDeoptFrame> stack;
};
41
+
42
+ } // namespace v8
43
+
44
+ #ifdef V8_OS_WIN
45
+ template class V8_EXPORT std::vector<v8::CpuProfileDeoptInfo>;
46
+ #endif
47
+
48
+ namespace v8 {
49
+
50
// TickSample captures the information collected for each sample.
struct TickSample {
  // Internal profiling (with --prof + tools/$OS-tick-processor) wants to
  // include the runtime function we're calling. Externally exposed tick
  // samples don't care.
  enum RecordCEntryFrame { kIncludeCEntryFrame, kSkipCEntryFrame };

  TickSample()
      : state(OTHER),
        pc(nullptr),
        external_callback_entry(nullptr),
        frames_count(0),
        has_external_callback(false),
        update_stats(true) {}

  /**
   * Initialize a tick sample from the isolate.
   * \param isolate The isolate.
   * \param state Execution state.
   * \param record_c_entry_frame Include or skip the runtime function.
   * \param update_stats Whether to update the aggregated stats with the
   *                     sample.
   * \param use_simulator_reg_state When set to true and V8 is running under a
   *                                simulator, the method will use the simulator
   *                                register state rather than the one provided
   *                                with |state| argument. Otherwise the method
   *                                will use provided register |state| as is.
   */
  void Init(Isolate* isolate, const v8::RegisterState& state,
            RecordCEntryFrame record_c_entry_frame, bool update_stats,
            bool use_simulator_reg_state = true);
  /**
   * Get a call stack sample from the isolate.
   * \param isolate The isolate.
   * \param state Register state.
   * \param record_c_entry_frame Include or skip the runtime function.
   * \param frames Caller allocated buffer to store stack frames.
   * \param frames_limit Maximum number of frames to capture. The buffer must
   *                     be large enough to hold the number of frames.
   * \param sample_info Filled in by the function; provides the number of
   *                    actually captured stack frames and the current VM
   *                    state.
   * \param use_simulator_reg_state When set to true and V8 is running under a
   *                                simulator, the method will use the simulator
   *                                register state rather than the one provided
   *                                with |state| argument. Otherwise the method
   *                                will use provided register |state| as is.
   * \note GetStackSample is thread and signal safe and should only be called
   *       when the JS thread is paused or interrupted.
   *       Otherwise the behavior is undefined.
   */
  static bool GetStackSample(Isolate* isolate, v8::RegisterState* state,
                             RecordCEntryFrame record_c_entry_frame,
                             void** frames, size_t frames_limit,
                             v8::SampleInfo* sample_info,
                             bool use_simulator_reg_state = true);
  StateTag state;  // The state of the VM.
  void* pc;        // Instruction pointer.
  union {
    void* tos;  // Top stack value (*sp).
    void* external_callback_entry;
  };
  static const unsigned kMaxFramesCountLog2 = 8;
  static const unsigned kMaxFramesCount = (1 << kMaxFramesCountLog2) - 1;
  void* stack[kMaxFramesCount];                 // Call stack.
  unsigned frames_count : kMaxFramesCountLog2;  // Number of captured frames.
  bool has_external_callback : 1;
  bool update_stats : 1;  // Whether the sample should update aggregated stats.
};
118
+
119
/**
 * CpuProfileNode represents a node in a call graph.
 */
class V8_EXPORT CpuProfileNode {
 public:
  struct LineTick {
    /** The 1-based number of the source line where the function originates. */
    int line;

    /** The count of samples associated with the source line. */
    unsigned int hit_count;
  };

  /** Returns function name (empty string for anonymous functions.) */
  Local<String> GetFunctionName() const;

  /**
   * Returns function name (empty string for anonymous functions.)
   * The string ownership is *not* passed to the caller. It stays valid until
   * profile is deleted. The function is thread safe.
   */
  const char* GetFunctionNameStr() const;

  /** Returns id of the script where function is located. */
  int GetScriptId() const;

  /** Returns resource name for script from where the function originates. */
  Local<String> GetScriptResourceName() const;

  /**
   * Returns resource name for script from where the function originates.
   * The string ownership is *not* passed to the caller. It stays valid until
   * profile is deleted. The function is thread safe.
   */
  const char* GetScriptResourceNameStr() const;

  /**
   * Returns the number, 1-based, of the line where the function originates.
   * kNoLineNumberInfo if no line number information is available.
   */
  int GetLineNumber() const;

  /**
   * Returns 1-based number of the column where the function originates.
   * kNoColumnNumberInfo if no column number information is available.
   */
  int GetColumnNumber() const;

  /**
   * Returns the number of the function's source lines that collect the samples.
   */
  unsigned int GetHitLineCount() const;

  /** Returns the set of source lines that collect the samples.
   * The caller allocates buffer and responsible for releasing it.
   * True if all available entries are copied, otherwise false.
   * The function copies nothing if buffer is not large enough.
   */
  bool GetLineTicks(LineTick* entries, unsigned int length) const;

  /** Returns bailout reason for the function
   * if the optimization was disabled for it.
   */
  const char* GetBailoutReason() const;

  /**
   * Returns the count of samples where the function was currently executing.
   */
  unsigned GetHitCount() const;

  /** Returns function entry UID. */
  V8_DEPRECATE_SOON(
      "Use GetScriptId, GetLineNumber, and GetColumnNumber instead.",
      unsigned GetCallUid() const);

  /** Returns id of the node. The id is unique within the tree */
  unsigned GetNodeId() const;

  /** Returns child nodes count of the node. */
  int GetChildrenCount() const;

  /** Retrieves a child node by index. */
  const CpuProfileNode* GetChild(int index) const;

  /** Retrieves deopt infos for the node. */
  const std::vector<CpuProfileDeoptInfo>& GetDeoptInfos() const;

  static const int kNoLineNumberInfo = Message::kNoLineNumberInfo;
  static const int kNoColumnNumberInfo = Message::kNoColumnInfo;
};
209
+
210
+
211
/**
 * CpuProfile contains a CPU profile in a form of top-down call tree
 * (from main() down to functions that do all the work).
 */
class V8_EXPORT CpuProfile {
 public:
  /** Returns CPU profile title. */
  Local<String> GetTitle() const;

  /** Returns the root node of the top down call tree. */
  const CpuProfileNode* GetTopDownRoot() const;

  /**
   * Returns number of samples recorded. The samples are not recorded unless
   * |record_samples| parameter of CpuProfiler::StartCpuProfiling is true.
   */
  int GetSamplesCount() const;

  /**
   * Returns profile node corresponding to the top frame of the sample at
   * the given index.
   */
  const CpuProfileNode* GetSample(int index) const;

  /**
   * Returns the timestamp of the sample. The timestamp is the number of
   * microseconds since some unspecified starting point.
   * The point is equal to the starting point used by GetStartTime.
   */
  int64_t GetSampleTimestamp(int index) const;

  /**
   * Returns time when the profile recording was started (in microseconds)
   * since some unspecified starting point.
   */
  int64_t GetStartTime() const;

  /**
   * Returns time when the profile recording was stopped (in microseconds)
   * since some unspecified starting point.
   * The point is equal to the starting point used by GetStartTime.
   */
  int64_t GetEndTime() const;

  /**
   * Deletes the profile and removes it from CpuProfiler's list.
   * All pointers to nodes previously returned become invalid.
   */
  void Delete();
};
261
+
262
/**
 * Controls how stack frames are attributed to source lines in the resulting
 * CpuProfile tree.
 */
enum CpuProfilingMode {
  // In the resulting CpuProfile tree, intermediate nodes in a stack trace
  // (from the root to a leaf) will have line numbers that point to the start
  // line of the function, rather than the line of the callsite of the child.
  kLeafNodeLineNumbers,
  // In the resulting CpuProfile tree, nodes are separated based on the line
  // number of their callsite in their parent.
  kCallerLineNumbers,
};
271
+
272
+ /**
273
+ * Interface for controlling CPU profiling. Instance of the
274
+ * profiler can be created using v8::CpuProfiler::New method.
275
+ */
276
+ class V8_EXPORT CpuProfiler {
277
+ public:
278
+ /**
279
+ * Creates a new CPU profiler for the |isolate|. The isolate must be
280
+ * initialized. The profiler object must be disposed after use by calling
281
+ * |Dispose| method.
282
+ */
283
+ static CpuProfiler* New(Isolate* isolate);
284
+
285
+ /**
286
+ * Synchronously collect current stack sample in all profilers attached to
287
+ * the |isolate|. The call does not affect number of ticks recorded for
288
+ * the current top node.
289
+ */
290
+ static void CollectSample(Isolate* isolate);
291
+
292
+ /**
293
+ * Disposes the CPU profiler object.
294
+ */
295
+ void Dispose();
296
+
297
+ /**
298
+ * Changes default CPU profiler sampling interval to the specified number
299
+ * of microseconds. Default interval is 1000us. This method must be called
300
+ * when there are no profiles being recorded.
301
+ */
302
+ void SetSamplingInterval(int us);
303
+
304
+ /**
305
+ * Starts collecting CPU profile. Title may be an empty string. It
306
+ * is allowed to have several profiles being collected at
307
+ * once. Attempts to start collecting several profiles with the same
308
+ * title are silently ignored. While collecting a profile, functions
309
+ * from all security contexts are included in it. The token-based
310
+ * filtering is only performed when querying for a profile.
311
+ *
312
+ * |record_samples| parameter controls whether individual samples should
313
+ * be recorded in addition to the aggregated tree.
314
+ */
315
+ void StartProfiling(Local<String> title, CpuProfilingMode mode,
316
+ bool record_samples = false);
317
+ /**
318
+ * The same as StartProfiling above, but the CpuProfilingMode defaults to
319
+ * kLeafNodeLineNumbers mode, which was the previous default behavior of the
320
+ * profiler.
321
+ */
322
+ void StartProfiling(Local<String> title, bool record_samples = false);
323
+
324
+ /**
325
+ * Stops collecting CPU profile with a given title and returns it.
326
+ * If the title given is empty, finishes the last profile started.
327
+ */
328
+ CpuProfile* StopProfiling(Local<String> title);
329
+
330
+ /**
331
+ * Force collection of a sample. Must be called on the VM thread.
332
+ * Recording the forced sample does not contribute to the aggregated
333
+ * profile statistics.
334
+ */
335
+ V8_DEPRECATED("Use static CollectSample(Isolate*) instead.",
336
+ void CollectSample());
337
+
338
+ /**
339
+ * Tells the profiler whether the embedder is idle.
340
+ */
341
+ V8_DEPRECATED("Use Isolate::SetIdle(bool) instead.",
342
+ void SetIdle(bool is_idle));
343
+
344
+ /**
345
+ * Generate more detailed source positions to code objects. This results in
346
+ * better results when mapping profiling samples to script source.
347
+ */
348
+ static void UseDetailedSourcePositionsForProfiling(Isolate* isolate);
349
+
350
+ private:
351
+ CpuProfiler();
352
+ ~CpuProfiler();
353
+ CpuProfiler(const CpuProfiler&);
354
+ CpuProfiler& operator=(const CpuProfiler&);
355
+ };
356
+
357
+
358
/**
 * HeapSnapshotEdge represents a directed connection between heap
 * graph nodes: from retainers to retained nodes.
 */
class V8_EXPORT HeapGraphEdge {
 public:
  enum Type {
    kContextVariable = 0,  // A variable from a function context.
    kElement = 1,          // An element of an array.
    kProperty = 2,         // A named object property.
    kInternal = 3,         // A link that can't be accessed from JS,
                           // thus, its name isn't a real property name
                           // (e.g. parts of a ConsString).
    kHidden = 4,           // A link that is needed for proper sizes
                           // calculation, but may be hidden from user.
    kShortcut = 5,         // A link that must not be followed during
                           // sizes calculation.
    kWeak = 6              // A weak reference (ignored by the GC).
  };

  /** Returns edge type (see HeapGraphEdge::Type). */
  Type GetType() const;

  /**
   * Returns edge name. This can be a variable name, an element index, or
   * a property name.
   */
  Local<Value> GetName() const;

  /** Returns origin node. */
  const HeapGraphNode* GetFromNode() const;

  /** Returns destination node. */
  const HeapGraphNode* GetToNode() const;
};
393
+
394
+
395
/**
 * HeapGraphNode represents a node in a heap graph.
 */
class V8_EXPORT HeapGraphNode {
 public:
  enum Type {
    kHidden = 0,         // Hidden node, may be filtered when shown to user.
    kArray = 1,          // An array of elements.
    kString = 2,         // A string.
    kObject = 3,         // A JS object (except for arrays and strings).
    kCode = 4,           // Compiled code.
    kClosure = 5,        // Function closure.
    kRegExp = 6,         // RegExp.
    kHeapNumber = 7,     // Number stored in the heap.
    kNative = 8,         // Native object (not from V8 heap).
    kSynthetic = 9,      // Synthetic object, usually used for grouping
                         // snapshot items together.
    kConsString = 10,    // Concatenated string. A pair of pointers to strings.
    kSlicedString = 11,  // Sliced string. A fragment of another string.
    kSymbol = 12,        // A Symbol (ES6).
    kBigInt = 13         // BigInt.
  };

  /** Returns node type (see HeapGraphNode::Type). */
  Type GetType() const;

  /**
   * Returns node name. Depending on node's type this can be the name
   * of the constructor (for objects), the name of the function (for
   * closures), string value, or an empty string (for compiled code).
   */
  Local<String> GetName() const;

  /**
   * Returns node id. For the same heap object, the id remains the same
   * across all snapshots.
   */
  SnapshotObjectId GetId() const;

  /** Returns node's own size, in bytes. */
  size_t GetShallowSize() const;

  /** Returns child nodes count of the node. */
  int GetChildrenCount() const;

  /** Retrieves a child by index. */
  const HeapGraphEdge* GetChild(int index) const;
};
443
+
444
+
445
/**
 * An interface for exporting data from V8, using "push" model.
 */
class V8_EXPORT OutputStream {  // NOLINT
 public:
  enum WriteResult {
    kContinue = 0,
    kAbort = 1
  };
  virtual ~OutputStream() = default;
  /** Notify about the end of stream. */
  virtual void EndOfStream() = 0;
  /** Get preferred output chunk size. Called only once. */
  virtual int GetChunkSize() { return 1024; }
  /**
   * Writes the next chunk of snapshot data into the stream. Writing
   * can be stopped by returning kAbort as function result. EndOfStream
   * will not be called in case writing was aborted.
   */
  virtual WriteResult WriteAsciiChunk(char* data, int size) = 0;
  /**
   * Writes the next chunk of heap stats data into the stream. Writing
   * can be stopped by returning kAbort as function result. EndOfStream
   * will not be called in case writing was aborted.
   */
  virtual WriteResult WriteHeapStatsChunk(HeapStatsUpdate* data, int count) {
    // Default: heap stats streaming is unsupported.
    return kAbort;
  }
};
474
+
475
+
476
/**
 * HeapSnapshots record the state of the JS heap at some moment.
 */
class V8_EXPORT HeapSnapshot {
 public:
  enum SerializationFormat {
    kJSON = 0  // See format description near 'Serialize' method.
  };

  /** Returns the root node of the heap graph. */
  const HeapGraphNode* GetRoot() const;

  /** Returns a node by its id. */
  const HeapGraphNode* GetNodeById(SnapshotObjectId id) const;

  /** Returns total nodes count in the snapshot. */
  int GetNodesCount() const;

  /** Returns a node by index. */
  const HeapGraphNode* GetNode(int index) const;

  /** Returns a max seen JS object Id. */
  SnapshotObjectId GetMaxSnapshotJSObjectId() const;

  /**
   * Deletes the snapshot and removes it from HeapProfiler's list.
   * All pointers to nodes, edges and paths previously returned become
   * invalid.
   */
  void Delete();

  /**
   * Prepare a serialized representation of the snapshot. The result
   * is written into the stream provided in chunks of specified size.
   * The total length of the serialized snapshot is unknown in
   * advance, it can be roughly equal to JS heap size (that means,
   * it can be really big - tens of megabytes).
   *
   * For the JSON format, heap contents are represented as an object
   * with the following structure:
   *
   *  {
   *    snapshot: {
   *      title: "...",
   *      uid: nnn,
   *      meta: { meta-info },
   *      node_count: nnn,
   *      edge_count: nnn
   *    },
   *    nodes: [nodes array],
   *    edges: [edges array],
   *    strings: [strings array]
   *  }
   *
   * Nodes reference strings, other nodes, and edges by their indexes
   * in corresponding arrays.
   */
  void Serialize(OutputStream* stream,
                 SerializationFormat format = kJSON) const;
};
536
+
537
+
538
/**
 * An interface for reporting progress and controlling long-running
 * activities.
 */
class V8_EXPORT ActivityControl {  // NOLINT
 public:
  enum ControlOption {
    kContinue = 0,
    kAbort = 1
  };
  virtual ~ActivityControl() = default;
  /**
   * Notify about current progress. The activity can be stopped by
   * returning kAbort as the callback result.
   */
  virtual ControlOption ReportProgressValue(int done, int total) = 0;
};
555
+
556
+
557
/**
 * AllocationProfile is a sampled profile of allocations done by the program.
 * This is structured as a call-graph.
 */
class V8_EXPORT AllocationProfile {
 public:
  struct Allocation {
    /**
     * Size of the sampled allocation object.
     */
    size_t size;

    /**
     * The number of objects of such size that were sampled.
     */
    unsigned int count;
  };

  /**
   * Represents a node in the call-graph.
   */
  struct Node {
    /**
     * Name of the function. May be empty for anonymous functions or if the
     * script corresponding to this function has been unloaded.
     */
    Local<String> name;

    /**
     * Name of the script containing the function. May be empty if the script
     * name is not available, or if the script has been unloaded.
     */
    Local<String> script_name;

    /**
     * id of the script where the function is located. May be equal to
     * v8::UnboundScript::kNoScriptId in cases where the script doesn't exist.
     */
    int script_id;

    /**
     * Start position of the function in the script.
     */
    int start_position;

    /**
     * 1-indexed line number where the function starts. May be
     * kNoLineNumberInfo if no line number information is available.
     */
    int line_number;

    /**
     * 1-indexed column number where the function starts. May be
     * kNoColumnNumberInfo if no line number information is available.
     */
    int column_number;

    /**
     * Unique id of the node.
     */
    uint32_t node_id;

    /**
     * List of callees called from this node for which we have sampled
     * allocations. The lifetime of the children is scoped to the containing
     * AllocationProfile.
     */
    std::vector<Node*> children;

    /**
     * List of self allocations done by this node in the call-graph.
     */
    std::vector<Allocation> allocations;
  };

  /**
   * Represent a single sample recorded for an allocation.
   */
  struct Sample {
    /**
     * id of the node in the profile tree.
     */
    uint32_t node_id;

    /**
     * Size of the sampled allocation object.
     */
    size_t size;

    /**
     * The number of objects of such size that were sampled.
     */
    unsigned int count;

    /**
     * Unique time-ordered id of the allocation sample. Can be used to track
     * what samples were added or removed between two snapshots.
     */
    uint64_t sample_id;
  };

  /**
   * Returns the root node of the call-graph. The root node corresponds to an
   * empty JS call-stack. The lifetime of the returned Node* is scoped to the
   * containing AllocationProfile.
   */
  virtual Node* GetRootNode() = 0;
  /** Returns the samples recorded for this profile. */
  virtual const std::vector<Sample>& GetSamples() = 0;

  virtual ~AllocationProfile() = default;

  // Sentinels for Node::line_number / Node::column_number when position
  // information is unavailable; aliased from v8::Message constants.
  static const int kNoLineNumberInfo = Message::kNoLineNumberInfo;
  static const int kNoColumnNumberInfo = Message::kNoColumnInfo;
};
671
+
672
/**
 * An object graph consisting of embedder objects and V8 objects.
 * Edges of the graph are strong references between the objects.
 * The embedder can build this graph during heap snapshot generation
 * to include the embedder objects in the heap snapshot.
 * Usage:
 * 1) Define derived class of EmbedderGraph::Node for embedder objects.
 * 2) Set the build embedder graph callback on the heap profiler using
 *    HeapProfiler::AddBuildEmbedderGraphCallback.
 * 3) In the callback use graph->AddEdge(node1, node2) to add an edge from
 *    node1 to node2.
 * 4) To represent references from/to V8 object, construct V8 nodes using
 *    graph->V8Node(value).
 */
class V8_EXPORT EmbedderGraph {
 public:
  class Node {
   public:
    Node() = default;
    virtual ~Node() = default;
    virtual const char* Name() = 0;
    virtual size_t SizeInBytes() = 0;
    /**
     * The corresponding V8 wrapper node if not null.
     * During heap snapshot generation the embedder node and the V8 wrapper
     * node will be merged into one node to simplify retaining paths.
     */
    virtual Node* WrapperNode() { return nullptr; }
    virtual bool IsRootNode() { return false; }
    /** Must return true for non-V8 nodes. */
    virtual bool IsEmbedderNode() { return true; }
    /**
     * Optional name prefix. It is used in Chrome for tagging detached nodes.
     */
    virtual const char* NamePrefix() { return nullptr; }

   private:
    // Nodes are non-copyable.
    Node(const Node&) = delete;
    Node& operator=(const Node&) = delete;
  };

  /**
   * Returns a node corresponding to the given V8 value. Ownership is not
   * transferred. The result pointer is valid while the graph is alive.
   */
  virtual Node* V8Node(const v8::Local<v8::Value>& value) = 0;

  /**
   * Adds the given node to the graph and takes ownership of the node.
   * Returns a raw pointer to the node that is valid while the graph is alive.
   */
  virtual Node* AddNode(std::unique_ptr<Node> node) = 0;

  /**
   * Adds an edge that represents a strong reference from the given
   * node |from| to the given node |to|. The nodes must be added to the graph
   * before calling this function.
   *
   * If name is nullptr, the edge will have auto-increment indexes, otherwise
   * it will be named accordingly.
   */
  virtual void AddEdge(Node* from, Node* to, const char* name = nullptr) = 0;

  virtual ~EmbedderGraph() = default;
};
737
+
738
/**
 * Interface for controlling heap profiling. Instance of the
 * profiler can be retrieved using v8::Isolate::GetHeapProfiler.
 */
class V8_EXPORT HeapProfiler {
 public:
  enum SamplingFlags {
    kSamplingNoFlags = 0,
    kSamplingForceGC = 1 << 0,
  };

  typedef std::unordered_set<const v8::PersistentBase<v8::Value>*>
      RetainerChildren;
  typedef std::vector<std::pair<v8::RetainedObjectInfo*, RetainerChildren>>
      RetainerGroups;
  typedef std::vector<std::pair<const v8::PersistentBase<v8::Value>*,
                                const v8::PersistentBase<v8::Value>*>>
      RetainerEdges;

  struct RetainerInfos {
    RetainerGroups groups;
    RetainerEdges edges;
  };

  /**
   * Callback function invoked to retrieve all RetainerInfos from the embedder.
   */
  typedef RetainerInfos (*GetRetainerInfosCallback)(v8::Isolate* isolate);

  /**
   * Callback function invoked for obtaining RetainedObjectInfo for
   * the given JavaScript wrapper object. It is prohibited to enter V8
   * while the callback is running: only getters on the handle and
   * GetPointerFromInternalField on the objects are allowed.
   */
  typedef RetainedObjectInfo* (*WrapperInfoCallback)(uint16_t class_id,
                                                     Local<Value> wrapper);

  /**
   * Callback function invoked during heap snapshot generation to retrieve
   * the embedder object graph. The callback should use graph->AddEdge(..) to
   * add references between the objects.
   * The callback must not trigger garbage collection in V8.
   */
  typedef void (*BuildEmbedderGraphCallback)(v8::Isolate* isolate,
                                             v8::EmbedderGraph* graph,
                                             void* data);

  /** TODO(addaleax): Remove */
  typedef void (*LegacyBuildEmbedderGraphCallback)(v8::Isolate* isolate,
                                                   v8::EmbedderGraph* graph);

  /** Returns the number of snapshots taken. */
  int GetSnapshotCount();

  /** Returns a snapshot by index. */
  const HeapSnapshot* GetHeapSnapshot(int index);

  /**
   * Returns SnapshotObjectId for a heap object referenced by |value| if
   * it has been seen by the heap profiler, kUnknownObjectId otherwise.
   */
  SnapshotObjectId GetObjectId(Local<Value> value);

  /**
   * Returns heap object with given SnapshotObjectId if the object is alive,
   * otherwise empty handle is returned.
   */
  Local<Value> FindObjectById(SnapshotObjectId id);

  /**
   * Clears internal map from SnapshotObjectId to heap object. The new objects
   * will not be added into it unless a heap snapshot is taken or heap object
   * tracking is kicked off.
   */
  void ClearObjectIds();

  /**
   * A constant for invalid SnapshotObjectId. GetSnapshotObjectId will return
   * it in case heap profiler cannot find id for the object passed as
   * parameter. HeapSnapshot::GetNodeById will always return NULL for such id.
   */
  static const SnapshotObjectId kUnknownObjectId = 0;

  /**
   * Callback interface for retrieving user friendly names of global objects.
   */
  class ObjectNameResolver {
   public:
    /**
     * Returns name to be used in the heap snapshot for given node. Returned
     * string must stay alive until snapshot collection is completed.
     */
    virtual const char* GetName(Local<Object> object) = 0;

   protected:
    virtual ~ObjectNameResolver() = default;
  };

  /**
   * Takes a heap snapshot and returns it.
   */
  const HeapSnapshot* TakeHeapSnapshot(
      ActivityControl* control = nullptr,
      ObjectNameResolver* global_object_name_resolver = nullptr);

  /**
   * Starts tracking of heap objects population statistics. After calling
   * this method, all heap objects relocations done by the garbage collector
   * are being registered.
   *
   * |track_allocations| parameter controls whether stack trace of each
   * allocation in the heap will be recorded and reported as part of
   * HeapSnapshot.
   */
  void StartTrackingHeapObjects(bool track_allocations = false);

  /**
   * Adds a new time interval entry to the aggregated statistics array. The
   * time interval entry contains information on the current heap objects
   * population size. The method also updates aggregated statistics and
   * reports updates for all previous time intervals via the OutputStream
   * object. Updates on each time interval are provided as a stream of the
   * HeapStatsUpdate structure instances.
   * If |timestamp_us| is supplied, timestamp of the new entry will be written
   * into it. The return value of the function is the last seen heap object Id.
   *
   * StartTrackingHeapObjects must be called before the first call to this
   * method.
   */
  SnapshotObjectId GetHeapStats(OutputStream* stream,
                                int64_t* timestamp_us = nullptr);

  /**
   * Stops tracking of heap objects population statistics, cleans up all
   * collected data. StartHeapObjectsTracking must be called again prior to
   * calling GetHeapStats next time.
   */
  void StopTrackingHeapObjects();

  /**
   * Starts gathering a sampling heap profile. A sampling heap profile is
   * similar to tcmalloc's heap profiler and Go's mprof. It samples object
   * allocations and builds an online 'sampling' heap profile. At any point in
   * time, this profile is expected to be a representative sample of objects
   * currently live in the system. Each sampled allocation includes the stack
   * trace at the time of allocation, which makes this really useful for memory
   * leak detection.
   *
   * This mechanism is intended to be cheap enough that it can be used in
   * production with minimal performance overhead.
   *
   * Allocations are sampled using a randomized Poisson process. On average, one
   * allocation will be sampled every |sample_interval| bytes allocated. The
   * |stack_depth| parameter controls the maximum number of stack frames to be
   * captured on each allocation.
   *
   * NOTE: This is a proof-of-concept at this point. Right now we only sample
   * newspace allocations. Support for paged space allocation (e.g. pre-tenured
   * objects, large objects, code objects, etc.) and native allocations
   * doesn't exist yet, but is anticipated in the future.
   *
   * Objects allocated before the sampling is started will not be included in
   * the profile.
   *
   * Returns false if a sampling heap profiler is already running.
   */
  bool StartSamplingHeapProfiler(uint64_t sample_interval = 512 * 1024,
                                 int stack_depth = 16,
                                 SamplingFlags flags = kSamplingNoFlags);

  /**
   * Stops the sampling heap profile and discards the current profile.
   */
  void StopSamplingHeapProfiler();

  /**
   * Returns the sampled profile of allocations allocated (and still live) since
   * StartSamplingHeapProfiler was called. The ownership of the pointer is
   * transferred to the caller. Returns nullptr if sampling heap profiler is not
   * active.
   */
  AllocationProfile* GetAllocationProfile();

  /**
   * Deletes all snapshots taken. All previously returned pointers to
   * snapshots and their contents become invalid after this call.
   */
  void DeleteAllHeapSnapshots();

  /** Binds a callback to embedder's class ID. */
  V8_DEPRECATED(
      "Use AddBuildEmbedderGraphCallback to provide info about embedder nodes",
      void SetWrapperClassInfoProvider(uint16_t class_id,
                                       WrapperInfoCallback callback));

  V8_DEPRECATED(
      "Use AddBuildEmbedderGraphCallback to provide info about embedder nodes",
      void SetGetRetainerInfosCallback(GetRetainerInfosCallback callback));

  V8_DEPRECATED(
      "Use AddBuildEmbedderGraphCallback to provide info about embedder nodes",
      void SetBuildEmbedderGraphCallback(
          LegacyBuildEmbedderGraphCallback callback));
  /** Registers/unregisters an embedder-graph callback with its |data| arg. */
  void AddBuildEmbedderGraphCallback(BuildEmbedderGraphCallback callback,
                                     void* data);
  void RemoveBuildEmbedderGraphCallback(BuildEmbedderGraphCallback callback,
                                        void* data);

  /**
   * Default value of persistent handle class ID. Must not be used to
   * define a class. Can be used to reset a class of a persistent
   * handle.
   */
  static const uint16_t kPersistentHandleNoClassId = 0;

 private:
  // Not user-constructible or copyable: obtain the instance via
  // v8::Isolate::GetHeapProfiler (see class comment).
  HeapProfiler();
  ~HeapProfiler();
  HeapProfiler(const HeapProfiler&);
  HeapProfiler& operator=(const HeapProfiler&);
};
960
+
961
/**
 * Interface for providing information about embedder's objects
 * held by global handles. This information is reported in two ways:
 *
 *  1. When calling AddObjectGroup, an embedder may pass
 *     RetainedObjectInfo instance describing the group.  To collect
 *     this information while taking a heap snapshot, V8 calls GC
 *     prologue and epilogue callbacks.
 *
 *  2. When a heap snapshot is collected, V8 additionally
 *     requests RetainedObjectInfos for persistent handles that
 *     were not previously reported via AddObjectGroup.
 *
 * Thus, if an embedder wants to provide information about native
 * objects for heap snapshots, it can do it in a GC prologue
 * handler, and / or by assigning wrapper class ids in the following way:
 *
 *  1. Bind a callback to class id by calling SetWrapperClassInfoProvider.
 *  2. Call SetWrapperClassId on certain persistent handles.
 *
 * V8 takes ownership of RetainedObjectInfo instances passed to it and
 * keeps them alive only during snapshot collection. Afterwards, they
 * are freed by calling the Dispose class function.
 */
class V8_EXPORT RetainedObjectInfo {  // NOLINT
 public:
  /** Called by V8 when it no longer needs an instance. */
  virtual void Dispose() = 0;

  /** Returns whether two instances are equivalent. */
  virtual bool IsEquivalent(RetainedObjectInfo* other) = 0;

  /**
   * Returns hash value for the instance. Equivalent instances
   * must have the same hash value.
   */
  virtual intptr_t GetHash() = 0;

  /**
   * Returns human-readable label. It must be a null-terminated UTF-8
   * encoded string. V8 copies its contents during a call to GetLabel.
   */
  virtual const char* GetLabel() = 0;

  /**
   * Returns human-readable group label. It must be a null-terminated UTF-8
   * encoded string. V8 copies its contents during a call to GetGroupLabel.
   * Heap snapshot generator will collect all the group names, create
   * top level entries with these names and attach the objects to the
   * corresponding top level group objects. There is a default
   * implementation which is required because embedders don't have their
   * own implementation yet.
   */
  virtual const char* GetGroupLabel() { return GetLabel(); }

  /**
   * Returns element count in case if a global handle retains
   * a subgraph by holding one of its nodes.
   * Returns -1 by default (count unknown / not applicable).
   */
  virtual intptr_t GetElementCount() { return -1; }

  /** Returns embedder's object size in bytes. Defaults to -1 (unknown). */
  virtual intptr_t GetSizeInBytes() { return -1; }

 protected:
  RetainedObjectInfo() = default;
  virtual ~RetainedObjectInfo() = default;

 private:
  // Non-copyable.
  RetainedObjectInfo(const RetainedObjectInfo&);
  RetainedObjectInfo& operator=(const RetainedObjectInfo&);
};
1033
+
1034
+
1035
/**
 * A struct for exporting HeapStats data from V8, using "push" model.
 * See HeapProfiler::GetHeapStats.
 */
struct HeapStatsUpdate {
  HeapStatsUpdate(uint32_t index, uint32_t count, uint32_t size)
      : index(index), count(count), size(size) { }
  uint32_t index;  // Index of the time interval that was changed.
  uint32_t count;  // New value of count field for the interval with this index.
  uint32_t size;  // New value of size field for the interval with this index.
};
1046
+
1047
// X-macro listing every code event kind; expanded below to generate the
// enumerators of CodeEventType.
#define CODE_EVENTS_LIST(V) \
  V(Builtin)                \
  V(Callback)               \
  V(Eval)                   \
  V(Function)               \
  V(InterpretedFunction)    \
  V(Handler)                \
  V(BytecodeHandler)        \
  V(LazyCompile)            \
  V(RegExp)                 \
  V(Script)                 \
  V(Stub)

/**
 * Note that this enum may be extended in the future. Please include a default
 * case if this enum is used in a switch statement.
 */
enum CodeEventType {
  kUnknownType = 0
// Expands each list entry Name into an enumerator k<Name>Type.
#define V(Name) , k##Name##Type
  CODE_EVENTS_LIST(V)
#undef V
};
1070
+
1071
/**
 * Representation of a code creation event
 */
class V8_EXPORT CodeEvent {
 public:
  uintptr_t GetCodeStartAddress();
  size_t GetCodeSize();
  Local<String> GetFunctionName();
  Local<String> GetScriptName();
  int GetScriptLine();
  int GetScriptColumn();
  /**
   * NOTE (mmarchini): We can't allocate objects in the heap when we collect
   * existing code, and both the code type and the comment are not stored in the
   * heap, so we return those as const char*.
   */
  CodeEventType GetCodeType();
  const char* GetComment();

  /** Returns the name string for the given code event type. */
  static const char* GetCodeEventTypeName(CodeEventType code_event_type);
};
1092
+
1093
/**
 * Interface to listen to code creation events.
 */
class V8_EXPORT CodeEventHandler {
 public:
  /**
   * Creates a new listener for the |isolate|. The isolate must be initialized.
   * The listener object must be disposed after use by calling |Dispose| method.
   * Multiple listeners can be created for the same isolate.
   * NOTE(review): no Dispose() method is declared in this class; deletion via
   * the virtual destructor appears to be the disposal mechanism — confirm.
   */
  explicit CodeEventHandler(Isolate* isolate);
  virtual ~CodeEventHandler();

  /** Invoked for each code event; to be implemented by the embedder. */
  virtual void Handle(CodeEvent* code_event) = 0;

  /** Turns the listener on/off. */
  void Enable();
  void Disable();

 private:
  // Default construction and copying are disallowed; a listener is always
  // bound to a specific isolate.
  CodeEventHandler();
  CodeEventHandler(const CodeEventHandler&);
  CodeEventHandler& operator=(const CodeEventHandler&);
  // Opaque pointer to the internal listener implementation.
  void* internal_listener_;
};
1117
+
1118
+ } // namespace v8
1119
+
1120
+
1121
+ #endif // V8_V8_PROFILER_H_