libv8 6.7.288.46.1-universal-darwin-16 → 7.3.492.27.1-universal-darwin-16
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +4 -4
- data/lib/libv8/version.rb +1 -1
- data/vendor/v8/include/libplatform/libplatform.h +1 -6
- data/vendor/v8/include/libplatform/v8-tracing.h +7 -5
- data/vendor/v8/include/v8-inspector.h +14 -7
- data/vendor/v8/include/v8-internal.h +373 -0
- data/vendor/v8/include/v8-platform.h +66 -101
- data/vendor/v8/include/v8-profiler.h +157 -31
- data/vendor/v8/include/v8-util.h +27 -13
- data/vendor/v8/include/v8-version.h +4 -4
- data/vendor/v8/include/v8-wasm-trap-handler-posix.h +31 -0
- data/vendor/v8/include/v8-wasm-trap-handler-win.h +28 -0
- data/vendor/v8/include/v8.h +1192 -967
- data/vendor/v8/include/v8config.h +33 -72
- data/vendor/v8/out.gn/libv8/obj/libv8_libbase.a +0 -0
- data/vendor/v8/out.gn/libv8/obj/libv8_libplatform.a +0 -0
- data/vendor/v8/out.gn/libv8/obj/libv8_monolith.a +0 -0
- metadata +6 -4
data/vendor/v8/include/v8-platform.h

@@ -53,6 +53,15 @@ class TaskRunner {
    */
   virtual void PostTask(std::unique_ptr<Task> task) = 0;

+  /**
+   * Schedules a task to be invoked by this TaskRunner. The TaskRunner
+   * implementation takes ownership of |task|. The |task| cannot be nested
+   * within other task executions.
+   *
+   * Requires that |TaskRunner::NonNestableTasksEnabled()| is true.
+   */
+  virtual void PostNonNestableTask(std::unique_ptr<Task> task) {}
+
   /**
    * Schedules a task to be invoked by this TaskRunner. The task is scheduled
    * after the given number of seconds |delay_in_seconds|. The TaskRunner

@@ -64,7 +73,7 @@ class TaskRunner {
   /**
    * Schedules an idle task to be invoked by this TaskRunner. The task is
    * scheduled when the embedder is idle. Requires that
-   * TaskRunner::
+   * |TaskRunner::IdleTasksEnabled()| is true. Idle tasks may be reordered
    * relative to other task types and may be starved for an arbitrarily long
    * time if no idle time is available. The TaskRunner implementation takes
    * ownership of |task|.

@@ -76,6 +85,11 @@ class TaskRunner {
    */
   virtual bool IdleTasksEnabled() = 0;

+  /**
+   * Returns true if non-nestable tasks are enabled for this TaskRunner.
+   */
+  virtual bool NonNestableTasksEnabled() const { return false; }
+
   TaskRunner() = default;
   virtual ~TaskRunner() = default;
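TaskRunner gains an opt-in pair for non-nestable tasks: PostNonNestableTask() and NonNestableTasksEnabled(), both with conservative defaults. Below is a minimal sketch of an embedder-side runner that opts in; it assumes only the v8::TaskRunner interface shown above, and the class name, the synchronous queue and the delay handling are illustrative, not part of libv8.

```cpp
#include <memory>
#include <queue>

#include "v8-platform.h"  // v8::Task, v8::IdleTask, v8::TaskRunner

// Toy runner: everything is queued and drained by the embedder's own loop.
class QueueTaskRunner : public v8::TaskRunner {
 public:
  void PostTask(std::unique_ptr<v8::Task> task) override {
    queue_.push(std::move(task));
  }

  // Non-nestable tasks only ever run from RunPendingTasks(), never from
  // inside another task, so the stricter contract is satisfied trivially.
  void PostNonNestableTask(std::unique_ptr<v8::Task> task) override {
    queue_.push(std::move(task));
  }
  bool NonNestableTasksEnabled() const override { return true; }

  void PostDelayedTask(std::unique_ptr<v8::Task> task,
                       double /*delay_in_seconds*/) override {
    queue_.push(std::move(task));  // delay ignored in this toy sketch
  }

  // Idle tasks are declared unsupported, so V8 should not post any; the
  // override only satisfies the pure-virtual interface.
  void PostIdleTask(std::unique_ptr<v8::IdleTask> /*task*/) override {}
  bool IdleTasksEnabled() override { return false; }

  // Called by the embedder's event loop, outside of any task execution.
  void RunPendingTasks() {
    while (!queue_.empty()) {
      std::unique_ptr<v8::Task> task = std::move(queue_.front());
      queue_.pop();
      task->Run();
    }
  }

 private:
  std::queue<std::unique_ptr<v8::Task>> queue_;
};
```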
@@ -207,6 +221,7 @@ class PageAllocator {
    */
   enum Permission {
     kNoAccess,
+    kRead,
     kReadWrite,
     // TODO(hpayer): Remove this flag. Memory should never be rwx.
     kReadWriteExecute,

@@ -235,6 +250,13 @@ class PageAllocator {
    */
   virtual bool SetPermissions(void* address, size_t length,
                               Permission permissions) = 0;
+
+  /**
+   * Frees memory in the given [address, address + size) range. address and size
+   * should be operating system page-aligned. The next write to this
+   * memory area brings the memory transparently back.
+   */
+  virtual bool DiscardSystemPages(void* address, size_t size) { return true; }
 };

 /**
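DiscardSystemPages() is new but optional; the default stub just returns true. An embedder's PageAllocator could plausibly back it with madvise on POSIX systems. The helper below is a hedged sketch, not libv8 code: it assumes Linux semantics and anonymous private mappings, where MADV_DONTNEED drops the physical pages and the next write faults in fresh zero-filled ones, matching the comment in the header.

```cpp
#include <sys/mman.h>

#include <cstddef>

// Hypothetical helper a PageAllocator::DiscardSystemPages override might
// delegate to on Linux. Returns true when the kernel accepted the advice.
bool DiscardPages(void* address, size_t size) {
  return madvise(address, size, MADV_DONTNEED) == 0;
}
```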
@@ -245,16 +267,6 @@ class PageAllocator {
  */
 class Platform {
  public:
-  /**
-   * This enum is used to indicate whether a task is potentially long running,
-   * or causes a long wait. The embedder might want to use this hint to decide
-   * whether to execute the task on a dedicated thread.
-   */
-  enum ExpectedRuntime {
-    kShortRunningTask,
-    kLongRunningTask
-  };
-
   virtual ~Platform() = default;

   /**

@@ -289,101 +301,25 @@ class Platform {
   virtual bool OnCriticalMemoryPressure(size_t length) { return false; }

   /**
-   * Gets the number of worker threads used by
-   *
-   * work package should be split into. A return value of 0 means
-   * no worker threads available. Note that a value of 0 won't
-   * posting tasks using |CallOnWorkerThread|.
+   * Gets the number of worker threads used by
+   * Call(BlockingTask)OnWorkerThread(). This can be used to estimate the number
+   * of tasks a work package should be split into. A return value of 0 means
+   * that there are no worker threads available. Note that a value of 0 won't
+   * prohibit V8 from posting tasks using |CallOnWorkerThread|.
    */
-  virtual int NumberOfWorkerThreads() {
-    return static_cast<int>(NumberOfAvailableBackgroundThreads());
-  }
-
-  /**
-   * Deprecated. Use NumberOfWorkerThreads() instead.
-   * TODO(gab): Remove this when all embedders override
-   * NumberOfWorkerThreads() instead.
-   */
-  V8_DEPRECATE_SOON(
-      "NumberOfAvailableBackgroundThreads() is deprecated, use "
-      "NumberOfAvailableBackgroundThreads() instead.",
-      virtual size_t NumberOfAvailableBackgroundThreads()) {
-    return 0;
-  }
+  virtual int NumberOfWorkerThreads() = 0;

   /**
    * Returns a TaskRunner which can be used to post a task on the foreground.
    * This function should only be called from a foreground thread.
    */
   virtual std::shared_ptr<v8::TaskRunner> GetForegroundTaskRunner(
-      Isolate* isolate) {
-    // TODO(ahaas): Make this function abstract after it got implemented on all
-    // platforms.
-    return {};
-  }
-
-  /**
-   * Returns a TaskRunner which can be used to post a task on a background.
-   * This function should only be called from a foreground thread.
-   */
-  V8_DEPRECATE_SOON(
-      "GetBackgroundTaskRunner() is deprecated, use "
-      "GetWorkerThreadsTaskRunner() "
-      "instead.",
-      virtual std::shared_ptr<v8::TaskRunner> GetBackgroundTaskRunner(
-          Isolate* isolate)) {
-    // TODO(gab): Remove this method when all embedders have moved to
-    // GetWorkerThreadsTaskRunner().
-
-    // An implementation needs to be provided here because this is called by the
-    // default GetWorkerThreadsTaskRunner() implementation below. In practice
-    // however, all code either:
-    // - Overrides GetWorkerThreadsTaskRunner() (thus not making this call) --
-    //   i.e. all v8 code.
-    // - Overrides this method (thus not making this call) -- i.e. all
-    //   unadapted embedders.
-    abort();
-  }
-
-  /**
-   * Returns a TaskRunner which can be used to post async tasks on a worker.
-   * This function should only be called from a foreground thread.
-   */
-  virtual std::shared_ptr<v8::TaskRunner> GetWorkerThreadsTaskRunner(
-      Isolate* isolate) {
-    // TODO(gab): Make this function abstract after it got implemented on all
-    // platforms.
-    return GetBackgroundTaskRunner(isolate);
-  }
-
-  /**
-   * Schedules a task to be invoked on a background thread. |expected_runtime|
-   * indicates that the task will run a long time. The Platform implementation
-   * takes ownership of |task|. There is no guarantee about order of execution
-   * of tasks wrt order of scheduling, nor is there a guarantee about the
-   * thread the task will be run on.
-   */
-  V8_DEPRECATE_SOON(
-      "ExpectedRuntime is deprecated, use CallOnWorkerThread() instead.",
-      virtual void CallOnBackgroundThread(Task* task,
-                                          ExpectedRuntime expected_runtime)) {
-    // An implementation needs to be provided here because this is called by the
-    // default implementation below. In practice however, all code either:
-    // - Overrides the new method (thus not making this call) -- i.e. all v8
-    //   code.
-    // - Overrides this method (thus not making this call) -- i.e. all
-    //   unadapted embedders.
-    abort();
-  }
+      Isolate* isolate) = 0;

   /**
    * Schedules a task to be invoked on a worker thread.
-   * TODO(gab): Make pure virtual when all embedders override this instead of
-   * CallOnBackgroundThread().
    */
-  virtual void CallOnWorkerThread(std::unique_ptr<Task> task) {
-    CallOnBackgroundThread(task.release(), kShortRunningTask);
-  }
+  virtual void CallOnWorkerThread(std::unique_ptr<Task> task) = 0;

   /**
    * Schedules a task that blocks the main thread to be invoked with
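With ExpectedRuntime, CallOnBackgroundThread() and the background/worker task-runner getters removed, NumberOfWorkerThreads(), GetForegroundTaskRunner() and CallOnWorkerThread() are now pure virtual, so a hand-rolled v8::Platform written against 6.7 no longer compiles unchanged. Embedders that do not need a custom platform can lean on the bundled default platform from libplatform; the sketch below is the standard initialization sequence, shown only as a reminder of the migration path.

```cpp
#include <memory>

#include "libplatform/libplatform.h"
#include "v8.h"

int main() {
  // NewDefaultPlatform() already implements the members that became pure
  // virtual in this release (worker threads, foreground task runners, ...).
  std::unique_ptr<v8::Platform> platform = v8::platform::NewDefaultPlatform();
  v8::V8::InitializePlatform(platform.get());
  v8::V8::Initialize();

  // ... create isolates and contexts, run scripts ...

  v8::V8::Dispose();
  v8::V8::ShutdownPlatform();
  return 0;
}
```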
@@ -395,12 +331,30 @@ class Platform {
     CallOnWorkerThread(std::move(task));
   }

+  /**
+   * Schedules a task to be invoked with low-priority on a worker thread.
+   */
+  virtual void CallLowPriorityTaskOnWorkerThread(std::unique_ptr<Task> task) {
+    // Embedders may optionally override this to process these tasks in a low
+    // priority pool.
+    CallOnWorkerThread(std::move(task));
+  }
+
+  /**
+   * Schedules a task to be invoked on a worker thread after |delay_in_seconds|
+   * expires.
+   */
+  virtual void CallDelayedOnWorkerThread(std::unique_ptr<Task> task,
+                                         double delay_in_seconds) = 0;
+
   /**
    * Schedules a task to be invoked on a foreground thread wrt a specific
    * |isolate|. Tasks posted for the same isolate should be execute in order of
    * scheduling. The definition of "foreground" is opaque to V8.
    */
-
+  V8_DEPRECATE_SOON(
+      "Use a taskrunner acquired by GetForegroundTaskRunner instead.",
+      virtual void CallOnForegroundThread(Isolate* isolate, Task* task)) = 0;

   /**
    * Schedules a task to be invoked on a foreground thread wrt a specific
@@ -408,8 +362,10 @@ class Platform {
    * Tasks posted for the same isolate should be execute in order of
    * scheduling. The definition of "foreground" is opaque to V8.
    */
-
-
+  V8_DEPRECATE_SOON(
+      "Use a taskrunner acquired by GetForegroundTaskRunner instead.",
+      virtual void CallDelayedOnForegroundThread(Isolate* isolate, Task* task,
+                                                 double delay_in_seconds)) = 0;

   /**
    * Schedules a task to be invoked on a foreground thread wrt a specific

@@ -419,15 +375,18 @@ class Platform {
    * starved for an arbitrarily long time if no idle time is available.
    * The definition of "foreground" is opaque to V8.
    */
-
-
+  V8_DEPRECATE_SOON(
+      "Use a taskrunner acquired by GetForegroundTaskRunner instead.",
+      virtual void CallIdleOnForegroundThread(Isolate* isolate,
+                                              IdleTask* task)) {
+    // This must be overriden if |IdleTasksEnabled()|.
+    abort();
   }

   /**
    * Returns true if idle tasks are enabled for the given |isolate|.
    */
   virtual bool IdleTasksEnabled(Isolate* isolate) {
-    // TODO(ulan): Make this function abstract after V8 roll in Chromium.
     return false;
   }
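All of the foreground-thread entry points are now wrapped in V8_DEPRECATE_SOON, and each deprecation message points at GetForegroundTaskRunner(). The call-site migration is mechanical; the sketch below is illustrative embedder code, not part of libv8.

```cpp
#include <memory>

#include "v8-platform.h"
#include "v8.h"

// Instead of platform->CallOnForegroundThread(isolate, task) and friends,
// acquire the per-isolate runner and post through it.
void PostToForeground(v8::Platform* platform, v8::Isolate* isolate,
                      std::unique_ptr<v8::Task> task,
                      std::unique_ptr<v8::Task> delayed_task) {
  std::shared_ptr<v8::TaskRunner> runner =
      platform->GetForegroundTaskRunner(isolate);
  runner->PostTask(std::move(task));
  runner->PostDelayedTask(std::move(delayed_task), /*delay_in_seconds=*/2.0);
}
```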
@@ -459,6 +418,12 @@ class Platform {
    */
   virtual TracingController* GetTracingController() = 0;

+  /**
+   * Tells the embedder to generate and upload a crashdump during an unexpected
+   * but non-critical scenario.
+   */
+  virtual void DumpWithoutCrashing() {}
+
  protected:
   /**
    * Default implementation of current wall-clock time in milliseconds
data/vendor/v8/include/v8-profiler.h

@@ -47,20 +47,6 @@ template class V8_EXPORT std::vector<v8::CpuProfileDeoptInfo>;

 namespace v8 {

-/**
- * TracingCpuProfiler monitors tracing being enabled/disabled
- * and emits CpuProfile trace events once v8.cpu_profiler tracing category
- * is enabled. It has no overhead unless the category is enabled.
- */
-class V8_EXPORT TracingCpuProfiler {
- public:
-  static std::unique_ptr<TracingCpuProfiler> Create(Isolate*);
-  virtual ~TracingCpuProfiler() = default;
-
- protected:
-  TracingCpuProfiler() = default;
-};
-
 // TickSample captures the information collected for each sample.
 struct TickSample {
   // Internal profiling (with --prof + tools/$OS-tick-processor) wants to
@@ -273,6 +259,16 @@ class V8_EXPORT CpuProfile {
   void Delete();
 };

+enum CpuProfilingMode {
+  // In the resulting CpuProfile tree, intermediate nodes in a stack trace
+  // (from the root to a leaf) will have line numbers that point to the start
+  // line of the function, rather than the line of the callsite of the child.
+  kLeafNodeLineNumbers,
+  // In the resulting CpuProfile tree, nodes are separated based on the line
+  // number of their callsite in their parent.
+  kCallerLineNumbers,
+};
+
 /**
  * Interface for controlling CPU profiling. Instance of the
  * profiler can be created using v8::CpuProfiler::New method.

@@ -316,6 +312,13 @@ class V8_EXPORT CpuProfiler {
    * |record_samples| parameter controls whether individual samples should
    * be recorded in addition to the aggregated tree.
    */
+  void StartProfiling(Local<String> title, CpuProfilingMode mode,
+                      bool record_samples = false);
+  /**
+   * The same as StartProfiling above, but the CpuProfilingMode defaults to
+   * kLeafNodeLineNumbers mode, which was the previous default behavior of the
+   * profiler.
+   */
   void StartProfiling(Local<String> title, bool record_samples = false);

   /**
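The new StartProfiling overload takes a CpuProfilingMode; the old overload keeps the previous kLeafNodeLineNumbers behaviour. A hedged usage sketch follows, assuming the caller has an initialized Isolate; the function name and the printed title are illustrative.

```cpp
#include "v8-profiler.h"
#include "v8.h"

// Illustrative only: profile a stretch of script execution with caller line
// numbers, then discard the result.
void ProfileWithCallerLineNumbers(v8::Isolate* isolate) {
  v8::HandleScope scope(isolate);
  v8::Local<v8::String> title =
      v8::String::NewFromUtf8(isolate, "sample", v8::NewStringType::kNormal)
          .ToLocalChecked();

  v8::CpuProfiler* profiler = v8::CpuProfiler::New(isolate);
  profiler->StartProfiling(title, v8::kCallerLineNumbers,
                           /*record_samples=*/true);

  // ... run the JavaScript being measured ...

  v8::CpuProfile* profile = profiler->StopProfiling(title);
  if (profile != nullptr) profile->Delete();
  profiler->Dispose();
}
```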
@@ -338,6 +341,12 @@ class V8_EXPORT CpuProfiler {
   V8_DEPRECATED("Use Isolate::SetIdle(bool) instead.",
                 void SetIdle(bool is_idle));

+  /**
+   * Generate more detailed source positions to code objects. This results in
+   * better results when mapping profiling samples to script source.
+   */
+  static void UseDetailedSourcePositionsForProfiling(Isolate* isolate);
+
  private:
   CpuProfiler();
   ~CpuProfiler();
@@ -442,7 +451,7 @@ class V8_EXPORT OutputStream { // NOLINT
     kContinue = 0,
     kAbort = 1
   };
-  virtual ~OutputStream()
+  virtual ~OutputStream() = default;
   /** Notify about the end of stream. */
   virtual void EndOfStream() = 0;
   /** Get preferred output chunk size. Called only once. */

@@ -536,7 +545,7 @@ class V8_EXPORT ActivityControl { // NOLINT
     kContinue = 0,
     kAbort = 1
   };
-  virtual ~ActivityControl()
+  virtual ~ActivityControl() = default;
   /**
    * Notify about current progress. The activity can be stopped by
    * returning kAbort as the callback result.
@@ -602,6 +611,11 @@ class V8_EXPORT AllocationProfile {
      */
     int column_number;

+    /**
+     * Unique id of the node.
+     */
+    uint32_t node_id;
+
     /**
      * List of callees called from this node for which we have sampled
      * allocations. The lifetime of the children is scoped to the containing

@@ -615,14 +629,41 @@ class V8_EXPORT AllocationProfile {
     std::vector<Allocation> allocations;
   };

+  /**
+   * Represent a single sample recorded for an allocation.
+   */
+  struct Sample {
+    /**
+     * id of the node in the profile tree.
+     */
+    uint32_t node_id;
+
+    /**
+     * Size of the sampled allocation object.
+     */
+    size_t size;
+
+    /**
+     * The number of objects of such size that were sampled.
+     */
+    unsigned int count;
+
+    /**
+     * Unique time-ordered id of the allocation sample. Can be used to track
+     * what samples were added or removed between two snapshots.
+     */
+    uint64_t sample_id;
+  };
+
   /**
    * Returns the root node of the call-graph. The root node corresponds to an
    * empty JS call-stack. The lifetime of the returned Node* is scoped to the
    * containing AllocationProfile.
    */
   virtual Node* GetRootNode() = 0;
+  virtual const std::vector<Sample>& GetSamples() = 0;

-  virtual ~AllocationProfile()
+  virtual ~AllocationProfile() = default;

   static const int kNoLineNumberInfo = Message::kNoLineNumberInfo;
   static const int kNoColumnNumberInfo = Message::kNoColumnInfo;
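GetSamples() plus the new node_id fields make it possible to diff sampled allocations between two points in time without walking the whole tree. The sketch below is a hedged example of reading them; it assumes the sampling heap profiler has already been started, and the function name and printing are illustrative.

```cpp
#include <cinttypes>
#include <cstdio>
#include <memory>

#include "v8-profiler.h"
#include "v8.h"

// Illustrative only: dump the flat sample list of the current allocation
// profile. Each sample's node_id refers back to a Node in the same profile.
void DumpAllocationSamples(v8::Isolate* isolate) {
  std::unique_ptr<v8::AllocationProfile> profile(
      isolate->GetHeapProfiler()->GetAllocationProfile());
  if (!profile) return;

  for (const v8::AllocationProfile::Sample& sample : profile->GetSamples()) {
    std::printf("node %u: %zu bytes x %u (sample id %" PRIu64 ")\n",
                sample.node_id, sample.size, sample.count, sample.sample_id);
  }
}
```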
@@ -636,7 +677,7 @@ class V8_EXPORT AllocationProfile {
  * Usage:
  * 1) Define derived class of EmbedderGraph::Node for embedder objects.
  * 2) Set the build embedder graph callback on the heap profiler using
- *    HeapProfiler::
+ *    HeapProfiler::AddBuildEmbedderGraphCallback.
  * 3) In the callback use graph->AddEdge(node1, node2) to add an edge from
  *    node1 to node2.
  * 4) To represent references from/to V8 object, construct V8 nodes using

@@ -682,11 +723,14 @@ class V8_EXPORT EmbedderGraph {
   virtual Node* AddNode(std::unique_ptr<Node> node) = 0;

   /**
-   * Adds an edge that represents a strong reference from the given
-   * |from| to the given node |to|. The nodes must be added to the graph
+   * Adds an edge that represents a strong reference from the given
+   * node |from| to the given node |to|. The nodes must be added to the graph
    * before calling this function.
+   *
+   * If name is nullptr, the edge will have auto-increment indexes, otherwise
+   * it will be named accordingly.
    */
-  virtual void AddEdge(Node* from, Node* to) = 0;
+  virtual void AddEdge(Node* from, Node* to, const char* name = nullptr) = 0;

   virtual ~EmbedderGraph() = default;
 };
@@ -736,7 +780,12 @@ class V8_EXPORT HeapProfiler {
    * The callback must not trigger garbage collection in V8.
    */
   typedef void (*BuildEmbedderGraphCallback)(v8::Isolate* isolate,
-                                             v8::EmbedderGraph* graph
+                                             v8::EmbedderGraph* graph,
+                                             void* data);
+
+  /** TODO(addaleax): Remove */
+  typedef void (*LegacyBuildEmbedderGraphCallback)(v8::Isolate* isolate,
+                                                   v8::EmbedderGraph* graph);

   /** Returns the number of snapshots taken. */
   int GetSnapshotCount();

@@ -782,15 +831,15 @@ class V8_EXPORT HeapProfiler {
     virtual const char* GetName(Local<Object> object) = 0;

    protected:
-    virtual ~ObjectNameResolver()
+    virtual ~ObjectNameResolver() = default;
   };

   /**
    * Takes a heap snapshot and returns it.
    */
   const HeapSnapshot* TakeHeapSnapshot(
-      ActivityControl* control =
-      ObjectNameResolver* global_object_name_resolver =
+      ActivityControl* control = nullptr,
+      ObjectNameResolver* global_object_name_resolver = nullptr);

   /**
    * Starts tracking of heap objects population statistics. After calling

@@ -817,7 +866,7 @@ class V8_EXPORT HeapProfiler {
    * method.
    */
   SnapshotObjectId GetHeapStats(OutputStream* stream,
-                                int64_t* timestamp_us =
+                                int64_t* timestamp_us = nullptr);

   /**
    * Stops tracking of heap objects population statistics, cleans up all

@@ -878,15 +927,22 @@ class V8_EXPORT HeapProfiler {

   /** Binds a callback to embedder's class ID. */
   V8_DEPRECATED(
-      "Use
+      "Use AddBuildEmbedderGraphCallback to provide info about embedder nodes",
       void SetWrapperClassInfoProvider(uint16_t class_id,
                                        WrapperInfoCallback callback));

   V8_DEPRECATED(
-      "Use
+      "Use AddBuildEmbedderGraphCallback to provide info about embedder nodes",
       void SetGetRetainerInfosCallback(GetRetainerInfosCallback callback));

-
+  V8_DEPRECATED(
+      "Use AddBuildEmbedderGraphCallback to provide info about embedder nodes",
+      void SetBuildEmbedderGraphCallback(
+          LegacyBuildEmbedderGraphCallback callback));
+  void AddBuildEmbedderGraphCallback(BuildEmbedderGraphCallback callback,
+                                     void* data);
+  void RemoveBuildEmbedderGraphCallback(BuildEmbedderGraphCallback callback,
+                                        void* data);

   /**
    * Default value of persistent handle class ID. Must not be used to
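SetBuildEmbedderGraphCallback is deprecated in favour of AddBuildEmbedderGraphCallback / RemoveBuildEmbedderGraphCallback, which take a void* data cookie, and EmbedderGraph::AddEdge gained an optional edge name. A hedged registration sketch follows; the callback body is left schematic because building real EmbedderGraph::Node wrappers is embedder-specific, and the function names are illustrative.

```cpp
#include "v8-profiler.h"
#include "v8.h"

// Illustrative callback matching the new BuildEmbedderGraphCallback signature.
void BuildEmbedderGraph(v8::Isolate* isolate, v8::EmbedderGraph* graph,
                        void* data) {
  // Wrap native objects in EmbedderGraph::Node subclasses, add them with
  // graph->AddNode(...), then connect them, optionally naming each edge:
  //   graph->AddEdge(parent, child, "native_child");
  (void)isolate;
  (void)graph;
  (void)data;
}

void RegisterGraphCallback(v8::Isolate* isolate, void* embedder_state) {
  isolate->GetHeapProfiler()->AddBuildEmbedderGraphCallback(BuildEmbedderGraph,
                                                            embedder_state);
}

void UnregisterGraphCallback(v8::Isolate* isolate, void* embedder_state) {
  isolate->GetHeapProfiler()->RemoveBuildEmbedderGraphCallback(
      BuildEmbedderGraph, embedder_state);
}
```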
@@ -967,8 +1023,8 @@ class V8_EXPORT RetainedObjectInfo { // NOLINT
   virtual intptr_t GetSizeInBytes() { return -1; }

  protected:
-  RetainedObjectInfo()
-  virtual ~RetainedObjectInfo()
+  RetainedObjectInfo() = default;
+  virtual ~RetainedObjectInfo() = default;

  private:
   RetainedObjectInfo(const RetainedObjectInfo&);

@@ -988,6 +1044,76 @@ struct HeapStatsUpdate {
   uint32_t size;  // New value of size field for the interval with this index.
 };

+#define CODE_EVENTS_LIST(V) \
+  V(Builtin)                \
+  V(Callback)               \
+  V(Eval)                   \
+  V(Function)               \
+  V(InterpretedFunction)    \
+  V(Handler)                \
+  V(BytecodeHandler)        \
+  V(LazyCompile)            \
+  V(RegExp)                 \
+  V(Script)                 \
+  V(Stub)
+
+/**
+ * Note that this enum may be extended in the future. Please include a default
+ * case if this enum is used in a switch statement.
+ */
+enum CodeEventType {
+  kUnknownType = 0
+#define V(Name) , k##Name##Type
+      CODE_EVENTS_LIST(V)
+#undef V
+};
+
+/**
+ * Representation of a code creation event
+ */
+class V8_EXPORT CodeEvent {
+ public:
+  uintptr_t GetCodeStartAddress();
+  size_t GetCodeSize();
+  Local<String> GetFunctionName();
+  Local<String> GetScriptName();
+  int GetScriptLine();
+  int GetScriptColumn();
+  /**
+   * NOTE (mmarchini): We can't allocate objects in the heap when we collect
+   * existing code, and both the code type and the comment are not stored in the
+   * heap, so we return those as const char*.
+   */
+  CodeEventType GetCodeType();
+  const char* GetComment();
+
+  static const char* GetCodeEventTypeName(CodeEventType code_event_type);
+};
+
+/**
+ * Interface to listen to code creation events.
+ */
+class V8_EXPORT CodeEventHandler {
+ public:
+  /**
+   * Creates a new listener for the |isolate|. The isolate must be initialized.
+   * The listener object must be disposed after use by calling |Dispose| method.
+   * Multiple listeners can be created for the same isolate.
+   */
+  explicit CodeEventHandler(Isolate* isolate);
+  virtual ~CodeEventHandler();
+
+  virtual void Handle(CodeEvent* code_event) = 0;
+
+  void Enable();
+  void Disable();
+
+ private:
+  CodeEventHandler();
+  CodeEventHandler(const CodeEventHandler&);
+  CodeEventHandler& operator=(const CodeEventHandler&);
+  void* internal_listener_;
+};
+
 }  // namespace v8
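CodeEvent and CodeEventHandler are entirely new: subclassing the handler and calling Enable() starts delivery of code-creation events for an isolate. A hedged listener sketch, with the class name and printf output purely illustrative:

```cpp
#include <cstdio>

#include "v8-profiler.h"
#include "v8.h"

// Illustrative only: log every code-creation event for an isolate.
class LoggingCodeEventHandler : public v8::CodeEventHandler {
 public:
  explicit LoggingCodeEventHandler(v8::Isolate* isolate)
      : v8::CodeEventHandler(isolate) {}

  void Handle(v8::CodeEvent* event) override {
    std::printf("%s code at 0x%zx, %zu bytes\n",
                v8::CodeEvent::GetCodeEventTypeName(event->GetCodeType()),
                static_cast<size_t>(event->GetCodeStartAddress()),
                event->GetCodeSize());
  }
};

// Usage, once the isolate is initialized:
//   LoggingCodeEventHandler handler(isolate);
//   handler.Enable();   // start receiving events
//   ...
//   handler.Disable();
```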