libv8-node 15.5.1.0.beta1-aarch64-linux-musl

Sign up to get free protection for your applications and to get access to all the features.
Files changed (56)
  1. checksums.yaml +7 -0
  2. data/ext/libv8-node/.location.yml +1 -0
  3. data/ext/libv8-node/location.rb +76 -0
  4. data/ext/libv8-node/paths.rb +30 -0
  5. data/lib/libv8-node.rb +1 -0
  6. data/lib/libv8/node.rb +11 -0
  7. data/lib/libv8/node/version.rb +7 -0
  8. data/vendor/v8/include/cppgc/allocation.h +173 -0
  9. data/vendor/v8/include/cppgc/common.h +26 -0
  10. data/vendor/v8/include/cppgc/custom-space.h +62 -0
  11. data/vendor/v8/include/cppgc/default-platform.h +76 -0
  12. data/vendor/v8/include/cppgc/garbage-collected.h +116 -0
  13. data/vendor/v8/include/cppgc/heap.h +139 -0
  14. data/vendor/v8/include/cppgc/internal/api-constants.h +47 -0
  15. data/vendor/v8/include/cppgc/internal/atomic-entry-flag.h +48 -0
  16. data/vendor/v8/include/cppgc/internal/caged-heap-local-data.h +67 -0
  17. data/vendor/v8/include/cppgc/internal/compiler-specific.h +38 -0
  18. data/vendor/v8/include/cppgc/internal/finalizer-trait.h +90 -0
  19. data/vendor/v8/include/cppgc/internal/gc-info.h +45 -0
  20. data/vendor/v8/include/cppgc/internal/logging.h +50 -0
  21. data/vendor/v8/include/cppgc/internal/persistent-node.h +116 -0
  22. data/vendor/v8/include/cppgc/internal/pointer-policies.h +134 -0
  23. data/vendor/v8/include/cppgc/internal/prefinalizer-handler.h +30 -0
  24. data/vendor/v8/include/cppgc/internal/process-heap.h +34 -0
  25. data/vendor/v8/include/cppgc/internal/write-barrier.h +78 -0
  26. data/vendor/v8/include/cppgc/liveness-broker.h +68 -0
  27. data/vendor/v8/include/cppgc/macros.h +24 -0
  28. data/vendor/v8/include/cppgc/member.h +226 -0
  29. data/vendor/v8/include/cppgc/persistent.h +341 -0
  30. data/vendor/v8/include/cppgc/platform.h +130 -0
  31. data/vendor/v8/include/cppgc/prefinalizer.h +52 -0
  32. data/vendor/v8/include/cppgc/source-location.h +91 -0
  33. data/vendor/v8/include/cppgc/trace-trait.h +111 -0
  34. data/vendor/v8/include/cppgc/type-traits.h +109 -0
  35. data/vendor/v8/include/cppgc/visitor.h +213 -0
  36. data/vendor/v8/include/libplatform/libplatform-export.h +29 -0
  37. data/vendor/v8/include/libplatform/libplatform.h +106 -0
  38. data/vendor/v8/include/libplatform/v8-tracing.h +332 -0
  39. data/vendor/v8/include/v8-cppgc.h +226 -0
  40. data/vendor/v8/include/v8-fast-api-calls.h +388 -0
  41. data/vendor/v8/include/v8-inspector-protocol.h +13 -0
  42. data/vendor/v8/include/v8-inspector.h +327 -0
  43. data/vendor/v8/include/v8-internal.h +427 -0
  44. data/vendor/v8/include/v8-metrics.h +133 -0
  45. data/vendor/v8/include/v8-platform.h +684 -0
  46. data/vendor/v8/include/v8-profiler.h +1059 -0
  47. data/vendor/v8/include/v8-util.h +652 -0
  48. data/vendor/v8/include/v8-value-serializer-version.h +24 -0
  49. data/vendor/v8/include/v8-version-string.h +38 -0
  50. data/vendor/v8/include/v8-version.h +20 -0
  51. data/vendor/v8/include/v8-wasm-trap-handler-posix.h +31 -0
  52. data/vendor/v8/include/v8-wasm-trap-handler-win.h +28 -0
  53. data/vendor/v8/include/v8.h +12098 -0
  54. data/vendor/v8/include/v8config.h +484 -0
  55. data/vendor/v8/out.gn/libv8/obj/libv8_monolith.a +0 -0
  56. metadata +126 -0
@@ -0,0 +1,1059 @@
1
+ // Copyright 2010 the V8 project authors. All rights reserved.
2
+ // Use of this source code is governed by a BSD-style license that can be
3
+ // found in the LICENSE file.
4
+
5
+ #ifndef V8_V8_PROFILER_H_
6
+ #define V8_V8_PROFILER_H_
7
+
8
+ #include <limits.h>
9
+ #include <memory>
10
+ #include <unordered_set>
11
+ #include <vector>
12
+
13
+ #include "v8.h" // NOLINT(build/include_directory)
14
+
15
+ /**
16
+ * Profiler support for the V8 JavaScript engine.
17
+ */
18
+ namespace v8 {
19
+
20
+ class HeapGraphNode;
21
+ struct HeapStatsUpdate;
22
+
23
+ using NativeObject = void*;
24
+ using SnapshotObjectId = uint32_t;
25
+
26
+ struct CpuProfileDeoptFrame {
27
+ int script_id;
28
+ size_t position;
29
+ };
30
+
31
+ namespace internal {
32
+ class CpuProfile;
33
+ } // namespace internal
34
+
35
+ } // namespace v8
36
+
37
+ #ifdef V8_OS_WIN
38
+ template class V8_EXPORT std::vector<v8::CpuProfileDeoptFrame>;
39
+ #endif
40
+
41
+ namespace v8 {
42
+
43
+ struct V8_EXPORT CpuProfileDeoptInfo {
44
+ /** A pointer to a static string owned by v8. */
45
+ const char* deopt_reason;
46
+ std::vector<CpuProfileDeoptFrame> stack;
47
+ };
48
+
49
+ } // namespace v8
50
+
51
+ #ifdef V8_OS_WIN
52
+ template class V8_EXPORT std::vector<v8::CpuProfileDeoptInfo>;
53
+ #endif
54
+
55
+ namespace v8 {
56
+
57
+ /**
58
+ * CpuProfileNode represents a node in a call graph.
59
+ */
60
+ class V8_EXPORT CpuProfileNode {
61
+ public:
62
+ struct LineTick {
63
+ /** The 1-based number of the source line where the function originates. */
64
+ int line;
65
+
66
+ /** The count of samples associated with the source line. */
67
+ unsigned int hit_count;
68
+ };
69
+
70
+ // An annotation hinting at the source of a CpuProfileNode.
71
+ enum SourceType {
72
+ // User-supplied script with associated resource information.
73
+ kScript = 0,
74
+ // Native scripts and provided builtins.
75
+ kBuiltin = 1,
76
+ // Callbacks into native code.
77
+ kCallback = 2,
78
+ // VM-internal functions or state.
79
+ kInternal = 3,
80
+ // A node that failed to symbolize.
81
+ kUnresolved = 4,
82
+ };
83
+
84
+ /** Returns function name (empty string for anonymous functions.) */
85
+ Local<String> GetFunctionName() const;
86
+
87
+ /**
88
+ * Returns function name (empty string for anonymous functions.)
89
+ * The string ownership is *not* passed to the caller. It stays valid until
90
+ * profile is deleted. The function is thread safe.
91
+ */
92
+ const char* GetFunctionNameStr() const;
93
+
94
+ /** Returns id of the script where function is located. */
95
+ int GetScriptId() const;
96
+
97
+ /** Returns resource name for script from where the function originates. */
98
+ Local<String> GetScriptResourceName() const;
99
+
100
+ /**
101
+ * Returns resource name for script from where the function originates.
102
+ * The string ownership is *not* passed to the caller. It stays valid until
103
+ * profile is deleted. The function is thread safe.
104
+ */
105
+ const char* GetScriptResourceNameStr() const;
106
+
107
+ /**
108
+ * Return true if the script from where the function originates is flagged as
109
+ * being shared cross-origin.
110
+ */
111
+ bool IsScriptSharedCrossOrigin() const;
112
+
113
+ /**
114
+ * Returns the number, 1-based, of the line where the function originates.
115
+ * kNoLineNumberInfo if no line number information is available.
116
+ */
117
+ int GetLineNumber() const;
118
+
119
+ /**
120
+ * Returns 1-based number of the column where the function originates.
121
+ * kNoColumnNumberInfo if no column number information is available.
122
+ */
123
+ int GetColumnNumber() const;
124
+
125
+ /**
126
+ * Returns the number of the function's source lines that collect the samples.
127
+ */
128
+ unsigned int GetHitLineCount() const;
129
+
130
+ /** Returns the set of source lines that collect the samples.
131
+ * The caller allocates buffer and responsible for releasing it.
132
+ * True if all available entries are copied, otherwise false.
133
+ * The function copies nothing if buffer is not large enough.
134
+ */
135
+ bool GetLineTicks(LineTick* entries, unsigned int length) const;
136
+
137
+ /** Returns bailout reason for the function
138
+ * if the optimization was disabled for it.
139
+ */
140
+ const char* GetBailoutReason() const;
141
+
142
+ /**
143
+ * Returns the count of samples where the function was currently executing.
144
+ */
145
+ unsigned GetHitCount() const;
146
+
147
+ /** Returns id of the node. The id is unique within the tree */
148
+ unsigned GetNodeId() const;
149
+
150
+ /**
151
+ * Gets the type of the source which the node was captured from.
152
+ */
153
+ SourceType GetSourceType() const;
154
+
155
+ /** Returns child nodes count of the node. */
156
+ int GetChildrenCount() const;
157
+
158
+ /** Retrieves a child node by index. */
159
+ const CpuProfileNode* GetChild(int index) const;
160
+
161
+ /** Retrieves the ancestor node, or null if the root. */
162
+ const CpuProfileNode* GetParent() const;
163
+
164
+ /** Retrieves deopt infos for the node. */
165
+ const std::vector<CpuProfileDeoptInfo>& GetDeoptInfos() const;
166
+
167
+ static const int kNoLineNumberInfo = Message::kNoLineNumberInfo;
168
+ static const int kNoColumnNumberInfo = Message::kNoColumnInfo;
169
+ };
170
+
171
+
172
+ /**
173
+ * CpuProfile contains a CPU profile in a form of top-down call tree
174
+ * (from main() down to functions that do all the work).
175
+ */
176
+ class V8_EXPORT CpuProfile {
177
+ public:
178
+ /** Returns CPU profile title. */
179
+ Local<String> GetTitle() const;
180
+
181
+ /** Returns the root node of the top down call tree. */
182
+ const CpuProfileNode* GetTopDownRoot() const;
183
+
184
+ /**
185
+ * Returns number of samples recorded. The samples are not recorded unless
186
+ * |record_samples| parameter of CpuProfiler::StartCpuProfiling is true.
187
+ */
188
+ int GetSamplesCount() const;
189
+
190
+ /**
191
+ * Returns profile node corresponding to the top frame the sample at
192
+ * the given index.
193
+ */
194
+ const CpuProfileNode* GetSample(int index) const;
195
+
196
+ /**
197
+ * Returns the timestamp of the sample. The timestamp is the number of
198
+ * microseconds since some unspecified starting point.
199
+ * The point is equal to the starting point used by GetStartTime.
200
+ */
201
+ int64_t GetSampleTimestamp(int index) const;
202
+
203
+ /**
204
+ * Returns time when the profile recording was started (in microseconds)
205
+ * since some unspecified starting point.
206
+ */
207
+ int64_t GetStartTime() const;
208
+
209
+ /**
210
+ * Returns time when the profile recording was stopped (in microseconds)
211
+ * since some unspecified starting point.
212
+ * The point is equal to the starting point used by GetStartTime.
213
+ */
214
+ int64_t GetEndTime() const;
215
+
216
+ /**
217
+ * Deletes the profile and removes it from CpuProfiler's list.
218
+ * All pointers to nodes previously returned become invalid.
219
+ */
220
+ void Delete();
221
+ };
222
+
223
+ enum CpuProfilingMode {
224
+ // In the resulting CpuProfile tree, intermediate nodes in a stack trace
225
+ // (from the root to a leaf) will have line numbers that point to the start
226
+ // line of the function, rather than the line of the callsite of the child.
227
+ kLeafNodeLineNumbers,
228
+ // In the resulting CpuProfile tree, nodes are separated based on the line
229
+ // number of their callsite in their parent.
230
+ kCallerLineNumbers,
231
+ };
232
+
233
+ // Determines how names are derived for functions sampled.
234
+ enum CpuProfilingNamingMode {
235
+ // Use the immediate name of functions at compilation time.
236
+ kStandardNaming,
237
+ // Use more verbose naming for functions without names, inferred from scope
238
+ // where possible.
239
+ kDebugNaming,
240
+ };
241
+
242
+ enum CpuProfilingLoggingMode {
243
+ // Enables logging when a profile is active, and disables logging when all
244
+ // profiles are detached.
245
+ kLazyLogging,
246
+ // Enables logging for the lifetime of the CpuProfiler. Calls to
247
+ // StartRecording are faster, at the expense of runtime overhead.
248
+ kEagerLogging,
249
+ };
250
+
251
+ /**
252
+ * Optional profiling attributes.
253
+ */
254
+ class V8_EXPORT CpuProfilingOptions {
255
+ public:
256
+ // Indicates that the sample buffer size should not be explicitly limited.
257
+ static const unsigned kNoSampleLimit = UINT_MAX;
258
+
259
+ /**
260
+ * \param mode Type of computation of stack frame line numbers.
261
+ * \param max_samples The maximum number of samples that should be recorded by
262
+ * the profiler. Samples obtained after this limit will be
263
+ * discarded.
264
+ * \param sampling_interval_us controls the profile-specific target
265
+ * sampling interval. The provided sampling
266
+ * interval will be snapped to the next lowest
267
+ * non-zero multiple of the profiler's sampling
268
+ * interval, set via SetSamplingInterval(). If
269
+ * zero, the sampling interval will be equal to
270
+ * the profiler's sampling interval.
271
+ */
272
+ CpuProfilingOptions(
273
+ CpuProfilingMode mode = kLeafNodeLineNumbers,
274
+ unsigned max_samples = kNoSampleLimit, int sampling_interval_us = 0,
275
+ MaybeLocal<Context> filter_context = MaybeLocal<Context>());
276
+
277
+ CpuProfilingMode mode() const { return mode_; }
278
+ unsigned max_samples() const { return max_samples_; }
279
+ int sampling_interval_us() const { return sampling_interval_us_; }
280
+
281
+ private:
282
+ friend class internal::CpuProfile;
283
+
284
+ bool has_filter_context() const { return !filter_context_.IsEmpty(); }
285
+ void* raw_filter_context() const;
286
+
287
+ CpuProfilingMode mode_;
288
+ unsigned max_samples_;
289
+ int sampling_interval_us_;
290
+ CopyablePersistentTraits<Context>::CopyablePersistent filter_context_;
291
+ };
292
+
293
+ /**
294
+ * Interface for controlling CPU profiling. Instance of the
295
+ * profiler can be created using v8::CpuProfiler::New method.
296
+ */
297
+ class V8_EXPORT CpuProfiler {
298
+ public:
299
+ /**
300
+ * Creates a new CPU profiler for the |isolate|. The isolate must be
301
+ * initialized. The profiler object must be disposed after use by calling
302
+ * |Dispose| method.
303
+ */
304
+ static CpuProfiler* New(Isolate* isolate,
305
+ CpuProfilingNamingMode = kDebugNaming,
306
+ CpuProfilingLoggingMode = kLazyLogging);
307
+
308
+ /**
309
+ * Synchronously collect current stack sample in all profilers attached to
310
+ * the |isolate|. The call does not affect number of ticks recorded for
311
+ * the current top node.
312
+ */
313
+ static void CollectSample(Isolate* isolate);
314
+
315
+ /**
316
+ * Disposes the CPU profiler object.
317
+ */
318
+ void Dispose();
319
+
320
+ /**
321
+ * Changes default CPU profiler sampling interval to the specified number
322
+ * of microseconds. Default interval is 1000us. This method must be called
323
+ * when there are no profiles being recorded.
324
+ */
325
+ void SetSamplingInterval(int us);
326
+
327
+ /**
328
+ * Sets whether or not the profiler should prioritize consistency of sample
329
+ * periodicity on Windows. Disabling this can greatly reduce CPU usage, but
330
+ * may result in greater variance in sample timings from the platform's
331
+ * scheduler. Defaults to enabled. This method must be called when there are
332
+ * no profiles being recorded.
333
+ */
334
+ void SetUsePreciseSampling(bool);
335
+
336
+ /**
337
+ * Starts collecting a CPU profile. Title may be an empty string. Several
338
+ * profiles may be collected at once. Attempts to start collecting several
339
+ * profiles with the same title are silently ignored.
340
+ */
341
+ void StartProfiling(Local<String> title, CpuProfilingOptions options);
342
+
343
+ /**
344
+ * Starts profiling with the same semantics as above, except with expanded
345
+ * parameters.
346
+ *
347
+ * |record_samples| parameter controls whether individual samples should
348
+ * be recorded in addition to the aggregated tree.
349
+ *
350
+ * |max_samples| controls the maximum number of samples that should be
351
+ * recorded by the profiler. Samples obtained after this limit will be
352
+ * discarded.
353
+ */
354
+ void StartProfiling(
355
+ Local<String> title, CpuProfilingMode mode, bool record_samples = false,
356
+ unsigned max_samples = CpuProfilingOptions::kNoSampleLimit);
357
+ /**
358
+ * The same as StartProfiling above, but the CpuProfilingMode defaults to
359
+ * kLeafNodeLineNumbers mode, which was the previous default behavior of the
360
+ * profiler.
361
+ */
362
+ void StartProfiling(Local<String> title, bool record_samples = false);
363
+
364
+ /**
365
+ * Stops collecting CPU profile with a given title and returns it.
366
+ * If the title given is empty, finishes the last profile started.
367
+ */
368
+ CpuProfile* StopProfiling(Local<String> title);
369
+
370
+ /**
371
+ * Generate more detailed source positions to code objects. This results in
372
+ * better results when mapping profiling samples to script source.
373
+ */
374
+ static void UseDetailedSourcePositionsForProfiling(Isolate* isolate);
375
+
376
+ private:
377
+ CpuProfiler();
378
+ ~CpuProfiler();
379
+ CpuProfiler(const CpuProfiler&);
380
+ CpuProfiler& operator=(const CpuProfiler&);
381
+ };
382
+
383
+ /**
384
+ * HeapSnapshotEdge represents a directed connection between heap
385
+ * graph nodes: from retainers to retained nodes.
386
+ */
387
+ class V8_EXPORT HeapGraphEdge {
388
+ public:
389
+ enum Type {
390
+ kContextVariable = 0, // A variable from a function context.
391
+ kElement = 1, // An element of an array.
392
+ kProperty = 2, // A named object property.
393
+ kInternal = 3, // A link that can't be accessed from JS,
394
+ // thus, its name isn't a real property name
395
+ // (e.g. parts of a ConsString).
396
+ kHidden = 4, // A link that is needed for proper sizes
397
+ // calculation, but may be hidden from user.
398
+ kShortcut = 5, // A link that must not be followed during
399
+ // sizes calculation.
400
+ kWeak = 6 // A weak reference (ignored by the GC).
401
+ };
402
+
403
+ /** Returns edge type (see HeapGraphEdge::Type). */
404
+ Type GetType() const;
405
+
406
+ /**
407
+ * Returns edge name. This can be a variable name, an element index, or
408
+ * a property name.
409
+ */
410
+ Local<Value> GetName() const;
411
+
412
+ /** Returns origin node. */
413
+ const HeapGraphNode* GetFromNode() const;
414
+
415
+ /** Returns destination node. */
416
+ const HeapGraphNode* GetToNode() const;
417
+ };
418
+
419
+
420
+ /**
421
+ * HeapGraphNode represents a node in a heap graph.
422
+ */
423
+ class V8_EXPORT HeapGraphNode {
424
+ public:
425
+ enum Type {
426
+ kHidden = 0, // Hidden node, may be filtered when shown to user.
427
+ kArray = 1, // An array of elements.
428
+ kString = 2, // A string.
429
+ kObject = 3, // A JS object (except for arrays and strings).
430
+ kCode = 4, // Compiled code.
431
+ kClosure = 5, // Function closure.
432
+ kRegExp = 6, // RegExp.
433
+ kHeapNumber = 7, // Number stored in the heap.
434
+ kNative = 8, // Native object (not from V8 heap).
435
+ kSynthetic = 9, // Synthetic object, usually used for grouping
436
+ // snapshot items together.
437
+ kConsString = 10, // Concatenated string. A pair of pointers to strings.
438
+ kSlicedString = 11, // Sliced string. A fragment of another string.
439
+ kSymbol = 12, // A Symbol (ES6).
440
+ kBigInt = 13 // BigInt.
441
+ };
442
+
443
+ /** Returns node type (see HeapGraphNode::Type). */
444
+ Type GetType() const;
445
+
446
+ /**
447
+ * Returns node name. Depending on node's type this can be the name
448
+ * of the constructor (for objects), the name of the function (for
449
+ * closures), string value, or an empty string (for compiled code).
450
+ */
451
+ Local<String> GetName() const;
452
+
453
+ /**
454
+ * Returns node id. For the same heap object, the id remains the same
455
+ * across all snapshots.
456
+ */
457
+ SnapshotObjectId GetId() const;
458
+
459
+ /** Returns node's own size, in bytes. */
460
+ size_t GetShallowSize() const;
461
+
462
+ /** Returns child nodes count of the node. */
463
+ int GetChildrenCount() const;
464
+
465
+ /** Retrieves a child by index. */
466
+ const HeapGraphEdge* GetChild(int index) const;
467
+ };
468
+
469
+
470
+ /**
471
+ * An interface for exporting data from V8, using "push" model.
472
+ */
473
+ class V8_EXPORT OutputStream { // NOLINT
474
+ public:
475
+ enum WriteResult {
476
+ kContinue = 0,
477
+ kAbort = 1
478
+ };
479
+ virtual ~OutputStream() = default;
480
+ /** Notify about the end of stream. */
481
+ virtual void EndOfStream() = 0;
482
+ /** Get preferred output chunk size. Called only once. */
483
+ virtual int GetChunkSize() { return 1024; }
484
+ /**
485
+ * Writes the next chunk of snapshot data into the stream. Writing
486
+ * can be stopped by returning kAbort as function result. EndOfStream
487
+ * will not be called in case writing was aborted.
488
+ */
489
+ virtual WriteResult WriteAsciiChunk(char* data, int size) = 0;
490
+ /**
491
+ * Writes the next chunk of heap stats data into the stream. Writing
492
+ * can be stopped by returning kAbort as function result. EndOfStream
493
+ * will not be called in case writing was aborted.
494
+ */
495
+ virtual WriteResult WriteHeapStatsChunk(HeapStatsUpdate* data, int count) {
496
+ return kAbort;
497
+ }
498
+ };
499
+
500
+
501
+ /**
502
+ * HeapSnapshots record the state of the JS heap at some moment.
503
+ */
504
+ class V8_EXPORT HeapSnapshot {
505
+ public:
506
+ enum SerializationFormat {
507
+ kJSON = 0 // See format description near 'Serialize' method.
508
+ };
509
+
510
+ /** Returns the root node of the heap graph. */
511
+ const HeapGraphNode* GetRoot() const;
512
+
513
+ /** Returns a node by its id. */
514
+ const HeapGraphNode* GetNodeById(SnapshotObjectId id) const;
515
+
516
+ /** Returns total nodes count in the snapshot. */
517
+ int GetNodesCount() const;
518
+
519
+ /** Returns a node by index. */
520
+ const HeapGraphNode* GetNode(int index) const;
521
+
522
+ /** Returns a max seen JS object Id. */
523
+ SnapshotObjectId GetMaxSnapshotJSObjectId() const;
524
+
525
+ /**
526
+ * Deletes the snapshot and removes it from HeapProfiler's list.
527
+ * All pointers to nodes, edges and paths previously returned become
528
+ * invalid.
529
+ */
530
+ void Delete();
531
+
532
+ /**
533
+ * Prepare a serialized representation of the snapshot. The result
534
+ * is written into the stream provided in chunks of specified size.
535
+ * The total length of the serialized snapshot is unknown in
536
+ * advance, it can be roughly equal to JS heap size (that means,
537
+ * it can be really big - tens of megabytes).
538
+ *
539
+ * For the JSON format, heap contents are represented as an object
540
+ * with the following structure:
541
+ *
542
+ * {
543
+ * snapshot: {
544
+ * title: "...",
545
+ * uid: nnn,
546
+ * meta: { meta-info },
547
+ * node_count: nnn,
548
+ * edge_count: nnn
549
+ * },
550
+ * nodes: [nodes array],
551
+ * edges: [edges array],
552
+ * strings: [strings array]
553
+ * }
554
+ *
555
+ * Nodes reference strings, other nodes, and edges by their indexes
556
+ * in corresponding arrays.
557
+ */
558
+ void Serialize(OutputStream* stream,
559
+ SerializationFormat format = kJSON) const;
560
+ };
561
+
562
+
563
+ /**
564
+ * An interface for reporting progress and controlling long-running
565
+ * activities.
566
+ */
567
+ class V8_EXPORT ActivityControl { // NOLINT
568
+ public:
569
+ enum ControlOption {
570
+ kContinue = 0,
571
+ kAbort = 1
572
+ };
573
+ virtual ~ActivityControl() = default;
574
+ /**
575
+ * Notify about current progress. The activity can be stopped by
576
+ * returning kAbort as the callback result.
577
+ */
578
+ virtual ControlOption ReportProgressValue(int done, int total) = 0;
579
+ };
580
+
581
+
582
+ /**
583
+ * AllocationProfile is a sampled profile of allocations done by the program.
584
+ * This is structured as a call-graph.
585
+ */
586
+ class V8_EXPORT AllocationProfile {
587
+ public:
588
+ struct Allocation {
589
+ /**
590
+ * Size of the sampled allocation object.
591
+ */
592
+ size_t size;
593
+
594
+ /**
595
+ * The number of objects of such size that were sampled.
596
+ */
597
+ unsigned int count;
598
+ };
599
+
600
+ /**
601
+ * Represents a node in the call-graph.
602
+ */
603
+ struct Node {
604
+ /**
605
+ * Name of the function. May be empty for anonymous functions or if the
606
+ * script corresponding to this function has been unloaded.
607
+ */
608
+ Local<String> name;
609
+
610
+ /**
611
+ * Name of the script containing the function. May be empty if the script
612
+ * name is not available, or if the script has been unloaded.
613
+ */
614
+ Local<String> script_name;
615
+
616
+ /**
617
+ * id of the script where the function is located. May be equal to
618
+ * v8::UnboundScript::kNoScriptId in cases where the script doesn't exist.
619
+ */
620
+ int script_id;
621
+
622
+ /**
623
+ * Start position of the function in the script.
624
+ */
625
+ int start_position;
626
+
627
+ /**
628
+ * 1-indexed line number where the function starts. May be
629
+ * kNoLineNumberInfo if no line number information is available.
630
+ */
631
+ int line_number;
632
+
633
+ /**
634
+ * 1-indexed column number where the function starts. May be
635
+ * kNoColumnNumberInfo if no line number information is available.
636
+ */
637
+ int column_number;
638
+
639
+ /**
640
+ * Unique id of the node.
641
+ */
642
+ uint32_t node_id;
643
+
644
+ /**
645
+ * List of callees called from this node for which we have sampled
646
+ * allocations. The lifetime of the children is scoped to the containing
647
+ * AllocationProfile.
648
+ */
649
+ std::vector<Node*> children;
650
+
651
+ /**
652
+ * List of self allocations done by this node in the call-graph.
653
+ */
654
+ std::vector<Allocation> allocations;
655
+ };
656
+
657
+ /**
658
+ * Represent a single sample recorded for an allocation.
659
+ */
660
+ struct Sample {
661
+ /**
662
+ * id of the node in the profile tree.
663
+ */
664
+ uint32_t node_id;
665
+
666
+ /**
667
+ * Size of the sampled allocation object.
668
+ */
669
+ size_t size;
670
+
671
+ /**
672
+ * The number of objects of such size that were sampled.
673
+ */
674
+ unsigned int count;
675
+
676
+ /**
677
+ * Unique time-ordered id of the allocation sample. Can be used to track
678
+ * what samples were added or removed between two snapshots.
679
+ */
680
+ uint64_t sample_id;
681
+ };
682
+
683
+ /**
684
+ * Returns the root node of the call-graph. The root node corresponds to an
685
+ * empty JS call-stack. The lifetime of the returned Node* is scoped to the
686
+ * containing AllocationProfile.
687
+ */
688
+ virtual Node* GetRootNode() = 0;
689
+ virtual const std::vector<Sample>& GetSamples() = 0;
690
+
691
+ virtual ~AllocationProfile() = default;
692
+
693
+ static const int kNoLineNumberInfo = Message::kNoLineNumberInfo;
694
+ static const int kNoColumnNumberInfo = Message::kNoColumnInfo;
695
+ };
696
+
697
+ /**
698
+ * An object graph consisting of embedder objects and V8 objects.
699
+ * Edges of the graph are strong references between the objects.
700
+ * The embedder can build this graph during heap snapshot generation
701
+ * to include the embedder objects in the heap snapshot.
702
+ * Usage:
703
+ * 1) Define derived class of EmbedderGraph::Node for embedder objects.
704
+ * 2) Set the build embedder graph callback on the heap profiler using
705
+ * HeapProfiler::AddBuildEmbedderGraphCallback.
706
+ * 3) In the callback use graph->AddEdge(node1, node2) to add an edge from
707
+ * node1 to node2.
708
+ * 4) To represent references from/to V8 object, construct V8 nodes using
709
+ * graph->V8Node(value).
710
+ */
711
+ class V8_EXPORT EmbedderGraph {
712
+ public:
713
+ class Node {
714
+ public:
715
+ Node() = default;
716
+ virtual ~Node() = default;
717
+ virtual const char* Name() = 0;
718
+ virtual size_t SizeInBytes() = 0;
719
+ /**
720
+ * The corresponding V8 wrapper node if not null.
721
+ * During heap snapshot generation the embedder node and the V8 wrapper
722
+ * node will be merged into one node to simplify retaining paths.
723
+ */
724
+ virtual Node* WrapperNode() { return nullptr; }
725
+ virtual bool IsRootNode() { return false; }
726
+ /** Must return true for non-V8 nodes. */
727
+ virtual bool IsEmbedderNode() { return true; }
728
+ /**
729
+ * Optional name prefix. It is used in Chrome for tagging detached nodes.
730
+ */
731
+ virtual const char* NamePrefix() { return nullptr; }
732
+
733
+ /**
734
+ * Returns the NativeObject that can be used for querying the
735
+ * |HeapSnapshot|.
736
+ */
737
+ virtual NativeObject GetNativeObject() { return nullptr; }
738
+
739
+ Node(const Node&) = delete;
740
+ Node& operator=(const Node&) = delete;
741
+ };
742
+
743
+ /**
744
+ * Returns a node corresponding to the given V8 value. Ownership is not
745
+ * transferred. The result pointer is valid while the graph is alive.
746
+ */
747
+ virtual Node* V8Node(const v8::Local<v8::Value>& value) = 0;
748
+
749
+ /**
750
+ * Adds the given node to the graph and takes ownership of the node.
751
+ * Returns a raw pointer to the node that is valid while the graph is alive.
752
+ */
753
+ virtual Node* AddNode(std::unique_ptr<Node> node) = 0;
754
+
755
+ /**
756
+ * Adds an edge that represents a strong reference from the given
757
+ * node |from| to the given node |to|. The nodes must be added to the graph
758
+ * before calling this function.
759
+ *
760
+ * If name is nullptr, the edge will have auto-increment indexes, otherwise
761
+ * it will be named accordingly.
762
+ */
763
+ virtual void AddEdge(Node* from, Node* to, const char* name = nullptr) = 0;
764
+
765
+ virtual ~EmbedderGraph() = default;
766
+ };
767
+
768
+ /**
769
+ * Interface for controlling heap profiling. Instance of the
770
+ * profiler can be retrieved using v8::Isolate::GetHeapProfiler.
771
+ */
772
+ class V8_EXPORT HeapProfiler {
773
+ public:
774
+ enum SamplingFlags {
775
+ kSamplingNoFlags = 0,
776
+ kSamplingForceGC = 1 << 0,
777
+ };
778
+
779
+ /**
780
+ * Callback function invoked during heap snapshot generation to retrieve
781
+ * the embedder object graph. The callback should use graph->AddEdge(..) to
782
+ * add references between the objects.
783
+ * The callback must not trigger garbage collection in V8.
784
+ */
785
+ typedef void (*BuildEmbedderGraphCallback)(v8::Isolate* isolate,
786
+ v8::EmbedderGraph* graph,
787
+ void* data);
788
+
789
+ /** Returns the number of snapshots taken. */
790
+ int GetSnapshotCount();
791
+
792
+ /** Returns a snapshot by index. */
793
+ const HeapSnapshot* GetHeapSnapshot(int index);
794
+
795
+ /**
796
+ * Returns SnapshotObjectId for a heap object referenced by |value| if
797
+ * it has been seen by the heap profiler, kUnknownObjectId otherwise.
798
+ */
799
+ SnapshotObjectId GetObjectId(Local<Value> value);
800
+
801
+ /**
802
+ * Returns SnapshotObjectId for a native object referenced by |value| if it
803
+ * has been seen by the heap profiler, kUnknownObjectId otherwise.
804
+ */
805
+ SnapshotObjectId GetObjectId(NativeObject value);
806
+
807
+ /**
808
+ * Returns heap object with given SnapshotObjectId if the object is alive,
809
+ * otherwise empty handle is returned.
810
+ */
811
+ Local<Value> FindObjectById(SnapshotObjectId id);
812
+
813
+ /**
814
+ * Clears internal map from SnapshotObjectId to heap object. The new objects
815
+ * will not be added into it unless a heap snapshot is taken or heap object
816
+ * tracking is kicked off.
817
+ */
818
+ void ClearObjectIds();
819
+
820
+ /**
821
+ * A constant for invalid SnapshotObjectId. GetSnapshotObjectId will return
822
+ * it in case heap profiler cannot find id for the object passed as
823
+ * parameter. HeapSnapshot::GetNodeById will always return NULL for such id.
824
+ */
825
+ static const SnapshotObjectId kUnknownObjectId = 0;
826
+
827
+ /**
828
+ * Callback interface for retrieving user friendly names of global objects.
829
+ */
830
+ class ObjectNameResolver {
831
+ public:
832
+ /**
833
+ * Returns name to be used in the heap snapshot for given node. Returned
834
+ * string must stay alive until snapshot collection is completed.
835
+ */
836
+ virtual const char* GetName(Local<Object> object) = 0;
837
+
838
+ protected:
839
+ virtual ~ObjectNameResolver() = default;
840
+ };
841
+
842
+ /**
843
+ * Takes a heap snapshot and returns it.
844
+ */
845
+ const HeapSnapshot* TakeHeapSnapshot(
846
+ ActivityControl* control = nullptr,
847
+ ObjectNameResolver* global_object_name_resolver = nullptr,
848
+ bool treat_global_objects_as_roots = true);
849
+
850
+ /**
851
+ * Starts tracking of heap objects population statistics. After calling
852
+ * this method, all heap objects relocations done by the garbage collector
853
+ * are being registered.
854
+ *
855
+ * |track_allocations| parameter controls whether stack trace of each
856
+ * allocation in the heap will be recorded and reported as part of
857
+ * HeapSnapshot.
858
+ */
859
+ void StartTrackingHeapObjects(bool track_allocations = false);
860
+
861
+ /**
862
+ * Adds a new time interval entry to the aggregated statistics array. The
863
+ * time interval entry contains information on the current heap objects
864
+ * population size. The method also updates aggregated statistics and
865
+ * reports updates for all previous time intervals via the OutputStream
866
+ * object. Updates on each time interval are provided as a stream of the
867
+ * HeapStatsUpdate structure instances.
868
+ * If |timestamp_us| is supplied, timestamp of the new entry will be written
869
+ * into it. The return value of the function is the last seen heap object Id.
870
+ *
871
+ * StartTrackingHeapObjects must be called before the first call to this
872
+ * method.
873
+ */
874
+ SnapshotObjectId GetHeapStats(OutputStream* stream,
875
+ int64_t* timestamp_us = nullptr);
876
+
877
+ /**
878
+ * Stops tracking of heap objects population statistics, cleans up all
879
+ * collected data. StartHeapObjectsTracking must be called again prior to
880
+ * calling GetHeapStats next time.
881
+ */
882
+ void StopTrackingHeapObjects();
883
+
884
+ /**
885
+ * Starts gathering a sampling heap profile. A sampling heap profile is
886
+ * similar to tcmalloc's heap profiler and Go's mprof. It samples object
887
+ * allocations and builds an online 'sampling' heap profile. At any point in
888
+ * time, this profile is expected to be a representative sample of objects
889
+ * currently live in the system. Each sampled allocation includes the stack
890
+ * trace at the time of allocation, which makes this really useful for memory
891
+ * leak detection.
892
+ *
893
+ * This mechanism is intended to be cheap enough that it can be used in
894
+ * production with minimal performance overhead.
895
+ *
896
+ * Allocations are sampled using a randomized Poisson process. On average, one
897
+ * allocation will be sampled every |sample_interval| bytes allocated. The
898
+ * |stack_depth| parameter controls the maximum number of stack frames to be
899
+ * captured on each allocation.
900
+ *
901
+ * NOTE: This is a proof-of-concept at this point. Right now we only sample
902
+ * newspace allocations. Support for paged space allocation (e.g. pre-tenured
903
+ * objects, large objects, code objects, etc.) and native allocations
904
+ * doesn't exist yet, but is anticipated in the future.
905
+ *
906
+ * Objects allocated before the sampling is started will not be included in
907
+ * the profile.
908
+ *
909
+ * Returns false if a sampling heap profiler is already running.
910
+ */
911
+ bool StartSamplingHeapProfiler(uint64_t sample_interval = 512 * 1024,
912
+ int stack_depth = 16,
913
+ SamplingFlags flags = kSamplingNoFlags);
914
+
915
+ /**
916
+ * Stops the sampling heap profile and discards the current profile.
917
+ */
918
+ void StopSamplingHeapProfiler();
919
+
920
+ /**
921
+ * Returns the sampled profile of allocations allocated (and still live) since
922
+ * StartSamplingHeapProfiler was called. The ownership of the pointer is
923
+ * transferred to the caller. Returns nullptr if sampling heap profiler is not
924
+ * active.
925
+ */
926
+ AllocationProfile* GetAllocationProfile();
927
+
928
+ /**
929
+ * Deletes all snapshots taken. All previously returned pointers to
930
+ * snapshots and their contents become invalid after this call.
931
+ */
932
+ void DeleteAllHeapSnapshots();
933
+
934
+ void AddBuildEmbedderGraphCallback(BuildEmbedderGraphCallback callback,
935
+ void* data);
936
+ void RemoveBuildEmbedderGraphCallback(BuildEmbedderGraphCallback callback,
937
+ void* data);
938
+
939
+ /**
940
+ * Default value of persistent handle class ID. Must not be used to
941
+ * define a class. Can be used to reset a class of a persistent
942
+ * handle.
943
+ */
944
+ static const uint16_t kPersistentHandleNoClassId = 0;
945
+
946
+ private:
947
+ HeapProfiler();
948
+ ~HeapProfiler();
949
+ HeapProfiler(const HeapProfiler&);
950
+ HeapProfiler& operator=(const HeapProfiler&);
951
+ };
952
+
953
+ /**
954
+ * A struct for exporting HeapStats data from V8, using "push" model.
955
+ * See HeapProfiler::GetHeapStats.
956
+ */
957
+ struct HeapStatsUpdate {
958
+ HeapStatsUpdate(uint32_t index, uint32_t count, uint32_t size)
959
+ : index(index), count(count), size(size) { }
960
+ uint32_t index; // Index of the time interval that was changed.
961
+ uint32_t count; // New value of count field for the interval with this index.
962
+ uint32_t size; // New value of size field for the interval with this index.
963
+ };
964
+
965
+ #define CODE_EVENTS_LIST(V) \
966
+ V(Builtin) \
967
+ V(Callback) \
968
+ V(Eval) \
969
+ V(Function) \
970
+ V(InterpretedFunction) \
971
+ V(Handler) \
972
+ V(BytecodeHandler) \
973
+ V(LazyCompile) \
974
+ V(RegExp) \
975
+ V(Script) \
976
+ V(Stub) \
977
+ V(Relocation)
978
+
979
+ /**
980
+ * Note that this enum may be extended in the future. Please include a default
981
+ * case if this enum is used in a switch statement.
982
+ */
983
+ enum CodeEventType {
984
+ kUnknownType = 0
985
+ #define V(Name) , k##Name##Type
986
+ CODE_EVENTS_LIST(V)
987
+ #undef V
988
+ };
989
+
990
+ /**
991
+ * Representation of a code creation event
992
+ */
993
+ class V8_EXPORT CodeEvent {
994
+ public:
995
+ uintptr_t GetCodeStartAddress();
996
+ size_t GetCodeSize();
997
+ Local<String> GetFunctionName();
998
+ Local<String> GetScriptName();
999
+ int GetScriptLine();
1000
+ int GetScriptColumn();
1001
+ /**
1002
+ * NOTE (mmarchini): We can't allocate objects in the heap when we collect
1003
+ * existing code, and both the code type and the comment are not stored in the
1004
+ * heap, so we return those as const char*.
1005
+ */
1006
+ CodeEventType GetCodeType();
1007
+ const char* GetComment();
1008
+
1009
+ static const char* GetCodeEventTypeName(CodeEventType code_event_type);
1010
+
1011
+ uintptr_t GetPreviousCodeStartAddress();
1012
+ };
1013
+
1014
+ /**
1015
+ * Interface to listen to code creation and code relocation events.
1016
+ */
1017
+ class V8_EXPORT CodeEventHandler {
1018
+ public:
1019
+ /**
1020
+ * Creates a new listener for the |isolate|. The isolate must be initialized.
1021
+ * The listener object must be disposed after use by calling |Dispose| method.
1022
+ * Multiple listeners can be created for the same isolate.
1023
+ */
1024
+ explicit CodeEventHandler(Isolate* isolate);
1025
+ virtual ~CodeEventHandler();
1026
+
1027
+ /**
1028
+ * Handle is called every time a code object is created or moved. Information
1029
+ * about each code event will be available through the `code_event`
1030
+ * parameter.
1031
+ *
1032
+ * When the CodeEventType is kRelocationType, the code for this CodeEvent has
1033
+ * moved from `GetPreviousCodeStartAddress()` to `GetCodeStartAddress()`.
1034
+ */
1035
+ virtual void Handle(CodeEvent* code_event) = 0;
1036
+
1037
+ /**
1038
+ * Call `Enable()` to starts listening to code creation and code relocation
1039
+ * events. These events will be handled by `Handle()`.
1040
+ */
1041
+ void Enable();
1042
+
1043
+ /**
1044
+ * Call `Disable()` to stop listening to code creation and code relocation
1045
+ * events.
1046
+ */
1047
+ void Disable();
1048
+
1049
+ private:
1050
+ CodeEventHandler();
1051
+ CodeEventHandler(const CodeEventHandler&);
1052
+ CodeEventHandler& operator=(const CodeEventHandler&);
1053
+ void* internal_listener_;
1054
+ };
1055
+
1056
+ } // namespace v8
1057
+
1058
+
1059
+ #endif // V8_V8_PROFILER_H_