libv8-node 17.9.1.1-aarch64-linux → 18.8.0.0-aarch64-linux

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (64)
  1. checksums.yaml +4 -4
  2. data/ext/libv8-node/location.rb +1 -1
  3. data/lib/libv8/node/version.rb +3 -3
  4. data/vendor/v8/aarch64-linux/libv8/obj/libv8_monolith.a +0 -0
  5. data/vendor/v8/include/cppgc/allocation.h +88 -17
  6. data/vendor/v8/include/cppgc/default-platform.h +2 -10
  7. data/vendor/v8/include/cppgc/explicit-management.h +22 -4
  8. data/vendor/v8/include/cppgc/garbage-collected.h +15 -26
  9. data/vendor/v8/include/cppgc/heap-consistency.h +13 -0
  10. data/vendor/v8/include/cppgc/heap-state.h +12 -0
  11. data/vendor/v8/include/cppgc/heap.h +7 -2
  12. data/vendor/v8/include/cppgc/internal/api-constants.h +8 -0
  13. data/vendor/v8/include/cppgc/internal/caged-heap-local-data.h +23 -12
  14. data/vendor/v8/include/cppgc/internal/finalizer-trait.h +2 -1
  15. data/vendor/v8/include/cppgc/internal/logging.h +3 -3
  16. data/vendor/v8/include/cppgc/internal/persistent-node.h +39 -27
  17. data/vendor/v8/include/cppgc/internal/pointer-policies.h +4 -4
  18. data/vendor/v8/include/cppgc/internal/write-barrier.h +26 -32
  19. data/vendor/v8/include/cppgc/member.h +5 -2
  20. data/vendor/v8/include/cppgc/persistent.h +30 -31
  21. data/vendor/v8/include/cppgc/platform.h +3 -1
  22. data/vendor/v8/include/cppgc/prefinalizer.h +34 -11
  23. data/vendor/v8/include/cppgc/testing.h +9 -2
  24. data/vendor/v8/include/cppgc/type-traits.h +6 -13
  25. data/vendor/v8/include/libplatform/libplatform.h +0 -11
  26. data/vendor/v8/include/libplatform/v8-tracing.h +0 -1
  27. data/vendor/v8/include/v8-array-buffer.h +14 -2
  28. data/vendor/v8/include/v8-callbacks.h +26 -6
  29. data/vendor/v8/include/v8-context.h +3 -14
  30. data/vendor/v8/include/v8-cppgc.h +16 -126
  31. data/vendor/v8/include/v8-data.h +15 -0
  32. data/vendor/v8/include/v8-debug.h +21 -4
  33. data/vendor/v8/include/v8-embedder-heap.h +10 -30
  34. data/vendor/v8/include/v8-embedder-state-scope.h +51 -0
  35. data/vendor/v8/include/v8-exception.h +0 -7
  36. data/vendor/v8/include/v8-fast-api-calls.h +82 -31
  37. data/vendor/v8/include/v8-function.h +3 -0
  38. data/vendor/v8/include/v8-initialization.h +64 -31
  39. data/vendor/v8/include/v8-inspector.h +45 -4
  40. data/vendor/v8/include/v8-internal.h +189 -102
  41. data/vendor/v8/include/v8-isolate.h +49 -2
  42. data/vendor/v8/include/v8-local-handle.h +0 -4
  43. data/vendor/v8/include/v8-locker.h +2 -1
  44. data/vendor/v8/include/v8-message.h +19 -44
  45. data/vendor/v8/include/v8-metrics.h +32 -15
  46. data/vendor/v8/include/v8-object.h +11 -6
  47. data/vendor/v8/include/v8-platform.h +365 -6
  48. data/vendor/v8/include/v8-primitive.h +14 -6
  49. data/vendor/v8/include/v8-profiler.h +78 -2
  50. data/vendor/v8/include/v8-script.h +27 -51
  51. data/vendor/v8/include/v8-snapshot.h +0 -2
  52. data/vendor/v8/include/v8-statistics.h +2 -0
  53. data/vendor/v8/include/v8-template.h +31 -4
  54. data/vendor/v8/include/v8-traced-handle.h +39 -224
  55. data/vendor/v8/include/v8-unwinder.h +10 -7
  56. data/vendor/v8/include/v8-value-serializer-version.h +1 -1
  57. data/vendor/v8/include/v8-value-serializer.h +32 -2
  58. data/vendor/v8/include/v8-version.h +4 -4
  59. data/vendor/v8/include/v8-wasm.h +13 -1
  60. data/vendor/v8/include/v8-weak-callback-info.h +20 -6
  61. data/vendor/v8/include/v8.h +0 -1
  62. data/vendor/v8/include/v8config.h +56 -11
  63. metadata +6 -6
  64. data/vendor/v8/include/cppgc/internal/prefinalizer-handler.h +0 -30
data/vendor/v8/include/v8-platform.h
@@ -401,6 +401,8 @@ class PageAllocator {
     // this is used to set the MAP_JIT flag on Apple Silicon.
     // TODO(jkummerow): Remove this when Wasm has a platform-independent
     // w^x implementation.
+    // TODO(saelo): Remove this once all JIT pages are allocated through the
+    // VirtualAddressSpace API.
     kNoAccessWillJitLater
   };

@@ -444,13 +446,7 @@ class PageAllocator {
    * zero-initialized again. The memory must have been previously allocated by a
    * call to AllocatePages. Returns true on success, false otherwise.
    */
-#ifdef V8_VIRTUAL_MEMORY_CAGE
-  // Implementing this API is required when the virtual memory cage is enabled.
   virtual bool DecommitPages(void* address, size_t size) = 0;
-#else
-  // Otherwise, it is optional for now.
-  virtual bool DecommitPages(void* address, size_t size) { return false; }
-#endif

   /**
    * INTERNAL ONLY: This interface has not been stabilised and may change
@@ -516,6 +512,349 @@ class PageAllocator {
   virtual bool CanAllocateSharedPages() { return false; }
 };

+// Opaque type representing a handle to a shared memory region.
+using PlatformSharedMemoryHandle = intptr_t;
+static constexpr PlatformSharedMemoryHandle kInvalidSharedMemoryHandle = -1;
+
+// Conversion routines from the platform-dependent shared memory identifiers
+// into the opaque PlatformSharedMemoryHandle type. These use the underlying
+// types (e.g. unsigned int) instead of the typedef'd ones (e.g. mach_port_t)
+// to avoid pulling in large OS header files into this header file. Instead,
+// the users of these routines are expected to include the respecitve OS
+// headers in addition to this one.
+#if V8_OS_MACOS
+// Convert between a shared memory handle and a mach_port_t referencing a memory
+// entry object.
+inline PlatformSharedMemoryHandle SharedMemoryHandleFromMachMemoryEntry(
+    unsigned int port) {
+  return static_cast<PlatformSharedMemoryHandle>(port);
+}
+inline unsigned int MachMemoryEntryFromSharedMemoryHandle(
+    PlatformSharedMemoryHandle handle) {
+  return static_cast<unsigned int>(handle);
+}
+#elif V8_OS_FUCHSIA
+// Convert between a shared memory handle and a zx_handle_t to a VMO.
+inline PlatformSharedMemoryHandle SharedMemoryHandleFromVMO(uint32_t handle) {
+  return static_cast<PlatformSharedMemoryHandle>(handle);
+}
+inline uint32_t VMOFromSharedMemoryHandle(PlatformSharedMemoryHandle handle) {
+  return static_cast<uint32_t>(handle);
+}
+#elif V8_OS_WIN
+// Convert between a shared memory handle and a Windows HANDLE to a file mapping
+// object.
+inline PlatformSharedMemoryHandle SharedMemoryHandleFromFileMapping(
+    void* handle) {
+  return reinterpret_cast<PlatformSharedMemoryHandle>(handle);
+}
+inline void* FileMappingFromSharedMemoryHandle(
+    PlatformSharedMemoryHandle handle) {
+  return reinterpret_cast<void*>(handle);
+}
+#else
+// Convert between a shared memory handle and a file descriptor.
+inline PlatformSharedMemoryHandle SharedMemoryHandleFromFileDescriptor(int fd) {
+  return static_cast<PlatformSharedMemoryHandle>(fd);
+}
+inline int FileDescriptorFromSharedMemoryHandle(
+    PlatformSharedMemoryHandle handle) {
+  return static_cast<int>(handle);
+}
+#endif
+
+/**
+ * Possible permissions for memory pages.
+ */
+enum class PagePermissions {
+  kNoAccess,
+  kRead,
+  kReadWrite,
+  kReadWriteExecute,
+  kReadExecute,
+};
+
+/**
+ * Class to manage a virtual memory address space.
+ *
+ * This class represents a contiguous region of virtual address space in which
+ * sub-spaces and (private or shared) memory pages can be allocated, freed, and
+ * modified. This interface is meant to eventually replace the PageAllocator
+ * interface, and can be used as an alternative in the meantime.
+ *
+ * This API is not yet stable and may change without notice!
+ */
+class VirtualAddressSpace {
+ public:
+  using Address = uintptr_t;
+
+  VirtualAddressSpace(size_t page_size, size_t allocation_granularity,
+                      Address base, size_t size,
+                      PagePermissions max_page_permissions)
+      : page_size_(page_size),
+        allocation_granularity_(allocation_granularity),
+        base_(base),
+        size_(size),
+        max_page_permissions_(max_page_permissions) {}
+
+  virtual ~VirtualAddressSpace() = default;
+
+  /**
+   * The page size used inside this space. Guaranteed to be a power of two.
+   * Used as granularity for all page-related operations except for allocation,
+   * which use the allocation_granularity(), see below.
+   *
+   * \returns the page size in bytes.
+   */
+  size_t page_size() const { return page_size_; }
+
+  /**
+   * The granularity of page allocations and, by extension, of subspace
+   * allocations. This is guaranteed to be a power of two and a multiple of the
+   * page_size(). In practice, this is equal to the page size on most OSes, but
+   * on Windows it is usually 64KB, while the page size is 4KB.
+   *
+   * \returns the allocation granularity in bytes.
+   */
+  size_t allocation_granularity() const { return allocation_granularity_; }
+
+  /**
+   * The base address of the address space managed by this instance.
+   *
+   * \returns the base address of this address space.
+   */
+  Address base() const { return base_; }
+
+  /**
+   * The size of the address space managed by this instance.
+   *
+   * \returns the size of this address space in bytes.
+   */
+  size_t size() const { return size_; }
+
+  /**
+   * The maximum page permissions that pages allocated inside this space can
+   * obtain.
+   *
+   * \returns the maximum page permissions.
+   */
+  PagePermissions max_page_permissions() const { return max_page_permissions_; }
+
+  /**
+   * Sets the random seed so that GetRandomPageAddress() will generate
+   * repeatable sequences of random addresses.
+   *
+   * \param The seed for the PRNG.
+   */
+  virtual void SetRandomSeed(int64_t seed) = 0;
+
+  /**
+   * Returns a random address inside this address space, suitable for page
+   * allocations hints.
+   *
+   * \returns a random address aligned to allocation_granularity().
+   */
+  virtual Address RandomPageAddress() = 0;
+
+  /**
+   * Allocates private memory pages with the given alignment and permissions.
+   *
+   * \param hint If nonzero, the allocation is attempted to be placed at the
+   * given address first. If that fails, the allocation is attempted to be
+   * placed elsewhere, possibly nearby, but that is not guaranteed. Specifying
+   * zero for the hint always causes this function to choose a random address.
+   * The hint, if specified, must be aligned to the specified alignment.
+   *
+   * \param size The size of the allocation in bytes. Must be a multiple of the
+   * allocation_granularity().
+   *
+   * \param alignment The alignment of the allocation in bytes. Must be a
+   * multiple of the allocation_granularity() and should be a power of two.
+   *
+   * \param permissions The page permissions of the newly allocated pages.
+   *
+   * \returns the start address of the allocated pages on success, zero on
+   * failure.
+   */
+  static constexpr Address kNoHint = 0;
+  virtual V8_WARN_UNUSED_RESULT Address
+  AllocatePages(Address hint, size_t size, size_t alignment,
+                PagePermissions permissions) = 0;
+
+  /**
+   * Frees previously allocated pages.
+   *
+   * This function will terminate the process on failure as this implies a bug
+   * in the client. As such, there is no return value.
+   *
+   * \param address The start address of the pages to free. This address must
+   * have been obtained through a call to AllocatePages.
+   *
+   * \param size The size in bytes of the region to free. This must match the
+   * size passed to AllocatePages when the pages were allocated.
+   */
+  virtual void FreePages(Address address, size_t size) = 0;
+
+  /**
+   * Sets permissions of all allocated pages in the given range.
+   *
+   * \param address The start address of the range. Must be aligned to
+   * page_size().
+   *
+   * \param size The size in bytes of the range. Must be a multiple
+   * of page_size().
+   *
+   * \param permissions The new permissions for the range.
+   *
+   * \returns true on success, false otherwise.
+   */
+  virtual V8_WARN_UNUSED_RESULT bool SetPagePermissions(
+      Address address, size_t size, PagePermissions permissions) = 0;
+
+  /**
+   * Creates a guard region at the specified address.
+   *
+   * Guard regions are guaranteed to cause a fault when accessed and generally
+   * do not count towards any memory consumption limits. Further, allocating
+   * guard regions can usually not fail in subspaces if the region does not
+   * overlap with another region, subspace, or page allocation.
+   *
+   * \param address The start address of the guard region. Must be aligned to
+   * the allocation_granularity().
+   *
+   * \param size The size of the guard region in bytes. Must be a multiple of
+   * the allocation_granularity().
+   *
+   * \returns true on success, false otherwise.
+   */
+  virtual V8_WARN_UNUSED_RESULT bool AllocateGuardRegion(Address address,
+                                                         size_t size) = 0;
+
+  /**
+   * Frees an existing guard region.
+   *
+   * This function will terminate the process on failure as this implies a bug
+   * in the client. As such, there is no return value.
+   *
+   * \param address The start address of the guard region to free. This address
+   * must have previously been used as address parameter in a successful
+   * invocation of AllocateGuardRegion.
+   *
+   * \param size The size in bytes of the guard region to free. This must match
+   * the size passed to AllocateGuardRegion when the region was created.
+   */
+  virtual void FreeGuardRegion(Address address, size_t size) = 0;
+
+  /**
+   * Allocates shared memory pages with the given permissions.
+   *
+   * \param hint Placement hint. See AllocatePages.
+   *
+   * \param size The size of the allocation in bytes. Must be a multiple of the
+   * allocation_granularity().
+   *
+   * \param permissions The page permissions of the newly allocated pages.
+   *
+   * \param handle A platform-specific handle to a shared memory object. See
+   * the SharedMemoryHandleFromX routines above for ways to obtain these.
+   *
+   * \param offset The offset in the shared memory object at which the mapping
+   * should start. Must be a multiple of the allocation_granularity().
+   *
+   * \returns the start address of the allocated pages on success, zero on
+   * failure.
+   */
+  virtual V8_WARN_UNUSED_RESULT Address
+  AllocateSharedPages(Address hint, size_t size, PagePermissions permissions,
+                      PlatformSharedMemoryHandle handle, uint64_t offset) = 0;
+
+  /**
+   * Frees previously allocated shared pages.
+   *
+   * This function will terminate the process on failure as this implies a bug
+   * in the client. As such, there is no return value.
+   *
+   * \param address The start address of the pages to free. This address must
+   * have been obtained through a call to AllocateSharedPages.
+   *
+   * \param size The size in bytes of the region to free. This must match the
+   * size passed to AllocateSharedPages when the pages were allocated.
+   */
+  virtual void FreeSharedPages(Address address, size_t size) = 0;
+
+  /**
+   * Whether this instance can allocate subspaces or not.
+   *
+   * \returns true if subspaces can be allocated, false if not.
+   */
+  virtual bool CanAllocateSubspaces() = 0;
+
+  /*
+   * Allocate a subspace.
+   *
+   * The address space of a subspace stays reserved in the parent space for the
+   * lifetime of the subspace. As such, it is guaranteed that page allocations
+   * on the parent space cannot end up inside a subspace.
+   *
+   * \param hint Hints where the subspace should be allocated. See
+   * AllocatePages() for more details.
+   *
+   * \param size The size in bytes of the subspace. Must be a multiple of the
+   * allocation_granularity().
+   *
+   * \param alignment The alignment of the subspace in bytes. Must be a multiple
+   * of the allocation_granularity() and should be a power of two.
+   *
+   * \param max_page_permissions The maximum permissions that pages allocated in
+   * the subspace can obtain.
+   *
+   * \returns a new subspace or nullptr on failure.
+   */
+  virtual std::unique_ptr<VirtualAddressSpace> AllocateSubspace(
+      Address hint, size_t size, size_t alignment,
+      PagePermissions max_page_permissions) = 0;
+
+  //
+  // TODO(v8) maybe refactor the methods below before stabilizing the API. For
+  // example by combining them into some form of page operation method that
+  // takes a command enum as parameter.
+  //
+
+  /**
+   * Frees memory in the given [address, address + size) range. address and
+   * size should be aligned to the page_size(). The next write to this memory
+   * area brings the memory transparently back. This should be treated as a
+   * hint to the OS that the pages are no longer needed. It does not guarantee
+   * that the pages will be discarded immediately or at all.
+   *
+   * \returns true on success, false otherwise. Since this method is only a
+   * hint, a successful invocation does not imply that pages have been removed.
+   */
+  virtual V8_WARN_UNUSED_RESULT bool DiscardSystemPages(Address address,
+                                                        size_t size) {
+    return true;
+  }
+  /**
+   * Decommits any wired memory pages in the given range, allowing the OS to
+   * reclaim them, and marks the region as inacessible (kNoAccess). The address
+   * range stays reserved and can be accessed again later by changing its
+   * permissions. However, in that case the memory content is guaranteed to be
+   * zero-initialized again. The memory must have been previously allocated by a
+   * call to AllocatePages.
+   *
+   * \returns true on success, false otherwise.
+   */
+  virtual V8_WARN_UNUSED_RESULT bool DecommitPages(Address address,
+                                                   size_t size) = 0;
+
+ private:
+  const size_t page_size_;
+  const size_t allocation_granularity_;
+  const Address base_;
+  const size_t size_;
+  const PagePermissions max_page_permissions_;
+};
+
 /**
  * V8 Allocator used for allocating zone backings.
  */
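The VirtualAddressSpace interface added above is self-contained, so its intended use can be illustrated with a short sketch. Everything below that is not declared in the hunk (the Sketch function, how the root space is obtained, the 64 MiB size) is illustrative only, not part of the package:

#include <cstddef>
#include <memory>
#include "v8-platform.h"

// Reserve a sub-space, allocate one granule of pages in it, make the pages
// read/write, then release everything again.
void Sketch(v8::VirtualAddressSpace* root) {
  using Address = v8::VirtualAddressSpace::Address;
  const size_t granularity = root->allocation_granularity();

  // Sub-space in which pages can become at most read/write.
  std::unique_ptr<v8::VirtualAddressSpace> space = root->AllocateSubspace(
      v8::VirtualAddressSpace::kNoHint, 64 * 1024 * 1024, granularity,
      v8::PagePermissions::kReadWrite);
  if (!space) return;

  // Allocate one allocation granule of inaccessible pages ...
  Address pages = space->AllocatePages(v8::VirtualAddressSpace::kNoHint,
                                       granularity, granularity,
                                       v8::PagePermissions::kNoAccess);
  if (pages == 0) return;
  // ... then flip them to read/write before use.
  if (!space->SetPagePermissions(pages, granularity,
                                 v8::PagePermissions::kReadWrite)) {
    space->FreePages(pages, granularity);
    return;
  }
  // ... use the memory ...
  space->FreePages(pages, granularity);
}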
@@ -528,6 +867,16 @@ class ZoneBackingAllocator {
   virtual FreeFn GetFreeFn() const { return ::free; }
 };

+/**
+ * Observer used by V8 to notify the embedder about entering/leaving sections
+ * with high throughput of malloc/free operations.
+ */
+class HighAllocationThroughputObserver {
+ public:
+  virtual void EnterSection() {}
+  virtual void LeaveSection() {}
+};
+
 /**
  * V8 Platform abstraction layer.
  *
@@ -719,6 +1068,16 @@ class Platform {
   */
   virtual void DumpWithoutCrashing() {}

+  /**
+   * Allows the embedder to observe sections with high throughput allocation
+   * operations.
+   */
+  virtual HighAllocationThroughputObserver*
+  GetHighAllocationThroughputObserver() {
+    static HighAllocationThroughputObserver default_observer;
+    return &default_observer;
+  }
+
  protected:
   /**
    * Default implementation of current wall-clock time in milliseconds
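The new hook is opt-in: the default observer returned by Platform does nothing. A rough sketch of an embedder-side observer that such a Platform override could return; the subclass and its counters are assumptions, only the V8 names from the hunks above are real:

#include <atomic>
#include <cstdint>
#include "v8-platform.h"

// Counts how often V8 enters/leaves malloc/free-heavy sections.
class MyThroughputObserver final
    : public v8::HighAllocationThroughputObserver {
 public:
  void EnterSection() override { sections_entered_.fetch_add(1); }
  void LeaveSection() override { sections_left_.fetch_add(1); }

 private:
  std::atomic<uint64_t> sections_entered_{0};
  std::atomic<uint64_t> sections_left_{0};
};

// In the embedder's v8::Platform subclass, the getter would simply return a
// long-lived instance of the observer:
//   HighAllocationThroughputObserver* GetHighAllocationThroughputObserver()
//       override { return &my_observer_; }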
data/vendor/v8/include/v8-primitive.h
@@ -54,12 +54,22 @@ class V8_EXPORT Boolean : public Primitive {
  * This is passed back to the embedder as part of
  * HostImportModuleDynamicallyCallback for module loading.
  */
-class V8_EXPORT PrimitiveArray {
+class V8_EXPORT PrimitiveArray : public Data {
  public:
   static Local<PrimitiveArray> New(Isolate* isolate, int length);
   int Length() const;
   void Set(Isolate* isolate, int index, Local<Primitive> item);
   Local<Primitive> Get(Isolate* isolate, int index);
+
+  V8_INLINE static PrimitiveArray* Cast(Data* data) {
+#ifdef V8_ENABLE_CHECKS
+    CheckCast(data);
+#endif
+    return reinterpret_cast<PrimitiveArray*>(data);
+  }
+
+ private:
+  static void CheckCast(Data* obj);
 };

 /**
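Since PrimitiveArray now derives from Data, it gains the usual checked Cast helper. A minimal sketch of the cast, assuming the embedder already holds a Local<Data> known to contain a PrimitiveArray (for example, host-defined options handed to a module callback); the helper function itself is illustrative:

#include "v8.h"

// Only PrimitiveArray::Cast() and Length() come from the header above.
int LengthOfOptions(v8::Local<v8::Data> options) {
  v8::PrimitiveArray* array = v8::PrimitiveArray::Cast(*options);
  return array->Length();
}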
@@ -575,8 +585,6 @@ class V8_EXPORT Symbol : public Name {
   /**
    * Returns the description string of the symbol, or undefined if none.
    */
-  V8_DEPRECATE_SOON("Use Symbol::Description(isolate)")
-  Local<Value> Description() const;
   Local<Value> Description(Isolate* isolate) const;

   /**
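The parameterless Symbol::Description() overload, previously marked V8_DEPRECATE_SOON, is removed in this release; only the Isolate-taking overload remains. A migration sketch (the wrapper function is illustrative):

#include "v8.h"

v8::Local<v8::Value> DescriptionOf(v8::Isolate* isolate,
                                   v8::Local<v8::Symbol> symbol) {
  // Before: symbol->Description();
  return symbol->Description(isolate);
}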
@@ -777,7 +785,7 @@ String::ExternalStringResource* String::GetExternalStringResource() const {

   ExternalStringResource* result;
   if (I::IsExternalTwoByteString(I::GetInstanceType(obj))) {
-    internal::Isolate* isolate = I::GetIsolateForHeapSandbox(obj);
+    internal::Isolate* isolate = I::GetIsolateForSandbox(obj);
     A value =
         I::ReadExternalPointerField(isolate, obj, I::kStringResourceOffset,
                                     internal::kExternalStringResourceTag);
@@ -796,12 +804,12 @@ String::ExternalStringResourceBase* String::GetExternalStringResourceBase(
   using A = internal::Address;
   using I = internal::Internals;
   A obj = *reinterpret_cast<const A*>(this);
-  int type = I::GetInstanceType(obj) & I::kFullStringRepresentationMask;
+  int type = I::GetInstanceType(obj) & I::kStringRepresentationAndEncodingMask;
   *encoding_out = static_cast<Encoding>(type & I::kStringEncodingMask);
   ExternalStringResourceBase* resource;
   if (type == I::kExternalOneByteRepresentationTag ||
       type == I::kExternalTwoByteRepresentationTag) {
-    internal::Isolate* isolate = I::GetIsolateForHeapSandbox(obj);
+    internal::Isolate* isolate = I::GetIsolateForSandbox(obj);
     A value =
         I::ReadExternalPointerField(isolate, obj, I::kStringResourceOffset,
                                     internal::kExternalStringResourceTag);
data/vendor/v8/include/v8-profiler.h
@@ -20,12 +20,15 @@
  */
 namespace v8 {

+enum class EmbedderStateTag : uint8_t;
 class HeapGraphNode;
 struct HeapStatsUpdate;
 class Object;
+enum StateTag : int;

 using NativeObject = void*;
 using SnapshotObjectId = uint32_t;
+using ProfilerId = uint32_t;

 struct CpuProfileDeoptFrame {
   int script_id;
@@ -210,6 +213,16 @@ class V8_EXPORT CpuProfile {
   */
   int64_t GetStartTime() const;

+  /**
+   * Returns state of the vm when sample was captured.
+   */
+  StateTag GetSampleState(int index) const;
+
+  /**
+   * Returns state of the embedder when sample was captured.
+   */
+  EmbedderStateTag GetSampleEmbedderState(int index) const;
+
   /**
    * Returns time when the profile recording was stopped (in microseconds)
    * since some unspecified starting point.
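A sketch of reading the newly exposed per-sample state from a collected profile. GetSamplesCount() is the pre-existing sample-count accessor on CpuProfile and is not shown in this hunk; the helper function is illustrative:

#include "v8-profiler.h"

void DumpSampleStates(const v8::CpuProfile* profile) {
  for (int i = 0; i < profile->GetSamplesCount(); ++i) {
    // The two GetSample*State() calls are the additions above.
    v8::StateTag vm_state = profile->GetSampleState(i);
    v8::EmbedderStateTag embedder_state = profile->GetSampleEmbedderState(i);
    (void)vm_state;
    (void)embedder_state;
  }
}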
@@ -261,15 +274,33 @@ enum class CpuProfilingStatus {
   kErrorTooManyProfilers
 };

+/**
+ * Result from StartProfiling returning the Profiling Status, and
+ * id of the started profiler, or 0 if profiler is not started
+ */
+struct CpuProfilingResult {
+  const ProfilerId id;
+  const CpuProfilingStatus status;
+};
+
 /**
  * Delegate for when max samples reached and samples are discarded.
  */
 class V8_EXPORT DiscardedSamplesDelegate {
  public:
-  DiscardedSamplesDelegate() {}
+  DiscardedSamplesDelegate() = default;

   virtual ~DiscardedSamplesDelegate() = default;
   virtual void Notify() = 0;
+
+  ProfilerId GetId() const { return profiler_id_; }
+
+ private:
+  friend internal::CpuProfile;
+
+  void SetId(ProfilerId id) { profiler_id_ = id; }
+
+  ProfilerId profiler_id_;
 };

 /**
@@ -359,6 +390,45 @@ class V8_EXPORT CpuProfiler {
   */
   void SetUsePreciseSampling(bool);

+  /**
+   * Starts collecting a CPU profile. Several profiles may be collected at once.
+   * Generates an anonymous profiler, without a String identifier.
+   */
+  CpuProfilingResult Start(
+      CpuProfilingOptions options,
+      std::unique_ptr<DiscardedSamplesDelegate> delegate = nullptr);
+
+  /**
+   * Starts collecting a CPU profile. Title may be an empty string. Several
+   * profiles may be collected at once. Attempts to start collecting several
+   * profiles with the same title are silently ignored.
+   */
+  CpuProfilingResult Start(
+      Local<String> title, CpuProfilingOptions options,
+      std::unique_ptr<DiscardedSamplesDelegate> delegate = nullptr);
+
+  /**
+   * Starts profiling with the same semantics as above, except with expanded
+   * parameters.
+   *
+   * |record_samples| parameter controls whether individual samples should
+   * be recorded in addition to the aggregated tree.
+   *
+   * |max_samples| controls the maximum number of samples that should be
+   * recorded by the profiler. Samples obtained after this limit will be
+   * discarded.
+   */
+  CpuProfilingResult Start(
+      Local<String> title, CpuProfilingMode mode, bool record_samples = false,
+      unsigned max_samples = CpuProfilingOptions::kNoSampleLimit);
+
+  /**
+   * The same as StartProfiling above, but the CpuProfilingMode defaults to
+   * kLeafNodeLineNumbers mode, which was the previous default behavior of the
+   * profiler.
+   */
+  CpuProfilingResult Start(Local<String> title, bool record_samples = false);
+
   /**
    * Starts collecting a CPU profile. Title may be an empty string. Several
    * profiles may be collected at once. Attempts to start collecting several
@@ -382,6 +452,7 @@ class V8_EXPORT CpuProfiler {
   CpuProfilingStatus StartProfiling(
       Local<String> title, CpuProfilingMode mode, bool record_samples = false,
       unsigned max_samples = CpuProfilingOptions::kNoSampleLimit);
+
   /**
    * The same as StartProfiling above, but the CpuProfilingMode defaults to
    * kLeafNodeLineNumbers mode, which was the previous default behavior of the
@@ -390,6 +461,11 @@ class V8_EXPORT CpuProfiler {
   CpuProfilingStatus StartProfiling(Local<String> title,
                                     bool record_samples = false);

+  /**
+   * Stops collecting CPU profile with a given id and returns it.
+   */
+  CpuProfile* Stop(ProfilerId id);
+
   /**
    * Stops collecting CPU profile with a given title and returns it.
    * If the title given is empty, finishes the last profile started.
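Taken together, the additions above form an id-based variant of the start/stop API: Start() returns a CpuProfilingResult and Stop() accepts the id from it. A usage sketch, where the wrapper function and the default-constructed options are illustrative:

#include "v8-profiler.h"

v8::CpuProfile* ProfileOnce(v8::CpuProfiler* profiler) {
  // Start an anonymous profile using default options.
  v8::CpuProfilingResult result = profiler->Start(v8::CpuProfilingOptions{});
  if (result.status != v8::CpuProfilingStatus::kStarted) return nullptr;
  // ... run the workload to be profiled ...
  return profiler->Stop(result.id);
}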
@@ -603,7 +679,7 @@ class V8_EXPORT ActivityControl {
    * Notify about current progress. The activity can be stopped by
    * returning kAbort as the callback result.
    */
-  virtual ControlOption ReportProgressValue(int done, int total) = 0;
+  virtual ControlOption ReportProgressValue(uint32_t done, uint32_t total) = 0;
 };

 /**
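ReportProgressValue() now takes uint32_t instead of int, so embedder subclasses of ActivityControl used for heap-snapshot progress reporting must update their signature to keep overriding the virtual. A migration sketch (the subclass and its fields are illustrative):

#include <cstdint>
#include "v8-profiler.h"

class MyActivityControl final : public v8::ActivityControl {
 public:
  // Previously: ControlOption ReportProgressValue(int done, int total)
  ControlOption ReportProgressValue(uint32_t done, uint32_t total) override {
    last_done_ = done;
    last_total_ = total;
    return kContinue;
  }

 private:
  uint32_t last_done_ = 0;
  uint32_t last_total_ = 0;
};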