node-linux-arm64 22.10.0 → 23.1.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (57)
  1. package/CHANGELOG.md +527 -2239
  2. package/README.md +9 -14
  3. package/bin/node +0 -0
  4. package/include/node/common.gypi +3 -3
  5. package/include/node/config.gypi +5 -4
  6. package/include/node/cppgc/allocation.h +10 -11
  7. package/include/node/cppgc/garbage-collected.h +8 -0
  8. package/include/node/cppgc/heap-statistics.h +2 -0
  9. package/include/node/cppgc/internal/api-constants.h +6 -1
  10. package/include/node/cppgc/internal/compiler-specific.h +9 -1
  11. package/include/node/cppgc/internal/gc-info.h +12 -10
  12. package/include/node/cppgc/internal/member-storage.h +6 -0
  13. package/include/node/cppgc/internal/name-trait.h +5 -1
  14. package/include/node/cppgc/name-provider.h +7 -0
  15. package/include/node/node.h +8 -18
  16. package/include/node/node_api.h +12 -0
  17. package/include/node/node_version.h +3 -3
  18. package/include/node/uv/tree.h +3 -250
  19. package/include/node/uv/version.h +2 -2
  20. package/include/node/uv/win.h +2 -2
  21. package/include/node/uv.h +45 -17
  22. package/include/node/v8-array-buffer.h +44 -24
  23. package/include/node/v8-callbacks.h +10 -5
  24. package/include/node/v8-context.h +41 -9
  25. package/include/node/v8-cppgc.h +3 -55
  26. package/include/node/v8-date.h +9 -0
  27. package/include/node/v8-embedder-heap.h +4 -1
  28. package/include/node/v8-exception.h +70 -0
  29. package/include/node/v8-function-callback.h +203 -62
  30. package/include/node/v8-function.h +4 -3
  31. package/include/node/v8-handle-base.h +2 -2
  32. package/include/node/v8-initialization.h +18 -1
  33. package/include/node/v8-internal.h +303 -58
  34. package/include/node/v8-isolate.h +51 -39
  35. package/include/node/v8-local-handle.h +18 -19
  36. package/include/node/v8-message.h +0 -21
  37. package/include/node/v8-microtask-queue.h +0 -5
  38. package/include/node/v8-object.h +284 -35
  39. package/include/node/v8-persistent-handle.h +0 -19
  40. package/include/node/v8-platform.h +21 -35
  41. package/include/node/v8-primitive.h +92 -1
  42. package/include/node/v8-profiler.h +38 -1
  43. package/include/node/v8-promise.h +2 -2
  44. package/include/node/v8-sandbox.h +173 -0
  45. package/include/node/v8-script.h +44 -14
  46. package/include/node/v8-snapshot.h +38 -2
  47. package/include/node/v8-template.h +105 -263
  48. package/include/node/v8-traced-handle.h +4 -15
  49. package/include/node/v8-unwinder.h +2 -1
  50. package/include/node/v8-value.h +3 -2
  51. package/include/node/v8-version.h +3 -3
  52. package/include/node/v8-wasm.h +3 -0
  53. package/include/node/v8config.h +47 -7
  54. package/package.json +1 -1
  55. package/share/doc/node/gdbinit +41 -3
  56. package/share/doc/node/lldb_commands.py +7 -2
  57. package/share/man/man1/node.1 +0 -14
package/include/node/uv/tree.h CHANGED
@@ -35,21 +35,7 @@
  #endif
 
  /*
- * This file defines data structures for different types of trees:
- * splay trees and red-black trees.
- *
- * A splay tree is a self-organizing data structure. Every operation
- * on the tree causes a splay to happen. The splay moves the requested
- * node to the root of the tree and partly rebalances it.
- *
- * This has the benefit that request locality causes faster lookups as
- * the requested nodes move to the top of the tree. On the other hand,
- * every lookup causes memory writes.
- *
- * The Balance Theorem bounds the total access time for m operations
- * and n inserts on an initially empty tree as O((m + n)lg n). The
- * amortized cost for a sequence of m accesses to a splay tree is O(lg n);
- *
+ * This file defines data structures for red-black trees.
  * A red-black tree is a binary search tree with the node color as an
  * extra attribute. It fulfills a set of conditions:
  * - every search path from the root to a leaf consists of the
@@ -61,239 +47,6 @@
  * The maximum height of a red-black tree is 2lg (n+1).
  */
 
- #define SPLAY_HEAD(name, type) \
- struct name { \
- struct type *sph_root; /* root of the tree */ \
- }
-
- #define SPLAY_INITIALIZER(root) \
- { NULL }
-
- #define SPLAY_INIT(root) do { \
- (root)->sph_root = NULL; \
- } while (/*CONSTCOND*/ 0)
-
- #define SPLAY_ENTRY(type) \
- struct { \
- struct type *spe_left; /* left element */ \
- struct type *spe_right; /* right element */ \
- }
-
- #define SPLAY_LEFT(elm, field) (elm)->field.spe_left
- #define SPLAY_RIGHT(elm, field) (elm)->field.spe_right
- #define SPLAY_ROOT(head) (head)->sph_root
- #define SPLAY_EMPTY(head) (SPLAY_ROOT(head) == NULL)
-
- /* SPLAY_ROTATE_{LEFT,RIGHT} expect that tmp hold SPLAY_{RIGHT,LEFT} */
- #define SPLAY_ROTATE_RIGHT(head, tmp, field) do { \
- SPLAY_LEFT((head)->sph_root, field) = SPLAY_RIGHT(tmp, field); \
- SPLAY_RIGHT(tmp, field) = (head)->sph_root; \
- (head)->sph_root = tmp; \
- } while (/*CONSTCOND*/ 0)
-
- #define SPLAY_ROTATE_LEFT(head, tmp, field) do { \
- SPLAY_RIGHT((head)->sph_root, field) = SPLAY_LEFT(tmp, field); \
- SPLAY_LEFT(tmp, field) = (head)->sph_root; \
- (head)->sph_root = tmp; \
- } while (/*CONSTCOND*/ 0)
-
- #define SPLAY_LINKLEFT(head, tmp, field) do { \
- SPLAY_LEFT(tmp, field) = (head)->sph_root; \
- tmp = (head)->sph_root; \
- (head)->sph_root = SPLAY_LEFT((head)->sph_root, field); \
- } while (/*CONSTCOND*/ 0)
-
- #define SPLAY_LINKRIGHT(head, tmp, field) do { \
- SPLAY_RIGHT(tmp, field) = (head)->sph_root; \
- tmp = (head)->sph_root; \
- (head)->sph_root = SPLAY_RIGHT((head)->sph_root, field); \
- } while (/*CONSTCOND*/ 0)
-
- #define SPLAY_ASSEMBLE(head, node, left, right, field) do { \
- SPLAY_RIGHT(left, field) = SPLAY_LEFT((head)->sph_root, field); \
- SPLAY_LEFT(right, field) = SPLAY_RIGHT((head)->sph_root, field); \
- SPLAY_LEFT((head)->sph_root, field) = SPLAY_RIGHT(node, field); \
- SPLAY_RIGHT((head)->sph_root, field) = SPLAY_LEFT(node, field); \
- } while (/*CONSTCOND*/ 0)
-
- /* Generates prototypes and inline functions */
-
- #define SPLAY_PROTOTYPE(name, type, field, cmp) \
- void name##_SPLAY(struct name *, struct type *); \
- void name##_SPLAY_MINMAX(struct name *, int); \
- struct type *name##_SPLAY_INSERT(struct name *, struct type *); \
- struct type *name##_SPLAY_REMOVE(struct name *, struct type *); \
- \
- /* Finds the node with the same key as elm */ \
- static __inline struct type * \
- name##_SPLAY_FIND(struct name *head, struct type *elm) \
- { \
- if (SPLAY_EMPTY(head)) \
- return(NULL); \
- name##_SPLAY(head, elm); \
- if ((cmp)(elm, (head)->sph_root) == 0) \
- return (head->sph_root); \
- return (NULL); \
- } \
- \
- static __inline struct type * \
- name##_SPLAY_NEXT(struct name *head, struct type *elm) \
- { \
- name##_SPLAY(head, elm); \
- if (SPLAY_RIGHT(elm, field) != NULL) { \
- elm = SPLAY_RIGHT(elm, field); \
- while (SPLAY_LEFT(elm, field) != NULL) { \
- elm = SPLAY_LEFT(elm, field); \
- } \
- } else \
- elm = NULL; \
- return (elm); \
- } \
- \
- static __inline struct type * \
- name##_SPLAY_MIN_MAX(struct name *head, int val) \
- { \
- name##_SPLAY_MINMAX(head, val); \
- return (SPLAY_ROOT(head)); \
- }
-
- /* Main splay operation.
- * Moves node close to the key of elm to top
- */
- #define SPLAY_GENERATE(name, type, field, cmp) \
- struct type * \
- name##_SPLAY_INSERT(struct name *head, struct type *elm) \
- { \
- if (SPLAY_EMPTY(head)) { \
- SPLAY_LEFT(elm, field) = SPLAY_RIGHT(elm, field) = NULL; \
- } else { \
- int __comp; \
- name##_SPLAY(head, elm); \
- __comp = (cmp)(elm, (head)->sph_root); \
- if(__comp < 0) { \
- SPLAY_LEFT(elm, field) = SPLAY_LEFT((head)->sph_root, field); \
- SPLAY_RIGHT(elm, field) = (head)->sph_root; \
- SPLAY_LEFT((head)->sph_root, field) = NULL; \
- } else if (__comp > 0) { \
- SPLAY_RIGHT(elm, field) = SPLAY_RIGHT((head)->sph_root, field); \
- SPLAY_LEFT(elm, field) = (head)->sph_root; \
- SPLAY_RIGHT((head)->sph_root, field) = NULL; \
- } else \
- return ((head)->sph_root); \
- } \
- (head)->sph_root = (elm); \
- return (NULL); \
- } \
- \
- struct type * \
- name##_SPLAY_REMOVE(struct name *head, struct type *elm) \
- { \
- struct type *__tmp; \
- if (SPLAY_EMPTY(head)) \
- return (NULL); \
- name##_SPLAY(head, elm); \
- if ((cmp)(elm, (head)->sph_root) == 0) { \
- if (SPLAY_LEFT((head)->sph_root, field) == NULL) { \
- (head)->sph_root = SPLAY_RIGHT((head)->sph_root, field); \
- } else { \
- __tmp = SPLAY_RIGHT((head)->sph_root, field); \
- (head)->sph_root = SPLAY_LEFT((head)->sph_root, field); \
- name##_SPLAY(head, elm); \
- SPLAY_RIGHT((head)->sph_root, field) = __tmp; \
- } \
- return (elm); \
- } \
- return (NULL); \
- } \
- \
- void \
- name##_SPLAY(struct name *head, struct type *elm) \
- { \
- struct type __node, *__left, *__right, *__tmp; \
- int __comp; \
- \
- SPLAY_LEFT(&__node, field) = SPLAY_RIGHT(&__node, field) = NULL; \
- __left = __right = &__node; \
- \
- while ((__comp = (cmp)(elm, (head)->sph_root)) != 0) { \
- if (__comp < 0) { \
- __tmp = SPLAY_LEFT((head)->sph_root, field); \
- if (__tmp == NULL) \
- break; \
- if ((cmp)(elm, __tmp) < 0){ \
- SPLAY_ROTATE_RIGHT(head, __tmp, field); \
- if (SPLAY_LEFT((head)->sph_root, field) == NULL) \
- break; \
- } \
- SPLAY_LINKLEFT(head, __right, field); \
- } else if (__comp > 0) { \
- __tmp = SPLAY_RIGHT((head)->sph_root, field); \
- if (__tmp == NULL) \
- break; \
- if ((cmp)(elm, __tmp) > 0){ \
- SPLAY_ROTATE_LEFT(head, __tmp, field); \
- if (SPLAY_RIGHT((head)->sph_root, field) == NULL) \
- break; \
- } \
- SPLAY_LINKRIGHT(head, __left, field); \
- } \
- } \
- SPLAY_ASSEMBLE(head, &__node, __left, __right, field); \
- } \
- \
- /* Splay with either the minimum or the maximum element \
- * Used to find minimum or maximum element in tree. \
- */ \
- void name##_SPLAY_MINMAX(struct name *head, int __comp) \
- { \
- struct type __node, *__left, *__right, *__tmp; \
- \
- SPLAY_LEFT(&__node, field) = SPLAY_RIGHT(&__node, field) = NULL; \
- __left = __right = &__node; \
- \
- for (;;) { \
- if (__comp < 0) { \
- __tmp = SPLAY_LEFT((head)->sph_root, field); \
- if (__tmp == NULL) \
- break; \
- if (__comp < 0){ \
- SPLAY_ROTATE_RIGHT(head, __tmp, field); \
- if (SPLAY_LEFT((head)->sph_root, field) == NULL) \
- break; \
- } \
- SPLAY_LINKLEFT(head, __right, field); \
- } else if (__comp > 0) { \
- __tmp = SPLAY_RIGHT((head)->sph_root, field); \
- if (__tmp == NULL) \
- break; \
- if (__comp > 0) { \
- SPLAY_ROTATE_LEFT(head, __tmp, field); \
- if (SPLAY_RIGHT((head)->sph_root, field) == NULL) \
- break; \
- } \
- SPLAY_LINKRIGHT(head, __left, field); \
- } \
- } \
- SPLAY_ASSEMBLE(head, &__node, __left, __right, field); \
- }
-
- #define SPLAY_NEGINF -1
- #define SPLAY_INF 1
-
- #define SPLAY_INSERT(name, x, y) name##_SPLAY_INSERT(x, y)
- #define SPLAY_REMOVE(name, x, y) name##_SPLAY_REMOVE(x, y)
- #define SPLAY_FIND(name, x, y) name##_SPLAY_FIND(x, y)
- #define SPLAY_NEXT(name, x, y) name##_SPLAY_NEXT(x, y)
- #define SPLAY_MIN(name, x) (SPLAY_EMPTY(x) ? NULL \
- : name##_SPLAY_MIN_MAX(x, SPLAY_NEGINF))
- #define SPLAY_MAX(name, x) (SPLAY_EMPTY(x) ? NULL \
- : name##_SPLAY_MIN_MAX(x, SPLAY_INF))
-
- #define SPLAY_FOREACH(x, name, head) \
- for ((x) = SPLAY_MIN(name, head); \
- (x) != NULL; \
- (x) = SPLAY_NEXT(name, head, x))
-
  /* Macros that define a red-black tree */
  #define RB_HEAD(name, type) \
  struct name { \
@@ -730,8 +483,8 @@ name##_RB_MINMAX(struct name *head, int val) \
  #define RB_REMOVE(name, x, y) name##_RB_REMOVE(x, y)
  #define RB_FIND(name, x, y) name##_RB_FIND(x, y)
  #define RB_NFIND(name, x, y) name##_RB_NFIND(x, y)
- #define RB_NEXT(name, x, y) name##_RB_NEXT(y)
- #define RB_PREV(name, x, y) name##_RB_PREV(y)
+ #define RB_NEXT(name, x) name##_RB_NEXT(x)
+ #define RB_PREV(name, x) name##_RB_PREV(x)
  #define RB_MIN(name, x) name##_RB_MINMAX(x, RB_NEGINF)
  #define RB_MAX(name, x) name##_RB_MINMAX(x, RB_INF)
 
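RB_NEXT and RB_PREV above lose their unused head argument, so embedder code that walks one of libuv's public red-black trees directly has to switch to the two-argument form. A minimal sketch of the new calling convention, using only macros from uv/tree.h plus a hypothetical node type and comparator:

    #include "uv/tree.h"   /* shipped as include/node/uv/tree.h in this package */

    struct node_s {
      int key;
      RB_ENTRY(node_s) entry;          /* link field used by the tree macros */
    };
    static int node_cmp(struct node_s* a, struct node_s* b) {
      return a->key < b->key ? -1 : a->key > b->key;
    }
    RB_HEAD(node_tree_s, node_s);
    RB_GENERATE(node_tree_s, node_s, entry, node_cmp)

    static void walk(struct node_tree_s* tree) {
      struct node_s* n;
      for (n = RB_MIN(node_tree_s, tree); n != NULL;
           n = RB_NEXT(node_tree_s, n))   /* was RB_NEXT(node_tree_s, tree, n) */
        ;                                 /* visit n here */
    }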
package/include/node/uv/version.h CHANGED
@@ -31,8 +31,8 @@
  */
 
  #define UV_VERSION_MAJOR 1
- #define UV_VERSION_MINOR 48
- #define UV_VERSION_PATCH 0
+ #define UV_VERSION_MINOR 49
+ #define UV_VERSION_PATCH 1
  #define UV_VERSION_IS_RELEASE 1
  #define UV_VERSION_SUFFIX ""
 
package/include/node/uv/win.h CHANGED
@@ -290,8 +290,8 @@ typedef struct {
  #define UV_ONCE_INIT { 0, NULL }
 
  typedef struct uv_once_s {
- unsigned char ran;
- HANDLE event;
+ unsigned char unused;
+ INIT_ONCE init_once;
  } uv_once_t;
 
  /* Platform-specific definitions for uv_spawn support. */
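On Windows, uv_once_t now wraps the operating system's INIT_ONCE one-shot initializer instead of a hand-rolled event handle; the portable usage pattern with UV_ONCE_INIT and uv_once() is unchanged. A small reference sketch (names are illustrative):

    #include <uv.h>

    static uv_once_t init_guard = UV_ONCE_INIT;

    static void do_init(void) {
      /* runs exactly once, even when several threads race into uv_once() */
    }

    void ensure_initialized(void) {
      uv_once(&init_guard, do_init);
    }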
package/include/node/uv.h CHANGED
@@ -260,7 +260,9 @@ typedef struct uv_metrics_s uv_metrics_t;
 
  typedef enum {
  UV_LOOP_BLOCK_SIGNAL = 0,
- UV_METRICS_IDLE_TIME
+ UV_METRICS_IDLE_TIME,
+ UV_LOOP_USE_IO_URING_SQPOLL
+ #define UV_LOOP_USE_IO_URING_SQPOLL UV_LOOP_USE_IO_URING_SQPOLL
  } uv_loop_option;
 
  typedef enum {
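The new UV_LOOP_USE_IO_URING_SQPOLL option is passed to uv_loop_configure() like the existing loop options. A hedged sketch; whether an unsupported platform or kernel reports an error from uv_loop_configure(), and which one, should be checked against the libuv 1.49 documentation:

    #include <uv.h>

    int setup_loop(uv_loop_t* loop) {
      int rc = uv_loop_init(loop);
      if (rc != 0)
        return rc;
      /* Opt in to io_uring SQPOLL kernel-side submission polling (Linux only).
       * Treating a failure here as non-fatal is an application choice. */
      (void) uv_loop_configure(loop, UV_LOOP_USE_IO_URING_SQPOLL);
      return 0;
    }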
@@ -604,7 +606,18 @@ UV_EXTERN int uv_tcp_simultaneous_accepts(uv_tcp_t* handle, int enable);
 
  enum uv_tcp_flags {
  /* Used with uv_tcp_bind, when an IPv6 address is used. */
- UV_TCP_IPV6ONLY = 1
+ UV_TCP_IPV6ONLY = 1,
+
+ /* Enable SO_REUSEPORT socket option when binding the handle.
+ * This allows completely duplicate bindings by multiple processes
+ * or threads if they all set SO_REUSEPORT before binding the port.
+ * Incoming connections are distributed across the participating
+ * listener sockets.
+ *
+ * This flag is available only on Linux 3.9+, DragonFlyBSD 3.6+,
+ * FreeBSD 12.0+, Solaris 11.4, and AIX 7.2.5+ for now.
+ */
+ UV_TCP_REUSEPORT = 2,
  };
 
  UV_EXTERN int uv_tcp_bind(uv_tcp_t* handle,
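With UV_TCP_REUSEPORT, several processes (or threads, each with its own loop and listening socket) can bind the same port and let the kernel spread incoming connections across them, on the platforms listed in the comment above. A hedged sketch of one listener opting in:

    #include <uv.h>

    static void on_connection(uv_stream_t* server, int status) {
      (void) server;
      (void) status;   /* call uv_accept() here in a real server */
    }

    int listen_shared(uv_loop_t* loop, uv_tcp_t* server) {
      struct sockaddr_in addr;
      int rc = uv_ip4_addr("0.0.0.0", 8080, &addr);
      if (rc == 0)
        rc = uv_tcp_init(loop, server);
      if (rc == 0)
        rc = uv_tcp_bind(server, (const struct sockaddr*) &addr, UV_TCP_REUSEPORT);
      if (rc == 0)
        rc = uv_listen((uv_stream_t*) server, 128, on_connection);
      return rc;
    }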
@@ -645,10 +658,13 @@ enum uv_udp_flags {
  UV_UDP_PARTIAL = 2,
  /*
  * Indicates if SO_REUSEADDR will be set when binding the handle.
- * This sets the SO_REUSEPORT socket flag on the BSDs and OS X. On other
- * Unix platforms, it sets the SO_REUSEADDR flag. What that means is that
- * multiple threads or processes can bind to the same address without error
- * (provided they all set the flag) but only the last one to bind will receive
+ * This sets the SO_REUSEPORT socket flag on the BSDs (except for
+ * DragonFlyBSD), OS X, and other platforms where SO_REUSEPORTs don't
+ * have the capability of load balancing, as the opposite of what
+ * UV_UDP_REUSEPORT would do. On other Unix platforms, it sets the
+ * SO_REUSEADDR flag. What that means is that multiple threads or
+ * processes can bind to the same address without error (provided
+ * they all set the flag) but only the last one to bind will receive
  * any traffic, in effect "stealing" the port from the previous listener.
  */
  UV_UDP_REUSEADDR = 4,
@@ -671,6 +687,18 @@ enum uv_udp_flags {
  * This flag is no-op on platforms other than Linux.
  */
  UV_UDP_LINUX_RECVERR = 32,
+ /*
+ * Indicates if SO_REUSEPORT will be set when binding the handle.
+ * This sets the SO_REUSEPORT socket option on supported platforms.
+ * Unlike UV_UDP_REUSEADDR, this flag will make multiple threads or
+ * processes that are binding to the same address and port "share"
+ * the port, which means incoming datagrams are distributed across
+ * the receiving sockets among threads or processes.
+ *
+ * This flag is available only on Linux 3.9+, DragonFlyBSD 3.6+,
+ * FreeBSD 12.0+, Solaris 11.4, and AIX 7.2.5+ for now.
+ */
+ UV_UDP_REUSEPORT = 64,
  /*
  * Indicates that recvmmsg should be used, if available.
  */
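UDP gets the matching flag: passing UV_UDP_REUSEPORT to uv_udp_bind() lets sockets bound to the same address and port share incoming datagrams instead of the last binder "stealing" the port, with the same platform restrictions noted above. A brief sketch:

    #include <uv.h>

    int bind_shared_udp(uv_loop_t* loop, uv_udp_t* sock) {
      struct sockaddr_in addr;
      int rc = uv_ip4_addr("0.0.0.0", 5353, &addr);
      if (rc == 0)
        rc = uv_udp_init(loop, sock);
      if (rc == 0)
        rc = uv_udp_bind(sock, (const struct sockaddr*) &addr, UV_UDP_REUSEPORT);
      return rc;   /* follow with uv_udp_recv_start() as usual */
    }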
@@ -1903,17 +1931,17 @@ struct uv_loop_s {
  UV_EXTERN void* uv_loop_get_data(const uv_loop_t*);
  UV_EXTERN void uv_loop_set_data(uv_loop_t*, void* data);
 
- /* String utilities needed internally for dealing with Windows. */
- size_t uv_utf16_length_as_wtf8(const uint16_t* utf16,
- ssize_t utf16_len);
- int uv_utf16_to_wtf8(const uint16_t* utf16,
- ssize_t utf16_len,
- char** wtf8_ptr,
- size_t* wtf8_len_ptr);
- ssize_t uv_wtf8_length_as_utf16(const char* wtf8);
- void uv_wtf8_to_utf16(const char* wtf8,
- uint16_t* utf16,
- size_t utf16_len);
+ /* Unicode utilities needed for dealing with Windows. */
+ UV_EXTERN size_t uv_utf16_length_as_wtf8(const uint16_t* utf16,
+ ssize_t utf16_len);
+ UV_EXTERN int uv_utf16_to_wtf8(const uint16_t* utf16,
+ ssize_t utf16_len,
+ char** wtf8_ptr,
+ size_t* wtf8_len_ptr);
+ UV_EXTERN ssize_t uv_wtf8_length_as_utf16(const char* wtf8);
+ UV_EXTERN void uv_wtf8_to_utf16(const char* wtf8,
+ uint16_t* utf16,
+ size_t utf16_len);
 
  /* Don't export the private CPP symbols. */
  #undef UV_HANDLE_TYPE_PRIVATE
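These WTF-8/UTF-16 helpers move from internal linkage to the exported API (UV_EXTERN), with the signatures shown above. A hedged sketch of converting a NUL-terminated WTF-8 (or plain UTF-8) string to UTF-16; whether the length returned by uv_wtf8_length_as_utf16() accounts for a terminating NUL is an assumption to verify against the libuv 1.49 documentation:

    #include <stdlib.h>
    #include <uv.h>

    uint16_t* to_utf16(const char* wtf8) {
      /* a negative return is assumed to signal invalid WTF-8 input */
      ssize_t len = uv_wtf8_length_as_utf16(wtf8);
      if (len < 0)
        return NULL;
      uint16_t* buf = (uint16_t*) malloc((size_t) len * sizeof(*buf));
      if (buf != NULL)
        uv_wtf8_to_utf16(wtf8, buf, (size_t) len);
      return buf;   /* caller frees */
    }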
package/include/node/v8-array-buffer.h CHANGED
@@ -18,11 +18,12 @@ namespace v8 {
  class SharedArrayBuffer;
 
  #ifndef V8_ARRAY_BUFFER_INTERNAL_FIELD_COUNT
- // The number of required internal fields can be defined by embedder.
+ // Defined using gn arg `v8_array_buffer_internal_field_count`.
  #define V8_ARRAY_BUFFER_INTERNAL_FIELD_COUNT 2
  #endif
 
  enum class ArrayBufferCreationMode { kInternalized, kExternalized };
+ enum class BackingStoreInitializationMode { kZeroInitialized, kUninitialized };
 
  /**
  * A wrapper around the backing store (i.e. the raw memory) of an array buffer.
@@ -87,6 +88,9 @@ class V8_EXPORT BackingStore : public v8::internal::BackingStoreBase {
  * Assumes that the backing_store was allocated by the ArrayBuffer allocator
  * of the given isolate.
  */
+ V8_DEPRECATED(
+ "Reallocate is unsafe, please do not use. Please allocate a new "
+ "BackingStore and copy instead.")
  static std::unique_ptr<BackingStore> Reallocate(
  v8::Isolate* isolate, std::unique_ptr<BackingStore> backing_store,
  size_t byte_length);
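The deprecation message spells out the replacement: allocate a fresh BackingStore at the new size and copy the old bytes over. A minimal sketch of that pattern, assuming the caller owns the old store:

    #include <algorithm>
    #include <cstring>
    #include <memory>
    #include "v8.h"

    std::unique_ptr<v8::BackingStore> GrowStore(
        v8::Isolate* isolate, std::unique_ptr<v8::BackingStore> old_store,
        size_t new_byte_length) {
      std::unique_ptr<v8::BackingStore> fresh =
          v8::ArrayBuffer::NewBackingStore(isolate, new_byte_length);
      std::memcpy(fresh->Data(), old_store->Data(),
                  std::min(old_store->ByteLength(), new_byte_length));
      return fresh;  // old_store is released when it goes out of scope
    }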
@@ -179,6 +183,9 @@ class V8_EXPORT ArrayBuffer : public Object {
  *
  * The default implementation allocates a new block and copies data.
  */
+ V8_DEPRECATED(
+ "Reallocate is unsafe, please do not use. Please allocate new memory "
+ "and copy instead.")
  virtual void* Reallocate(void* data, size_t old_length, size_t new_length);
 
  /**
@@ -211,12 +218,15 @@
  size_t MaxByteLength() const;
 
  /**
- * Create a new ArrayBuffer. Allocate |byte_length| bytes.
- * Allocated memory will be owned by a created ArrayBuffer and
- * will be deallocated when it is garbage-collected,
+ * Create a new ArrayBuffer. Allocate |byte_length| bytes, which are either
+ * zero-initialized or uninitialized. Allocated memory will be owned by a
+ * created ArrayBuffer and will be deallocated when it is garbage-collected,
  * unless the object is externalized.
  */
- static Local<ArrayBuffer> New(Isolate* isolate, size_t byte_length);
+ static Local<ArrayBuffer> New(
+ Isolate* isolate, size_t byte_length,
+ BackingStoreInitializationMode initialization_mode =
+ BackingStoreInitializationMode::kZeroInitialized);
 
  /**
  * Create a new ArrayBuffer with an existing backing store.
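For buffers the embedder is about to overwrite anyway, the new defaulted parameter lets allocation skip the zero fill. A hedged sketch (assumes the caller holds an entered isolate and an active HandleScope, and that the bytes are written before any JavaScript can observe them):

    #include <cstring>
    #include "v8.h"

    v8::Local<v8::ArrayBuffer> MakeScratchBuffer(v8::Isolate* isolate, size_t n) {
      v8::Local<v8::ArrayBuffer> ab = v8::ArrayBuffer::New(
          isolate, n, v8::BackingStoreInitializationMode::kUninitialized);
      // Fill the uninitialized memory ourselves before exposing it to script.
      std::memset(ab->GetBackingStore()->Data(), 0, ab->ByteLength());
      return ab;
    }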
@@ -235,15 +245,18 @@ class V8_EXPORT ArrayBuffer : public Object {
 
  /**
  * Returns a new standalone BackingStore that is allocated using the array
- * buffer allocator of the isolate. The result can be later passed to
+ * buffer allocator of the isolate. The allocation can either be zero
+ * intialized, or uninitialized. The result can be later passed to
  * ArrayBuffer::New.
  *
  * If the allocator returns nullptr, then the function may cause GCs in the
  * given isolate and re-try the allocation. If GCs do not help, then the
  * function will crash with an out-of-memory error.
  */
- static std::unique_ptr<BackingStore> NewBackingStore(Isolate* isolate,
- size_t byte_length);
+ static std::unique_ptr<BackingStore> NewBackingStore(
+ Isolate* isolate, size_t byte_length,
+ BackingStoreInitializationMode initialization_mode =
+ BackingStoreInitializationMode::kZeroInitialized);
  /**
  * Returns a new standalone BackingStore that takes over the ownership of
  * the given buffer. The destructor of the BackingStore invokes the given
@@ -287,7 +300,7 @@ class V8_EXPORT ArrayBuffer : public Object {
  * preventing JavaScript from ever accessing underlying backing store.
  * ArrayBuffer should have been externalized and must be detachable.
  */
- V8_DEPRECATE_SOON(
+ V8_DEPRECATED(
  "Use the version which takes a key parameter (passing a null handle is "
  "ok).")
  void Detach();
@@ -337,8 +350,9 @@
  return static_cast<ArrayBuffer*>(value);
  }
 
- static const int kInternalFieldCount = V8_ARRAY_BUFFER_INTERNAL_FIELD_COUNT;
- static const int kEmbedderFieldCount = V8_ARRAY_BUFFER_INTERNAL_FIELD_COUNT;
+ static constexpr int kInternalFieldCount =
+ V8_ARRAY_BUFFER_INTERNAL_FIELD_COUNT;
+ static constexpr int kEmbedderFieldCount = kInternalFieldCount;
 
  private:
  ArrayBuffer();
@@ -346,7 +360,7 @@ class V8_EXPORT ArrayBuffer : public Object {
  };
 
  #ifndef V8_ARRAY_BUFFER_VIEW_INTERNAL_FIELD_COUNT
- // The number of required internal fields can be defined by embedder.
+ // Defined using gn arg `v8_array_buffer_view_internal_field_count`.
  #define V8_ARRAY_BUFFER_VIEW_INTERNAL_FIELD_COUNT 2
  #endif
 
@@ -393,10 +407,9 @@ class V8_EXPORT ArrayBufferView : public Object {
  return static_cast<ArrayBufferView*>(value);
  }
 
- static const int kInternalFieldCount =
- V8_ARRAY_BUFFER_VIEW_INTERNAL_FIELD_COUNT;
- static const int kEmbedderFieldCount =
+ static constexpr int kInternalFieldCount =
  V8_ARRAY_BUFFER_VIEW_INTERNAL_FIELD_COUNT;
+ static const int kEmbedderFieldCount = kInternalFieldCount;
 
  private:
  ArrayBufferView();
@@ -440,12 +453,15 @@ class V8_EXPORT SharedArrayBuffer : public Object {
  size_t MaxByteLength() const;
 
  /**
- * Create a new SharedArrayBuffer. Allocate |byte_length| bytes.
- * Allocated memory will be owned by a created SharedArrayBuffer and
- * will be deallocated when it is garbage-collected,
- * unless the object is externalized.
+ * Create a new SharedArrayBuffer. Allocate |byte_length| bytes, which are
+ * either zero-initialized or uninitialized. Allocated memory will be owned by
+ * a created SharedArrayBuffer and will be deallocated when it is
+ * garbage-collected, unless the object is externalized.
  */
- static Local<SharedArrayBuffer> New(Isolate* isolate, size_t byte_length);
+ static Local<SharedArrayBuffer> New(
+ Isolate* isolate, size_t byte_length,
+ BackingStoreInitializationMode initialization_mode =
+ BackingStoreInitializationMode::kZeroInitialized);
 
  /**
  * Create a new SharedArrayBuffer with an existing backing store.
@@ -464,15 +480,18 @@
 
  /**
  * Returns a new standalone BackingStore that is allocated using the array
- * buffer allocator of the isolate. The result can be later passed to
+ * buffer allocator of the isolate. The allocation can either be zero
+ * intialized, or uninitialized. The result can be later passed to
  * SharedArrayBuffer::New.
  *
  * If the allocator returns nullptr, then the function may cause GCs in the
  * given isolate and re-try the allocation. If GCs do not help, then the
  * function will crash with an out-of-memory error.
  */
- static std::unique_ptr<BackingStore> NewBackingStore(Isolate* isolate,
- size_t byte_length);
+ static std::unique_ptr<BackingStore> NewBackingStore(
+ Isolate* isolate, size_t byte_length,
+ BackingStoreInitializationMode initialization_mode =
+ BackingStoreInitializationMode::kZeroInitialized);
  /**
  * Returns a new standalone BackingStore that takes over the ownership of
  * the given buffer. The destructor of the BackingStore invokes the given
@@ -506,7 +525,8 @@ class V8_EXPORT SharedArrayBuffer : public Object {
  return static_cast<SharedArrayBuffer*>(value);
  }
 
- static const int kInternalFieldCount = V8_ARRAY_BUFFER_INTERNAL_FIELD_COUNT;
+ static constexpr int kInternalFieldCount =
+ V8_ARRAY_BUFFER_INTERNAL_FIELD_COUNT;
 
  private:
  SharedArrayBuffer();
package/include/node/v8-callbacks.h CHANGED
@@ -152,9 +152,6 @@ using JitCodeEventHandler = void (*)(const JitCodeEvent* event);
  enum GCType {
  kGCTypeScavenge = 1 << 0,
  kGCTypeMinorMarkSweep = 1 << 1,
- kGCTypeMinorMarkCompact V8_DEPRECATE_SOON(
- "Use kGCTypeMinorMarkSweep instead of kGCTypeMinorMarkCompact.") =
- kGCTypeMinorMarkSweep,
  kGCTypeMarkSweepCompact = 1 << 2,
  kGCTypeIncrementalMarking = 1 << 3,
  kGCTypeProcessWeakCallbacks = 1 << 4,
@@ -234,7 +231,7 @@ using MessageCallback = void (*)(Local<Message> message, Local<Value> data);
 
  // --- Tracing ---
 
- enum LogEventStatus : int { kStart = 0, kEnd = 1, kStamp = 2 };
+ enum LogEventStatus : int { kStart = 0, kEnd = 1, kLog = 2 };
  using LogEventCallback = void (*)(const char* name,
  int /* LogEventStatus */ status);
 
@@ -341,6 +338,14 @@ using JavaScriptCompileHintsMagicEnabledCallback =
  // --- Callback for checking if WebAssembly JSPI is enabled ---
  using WasmJSPIEnabledCallback = bool (*)(Local<Context> context);
 
+ /**
+ * Import phases in import requests.
+ */
+ enum class ModuleImportPhase {
+ kSource,
+ kEvaluation,
+ };
+
  /**
  * HostImportModuleDynamicallyCallback is called when we
  * require the embedder to load a module. This is used as part of the dynamic
@@ -354,7 +359,7 @@ using WasmJSPIEnabledCallback = bool (*)(Local<Context> context);
  * The import_attributes are import attributes for this request in the form:
  * [key1, value1, key2, value2, ...] where the keys and values are of type
  * v8::String. Note, unlike the FixedArray passed to ResolveModuleCallback and
- * returned from ModuleRequest::GetImportAssertions(), this array does not
+ * returned from ModuleRequest::GetImportAttributes(), this array does not
  * contain the source Locations of the attributes.
  *
  * The embedder must compile, instantiate, evaluate the Module, and