libv8-node 16.10.0.0-arm64-darwin

Sign up to get free protection for your applications and to get access to all the features.
Files changed (68) hide show
  1. checksums.yaml +7 -0
  2. data/ext/libv8-node/.location.yml +1 -0
  3. data/ext/libv8-node/location.rb +76 -0
  4. data/ext/libv8-node/paths.rb +34 -0
  5. data/lib/libv8/node/version.rb +7 -0
  6. data/lib/libv8/node.rb +11 -0
  7. data/lib/libv8-node.rb +1 -0
  8. data/vendor/v8/arm64-darwin/libv8/obj/libv8_monolith.a +0 -0
  9. data/vendor/v8/include/cppgc/allocation.h +232 -0
  10. data/vendor/v8/include/cppgc/common.h +29 -0
  11. data/vendor/v8/include/cppgc/cross-thread-persistent.h +384 -0
  12. data/vendor/v8/include/cppgc/custom-space.h +97 -0
  13. data/vendor/v8/include/cppgc/default-platform.h +75 -0
  14. data/vendor/v8/include/cppgc/ephemeron-pair.h +30 -0
  15. data/vendor/v8/include/cppgc/explicit-management.h +82 -0
  16. data/vendor/v8/include/cppgc/garbage-collected.h +117 -0
  17. data/vendor/v8/include/cppgc/heap-consistency.h +236 -0
  18. data/vendor/v8/include/cppgc/heap-state.h +70 -0
  19. data/vendor/v8/include/cppgc/heap-statistics.h +120 -0
  20. data/vendor/v8/include/cppgc/heap.h +201 -0
  21. data/vendor/v8/include/cppgc/internal/api-constants.h +47 -0
  22. data/vendor/v8/include/cppgc/internal/atomic-entry-flag.h +48 -0
  23. data/vendor/v8/include/cppgc/internal/caged-heap-local-data.h +68 -0
  24. data/vendor/v8/include/cppgc/internal/compiler-specific.h +38 -0
  25. data/vendor/v8/include/cppgc/internal/finalizer-trait.h +90 -0
  26. data/vendor/v8/include/cppgc/internal/gc-info.h +76 -0
  27. data/vendor/v8/include/cppgc/internal/logging.h +50 -0
  28. data/vendor/v8/include/cppgc/internal/name-trait.h +111 -0
  29. data/vendor/v8/include/cppgc/internal/persistent-node.h +172 -0
  30. data/vendor/v8/include/cppgc/internal/pointer-policies.h +175 -0
  31. data/vendor/v8/include/cppgc/internal/prefinalizer-handler.h +30 -0
  32. data/vendor/v8/include/cppgc/internal/write-barrier.h +396 -0
  33. data/vendor/v8/include/cppgc/liveness-broker.h +74 -0
  34. data/vendor/v8/include/cppgc/macros.h +26 -0
  35. data/vendor/v8/include/cppgc/member.h +286 -0
  36. data/vendor/v8/include/cppgc/name-provider.h +65 -0
  37. data/vendor/v8/include/cppgc/object-size-trait.h +58 -0
  38. data/vendor/v8/include/cppgc/persistent.h +365 -0
  39. data/vendor/v8/include/cppgc/platform.h +153 -0
  40. data/vendor/v8/include/cppgc/prefinalizer.h +52 -0
  41. data/vendor/v8/include/cppgc/process-heap-statistics.h +36 -0
  42. data/vendor/v8/include/cppgc/sentinel-pointer.h +32 -0
  43. data/vendor/v8/include/cppgc/source-location.h +92 -0
  44. data/vendor/v8/include/cppgc/testing.h +99 -0
  45. data/vendor/v8/include/cppgc/trace-trait.h +116 -0
  46. data/vendor/v8/include/cppgc/type-traits.h +247 -0
  47. data/vendor/v8/include/cppgc/visitor.h +377 -0
  48. data/vendor/v8/include/libplatform/libplatform-export.h +29 -0
  49. data/vendor/v8/include/libplatform/libplatform.h +117 -0
  50. data/vendor/v8/include/libplatform/v8-tracing.h +334 -0
  51. data/vendor/v8/include/v8-cppgc.h +325 -0
  52. data/vendor/v8/include/v8-fast-api-calls.h +791 -0
  53. data/vendor/v8/include/v8-inspector-protocol.h +13 -0
  54. data/vendor/v8/include/v8-inspector.h +348 -0
  55. data/vendor/v8/include/v8-internal.h +499 -0
  56. data/vendor/v8/include/v8-metrics.h +202 -0
  57. data/vendor/v8/include/v8-platform.h +709 -0
  58. data/vendor/v8/include/v8-profiler.h +1123 -0
  59. data/vendor/v8/include/v8-unwinder-state.h +30 -0
  60. data/vendor/v8/include/v8-util.h +652 -0
  61. data/vendor/v8/include/v8-value-serializer-version.h +24 -0
  62. data/vendor/v8/include/v8-version-string.h +38 -0
  63. data/vendor/v8/include/v8-version.h +20 -0
  64. data/vendor/v8/include/v8-wasm-trap-handler-posix.h +31 -0
  65. data/vendor/v8/include/v8-wasm-trap-handler-win.h +28 -0
  66. data/vendor/v8/include/v8.h +12648 -0
  67. data/vendor/v8/include/v8config.h +515 -0
  68. metadata +138 -0
@@ -0,0 +1,709 @@
1
+ // Copyright 2013 the V8 project authors. All rights reserved.
2
+ // Use of this source code is governed by a BSD-style license that can be
3
+ // found in the LICENSE file.
4
+
5
+ #ifndef V8_V8_PLATFORM_H_
6
+ #define V8_V8_PLATFORM_H_
7
+
8
+ #include <stddef.h>
9
+ #include <stdint.h>
10
+ #include <stdlib.h> // For abort.
11
+ #include <memory>
12
+ #include <string>
13
+
14
+ #include "v8config.h" // NOLINT(build/include_directory)
15
+
16
+ namespace v8 {
17
+
18
+ class Isolate;
19
+
20
// Valid priorities supported by the task scheduling infrastructure.
// Backed by uint8_t so the enum stays cheap to store in task metadata.
enum class TaskPriority : uint8_t {
  /**
   * Best effort tasks are not critical for performance of the application. The
   * platform implementation should preempt such tasks if higher priority tasks
   * arrive.
   */
  kBestEffort,
  /**
   * User visible tasks are long running background tasks that will
   * improve performance and memory usage of the application upon completion.
   * Example: background compilation and garbage collection.
   */
  kUserVisible,
  /**
   * User blocking tasks are highest priority tasks that block the execution
   * thread (e.g. major garbage collection). They must be finished as soon as
   * possible.
   */
  kUserBlocking,
};
41
+
42
/**
 * A Task represents a unit of work.
 *
 * Embedders subclass Task and implement Run(); ownership is typically
 * transferred to a TaskRunner or Platform via std::unique_ptr<Task>.
 */
class Task {
 public:
  // Virtual destructor: tasks are deleted through base-class pointers.
  virtual ~Task() = default;

  // Performs the unit of work. Called exactly once by the scheduler.
  virtual void Run() = 0;
};
51
+
52
/**
 * An IdleTask represents a unit of work to be performed in idle time.
 * The Run method is invoked with an argument that specifies the deadline in
 * seconds returned by MonotonicallyIncreasingTime().
 * The idle task is expected to complete by this deadline.
 */
class IdleTask {
 public:
  // Virtual destructor: idle tasks are deleted through base-class pointers.
  virtual ~IdleTask() = default;
  // Performs the work; |deadline_in_seconds| is an absolute deadline on the
  // MonotonicallyIncreasingTime() clock, not a duration.
  virtual void Run(double deadline_in_seconds) = 0;
};
63
+
64
+ /**
65
+ * A TaskRunner allows scheduling of tasks. The TaskRunner may still be used to
66
+ * post tasks after the isolate gets destructed, but these tasks may not get
67
+ * executed anymore. All tasks posted to a given TaskRunner will be invoked in
68
+ * sequence. Tasks can be posted from any thread.
69
+ */
70
+ class TaskRunner {
71
+ public:
72
+ /**
73
+ * Schedules a task to be invoked by this TaskRunner. The TaskRunner
74
+ * implementation takes ownership of |task|.
75
+ */
76
+ virtual void PostTask(std::unique_ptr<Task> task) = 0;
77
+
78
+ /**
79
+ * Schedules a task to be invoked by this TaskRunner. The TaskRunner
80
+ * implementation takes ownership of |task|. The |task| cannot be nested
81
+ * within other task executions.
82
+ *
83
+ * Tasks which shouldn't be interleaved with JS execution must be posted with
84
+ * |PostNonNestableTask| or |PostNonNestableDelayedTask|. This is because the
85
+ * embedder may process tasks in a callback which is called during JS
86
+ * execution.
87
+ *
88
+ * In particular, tasks which execute JS must be non-nestable, since JS
89
+ * execution is not allowed to nest.
90
+ *
91
+ * Requires that |TaskRunner::NonNestableTasksEnabled()| is true.
92
+ */
93
+ virtual void PostNonNestableTask(std::unique_ptr<Task> task) {}
94
+
95
+ /**
96
+ * Schedules a task to be invoked by this TaskRunner. The task is scheduled
97
+ * after the given number of seconds |delay_in_seconds|. The TaskRunner
98
+ * implementation takes ownership of |task|.
99
+ */
100
+ virtual void PostDelayedTask(std::unique_ptr<Task> task,
101
+ double delay_in_seconds) = 0;
102
+
103
+ /**
104
+ * Schedules a task to be invoked by this TaskRunner. The task is scheduled
105
+ * after the given number of seconds |delay_in_seconds|. The TaskRunner
106
+ * implementation takes ownership of |task|. The |task| cannot be nested
107
+ * within other task executions.
108
+ *
109
+ * Tasks which shouldn't be interleaved with JS execution must be posted with
110
+ * |PostNonNestableTask| or |PostNonNestableDelayedTask|. This is because the
111
+ * embedder may process tasks in a callback which is called during JS
112
+ * execution.
113
+ *
114
+ * In particular, tasks which execute JS must be non-nestable, since JS
115
+ * execution is not allowed to nest.
116
+ *
117
+ * Requires that |TaskRunner::NonNestableDelayedTasksEnabled()| is true.
118
+ */
119
+ virtual void PostNonNestableDelayedTask(std::unique_ptr<Task> task,
120
+ double delay_in_seconds) {}
121
+
122
+ /**
123
+ * Schedules an idle task to be invoked by this TaskRunner. The task is
124
+ * scheduled when the embedder is idle. Requires that
125
+ * |TaskRunner::IdleTasksEnabled()| is true. Idle tasks may be reordered
126
+ * relative to other task types and may be starved for an arbitrarily long
127
+ * time if no idle time is available. The TaskRunner implementation takes
128
+ * ownership of |task|.
129
+ */
130
+ virtual void PostIdleTask(std::unique_ptr<IdleTask> task) = 0;
131
+
132
+ /**
133
+ * Returns true if idle tasks are enabled for this TaskRunner.
134
+ */
135
+ virtual bool IdleTasksEnabled() = 0;
136
+
137
+ /**
138
+ * Returns true if non-nestable tasks are enabled for this TaskRunner.
139
+ */
140
+ virtual bool NonNestableTasksEnabled() const { return false; }
141
+
142
+ /**
143
+ * Returns true if non-nestable delayed tasks are enabled for this TaskRunner.
144
+ */
145
+ virtual bool NonNestableDelayedTasksEnabled() const { return false; }
146
+
147
+ TaskRunner() = default;
148
+ virtual ~TaskRunner() = default;
149
+
150
+ TaskRunner(const TaskRunner&) = delete;
151
+ TaskRunner& operator=(const TaskRunner&) = delete;
152
+ };
153
+
154
/**
 * Delegate that's passed to Job's worker task, providing an entry point to
 * communicate with the scheduler.
 */
class JobDelegate {
 public:
  /**
   * Returns true if this thread should return from the worker task on the
   * current thread ASAP. Workers should periodically invoke ShouldYield (or
   * YieldIfNeeded()) as often as is reasonable.
   */
  virtual bool ShouldYield() = 0;

  /**
   * Notifies the scheduler that max concurrency was increased, and the number
   * of worker should be adjusted accordingly. See Platform::PostJob() for more
   * details.
   */
  virtual void NotifyConcurrencyIncrease() = 0;

  /**
   * Returns a task_id unique among threads currently running this job, such
   * that GetTaskId() < worker count. To achieve this, the same task_id may be
   * reused by a different thread after a worker_task returns.
   */
  virtual uint8_t GetTaskId() = 0;

  /**
   * Returns true if the current task is called from the thread currently
   * running JobHandle::Join().
   * TODO(etiennep): Make pure virtual once custom embedders implement it.
   */
  virtual bool IsJoiningThread() const { return false; }
};
188
+
189
+ /**
190
+ * Handle returned when posting a Job. Provides methods to control execution of
191
+ * the posted Job.
192
+ */
193
+ class JobHandle {
194
+ public:
195
+ virtual ~JobHandle() = default;
196
+
197
+ /**
198
+ * Notifies the scheduler that max concurrency was increased, and the number
199
+ * of worker should be adjusted accordingly. See Platform::PostJob() for more
200
+ * details.
201
+ */
202
+ virtual void NotifyConcurrencyIncrease() = 0;
203
+
204
+ /**
205
+ * Contributes to the job on this thread. Doesn't return until all tasks have
206
+ * completed and max concurrency becomes 0. When Join() is called and max
207
+ * concurrency reaches 0, it should not increase again. This also promotes
208
+ * this Job's priority to be at least as high as the calling thread's
209
+ * priority.
210
+ */
211
+ virtual void Join() = 0;
212
+
213
+ /**
214
+ * Forces all existing workers to yield ASAP. Waits until they have all
215
+ * returned from the Job's callback before returning.
216
+ */
217
+ virtual void Cancel() = 0;
218
+
219
+ /*
220
+ * Forces all existing workers to yield ASAP but doesn’t wait for them.
221
+ * Warning, this is dangerous if the Job's callback is bound to or has access
222
+ * to state which may be deleted after this call.
223
+ * TODO(etiennep): Cleanup once implemented by all embedders.
224
+ */
225
+ virtual void CancelAndDetach() { Cancel(); }
226
+
227
+ /**
228
+ * Returns true if there's any work pending or any worker running.
229
+ */
230
+ virtual bool IsActive() = 0;
231
+
232
+ // TODO(etiennep): Clean up once all overrides are removed.
233
+ V8_DEPRECATED("Use !IsActive() instead.")
234
+ virtual bool IsCompleted() { return !IsActive(); }
235
+
236
+ /**
237
+ * Returns true if associated with a Job and other methods may be called.
238
+ * Returns false after Join() or Cancel() was called. This may return true
239
+ * even if no workers are running and IsCompleted() returns true
240
+ */
241
+ virtual bool IsValid() = 0;
242
+
243
+ // TODO(etiennep): Clean up once all overrides are removed.
244
+ V8_DEPRECATED("Use IsValid() instead.")
245
+ virtual bool IsRunning() { return IsValid(); }
246
+
247
+ /**
248
+ * Returns true if job priority can be changed.
249
+ */
250
+ virtual bool UpdatePriorityEnabled() const { return false; }
251
+
252
+ /**
253
+ * Update this Job's priority.
254
+ */
255
+ virtual void UpdatePriority(TaskPriority new_priority) {}
256
+ };
257
+
258
+ /**
259
+ * A JobTask represents work to run in parallel from Platform::PostJob().
260
+ */
261
+ class JobTask {
262
+ public:
263
+ virtual ~JobTask() = default;
264
+
265
+ virtual void Run(JobDelegate* delegate) = 0;
266
+
267
+ /**
268
+ * Controls the maximum number of threads calling Run() concurrently, given
269
+ * the number of threads currently assigned to this job and executing Run().
270
+ * Run() is only invoked if the number of threads previously running Run() was
271
+ * less than the value returned. Since GetMaxConcurrency() is a leaf function,
272
+ * it must not call back any JobHandle methods.
273
+ */
274
+ virtual size_t GetMaxConcurrency(size_t worker_count) const = 0;
275
+
276
+ // TODO(1114823): Clean up once all overrides are removed.
277
+ V8_DEPRECATED("Use the version that takes |worker_count|.")
278
+ virtual size_t GetMaxConcurrency() const { return 0; }
279
+ };
280
+
281
/**
 * The interface represents complex arguments to trace events.
 */
class ConvertableToTraceFormat {
 public:
  virtual ~ConvertableToTraceFormat() = default;

  /**
   * Append the class info to the provided |out| string. The appended
   * data must be a valid JSON object. Strings must be properly quoted, and
   * escaped. There is no processing applied to the content after it is
   * appended.
   */
  virtual void AppendAsTraceFormat(std::string* out) const = 0;
};
296
+
297
+ /**
298
+ * V8 Tracing controller.
299
+ *
300
+ * Can be implemented by an embedder to record trace events from V8.
301
+ */
302
+ class TracingController {
303
+ public:
304
+ virtual ~TracingController() = default;
305
+
306
+ // In Perfetto mode, trace events are written using Perfetto's Track Event
307
+ // API directly without going through the embedder. However, it is still
308
+ // possible to observe tracing being enabled and disabled.
309
+ #if !defined(V8_USE_PERFETTO)
310
+ /**
311
+ * Called by TRACE_EVENT* macros, don't call this directly.
312
+ * The name parameter is a category group for example:
313
+ * TRACE_EVENT0("v8,parse", "V8.Parse")
314
+ * The pointer returned points to a value with zero or more of the bits
315
+ * defined in CategoryGroupEnabledFlags.
316
+ **/
317
+ virtual const uint8_t* GetCategoryGroupEnabled(const char* name) {
318
+ static uint8_t no = 0;
319
+ return &no;
320
+ }
321
+
322
+ /**
323
+ * Adds a trace event to the platform tracing system. These function calls are
324
+ * usually the result of a TRACE_* macro from trace_event_common.h when
325
+ * tracing and the category of the particular trace are enabled. It is not
326
+ * advisable to call these functions on their own; they are really only meant
327
+ * to be used by the trace macros. The returned handle can be used by
328
+ * UpdateTraceEventDuration to update the duration of COMPLETE events.
329
+ */
330
+ virtual uint64_t AddTraceEvent(
331
+ char phase, const uint8_t* category_enabled_flag, const char* name,
332
+ const char* scope, uint64_t id, uint64_t bind_id, int32_t num_args,
333
+ const char** arg_names, const uint8_t* arg_types,
334
+ const uint64_t* arg_values,
335
+ std::unique_ptr<ConvertableToTraceFormat>* arg_convertables,
336
+ unsigned int flags) {
337
+ return 0;
338
+ }
339
+ virtual uint64_t AddTraceEventWithTimestamp(
340
+ char phase, const uint8_t* category_enabled_flag, const char* name,
341
+ const char* scope, uint64_t id, uint64_t bind_id, int32_t num_args,
342
+ const char** arg_names, const uint8_t* arg_types,
343
+ const uint64_t* arg_values,
344
+ std::unique_ptr<ConvertableToTraceFormat>* arg_convertables,
345
+ unsigned int flags, int64_t timestamp) {
346
+ return 0;
347
+ }
348
+
349
+ /**
350
+ * Sets the duration field of a COMPLETE trace event. It must be called with
351
+ * the handle returned from AddTraceEvent().
352
+ **/
353
+ virtual void UpdateTraceEventDuration(const uint8_t* category_enabled_flag,
354
+ const char* name, uint64_t handle) {}
355
+ #endif // !defined(V8_USE_PERFETTO)
356
+
357
+ class TraceStateObserver {
358
+ public:
359
+ virtual ~TraceStateObserver() = default;
360
+ virtual void OnTraceEnabled() = 0;
361
+ virtual void OnTraceDisabled() = 0;
362
+ };
363
+
364
+ /** Adds tracing state change observer. */
365
+ virtual void AddTraceStateObserver(TraceStateObserver*) {}
366
+
367
+ /** Removes tracing state change observer. */
368
+ virtual void RemoveTraceStateObserver(TraceStateObserver*) {}
369
+ };
370
+
371
/**
 * A V8 memory page allocator.
 *
 * Can be implemented by an embedder to manage large host OS allocations.
 */
class PageAllocator {
 public:
  virtual ~PageAllocator() = default;

  /**
   * Gets the page granularity for AllocatePages and FreePages. Addresses and
   * lengths for those calls should be multiples of AllocatePageSize().
   */
  virtual size_t AllocatePageSize() = 0;

  /**
   * Gets the page granularity for SetPermissions and ReleasePages. Addresses
   * and lengths for those calls should be multiples of CommitPageSize().
   */
  virtual size_t CommitPageSize() = 0;

  /**
   * Sets the random seed so that GetRandomMmapAddr() will generate repeatable
   * sequences of random mmap addresses.
   */
  virtual void SetRandomMmapSeed(int64_t seed) = 0;

  /**
   * Returns a randomized address, suitable for memory allocation under ASLR.
   * The address will be aligned to AllocatePageSize.
   */
  virtual void* GetRandomMmapAddr() = 0;

  /**
   * Memory permissions.
   */
  enum Permission {
    kNoAccess,
    kRead,
    kReadWrite,
    kReadWriteExecute,
    kReadExecute,
    // Set this when reserving memory that will later require kReadWriteExecute
    // permissions. The resulting behavior is platform-specific, currently
    // this is used to set the MAP_JIT flag on Apple Silicon.
    // TODO(jkummerow): Remove this when Wasm has a platform-independent
    // w^x implementation.
    kNoAccessWillJitLater
  };

  /**
   * Allocates memory in range with the given alignment and permission.
   */
  virtual void* AllocatePages(void* address, size_t length, size_t alignment,
                              Permission permissions) = 0;

  /**
   * Frees memory in a range that was allocated by a call to AllocatePages.
   */
  virtual bool FreePages(void* address, size_t length) = 0;

  /**
   * Releases memory in a range that was allocated by a call to AllocatePages.
   */
  virtual bool ReleasePages(void* address, size_t length,
                            size_t new_length) = 0;

  /**
   * Sets permissions on pages in an allocated range.
   */
  virtual bool SetPermissions(void* address, size_t length,
                              Permission permissions) = 0;

  /**
   * Frees memory in the given [address, address + size) range. address and size
   * should be operating system page-aligned. The next write to this
   * memory area brings the memory transparently back.
   */
  virtual bool DiscardSystemPages(void* address, size_t size) { return true; }

  /**
   * INTERNAL ONLY: This interface has not been stabilised and may change
   * without notice from one release to another without being deprecated first.
   */
  class SharedMemoryMapping {
   public:
    // Implementations are expected to free the shared memory mapping in the
    // destructor.
    virtual ~SharedMemoryMapping() = default;
    virtual void* GetMemory() const = 0;
  };

  /**
   * INTERNAL ONLY: This interface has not been stabilised and may change
   * without notice from one release to another without being deprecated first.
   */
  class SharedMemory {
   public:
    // Implementations are expected to free the shared memory in the destructor.
    virtual ~SharedMemory() = default;
    virtual std::unique_ptr<SharedMemoryMapping> RemapTo(
        void* new_address) const = 0;
    virtual void* GetMemory() const = 0;
    virtual size_t GetSize() const = 0;
  };

  /**
   * INTERNAL ONLY: This interface has not been stabilised and may change
   * without notice from one release to another without being deprecated first.
   *
   * Reserve pages at a fixed address returning whether the reservation is
   * possible. The reserved memory is detached from the PageAllocator and so
   * should not be freed by it. It's intended for use with
   * SharedMemory::RemapTo, where ~SharedMemoryMapping would free the memory.
   */
  virtual bool ReserveForSharedMemoryMapping(void* address, size_t size) {
    return false;
  }

  /**
   * INTERNAL ONLY: This interface has not been stabilised and may change
   * without notice from one release to another without being deprecated first.
   *
   * Allocates shared memory pages. Not all PageAllocators need support this and
   * so this method need not be overridden.
   * Allocates a new read-only shared memory region of size |length| and copies
   * the memory at |original_address| into it.
   */
  virtual std::unique_ptr<SharedMemory> AllocateSharedPages(
      size_t length, const void* original_address) {
    return {};
  }

  /**
   * INTERNAL ONLY: This interface has not been stabilised and may change
   * without notice from one release to another without being deprecated first.
   *
   * If not overridden and changed to return true, V8 will not attempt to call
   * AllocateSharedPages or RemapSharedPages. If overridden, AllocateSharedPages
   * and RemapSharedPages must also be overridden.
   */
  virtual bool CanAllocateSharedPages() { return false; }
};
514
+
515
+ /**
516
+ * V8 Platform abstraction layer.
517
+ *
518
+ * The embedder has to provide an implementation of this interface before
519
+ * initializing the rest of V8.
520
+ */
521
+ class Platform {
522
+ public:
523
+ virtual ~Platform() = default;
524
+
525
+ /**
526
+ * Allows the embedder to manage memory page allocations.
527
+ */
528
+ virtual PageAllocator* GetPageAllocator() {
529
+ // TODO(bbudge) Make this abstract after all embedders implement this.
530
+ return nullptr;
531
+ }
532
+
533
+ /**
534
+ * Enables the embedder to respond in cases where V8 can't allocate large
535
+ * blocks of memory. V8 retries the failed allocation once after calling this
536
+ * method. On success, execution continues; otherwise V8 exits with a fatal
537
+ * error.
538
+ * Embedder overrides of this function must NOT call back into V8.
539
+ */
540
+ virtual void OnCriticalMemoryPressure() {
541
+ // TODO(bbudge) Remove this when embedders override the following method.
542
+ // See crbug.com/634547.
543
+ }
544
+
545
+ /**
546
+ * Enables the embedder to respond in cases where V8 can't allocate large
547
+ * memory regions. The |length| parameter is the amount of memory needed.
548
+ * Returns true if memory is now available. Returns false if no memory could
549
+ * be made available. V8 will retry allocations until this method returns
550
+ * false.
551
+ *
552
+ * Embedder overrides of this function must NOT call back into V8.
553
+ */
554
+ virtual bool OnCriticalMemoryPressure(size_t length) { return false; }
555
+
556
+ /**
557
+ * Gets the number of worker threads used by
558
+ * Call(BlockingTask)OnWorkerThread(). This can be used to estimate the number
559
+ * of tasks a work package should be split into. A return value of 0 means
560
+ * that there are no worker threads available. Note that a value of 0 won't
561
+ * prohibit V8 from posting tasks using |CallOnWorkerThread|.
562
+ */
563
+ virtual int NumberOfWorkerThreads() = 0;
564
+
565
+ /**
566
+ * Returns a TaskRunner which can be used to post a task on the foreground.
567
+ * The TaskRunner's NonNestableTasksEnabled() must be true. This function
568
+ * should only be called from a foreground thread.
569
+ */
570
+ virtual std::shared_ptr<v8::TaskRunner> GetForegroundTaskRunner(
571
+ Isolate* isolate) = 0;
572
+
573
+ /**
574
+ * Schedules a task to be invoked on a worker thread.
575
+ */
576
+ virtual void CallOnWorkerThread(std::unique_ptr<Task> task) = 0;
577
+
578
+ /**
579
+ * Schedules a task that blocks the main thread to be invoked with
580
+ * high-priority on a worker thread.
581
+ */
582
+ virtual void CallBlockingTaskOnWorkerThread(std::unique_ptr<Task> task) {
583
+ // Embedders may optionally override this to process these tasks in a high
584
+ // priority pool.
585
+ CallOnWorkerThread(std::move(task));
586
+ }
587
+
588
+ /**
589
+ * Schedules a task to be invoked with low-priority on a worker thread.
590
+ */
591
+ virtual void CallLowPriorityTaskOnWorkerThread(std::unique_ptr<Task> task) {
592
+ // Embedders may optionally override this to process these tasks in a low
593
+ // priority pool.
594
+ CallOnWorkerThread(std::move(task));
595
+ }
596
+
597
+ /**
598
+ * Schedules a task to be invoked on a worker thread after |delay_in_seconds|
599
+ * expires.
600
+ */
601
+ virtual void CallDelayedOnWorkerThread(std::unique_ptr<Task> task,
602
+ double delay_in_seconds) = 0;
603
+
604
+ /**
605
+ * Returns true if idle tasks are enabled for the given |isolate|.
606
+ */
607
+ virtual bool IdleTasksEnabled(Isolate* isolate) { return false; }
608
+
609
+ /**
610
+ * Posts |job_task| to run in parallel. Returns a JobHandle associated with
611
+ * the Job, which can be joined or canceled.
612
+ * This avoids degenerate cases:
613
+ * - Calling CallOnWorkerThread() for each work item, causing significant
614
+ * overhead.
615
+ * - Fixed number of CallOnWorkerThread() calls that split the work and might
616
+ * run for a long time. This is problematic when many components post
617
+ * "num cores" tasks and all expect to use all the cores. In these cases,
618
+ * the scheduler lacks context to be fair to multiple same-priority requests
619
+ * and/or ability to request lower priority work to yield when high priority
620
+ * work comes in.
621
+ * A canonical implementation of |job_task| looks like:
622
+ * class MyJobTask : public JobTask {
623
+ * public:
624
+ * MyJobTask(...) : worker_queue_(...) {}
625
+ * // JobTask:
626
+ * void Run(JobDelegate* delegate) override {
627
+ * while (!delegate->ShouldYield()) {
628
+ * // Smallest unit of work.
629
+ * auto work_item = worker_queue_.TakeWorkItem(); // Thread safe.
630
+ * if (!work_item) return;
631
+ * ProcessWork(work_item);
632
+ * }
633
+ * }
634
+ *
635
+ * size_t GetMaxConcurrency() const override {
636
+ * return worker_queue_.GetSize(); // Thread safe.
637
+ * }
638
+ * };
639
+ * auto handle = PostJob(TaskPriority::kUserVisible,
640
+ * std::make_unique<MyJobTask>(...));
641
+ * handle->Join();
642
+ *
643
+ * PostJob() and methods of the returned JobHandle/JobDelegate, must never be
644
+ * called while holding a lock that could be acquired by JobTask::Run or
645
+ * JobTask::GetMaxConcurrency -- that could result in a deadlock. This is
646
+ * because [1] JobTask::GetMaxConcurrency may be invoked while holding
647
+ * internal lock (A), hence JobTask::GetMaxConcurrency can only use a lock (B)
648
+ * if that lock is *never* held while calling back into JobHandle from any
649
+ * thread (A=>B/B=>A deadlock) and [2] JobTask::Run or
650
+ * JobTask::GetMaxConcurrency may be invoked synchronously from JobHandle
651
+ * (B=>JobHandle::foo=>B deadlock).
652
+ *
653
+ * A sufficient PostJob() implementation that uses the default Job provided in
654
+ * libplatform looks like:
655
+ * std::unique_ptr<JobHandle> PostJob(
656
+ * TaskPriority priority, std::unique_ptr<JobTask> job_task) override {
657
+ * return v8::platform::NewDefaultJobHandle(
658
+ * this, priority, std::move(job_task), NumberOfWorkerThreads());
659
+ * }
660
+ */
661
+ virtual std::unique_ptr<JobHandle> PostJob(
662
+ TaskPriority priority, std::unique_ptr<JobTask> job_task) = 0;
663
+
664
+ /**
665
+ * Monotonically increasing time in seconds from an arbitrary fixed point in
666
+ * the past. This function is expected to return at least
667
+ * millisecond-precision values. For this reason,
668
+ * it is recommended that the fixed point be no further in the past than
669
+ * the epoch.
670
+ **/
671
+ virtual double MonotonicallyIncreasingTime() = 0;
672
+
673
+ /**
674
+ * Current wall-clock time in milliseconds since epoch.
675
+ * This function is expected to return at least millisecond-precision values.
676
+ */
677
+ virtual double CurrentClockTimeMillis() = 0;
678
+
679
+ typedef void (*StackTracePrinter)();
680
+
681
+ /**
682
+ * Returns a function pointer that print a stack trace of the current stack
683
+ * on invocation. Disables printing of the stack trace if nullptr.
684
+ */
685
+ virtual StackTracePrinter GetStackTracePrinter() { return nullptr; }
686
+
687
+ /**
688
+ * Returns an instance of a v8::TracingController. This must be non-nullptr.
689
+ */
690
+ virtual TracingController* GetTracingController() = 0;
691
+
692
+ /**
693
+ * Tells the embedder to generate and upload a crashdump during an unexpected
694
+ * but non-critical scenario.
695
+ */
696
+ virtual void DumpWithoutCrashing() {}
697
+
698
+ protected:
699
+ /**
700
+ * Default implementation of current wall-clock time in milliseconds
701
+ * since epoch. Useful for implementing |CurrentClockTimeMillis| if
702
+ * nothing special needed.
703
+ */
704
+ V8_EXPORT static double SystemClockTimeMillis();
705
+ };
706
+
707
+ } // namespace v8
708
+
709
+ #endif // V8_V8_PLATFORM_H_