aha-libv8-node 16.0.0.0-linux

Files changed (67)
  1. checksums.yaml +7 -0
  2. data/ext/libv8-node/.location.yml +1 -0
  3. data/ext/libv8-node/location.rb +76 -0
  4. data/ext/libv8-node/paths.rb +30 -0
  5. data/lib/libv8-node.rb +1 -0
  6. data/lib/libv8/node.rb +11 -0
  7. data/lib/libv8/node/version.rb +7 -0
  8. data/vendor/v8/include/cppgc/allocation.h +229 -0
  9. data/vendor/v8/include/cppgc/common.h +29 -0
  10. data/vendor/v8/include/cppgc/cross-thread-persistent.h +345 -0
  11. data/vendor/v8/include/cppgc/custom-space.h +97 -0
  12. data/vendor/v8/include/cppgc/default-platform.h +75 -0
  13. data/vendor/v8/include/cppgc/ephemeron-pair.h +30 -0
  14. data/vendor/v8/include/cppgc/garbage-collected.h +116 -0
  15. data/vendor/v8/include/cppgc/heap-consistency.h +236 -0
  16. data/vendor/v8/include/cppgc/heap-state.h +59 -0
  17. data/vendor/v8/include/cppgc/heap-statistics.h +110 -0
  18. data/vendor/v8/include/cppgc/heap.h +199 -0
  19. data/vendor/v8/include/cppgc/internal/api-constants.h +47 -0
  20. data/vendor/v8/include/cppgc/internal/atomic-entry-flag.h +48 -0
  21. data/vendor/v8/include/cppgc/internal/caged-heap-local-data.h +68 -0
  22. data/vendor/v8/include/cppgc/internal/compiler-specific.h +38 -0
  23. data/vendor/v8/include/cppgc/internal/finalizer-trait.h +90 -0
  24. data/vendor/v8/include/cppgc/internal/gc-info.h +47 -0
  25. data/vendor/v8/include/cppgc/internal/logging.h +50 -0
  26. data/vendor/v8/include/cppgc/internal/name-trait.h +111 -0
  27. data/vendor/v8/include/cppgc/internal/persistent-node.h +132 -0
  28. data/vendor/v8/include/cppgc/internal/pointer-policies.h +143 -0
  29. data/vendor/v8/include/cppgc/internal/prefinalizer-handler.h +30 -0
  30. data/vendor/v8/include/cppgc/internal/write-barrier.h +390 -0
  31. data/vendor/v8/include/cppgc/liveness-broker.h +74 -0
  32. data/vendor/v8/include/cppgc/macros.h +26 -0
  33. data/vendor/v8/include/cppgc/member.h +271 -0
  34. data/vendor/v8/include/cppgc/name-provider.h +65 -0
  35. data/vendor/v8/include/cppgc/object-size-trait.h +58 -0
  36. data/vendor/v8/include/cppgc/persistent.h +365 -0
  37. data/vendor/v8/include/cppgc/platform.h +151 -0
  38. data/vendor/v8/include/cppgc/prefinalizer.h +52 -0
  39. data/vendor/v8/include/cppgc/process-heap-statistics.h +36 -0
  40. data/vendor/v8/include/cppgc/sentinel-pointer.h +32 -0
  41. data/vendor/v8/include/cppgc/source-location.h +91 -0
  42. data/vendor/v8/include/cppgc/testing.h +50 -0
  43. data/vendor/v8/include/cppgc/trace-trait.h +116 -0
  44. data/vendor/v8/include/cppgc/type-traits.h +228 -0
  45. data/vendor/v8/include/cppgc/visitor.h +340 -0
  46. data/vendor/v8/include/libplatform/libplatform-export.h +29 -0
  47. data/vendor/v8/include/libplatform/libplatform.h +117 -0
  48. data/vendor/v8/include/libplatform/v8-tracing.h +334 -0
  49. data/vendor/v8/include/v8-cppgc.h +278 -0
  50. data/vendor/v8/include/v8-fast-api-calls.h +419 -0
  51. data/vendor/v8/include/v8-inspector-protocol.h +13 -0
  52. data/vendor/v8/include/v8-inspector.h +336 -0
  53. data/vendor/v8/include/v8-internal.h +462 -0
  54. data/vendor/v8/include/v8-metrics.h +189 -0
  55. data/vendor/v8/include/v8-platform.h +710 -0
  56. data/vendor/v8/include/v8-profiler.h +1116 -0
  57. data/vendor/v8/include/v8-unwinder-state.h +30 -0
  58. data/vendor/v8/include/v8-util.h +652 -0
  59. data/vendor/v8/include/v8-value-serializer-version.h +24 -0
  60. data/vendor/v8/include/v8-version-string.h +38 -0
  61. data/vendor/v8/include/v8-version.h +20 -0
  62. data/vendor/v8/include/v8-wasm-trap-handler-posix.h +31 -0
  63. data/vendor/v8/include/v8-wasm-trap-handler-win.h +28 -0
  64. data/vendor/v8/include/v8.h +12479 -0
  65. data/vendor/v8/include/v8config.h +521 -0
  66. data/vendor/v8/out.gn/libv8/obj/libv8_monolith.a +0 -0
  67. metadata +137 -0
data/vendor/v8/include/v8-metrics.h
@@ -0,0 +1,189 @@
+ // Copyright 2020 the V8 project authors. All rights reserved.
+ // Use of this source code is governed by a BSD-style license that can be
+ // found in the LICENSE file.
+
+ #ifndef V8_METRICS_H_
+ #define V8_METRICS_H_
+
+ #include "v8.h"  // NOLINT(build/include_directory)
+
+ namespace v8 {
+ namespace metrics {
+
+ struct GarbageCollectionPhases {
+   int64_t compact_wall_clock_duration_in_us = -1;
+   int64_t mark_wall_clock_duration_in_us = -1;
+   int64_t sweep_wall_clock_duration_in_us = -1;
+   int64_t weak_wall_clock_duration_in_us = -1;
+ };
+
+ struct GarbageCollectionSizes {
+   int64_t bytes_before = -1;
+   int64_t bytes_after = -1;
+   int64_t bytes_freed = -1;
+ };
+
+ struct GarbageCollectionFullCycle {
+   GarbageCollectionPhases total;
+   GarbageCollectionPhases total_cpp;
+   GarbageCollectionPhases main_thread;
+   GarbageCollectionPhases main_thread_cpp;
+   GarbageCollectionPhases main_thread_atomic;
+   GarbageCollectionPhases main_thread_atomic_cpp;
+   GarbageCollectionPhases main_thread_incremental;
+   GarbageCollectionPhases main_thread_incremental_cpp;
+   GarbageCollectionSizes objects;
+   GarbageCollectionSizes objects_cpp;
+   GarbageCollectionSizes memory;
+   GarbageCollectionSizes memory_cpp;
+   double collection_rate_in_percent;
+   double collection_rate_cpp_in_percent;
+   double efficiency_in_bytes_per_us;
+   double efficiency_cpp_in_bytes_per_us;
+   double main_thread_efficiency_in_bytes_per_us;
+   double main_thread_efficiency_cpp_in_bytes_per_us;
+ };
+
+ struct GarbageCollectionFullMainThreadIncrementalMark {
+   int64_t wall_clock_duration_in_us = -1;
+   int64_t cpp_wall_clock_duration_in_us = -1;
+ };
+
+ struct GarbageCollectionFullMainThreadIncrementalSweep {
+   int64_t wall_clock_duration_in_us = -1;
+   int64_t cpp_wall_clock_duration_in_us = -1;
+ };
+
+ struct GarbageCollectionYoungCycle {
+   int64_t total_wall_clock_duration_in_us = -1;
+   int64_t main_thread_wall_clock_duration_in_us = -1;
+   double collection_rate_in_percent;
+   double efficiency_in_bytes_per_us;
+   double main_thread_efficiency_in_bytes_per_us;
+ };
+
+ struct WasmModuleDecoded {
+   bool async = false;
+   bool streamed = false;
+   bool success = false;
+   size_t module_size_in_bytes = 0;
+   size_t function_count = 0;
+   int64_t wall_clock_duration_in_us = -1;
+ };
+
+ struct WasmModuleCompiled {
+   bool async = false;
+   bool streamed = false;
+   bool cached = false;
+   bool deserialized = false;
+   bool lazy = false;
+   bool success = false;
+   size_t code_size_in_bytes = 0;
+   size_t liftoff_bailout_count = 0;
+   int64_t wall_clock_duration_in_us = -1;
+ };
+
+ struct WasmModuleInstantiated {
+   bool async = false;
+   bool success = false;
+   size_t imported_function_count = 0;
+   int64_t wall_clock_duration_in_us = -1;
+ };
+
+ struct WasmModuleTieredUp {
+   bool lazy = false;
+   size_t code_size_in_bytes = 0;
+   int64_t wall_clock_duration_in_us = -1;
+ };
+
+ struct WasmModulesPerIsolate {
+   size_t count = 0;
+ };
+
+ #define V8_MAIN_THREAD_METRICS_EVENTS(V)              \
+   V(GarbageCollectionFullCycle)                       \
+   V(GarbageCollectionFullMainThreadIncrementalMark)   \
+   V(GarbageCollectionFullMainThreadIncrementalSweep)  \
+   V(GarbageCollectionYoungCycle)                      \
+   V(WasmModuleDecoded)                                \
+   V(WasmModuleCompiled)                               \
+   V(WasmModuleInstantiated)                           \
+   V(WasmModuleTieredUp)
+
+ #define V8_THREAD_SAFE_METRICS_EVENTS(V) V(WasmModulesPerIsolate)
+
+ /**
+  * This class serves as a base class for recording event-based metrics in V8.
+  * There are two kinds of metrics: those whose implementation is required to
+  * be thread-safe, and those that only need to be executable on the main
+  * thread. If a main-thread event is triggered from a background thread, it
+  * will be delayed and executed by the foreground task runner.
+  *
+  * The thread-safe events are listed in the V8_THREAD_SAFE_METRICS_EVENTS
+  * macro above, while the main-thread events are listed in
+  * V8_MAIN_THREAD_METRICS_EVENTS. For the latter, a virtual method
+  * AddMainThreadEvent(const E& event, ContextId context_id) will be
+  * generated, and for the former AddThreadSafeEvent(const E& event).
+  *
+  * Thread-safe events are not allowed to access the context and therefore do
+  * not carry a context ID with them. These IDs can be generated using
+  * Recorder::GetContextId(), and an ID will be valid throughout the lifetime
+  * of the isolate. It is not guaranteed that the ID will still resolve to
+  * a valid context using Recorder::GetContext() at the time the metric is
+  * recorded. In this case, an empty handle will be returned.
+  *
+  * The embedder is expected to call v8::Isolate::SetMetricsRecorder(),
+  * providing its implementation, and override the virtual methods for the
+  * events it cares about.
+  */
+ class V8_EXPORT Recorder {
+  public:
+   // A unique identifier for a context in this Isolate.
+   // It is guaranteed to not be reused throughout the lifetime of the Isolate.
+   class ContextId {
+    public:
+     ContextId() : id_(kEmptyId) {}
+
+     bool IsEmpty() const { return id_ == kEmptyId; }
+     static const ContextId Empty() { return ContextId{kEmptyId}; }
+
+     bool operator==(const ContextId& other) const { return id_ == other.id_; }
+     bool operator!=(const ContextId& other) const { return id_ != other.id_; }
+
+    private:
+     friend class ::v8::Context;
+     friend class ::v8::internal::Isolate;
+
+     explicit ContextId(uintptr_t id) : id_(id) {}
+
+     static constexpr uintptr_t kEmptyId = 0;
+     uintptr_t id_;
+   };
+
+   virtual ~Recorder() = default;
+
+ #define ADD_MAIN_THREAD_EVENT(E) \
+   virtual void AddMainThreadEvent(const E& event, ContextId context_id) {}
+   V8_MAIN_THREAD_METRICS_EVENTS(ADD_MAIN_THREAD_EVENT)
+ #undef ADD_MAIN_THREAD_EVENT
+
+ #define ADD_THREAD_SAFE_EVENT(E) \
+   virtual void AddThreadSafeEvent(const E& event) {}
+   V8_THREAD_SAFE_METRICS_EVENTS(ADD_THREAD_SAFE_EVENT)
+ #undef ADD_THREAD_SAFE_EVENT
+
+   virtual void NotifyIsolateDisposal() {}
+
+   // Return the context with the given id or an empty handle if the context
+   // was already garbage collected.
+   static MaybeLocal<Context> GetContext(Isolate* isolate, ContextId id);
+   // Return the unique id corresponding to the given context.
+   static ContextId GetContextId(Local<Context> context);
+ };
+
+ }  // namespace metrics
+ }  // namespace v8
+
+ #endif  // V8_METRICS_H_
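
A minimal sketch (not part of this header) of an embedder Recorder that logs full GC cycles. GcLogger is an illustrative name; the override signature comes from the ADD_MAIN_THREAD_EVENT expansion above, and the assumption that v8::Isolate::SetMetricsRecorder() takes a std::shared_ptr matches the v8.h vendored in this gem.

// Illustrative Recorder subclass; logs one line per full GC cycle.
#include <cstdio>
#include <memory>
#include "v8-metrics.h"
#include "v8.h"

class GcLogger : public v8::metrics::Recorder {
 public:
  void AddMainThreadEvent(const v8::metrics::GarbageCollectionFullCycle& event,
                          ContextId context_id) override {
    // Duration fields default to -1 when a phase was not measured.
    std::printf(
        "full GC: mark=%lldus sweep=%lldus rate=%.1f%%\n",
        static_cast<long long>(
            event.main_thread.mark_wall_clock_duration_in_us),
        static_cast<long long>(
            event.main_thread.sweep_wall_clock_duration_in_us),
        event.collection_rate_in_percent);
  }
};

// Registration, assuming |isolate| is a live v8::Isolate:
// isolate->SetMetricsRecorder(std::make_shared<GcLogger>());
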
data/vendor/v8/include/v8-platform.h
@@ -0,0 +1,710 @@
+ // Copyright 2013 the V8 project authors. All rights reserved.
+ // Use of this source code is governed by a BSD-style license that can be
+ // found in the LICENSE file.
+
+ #ifndef V8_V8_PLATFORM_H_
+ #define V8_V8_PLATFORM_H_
+
+ #include <stddef.h>
+ #include <stdint.h>
+ #include <stdlib.h>  // For abort.
+ #include <memory>
+ #include <string>
+
+ #include "v8config.h"  // NOLINT(build/include_directory)
+
+ namespace v8 {
+
+ class Isolate;
+
+ // Valid priorities supported by the task scheduling infrastructure.
+ enum class TaskPriority : uint8_t {
+   /**
+    * Best-effort tasks are not critical for performance of the application.
+    * The platform implementation should preempt such tasks if higher-priority
+    * tasks arrive.
+    */
+   kBestEffort,
+   /**
+    * User-visible tasks are long-running background tasks that will improve
+    * performance and memory usage of the application upon completion.
+    * Example: background compilation and garbage collection.
+    */
+   kUserVisible,
+   /**
+    * User-blocking tasks are the highest-priority tasks that block the
+    * execution thread (e.g. major garbage collection). They must be finished
+    * as soon as possible.
+    */
+   kUserBlocking,
+ };
+
+ /**
+  * A Task represents a unit of work.
+  */
+ class Task {
+  public:
+   virtual ~Task() = default;
+
+   virtual void Run() = 0;
+ };
+
+ /**
+  * An IdleTask represents a unit of work to be performed in idle time.
+  * The Run method is invoked with an argument that specifies the deadline in
+  * seconds returned by MonotonicallyIncreasingTime().
+  * The idle task is expected to complete by this deadline.
+  */
+ class IdleTask {
+  public:
+   virtual ~IdleTask() = default;
+   virtual void Run(double deadline_in_seconds) = 0;
+ };
+
+ /**
+  * A TaskRunner allows scheduling of tasks. The TaskRunner may still be used
+  * to post tasks after the isolate gets destructed, but these tasks may not
+  * get executed anymore. All tasks posted to a given TaskRunner will be
+  * invoked in sequence. Tasks can be posted from any thread.
+  */
+ class TaskRunner {
+  public:
+   /**
+    * Schedules a task to be invoked by this TaskRunner. The TaskRunner
+    * implementation takes ownership of |task|.
+    */
+   virtual void PostTask(std::unique_ptr<Task> task) = 0;
+
+   /**
+    * Schedules a task to be invoked by this TaskRunner. The TaskRunner
+    * implementation takes ownership of |task|. The |task| cannot be nested
+    * within other task executions.
+    *
+    * Tasks which shouldn't be interleaved with JS execution must be posted
+    * with |PostNonNestableTask| or |PostNonNestableDelayedTask|. This is
+    * because the embedder may process tasks in a callback which is called
+    * during JS execution.
+    *
+    * In particular, tasks which execute JS must be non-nestable, since JS
+    * execution is not allowed to nest.
+    *
+    * Requires that |TaskRunner::NonNestableTasksEnabled()| is true.
+    */
+   virtual void PostNonNestableTask(std::unique_ptr<Task> task) {}
+
+   /**
+    * Schedules a task to be invoked by this TaskRunner. The task is scheduled
+    * after the given number of seconds |delay_in_seconds|. The TaskRunner
+    * implementation takes ownership of |task|.
+    */
+   virtual void PostDelayedTask(std::unique_ptr<Task> task,
+                                double delay_in_seconds) = 0;
+
+   /**
+    * Schedules a task to be invoked by this TaskRunner. The task is scheduled
+    * after the given number of seconds |delay_in_seconds|. The TaskRunner
+    * implementation takes ownership of |task|. The |task| cannot be nested
+    * within other task executions.
+    *
+    * Tasks which shouldn't be interleaved with JS execution must be posted
+    * with |PostNonNestableTask| or |PostNonNestableDelayedTask|. This is
+    * because the embedder may process tasks in a callback which is called
+    * during JS execution.
+    *
+    * In particular, tasks which execute JS must be non-nestable, since JS
+    * execution is not allowed to nest.
+    *
+    * Requires that |TaskRunner::NonNestableDelayedTasksEnabled()| is true.
+    */
+   virtual void PostNonNestableDelayedTask(std::unique_ptr<Task> task,
+                                           double delay_in_seconds) {}
+
+   /**
+    * Schedules an idle task to be invoked by this TaskRunner. The task is
+    * scheduled when the embedder is idle. Requires that
+    * |TaskRunner::IdleTasksEnabled()| is true. Idle tasks may be reordered
+    * relative to other task types and may be starved for an arbitrarily long
+    * time if no idle time is available. The TaskRunner implementation takes
+    * ownership of |task|.
+    */
+   virtual void PostIdleTask(std::unique_ptr<IdleTask> task) = 0;
+
+   /**
+    * Returns true if idle tasks are enabled for this TaskRunner.
+    */
+   virtual bool IdleTasksEnabled() = 0;
+
+   /**
+    * Returns true if non-nestable tasks are enabled for this TaskRunner.
+    */
+   virtual bool NonNestableTasksEnabled() const { return false; }
+
+   /**
+    * Returns true if non-nestable delayed tasks are enabled for this
+    * TaskRunner.
+    */
+   virtual bool NonNestableDelayedTasksEnabled() const { return false; }
+
+   TaskRunner() = default;
+   virtual ~TaskRunner() = default;
+
+   TaskRunner(const TaskRunner&) = delete;
+   TaskRunner& operator=(const TaskRunner&) = delete;
+ };
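
Since Task is a pure-virtual interface, posting a lambda to a TaskRunner needs a small adapter. A minimal sketch, not part of this header; FunctionTask is a hypothetical helper name:

// Illustrative adapter wrapping a std::function in a v8::Task.
#include <functional>
#include <memory>
#include <utility>
#include "v8-platform.h"

class FunctionTask : public v8::Task {
 public:
  explicit FunctionTask(std::function<void()> fn) : fn_(std::move(fn)) {}
  void Run() override { fn_(); }  // Invoked once by the TaskRunner.

 private:
  std::function<void()> fn_;
};

// Usage, assuming |runner| came from Platform::GetForegroundTaskRunner():
// runner->PostTask(std::make_unique<FunctionTask>([] { /* work */ }));
// runner->PostDelayedTask(std::make_unique<FunctionTask>([] { /* later */ }),
//                         2.0 /* seconds */);
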
+
+ /**
+  * Delegate that's passed to a Job's worker task, providing an entry point to
+  * communicate with the scheduler.
+  */
+ class JobDelegate {
+  public:
+   /**
+    * Returns true if the worker task on the current thread should return
+    * ASAP. Workers should periodically invoke ShouldYield (or
+    * YieldIfNeeded()) as often as is reasonable.
+    */
+   virtual bool ShouldYield() = 0;
+
+   /**
+    * Notifies the scheduler that max concurrency was increased, and the
+    * number of workers should be adjusted accordingly. See Platform::PostJob()
+    * for more details.
+    */
+   virtual void NotifyConcurrencyIncrease() = 0;
+
+   /**
+    * Returns a task_id unique among threads currently running this job, such
+    * that GetTaskId() < worker count. To achieve this, the same task_id may be
+    * reused by a different thread after a worker_task returns.
+    */
+   virtual uint8_t GetTaskId() = 0;
+
+   /**
+    * Returns true if the current task is called from the thread currently
+    * running JobHandle::Join().
+    * TODO(etiennep): Make pure virtual once custom embedders implement it.
+    */
+   virtual bool IsJoiningThread() const { return false; }
+ };
+
+ /**
+  * Handle returned when posting a Job. Provides methods to control execution
+  * of the posted Job.
+  */
+ class JobHandle {
+  public:
+   virtual ~JobHandle() = default;
+
+   /**
+    * Notifies the scheduler that max concurrency was increased, and the
+    * number of workers should be adjusted accordingly. See Platform::PostJob()
+    * for more details.
+    */
+   virtual void NotifyConcurrencyIncrease() = 0;
+
+   /**
+    * Contributes to the job on this thread. Doesn't return until all tasks
+    * have completed and max concurrency becomes 0. When Join() is called and
+    * max concurrency reaches 0, it should not increase again. This also
+    * promotes this Job's priority to be at least as high as the calling
+    * thread's priority.
+    */
+   virtual void Join() = 0;
+
+   /**
+    * Forces all existing workers to yield ASAP. Waits until they have all
+    * returned from the Job's callback before returning.
+    */
+   virtual void Cancel() = 0;
+
+   /*
+    * Forces all existing workers to yield ASAP but doesn't wait for them.
+    * Warning: this is dangerous if the Job's callback is bound to or has
+    * access to state which may be deleted after this call.
+    * TODO(etiennep): Cleanup once implemented by all embedders.
+    */
+   virtual void CancelAndDetach() { Cancel(); }
+
+   /**
+    * Returns true if there's any work pending or any worker running.
+    */
+   virtual bool IsActive() = 0;
+
+   // TODO(etiennep): Clean up once all overrides are removed.
+   V8_DEPRECATED("Use !IsActive() instead.")
+   virtual bool IsCompleted() { return !IsActive(); }
+
+   /**
+    * Returns true if associated with a Job and other methods may be called.
+    * Returns false after Join() or Cancel() was called. This may return true
+    * even if no workers are running and IsCompleted() returns true.
+    */
+   virtual bool IsValid() = 0;
+
+   // TODO(etiennep): Clean up once all overrides are removed.
+   V8_DEPRECATED("Use IsValid() instead.")
+   virtual bool IsRunning() { return IsValid(); }
+
+   /**
+    * Returns true if job priority can be changed.
+    */
+   virtual bool UpdatePriorityEnabled() const { return false; }
+
+   /**
+    * Update this Job's priority.
+    */
+   virtual void UpdatePriority(TaskPriority new_priority) {}
+ };
+
+ /**
+  * A JobTask represents work to run in parallel from Platform::PostJob().
+  */
+ class JobTask {
+  public:
+   virtual ~JobTask() = default;
+
+   virtual void Run(JobDelegate* delegate) = 0;
+
+   /**
+    * Controls the maximum number of threads calling Run() concurrently, given
+    * the number of threads currently assigned to this job and executing
+    * Run(). Run() is only invoked if the number of threads previously running
+    * Run() was less than the value returned. Since GetMaxConcurrency() is a
+    * leaf function, it must not call back any JobHandle methods.
+    */
+   virtual size_t GetMaxConcurrency(size_t worker_count) const = 0;
+
+   // TODO(1114823): Clean up once all overrides are removed.
+   V8_DEPRECATED("Use the version that takes |worker_count|.")
+   virtual size_t GetMaxConcurrency() const { return 0; }
+ };
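
The |worker_count| overload above replaces the deprecated zero-argument GetMaxConcurrency() that the canonical PostJob() example further down in this header still uses. A minimal sketch of a JobTask written against the non-deprecated overload; CounterJob and remaining_ are illustrative names:

// Illustrative JobTask draining an atomic counter of work items.
#include <atomic>
#include <cstddef>
#include "v8-platform.h"

class CounterJob : public v8::JobTask {
 public:
  explicit CounterJob(size_t items) : remaining_(items) {}

  void Run(v8::JobDelegate* delegate) override {
    while (!delegate->ShouldYield()) {  // Yield when the scheduler asks.
      size_t left = remaining_.load(std::memory_order_relaxed);
      if (left == 0) return;            // No work left; let this worker exit.
      if (remaining_.compare_exchange_weak(left, left - 1)) {
        // Process one work item here.
      }
    }
  }

  size_t GetMaxConcurrency(size_t worker_count) const override {
    // At most one worker per pending item; |worker_count| workers are
    // already assigned and may be counted toward the returned bound.
    return remaining_.load(std::memory_order_relaxed);
  }

 private:
  std::atomic<size_t> remaining_;
};
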
+
+ /**
+  * The interface represents complex arguments to trace events.
+  */
+ class ConvertableToTraceFormat {
+  public:
+   virtual ~ConvertableToTraceFormat() = default;
+
+   /**
+    * Append the class info to the provided |out| string. The appended
+    * data must be a valid JSON object. Strings must be properly quoted and
+    * escaped. There is no processing applied to the content after it is
+    * appended.
+    */
+   virtual void AppendAsTraceFormat(std::string* out) const = 0;
+ };
+
+ /**
+  * V8 Tracing controller.
+  *
+  * Can be implemented by an embedder to record trace events from V8.
+  */
+ class TracingController {
+  public:
+   virtual ~TracingController() = default;
+
+   // In Perfetto mode, trace events are written using Perfetto's Track Event
+   // API directly, without going through the embedder. However, it is still
+   // possible to observe tracing being enabled and disabled.
+ #if !defined(V8_USE_PERFETTO)
+   /**
+    * Called by TRACE_EVENT* macros; don't call this directly.
+    * The name parameter is a category group, for example:
+    * TRACE_EVENT0("v8,parse", "V8.Parse")
+    * The pointer returned points to a value with zero or more of the bits
+    * defined in CategoryGroupEnabledFlags.
+    */
+   virtual const uint8_t* GetCategoryGroupEnabled(const char* name) {
+     static uint8_t no = 0;
+     return &no;
+   }
+
+   /**
+    * Adds a trace event to the platform tracing system. These function calls
+    * are usually the result of a TRACE_* macro from trace_event_common.h when
+    * tracing and the category of the particular trace are enabled. It is not
+    * advisable to call these functions on their own; they are really only
+    * meant to be used by the trace macros. The returned handle can be used by
+    * UpdateTraceEventDuration to update the duration of COMPLETE events.
+    */
+   virtual uint64_t AddTraceEvent(
+       char phase, const uint8_t* category_enabled_flag, const char* name,
+       const char* scope, uint64_t id, uint64_t bind_id, int32_t num_args,
+       const char** arg_names, const uint8_t* arg_types,
+       const uint64_t* arg_values,
+       std::unique_ptr<ConvertableToTraceFormat>* arg_convertables,
+       unsigned int flags) {
+     return 0;
+   }
+   virtual uint64_t AddTraceEventWithTimestamp(
+       char phase, const uint8_t* category_enabled_flag, const char* name,
+       const char* scope, uint64_t id, uint64_t bind_id, int32_t num_args,
+       const char** arg_names, const uint8_t* arg_types,
+       const uint64_t* arg_values,
+       std::unique_ptr<ConvertableToTraceFormat>* arg_convertables,
+       unsigned int flags, int64_t timestamp) {
+     return 0;
+   }
+
+   /**
+    * Sets the duration field of a COMPLETE trace event. It must be called
+    * with the handle returned from AddTraceEvent().
+    */
+   virtual void UpdateTraceEventDuration(const uint8_t* category_enabled_flag,
+                                         const char* name, uint64_t handle) {}
+ #endif  // !defined(V8_USE_PERFETTO)
+
+   class TraceStateObserver {
+    public:
+     virtual ~TraceStateObserver() = default;
+     virtual void OnTraceEnabled() = 0;
+     virtual void OnTraceDisabled() = 0;
+   };
+
+   /** Adds tracing state change observer. */
+   virtual void AddTraceStateObserver(TraceStateObserver*) {}
+
+   /** Removes tracing state change observer. */
+   virtual void RemoveTraceStateObserver(TraceStateObserver*) {}
+ };
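
A minimal sketch of hooking tracing state changes through TraceStateObserver; LoggingObserver is an illustrative name:

// Illustrative observer that logs tracing state transitions.
#include <cstdio>
#include "v8-platform.h"

class LoggingObserver : public v8::TracingController::TraceStateObserver {
 public:
  void OnTraceEnabled() override { std::puts("tracing enabled"); }
  void OnTraceDisabled() override { std::puts("tracing disabled"); }
};

// Usage, assuming |controller| is the embedder's TracingController and the
// observer outlives its registration:
// LoggingObserver observer;
// controller->AddTraceStateObserver(&observer);
// ...
// controller->RemoveTraceStateObserver(&observer);
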
+
+ /**
+  * A V8 memory page allocator.
+  *
+  * Can be implemented by an embedder to manage large host OS allocations.
+  */
+ class PageAllocator {
+  public:
+   virtual ~PageAllocator() = default;
+
+   /**
+    * Gets the page granularity for AllocatePages and FreePages. Addresses and
+    * lengths for those calls should be multiples of AllocatePageSize().
+    */
+   virtual size_t AllocatePageSize() = 0;
+
+   /**
+    * Gets the page granularity for SetPermissions and ReleasePages. Addresses
+    * and lengths for those calls should be multiples of CommitPageSize().
+    */
+   virtual size_t CommitPageSize() = 0;
+
+   /**
+    * Sets the random seed so that GetRandomMmapAddr() will generate
+    * repeatable sequences of random mmap addresses.
+    */
+   virtual void SetRandomMmapSeed(int64_t seed) = 0;
+
+   /**
+    * Returns a randomized address, suitable for memory allocation under ASLR.
+    * The address will be aligned to AllocatePageSize.
+    */
+   virtual void* GetRandomMmapAddr() = 0;
+
+   /**
+    * Memory permissions.
+    */
+   enum Permission {
+     kNoAccess,
+     kRead,
+     kReadWrite,
+     // TODO(hpayer): Remove this flag. Memory should never be rwx.
+     kReadWriteExecute,
+     kReadExecute,
+     // Set this when reserving memory that will later require
+     // kReadWriteExecute permissions. The resulting behavior is
+     // platform-specific; currently this is used to set the MAP_JIT flag on
+     // Apple Silicon.
+     // TODO(jkummerow): Remove this when Wasm has a platform-independent
+     // w^x implementation.
+     kNoAccessWillJitLater
+   };
+
+   /**
+    * Allocates memory in range with the given alignment and permission.
+    */
+   virtual void* AllocatePages(void* address, size_t length, size_t alignment,
+                               Permission permissions) = 0;
+
+   /**
+    * Frees memory in a range that was allocated by a call to AllocatePages.
+    */
+   virtual bool FreePages(void* address, size_t length) = 0;
+
+   /**
+    * Releases memory in a range that was allocated by a call to
+    * AllocatePages.
+    */
+   virtual bool ReleasePages(void* address, size_t length,
+                             size_t new_length) = 0;
+
+   /**
+    * Sets permissions on pages in an allocated range.
+    */
+   virtual bool SetPermissions(void* address, size_t length,
+                               Permission permissions) = 0;
+
+   /**
+    * Frees memory in the given [address, address + size) range. |address| and
+    * |size| should be operating-system page-aligned. The next write to this
+    * memory area brings the memory transparently back.
+    */
+   virtual bool DiscardSystemPages(void* address, size_t size) { return true; }
+
+   /**
+    * INTERNAL ONLY: This interface has not been stabilised and may change
+    * without notice from one release to another without being deprecated
+    * first.
+    */
+   class SharedMemoryMapping {
+    public:
+     // Implementations are expected to free the shared memory mapping in the
+     // destructor.
+     virtual ~SharedMemoryMapping() = default;
+     virtual void* GetMemory() const = 0;
+   };
+
+   /**
+    * INTERNAL ONLY: This interface has not been stabilised and may change
+    * without notice from one release to another without being deprecated
+    * first.
+    */
+   class SharedMemory {
+    public:
+     // Implementations are expected to free the shared memory in the
+     // destructor.
+     virtual ~SharedMemory() = default;
+     virtual std::unique_ptr<SharedMemoryMapping> RemapTo(
+         void* new_address) const = 0;
+     virtual void* GetMemory() const = 0;
+     virtual size_t GetSize() const = 0;
+   };
+
+   /**
+    * INTERNAL ONLY: This interface has not been stabilised and may change
+    * without notice from one release to another without being deprecated
+    * first.
+    *
+    * Reserve pages at a fixed address, returning whether the reservation is
+    * possible. The reserved memory is detached from the PageAllocator and so
+    * should not be freed by it. It's intended for use with
+    * SharedMemory::RemapTo, where ~SharedMemoryMapping would free the memory.
+    */
+   virtual bool ReserveForSharedMemoryMapping(void* address, size_t size) {
+     return false;
+   }
+
+   /**
+    * INTERNAL ONLY: This interface has not been stabilised and may change
+    * without notice from one release to another without being deprecated
+    * first.
+    *
+    * Allocates shared memory pages. Not all PageAllocators need to support
+    * this, and so this method need not be overridden.
+    * Allocates a new read-only shared memory region of size |length| and
+    * copies the memory at |original_address| into it.
+    */
+   virtual std::unique_ptr<SharedMemory> AllocateSharedPages(
+       size_t length, const void* original_address) {
+     return {};
+   }
+
+   /**
+    * INTERNAL ONLY: This interface has not been stabilised and may change
+    * without notice from one release to another without being deprecated
+    * first.
+    *
+    * If not overridden and changed to return true, V8 will not attempt to
+    * call AllocateSharedPages or RemapSharedPages. If overridden,
+    * AllocateSharedPages and RemapSharedPages must also be overridden.
+    */
+   virtual bool CanAllocateSharedPages() { return false; }
+ };
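
A hedged POSIX sketch of the alignment-handling core of AllocatePages, assuming Linux-style mmap/munmap semantics and a nonzero power-of-two |alignment| (as V8 passes); it ignores MAP_JIT handling for kNoAccessWillJitLater, and a real embedder PageAllocator must implement all the pure-virtual methods above:

// Illustrative helpers; AllocateAligned over-allocates so an aligned
// sub-range of |length| bytes always exists, then trims the excess.
#include <sys/mman.h>
#include <cstddef>
#include <cstdint>
#include "v8-platform.h"

namespace {

int ToProt(v8::PageAllocator::Permission p) {
  switch (p) {
    case v8::PageAllocator::kRead: return PROT_READ;
    case v8::PageAllocator::kReadWrite: return PROT_READ | PROT_WRITE;
    case v8::PageAllocator::kReadExecute: return PROT_READ | PROT_EXEC;
    case v8::PageAllocator::kReadWriteExecute:
      return PROT_READ | PROT_WRITE | PROT_EXEC;
    default: return PROT_NONE;  // kNoAccess, kNoAccessWillJitLater
  }
}

void* AllocateAligned(void* hint, size_t length, size_t alignment,
                      v8::PageAllocator::Permission permissions) {
  size_t request = length + alignment;  // Room for any alignment shift.
  uint8_t* base = static_cast<uint8_t*>(
      mmap(hint, request, ToProt(permissions),
           MAP_PRIVATE | MAP_ANONYMOUS, -1, 0));
  if (base == MAP_FAILED) return nullptr;
  uintptr_t raw = reinterpret_cast<uintptr_t>(base);
  uintptr_t aligned = (raw + alignment - 1) & ~(alignment - 1);
  uint8_t* result = reinterpret_cast<uint8_t*>(aligned);
  if (result != base) munmap(base, result - base);     // Trim unaligned head.
  size_t tail = request - (result - base) - length;
  if (tail != 0) munmap(result + length, tail);        // Trim unused tail.
  return result;
}

}  // namespace

// SetPermissions would map to mprotect(address, length, ToProt(permissions))
// and FreePages to munmap(address, length) under the same assumptions.
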
+
+ /**
+  * V8 Platform abstraction layer.
+  *
+  * The embedder has to provide an implementation of this interface before
+  * initializing the rest of V8.
+  */
+ class Platform {
+  public:
+   virtual ~Platform() = default;
+
+   /**
+    * Allows the embedder to manage memory page allocations.
+    */
+   virtual PageAllocator* GetPageAllocator() {
+     // TODO(bbudge) Make this abstract after all embedders implement this.
+     return nullptr;
+   }
+
+   /**
+    * Enables the embedder to respond in cases where V8 can't allocate large
+    * blocks of memory. V8 retries the failed allocation once after calling
+    * this method. On success, execution continues; otherwise V8 exits with a
+    * fatal error.
+    * Embedder overrides of this function must NOT call back into V8.
+    */
+   virtual void OnCriticalMemoryPressure() {
+     // TODO(bbudge) Remove this when embedders override the following method.
+     // See crbug.com/634547.
+   }
+
+   /**
+    * Enables the embedder to respond in cases where V8 can't allocate large
+    * memory regions. The |length| parameter is the amount of memory needed.
+    * Returns true if memory is now available. Returns false if no memory
+    * could be made available. V8 will retry allocations until this method
+    * returns false.
+    *
+    * Embedder overrides of this function must NOT call back into V8.
+    */
+   virtual bool OnCriticalMemoryPressure(size_t length) { return false; }
+
+   /**
+    * Gets the number of worker threads used by
+    * Call(BlockingTask)OnWorkerThread(). This can be used to estimate the
+    * number of tasks a work package should be split into. A return value of 0
+    * means that there are no worker threads available. Note that a value of 0
+    * won't prohibit V8 from posting tasks using |CallOnWorkerThread|.
+    */
+   virtual int NumberOfWorkerThreads() = 0;
+
+   /**
+    * Returns a TaskRunner which can be used to post a task on the foreground.
+    * The TaskRunner's NonNestableTasksEnabled() must be true. This function
+    * should only be called from a foreground thread.
+    */
+   virtual std::shared_ptr<v8::TaskRunner> GetForegroundTaskRunner(
+       Isolate* isolate) = 0;
+
+   /**
+    * Schedules a task to be invoked on a worker thread.
+    */
+   virtual void CallOnWorkerThread(std::unique_ptr<Task> task) = 0;
+
+   /**
+    * Schedules a task that blocks the main thread to be invoked with
+    * high priority on a worker thread.
+    */
+   virtual void CallBlockingTaskOnWorkerThread(std::unique_ptr<Task> task) {
+     // Embedders may optionally override this to process these tasks in a
+     // high-priority pool.
+     CallOnWorkerThread(std::move(task));
+   }
+
+   /**
+    * Schedules a task to be invoked with low priority on a worker thread.
+    */
+   virtual void CallLowPriorityTaskOnWorkerThread(std::unique_ptr<Task> task) {
+     // Embedders may optionally override this to process these tasks in a
+     // low-priority pool.
+     CallOnWorkerThread(std::move(task));
+   }
+
+   /**
+    * Schedules a task to be invoked on a worker thread after
+    * |delay_in_seconds| expires.
+    */
+   virtual void CallDelayedOnWorkerThread(std::unique_ptr<Task> task,
+                                          double delay_in_seconds) = 0;
+
+   /**
+    * Returns true if idle tasks are enabled for the given |isolate|.
+    */
+   virtual bool IdleTasksEnabled(Isolate* isolate) { return false; }
+
+   /**
+    * Posts |job_task| to run in parallel. Returns a JobHandle associated with
+    * the Job, which can be joined or canceled.
+    * This avoids degenerate cases:
+    * - Calling CallOnWorkerThread() for each work item, causing significant
+    *   overhead.
+    * - Fixed number of CallOnWorkerThread() calls that split the work and
+    *   might run for a long time. This is problematic when many components
+    *   post "num cores" tasks and all expect to use all the cores. In these
+    *   cases, the scheduler lacks context to be fair to multiple
+    *   same-priority requests and/or the ability to request lower-priority
+    *   work to yield when high-priority work comes in.
+    * A canonical implementation of |job_task| looks like:
+    *   class MyJobTask : public JobTask {
+    *    public:
+    *     MyJobTask(...) : worker_queue_(...) {}
+    *     // JobTask:
+    *     void Run(JobDelegate* delegate) override {
+    *       while (!delegate->ShouldYield()) {
+    *         // Smallest unit of work.
+    *         auto work_item = worker_queue_.TakeWorkItem(); // Thread safe.
+    *         if (!work_item) return;
+    *         ProcessWork(work_item);
+    *       }
+    *     }
+    *
+    *     size_t GetMaxConcurrency() const override {
+    *       return worker_queue_.GetSize(); // Thread safe.
+    *     }
+    *   };
+    *   auto handle = PostJob(TaskPriority::kUserVisible,
+    *                         std::make_unique<MyJobTask>(...));
+    *   handle->Join();
+    *
+    * PostJob() and methods of the returned JobHandle/JobDelegate must never
+    * be called while holding a lock that could be acquired by JobTask::Run or
+    * JobTask::GetMaxConcurrency -- that could result in a deadlock. This is
+    * because [1] JobTask::GetMaxConcurrency may be invoked while holding
+    * internal lock (A), hence JobTask::GetMaxConcurrency can only use a lock
+    * (B) if that lock is *never* held while calling back into JobHandle from
+    * any thread (A=>B/B=>A deadlock) and [2] JobTask::Run or
+    * JobTask::GetMaxConcurrency may be invoked synchronously from JobHandle
+    * (B=>JobHandle::foo=>B deadlock).
+    *
+    * A sufficient PostJob() implementation that uses the default Job provided
+    * in libplatform looks like:
+    *   std::unique_ptr<JobHandle> PostJob(
+    *       TaskPriority priority, std::unique_ptr<JobTask> job_task) override {
+    *     return v8::platform::NewDefaultJobHandle(
+    *         this, priority, std::move(job_task), NumberOfWorkerThreads());
+    *   }
+    */
+   virtual std::unique_ptr<JobHandle> PostJob(
+       TaskPriority priority, std::unique_ptr<JobTask> job_task) = 0;
+
+   /**
+    * Monotonically increasing time in seconds from an arbitrary fixed point
+    * in the past. This function is expected to return at least
+    * millisecond-precision values. For this reason, it is recommended that
+    * the fixed point be no further in the past than the epoch.
+    */
+   virtual double MonotonicallyIncreasingTime() = 0;
+
+   /**
+    * Current wall-clock time in milliseconds since epoch. This function is
+    * expected to return at least millisecond-precision values.
+    */
+   virtual double CurrentClockTimeMillis() = 0;
+
+   typedef void (*StackTracePrinter)();
+
+   /**
+    * Returns a function pointer that prints a stack trace of the current
+    * stack on invocation. Disables printing of the stack trace if nullptr.
+    */
+   virtual StackTracePrinter GetStackTracePrinter() { return nullptr; }
+
+   /**
+    * Returns an instance of a v8::TracingController. This must be
+    * non-nullptr.
+    */
+   virtual TracingController* GetTracingController() = 0;
+
+   /**
+    * Tells the embedder to generate and upload a crashdump during an
+    * unexpected but non-critical scenario.
+    */
+   virtual void DumpWithoutCrashing() {}
+
+  protected:
+   /**
+    * Default implementation of current wall-clock time in milliseconds
+    * since epoch. Useful for implementing |CurrentClockTimeMillis| if
+    * nothing special is needed.
+    */
+   V8_EXPORT static double SystemClockTimeMillis();
+ };
+
+ }  // namespace v8
+
+ #endif  // V8_V8_PLATFORM_H_
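
For context, the usual way an embedder satisfies this interface is the default Platform from the libplatform headers vendored above (data/vendor/v8/include/libplatform/libplatform.h). A minimal initialization sketch; the API names match this V8 era, where V8::ShutdownPlatform still exists:

// Illustrative embedder startup/shutdown using the default Platform.
#include <memory>
#include "libplatform/libplatform.h"
#include "v8.h"

int main() {
  // Create the default platform (worker pool, tracing, task runners).
  std::unique_ptr<v8::Platform> platform = v8::platform::NewDefaultPlatform();
  v8::V8::InitializePlatform(platform.get());
  v8::V8::Initialize();

  // ... create Isolates/Contexts and run scripts here ...

  v8::V8::Dispose();
  v8::V8::ShutdownPlatform();
  return 0;
}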