libv8-node 15.5.1.0.beta1-arm64-darwin-21
Sign up to get free protection for your applications and to get access to all the features.
- checksums.yaml +7 -0
- data/ext/libv8-node/.location.yml +2 -0
- data/ext/libv8-node/location.rb +91 -0
- data/ext/libv8-node/paths.rb +30 -0
- data/lib/libv8/node/version.rb +7 -0
- data/lib/libv8/node.rb +11 -0
- data/lib/libv8-node.rb +1 -0
- data/vendor/v8/include/cppgc/allocation.h +173 -0
- data/vendor/v8/include/cppgc/common.h +26 -0
- data/vendor/v8/include/cppgc/custom-space.h +62 -0
- data/vendor/v8/include/cppgc/default-platform.h +76 -0
- data/vendor/v8/include/cppgc/garbage-collected.h +116 -0
- data/vendor/v8/include/cppgc/heap.h +139 -0
- data/vendor/v8/include/cppgc/internal/api-constants.h +47 -0
- data/vendor/v8/include/cppgc/internal/atomic-entry-flag.h +48 -0
- data/vendor/v8/include/cppgc/internal/caged-heap-local-data.h +67 -0
- data/vendor/v8/include/cppgc/internal/compiler-specific.h +38 -0
- data/vendor/v8/include/cppgc/internal/finalizer-trait.h +90 -0
- data/vendor/v8/include/cppgc/internal/gc-info.h +45 -0
- data/vendor/v8/include/cppgc/internal/logging.h +50 -0
- data/vendor/v8/include/cppgc/internal/persistent-node.h +116 -0
- data/vendor/v8/include/cppgc/internal/pointer-policies.h +134 -0
- data/vendor/v8/include/cppgc/internal/prefinalizer-handler.h +30 -0
- data/vendor/v8/include/cppgc/internal/process-heap.h +34 -0
- data/vendor/v8/include/cppgc/internal/write-barrier.h +78 -0
- data/vendor/v8/include/cppgc/liveness-broker.h +68 -0
- data/vendor/v8/include/cppgc/macros.h +24 -0
- data/vendor/v8/include/cppgc/member.h +226 -0
- data/vendor/v8/include/cppgc/persistent.h +341 -0
- data/vendor/v8/include/cppgc/platform.h +130 -0
- data/vendor/v8/include/cppgc/prefinalizer.h +52 -0
- data/vendor/v8/include/cppgc/source-location.h +91 -0
- data/vendor/v8/include/cppgc/trace-trait.h +111 -0
- data/vendor/v8/include/cppgc/type-traits.h +109 -0
- data/vendor/v8/include/cppgc/visitor.h +213 -0
- data/vendor/v8/include/libplatform/libplatform-export.h +29 -0
- data/vendor/v8/include/libplatform/libplatform.h +106 -0
- data/vendor/v8/include/libplatform/v8-tracing.h +332 -0
- data/vendor/v8/include/v8-cppgc.h +226 -0
- data/vendor/v8/include/v8-fast-api-calls.h +388 -0
- data/vendor/v8/include/v8-inspector-protocol.h +13 -0
- data/vendor/v8/include/v8-inspector.h +327 -0
- data/vendor/v8/include/v8-internal.h +427 -0
- data/vendor/v8/include/v8-metrics.h +133 -0
- data/vendor/v8/include/v8-platform.h +684 -0
- data/vendor/v8/include/v8-profiler.h +1059 -0
- data/vendor/v8/include/v8-util.h +652 -0
- data/vendor/v8/include/v8-value-serializer-version.h +24 -0
- data/vendor/v8/include/v8-version-string.h +38 -0
- data/vendor/v8/include/v8-version.h +20 -0
- data/vendor/v8/include/v8-wasm-trap-handler-posix.h +31 -0
- data/vendor/v8/include/v8-wasm-trap-handler-win.h +28 -0
- data/vendor/v8/include/v8.h +12098 -0
- data/vendor/v8/include/v8config.h +484 -0
- data/vendor/v8/out.gn/libv8/obj/libv8_monolith.a +0 -0
- metadata +112 -0
@@ -0,0 +1,684 @@
|
|
1
|
+
// Copyright 2013 the V8 project authors. All rights reserved.
|
2
|
+
// Use of this source code is governed by a BSD-style license that can be
|
3
|
+
// found in the LICENSE file.
|
4
|
+
|
5
|
+
#ifndef V8_V8_PLATFORM_H_
|
6
|
+
#define V8_V8_PLATFORM_H_
|
7
|
+
|
8
|
+
#include <stddef.h>
|
9
|
+
#include <stdint.h>
|
10
|
+
#include <stdlib.h> // For abort.
|
11
|
+
#include <memory>
|
12
|
+
#include <string>
|
13
|
+
|
14
|
+
#include "v8config.h" // NOLINT(build/include_directory)
|
15
|
+
|
16
|
+
namespace v8 {
|
17
|
+
|
18
|
+
class Isolate;
|
19
|
+
|
20
|
+
/**
 * Valid priorities supported by the task scheduling infrastructure.
 * Ordered from lowest to highest priority.
 */
enum class TaskPriority : uint8_t {
  /**
   * Best effort tasks are not critical for performance of the application. The
   * platform implementation should preempt such tasks if higher priority tasks
   * arrive.
   */
  kBestEffort,
  /**
   * User visible tasks are long running background tasks that will
   * improve performance and memory usage of the application upon completion.
   * Example: background compilation and garbage collection.
   */
  kUserVisible,
  /**
   * User blocking tasks are highest priority tasks that block the execution
   * thread (e.g. major garbage collection). They must be finished as soon as
   * possible.
   */
  kUserBlocking,
};
|
41
|
+
|
42
|
+
/**
 * A Task represents a unit of work.
 */
class Task {
 public:
  virtual ~Task() = default;

  /**
   * Performs this task's unit of work. Implemented by the embedder or by V8
   * internals; invoked by the platform's task scheduling machinery.
   */
  virtual void Run() = 0;
};
|
51
|
+
|
52
|
+
/**
 * An IdleTask represents a unit of work to be performed in idle time.
 * The Run method is invoked with an argument that specifies the deadline in
 * seconds returned by MonotonicallyIncreasingTime().
 * The idle task is expected to complete by this deadline.
 */
class IdleTask {
 public:
  virtual ~IdleTask() = default;

  /**
   * Performs the idle work. |deadline_in_seconds| is expressed on the
   * MonotonicallyIncreasingTime() clock; the task should finish before it.
   */
  virtual void Run(double deadline_in_seconds) = 0;
};
|
63
|
+
|
64
|
+
/**
 * A TaskRunner allows scheduling of tasks. The TaskRunner may still be used to
 * post tasks after the isolate gets destructed, but these tasks may not get
 * executed anymore. All tasks posted to a given TaskRunner will be invoked in
 * sequence. Tasks can be posted from any thread.
 */
class TaskRunner {
 public:
  /**
   * Schedules a task to be invoked by this TaskRunner. The TaskRunner
   * implementation takes ownership of |task|.
   */
  virtual void PostTask(std::unique_ptr<Task> task) = 0;

  /**
   * Schedules a task to be invoked by this TaskRunner. The TaskRunner
   * implementation takes ownership of |task|. The |task| cannot be nested
   * within other task executions.
   *
   * Tasks which shouldn't be interleaved with JS execution must be posted with
   * |PostNonNestableTask| or |PostNonNestableDelayedTask|. This is because the
   * embedder may process tasks in a callback which is called during JS
   * execution.
   *
   * In particular, tasks which execute JS must be non-nestable, since JS
   * execution is not allowed to nest.
   *
   * Requires that |TaskRunner::NonNestableTasksEnabled()| is true.
   */
  virtual void PostNonNestableTask(std::unique_ptr<Task> task) {}

  /**
   * Schedules a task to be invoked by this TaskRunner. The task is scheduled
   * after the given number of seconds |delay_in_seconds|. The TaskRunner
   * implementation takes ownership of |task|.
   */
  virtual void PostDelayedTask(std::unique_ptr<Task> task,
                               double delay_in_seconds) = 0;

  /**
   * Schedules a task to be invoked by this TaskRunner. The task is scheduled
   * after the given number of seconds |delay_in_seconds|. The TaskRunner
   * implementation takes ownership of |task|. The |task| cannot be nested
   * within other task executions.
   *
   * Tasks which shouldn't be interleaved with JS execution must be posted with
   * |PostNonNestableTask| or |PostNonNestableDelayedTask|. This is because the
   * embedder may process tasks in a callback which is called during JS
   * execution.
   *
   * In particular, tasks which execute JS must be non-nestable, since JS
   * execution is not allowed to nest.
   *
   * Requires that |TaskRunner::NonNestableDelayedTasksEnabled()| is true.
   */
  virtual void PostNonNestableDelayedTask(std::unique_ptr<Task> task,
                                          double delay_in_seconds) {}

  /**
   * Schedules an idle task to be invoked by this TaskRunner. The task is
   * scheduled when the embedder is idle. Requires that
   * |TaskRunner::IdleTasksEnabled()| is true. Idle tasks may be reordered
   * relative to other task types and may be starved for an arbitrarily long
   * time if no idle time is available. The TaskRunner implementation takes
   * ownership of |task|.
   */
  virtual void PostIdleTask(std::unique_ptr<IdleTask> task) = 0;

  /**
   * Returns true if idle tasks are enabled for this TaskRunner.
   */
  virtual bool IdleTasksEnabled() = 0;

  /**
   * Returns true if non-nestable tasks are enabled for this TaskRunner.
   * Defaults to false; embedders opt in by overriding.
   */
  virtual bool NonNestableTasksEnabled() const { return false; }

  /**
   * Returns true if non-nestable delayed tasks are enabled for this TaskRunner.
   * Defaults to false; embedders opt in by overriding.
   */
  virtual bool NonNestableDelayedTasksEnabled() const { return false; }

  TaskRunner() = default;
  virtual ~TaskRunner() = default;

  // TaskRunners are identity objects; copying one would be meaningless.
  TaskRunner(const TaskRunner&) = delete;
  TaskRunner& operator=(const TaskRunner&) = delete;
};
|
153
|
+
|
154
|
+
/**
 * Delegate that's passed to Job's worker task, providing an entry point to
 * communicate with the scheduler.
 */
class JobDelegate {
 public:
  /**
   * Returns true if this thread should return from the worker task on the
   * current thread ASAP. Workers should periodically invoke ShouldYield (or
   * YieldIfNeeded()) as often as is reasonable.
   */
  virtual bool ShouldYield() = 0;

  /**
   * Notifies the scheduler that max concurrency was increased, and the number
   * of workers should be adjusted accordingly. See Platform::PostJob() for more
   * details.
   */
  virtual void NotifyConcurrencyIncrease() = 0;

  /**
   * Returns a task_id unique among threads currently running this job, such
   * that GetTaskId() < worker count. To achieve this, the same task_id may be
   * reused by a different thread after a worker_task returns.
   * TODO(etiennep): Make pure virtual once custom embedders implement it.
   */
  virtual uint8_t GetTaskId() { return 0; }
};
|
182
|
+
|
183
|
+
/**
 * Handle returned when posting a Job. Provides methods to control execution of
 * the posted Job.
 */
class JobHandle {
 public:
  virtual ~JobHandle() = default;

  /**
   * Notifies the scheduler that max concurrency was increased, and the number
   * of workers should be adjusted accordingly. See Platform::PostJob() for more
   * details.
   */
  virtual void NotifyConcurrencyIncrease() = 0;

  /**
   * Contributes to the job on this thread. Doesn't return until all tasks have
   * completed and max concurrency becomes 0. When Join() is called and max
   * concurrency reaches 0, it should not increase again. This also promotes
   * this Job's priority to be at least as high as the calling thread's
   * priority.
   */
  virtual void Join() = 0;

  /**
   * Forces all existing workers to yield ASAP. Waits until they have all
   * returned from the Job's callback before returning.
   */
  virtual void Cancel() = 0;

  /**
   * Returns true if there's no work pending and no worker running.
   * TODO(etiennep): Make pure virtual once custom embedders implement it.
   */
  virtual bool IsCompleted() { return true; }

  /**
   * Returns true if associated with a Job and other methods may be called.
   * Returns false after Join() or Cancel() was called.
   */
  virtual bool IsRunning() = 0;
};
|
225
|
+
|
226
|
+
/**
 * A JobTask represents work to run in parallel from Platform::PostJob().
 */
class JobTask {
 public:
  virtual ~JobTask() = default;

  /**
   * Performs work for the job. May be invoked concurrently on multiple worker
   * threads; |delegate| is the per-worker channel back to the scheduler (see
   * JobDelegate, e.g. for ShouldYield()).
   */
  virtual void Run(JobDelegate* delegate) = 0;

  /**
   * Controls the maximum number of threads calling Run() concurrently. Run() is
   * only invoked if the number of threads previously running Run() was less
   * than the value returned. Since GetMaxConcurrency() is a leaf function, it
   * must not call back any JobHandle methods.
   */
  virtual size_t GetMaxConcurrency() const = 0;

  /*
   * Meant to replace the version above, given the number of threads currently
   * assigned to this job and executing Run(). This is useful when the result
   * must include local work items not visible globally by other workers.
   * TODO(etiennep): Replace the version above by this once custom embedders are
   * migrated.
   */
  size_t GetMaxConcurrency(size_t worker_count) const {
    // Transitional default: delegate to the worker_count-agnostic overload.
    return GetMaxConcurrency();
  }
};
|
254
|
+
|
255
|
+
/**
 * The interface represents complex arguments to trace events.
 */
class ConvertableToTraceFormat {
 public:
  virtual ~ConvertableToTraceFormat() = default;

  /**
   * Append the class info to the provided |out| string. The appended
   * data must be a valid JSON object. Strings must be properly quoted, and
   * escaped. There is no processing applied to the content after it is
   * appended.
   */
  virtual void AppendAsTraceFormat(std::string* out) const = 0;
};
|
270
|
+
|
271
|
+
/**
 * V8 Tracing controller.
 *
 * Can be implemented by an embedder to record trace events from V8.
 */
class TracingController {
 public:
  virtual ~TracingController() = default;

  // In Perfetto mode, trace events are written using Perfetto's Track Event
  // API directly without going through the embedder. However, it is still
  // possible to observe tracing being enabled and disabled.
#if !defined(V8_USE_PERFETTO)
  /**
   * Called by TRACE_EVENT* macros, don't call this directly.
   * The name parameter is a category group for example:
   * TRACE_EVENT0("v8,parse", "V8.Parse")
   * The pointer returned points to a value with zero or more of the bits
   * defined in CategoryGroupEnabledFlags.
   **/
  virtual const uint8_t* GetCategoryGroupEnabled(const char* name) {
    // Default: report every category as disabled (all flag bits zero).
    static uint8_t no = 0;
    return &no;
  }

  /**
   * Adds a trace event to the platform tracing system. These function calls are
   * usually the result of a TRACE_* macro from trace_event_common.h when
   * tracing and the category of the particular trace are enabled. It is not
   * advisable to call these functions on their own; they are really only meant
   * to be used by the trace macros. The returned handle can be used by
   * UpdateTraceEventDuration to update the duration of COMPLETE events.
   */
  virtual uint64_t AddTraceEvent(
      char phase, const uint8_t* category_enabled_flag, const char* name,
      const char* scope, uint64_t id, uint64_t bind_id, int32_t num_args,
      const char** arg_names, const uint8_t* arg_types,
      const uint64_t* arg_values,
      std::unique_ptr<ConvertableToTraceFormat>* arg_convertables,
      unsigned int flags) {
    return 0;
  }
  // Same as AddTraceEvent, but with an explicit event |timestamp| supplied by
  // the caller instead of being sampled by the implementation.
  virtual uint64_t AddTraceEventWithTimestamp(
      char phase, const uint8_t* category_enabled_flag, const char* name,
      const char* scope, uint64_t id, uint64_t bind_id, int32_t num_args,
      const char** arg_names, const uint8_t* arg_types,
      const uint64_t* arg_values,
      std::unique_ptr<ConvertableToTraceFormat>* arg_convertables,
      unsigned int flags, int64_t timestamp) {
    return 0;
  }

  /**
   * Sets the duration field of a COMPLETE trace event. It must be called with
   * the handle returned from AddTraceEvent().
   **/
  virtual void UpdateTraceEventDuration(const uint8_t* category_enabled_flag,
                                        const char* name, uint64_t handle) {}
#endif  // !defined(V8_USE_PERFETTO)

  // Observer interface for being notified when tracing is turned on or off.
  class TraceStateObserver {
   public:
    virtual ~TraceStateObserver() = default;
    virtual void OnTraceEnabled() = 0;
    virtual void OnTraceDisabled() = 0;
  };

  /** Adds tracing state change observer. */
  virtual void AddTraceStateObserver(TraceStateObserver*) {}

  /** Removes tracing state change observer. */
  virtual void RemoveTraceStateObserver(TraceStateObserver*) {}
};
|
344
|
+
|
345
|
+
/**
 * A V8 memory page allocator.
 *
 * Can be implemented by an embedder to manage large host OS allocations.
 */
class PageAllocator {
 public:
  virtual ~PageAllocator() = default;

  /**
   * Gets the page granularity for AllocatePages and FreePages. Addresses and
   * lengths for those calls should be multiples of AllocatePageSize().
   */
  virtual size_t AllocatePageSize() = 0;

  /**
   * Gets the page granularity for SetPermissions and ReleasePages. Addresses
   * and lengths for those calls should be multiples of CommitPageSize().
   */
  virtual size_t CommitPageSize() = 0;

  /**
   * Sets the random seed so that GetRandomMmapAddr() will generate repeatable
   * sequences of random mmap addresses.
   */
  virtual void SetRandomMmapSeed(int64_t seed) = 0;

  /**
   * Returns a randomized address, suitable for memory allocation under ASLR.
   * The address will be aligned to AllocatePageSize.
   */
  virtual void* GetRandomMmapAddr() = 0;

  /**
   * Memory permissions.
   */
  enum Permission {
    kNoAccess,
    kRead,
    kReadWrite,
    // TODO(hpayer): Remove this flag. Memory should never be rwx.
    kReadWriteExecute,
    kReadExecute,
    // Set this when reserving memory that will later require kReadWriteExecute
    // permissions. The resulting behavior is platform-specific, currently
    // this is used to set the MAP_JIT flag on Apple Silicon.
    // TODO(jkummerow): Remove this when Wasm has a platform-independent
    // w^x implementation.
    kNoAccessWillJitLater
  };

  /**
   * Allocates memory in range with the given alignment and permission.
   */
  virtual void* AllocatePages(void* address, size_t length, size_t alignment,
                              Permission permissions) = 0;

  /**
   * Frees memory in a range that was allocated by a call to AllocatePages.
   */
  virtual bool FreePages(void* address, size_t length) = 0;

  /**
   * Releases memory in a range that was allocated by a call to AllocatePages.
   */
  virtual bool ReleasePages(void* address, size_t length,
                            size_t new_length) = 0;

  /**
   * Sets permissions on pages in an allocated range.
   */
  virtual bool SetPermissions(void* address, size_t length,
                              Permission permissions) = 0;

  /**
   * Frees memory in the given [address, address + size) range. address and size
   * should be operating system page-aligned. The next write to this
   * memory area brings the memory transparently back.
   */
  virtual bool DiscardSystemPages(void* address, size_t size) { return true; }

  /**
   * INTERNAL ONLY: This interface has not been stabilised and may change
   * without notice from one release to another without being deprecated first.
   */
  class SharedMemoryMapping {
   public:
    // Implementations are expected to free the shared memory mapping in the
    // destructor.
    virtual ~SharedMemoryMapping() = default;
    virtual void* GetMemory() const = 0;
  };

  /**
   * INTERNAL ONLY: This interface has not been stabilised and may change
   * without notice from one release to another without being deprecated first.
   */
  class SharedMemory {
   public:
    // Implementations are expected to free the shared memory in the destructor.
    virtual ~SharedMemory() = default;
    virtual std::unique_ptr<SharedMemoryMapping> RemapTo(
        void* new_address) const = 0;
    virtual void* GetMemory() const = 0;
    virtual size_t GetSize() const = 0;
  };

  /**
   * INTERNAL ONLY: This interface has not been stabilised and may change
   * without notice from one release to another without being deprecated first.
   *
   * Reserve pages at a fixed address returning whether the reservation is
   * possible. The reserved memory is detached from the PageAllocator and so
   * should not be freed by it. It's intended for use with
   * SharedMemory::RemapTo, where ~SharedMemoryMapping would free the memory.
   */
  virtual bool ReserveForSharedMemoryMapping(void* address, size_t size) {
    return false;
  }

  /**
   * INTERNAL ONLY: This interface has not been stabilised and may change
   * without notice from one release to another without being deprecated first.
   *
   * Allocates shared memory pages. Not all PageAllocators need support this and
   * so this method need not be overridden.
   * Allocates a new read-only shared memory region of size |length| and copies
   * the memory at |original_address| into it.
   */
  virtual std::unique_ptr<SharedMemory> AllocateSharedPages(
      size_t length, const void* original_address) {
    return {};
  }

  /**
   * INTERNAL ONLY: This interface has not been stabilised and may change
   * without notice from one release to another without being deprecated first.
   *
   * If not overridden and changed to return true, V8 will not attempt to call
   * AllocateSharedPages or RemapSharedPages. If overridden, AllocateSharedPages
   * and RemapSharedPages must also be overridden.
   */
  virtual bool CanAllocateSharedPages() { return false; }
};
|
489
|
+
|
490
|
+
/**
 * V8 Platform abstraction layer.
 *
 * The embedder has to provide an implementation of this interface before
 * initializing the rest of V8.
 */
class Platform {
 public:
  virtual ~Platform() = default;

  /**
   * Allows the embedder to manage memory page allocations.
   */
  virtual PageAllocator* GetPageAllocator() {
    // TODO(bbudge) Make this abstract after all embedders implement this.
    return nullptr;
  }

  /**
   * Enables the embedder to respond in cases where V8 can't allocate large
   * blocks of memory. V8 retries the failed allocation once after calling this
   * method. On success, execution continues; otherwise V8 exits with a fatal
   * error.
   * Embedder overrides of this function must NOT call back into V8.
   */
  virtual void OnCriticalMemoryPressure() {
    // TODO(bbudge) Remove this when embedders override the following method.
    // See crbug.com/634547.
  }

  /**
   * Enables the embedder to respond in cases where V8 can't allocate large
   * memory regions. The |length| parameter is the amount of memory needed.
   * Returns true if memory is now available. Returns false if no memory could
   * be made available. V8 will retry allocations until this method returns
   * false.
   *
   * Embedder overrides of this function must NOT call back into V8.
   */
  virtual bool OnCriticalMemoryPressure(size_t length) { return false; }

  /**
   * Gets the number of worker threads used by
   * Call(BlockingTask)OnWorkerThread(). This can be used to estimate the number
   * of tasks a work package should be split into. A return value of 0 means
   * that there are no worker threads available. Note that a value of 0 won't
   * prohibit V8 from posting tasks using |CallOnWorkerThread|.
   */
  virtual int NumberOfWorkerThreads() = 0;

  /**
   * Returns a TaskRunner which can be used to post a task on the foreground.
   * The TaskRunner's NonNestableTasksEnabled() must be true. This function
   * should only be called from a foreground thread.
   */
  virtual std::shared_ptr<v8::TaskRunner> GetForegroundTaskRunner(
      Isolate* isolate) = 0;

  /**
   * Schedules a task to be invoked on a worker thread.
   */
  virtual void CallOnWorkerThread(std::unique_ptr<Task> task) = 0;

  /**
   * Schedules a task that blocks the main thread to be invoked with
   * high-priority on a worker thread.
   */
  virtual void CallBlockingTaskOnWorkerThread(std::unique_ptr<Task> task) {
    // Embedders may optionally override this to process these tasks in a high
    // priority pool.
    CallOnWorkerThread(std::move(task));
  }

  /**
   * Schedules a task to be invoked with low-priority on a worker thread.
   */
  virtual void CallLowPriorityTaskOnWorkerThread(std::unique_ptr<Task> task) {
    // Embedders may optionally override this to process these tasks in a low
    // priority pool.
    CallOnWorkerThread(std::move(task));
  }

  /**
   * Schedules a task to be invoked on a worker thread after |delay_in_seconds|
   * expires.
   */
  virtual void CallDelayedOnWorkerThread(std::unique_ptr<Task> task,
                                         double delay_in_seconds) = 0;

  /**
   * Returns true if idle tasks are enabled for the given |isolate|.
   */
  virtual bool IdleTasksEnabled(Isolate* isolate) { return false; }

  /**
   * Posts |job_task| to run in parallel. Returns a JobHandle associated with
   * the Job, which can be joined or canceled.
   * This avoids degenerate cases:
   * - Calling CallOnWorkerThread() for each work item, causing significant
   *   overhead.
   * - Fixed number of CallOnWorkerThread() calls that split the work and might
   *   run for a long time. This is problematic when many components post
   *   "num cores" tasks and all expect to use all the cores. In these cases,
   *   the scheduler lacks context to be fair to multiple same-priority requests
   *   and/or ability to request lower priority work to yield when high priority
   *   work comes in.
   * A canonical implementation of |job_task| looks like:
   * class MyJobTask : public JobTask {
   *  public:
   *   MyJobTask(...) : worker_queue_(...) {}
   *   // JobTask:
   *   void Run(JobDelegate* delegate) override {
   *     while (!delegate->ShouldYield()) {
   *       // Smallest unit of work.
   *       auto work_item = worker_queue_.TakeWorkItem(); // Thread safe.
   *       if (!work_item) return;
   *       ProcessWork(work_item);
   *     }
   *   }
   *
   *   size_t GetMaxConcurrency() const override {
   *     return worker_queue_.GetSize(); // Thread safe.
   *   }
   * };
   * auto handle = PostJob(TaskPriority::kUserVisible,
   *                       std::make_unique<MyJobTask>(...));
   * handle->Join();
   *
   * PostJob() and methods of the returned JobHandle/JobDelegate, must never be
   * called while holding a lock that could be acquired by JobTask::Run or
   * JobTask::GetMaxConcurrency -- that could result in a deadlock. This is
   * because [1] JobTask::GetMaxConcurrency may be invoked while holding
   * internal lock (A), hence JobTask::GetMaxConcurrency can only use a lock (B)
   * if that lock is *never* held while calling back into JobHandle from any
   * thread (A=>B/B=>A deadlock) and [2] JobTask::Run or
   * JobTask::GetMaxConcurrency may be invoked synchronously from JobHandle
   * (B=>JobHandle::foo=>B deadlock).
   *
   * A sufficient PostJob() implementation that uses the default Job provided in
   * libplatform looks like:
   *  std::unique_ptr<JobHandle> PostJob(
   *      TaskPriority priority, std::unique_ptr<JobTask> job_task) override {
   *    return v8::platform::NewDefaultJobHandle(
   *        this, priority, std::move(job_task), NumberOfWorkerThreads());
   *  }
   */
  virtual std::unique_ptr<JobHandle> PostJob(
      TaskPriority priority, std::unique_ptr<JobTask> job_task) = 0;

  /**
   * Monotonically increasing time in seconds from an arbitrary fixed point in
   * the past. This function is expected to return at least
   * millisecond-precision values. For this reason,
   * it is recommended that the fixed point be no further in the past than
   * the epoch.
   **/
  virtual double MonotonicallyIncreasingTime() = 0;

  /**
   * Current wall-clock time in milliseconds since epoch.
   * This function is expected to return at least millisecond-precision values.
   */
  virtual double CurrentClockTimeMillis() = 0;

  // Signature for an embedder-supplied stack-trace-printing callback.
  typedef void (*StackTracePrinter)();

  /**
   * Returns a function pointer that prints a stack trace of the current stack
   * on invocation. Disables printing of the stack trace if nullptr.
   */
  virtual StackTracePrinter GetStackTracePrinter() { return nullptr; }

  /**
   * Returns an instance of a v8::TracingController. This must be non-nullptr.
   */
  virtual TracingController* GetTracingController() = 0;

  /**
   * Tells the embedder to generate and upload a crashdump during an unexpected
   * but non-critical scenario.
   */
  virtual void DumpWithoutCrashing() {}

 protected:
  /**
   * Default implementation of current wall-clock time in milliseconds
   * since epoch. Useful for implementing |CurrentClockTimeMillis| if
   * nothing special needed.
   */
  V8_EXPORT static double SystemClockTimeMillis();
};
|
681
|
+
|
682
|
+
} // namespace v8
|
683
|
+
|
684
|
+
#endif // V8_V8_PLATFORM_H_
|