node-web-audio-api 0.20.0 → 0.21.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/CHANGELOG.md +4 -0
- package/README.md +1 -3
- package/index.cjs +81 -83
- package/index.mjs +5 -0
- package/js/AudioContext.js +16 -8
- package/js/AudioParamMap.js +88 -0
- package/js/AudioRenderCapacity.js +117 -0
- package/js/AudioWorklet.js +261 -0
- package/js/AudioWorkletGlobalScope.js +303 -0
- package/js/AudioWorkletNode.js +290 -0
- package/js/BaseAudioContext.js +36 -13
- package/js/Events.js +151 -5
- package/js/OfflineAudioContext.js +9 -1
- package/js/lib/symbols.js +8 -2
- package/load-native.cjs +87 -0
- package/node-web-audio-api.darwin-arm64.node +0 -0
- package/node-web-audio-api.darwin-x64.node +0 -0
- package/node-web-audio-api.linux-arm-gnueabihf.node +0 -0
- package/node-web-audio-api.linux-arm64-gnu.node +0 -0
- package/node-web-audio-api.linux-x64-gnu.node +0 -0
- package/node-web-audio-api.win32-arm64-msvc.node +0 -0
- package/node-web-audio-api.win32-x64-msvc.node +0 -0
- package/package.json +3 -1
- package/TODOS.md +0 -143
- package/js/monkey-patch.js +0 -84
- package/run-wpt.md +0 -27
- package/simple-test.cjs +0 -20
- package/simple-test.mjs +0 -20
|
@@ -0,0 +1,290 @@
|
|
|
1
|
+
/* eslint-disable no-unused-vars */
|
|
2
|
+
const conversions = require('webidl-conversions');
|
|
3
|
+
const {
|
|
4
|
+
toSanitizedSequence,
|
|
5
|
+
} = require('./lib/cast.js');
|
|
6
|
+
const {
|
|
7
|
+
throwSanitizedError,
|
|
8
|
+
} = require('./lib/errors.js');
|
|
9
|
+
const {
|
|
10
|
+
kNapiObj,
|
|
11
|
+
kProcessorRegistered,
|
|
12
|
+
kGetParameterDescriptors,
|
|
13
|
+
kPrivateConstructor,
|
|
14
|
+
kCreateProcessor,
|
|
15
|
+
} = require('./lib/symbols.js');
|
|
16
|
+
const {
|
|
17
|
+
kEnumerableProperty,
|
|
18
|
+
} = require('./lib/utils.js');
|
|
19
|
+
const {
|
|
20
|
+
propagateEvent,
|
|
21
|
+
} = require('./lib/events.js');
|
|
22
|
+
const {
|
|
23
|
+
ErrorEvent,
|
|
24
|
+
} = require('./Events.js');
|
|
25
|
+
|
|
26
|
+
/* eslint-enable no-unused-vars */
|
|
27
|
+
|
|
28
|
+
const AudioNode = require('./AudioNode.js');
|
|
29
|
+
const AudioParamMap = require('./AudioParamMap.js');
|
|
30
|
+
const IMPLEMENTATION_MAX_NUMBER_OF_CHANNELS = 32;
|
|
31
|
+
|
|
32
|
+
module.exports = (jsExport, nativeBinding) => {
|
|
33
|
+
class AudioWorkletNode extends AudioNode {
|
|
34
|
+
#port = null;
|
|
35
|
+
#parameters = {};
|
|
36
|
+
|
|
37
|
+
constructor(context, name, options) {
|
|
38
|
+
if (arguments.length < 2) {
|
|
39
|
+
throw new TypeError(`Failed to construct 'AudioWorkletNode': 2 arguments required, but only ${arguments.length} present`);
|
|
40
|
+
}
|
|
41
|
+
|
|
42
|
+
if (!(context instanceof jsExport.BaseAudioContext)) {
|
|
43
|
+
throw new TypeError(`Failed to construct 'AudioWorkletNode': argument 1 is not of type BaseAudioContext`);
|
|
44
|
+
}
|
|
45
|
+
|
|
46
|
+
const parsedName = conversions['DOMString'](name, {
|
|
47
|
+
context: `Failed to construct 'AudioWorkletNode': The given 'AudioWorkletProcessor' name`,
|
|
48
|
+
});
|
|
49
|
+
|
|
50
|
+
if (!context.audioWorklet[kProcessorRegistered](parsedName)) {
|
|
51
|
+
throw new DOMException(`Failed to construct 'AudioWorkletNode': processor '${parsedName}' is not registered in 'AudioWorklet'`, 'InvalidStateError');
|
|
52
|
+
}
|
|
53
|
+
|
|
54
|
+
// parsed version of the option to be passed to NAPI
|
|
55
|
+
const parsedOptions = {};
|
|
56
|
+
|
|
57
|
+
if (options && (typeof options !== 'object' || options === null)) {
|
|
58
|
+
throw new TypeError('Failed to construct \'AudioWorkletNode\': argument 3 is not of type \'AudioWorkletNodeOptions\'');
|
|
59
|
+
}
|
|
60
|
+
|
|
61
|
+
if (options && options.numberOfInputs !== undefined) {
|
|
62
|
+
parsedOptions.numberOfInputs = conversions['unsigned long'](options.numberOfInputs, {
|
|
63
|
+
enforceRange: true,
|
|
64
|
+
context: `Failed to construct 'AudioWorkletNode': Failed to read the 'numberOfInputs' property from AudioWorkletNodeOptions: The provided value (${options.numberOfInputs}})`,
|
|
65
|
+
});
|
|
66
|
+
} else {
|
|
67
|
+
parsedOptions.numberOfInputs = 1;
|
|
68
|
+
}
|
|
69
|
+
|
|
70
|
+
if (options && options.numberOfOutputs !== undefined) {
|
|
71
|
+
parsedOptions.numberOfOutputs = conversions['unsigned long'](options.numberOfOutputs, {
|
|
72
|
+
enforceRange: true,
|
|
73
|
+
context: `Failed to construct 'AudioWorkletNode': Failed to read the 'numberOfOutputs' property from AudioWorkletNodeOptions: The provided value (${options.numberOfOutputs}})`,
|
|
74
|
+
});
|
|
75
|
+
} else {
|
|
76
|
+
parsedOptions.numberOfOutputs = 1;
|
|
77
|
+
}
|
|
78
|
+
|
|
79
|
+
// If outputChannelCount exists,
|
|
80
|
+
// - If any value in outputChannelCount is zero or greater than the implementation’s maximum number of channels, throw a NotSupportedError and abort the remaining steps.
|
|
81
|
+
// - If the length of outputChannelCount does not equal numberOfOutputs, throw an IndexSizeError and abort the remaining steps.
|
|
82
|
+
// - If both numberOfInputs and numberOfOutputs are 1, set the channel count of the node output to the one value in outputChannelCount.
|
|
83
|
+
// - Otherwise set the channel count of the kth output of the node to the kth element of outputChannelCount sequence and return.
|
|
84
|
+
if (options && options.outputChannelCount !== undefined) {
|
|
85
|
+
try {
|
|
86
|
+
parsedOptions.outputChannelCount = toSanitizedSequence(options.outputChannelCount, Uint32Array);
|
|
87
|
+
} catch (err) {
|
|
88
|
+
throw new TypeError(`Failed to construct 'AudioWorkletNode': Failed to read the 'outputChannelCount' property from AudioWorkletNodeOptions: The provided value ${err.message}`);
|
|
89
|
+
}
|
|
90
|
+
|
|
91
|
+
parsedOptions.outputChannelCount.forEach((value, index) => {
|
|
92
|
+
if (value <= 0 || value > IMPLEMENTATION_MAX_NUMBER_OF_CHANNELS) {
|
|
93
|
+
throw new DOMException(`Failed to construct 'AudioWorkletNode': Invalid 'outputChannelCount' property from AudioWorkletNodeOptions: Value at index ${index} in outside supported range [1, 32]`, 'NotSupportedError');
|
|
94
|
+
}
|
|
95
|
+
});
|
|
96
|
+
|
|
97
|
+
if (parsedOptions.numberOfOutputs !== parsedOptions.outputChannelCount.length) {
|
|
98
|
+
throw new DOMException(`Failed to construct 'AudioWorkletNode': Invalid 'outputChannelCount' property from AudioWorkletNodeOptions: 'outputChannelCount' length (${parsedOptions.outputChannelCount.length}) does not equal 'numberOfOutputs' (${parsedOptions.numberOfOutputs})`, 'IndexSizeError');
|
|
99
|
+
}
|
|
100
|
+
} else {
|
|
101
|
+
// If outputChannelCount does not exists,
|
|
102
|
+
// - If both numberOfInputs and numberOfOutputs are 1, set the initial channel count of the node output to 1 and return.
|
|
103
|
+
// NOTE: For this case, the output chanel count will change to computedNumberOfChannels dynamically based on the input and the channelCountMode at runtime.
|
|
104
|
+
// - Otherwise set the channel count of each output of the node to 1 and return.
|
|
105
|
+
|
|
106
|
+
// @note - not sure what this means, let's go simple
|
|
107
|
+
parsedOptions.outputChannelCount = new Uint32Array(parsedOptions.numberOfOutputs);
|
|
108
|
+
parsedOptions.outputChannelCount.fill(1);
|
|
109
|
+
}
|
|
110
|
+
|
|
111
|
+
// @todo
|
|
112
|
+
// - This should be a "record", let's treat it as a raw object of now
|
|
113
|
+
// - Check if this needs to checked against the declared `parameterDescriptors`
|
|
114
|
+
if (options && options.parameterData !== undefined) {
|
|
115
|
+
if (typeof options.parameterData === 'object' && options.parameterData !== null) {
|
|
116
|
+
parsedOptions.parameterData = {};
|
|
117
|
+
|
|
118
|
+
for (let [key, value] in Object.entries(options.parameterData)) {
|
|
119
|
+
const parsedKey = conversions['DOMString'](key, {
|
|
120
|
+
context: `Failed to construct 'AudioWorkletNode': Invalid 'parameterData' property from AudioWorkletNodeOptions: Invalid key (${key})`,
|
|
121
|
+
});
|
|
122
|
+
|
|
123
|
+
const parsedValue = conversions['double'](value, {
|
|
124
|
+
context: `Failed to construct 'AudioWorkletNode': Invalid 'parameterData' property from AudioWorkletNodeOptions: Invalid value for key ${parsedKey}`,
|
|
125
|
+
});
|
|
126
|
+
|
|
127
|
+
parsedOptions.parameterData[parsedKey] = parsedValue;
|
|
128
|
+
}
|
|
129
|
+
} else {
|
|
130
|
+
throw new TypeError(`Failed to construct 'AudioWorkletNode': Invalid 'parameterData' property from AudioWorkletNodeOptions: 'outputChannelCount' length (${parsedOptions.outputChannelCount.length}) does not equal 'numberOfOutputs' (${parsedOptions.numberOfOutputs})`);
|
|
131
|
+
}
|
|
132
|
+
} else {
|
|
133
|
+
parsedOptions.parameterData = {};
|
|
134
|
+
}
|
|
135
|
+
|
|
136
|
+
// These ones are for the JS processor
|
|
137
|
+
if (options && options.processorOptions !== undefined) {
|
|
138
|
+
if (typeof options.processorOptions === 'object' && options.processorOptions !== null) {
|
|
139
|
+
parsedOptions.processorOptions = Object.assign({}, options.processorOptions);
|
|
140
|
+
} else {
|
|
141
|
+
throw new TypeError(`Failed to construct 'AudioWorkletNode': Invalid 'processorOptions' property from AudioWorkletNodeOptions: 'processorOptions' is not an object`);
|
|
142
|
+
}
|
|
143
|
+
} else {
|
|
144
|
+
parsedOptions.processorOptions = {};
|
|
145
|
+
}
|
|
146
|
+
|
|
147
|
+
// AudioNodeOptions
|
|
148
|
+
if (options && options.channelCount !== undefined) {
|
|
149
|
+
parsedOptions.channelCount = conversions['unsigned long'](options.channelCount, {
|
|
150
|
+
enforceRange: true,
|
|
151
|
+
context: `Failed to construct 'AudioWorkletNode': Failed to read the 'channelCount' property from AudioWorkletNodeOptions: The provided value '${options.channelCount}'`,
|
|
152
|
+
});
|
|
153
|
+
|
|
154
|
+
// if we delegate this check to Rust, this can poison a Mutex
|
|
155
|
+
// (probably the `audio_param_descriptor_channel` one)
|
|
156
|
+
if (parsedOptions.channelCount <= 0 || parsedOptions.channelCount > IMPLEMENTATION_MAX_NUMBER_OF_CHANNELS) {
|
|
157
|
+
throw new DOMException(`Failed to construct 'AudioWorkletNode': Invalid 'channelCount' property: Number of channels: ${parsedOptions.channelCount} is outside range [1, 32]`, 'NotSupportedError')
|
|
158
|
+
}
|
|
159
|
+
}
|
|
160
|
+
|
|
161
|
+
if (options && options.channelCountMode !== undefined) {
|
|
162
|
+
if (!['max', 'clamped-max', 'explicit'].includes(options.channelCountMode)) {
|
|
163
|
+
throw new TypeError(`Failed to construct 'AudioWorkletNode': Failed to read the 'channelCountMode' property from 'AudioNodeOptions': The provided value '${options.channelCountMode}' is not a valid enum value of type ChannelCountMode`);
|
|
164
|
+
}
|
|
165
|
+
|
|
166
|
+
parsedOptions.channelCountMode = conversions['DOMString'](options.channelCountMode, {
|
|
167
|
+
context: `Failed to construct 'AudioWorkletNode': Failed to read the 'channelCount' property from AudioWorkletNodeOptions: The provided value '${options.channelCountMode}'`,
|
|
168
|
+
});
|
|
169
|
+
}
|
|
170
|
+
|
|
171
|
+
if (options && options.channelInterpretation !== undefined) {
|
|
172
|
+
if (!['speakers', 'discrete'].includes(options.channelInterpretation)) {
|
|
173
|
+
throw new TypeError(`Failed to construct 'AudioWorkletNode': Failed to read the 'channelInterpretation' property from 'AudioNodeOptions': The provided value '${options.channelInterpretation}' is not a valid enum value of type ChannelCountMode`);
|
|
174
|
+
}
|
|
175
|
+
|
|
176
|
+
parsedOptions.channelInterpretation = conversions['DOMString'](options.channelInterpretation, {
|
|
177
|
+
context: `Failed to construct 'AudioWorkletNode': Failed to read the 'channelInterpretation' property from AudioWorkletNodeOptions: The provided value '${options.channelInterpretation}'`,
|
|
178
|
+
});
|
|
179
|
+
}
|
|
180
|
+
|
|
181
|
+
// Create NapiAudioWorkletNode
|
|
182
|
+
const parameterDescriptors = context.audioWorklet[kGetParameterDescriptors](parsedName);
|
|
183
|
+
let napiObj;
|
|
184
|
+
|
|
185
|
+
try {
|
|
186
|
+
napiObj = new nativeBinding.AudioWorkletNode(
|
|
187
|
+
context[kNapiObj],
|
|
188
|
+
parsedName,
|
|
189
|
+
parsedOptions,
|
|
190
|
+
parameterDescriptors,
|
|
191
|
+
);
|
|
192
|
+
} catch (err) {
|
|
193
|
+
throwSanitizedError(err);
|
|
194
|
+
}
|
|
195
|
+
|
|
196
|
+
super(context, {
|
|
197
|
+
[kNapiObj]: napiObj,
|
|
198
|
+
});
|
|
199
|
+
|
|
200
|
+
let parameters = new Map();
|
|
201
|
+
|
|
202
|
+
for (let name in this[kNapiObj].parameters) {
|
|
203
|
+
const audioParam = new jsExport.AudioParam({
|
|
204
|
+
[kNapiObj]: this[kNapiObj].parameters[name],
|
|
205
|
+
});
|
|
206
|
+
|
|
207
|
+
parameters.set(name, audioParam);
|
|
208
|
+
}
|
|
209
|
+
|
|
210
|
+
this.#parameters = new AudioParamMap({
|
|
211
|
+
[kPrivateConstructor]: true,
|
|
212
|
+
parameters,
|
|
213
|
+
});
|
|
214
|
+
|
|
215
|
+
// Create JS processor
|
|
216
|
+
this.#port = context.audioWorklet[kCreateProcessor](
|
|
217
|
+
parsedName,
|
|
218
|
+
parsedOptions,
|
|
219
|
+
napiObj.id,
|
|
220
|
+
);
|
|
221
|
+
|
|
222
|
+
this.#port.on('message', msg => {
|
|
223
|
+
// ErrorEvent named processorerror
|
|
224
|
+
switch (msg.cmd) {
|
|
225
|
+
case 'node-web-audio-api:worklet:ctor-error': {
|
|
226
|
+
const message = `Failed to construct '${parsedName}' AudioWorkletProcessor: ${msg.err.message}`;
|
|
227
|
+
const event = new ErrorEvent('processorerror', { message, error: msg.err });
|
|
228
|
+
propagateEvent(this, event);
|
|
229
|
+
break;
|
|
230
|
+
}
|
|
231
|
+
case 'node-web-audio-api:worklet:process-invalid': {
|
|
232
|
+
const message = `Failed to execute 'process' on '${parsedName}' AudioWorkletProcessor: ${msg.err.message}`;
|
|
233
|
+
const error = new TypeError(message);
|
|
234
|
+
error.stack = msg.err.stack.replace(msg.err.message, message);
|
|
235
|
+
|
|
236
|
+
const event = new ErrorEvent('processorerror', { message, error });
|
|
237
|
+
propagateEvent(this, event);
|
|
238
|
+
break;
|
|
239
|
+
}
|
|
240
|
+
case 'node-web-audio-api:worklet:process-error': {
|
|
241
|
+
const message = `Failed to execute 'process' on '${parsedName}' AudioWorkletProcessor: ${msg.err.message}`;
|
|
242
|
+
const event = new ErrorEvent('processorerror', { message, error: msg.err });
|
|
243
|
+
propagateEvent(this, event);
|
|
244
|
+
break;
|
|
245
|
+
}
|
|
246
|
+
}
|
|
247
|
+
});
|
|
248
|
+
}
|
|
249
|
+
|
|
250
|
+
get parameters() {
|
|
251
|
+
if (!(this instanceof AudioWorkletNode)) {
|
|
252
|
+
throw new TypeError('Invalid Invocation: Value of \'this\' must be of type \'AudioWorkletNode\'');
|
|
253
|
+
}
|
|
254
|
+
|
|
255
|
+
return this.#parameters;
|
|
256
|
+
}
|
|
257
|
+
|
|
258
|
+
get port() {
|
|
259
|
+
if (!(this instanceof AudioWorkletNode)) {
|
|
260
|
+
throw new TypeError('Invalid Invocation: Value of \'this\' must be of type \'AudioWorkletNode\'');
|
|
261
|
+
}
|
|
262
|
+
|
|
263
|
+
return this.#port;
|
|
264
|
+
}
|
|
265
|
+
}
|
|
266
|
+
|
|
267
|
+
Object.defineProperties(AudioWorkletNode, {
|
|
268
|
+
length: {
|
|
269
|
+
__proto__: null,
|
|
270
|
+
writable: false,
|
|
271
|
+
enumerable: false,
|
|
272
|
+
configurable: true,
|
|
273
|
+
value: 2,
|
|
274
|
+
},
|
|
275
|
+
});
|
|
276
|
+
|
|
277
|
+
Object.defineProperties(AudioWorkletNode.prototype, {
|
|
278
|
+
[Symbol.toStringTag]: {
|
|
279
|
+
__proto__: null,
|
|
280
|
+
writable: false,
|
|
281
|
+
enumerable: false,
|
|
282
|
+
configurable: true,
|
|
283
|
+
value: 'AudioWorkletNode',
|
|
284
|
+
},
|
|
285
|
+
parameters: kEnumerableProperty,
|
|
286
|
+
port: kEnumerableProperty,
|
|
287
|
+
});
|
|
288
|
+
|
|
289
|
+
return AudioWorkletNode;
|
|
290
|
+
};
|
package/js/BaseAudioContext.js
CHANGED
|
@@ -24,12 +24,16 @@ const {
|
|
|
24
24
|
} = require('./lib/utils.js');
|
|
25
25
|
const {
|
|
26
26
|
kNapiObj,
|
|
27
|
+
kPrivateConstructor,
|
|
27
28
|
} = require('./lib/symbols.js');
|
|
28
29
|
|
|
30
|
+
const AudioWorklet = require('./AudioWorklet.js');
|
|
31
|
+
|
|
29
32
|
module.exports = (jsExport, _nativeBinding) => {
|
|
30
33
|
class BaseAudioContext extends EventTarget {
|
|
31
|
-
#
|
|
34
|
+
#audioWorklet = null;
|
|
32
35
|
#destination = null;
|
|
36
|
+
#listener = null;
|
|
33
37
|
|
|
34
38
|
constructor(options) {
|
|
35
39
|
// Make constructor "private"
|
|
@@ -47,24 +51,23 @@ module.exports = (jsExport, _nativeBinding) => {
|
|
|
47
51
|
...kHiddenProperty,
|
|
48
52
|
});
|
|
49
53
|
|
|
50
|
-
this.#
|
|
54
|
+
this.#audioWorklet = new AudioWorklet({
|
|
55
|
+
[kPrivateConstructor]: true,
|
|
56
|
+
workletId: this[kNapiObj].workletId,
|
|
57
|
+
sampleRate: this[kNapiObj].sampleRate,
|
|
58
|
+
});
|
|
59
|
+
|
|
51
60
|
this.#destination = new jsExport.AudioDestinationNode(this, {
|
|
52
61
|
[kNapiObj]: this[kNapiObj].destination,
|
|
53
62
|
});
|
|
54
63
|
}
|
|
55
64
|
|
|
56
|
-
get
|
|
65
|
+
get audioWorklet() {
|
|
57
66
|
if (!(this instanceof BaseAudioContext)) {
|
|
58
67
|
throw new TypeError('Invalid Invocation: Value of \'this\' must be of type \'BaseAudioContext\'');
|
|
59
68
|
}
|
|
60
69
|
|
|
61
|
-
|
|
62
|
-
this.#listener = new jsExport.AudioListener({
|
|
63
|
-
[kNapiObj]: this[kNapiObj].listener,
|
|
64
|
-
});
|
|
65
|
-
}
|
|
66
|
-
|
|
67
|
-
return this.#listener;
|
|
70
|
+
return this.#audioWorklet;
|
|
68
71
|
}
|
|
69
72
|
|
|
70
73
|
get destination() {
|
|
@@ -75,6 +78,20 @@ module.exports = (jsExport, _nativeBinding) => {
|
|
|
75
78
|
return this.#destination;
|
|
76
79
|
}
|
|
77
80
|
|
|
81
|
+
get listener() {
|
|
82
|
+
if (!(this instanceof BaseAudioContext)) {
|
|
83
|
+
throw new TypeError('Invalid Invocation: Value of \'this\' must be of type \'BaseAudioContext\'');
|
|
84
|
+
}
|
|
85
|
+
|
|
86
|
+
if (this.#listener === null) {
|
|
87
|
+
this.#listener = new jsExport.AudioListener({
|
|
88
|
+
[kNapiObj]: this[kNapiObj].listener,
|
|
89
|
+
});
|
|
90
|
+
}
|
|
91
|
+
|
|
92
|
+
return this.#listener;
|
|
93
|
+
}
|
|
94
|
+
|
|
78
95
|
get sampleRate() {
|
|
79
96
|
if (!(this instanceof BaseAudioContext)) {
|
|
80
97
|
throw new TypeError('Invalid Invocation: Value of \'this\' must be of type \'BaseAudioContext\'');
|
|
@@ -91,6 +108,15 @@ module.exports = (jsExport, _nativeBinding) => {
|
|
|
91
108
|
return this[kNapiObj].currentTime;
|
|
92
109
|
}
|
|
93
110
|
|
|
111
|
+
// @todo - implement in upstream crate + pass to AudioWorkletGlobalScope
|
|
112
|
+
// get renderQuantumSize() {
|
|
113
|
+
// if (!(this instanceof BaseAudioContext)) {
|
|
114
|
+
// throw new TypeError("Invalid Invocation: Value of 'this' must be of type 'BaseAudioContext'");
|
|
115
|
+
// }
|
|
116
|
+
|
|
117
|
+
// return this[kNapiObj].renderQuantumSize;
|
|
118
|
+
// }
|
|
119
|
+
|
|
94
120
|
get state() {
|
|
95
121
|
if (!(this instanceof BaseAudioContext)) {
|
|
96
122
|
throw new TypeError('Invalid Invocation: Value of \'this\' must be of type \'BaseAudioContext\'');
|
|
@@ -99,9 +125,6 @@ module.exports = (jsExport, _nativeBinding) => {
|
|
|
99
125
|
return this[kNapiObj].state;
|
|
100
126
|
}
|
|
101
127
|
|
|
102
|
-
// renderQuantumSize
|
|
103
|
-
// audioWorklet
|
|
104
|
-
|
|
105
128
|
get onstatechange() {
|
|
106
129
|
if (!(this instanceof BaseAudioContext)) {
|
|
107
130
|
throw new TypeError('Invalid Invocation: Value of \'this\' must be of type \'BaseAudioContext\'');
|
package/js/Events.js
CHANGED
|
@@ -6,8 +6,12 @@ class OfflineAudioCompletionEvent extends Event {
|
|
|
6
6
|
constructor(type, eventInitDict) {
|
|
7
7
|
super(type);
|
|
8
8
|
|
|
9
|
-
if (
|
|
10
|
-
|
|
9
|
+
if (
|
|
10
|
+
typeof eventInitDict !== 'object'
|
|
11
|
+
|| eventInitDict === null
|
|
12
|
+
|| !('renderedBuffer' in eventInitDict)
|
|
13
|
+
) {
|
|
14
|
+
throw TypeError(`Failed to construct 'OfflineAudioCompletionEvent': Invalid 'OfflineAudioCompletionEventInit' dict given`);
|
|
11
15
|
}
|
|
12
16
|
|
|
13
17
|
this.#renderedBuffer = eventInitDict.renderedBuffer;
|
|
@@ -26,7 +30,6 @@ Object.defineProperties(OfflineAudioCompletionEvent.prototype, {
|
|
|
26
30
|
configurable: true,
|
|
27
31
|
value: 'OfflineAudioCompletionEvent',
|
|
28
32
|
},
|
|
29
|
-
|
|
30
33
|
renderedBuffer: kEnumerableProperty,
|
|
31
34
|
});
|
|
32
35
|
|
|
@@ -43,7 +46,7 @@ class AudioProcessingEvent extends Event {
|
|
|
43
46
|
|| !('inputBuffer' in eventInitDict)
|
|
44
47
|
|| !('outputBuffer' in eventInitDict)
|
|
45
48
|
) {
|
|
46
|
-
throw TypeError(`Failed to construct 'AudioProcessingEvent': Invalid 'AudioProcessingEventInit' given`);
|
|
49
|
+
throw TypeError(`Failed to construct 'AudioProcessingEvent': Invalid 'AudioProcessingEventInit' dict given`);
|
|
47
50
|
}
|
|
48
51
|
|
|
49
52
|
super(type);
|
|
@@ -74,11 +77,154 @@ Object.defineProperties(AudioProcessingEvent.prototype, {
|
|
|
74
77
|
configurable: true,
|
|
75
78
|
value: 'AudioProcessingEvent',
|
|
76
79
|
},
|
|
77
|
-
|
|
78
80
|
playbackTime: kEnumerableProperty,
|
|
79
81
|
inputBuffer: kEnumerableProperty,
|
|
80
82
|
outputBuffer: kEnumerableProperty,
|
|
81
83
|
});
|
|
82
84
|
|
|
85
|
+
// Event fired by AudioRenderCapacity; carries the load metrics of the
// rendering thread. All four init members are mandatory.
class AudioRenderCapacityEvent extends Event {
  #timestamp = 0;
  #averageLoad = 0;
  #peakLoad = 0;
  #underrunRatio = 0;

  constructor(type, eventInitDict) {
    // validate before calling super() so an invalid dict never produces
    // a half-initialized event (no `this` access happens here)
    const isValidDict = typeof eventInitDict === 'object'
      && eventInitDict !== null
      && 'timestamp' in eventInitDict
      && 'averageLoad' in eventInitDict
      && 'peakLoad' in eventInitDict
      && 'underrunRatio' in eventInitDict;

    if (!isValidDict) {
      throw TypeError(`Failed to construct 'AudioRenderCapacityEvent': Invalid 'AudioRenderCapacityEventInit' dict given`);
    }

    super(type);

    const { timestamp, averageLoad, peakLoad, underrunRatio } = eventInitDict;
    this.#timestamp = timestamp;
    this.#averageLoad = averageLoad;
    this.#peakLoad = peakLoad;
    this.#underrunRatio = underrunRatio;
  }

  // context time at which the measurement interval started
  get timestamp() {
    return this.#timestamp;
  }

  // average render load over the interval
  get averageLoad() {
    return this.#averageLoad;
  }

  // maximum render load observed over the interval
  get peakLoad() {
    return this.#peakLoad;
  }

  // fraction of render quanta that underran over the interval
  get underrunRatio() {
    return this.#underrunRatio;
  }
}
|
|
127
|
+
|
|
128
|
+
Object.defineProperties(AudioRenderCapacityEvent.prototype, {
|
|
129
|
+
[Symbol.toStringTag]: {
|
|
130
|
+
__proto__: null,
|
|
131
|
+
writable: false,
|
|
132
|
+
enumerable: false,
|
|
133
|
+
configurable: true,
|
|
134
|
+
value: 'AudioRenderCapacityEvent',
|
|
135
|
+
},
|
|
136
|
+
timestamp: kEnumerableProperty,
|
|
137
|
+
averageLoad: kEnumerableProperty,
|
|
138
|
+
peakLoad: kEnumerableProperty,
|
|
139
|
+
underrunRatio: kEnumerableProperty,
|
|
140
|
+
});
|
|
141
|
+
|
|
142
|
+
// https://html.spec.whatwg.org/multipage/webappapis.html#errorevent
|
|
143
|
+
// interface ErrorEvent : Event {
|
|
144
|
+
// constructor(DOMString type, optional ErrorEventInit eventInitDict = {});
|
|
145
|
+
|
|
146
|
+
// readonly attribute DOMString message;
|
|
147
|
+
// readonly attribute USVString filename;
|
|
148
|
+
// readonly attribute unsigned long lineno;
|
|
149
|
+
// readonly attribute unsigned long colno;
|
|
150
|
+
// readonly attribute any error;
|
|
151
|
+
// };
|
|
152
|
+
|
|
153
|
+
// dictionary ErrorEventInit : EventInit {
|
|
154
|
+
// DOMString message = "";
|
|
155
|
+
// USVString filename = "";
|
|
156
|
+
// unsigned long lineno = 0;
|
|
157
|
+
// unsigned long colno = 0;
|
|
158
|
+
// any error;
|
|
159
|
+
// };
|
|
160
|
+
// Minimal ErrorEvent per https://html.spec.whatwg.org/multipage/webappapis.html#errorevent
// All ErrorEventInit members are optional; invalid/missing members silently
// fall back to their spec defaults ('', 0, undefined).
class ErrorEvent extends Event {
  #message = '';
  #filename = '';
  #lineno = 0;
  #colno = 0;
  #error = undefined;

  constructor(type, eventInitDict = {}) {
    super(type);

    if (eventInitDict) {
      const { message, filename, lineno, colno, error } = eventInitDict;

      if (typeof message === 'string') {
        this.#message = message;
      }

      if (typeof filename === 'string') {
        this.#filename = filename;
      }

      if (Number.isFinite(lineno)) {
        this.#lineno = lineno;
      }

      if (Number.isFinite(colno)) {
        this.#colno = colno;
      }

      if (error instanceof Error) {
        this.#error = error;
      }
    }
  }

  get message() {
    return this.#message;
  }

  get filename() {
    return this.#filename;
  }

  get lineno() {
    return this.#lineno;
  }

  get colno() {
    return this.#colno;
  }

  get error() {
    return this.#error;
  }
}
|
|
211
|
+
|
|
212
|
+
Object.defineProperties(ErrorEvent.prototype, {
|
|
213
|
+
[Symbol.toStringTag]: {
|
|
214
|
+
__proto__: null,
|
|
215
|
+
writable: false,
|
|
216
|
+
enumerable: false,
|
|
217
|
+
configurable: true,
|
|
218
|
+
value: 'ErrorEvent',
|
|
219
|
+
},
|
|
220
|
+
message: kEnumerableProperty,
|
|
221
|
+
filename: kEnumerableProperty,
|
|
222
|
+
lineno: kEnumerableProperty,
|
|
223
|
+
colno: kEnumerableProperty,
|
|
224
|
+
error: kEnumerableProperty,
|
|
225
|
+
});
|
|
226
|
+
|
|
83
227
|
module.exports.OfflineAudioCompletionEvent = OfflineAudioCompletionEvent;
|
|
84
228
|
module.exports.AudioProcessingEvent = AudioProcessingEvent;
|
|
229
|
+
module.exports.AudioRenderCapacityEvent = AudioRenderCapacityEvent;
|
|
230
|
+
module.exports.ErrorEvent = ErrorEvent;
|
|
@@ -12,8 +12,10 @@ const {
|
|
|
12
12
|
} = require('./lib/utils.js');
|
|
13
13
|
const {
|
|
14
14
|
kNapiObj,
|
|
15
|
+
kWorkletRelease,
|
|
15
16
|
kOnStateChange,
|
|
16
17
|
kOnComplete,
|
|
18
|
+
kCheckProcessorsCreated,
|
|
17
19
|
} = require('./lib/symbols.js');
|
|
18
20
|
|
|
19
21
|
module.exports = function patchOfflineAudioContext(jsExport, nativeBinding) {
|
|
@@ -140,6 +142,9 @@ module.exports = function patchOfflineAudioContext(jsExport, nativeBinding) {
|
|
|
140
142
|
throw new TypeError(`Invalid Invocation: Value of 'this' must be of type 'OfflineAudioContext'`);
|
|
141
143
|
}
|
|
142
144
|
|
|
145
|
+
// ensure all AudioWorkletProcessors have finished their instantiation
|
|
146
|
+
await this.audioWorklet[kCheckProcessorsCreated]();
|
|
147
|
+
|
|
143
148
|
let nativeAudioBuffer;
|
|
144
149
|
|
|
145
150
|
try {
|
|
@@ -148,7 +153,10 @@ module.exports = function patchOfflineAudioContext(jsExport, nativeBinding) {
|
|
|
148
153
|
throwSanitizedError(err);
|
|
149
154
|
}
|
|
150
155
|
|
|
151
|
-
//
|
|
156
|
+
// release audio worklet, if any
|
|
157
|
+
await this.audioWorklet[kWorkletRelease]();
|
|
158
|
+
|
|
159
|
+
// workaround the fact that this event seems to be triggered before
|
|
152
160
|
// startRendering fulfills and that we want to return the exact same instance
|
|
153
161
|
if (this.#renderedBuffer === null) {
|
|
154
162
|
this.#renderedBuffer = new jsExport.AudioBuffer({ [kNapiObj]: nativeAudioBuffer });
|
package/js/lib/symbols.js
CHANGED
|
@@ -1,6 +1,11 @@
|
|
|
1
1
|
module.exports.kNapiObj = Symbol('node-web-audio-api:napi-obj');
|
|
2
2
|
module.exports.kAudioBuffer = Symbol('node-web-audio-api:audio-buffer');
|
|
3
|
-
|
|
3
|
+
module.exports.kPrivateConstructor = Symbol('node-web-audio-api:private-constructor');
|
|
4
|
+
module.exports.kCreateProcessor = Symbol('node-web-audio-api:create-processor');
|
|
5
|
+
module.exports.kProcessorRegistered = Symbol('node-web-audio-api:processor-registered');
|
|
6
|
+
module.exports.kGetParameterDescriptors = Symbol('node-web-audio-api:get-parameter-descriptors');
|
|
7
|
+
module.exports.kWorkletRelease = Symbol('node-web-audio-api:worklet-release');
|
|
8
|
+
module.exports.kCheckProcessorsCreated = Symbol('node-web-audio-api:check-processor-created');
|
|
4
9
|
|
|
5
10
|
// semi-private keys for events listeners
|
|
6
11
|
|
|
@@ -17,4 +22,5 @@ module.exports.kOnComplete = Symbol.for('node-web-audio-api:oncomplete');
|
|
|
17
22
|
module.exports.kOnEnded = Symbol.for('node-web-audio-api:onended');
|
|
18
23
|
// # ScriptProcessorNode
|
|
19
24
|
module.exports.kOnAudioProcess = Symbol.for('node-web-audio-api:onaudioprocess');
|
|
20
|
-
|
|
25
|
+
// # AudioRenderCapacity
|
|
26
|
+
module.exports.kOnUpdate = Symbol.for('node-web-audio-api:onupdate');
|