node-web-audio-api 0.19.0 → 0.21.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/CHANGELOG.md +11 -0
- package/README.md +1 -3
- package/index.cjs +81 -83
- package/index.mjs +12 -1
- package/js/AnalyserNode.js +0 -3
- package/js/AudioBuffer.js +10 -11
- package/js/AudioBufferSourceNode.js +0 -6
- package/js/AudioContext.js +44 -13
- package/js/AudioDestinationNode.js +1 -1
- package/js/AudioListener.js +2 -2
- package/js/AudioParamMap.js +88 -0
- package/js/AudioRenderCapacity.js +117 -0
- package/js/AudioScheduledSourceNode.js +15 -0
- package/js/AudioWorklet.js +261 -0
- package/js/AudioWorkletGlobalScope.js +303 -0
- package/js/AudioWorkletNode.js +290 -0
- package/js/BaseAudioContext.js +51 -13
- package/js/BiquadFilterNode.js +0 -3
- package/js/ChannelMergerNode.js +0 -3
- package/js/ChannelSplitterNode.js +0 -3
- package/js/ConstantSourceNode.js +0 -6
- package/js/ConvolverNode.js +0 -3
- package/js/DelayNode.js +0 -3
- package/js/DynamicsCompressorNode.js +0 -3
- package/js/Events.js +230 -0
- package/js/GainNode.js +0 -3
- package/js/IIRFilterNode.js +0 -3
- package/js/MediaStreamAudioSourceNode.js +0 -3
- package/js/OfflineAudioContext.js +57 -34
- package/js/OscillatorNode.js +0 -6
- package/js/PannerNode.js +0 -3
- package/js/ScriptProcessorNode.js +179 -0
- package/js/StereoPannerNode.js +0 -3
- package/js/WaveShaperNode.js +0 -3
- package/js/lib/events.js +6 -16
- package/js/lib/symbols.js +23 -2
- package/load-native.cjs +87 -0
- package/node-web-audio-api.darwin-arm64.node +0 -0
- package/node-web-audio-api.darwin-x64.node +0 -0
- package/node-web-audio-api.linux-arm-gnueabihf.node +0 -0
- package/node-web-audio-api.linux-arm64-gnu.node +0 -0
- package/node-web-audio-api.linux-x64-gnu.node +0 -0
- package/node-web-audio-api.win32-arm64-msvc.node +0 -0
- package/node-web-audio-api.win32-x64-msvc.node +0 -0
- package/package.json +3 -1
- package/TODOS.md +0 -149
- package/js/monkey-patch.js +0 -77
- package/run-wpt.md +0 -27
- package/simple-test.cjs +0 -20
- package/simple-test.mjs +0 -20
|
@@ -0,0 +1,179 @@
|
|
|
1
|
+
/* eslint-disable no-unused-vars */
|
|
2
|
+
const conversions = require('webidl-conversions');
|
|
3
|
+
const {
|
|
4
|
+
toSanitizedSequence,
|
|
5
|
+
} = require('./lib/cast.js');
|
|
6
|
+
const {
|
|
7
|
+
isFunction,
|
|
8
|
+
kEnumerableProperty,
|
|
9
|
+
} = require('./lib/utils.js');
|
|
10
|
+
const {
|
|
11
|
+
throwSanitizedError,
|
|
12
|
+
} = require('./lib/errors.js');
|
|
13
|
+
const {
|
|
14
|
+
kNapiObj,
|
|
15
|
+
kAudioBuffer,
|
|
16
|
+
kOnAudioProcess,
|
|
17
|
+
} = require('./lib/symbols.js');
|
|
18
|
+
const {
|
|
19
|
+
propagateEvent,
|
|
20
|
+
} = require('./lib/events.js');
|
|
21
|
+
/* eslint-enable no-unused-vars */
|
|
22
|
+
|
|
23
|
+
const AudioNode = require('./AudioNode.js');
|
|
24
|
+
|
|
25
|
+
module.exports = (jsExport, nativeBinding) => {
|
|
26
|
+
class ScriptProcessorNode extends AudioNode {
|
|
27
|
+
|
|
28
|
+
#onaudioprocess = null;
|
|
29
|
+
|
|
30
|
+
constructor(context, options) {
|
|
31
|
+
|
|
32
|
+
if (arguments.length < 1) {
|
|
33
|
+
throw new TypeError(`Failed to construct 'ScriptProcessorNode': 1 argument required, but only ${arguments.length} present`);
|
|
34
|
+
}
|
|
35
|
+
|
|
36
|
+
if (!(context instanceof jsExport.BaseAudioContext)) {
|
|
37
|
+
throw new TypeError(`Failed to construct 'ScriptProcessorNode': argument 1 is not of type BaseAudioContext`);
|
|
38
|
+
}
|
|
39
|
+
|
|
40
|
+
// parsed version of the option to be passed to NAPI
|
|
41
|
+
const parsedOptions = {};
|
|
42
|
+
|
|
43
|
+
if (options && typeof options !== 'object') {
|
|
44
|
+
throw new TypeError('Failed to construct \'ScriptProcessorNode\': argument 2 is not of type \'ScriptProcessorNodeOptions\'');
|
|
45
|
+
}
|
|
46
|
+
|
|
47
|
+
// IDL defines bufferSize default value as 0
|
|
48
|
+
// cf. https://webaudio.github.io/web-audio-api/#dom-baseaudiocontext-createscriptprocessor
|
|
49
|
+
// > If it’s not passed in, or if the value is 0, then the implementation
|
|
50
|
+
// > will choose the best buffer size for the given environment, which will
|
|
51
|
+
// > be constant power of 2 throughout the lifetime of the node.
|
|
52
|
+
if (options && options.bufferSize !== undefined && options.bufferSize !== 0) {
|
|
53
|
+
parsedOptions.bufferSize = conversions['unsigned long'](options.bufferSize, {
|
|
54
|
+
enforceRange: true,
|
|
55
|
+
context: `Failed to construct 'ScriptProcessorNode': Failed to read the 'bufferSize' property from ScriptProcessorNodeOptions: The provided value '${options.bufferSize}'`,
|
|
56
|
+
});
|
|
57
|
+
} else {
|
|
58
|
+
parsedOptions.bufferSize = 256;
|
|
59
|
+
}
|
|
60
|
+
|
|
61
|
+
if (options && options.numberOfInputChannels !== undefined) {
|
|
62
|
+
parsedOptions.numberOfInputChannels = conversions['unsigned long'](options.numberOfInputChannels, {
|
|
63
|
+
enforceRange: true,
|
|
64
|
+
context: `Failed to construct 'ScriptProcessorNode': Failed to read the 'numberOfInputChannels' property from ScriptProcessorNodeOptions: The provided value '${options.numberOfInputChannels}'`,
|
|
65
|
+
});
|
|
66
|
+
} else {
|
|
67
|
+
parsedOptions.numberOfInputChannels = 2;
|
|
68
|
+
}
|
|
69
|
+
|
|
70
|
+
if (options && options.numberOfOutputChannels !== undefined) {
|
|
71
|
+
parsedOptions.numberOfOutputChannels = conversions['unsigned long'](options.numberOfOutputChannels, {
|
|
72
|
+
enforceRange: true,
|
|
73
|
+
context: `Failed to construct 'ScriptProcessorNode': Failed to read the 'numberOfOutputChannels' property from ScriptProcessorNodeOptions: The provided value '${options.numberOfOutputChannels}'`,
|
|
74
|
+
});
|
|
75
|
+
} else {
|
|
76
|
+
parsedOptions.numberOfOutputChannels = 2;
|
|
77
|
+
}
|
|
78
|
+
|
|
79
|
+
if (options && options.channelCount !== undefined) {
|
|
80
|
+
parsedOptions.channelCount = conversions['unsigned long'](options.channelCount, {
|
|
81
|
+
enforceRange: true,
|
|
82
|
+
context: `Failed to construct 'ScriptProcessorNode': Failed to read the 'channelCount' property from ScriptProcessorNodeOptions: The provided value '${options.channelCount}'`,
|
|
83
|
+
});
|
|
84
|
+
}
|
|
85
|
+
|
|
86
|
+
if (options && options.channelCountMode !== undefined) {
|
|
87
|
+
parsedOptions.channelCountMode = conversions['DOMString'](options.channelCountMode, {
|
|
88
|
+
context: `Failed to construct 'ScriptProcessorNode': Failed to read the 'channelCount' property from ScriptProcessorNodeOptions: The provided value '${options.channelCountMode}'`,
|
|
89
|
+
});
|
|
90
|
+
}
|
|
91
|
+
|
|
92
|
+
if (options && options.channelInterpretation !== undefined) {
|
|
93
|
+
parsedOptions.channelInterpretation = conversions['DOMString'](options.channelInterpretation, {
|
|
94
|
+
context: `Failed to construct 'ScriptProcessorNode': Failed to read the 'channelInterpretation' property from ScriptProcessorNodeOptions: The provided value '${options.channelInterpretation}'`,
|
|
95
|
+
});
|
|
96
|
+
}
|
|
97
|
+
|
|
98
|
+
let napiObj;
|
|
99
|
+
|
|
100
|
+
try {
|
|
101
|
+
napiObj = new nativeBinding.ScriptProcessorNode(context[kNapiObj], parsedOptions);
|
|
102
|
+
} catch (err) {
|
|
103
|
+
throwSanitizedError(err);
|
|
104
|
+
}
|
|
105
|
+
|
|
106
|
+
super(context, {
|
|
107
|
+
[kNapiObj]: napiObj,
|
|
108
|
+
});
|
|
109
|
+
|
|
110
|
+
this[kNapiObj][kOnAudioProcess] = (err, rawEvent) => {
|
|
111
|
+
if (typeof rawEvent !== 'object' && !('type' in rawEvent)) {
|
|
112
|
+
throw new TypeError('Invalid [kOnStateChange] Invocation: rawEvent should have a type property');
|
|
113
|
+
}
|
|
114
|
+
|
|
115
|
+
const audioProcessingEventInit = {
|
|
116
|
+
playbackTime: rawEvent.playbackTime,
|
|
117
|
+
inputBuffer: new jsExport.AudioBuffer({ [kNapiObj]: rawEvent.inputBuffer }),
|
|
118
|
+
outputBuffer: new jsExport.AudioBuffer({ [kNapiObj]: rawEvent.outputBuffer }),
|
|
119
|
+
};
|
|
120
|
+
|
|
121
|
+
const event = new jsExport.AudioProcessingEvent('audioprocess', audioProcessingEventInit);
|
|
122
|
+
propagateEvent(this, event);
|
|
123
|
+
};
|
|
124
|
+
|
|
125
|
+
this[kNapiObj].listen_to_events();
|
|
126
|
+
}
|
|
127
|
+
|
|
128
|
+
get bufferSize() {
|
|
129
|
+
if (!(this instanceof ScriptProcessorNode)) {
|
|
130
|
+
throw new TypeError('Invalid Invocation: Value of \'this\' must be of type \'ScriptProcessorNode\'');
|
|
131
|
+
}
|
|
132
|
+
|
|
133
|
+
return this[kNapiObj].bufferSize;
|
|
134
|
+
}
|
|
135
|
+
|
|
136
|
+
get onaudioprocess() {
|
|
137
|
+
if (!(this instanceof ScriptProcessorNode)) {
|
|
138
|
+
throw new TypeError('Invalid Invocation: Value of \'this\' must be of type \'ScriptProcessorNode\'');
|
|
139
|
+
}
|
|
140
|
+
|
|
141
|
+
return this.#onaudioprocess;
|
|
142
|
+
}
|
|
143
|
+
|
|
144
|
+
set onaudioprocess(value) {
|
|
145
|
+
if (!(this instanceof ScriptProcessorNode)) {
|
|
146
|
+
throw new TypeError('Invalid Invocation: Value of \'this\' must be of type \'ScriptProcessorNode\'');
|
|
147
|
+
}
|
|
148
|
+
|
|
149
|
+
if (isFunction(value) || value === null) {
|
|
150
|
+
this.#onaudioprocess = value;
|
|
151
|
+
}
|
|
152
|
+
}
|
|
153
|
+
}
|
|
154
|
+
|
|
155
|
+
Object.defineProperties(ScriptProcessorNode, {
|
|
156
|
+
length: {
|
|
157
|
+
__proto__: null,
|
|
158
|
+
writable: false,
|
|
159
|
+
enumerable: false,
|
|
160
|
+
configurable: true,
|
|
161
|
+
value: 0,
|
|
162
|
+
},
|
|
163
|
+
});
|
|
164
|
+
|
|
165
|
+
Object.defineProperties(ScriptProcessorNode.prototype, {
|
|
166
|
+
[Symbol.toStringTag]: {
|
|
167
|
+
__proto__: null,
|
|
168
|
+
writable: false,
|
|
169
|
+
enumerable: false,
|
|
170
|
+
configurable: true,
|
|
171
|
+
value: 'ScriptProcessorNode',
|
|
172
|
+
},
|
|
173
|
+
bufferSize: kEnumerableProperty,
|
|
174
|
+
onaudioprocess: kEnumerableProperty,
|
|
175
|
+
|
|
176
|
+
});
|
|
177
|
+
|
|
178
|
+
return ScriptProcessorNode;
|
|
179
|
+
};
|
package/js/StereoPannerNode.js
CHANGED
package/js/WaveShaperNode.js
CHANGED
package/js/lib/events.js
CHANGED
const { isFunction } = require('./utils.js');

// Propagate `event` on `eventTarget` following event-handler semantics:
// the `on<type>` attribute handler (when set to a function) runs first,
// then the event is dispatched to any registered event listeners.
module.exports.propagateEvent = function propagateEvent(eventTarget, event) {
  const attributeHandler = eventTarget[`on${event.type}`];

  // call attribute first if exists
  if (isFunction(attributeHandler)) {
    attributeHandler.call(eventTarget, event);
  }

  // then dispatch to added event listeners
  eventTarget.dispatchEvent(event);
}
package/js/lib/symbols.js
CHANGED
// Internal symbol keys shared across the JS wrappers.
// Keys created with `Symbol()` are unique to this module instance; keys
// created with `Symbol.for()` live in the global symbol registry so they
// can be resolved by name from other contexts (e.g. the native side).
const local = (name) => Symbol(`node-web-audio-api:${name}`);
const registered = (name) => Symbol.for(`node-web-audio-api:${name}`);

module.exports.kNapiObj = local('napi-obj');
module.exports.kAudioBuffer = local('audio-buffer');
module.exports.kPrivateConstructor = local('private-constructor');
module.exports.kCreateProcessor = local('create-processor');
module.exports.kProcessorRegistered = local('processor-registered');
module.exports.kGetParameterDescriptors = local('get-parameter-descriptors');
module.exports.kWorkletRelease = local('worklet-release');
module.exports.kCheckProcessorsCreated = local('check-processor-created');

// semi-private keys for events listeners

// # BaseAudioContext
module.exports.kOnStateChange = registered('onstatechange');
// # AudioContext
module.exports.kOnSinkChange = registered('onsinkchange');
// # OfflineAudioContext
// > [The onstatechange] event is fired before the complete event is fired
// cf. https://webaudio.github.io/web-audio-api/#dom-baseaudiocontext-onstatechange
// @fixme: for now the `complete` event is triggered **before** startRendering fulfills
module.exports.kOnComplete = registered('oncomplete');
// # AudioScheduledSourceNode
module.exports.kOnEnded = registered('onended');
// # ScriptProcessorNode
module.exports.kOnAudioProcess = registered('onaudioprocess');
// # AudioRenderCapacity
module.exports.kOnUpdate = registered('onupdate');
package/load-native.cjs
ADDED
|
@@ -0,0 +1,87 @@
|
|
|
1
|
+
const { platform, arch } = process;
|
|
2
|
+
|
|
3
|
+
let nativeBinding = null;
|
|
4
|
+
let loadError = null;
|
|
5
|
+
|
|
6
|
+
switch (platform) {
|
|
7
|
+
case 'win32':
|
|
8
|
+
switch (arch) {
|
|
9
|
+
case 'x64':
|
|
10
|
+
try {
|
|
11
|
+
nativeBinding = require('./node-web-audio-api.win32-x64-msvc.node');
|
|
12
|
+
} catch (e) {
|
|
13
|
+
loadError = e;
|
|
14
|
+
}
|
|
15
|
+
break;
|
|
16
|
+
case 'arm64':
|
|
17
|
+
try {
|
|
18
|
+
nativeBinding = require('./node-web-audio-api.win32-arm64-msvc.node');
|
|
19
|
+
} catch (e) {
|
|
20
|
+
loadError = e;
|
|
21
|
+
}
|
|
22
|
+
break;
|
|
23
|
+
default:
|
|
24
|
+
throw new Error(`Unsupported architecture on Windows: ${arch}`);
|
|
25
|
+
}
|
|
26
|
+
break;
|
|
27
|
+
case 'darwin':
|
|
28
|
+
switch (arch) {
|
|
29
|
+
case 'x64':
|
|
30
|
+
try {
|
|
31
|
+
nativeBinding = require('./node-web-audio-api.darwin-x64.node');
|
|
32
|
+
} catch (e) {
|
|
33
|
+
loadError = e;
|
|
34
|
+
}
|
|
35
|
+
break;
|
|
36
|
+
case 'arm64':
|
|
37
|
+
try {
|
|
38
|
+
nativeBinding = require('./node-web-audio-api.darwin-arm64.node');
|
|
39
|
+
} catch (e) {
|
|
40
|
+
loadError = e;
|
|
41
|
+
}
|
|
42
|
+
break;
|
|
43
|
+
default:
|
|
44
|
+
throw new Error(`Unsupported architecture on macOS: ${arch}`);
|
|
45
|
+
}
|
|
46
|
+
break;
|
|
47
|
+
case 'linux':
|
|
48
|
+
switch (arch) {
|
|
49
|
+
case 'x64':
|
|
50
|
+
try {
|
|
51
|
+
nativeBinding = require('./node-web-audio-api.linux-x64-gnu.node');
|
|
52
|
+
} catch (e) {
|
|
53
|
+
loadError = e;
|
|
54
|
+
}
|
|
55
|
+
break;
|
|
56
|
+
case 'arm64':
|
|
57
|
+
try {
|
|
58
|
+
nativeBinding = require('./node-web-audio-api.linux-arm64-gnu.node');
|
|
59
|
+
} catch (e) {
|
|
60
|
+
loadError = e;
|
|
61
|
+
}
|
|
62
|
+
break;
|
|
63
|
+
case 'arm':
|
|
64
|
+
try {
|
|
65
|
+
nativeBinding = require('./node-web-audio-api.linux-arm-gnueabihf.node');
|
|
66
|
+
} catch (e) {
|
|
67
|
+
loadError = e;
|
|
68
|
+
}
|
|
69
|
+
break;
|
|
70
|
+
default:
|
|
71
|
+
throw new Error(`Unsupported architecture on Linux: ${arch}`);
|
|
72
|
+
}
|
|
73
|
+
break;
|
|
74
|
+
default:
|
|
75
|
+
throw new Error(`Unsupported OS: ${platform}, architecture: ${arch}`);
|
|
76
|
+
}
|
|
77
|
+
|
|
78
|
+
if (!nativeBinding) {
|
|
79
|
+
if (loadError) {
|
|
80
|
+
throw loadError;
|
|
81
|
+
}
|
|
82
|
+
|
|
83
|
+
throw new Error(`Failed to load native binding for OS: ${platform}, architecture: ${arch}`);
|
|
84
|
+
}
|
|
85
|
+
|
|
86
|
+
module.exports = nativeBinding;
|
|
87
|
+
|
|
Binary file
|
|
Binary file
|
|
Binary file
|
|
Binary file
|
|
Binary file
|
|
Binary file
|
|
Binary file
|
package/package.json
CHANGED
|
@@ -1,6 +1,6 @@
|
|
|
1
1
|
{
|
|
2
2
|
"name": "node-web-audio-api",
|
|
3
|
-
"version": "0.
|
|
3
|
+
"version": "0.21.0",
|
|
4
4
|
"author": "Benjamin Matuszewski",
|
|
5
5
|
"description": "Node.js bindings for web-audio-api-rs using napi-rs",
|
|
6
6
|
"exports": {
|
|
@@ -73,6 +73,8 @@
|
|
|
73
73
|
"dependencies": {
|
|
74
74
|
"@napi-rs/cli": "^2.14.3",
|
|
75
75
|
"@node-rs/helper": "^1.3.3",
|
|
76
|
+
"caller": "^1.1.0",
|
|
77
|
+
"node-fetch": "^3.3.2",
|
|
76
78
|
"webidl-conversions": "^7.0.0"
|
|
77
79
|
}
|
|
78
80
|
}
|
package/TODOS.md
DELETED
|
@@ -1,149 +0,0 @@
|
|
|
1
|
-
# TODO
|
|
2
|
-
|
|
3
|
-
## MISC
|
|
4
|
-
|
|
5
|
-
- [x] decode audio data in dedicated thread
|
|
6
|
-
- [x] Use node DOMExeption <https://nodejs.org/api/globals.html#domexception>
|
|
7
|
-
- [x] connect / disconnect
|
|
8
|
-
- [ ] _napi_ review tsfn store implementation
|
|
9
|
-
- [ ] implement ScriptProcesorNode
|
|
10
|
-
- [ ] wpt: mock for `URL.createObjectURL`
|
|
11
|
-
- [ ] wpt: mock for `requestAnimationFrame` cf. https://github.com/nodejs/help/issues/2483
|
|
12
|
-
- [ ] make sure we don't try to use `in` operator on null values
|
|
13
|
-
|
|
14
|
-
- [ ] _rust_ AudioParam failing tests
|
|
15
|
-
- [ ] _rust_ AudioBufferSourceNode failing tests
|
|
16
|
-
- [ ] _rust_ IIRFilter node
|
|
17
|
-
- [ ] Register AudioScheduledSourceNode listener only on start
|
|
18
|
-
- [ ] refactor - Add context string in `throwSanitizedError` and to `toSanitizedSequence`
|
|
19
|
-
- [ ] Rust - review Symbol.toStringTag https://github.com/nodejs/node/issues/41358
|
|
20
|
-
```
|
|
21
|
-
// symbol as property name example
|
|
22
|
-
let test_symbol = ctx.env.symbol_for("test").unwrap();
|
|
23
|
-
js_this.set_property(test_symbol, &ctx.env.create_string("test").unwrap())?;
|
|
24
|
-
```
|
|
25
|
-
- [ ] wpt bot
|
|
26
|
-
- [ ] wpt - handle loading of 4-channels sound file
|
|
27
|
-
- [ ] _rust_ decodeAudioData should throw EncodingError
|
|
28
|
-
- review JS side when done
|
|
29
|
-
|
|
30
|
-
- [-] AnalyserNode -> requires script processor and request animation frame
|
|
31
|
-
|
|
32
|
-
- [x] DelayNode
|
|
33
|
-
- [x] protect AudioBuffer arguments
|
|
34
|
-
- [x] AudioNode setters (handle enum types, cf audio param too)
|
|
35
|
-
- [x] This is weird `jsExport.AudioBuffer = require('./AudioBuffer.js').AudioBuffer(nativeBinding.AudioBuffer);`
|
|
36
|
-
- [x] Ended event in AudioScheduledSourceNode for offline audio context
|
|
37
|
-
|
|
38
|
-
- cf. util.types.isSharedArrayBuffer(value)
|
|
39
|
-
|
|
40
|
-
## Notes
|
|
41
|
-
|
|
42
|
-
- wpt/webaudio/the-audio-api/the-dynamicscompressornode-interface/dynamicscompressor-basic.html sometimes pass, sometimes fail because audio context is resumed
|
|
43
|
-
|
|
44
|
-
------------------------------------------------------------------------
|
|
45
|
-
|
|
46
|
-
## `onstatechnage` and `onsinkchange`
|
|
47
|
-
|
|
48
|
-
### https://webaudio.github.io/web-audio-api/#eventdef-baseaudiocontext-statechange
|
|
49
|
-
|
|
50
|
-
> A newly-created AudioContext will always begin in the suspended state, and a state change event will be fired whenever the state changes to a different state. This event is fired before the complete event is fired.
|
|
51
|
-
|
|
52
|
-
### https://webaudio.github.io/web-audio-api/#dom-audiocontext-onsinkchange
|
|
53
|
-
|
|
54
|
-
> NOTE: This is not dispatched for the initial device selection in the construction of AudioContext. The statechange event is available to check the readiness of the initial output device.
|
|
55
|
-
|
|
56
|
-
cf. the-audiocontext-interface/audiocontext-sinkid-state-change.https.html
|
|
57
|
-
|
|
58
|
-
### Notes
|
|
59
|
-
|
|
60
|
-
We should explicitly resume context at startup, just as a context created in a console or localhost
|
|
61
|
-
|
|
62
|
-
What happens when sinkId is changed while context is suspended? It seems that it is resumed:
|
|
63
|
-
|
|
64
|
-
```rs
|
|
65
|
-
Startup { graph } => {
|
|
66
|
-
debug_assert!(self.graph.is_none());
|
|
67
|
-
self.graph = Some(graph);
|
|
68
|
-
self.set_state(AudioContextState::Running);
|
|
69
|
-
}
|
|
70
|
-
```
|
|
71
|
-
|
|
72
|
-
@todo - create a test bed
|
|
73
|
-
|
|
74
|
-
- testing AudioContextOptions.sinkId requires this fix
|
|
75
|
-
the-audiocontext-interface/audiocontext-sinkid-constructor.https.html
|
|
76
|
-
- setting sink on supended audioContext test too
|
|
77
|
-
the-audiocontext-interface/audiocontext-sinkid-state-change.https.html
|
|
78
|
-
|
|
79
|
-
------------------------------------------------------------------------
|
|
80
|
-
|
|
81
|
-
## Issue in spec / wpt
|
|
82
|
-
|
|
83
|
-
- [ ] review waveshaper curve (need to be able to set back to null)
|
|
84
|
-
<https://webaudio.github.io/web-audio-api/#dom-waveshapernode-curve>
|
|
85
|
-
To set the curve attribute, execute these steps:
|
|
86
|
-
- Let new curve be a Float32Array to be assigned to curve or null. .
|
|
87
|
-
- If new curve is not null and [[curve set]] is true, throw an InvalidStateError and abort these steps.
|
|
88
|
-
- If new curve is not null, set [[curve set]] to true.
|
|
89
|
-
- Assign new curve to the curve attribute.
|
|
90
|
-
|
|
91
|
-
-> Spec is not inline with wpt tests, both chrome and firefox accept setting curve several times (which makes sens...), without passing to null first
|
|
92
|
-
-> Curve defined as sequence<float> in arguments but as Float32Array in attributes
|
|
93
|
-
-> Means that we can't pass non finite values in ctor but we can with setter
|
|
94
|
-
|
|
95
|
-
- [ ] AudioDestination::numberOfOutputs
|
|
96
|
-
- implementation and wpt report 0
|
|
97
|
-
cf. webaudio/the-audio-api/the-audionode-interface/audionode.html
|
|
98
|
-
- spec specifies 1: https://webaudio.github.io/web-audio-api/#AudioDestinationNode
|
|
99
|
-
|
|
100
|
-
- [ ] Analyser::fftSize
|
|
101
|
-
- wpt defines that when set to -1 it should throw an IndexSizeError, when not a type error as it is defined as unsigned long?
|
|
102
|
-
cf. the-analysernode-interface/realtimeanalyser-fft-sizing.html
|
|
103
|
-
cf. https://webidl.spec.whatwg.org/#js-attributes
|
|
104
|
-
setter step 4.6
|
|
105
|
-
- same with getChannelData
|
|
106
|
-
|
|
107
|
-
- [ ] wpt - propose patch to remove patch regarding `audiobuffersource-multi-channels-expected.wav`in XMLHttpRequest Mock
|
|
108
|
-
|
|
109
|
-
- [ ] Propose a test for decodeAudioData
|
|
110
|
-
"Let error be a DOMException whose name is EncodingError."
|
|
111
|
-
|
|
112
|
-
- [ ] ScriptProcessorNode rehabilitation
|
|
113
|
-
- padenot mail vs spec
|
|
114
|
-
|
|
115
|
-
------------------------------------------------------------------------
|
|
116
|
-
|
|
117
|
-
|
|
118
|
-
#### main
|
|
119
|
-
```
|
|
120
|
-
RESULTS:
|
|
121
|
-
- # pass: 6848
|
|
122
|
-
- # fail: 706
|
|
123
|
-
- # type error issues: 5
|
|
124
|
-
> wpt duration: 2:22.697 (m:ss.mmm)
|
|
125
|
-
```
|
|
126
|
-
|
|
127
|
-
#### feat/ended-events
|
|
128
|
-
```
|
|
129
|
-
RESULTS:
|
|
130
|
-
- # pass: 6854
|
|
131
|
-
- # fail: 704
|
|
132
|
-
- # type error issues: 5
|
|
133
|
-
> wpt duration: 2:08.718 (m:ss.mmm)
|
|
134
|
-
```
|
|
135
|
-
|
|
136
|
-
#### refactor/napi-wrappers
|
|
137
|
-
|
|
138
|
-
w/ https://github.com/orottier/web-audio-api-rs/pull/492
|
|
139
|
-
|
|
140
|
-
```
|
|
141
|
-
RESULTS:
|
|
142
|
-
- # pass: 6897
|
|
143
|
-
- # fail: 692
|
|
144
|
-
- # type error issues: 5
|
|
145
|
-
> wpt duration: 1:59.505 (m:ss.mmm)
|
|
146
|
-
```
|
|
147
|
-
|
|
148
|
-
ls wpt/webaudio/the-audio-api/the-audiocontext-interface | xargs -I {} ./run-wpt.sh {}
|
|
149
|
-
ls wpt/webaudio/the-audio-api/the-dynamicscompressornode-interface | xargs -I {} ./run-wpt.sh {}
|
package/js/monkey-patch.js
DELETED
|
@@ -1,77 +0,0 @@
|
|
|
1
|
-
// -------------------------------------------------------------------------- //
|
|
2
|
-
// -------------------------------------------------------------------------- //
|
|
3
|
-
// //
|
|
4
|
-
// //
|
|
5
|
-
// //
|
|
6
|
-
// ██╗ ██╗ █████╗ ██████╗ ███╗ ██╗██╗███╗ ██╗ ██████╗ //
|
|
7
|
-
// ██║ ██║██╔══██╗██╔══██╗████╗ ██║██║████╗ ██║██╔════╝ //
|
|
8
|
-
// ██║ █╗ ██║███████║██████╔╝██╔██╗ ██║██║██╔██╗ ██║██║ ███╗ //
|
|
9
|
-
// ██║███╗██║██╔══██║██╔══██╗██║╚██╗██║██║██║╚██╗██║██║ ██║ //
|
|
10
|
-
// ╚███╔███╔╝██║ ██║██║ ██║██║ ╚████║██║██║ ╚████║╚██████╔╝ //
|
|
11
|
-
// ╚══╝╚══╝ ╚═╝ ╚═╝╚═╝ ╚═╝╚═╝ ╚═══╝╚═╝╚═╝ ╚═══╝ ╚═════╝ //
|
|
12
|
-
// //
|
|
13
|
-
// //
|
|
14
|
-
// - This file has been generated --------------------------- //
|
|
15
|
-
// //
|
|
16
|
-
// //
|
|
17
|
-
// -------------------------------------------------------------------------- //
|
|
18
|
-
// -------------------------------------------------------------------------- //
|
|
19
|
-
|
|
20
|
-
module.exports = function monkeyPatch(nativeBinding) {
|
|
21
|
-
let jsExport = {};
|
|
22
|
-
// --------------------------------------------------------------------------
|
|
23
|
-
// Monkey Patch Web Audio API
|
|
24
|
-
// --------------------------------------------------------------------------
|
|
25
|
-
jsExport.BaseAudioContext = require('./BaseAudioContext.js')(jsExport, nativeBinding);
|
|
26
|
-
jsExport.AudioContext = require('./AudioContext.js')(jsExport, nativeBinding);
|
|
27
|
-
jsExport.OfflineAudioContext = require('./OfflineAudioContext.js')(jsExport, nativeBinding);
|
|
28
|
-
|
|
29
|
-
jsExport.AnalyserNode = require('./AnalyserNode.js')(jsExport, nativeBinding);
|
|
30
|
-
jsExport.AudioBufferSourceNode = require('./AudioBufferSourceNode.js')(jsExport, nativeBinding);
|
|
31
|
-
jsExport.BiquadFilterNode = require('./BiquadFilterNode.js')(jsExport, nativeBinding);
|
|
32
|
-
jsExport.ChannelMergerNode = require('./ChannelMergerNode.js')(jsExport, nativeBinding);
|
|
33
|
-
jsExport.ChannelSplitterNode = require('./ChannelSplitterNode.js')(jsExport, nativeBinding);
|
|
34
|
-
jsExport.ConstantSourceNode = require('./ConstantSourceNode.js')(jsExport, nativeBinding);
|
|
35
|
-
jsExport.ConvolverNode = require('./ConvolverNode.js')(jsExport, nativeBinding);
|
|
36
|
-
jsExport.DelayNode = require('./DelayNode.js')(jsExport, nativeBinding);
|
|
37
|
-
jsExport.DynamicsCompressorNode = require('./DynamicsCompressorNode.js')(jsExport, nativeBinding);
|
|
38
|
-
jsExport.GainNode = require('./GainNode.js')(jsExport, nativeBinding);
|
|
39
|
-
jsExport.IIRFilterNode = require('./IIRFilterNode.js')(jsExport, nativeBinding);
|
|
40
|
-
jsExport.MediaStreamAudioSourceNode = require('./MediaStreamAudioSourceNode.js')(jsExport, nativeBinding);
|
|
41
|
-
jsExport.OscillatorNode = require('./OscillatorNode.js')(jsExport, nativeBinding);
|
|
42
|
-
jsExport.PannerNode = require('./PannerNode.js')(jsExport, nativeBinding);
|
|
43
|
-
jsExport.StereoPannerNode = require('./StereoPannerNode.js')(jsExport, nativeBinding);
|
|
44
|
-
jsExport.WaveShaperNode = require('./WaveShaperNode.js')(jsExport, nativeBinding);
|
|
45
|
-
|
|
46
|
-
jsExport.AudioNode = require('./AudioNode.js');
|
|
47
|
-
jsExport.AudioScheduledSourceNode = require('./AudioScheduledSourceNode.js');
|
|
48
|
-
jsExport.AudioParam = require('./AudioParam.js');
|
|
49
|
-
jsExport.AudioDestinationNode = require('./AudioDestinationNode.js');
|
|
50
|
-
jsExport.AudioListener = require('./AudioListener.js');
|
|
51
|
-
|
|
52
|
-
jsExport.PeriodicWave = require('./PeriodicWave.js')(jsExport, nativeBinding);
|
|
53
|
-
jsExport.AudioBuffer = require('./AudioBuffer.js')(jsExport, nativeBinding);
|
|
54
|
-
|
|
55
|
-
// --------------------------------------------------------------------------
|
|
56
|
-
// Promisify MediaDevices API
|
|
57
|
-
// --------------------------------------------------------------------------
|
|
58
|
-
jsExport.mediaDevices = {};
|
|
59
|
-
|
|
60
|
-
const enumerateDevicesSync = nativeBinding.mediaDevices.enumerateDevices;
|
|
61
|
-
jsExport.mediaDevices.enumerateDevices = async function enumerateDevices() {
|
|
62
|
-
const list = enumerateDevicesSync();
|
|
63
|
-
return Promise.resolve(list);
|
|
64
|
-
};
|
|
65
|
-
|
|
66
|
-
const getUserMediaSync = nativeBinding.mediaDevices.getUserMedia;
|
|
67
|
-
jsExport.mediaDevices.getUserMedia = async function getUserMedia(options) {
|
|
68
|
-
if (options === undefined) {
|
|
69
|
-
throw new TypeError('Failed to execute "getUserMedia" on "MediaDevices": audio must be requested');
|
|
70
|
-
}
|
|
71
|
-
|
|
72
|
-
const stream = getUserMediaSync(options);
|
|
73
|
-
return Promise.resolve(stream);
|
|
74
|
-
};
|
|
75
|
-
|
|
76
|
-
return jsExport;
|
|
77
|
-
};
|
package/run-wpt.md
DELETED
|
@@ -1,27 +0,0 @@
|
|
|
1
|
-
create file run-wpt.sh and chmod +x run-wpt.sh:
|
|
2
|
-
|
|
3
|
-
```
|
|
4
|
-
#!/bin/bash
|
|
5
|
-
echo $1
|
|
6
|
-
echo ~~~~~~~~~~~~~~~~~
|
|
7
|
-
node ./.scripts/wpt-harness.mjs --filter "$1" 2>&1 | grep -A 3 RESULTS
|
|
8
|
-
echo
|
|
9
|
-
```
|
|
10
|
-
|
|
11
|
-
dissect the full suite per directory:
|
|
12
|
-
|
|
13
|
-
```
|
|
14
|
-
ls wpt/webaudio/the-audio-api | xargs -I {} ./run-wpt.sh {}
|
|
15
|
-
```
|
|
16
|
-
|
|
17
|
-
check the specific file results in a dir:
|
|
18
|
-
|
|
19
|
-
```
|
|
20
|
-
ls wpt/webaudio/the-audio-api/the-oscillatornode-interface | xargs -I {} ./run-wpt.sh {}
|
|
21
|
-
```
|
|
22
|
-
|
|
23
|
-
ls wpt/webaudio/the-audio-api/the-audiocontext-interface | xargs -I {} ./run-wpt.sh {}
|
|
24
|
-
|
|
25
|
-
processing-model
|
|
26
|
-
|
|
27
|
-
ls wpt/webaudio/the-audio-api/processing-model | xargs -I {} ./run-wpt.sh {}
|
package/simple-test.cjs
DELETED
|
@@ -1,20 +0,0 @@
|
|
|
1
|
-
const { AudioContext, mediaDevices } = require('./index.cjs');
|
|
2
|
-
|
|
3
|
-
const audioContext = new AudioContext();
|
|
4
|
-
|
|
5
|
-
setInterval(() => {
|
|
6
|
-
const now = audioContext.currentTime;
|
|
7
|
-
|
|
8
|
-
const env = audioContext.createGain();
|
|
9
|
-
env.connect(audioContext.destination);
|
|
10
|
-
env.gain.value = 0;
|
|
11
|
-
env.gain.setValueAtTime(0, now);
|
|
12
|
-
env.gain.linearRampToValueAtTime(0.1, now + 0.02);
|
|
13
|
-
env.gain.exponentialRampToValueAtTime(0.0001, now + 1);
|
|
14
|
-
|
|
15
|
-
const osc = audioContext.createOscillator();
|
|
16
|
-
osc.frequency.value = 200 + Math.random() * 2800;
|
|
17
|
-
osc.connect(env);
|
|
18
|
-
osc.start(now);
|
|
19
|
-
osc.stop(now + 1);
|
|
20
|
-
}, 100);
|