node-web-audio-api 0.18.0 → 0.19.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (49)
  1. package/CHANGELOG.md +7 -0
  2. package/TODOS.md +140 -12
  3. package/index.mjs +10 -5
  4. package/js/AnalyserNode.js +262 -48
  5. package/js/AudioBuffer.js +244 -0
  6. package/js/AudioBufferSourceNode.js +265 -41
  7. package/js/AudioContext.js +271 -28
  8. package/js/AudioDestinationNode.js +42 -100
  9. package/js/AudioListener.js +219 -0
  10. package/js/AudioNode.js +323 -0
  11. package/js/AudioParam.js +252 -39
  12. package/js/AudioScheduledSourceNode.js +105 -0
  13. package/js/BaseAudioContext.js +419 -0
  14. package/js/BiquadFilterNode.js +221 -29
  15. package/js/ChannelMergerNode.js +96 -22
  16. package/js/ChannelSplitterNode.js +96 -22
  17. package/js/ConstantSourceNode.js +92 -26
  18. package/js/ConvolverNode.js +161 -29
  19. package/js/DelayNode.js +115 -21
  20. package/js/DynamicsCompressorNode.js +198 -27
  21. package/js/GainNode.js +107 -21
  22. package/js/IIRFilterNode.js +139 -23
  23. package/js/MediaStreamAudioSourceNode.js +83 -24
  24. package/js/OfflineAudioContext.js +182 -34
  25. package/js/OscillatorNode.js +195 -32
  26. package/js/PannerNode.js +461 -56
  27. package/js/PeriodicWave.js +67 -3
  28. package/js/StereoPannerNode.js +107 -21
  29. package/js/WaveShaperNode.js +147 -29
  30. package/js/lib/cast.js +19 -0
  31. package/js/lib/errors.js +10 -55
  32. package/js/lib/events.js +20 -0
  33. package/js/lib/symbols.js +5 -0
  34. package/js/lib/utils.js +12 -12
  35. package/js/monkey-patch.js +32 -30
  36. package/node-web-audio-api.darwin-arm64.node +0 -0
  37. package/node-web-audio-api.darwin-x64.node +0 -0
  38. package/node-web-audio-api.linux-arm-gnueabihf.node +0 -0
  39. package/node-web-audio-api.linux-arm64-gnu.node +0 -0
  40. package/node-web-audio-api.linux-x64-gnu.node +0 -0
  41. package/node-web-audio-api.win32-arm64-msvc.node +0 -0
  42. package/node-web-audio-api.win32-x64-msvc.node +0 -0
  43. package/package.json +7 -4
  44. package/run-wpt.md +27 -0
  45. package/run-wpt.sh +5 -0
  46. package/js/AudioNode.mixin.js +0 -132
  47. package/js/AudioScheduledSourceNode.mixin.js +0 -67
  48. package/js/BaseAudioContext.mixin.js +0 -154
  49. package/js/EventTarget.mixin.js +0 -60
package/CHANGELOG.md CHANGED
@@ -1,3 +1,10 @@
+ ## v0.19.0 (17/04/2024)
+
+ - Update upstream crate to [1.0.0-rc.5](https://github.com/orottier/web-audio-api-rs/blob/main/CHANGELOG.md#version-0430--100-rc5-2024-04-15)
+ - Provide JS facades with proper inheritance chain for all exposed interfaces
+ - Implement all AudioNode connect / disconnect alternatives
+ - Improve compliance and error handling
+
  ## v0.18.0 (13/03/2024)

  - Fix `MediaStreamAudioSourceNode`
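The changelog entry above mentions "all AudioNode connect / disconnect alternatives". As a rough illustration only (this snippet is not part of the diff; the overload list comes from the Web Audio API spec rather than from this package's docs), the alternatives look like this from the consumer side:

```js
// Illustrative sketch, not from the diff: the connect / disconnect overloads
// defined by the Web Audio API spec, exercised through this package's exports.
import { AudioContext, OscillatorNode, GainNode } from 'node-web-audio-api';

const ctx = new AudioContext();
const osc = new OscillatorNode(ctx);
const gain = new GainNode(ctx, { gain: 0.5 });

osc.connect(gain);                    // AudioNode -> AudioNode
osc.connect(gain.gain);               // AudioNode -> AudioParam
gain.connect(ctx.destination, 0, 0);  // explicit output / input indices

osc.disconnect(gain.gain);            // disconnect from a specific AudioParam
gain.disconnect(ctx.destination, 0);  // disconnect a given output from a node
osc.disconnect();                     // disconnect everything

await ctx.close();
```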
package/TODOS.md CHANGED
@@ -1,21 +1,149 @@
  # TODO

- - [ ] Review AudioBuffer
- - [ ] Ended event in AudioScheduledSourceNode for offline audio context
- - [ ] `MediaStreamAudioSourceNode`
- + [x] properly handle `mediaStream`
- + [ ] do not accept OfflineAudioContext (see if this can be delegated to upstream)
+ ## MISC

+ - [x] decode audio data in dedicated thread
+ - [x] Use node DOMExeption <https://nodejs.org/api/globals.html#domexception>
+ - [x] connect / disconnect
+ - [ ] _napi_ review tsfn store implementation
+ - [ ] implement ScriptProcesorNode
+ - [ ] wpt: mock for `URL.createObjectURL`
+ - [ ] wpt: mock for `requestAnimationFrame` cf. https://github.com/nodejs/help/issues/2483
+ - [ ] make sure we don't try to use `in` operator on null values
+
+ - [ ] _rust_ AudioParam failing tests
+ - [ ] _rust_ AudioBufferSourceNode failing tests
+ - [ ] _rust_ IIRFilter node
+ - [ ] Register AudioScheduledSourceNode listener only on start
+ - [ ] refactor - Add context string in `throwSanitizedError` and to `toSanitizedSequence`
+ - [ ] Rust - review Symbol.toStringTag https://github.com/nodejs/node/issues/41358
+ ```
+ // symbol as property name example
+ let test_symbol = ctx.env.symbol_for("test").unwrap();
+ js_this.set_property(test_symbol, &ctx.env.create_string("test").unwrap())?;
+ ```
  - [ ] wpt bot
+ - [ ] wpt - handle loading of 4-channels sound file
+ - [ ] _rust_ decodeAudioData should throw EncodingError
+ - review JS side when done
+
+ - [-] AnalyserNode -> requires script processor and request animation frame
+
+ - [x] DelayNode
+ - [x] protect AudioBuffer arguments
+ - [x] AudioNode setters (handle enum types, cf audio param too)
+ - [x] This is weird `jsExport.AudioBuffer = require('./AudioBuffer.js').AudioBuffer(nativeBinding.AudioBuffer);`
+ - [x] Ended event in AudioScheduledSourceNode for offline audio context
+
+ - cf. util.types.isSharedArrayBuffer(value)
+
+ ## Notes
+
+ - wpt/webaudio/the-audio-api/the-dynamicscompressornode-interface/dynamicscompressor-basic.html sometimes pass, sometimes fail because audio context is resumed
+
+ ------------------------------------------------------------------------

- - [ ] OfflineAudioContext should not lock the process w/ event listeners if startRendering has not been called
+ ## `onstatechnage` and `onsinkchange`
+
+ ### https://webaudio.github.io/web-audio-api/#eventdef-baseaudiocontext-statechange
+
+ > A newly-created AudioContext will always begin in the suspended state, and a state change event will be fired whenever the state changes to a different state. This event is fired before the complete event is fired.
+
+ ### https://webaudio.github.io/web-audio-api/#dom-audiocontext-onsinkchange

- - [ ] wrap EventTarget::dispatchEvent in setTimeout(callback , 0); so that events are dispatched in the next microtask ?
+ > NOTE: This is not dispatched for the initial device selection in the construction of AudioContext. The statechange event is available to check the readiness of the initial output device.
+
+ cf. the-audiocontext-interface/audiocontext-sinkid-state-change.https.html
+
+ ### Notes
+
+ We should explicitly resume context at startup, just as a context created in a console or localhost
+
+ What happens when sinkId is changed while context is suspended? It seems that it is resumed:
+
+ ```rs
+ Startup { graph } => {
+     debug_assert!(self.graph.is_none());
+     self.graph = Some(graph);
+     self.set_state(AudioContextState::Running);
+ }
+ ```
+
+ @todo - create a test bed
+
+ - testing AudioContextOptions.sinkId requires this fix
+   the-audiocontext-interface/audiocontext-sinkid-constructor.https.html
+ - setting sink on supended audioContext test too
+   the-audiocontext-interface/audiocontext-sinkid-state-change.https.html
+
+ ------------------------------------------------------------------------
+
+ ## Issue in spec / wpt
+
+ - [ ] review waveshaper curve (need to be able to set back to null)
+ <https://webaudio.github.io/web-audio-api/#dom-waveshapernode-curve>
+ To set the curve attribute, execute these steps:
+ - Let new curve be a Float32Array to be assigned to curve or null. .
+ - If new curve is not null and [[curve set]] is true, throw an InvalidStateError and abort these steps.
+ - If new curve is not null, set [[curve set]] to true.
+ - Assign new curve to the curve attribute.
+
+ -> Spec is not inline with wpt tests, both chrome and firefox accept setting curve several times (which makes sens...), without passing to null first
+ -> Curve defined as sequence<float> in arguments but as Float32Array in attributes
+ -> Means that we can't pass non finite values in ctor but we can with setter
+
+ - [ ] AudioDestination::numberOfOutputs
+ - implementation and wpt report 0
+ cf. webaudio/the-audio-api/the-audionode-interface/audionode.html
+ - spec specifies 1: https://webaudio.github.io/web-audio-api/#AudioDestinationNode
+
+ - [ ] Analyser::fftSize
+ - wpt defines that when set to -1 it should throw an IndexSizeError, when not a type error as it is defined as unsigned long?
+ cf. the-analysernode-interface/realtimeanalyser-fft-sizing.html
+ cf. https://webidl.spec.whatwg.org/#js-attributes
+ setter step 4.6
+ - same with getChannelData
+
+ - [ ] wpt - propose patch to remove patch regarding `audiobuffersource-multi-channels-expected.wav`in XMLHttpRequest Mock
+
+ - [ ] Propose a test for decodeAudioData
+ "Let error be a DOMException whose name is EncodingError."
+
+ - [ ] ScriptProcessorNode rehabilitation
+ - padenot mail vs spec
+
+ ------------------------------------------------------------------------
+
+
+ #### main
+ ```
+ RESULTS:
+ - # pass: 6848
+ - # fail: 706
+ - # type error issues: 5
+ > wpt duration: 2:22.697 (m:ss.mmm)
+ ```
+
+ #### feat/ended-events
+ ```
+ RESULTS:
+ - # pass: 6854
+ - # fail: 704
+ - # type error issues: 5
+ > wpt duration: 2:08.718 (m:ss.mmm)
+ ```
+
+ #### refactor/napi-wrappers

+ w/ https://github.com/orottier/web-audio-api-rs/pull/492

- ## AudioBuffer notes
+ ```
+ RESULTS:
+ - # pass: 6897
+ - # fail: 692
+ - # type error issues: 5
+ > wpt duration: 1:59.505 (m:ss.mmm)
+ ```

- - [x] `AudioBuffer` has to be a facade because `startRendering`, `decodeAudioData`
- - [x] need to adapt `AudioBufferSourceNode` and `ConvolverNode` so
- that `set buffer(value)` retrieve the wrapped value
- - [ ] No fucking sound when buffer comes from `decodeAudioData`..., but ok when `createBuffer`
+ ls wpt/webaudio/the-audio-api/the-audiocontext-interface | xargs -I {} ./run-wpt.sh {}
+ ls wpt/webaudio/the-audio-api/the-dynamicscompressornode-interface | xargs -I {} ./run-wpt.sh {}
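The `onstatechange` notes added to TODOS.md above quote the spec's suspended-at-construction behaviour and suggest resuming the context explicitly at startup. A minimal test-bed sketch of what that could assert follows (hypothetical code, not part of the diff; the initial state observed in practice may differ from the quoted spec text, which is exactly what the notes discuss):

```js
// Hypothetical test-bed sketch for the statechange notes above; not from the diff.
import { AudioContext } from 'node-web-audio-api';

const ctx = new AudioContext();
// Per the quoted spec text a context begins 'suspended'; the notes above
// question whether this implementation auto-resumes on startup instead.
console.log('initial state:', ctx.state);

ctx.addEventListener('statechange', () => {
  console.log('state is now', ctx.state);
});

await ctx.resume();  // explicit resume at startup, as suggested in the notes
await ctx.close();   // triggers another statechange
```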
package/index.mjs CHANGED
@@ -20,17 +20,25 @@
  // re-export index.cjs to support esm import syntax
  // see https://github.com/nodejs/node/issues/40541#issuecomment-951609570

- import { createRequire } from 'module';
+ import {
+   createRequire,
+ } from 'module';
  const require = createRequire(import.meta.url);

  const nativeModule = require('./index.cjs');
  export const {
+   BaseAudioContext,
    AudioContext,
    OfflineAudioContext,
+
+   AudioNode,
+   AudioScheduledSourceNode,
    AudioParam,
    AudioDestinationNode,
-   AudioBuffer,
+   AudioListener,
+
    PeriodicWave,
+   AudioBuffer,
    // generated supported nodes
    AnalyserNode,
    AudioBufferSourceNode,
@@ -54,6 +62,3 @@ export const {
  } = nativeModule;

  export default nativeModule;
-
-
-
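For reference, a consumer-side sketch (not part of the diff) of importing the named ESM exports listed above, including the base classes newly re-exported in this release; the `instanceof` checks illustrate the inheritance chain the changelog refers to:

```js
// Illustrative usage of the ESM entry point; assumes a top-level-await capable .mjs file.
import {
  BaseAudioContext,
  AudioContext,
  AudioNode,
  GainNode,
} from 'node-web-audio-api';

const ctx = new AudioContext();
const gain = new GainNode(ctx);

console.log(ctx instanceof BaseAudioContext);  // true
console.log(gain instanceof GainNode);         // true
console.log(gain instanceof AudioNode);        // true, via the facade inheritance chain

await ctx.close();
```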
package/js/AnalyserNode.js CHANGED
@@ -17,114 +17,301 @@
  // -------------------------------------------------------------------------- //
  // -------------------------------------------------------------------------- //

- // eslint-disable-next-line no-unused-vars
- const { throwSanitizedError } = require('./lib/errors.js');
- // eslint-disable-next-line no-unused-vars
- const { AudioParam } = require('./AudioParam.js');
- const EventTargetMixin = require('./EventTarget.mixin.js');
- const AudioNodeMixin = require('./AudioNode.mixin.js');
+ /* eslint-disable no-unused-vars */
+ const conversions = require('webidl-conversions');
+ const {
+   toSanitizedSequence,
+ } = require('./lib/cast.js');
+ const {
+   isFunction,
+   kEnumerableProperty,
+ } = require('./lib/utils.js');
+ const {
+   throwSanitizedError,
+ } = require('./lib/errors.js');
+ const {
+   kNapiObj,
+   kAudioBuffer,
+ } = require('./lib/symbols.js');
+ const {
+   bridgeEventTarget,
+ } = require('./lib/events.js');
+ /* eslint-enable no-unused-vars */

+ const AudioNode = require('./AudioNode.js');

- module.exports = (NativeAnalyserNode) => {
-
-   const EventTarget = EventTargetMixin(NativeAnalyserNode);
-   const AudioNode = AudioNodeMixin(EventTarget);
-
+ module.exports = (jsExport, nativeBinding) => {
    class AnalyserNode extends AudioNode {
+
      constructor(context, options) {
-       if (options !== undefined && typeof options !== 'object') {
-         throw new TypeError("Failed to construct 'AnalyserNode': argument 2 is not of type 'AnalyserOptions'")
+
+       if (arguments.length < 1) {
+         throw new TypeError(`Failed to construct 'AnalyserNode': 1 argument required, but only ${arguments.length} present`);
        }

-       super(context, options);
+       if (!(context instanceof jsExport.BaseAudioContext)) {
+         throw new TypeError(`Failed to construct 'AnalyserNode': argument 1 is not of type BaseAudioContext`);
+       }

-     }
+       // parsed version of the option to be passed to NAPI
+       const parsedOptions = {};

-     // getters
+       if (options && typeof options !== 'object') {
+         throw new TypeError('Failed to construct \'AnalyserNode\': argument 2 is not of type \'AnalyserOptions\'');
+       }

-     get fftSize() {
-       return super.fftSize;
-     }
+       if (options && options.fftSize !== undefined) {
+         parsedOptions.fftSize = conversions['unsigned long'](options.fftSize, {
+           enforceRange: true,
+           context: `Failed to construct 'AnalyserNode': Failed to read the 'fftSize' property from AnalyserOptions: The provided value (${options.fftSize}})`,
+         });
+       } else {
+         parsedOptions.fftSize = 2048;
+       }

-     get frequencyBinCount() {
-       return super.frequencyBinCount;
-     }
+       if (options && options.maxDecibels !== undefined) {
+         parsedOptions.maxDecibels = conversions['double'](options.maxDecibels, {
+           context: `Failed to construct 'AnalyserNode': Failed to read the 'maxDecibels' property from AnalyserOptions: The provided value (${options.maxDecibels}})`,
+         });
+       } else {
+         parsedOptions.maxDecibels = -30;
+       }

-     get minDecibels() {
-       return super.minDecibels;
-     }
+       if (options && options.minDecibels !== undefined) {
+         parsedOptions.minDecibels = conversions['double'](options.minDecibels, {
+           context: `Failed to construct 'AnalyserNode': Failed to read the 'minDecibels' property from AnalyserOptions: The provided value (${options.minDecibels}})`,
+         });
+       } else {
+         parsedOptions.minDecibels = -100;
+       }

-     get maxDecibels() {
-       return super.maxDecibels;
-     }
+       if (options && options.smoothingTimeConstant !== undefined) {
+         parsedOptions.smoothingTimeConstant = conversions['double'](options.smoothingTimeConstant, {
+           context: `Failed to construct 'AnalyserNode': Failed to read the 'smoothingTimeConstant' property from AnalyserOptions: The provided value (${options.smoothingTimeConstant}})`,
+         });
+       } else {
+         parsedOptions.smoothingTimeConstant = 0.8;
+       }
+
+       if (options && options.channelCount !== undefined) {
+         parsedOptions.channelCount = conversions['unsigned long'](options.channelCount, {
+           enforceRange: true,
+           context: `Failed to construct 'AnalyserNode': Failed to read the 'channelCount' property from AnalyserOptions: The provided value '${options.channelCount}'`,
+         });
+       }
+
+       if (options && options.channelCountMode !== undefined) {
+         parsedOptions.channelCountMode = conversions['DOMString'](options.channelCountMode, {
+           context: `Failed to construct 'AnalyserNode': Failed to read the 'channelCount' property from AnalyserOptions: The provided value '${options.channelCountMode}'`,
+         });
+       }
+
+       if (options && options.channelInterpretation !== undefined) {
+         parsedOptions.channelInterpretation = conversions['DOMString'](options.channelInterpretation, {
+           context: `Failed to construct 'AnalyserNode': Failed to read the 'channelInterpretation' property from AnalyserOptions: The provided value '${options.channelInterpretation}'`,
+         });
+       }
+
+       let napiObj;
+
+       try {
+         napiObj = new nativeBinding.AnalyserNode(context[kNapiObj], parsedOptions);
+       } catch (err) {
+         throwSanitizedError(err);
+       }
+
+       super(context, {
+         [kNapiObj]: napiObj,
+       });

-     get smoothingTimeConstant() {
-       return super.smoothingTimeConstant;
      }

-     // setters
+     get fftSize() {
+       if (!(this instanceof AnalyserNode)) {
+         throw new TypeError('Invalid Invocation: Value of \'this\' must be of type \'AnalyserNode\'');
+       }
+
+       return this[kNapiObj].fftSize;
+     }

      set fftSize(value) {
+       if (!(this instanceof AnalyserNode)) {
+         throw new TypeError('Invalid Invocation: Value of \'this\' must be of type \'AnalyserNode\'');
+       }
+
+       // @fixme - wpt pretends that when set to -1, this should throw IndexSizeError, not a TypeError.
+       // For now let's just cast it to Number without further checks, and let Rust do the job
+       // as 0 is an invalid value too
+       // value = conversions['unsigned long'](value, {
+       //   enforceRange: true,
+       //   context: `Failed to set the 'fftSize' property on 'AnalyserNode': Value`
+       // });
+       value = conversions['unrestricted double'](value, {
+         context: `Failed to set the 'fftSize' property on 'AnalyserNode': Value`,
+       });
+
        try {
-         super.fftSize = value;
+         this[kNapiObj].fftSize = value;
        } catch (err) {
          throwSanitizedError(err);
        }
      }

+     get frequencyBinCount() {
+       if (!(this instanceof AnalyserNode)) {
+         throw new TypeError('Invalid Invocation: Value of \'this\' must be of type \'AnalyserNode\'');
+       }
+
+       return this[kNapiObj].frequencyBinCount;
+     }
+
+     get minDecibels() {
+       if (!(this instanceof AnalyserNode)) {
+         throw new TypeError('Invalid Invocation: Value of \'this\' must be of type \'AnalyserNode\'');
+       }
+
+       return this[kNapiObj].minDecibels;
+     }
+
      set minDecibels(value) {
+       if (!(this instanceof AnalyserNode)) {
+         throw new TypeError('Invalid Invocation: Value of \'this\' must be of type \'AnalyserNode\'');
+       }
+
+       value = conversions['double'](value, {
+         context: `Failed to set the 'minDecibels' property on 'AnalyserNode': Value`,
+       });
+
        try {
-         super.minDecibels = value;
+         this[kNapiObj].minDecibels = value;
        } catch (err) {
          throwSanitizedError(err);
        }
      }

+     get maxDecibels() {
+       if (!(this instanceof AnalyserNode)) {
+         throw new TypeError('Invalid Invocation: Value of \'this\' must be of type \'AnalyserNode\'');
+       }
+
+       return this[kNapiObj].maxDecibels;
+     }
+
      set maxDecibels(value) {
+       if (!(this instanceof AnalyserNode)) {
+         throw new TypeError('Invalid Invocation: Value of \'this\' must be of type \'AnalyserNode\'');
+       }
+
+       value = conversions['double'](value, {
+         context: `Failed to set the 'maxDecibels' property on 'AnalyserNode': Value`,
+       });
+
        try {
-         super.maxDecibels = value;
+         this[kNapiObj].maxDecibels = value;
        } catch (err) {
          throwSanitizedError(err);
        }
      }

+     get smoothingTimeConstant() {
+       if (!(this instanceof AnalyserNode)) {
+         throw new TypeError('Invalid Invocation: Value of \'this\' must be of type \'AnalyserNode\'');
+       }
+
+       return this[kNapiObj].smoothingTimeConstant;
+     }
+
      set smoothingTimeConstant(value) {
+       if (!(this instanceof AnalyserNode)) {
+         throw new TypeError('Invalid Invocation: Value of \'this\' must be of type \'AnalyserNode\'');
+       }
+
+       value = conversions['double'](value, {
+         context: `Failed to set the 'smoothingTimeConstant' property on 'AnalyserNode': Value`,
+       });
+
        try {
-         super.smoothingTimeConstant = value;
+         this[kNapiObj].smoothingTimeConstant = value;
        } catch (err) {
          throwSanitizedError(err);
        }
      }

-     // methods
-
-     getFloatFrequencyData(...args) {
+     getFloatFrequencyData(array) {
+       if (!(this instanceof AnalyserNode)) {
+         throw new TypeError('Invalid Invocation: Value of \'this\' must be of type \'AnalyserNode\'');
+       }
+
+       if (arguments.length < 1) {
+         throw new TypeError(`Failed to execute 'getFloatFrequencyData' on 'AnalyserNode': 1 argument required, but only ${arguments.length} present`);
+       }
+
+       if (!(array instanceof Float32Array)) {
+         throw new TypeError(`Failed to execute 'getFloatFrequencyData' on 'AnalyserNode': Parameter 1 is not of type 'Float32Array'`);
+       }
+
        try {
-         return super.getFloatFrequencyData(...args);
+         return this[kNapiObj].getFloatFrequencyData(array);
        } catch (err) {
          throwSanitizedError(err);
        }
      }

-     getByteFrequencyData(...args) {
+     getByteFrequencyData(array) {
+       if (!(this instanceof AnalyserNode)) {
+         throw new TypeError('Invalid Invocation: Value of \'this\' must be of type \'AnalyserNode\'');
+       }
+
+       if (arguments.length < 1) {
+         throw new TypeError(`Failed to execute 'getByteFrequencyData' on 'AnalyserNode': 1 argument required, but only ${arguments.length} present`);
+       }
+
+       if (!(array instanceof Uint8Array)) {
+         throw new TypeError(`Failed to execute 'getByteFrequencyData' on 'AnalyserNode': Parameter 1 is not of type 'Uint8Array'`);
+       }
+
        try {
-         return super.getByteFrequencyData(...args);
+         return this[kNapiObj].getByteFrequencyData(array);
        } catch (err) {
          throwSanitizedError(err);
        }
      }

-     getFloatTimeDomainData(...args) {
+     getFloatTimeDomainData(array) {
+       if (!(this instanceof AnalyserNode)) {
+         throw new TypeError('Invalid Invocation: Value of \'this\' must be of type \'AnalyserNode\'');
+       }
+
+       if (arguments.length < 1) {
+         throw new TypeError(`Failed to execute 'getFloatTimeDomainData' on 'AnalyserNode': 1 argument required, but only ${arguments.length} present`);
+       }
+
+       if (!(array instanceof Float32Array)) {
+         throw new TypeError(`Failed to execute 'getFloatTimeDomainData' on 'AnalyserNode': Parameter 1 is not of type 'Float32Array'`);
+       }
+
        try {
-         return super.getFloatTimeDomainData(...args);
+         return this[kNapiObj].getFloatTimeDomainData(array);
        } catch (err) {
          throwSanitizedError(err);
        }
      }

-     getByteTimeDomainData(...args) {
+     getByteTimeDomainData(array) {
+       if (!(this instanceof AnalyserNode)) {
+         throw new TypeError('Invalid Invocation: Value of \'this\' must be of type \'AnalyserNode\'');
+       }
+
+       if (arguments.length < 1) {
+         throw new TypeError(`Failed to execute 'getByteTimeDomainData' on 'AnalyserNode': 1 argument required, but only ${arguments.length} present`);
+       }
+
+       if (!(array instanceof Uint8Array)) {
+         throw new TypeError(`Failed to execute 'getByteTimeDomainData' on 'AnalyserNode': Parameter 1 is not of type 'Uint8Array'`);
+       }
+
        try {
-         return super.getByteTimeDomainData(...args);
+         return this[kNapiObj].getByteTimeDomainData(array);
        } catch (err) {
          throwSanitizedError(err);
        }
@@ -132,8 +319,35 @@ module.exports = (NativeAnalyserNode) => {

    }

-   return AnalyserNode;
- };
+   Object.defineProperties(AnalyserNode, {
+     length: {
+       __proto__: null,
+       writable: false,
+       enumerable: false,
+       configurable: true,
+       value: 1,
+     },
+   });

+   Object.defineProperties(AnalyserNode.prototype, {
+     [Symbol.toStringTag]: {
+       __proto__: null,
+       writable: false,
+       enumerable: false,
+       configurable: true,
+       value: 'AnalyserNode',
+     },

-
+     fftSize: kEnumerableProperty,
+     frequencyBinCount: kEnumerableProperty,
+     minDecibels: kEnumerableProperty,
+     maxDecibels: kEnumerableProperty,
+     smoothingTimeConstant: kEnumerableProperty,
+     getFloatFrequencyData: kEnumerableProperty,
+     getByteFrequencyData: kEnumerableProperty,
+     getFloatTimeDomainData: kEnumerableProperty,
+     getByteTimeDomainData: kEnumerableProperty,
+   });
+
+   return AnalyserNode;
+ };
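A short usage sketch (not part of the diff) exercising the checks the rewritten facade adds: the constructor now validates the argument count and the `BaseAudioContext` type before delegating to the napi object, and the `get*Data()` methods validate their typed-array argument:

```js
// Illustrative only; mirrors the guards visible in the facade above.
import { AudioContext, AnalyserNode } from 'node-web-audio-api';

const ctx = new AudioContext();
const analyser = new AnalyserNode(ctx, { fftSize: 1024, smoothingTimeConstant: 0.5 });

const freqBins = new Float32Array(analyser.frequencyBinCount); // 512 bins for fftSize 1024
analyser.getFloatFrequencyData(freqBins);

const byteBins = new Uint8Array(analyser.frequencyBinCount);
analyser.getByteFrequencyData(byteBins);

try {
  new AnalyserNode({}); // argument 1 is not a BaseAudioContext
} catch (err) {
  console.log(err instanceof TypeError); // true, per the constructor guard
}

await ctx.close();
```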