node-web-audio-api 0.9.0 → 0.11.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/CHANGELOG.md +10 -0
- package/README.md +18 -2
- package/monkey-patch.js +14 -55
- package/node-web-audio-api.darwin-arm64.node +0 -0
- package/node-web-audio-api.darwin-x64.node +0 -0
- package/node-web-audio-api.linux-arm-gnueabihf.node +0 -0
- package/node-web-audio-api.linux-x64-gnu.node +0 -0
- package/node-web-audio-api.win32-arm64-msvc.node +0 -0
- package/node-web-audio-api.win32-x64-msvc.node +0 -0
- package/package.json +2 -2
package/CHANGELOG.md
CHANGED

@@ -1,3 +1,13 @@
+## v0.11.0
+
+- Update upstream crate to v0.32
+- Implement AudioDestination API
+- Make decodeAudioData(arrayBuffer) API compliant (drop `load` helper)
+
+## v0.10.0
+
+- Update upstream crate to v0.31
+
 ## v0.9.0
 
 - Update upstream crate to v0.30

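The last changelog entry above drops the non-standard `load` helper in favour of a spec-compliant `decodeAudioData(arrayBuffer)` (the helper's removal is visible in the `monkey-patch.js` hunks below). A minimal migration sketch, assuming an ES module (`.mjs`) script and an illustrative file path; only the `AudioContext` import comes from the package, the rest is plain Node.js:

```js
import { readFile } from 'node:fs/promises';
import { AudioContext } from 'node-web-audio-api';

const audioContext = new AudioContext();

// the removed `load(path)` helper only returned a `{ path }` token to mimic an XHR call;
// since v0.11.0, decodeAudioData expects a real ArrayBuffer, as in browsers
const fileBuffer = await readFile('./sound.wav'); // illustrative path
const arrayBuffer = fileBuffer.buffer.slice(
  fileBuffer.byteOffset,
  fileBuffer.byteOffset + fileBuffer.byteLength,
);

const audioBuffer = await audioContext.decodeAudioData(arrayBuffer);
console.log(`decoded ${audioBuffer.duration.toFixed(2)}s of audio`);
```
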
package/README.md
CHANGED

@@ -64,11 +64,11 @@ npm run build
 node examples/granular-scrub.mjs
 ```
 
+If
+
 ## Caveats
 
 - The async methods are not trully async for now and are just patched on the JS side. This will evolve once the "trully" async version of the methods are implemented in the upstream library.
-- On Linux systems, the audio backend is currently Alsa, which limits the number of online `AudioContext` to 1. This is subject to change in the future.
-- On Raspberry Pi, the default render quantum size (128) is too small and underruns occurs frequently. To prevent that, if you do not explicitely provide a latency hint in the AudioContext options, the value is automatically set to 'playback' which uses a buffer of 1024 samples (~21ms at 48000Hz). While this is not per se spec compliant, it allows usage of the library in a more user friendly manner. In the future, this might change according to the support of other audio backend.
 - On Raspberry Pi, the `Linux arm gnueabihf` binary provided only works on 32bit OS. We will provide a version for the 64 bit OS in the future.
 
 ## Supported Platforms

@@ -83,6 +83,22 @@ node examples/granular-scrub.mjs
 | Linux arm gnueabihf (RPi) | ✓ | ✓ |
 
 
+## Notes for Linux users
+
+Using the library on Linux with the ALSA backend might lead to unexpected cranky sound with the default render size (i.e. 128 frames). In such cases, a simple workaround is to pass the `playback` latency hint when creating the audio context, which will increase the render size to 1024 frames:
+
+```js
+const audioContext = new AudioContext({ latencyHint: 'playback' });
+```
+
+You can pass the `WEB_AUDIO_LATENCY=playback` env variable to all examples to create the audio context with the playback latency hint, e.g.:
+
+```sh
+WEB_AUDIO_LATENCY=playback node examples/amplitude-modulation.mjs
+```
+
+For real-time and interactive applications where low latency is crucial, you should instead rely on the JACK backend provided by `cpal`. By default the audio context will use that backend if a running JACK server is found.
+
 ### Manual Build
 
 If prebuilt binaries are not shippped for your platform, you will need to:

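The `WEB_AUDIO_LATENCY=playback` variable documented above is only an environment variable, so each example has to forward it to the `AudioContext` options itself. A rough sketch of that pattern, with the fallback to `'interactive'` being an assumption rather than something quoted from the package:

```js
import { AudioContext } from 'node-web-audio-api';

// forward the env variable to the latencyHint option; the 'interactive' default is assumed here
const latencyHint = process.env.WEB_AUDIO_LATENCY === 'playback' ? 'playback' : 'interactive';
const audioContext = new AudioContext({ latencyHint });

console.log(audioContext.sampleRate);
```
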
package/monkey-patch.js
CHANGED

@@ -29,34 +29,6 @@ let contextIds = {
 let enumerateDevicesSync = null;
 
 function handleDefaultOptions(options, kind) {
-  if (platform === 'linux') {
-    const list = enumerateDevicesSync();
-    const jackDevice = list.find(device => device.kind === kind && device.label === 'jack');
-
-    if (jackDevice === undefined) {
-      // throw meaningfull error if several contexts are created on linux,
-      // because of alsa backend we currently use
-      if (contextIds[kind] === 1) {
-        throw new Error(`[node-web-audio-api] node-web-audio-api uses alsa as backend, therefore only one context or audio input stream can be safely created`);
-      }
-
-      // force latencyHint to "playback" on RPi if not explicitely defined
-      if (arch === 'arm') {
-        if (kind === 'audiooutput' && !('latencyHint' in options)) {
-          options.latencyHint = 'playback';
-        }
-      }
-    } else {
-      // default to jack if jack source or sink is found
-      const deviceKey = kind === 'audioinput' ? 'deviceId' : 'sinkId';
-
-      if (!(deviceKey in options)) {
-        console.log(`> JACK ${kind} device found, use as default`);
-        options[deviceKey] = jackDevice.deviceId;
-      }
-    }
-  }
-
   // increment contextIds as they are used to keep the process awake
   contextIds[kind] += 1;

@@ -118,10 +90,9 @@ function patchAudioContext(nativeBinding) {
     }
 
     decodeAudioData(audioData) {
-      if (!
-      throw new Error(
+      if (!audioData instanceof ArrayBuffer) {
+        throw new Error('Invalid argument, please provide an ArrayBuffer');
       }
-
       try {
         const audioBuffer = super.decodeAudioData(audioData);
         return Promise.resolve(audioBuffer);

@@ -139,22 +110,24 @@ function patchOfflineAudioContext(nativeBinding) {
     constructor(...args) {
       // handle initialisation with either an options object or a sequence of parameters
       // https://webaudio.github.io/web-audio-api/#dom-offlineaudiocontext-constructor-contextoptions-contextoptions
-      if (
+      if (isPlainObject(args[0])
         && 'numberOfChannels' in args[0] && 'length' in args[0] && 'sampleRate' in args[0]
       ) {
         const { numberOfChannels, length, sampleRate } = args[0];
         args = [numberOfChannels, length, sampleRate];
       }
 
-
-
-
-      throw new NotSupportedError(`Unsupported value for
-      } else if (!
-      throw new NotSupportedError(`Unsupported value for
+      const [numberOfChannels, length, sampleRate] = args;
+
+      if (!isPositiveInt(numberOfChannels)) {
+        throw new NotSupportedError(`Unsupported value for numberOfChannels: ${numberOfChannels}`);
+      } else if (!isPositiveInt(length)) {
+        throw new NotSupportedError(`Unsupported value for length: ${length}`);
+      } else if (!isPositiveNumber(sampleRate)) {
+        throw new NotSupportedError(`Unsupported value for sampleRate: ${sampleRate}`);
       }
 
-      super(
+      super(numberOfChannels, length, sampleRate);
     }
 
     // promisify sync APIs

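For reference, the constructor patched in the hunk above accepts either an options object or positional arguments; both are normalized to the positional form before `super()` is called. A short sketch with illustrative values (one second of stereo audio at 48 kHz), where the two contexts end up constructed equivalently:

```js
import { OfflineAudioContext } from 'node-web-audio-api';

// options-object form, per the constructor link in the code comment above
const fromOptions = new OfflineAudioContext({
  numberOfChannels: 2,
  length: 48000,
  sampleRate: 48000,
});

// positional form, validated by the isPositiveInt / isPositiveNumber checks
const fromArgs = new OfflineAudioContext(2, 48000, 48000);

console.log(fromOptions.sampleRate === fromArgs.sampleRate); // true
```
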
@@ -170,10 +143,9 @@ function patchOfflineAudioContext(nativeBinding) {
     }
 
     decodeAudioData(audioData) {
-      if (!
-      throw new Error(
+      if (!audioData instanceof ArrayBuffer) {
+        throw new Error('Invalid argument, please provide an ArrayBuffer');
       }
-
       try {
         const audioBuffer = super.decodeAudioData(audioData);
         return Promise.resolve(audioBuffer);

@@ -186,16 +158,6 @@ function patchOfflineAudioContext(nativeBinding) {
   return OfflineAudioContext;
 }
 
-// dumb method provided to mock an xhr call and mimick browser's API
-// see also `AudioContext.decodeAudioData`
-function load(path) {
-  if (!fs.existsSync(path)) {
-    throw new Error(`File not found: "${path}"`);
-  }
-
-  return { path };
-};
-
 module.exports = function monkeyPatch(nativeBinding) {
   nativeBinding.AudioContext = patchAudioContext(nativeBinding);
   nativeBinding.OfflineAudioContext = patchOfflineAudioContext(nativeBinding);

@@ -218,8 +180,5 @@ module.exports = function monkeyPatch(nativeBinding) {
     return Promise.resolve(stream);
   }
 
-  // utils
-  nativeBinding.load = load;
-
   return nativeBinding;
 }

package/node-web-audio-api.darwin-arm64.node
Binary file

package/node-web-audio-api.darwin-x64.node
Binary file

package/node-web-audio-api.linux-arm-gnueabihf.node
Binary file

package/node-web-audio-api.linux-x64-gnu.node
Binary file

package/node-web-audio-api.win32-arm64-msvc.node
Binary file

package/node-web-audio-api.win32-x64-msvc.node
Binary file

package/package.json
CHANGED

@@ -1,6 +1,6 @@
 {
   "name": "node-web-audio-api",
-  "version": "0.9.0",
+  "version": "0.11.0",
   "author": "Benjamin Matuszewski",
   "description": "Node.js bindings for web-audio-api-rs using napi-rs",
   "exports": {

@@ -32,7 +32,7 @@
   "scripts": {
     "artifacts": "napi artifacts",
     "build": "npm run generate && napi build --platform --release",
-    "build:jack": "npm run generate && napi build --platform --features
+    "build:jack": "npm run generate && napi build --platform --features jack --release",
     "build:debug": "npm run generate && napi build --platform",
     "check": "cargo fmt && cargo clippy",
     "generate": "node generator/index.mjs && cargo fmt",