node-web-audio-api 0.7.0 → 0.8.0

This diff shows the changes between two publicly released versions of the package, as they appear in the public registry. It is provided for informational purposes only.
package/CHANGELOG.md CHANGED
@@ -1,8 +1,12 @@
+ ## v0.8.0
+
+ - Implement MediaDevices enumerateDevices and getUserMedia
+ - Use jack as default output if exists on linux
+
  ## v0.7.0

  - Improve readme & doc
  - Fix AudioParam method names
- -

  ## v0.6.0 - Feb 2023

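To make the two new changelog entries concrete, here is a minimal sketch of how the new MediaDevices entry points might be exercised. It assumes the package re-exports `mediaDevices` from its root entry point (the `simple-test.mjs` change further down imports it exactly that way); the device fields shown (`deviceId`, `kind`, `label`) are the ones the new `monkey-patch.js` code reads, so the output shape is inferred from this diff rather than from official documentation.

```js
// sketch.mjs — assumes a built install of node-web-audio-api v0.8.0
import { mediaDevices } from 'node-web-audio-api';

// enumerateDevices() is now promisified; each entry is expected to expose at
// least deviceId, kind ('audioinput' | 'audiooutput') and label (e.g. 'jack')
const devices = await mediaDevices.enumerateDevices();
console.log(devices.filter(d => d.kind === 'audioinput'));

// getUserMedia() requires an options object; only audio capture is handled here
const stream = await mediaDevices.getUserMedia({ audio: true });
console.log('got an input stream:', stream);
```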
package/README.md CHANGED
@@ -4,10 +4,10 @@

  Node.js bindings for the Rust implementation of the Web Audio API Specification

- - see [`orottier/web-audio-api-rs`](https://github.com/orottier/web-audio-api-rs/) for the "real" audio guts
- - use [`napi-rs`](https://github.com/napi-rs/napi-rs/) for the Node.js bindigs
+ The goal of this library is to provide an implementation that is both efficient and _exactly_ matches the browsers' API.

- The goal of the library is to provide an implementation that is both efficient and _exactly_ matches the browsers' API.
+ - see [`orottier/web-audio-api-rs`](https://github.com/orottier/web-audio-api-rs/) for the "real" audio guts
+ - use [`napi-rs`](https://github.com/napi-rs/napi-rs/) for the Node.js bindings

  ## Install

@@ -96,8 +96,9 @@ curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh
  2. Install and build from github

  ```sh
- npm install --save git+https://github.com/b-ma/node-web-audio-api.git
+ npm install --save git+https://github.com/ircam-ismm/node-web-audio-api.git
  cd node_modules/node-web-audio-api
+ npm install
  npm run build
  ```

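After the install-from-github steps above (now pointing at the `ircam-ismm` organisation and including the extra `npm install` inside the module), a quick smoke test in the spirit of the package's own `simple-test` files can confirm that the native binding loads. This is only a sketch; `sampleRate` and `currentTime` are the context properties already used elsewhere in this diff.

```js
// smoke-test.cjs — minimal check that the binding loads and a context starts
const { AudioContext } = require('node-web-audio-api');

const audioContext = new AudioContext();
console.log('sample rate:', audioContext.sampleRate);

setInterval(() => {
  // currentTime advances once the audio callback is running
  console.log('currentTime:', audioContext.currentTime);
}, 1000);
```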
package/index.cjs CHANGED
@@ -83,33 +83,8 @@ if (!nativeBinding) {
    throw new Error(`Failed to load native binding for OS: ${platform}, architecture: ${arch}`);
  }

- const {
-   patchAudioContext,
-   patchOfflineAudioContext,
-   load,
- } = require('./monkey-patch.js');
-
- nativeBinding.AudioContext = patchAudioContext(nativeBinding.AudioContext);
- nativeBinding.OfflineAudioContext = patchOfflineAudioContext(nativeBinding.OfflineAudioContext);
- nativeBinding.load = load;
-
- // ------------------------------------------------------------------
- // monkey patch proto media devices API
- // @todo - review
- // ------------------------------------------------------------------
- class MediaStream extends nativeBinding.Microphone {};
- // const Microphone = nativeBinding.Microphone;
- nativeBinding.Microphone = null;
-
- nativeBinding.mediaDevices = {}
- nativeBinding.mediaDevices.getUserMedia = function getUserMedia(options) {
-   if (options && options.audio === true) {
-     const mic = new MediaStream();
-     return Promise.resolve(mic);
-   } else {
-     throw new NotSupportedError(`Only { audio: true } is currently supported`);
-   }
- }
+ const monkeyPatch = require('./monkey-patch.js');
+ nativeBinding = monkeyPatch(nativeBinding);

  module.exports = nativeBinding;

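The refactor above replaces the inline patching in `index.cjs` with a single `monkeyPatch(nativeBinding)` call; the full implementation follows in `monkey-patch.js` below. In condensed form, the loading logic now looks roughly like this (the `.node` filename here is illustrative only, the real code resolves a platform-specific prebuilt binary):

```js
// condensed shape of index.cjs after the change (illustrative sketch)
const { platform, arch } = process;

// hypothetical filename; the real code selects the proper prebuilt binary
let nativeBinding = require(`./node-web-audio-api.${platform}-${arch}.node`);

// all decoration (AudioContext, OfflineAudioContext, mediaDevices, load)
// is now funnelled through one function exported by monkey-patch.js
const monkeyPatch = require('./monkey-patch.js');
nativeBinding = monkeyPatch(nativeBinding);

module.exports = nativeBinding;
```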
package/monkey-patch.js CHANGED
@@ -20,32 +20,58 @@ class NotSupportedError extends Error {
  }

  const { platform, arch } = process;
- let contextId = 0;

- function patchAudioContext(NativeAudioContext) {
-   class AudioContext extends NativeAudioContext {
-     constructor(options = {}) {
+ let contextIds = {
+   audioinput: 0,
+   audiooutput: 0,
+ };

-       // special handling of options on linux, these are not spec compliant but are
-       // ment to be more user-friendly than what we have now (is subject to change)
-       if (platform === 'linux') {
-         // throw meaningfull error if several contexts are created on linux,
-         // because of alsa backend we currently use
-         if (contextId === 1) {
-           throw new Error(`[node-web-audio-api] node-web-audio-api currently uses alsa as backend, therefore only one context can be safely created`);
-         }
+ let enumerateDevicesSync = null;

-         // fallback latencyHint to "playback" on RPi if not explicitely defined
-         if (arch === 'arm') {
-           if (!('latencyHint' in options)) {
-             options.latencyHint = 'playback';
+ function handleDefaultOptions(options, kind) {
+   if (platform === 'linux') {
+     const list = enumerateDevicesSync();
+     const jackDevice = list.find(device => device.kind === kind && device.label === 'jack');
+
+     if (jackDevice === undefined) {
+       // throw meaningfull error if several contexts are created on linux,
+       // because of alsa backend we currently use
+       if (contextIds[kind] === 1) {
+         throw new Error(`[node-web-audio-api] node-web-audio-api uses alsa as backend, therefore only one context or audio input stream can be safely created`);
+       }
+
+       // force latencyHint to "playback" on RPi if not explicitely defined
+       if (arch === 'arm') {
+         if (kind === 'audiooutput' && !('latencyHint' in options)) {
+           options.latencyHint = 'playback';
          }
        }
+     } else {
+       // default to jack if jack source or sink is found
+       const deviceKey = kind === 'audioinput' ? 'deviceId' : 'sinkId';

+       if (!(deviceKey in options)) {
+         console.log(`> JACK ${kind} device found, use as default`);
+         options[deviceKey] = jackDevice.deviceId;
+       }
+     }
+   }
+
+   // increment contextIds as they are used to keep the process awake
+   contextIds[kind] += 1;
+
+   return options;
+ }
+
+ function patchAudioContext(nativeBinding) {
+   class AudioContext extends nativeBinding.AudioContext {
+     constructor(options = {}) {
+       // special handling of options on linux, these are not spec compliant but are
+       // ment to be more user-friendly than what we have now (is subject to change)
+       options = handleDefaultOptions(options, 'audiooutput');
        super(options);
        // prevent garbage collection
-       const processId = `__AudioContext_${contextId}`;
+       const processId = `__AudioContext_${contextIds['audiooutput']}`;
        process[processId] = this;

        Object.defineProperty(this, '__processId', {
@@ -55,7 +81,6 @@ function patchAudioContext(NativeAudioContext) {
          configurable: false,
        });

-       contextId += 1;
        // keep process awake
        const keepAwakeId = setInterval(() => {}, 10000);
        Object.defineProperty(this, '__keepAwakeId', {
@@ -83,6 +108,15 @@ function patchAudioContext(NativeAudioContext) {
        return Promise.resolve(super.close());
      }

+     setSinkId(sinkId) {
+       try {
+         super.setSinkId(sinkId);
+         Promise.resolve(undefined);
+       } catch (err) {
+         Promise.reject(err);
+       }
+     }
+
      decodeAudioData(audioData) {
        if (!isPlainObject(audioData) || !('path' in audioData)) {
          throw new Error(`Invalid argument, please consider using the load helper`);
@@ -100,8 +134,8 @@ function patchAudioContext(NativeAudioContext) {
    return AudioContext;
  }

- function patchOfflineAudioContext(NativeOfflineAudioContext) {
-   class OfflineAudioContext extends NativeOfflineAudioContext {
+ function patchOfflineAudioContext(nativeBinding) {
+   class OfflineAudioContext extends nativeBinding.OfflineAudioContext {
      constructor(...args) {
        // handle initialisation with either an options object or a sequence of parameters
        // https://webaudio.github.io/web-audio-api/#dom-offlineaudiocontext-constructor-contextoptions-contextoptions
@@ -152,15 +186,40 @@ function patchOfflineAudioContext(NativeOfflineAudioContext) {
    return OfflineAudioContext;
  }

- module.exports.patchAudioContext = patchAudioContext;
- module.exports.patchOfflineAudioContext = patchOfflineAudioContext;
-
  // dumb method provided to mock an xhr call and mimick browser's API
  // see also `AudioContext.decodeAudioData`
- module.exports.load = function(path) {
+ function load(path) {
    if (!fs.existsSync(path)) {
      throw new Error(`File not found: "${path}"`);
    }

    return { path };
  };
+
+ module.exports = function monkeyPatch(nativeBinding) {
+   nativeBinding.AudioContext = patchAudioContext(nativeBinding);
+   nativeBinding.OfflineAudioContext = patchOfflineAudioContext(nativeBinding);
+
+   // Promisify MediaDevices API
+   enumerateDevicesSync = nativeBinding.mediaDevices.enumerateDevices;
+   nativeBinding.mediaDevices.enumerateDevices = async function enumerateDevices(options) {
+     const list = enumerateDevicesSync();
+     return Promise.resolve(list);
+   }
+
+   const getUserMediaSync = nativeBinding.mediaDevices.getUserMedia;
+   nativeBinding.mediaDevices.getUserMedia = async function getUserMedia(options) {
+     if (options === undefined) {
+       throw new TypeError("Failed to execute 'getUserMedia' on 'MediaDevices': audio must be requested")
+     }
+
+     options = handleDefaultOptions(options, 'audioinput');
+     const stream = getUserMediaSync(options);
+     return Promise.resolve(stream);
+   }
+
+   // utils
+   nativeBinding.load = load;
+
+   return nativeBinding;
+ }
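The practical effect of `handleDefaultOptions` above, sketched from the caller's side: on Linux, the patched `AudioContext` constructor and `getUserMedia` first consult `enumerateDevices()`, prefer a device labelled `jack` when one is reported, and otherwise fall back to the single-context ALSA restriction. Explicitly passed options always win; the `sinkId` value in the comment below is a made-up placeholder.

```js
// behaviour sketch for Linux, based on the handleDefaultOptions logic above
const { AudioContext } = require('node-web-audio-api');

// No sinkId given: if enumerateDevices() reports a device labelled 'jack',
// it becomes the default output; otherwise the alsa backend is used and
// only one AudioContext (or input stream) can safely be created.
const ctx = new AudioContext();

// Explicit options are left untouched, e.g. an explicit sinkId or, on arm
// (Raspberry Pi), a latencyHint other than the forced 'playback' default:
// const ctx2 = new AudioContext({ sinkId: 'hw:1,0', latencyHint: 'interactive' }); // placeholder sinkId
```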
Binary file
package/package.json CHANGED
@@ -1,6 +1,6 @@
  {
    "name": "node-web-audio-api",
-   "version": "0.7.0",
+   "version": "0.8.0",
    "author": "Benjamin Matuszewski",
    "description": "Node.js bindings for web-audio-api-rs using napi-rs",
    "exports": {
@@ -12,6 +12,10 @@
    "keywords": [
      "audio",
      "web audio api",
+     "webaudio",
+     "sound",
+     "music",
+     "dsp",
      "rust",
      "n-api"
    ],
@@ -28,22 +32,26 @@
    "scripts": {
      "artifacts": "napi artifacts",
      "build": "npm run generate && napi build --platform --release",
+     "build:jack": "npm run generate && napi build --platform --features 'web-audio-api/cpal-jack' --release",
      "build:debug": "npm run generate && napi build --platform",
      "check": "cargo fmt && cargo clippy",
      "generate": "node generator/index.mjs && cargo fmt",
      "lint": "eslint monkey-patch.js index.cjs index.mjs && eslint examples/*.mjs",
      "preversion": "yarn install && npm run generate",
-     "postversion": "cargo bump $npm_package_version && git commit -am \"v$npm_package_version\" && node bin/check-changelog.mjs"
+     "postversion": "cargo bump $npm_package_version && git commit -am \"v$npm_package_version\" && node bin/check-changelog.mjs",
+     "test": "mocha"
    },
    "devDependencies": {
      "@ircam/eslint-config": "^1.3.0",
      "@ircam/sc-gettime": "^1.0.0",
      "@sindresorhus/slugify": "^2.1.1",
      "camelcase": "^7.0.1",
+     "chai": "^4.3.7",
      "chalk": "^5.2.0",
      "cli-table": "^0.3.11",
      "dotenv": "^16.0.3",
      "eslint": "^8.32.0",
+     "mocha": "^10.2.0",
      "node-ssh": "^13.0.0",
      "octokit": "^2.0.11",
      "ping": "^0.4.2",
package/simple-test.cjs CHANGED
@@ -1,7 +1,6 @@
- const { AudioContext } = require('./index.cjs');
+ const { AudioContext, mediaDevices } = require('./index.cjs');

  const audioContext = new AudioContext();
- process.audioContext = audioContext;

  setInterval(() => {
    const now = audioContext.currentTime;
package/simple-test.mjs CHANGED
@@ -1,4 +1,4 @@
- import { AudioContext } from './index.mjs';
+ import { AudioContext, mediaDevices } from './index.mjs';

  const audioContext = new AudioContext();

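Both simple-test files now import `mediaDevices` without using it in the lines shown. Here is a hedged sketch of what wiring the captured stream into the graph could look like; `createMediaStreamSource` is the standard Web Audio API method for this and is assumed to be exposed by the binding, it does not appear anywhere in this diff.

```js
// hypothetical extension of simple-test.mjs: route the microphone to the output
import { AudioContext, mediaDevices } from './index.mjs';

const audioContext = new AudioContext();
const stream = await mediaDevices.getUserMedia({ audio: true });

// assumption: the binding exposes createMediaStreamSource as in the browser API
const source = audioContext.createMediaStreamSource(stream);
source.connect(audioContext.destination);
```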
@@ -0,0 +1,60 @@
+ import { assert } from 'chai';
+ import { AudioBuffer, AudioContext } from '../index.mjs';
+
+ describe('# AudioBuffer', () => {
+   let audioContext;
+
+   before(() => {
+     audioContext = new AudioContext();
+   });
+
+   after(() => {
+     audioContext.close();
+   });
+
+   describe(`## audioContext.createBuffer`, () => {
+     it('should properly create audio buffer', () => {
+       const audioBuffer = audioContext.createBuffer(1, 100, audioContext.sampleRate);
+
+       assert.equal(audioBuffer.numberOfChannels, 1);
+       assert.equal(audioBuffer.length, 100);
+       assert.equal(audioBuffer.sampleRate, audioContext.sampleRate);
+     });
+
+     it('should properly fail if missing argument', () => {
+       assert.throws(() => {
+         const audioBuffer = audioContext.createBuffer(1, 100);
+       });
+     });
+   });
+
+   describe(`## new AudioBuffer(options)`, () => {
+     it('should properly create audio buffer', () => {
+       const audioBuffer = new AudioBuffer({
+         length: 100,
+         sampleRate: audioContext.sampleRate,
+       });
+
+       assert.equal(audioBuffer.numberOfChannels, 1);
+       assert.equal(audioBuffer.length, 100);
+       assert.equal(audioBuffer.sampleRate, audioContext.sampleRate);
+     });
+
+     it('should properly fail if missing argument', () => {
+       assert.throws(() => {
+         const audioBuffer = new AudioBuffer({ length: 100 });
+       });
+     });
+
+     it(`should have type error`, () => {
+       try {
+         new AudioBuffer(Date, 42);
+       } catch (err) {
+         console.log(err.type);
+         console.log(err.name);
+         console.log(err.message);
+         assert.fail('should be TypeError');
+       }
+     });
+   });
+ });
@@ -0,0 +1,58 @@
+ import { assert } from 'chai';
+
+ import { mediaDevices } from '../index.mjs';
+
+ describe('# mediaDevices.getUserMedia(options)', () => {
+   it('should fail if no argument given', async () => {
+     let failed = false;
+     try {
+       await mediaDevices.getUserMedia();
+     } catch (err) {
+       console.log(err.message);
+       failed = true;
+     }
+
+     if (!failed) { assert.fail(); }
+   });
+
+   // @todo - clean error message
+   it('should fail if argument is not an object', async () => {
+     let failed = false;
+     try {
+       await mediaDevices.getUserMedia(true);
+     } catch (err) {
+       console.log(err.message);
+       failed = true;
+     }
+
+     if (!failed) { assert.fail(); }
+   });
+
+   it('should fail if options.video', async () => {
+     let failed = false;
+     try {
+       await mediaDevices.getUserMedia({ video: true });
+     } catch (err) {
+       console.log(err.message);
+       failed = true;
+     }
+
+     if (!failed) { assert.fail(); }
+   });
+
+   it.only('should not fail if options.audio = true', async () => {
+     let failed = false;
+
+     try {
+       const stream = await mediaDevices.getUserMedia({ audio: true });
+       // console.log(stream instanceof mediaDevices.MediaStream);
+     } catch (err) {
+       console.log(err);
+       failed = true;
+     }
+
+     console.log(failed);
+
+     if (failed) { assert.fail('should not have failed'); }
+   });
+ });
@@ -1,70 +0,0 @@
1
- import { after, before, describe, it } from 'node:test';
2
- import assert from 'node:assert';
3
-
4
- import { AudioBuffer, AudioContext } from '../index.mjs';
5
-
6
- describe('AudioBuffer', () => {
7
- let audioContext;
8
-
9
- before(() => {
10
- console.log('before is run')
11
- audioContext = new AudioContext();
12
- });
13
-
14
- after(() => {
15
- audioContext.close();
16
- });
17
-
18
- describe(`audioContext.createBuffer`, () => {
19
- // it('should properly create audio buffer', () => {
20
- // const audioBuffer = audioContext.createBuffer(1, 100, audioContext.sampleRate);
21
-
22
- // assert.equal(audioBuffer.numberOfChannels, 1);
23
- // assert.equal(audioBuffer.length, 100);
24
- // assert.equal(audioBuffer.sampleRate, audioContext.sampleRate);
25
- // });
26
-
27
- // it('should properly fail if missing argument', () => {
28
- // assert.throws(() => {
29
- // const audioBuffer = audioContext.createBuffer(1, 100);
30
- // }, {
31
- // name: 'Error', // should be 'NotSupportedError'
32
- // message: 'AudioBuffer: Invalid options, sampleRate is required',
33
- // });
34
- // });
35
- });
36
-
37
- describe(`new AudioBuffer(options)`, () => {
38
- // it('should properly create audio buffer', () => {
39
- // const audioBuffer = new AudioBuffer({
40
- // length: 100,
41
- // sampleRate: audioContext.sampleRate,
42
- // });
43
-
44
- // assert.equal(audioBuffer.numberOfChannels, 1);
45
- // assert.equal(audioBuffer.length, 100);
46
- // assert.equal(audioBuffer.sampleRate, audioContext.sampleRate);
47
- // });
48
-
49
- // it('should properly fail if missing argument', () => {
50
- // assert.throws(() => {
51
- // const audioBuffer = new AudioBuffer({
52
- // length: 100,
53
- // });
54
- // }, {
55
- // name: 'Error', // should be 'NotSupportedError'
56
- // message: 'AudioBuffer: Invalid options, sampleRate is required',
57
- // });
58
- // });
59
-
60
- it(`should have type error`, () => {
61
- try {
62
- new AudioBuffer(Date, 42);
63
- } catch (err) {
64
- console.log(err.type)
65
- console.log(err.name)
66
- console.log(err.message)
67
- }
68
- });
69
- });
70
- });