@juandinella/audio-bands 0.1.1 → 0.3.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +136 -89
- package/dist/chunk-UAMH5Y33.js +344 -0
- package/dist/chunk-UAMH5Y33.js.map +1 -0
- package/dist/core-entry.cjs +371 -0
- package/dist/core-entry.cjs.map +1 -0
- package/dist/core-entry.d.cts +45 -0
- package/dist/core-entry.d.ts +45 -0
- package/dist/core-entry.js +9 -0
- package/dist/core-entry.js.map +1 -0
- package/dist/index.cjs +262 -126
- package/dist/index.cjs.map +1 -1
- package/dist/index.d.cts +2 -63
- package/dist/index.d.ts +2 -63
- package/dist/index.js +5 -203
- package/dist/index.js.map +1 -1
- package/dist/react-entry.cjs +464 -0
- package/dist/react-entry.cjs.map +1 -0
- package/dist/react-entry.d.cts +26 -0
- package/dist/react-entry.d.ts +26 -0
- package/dist/react-entry.js +100 -0
- package/dist/react-entry.js.map +1 -0
- package/dist/types-CiYwsfgy.d.cts +55 -0
- package/dist/types-CiYwsfgy.d.ts +55 -0
- package/package.json +23 -4
package/README.md
CHANGED
|
@@ -1,18 +1,20 @@
|
|
|
1
1
|
# audio-bands
|
|
2
2
|
|
|
3
|
-
|
|
3
|
+
[![npm version](https://img.shields.io/npm/v/@juandinella/audio-bands)](https://www.npmjs.com/package/@juandinella/audio-bands)
|
|
4
|
+
|
|
5
|
+
**Demo**: [audio-bands.juandinella.com](https://audio-bands.juandinella.com)
|
|
6
|
+
|
|
7
|
+
Headless audio analysis for the browser. Get normalized `bass`, `mid`, `high`, custom named bands, raw FFT bins, or mic waveform data without shipping a renderer.
|
|
4
8
|
|
|
5
9
|
```ts
|
|
6
10
|
const { bass, mid, high } = audio.getBands();
|
|
7
|
-
|
|
8
|
-
|
|
11
|
+
const custom = audio.getCustomBands();
|
|
9
12
|
const fft = audio.getFftData();
|
|
10
|
-
// Uint8Array(128) — raw frequency bins, 0–255 each
|
|
11
13
|
```
|
|
12
14
|
|
|
13
15
|
## Why
|
|
14
16
|
|
|
15
|
-
|
|
17
|
+
Most audio libraries either only play audio or immediately draw a canvas for you. This one stays lower level: it gives you usable analysis data and lets you decide how to render it.
|
|
16
18
|
|
|
17
19
|
## Install
|
|
18
20
|
|
|
@@ -20,80 +22,75 @@ Every audio visualization library either handles only playback (no analysis) or
|
|
|
20
22
|
npm install @juandinella/audio-bands
|
|
21
23
|
```
|
|
22
24
|
|
|
23
|
-
|
|
25
|
+
### Entry points
|
|
26
|
+
|
|
27
|
+
- `@juandinella/audio-bands`: main framework-agnostic export
|
|
28
|
+
- `@juandinella/audio-bands/core`: explicit core-only entry
|
|
29
|
+
- `@juandinella/audio-bands/react`: React hook
|
|
30
|
+
|
|
31
|
+
If you use the React hook, install `react` as well.
|
|
24
32
|
|
|
25
33
|
## Usage
|
|
26
34
|
|
|
27
35
|
### Vanilla JS
|
|
28
36
|
|
|
29
|
-
|
|
30
|
-
|
|
31
|
-
```js
|
|
37
|
+
```ts
|
|
32
38
|
import { AudioBands } from '@juandinella/audio-bands';
|
|
33
39
|
|
|
34
40
|
const audio = new AudioBands({
|
|
35
|
-
|
|
36
|
-
|
|
37
|
-
|
|
38
|
-
|
|
39
|
-
|
|
41
|
+
music: {
|
|
42
|
+
fftSize: 512,
|
|
43
|
+
smoothingTimeConstant: 0.7,
|
|
44
|
+
},
|
|
45
|
+
customBands: {
|
|
46
|
+
presence: { from: 0.25, to: 0.5 },
|
|
47
|
+
air: { from: 0.5, to: 1 },
|
|
48
|
+
},
|
|
49
|
+
onLoadError: (error) => console.error('track error', error),
|
|
50
|
+
onMicError: (error) => console.error('mic error', error),
|
|
40
51
|
});
|
|
41
52
|
|
|
42
53
|
await audio.load('/track.mp3');
|
|
43
54
|
|
|
44
|
-
// Call inside your animation loop
|
|
45
55
|
function loop() {
|
|
46
56
|
const { bass, mid, high, overall } = audio.getBands();
|
|
47
|
-
|
|
57
|
+
const custom = audio.getCustomBands();
|
|
58
|
+
const fft = audio.getFftData();
|
|
48
59
|
|
|
49
|
-
const fft = audio.getFftData(); // raw bins for spectrum visualizations
|
|
50
60
|
requestAnimationFrame(loop);
|
|
51
61
|
}
|
|
52
|
-
requestAnimationFrame(loop);
|
|
53
62
|
|
|
54
|
-
|
|
55
|
-
audio.destroy();
|
|
63
|
+
requestAnimationFrame(loop);
|
|
56
64
|
```
|
|
57
65
|
|
|
58
66
|
### React hook
|
|
59
67
|
|
|
60
68
|
```tsx
|
|
61
|
-
import { useAudioBands } from '@juandinella/audio-bands';
|
|
62
|
-
import { useEffect, useRef } from 'react';
|
|
69
|
+
import { useAudioBands } from '@juandinella/audio-bands/react';
|
|
63
70
|
|
|
64
71
|
function Visualizer() {
|
|
65
|
-
const {
|
|
66
|
-
|
|
67
|
-
|
|
68
|
-
|
|
69
|
-
|
|
70
|
-
|
|
71
|
-
|
|
72
|
-
|
|
73
|
-
|
|
74
|
-
|
|
75
|
-
|
|
76
|
-
|
|
77
|
-
|
|
78
|
-
|
|
79
|
-
|
|
80
|
-
ctx.clearRect(0, 0, canvas.width, canvas.height);
|
|
81
|
-
ctx.beginPath();
|
|
82
|
-
ctx.arc(canvas.width / 2, canvas.height / 2, 20 + bass * 80, 0, Math.PI * 2);
|
|
83
|
-
ctx.fill();
|
|
84
|
-
|
|
85
|
-
raf = requestAnimationFrame(loop);
|
|
86
|
-
}
|
|
87
|
-
|
|
88
|
-
raf = requestAnimationFrame(loop);
|
|
89
|
-
return () => cancelAnimationFrame(raf);
|
|
90
|
-
}, [getBands]);
|
|
72
|
+
const {
|
|
73
|
+
isPlaying,
|
|
74
|
+
hasTrack,
|
|
75
|
+
loadError,
|
|
76
|
+
micError,
|
|
77
|
+
loadTrack,
|
|
78
|
+
togglePlayPause,
|
|
79
|
+
toggleMic,
|
|
80
|
+
getBands,
|
|
81
|
+
getCustomBands,
|
|
82
|
+
} = useAudioBands({
|
|
83
|
+
customBands: {
|
|
84
|
+
presence: { from: 0.25, to: 0.5 },
|
|
85
|
+
},
|
|
86
|
+
});
|
|
91
87
|
|
|
92
88
|
return (
|
|
93
89
|
<>
|
|
94
|
-
<
|
|
90
|
+
<button onClick={() => loadTrack('/track.mp3')}>load</button>
|
|
95
91
|
<button onClick={togglePlayPause}>{isPlaying ? 'Pause' : 'Play'}</button>
|
|
96
92
|
<button onClick={toggleMic}>Toggle mic</button>
|
|
93
|
+
<pre>{JSON.stringify({ hasTrack, loadError, micError, ...getBands(), ...getCustomBands() }, null, 2)}</pre>
|
|
97
94
|
</>
|
|
98
95
|
);
|
|
99
96
|
}
|
|
@@ -102,85 +99,135 @@ function Visualizer() {
|
|
|
102
99
|
### Mic input
|
|
103
100
|
|
|
104
101
|
```ts
|
|
105
|
-
// Enable mic — browser will ask for permission
|
|
106
102
|
await audio.enableMic();
|
|
107
103
|
|
|
108
|
-
|
|
109
|
-
const
|
|
104
|
+
const micBands = audio.getBands('mic');
|
|
105
|
+
const micCustomBands = audio.getCustomBands('mic');
|
|
106
|
+
const waveform = audio.getWaveform();
|
|
107
|
+
```
|
|
108
|
+
|
|
109
|
+
## When To Use Bands Vs FFT
|
|
110
110
|
|
|
111
|
-
|
|
112
|
-
const waveform = audio.getWaveform(); // Uint8Array | null
|
|
111
|
+
Use `getBands()` when you want stable, simple control signals:
|
|
113
112
|
|
|
114
|
-
|
|
115
|
-
|
|
116
|
-
|
|
113
|
+
- pulsing a blob with low-end energy
|
|
114
|
+
- scaling UI based on overall intensity
|
|
115
|
+
- animating typography or CSS variables
|
|
116
|
+
- driving scenes where three broad zones are enough
|
|
117
|
+
|
|
118
|
+
Use `getCustomBands()` when the default bass/mid/high split is too coarse, but you still want named, high-level buckets:
|
|
119
|
+
|
|
120
|
+
- separate `presence`, `air`, or `sub`
|
|
121
|
+
- tune bands to your own design system or animation logic
|
|
122
|
+
- keep your render code semantic instead of index-based
|
|
123
|
+
|
|
124
|
+
Use `getFftData()` when you need bin-level detail:
|
|
125
|
+
|
|
126
|
+
- bar visualizers
|
|
127
|
+
- line spectrums
|
|
128
|
+
- log interpolation
|
|
129
|
+
- any renderer that maps directly over bins
|
|
130
|
+
|
|
131
|
+
Rule of thumb:
|
|
132
|
+
|
|
133
|
+
- `getBands()` for product UI
|
|
134
|
+
- `getCustomBands()` for art direction
|
|
135
|
+
- `getFftData()` for visualizers
|
|
117
136
|
|
|
118
137
|
## API
|
|
119
138
|
|
|
120
|
-
### `AudioBands`
|
|
139
|
+
### `AudioBands`
|
|
121
140
|
|
|
122
141
|
```ts
|
|
123
|
-
new AudioBands(
|
|
142
|
+
new AudioBands(options?: AudioBandsOptions)
|
|
124
143
|
```
|
|
125
144
|
|
|
126
|
-
|
|
127
|
-
|---|---|
|
|
128
|
-
| `load(url)` | Load and play an audio file. Resolves when playback starts. |
|
|
129
|
-
| `togglePlayPause()` | Toggle playback. |
|
|
130
|
-
| `enableMic()` | Request mic access and start analysis. |
|
|
131
|
-
| `disableMic()` | Stop mic stream and clean up. |
|
|
132
|
-
| `getBands(source?)` | Returns `Bands` for `'music'` (default) or `'mic'`. Call inside RAF. |
|
|
133
|
-
| `getFftData(source?)` | Returns raw `Uint8Array` of frequency bins (0–255) for `'music'` or `'mic'`. Call inside RAF. |
|
|
134
|
-
| `getWaveform()` | Returns raw time-domain `Uint8Array` from mic. Call inside RAF. |
|
|
135
|
-
| `destroy()` | Stop playback, release mic, close AudioContext. |
|
|
145
|
+
#### Methods
|
|
136
146
|
|
|
137
|
-
|
|
147
|
+
| Method | Description |
|
|
148
|
+
| ----------------------- | ----------- |
|
|
149
|
+
| `load(url)` | Load and play a track. Rejects with `AudioBandsError` on failure. |
|
|
150
|
+
| `togglePlayPause()` | Toggle the current track. |
|
|
151
|
+
| `enableMic()` | Request microphone access and start mic analysis. Rejects with `AudioBandsError` on failure. |
|
|
152
|
+
| `disableMic()` | Stop mic input and clean up the stream. |
|
|
153
|
+
| `getBands(source?)` | Returns normalized `{ bass, mid, high, overall }`. |
|
|
154
|
+
| `getCustomBands(source?)` | Returns normalized values for configured custom bands. |
|
|
155
|
+
| `getFftData(source?)` | Returns raw `Uint8Array` frequency bins. |
|
|
156
|
+
| `getWaveform()` | Returns raw mic time-domain data. |
|
|
157
|
+
| `getState()` | Returns the current playback/mic/error state. |
|
|
158
|
+
| `destroy()` | Stop playback, release the mic and close the `AudioContext`. |
|
|
138
159
|
|
|
139
|
-
|
|
160
|
+
### `useAudioBands()`
|
|
140
161
|
|
|
141
162
|
```ts
|
|
142
163
|
const {
|
|
143
164
|
isPlaying,
|
|
144
165
|
micActive,
|
|
166
|
+
hasTrack,
|
|
145
167
|
audioError,
|
|
168
|
+
loadError,
|
|
169
|
+
micError,
|
|
170
|
+
state,
|
|
146
171
|
loadTrack,
|
|
147
172
|
togglePlayPause,
|
|
148
173
|
toggleMic,
|
|
149
174
|
getBands,
|
|
175
|
+
getCustomBands,
|
|
150
176
|
getFftData,
|
|
151
177
|
getWaveform,
|
|
152
|
-
} = useAudioBands();
|
|
178
|
+
} = useAudioBands(options);
|
|
153
179
|
```
|
|
154
180
|
|
|
155
|
-
### `
|
|
181
|
+
### `AudioBandsOptions`
|
|
156
182
|
|
|
157
183
|
```ts
|
|
158
|
-
type
|
|
159
|
-
|
|
160
|
-
|
|
161
|
-
|
|
162
|
-
|
|
184
|
+
type AudioBandsOptions = {
|
|
185
|
+
music?: {
|
|
186
|
+
fftSize?: number;
|
|
187
|
+
smoothingTimeConstant?: number;
|
|
188
|
+
};
|
|
189
|
+
mic?: {
|
|
190
|
+
fftSize?: number;
|
|
191
|
+
smoothingTimeConstant?: number;
|
|
192
|
+
};
|
|
193
|
+
bandRanges?: {
|
|
194
|
+
bass?: { from: number; to: number };
|
|
195
|
+
mid?: { from: number; to: number };
|
|
196
|
+
high?: { from: number; to: number };
|
|
197
|
+
};
|
|
198
|
+
customBands?: Record<string, { from: number; to: number }>;
|
|
199
|
+
onError?: (error: AudioBandsError) => void;
|
|
200
|
+
onLoadError?: (error: AudioBandsError) => void;
|
|
201
|
+
onMicError?: (error: AudioBandsError) => void;
|
|
202
|
+
onStateChange?: (state: AudioBandsState) => void;
|
|
203
|
+
onPlay?: () => void;
|
|
204
|
+
onPause?: () => void;
|
|
205
|
+
onMicStart?: () => void;
|
|
206
|
+
onMicStop?: () => void;
|
|
163
207
|
};
|
|
164
208
|
```
|
|
165
209
|
|
|
166
|
-
### `
|
|
210
|
+
### `AudioBandsState`
|
|
167
211
|
|
|
168
212
|
```ts
|
|
169
|
-
type
|
|
170
|
-
|
|
171
|
-
|
|
172
|
-
|
|
173
|
-
|
|
174
|
-
|
|
213
|
+
type AudioBandsState = {
|
|
214
|
+
isPlaying: boolean;
|
|
215
|
+
micActive: boolean;
|
|
216
|
+
hasTrack: boolean; // a track source is assigned, even if playback later fails
|
|
217
|
+
loadError: AudioBandsError | null;
|
|
218
|
+
micError: AudioBandsError | null;
|
|
175
219
|
};
|
|
176
220
|
```
|
|
177
221
|
|
|
178
222
|
## Notes
|
|
179
223
|
|
|
180
|
-
- `AudioContext` is created lazily on the first call to `load()` or `enableMic()`.
|
|
181
|
-
-
|
|
182
|
-
-
|
|
183
|
-
- `
|
|
224
|
+
- `AudioContext` is created lazily on the first call to `load()` or `enableMic()`.
|
|
225
|
+
- `hasTrack` means a track source is currently assigned to the instance. It can still be `true` if `play()` fails due to autoplay policy or another playback error.
|
|
226
|
+
- The mic analyser is not connected to `AudioContext.destination`, so it will not feed back into the speakers.
|
|
227
|
+
- `getBands()`, `getCustomBands()`, `getFftData()`, and `getWaveform()` read live data. Call them inside `requestAnimationFrame`, not from React state updates.
|
|
228
|
+
- `getFftData()` returns the same underlying buffer on each call. Copy it if you need frame-to-frame comparisons.
|
|
229
|
+
- `fftSize` must be a power of two between `32` and `32768`.
|
|
230
|
+
- Band ranges are normalized from `0` to `1`, where `0` is the start of the analyser spectrum and `1` is the end.
|
|
184
231
|
|
|
185
232
|
## License
|
|
186
233
|
|
|
@@ -0,0 +1,344 @@
|
|
|
1
|
+
// src/errors.ts
// Structured error type carried by every rejection/callback in this bundle.
//   kind  — broad category ("config" | "load" | "mic" | "lifecycle" here)
//   code  — machine-readable code (e.g. "invalid_config", "mic_error")
//   cause — the underlying error, when one was wrapped
var AudioBandsError = class extends Error {
  constructor(kind, code, message, cause) {
    super(message);
    this.name = "AudioBandsError";
    Object.assign(this, { kind, code, cause });
  }
};
|
|
11
|
+
|
|
12
|
+
// src/core.ts
// Default analyser settings for the music (track) path.
var DEFAULT_MUSIC_ANALYSER = {
  fftSize: 256,
  smoothingTimeConstant: 0.85
};
// Default analyser settings for the microphone path (slightly less smoothing
// than the music path: 0.8 vs 0.85).
var DEFAULT_MIC_ANALYSER = {
  fftSize: 256,
  smoothingTimeConstant: 0.8
};
// Normalized (0..1) spectrum positions for the classic bass/mid/high split.
// Ranges are contiguous and cover the full analyser spectrum.
var DEFAULT_CLASSIC_RANGES = {
  bass: { from: 0, to: 0.08 },
  mid: { from: 0.08, to: 0.4 },
  high: { from: 0.4, to: 1 }
};
// Silent-band result returned (copied) when no analyser data is available.
var ZERO = { bass: 0, mid: 0, high: 0, overall: 0 };
|
|
27
|
+
// Arithmetic mean of arr[from..to) — callers (getIndexes) guarantee to > from,
// so the divisor is never zero.
function avg(arr, from, to) {
  let total = 0;
  let i = from;
  while (i < to) {
    total += arr[i];
    i += 1;
  }
  return total / (to - from);
}
|
|
32
|
+
// True when value is a positive integral power of two.
// Fix: the bit trick `(v & (v - 1)) === 0` alone also accepts 0, so 0 must be
// excluded explicitly. (The fftSize callers also enforce >= 32, but this
// predicate should stand correct on its own.)
function isPowerOfTwo(value) {
  return value > 0 && (value & value - 1) === 0;
}
|
|
35
|
+
// Merge a user-supplied analyser config with its defaults and validate it.
// Returns a fully-populated { fftSize, smoothingTimeConstant }.
// Throws AudioBandsError("config", "invalid_config", ...) on bad values.
function normalizeAnalyserConfig(config, fallback) {
  const fftSize = config?.fftSize ?? fallback.fftSize;
  const smoothingTimeConstant = config?.smoothingTimeConstant ?? fallback.smoothingTimeConstant;
  if (!Number.isInteger(fftSize) || fftSize < 32 || fftSize > 32768 || !isPowerOfTwo(fftSize)) {
    throw new AudioBandsError(
      "config",
      "invalid_config",
      "fftSize must be a power of two between 32 and 32768"
    );
  }
  // Fix: `typeof x === "number"` let NaN through, because NaN fails every
  // `<` / `>` comparison. Number.isFinite rejects NaN (and Infinity) too.
  if (!Number.isFinite(smoothingTimeConstant) || smoothingTimeConstant < 0 || smoothingTimeConstant > 1) {
    throw new AudioBandsError(
      "config",
      "invalid_config",
      "smoothingTimeConstant must be between 0 and 1"
    );
  }
  return { fftSize, smoothingTimeConstant };
}
|
|
54
|
+
// Validate a single band range (falling back to the classic default for the
// three built-in band names). A range must satisfy 0 <= from < to <= 1.
// Throws AudioBandsError("config", "invalid_config", ...) otherwise.
function normalizeRange(name, range) {
  const normalized = range ?? DEFAULT_CLASSIC_RANGES[name];
  // Fix: the previous `typeof x === "number"` check accepted NaN, since
  // `NaN >= NaN` and `NaN < 0` are both false. Number.isFinite closes that.
  const valid =
    Number.isFinite(normalized?.from) &&
    Number.isFinite(normalized?.to) &&
    normalized.from >= 0 &&
    normalized.to <= 1 &&
    normalized.from < normalized.to;
  if (!valid) {
    throw new AudioBandsError(
      "config",
      "invalid_config",
      `Band range "${name}" must satisfy 0 <= from < to <= 1`
    );
  }
  return normalized;
}
|
|
65
|
+
// Resolve the classic bass/mid/high ranges, validating each and falling back
// to the per-band defaults where the caller supplied nothing.
function normalizeClassicRanges(ranges) {
  const bass = normalizeRange("bass", ranges?.bass);
  const mid = normalizeRange("mid", ranges?.mid);
  const high = normalizeRange("high", ranges?.high);
  return { bass, mid, high };
}
|
|
72
|
+
// Validate every user-defined custom band. Returns an empty object when the
// caller configured none.
function normalizeCustomBands(customBands) {
  const resolved = {};
  if (customBands) {
    for (const [name, range] of Object.entries(customBands)) {
      resolved[name] = normalizeRange(name, range);
    }
  }
  return resolved;
}
|
|
78
|
+
// Map a normalized { from, to } range onto integer bin indexes of a buffer of
// length `len`. Clamps into bounds and always yields at least one bin
// (to > from), so downstream averaging never divides by zero.
function getIndexes(len, range) {
  let from = Math.floor(len * range.from);
  from = Math.min(len - 1, from);
  from = Math.max(0, from);
  let to = Math.floor(len * range.to);
  to = Math.min(len, to);
  to = Math.max(from + 1, to);
  return [from, to];
}
|
|
83
|
+
// Average the bins covered by a normalized range and rescale the byte value
// (0..255) to a 0..1 float.
function getRangeValue(data, range) {
  const [start, end] = getIndexes(data.length, range);
  const mean = avg(data, start, end);
  return mean / 255;
}
|
|
87
|
+
// Populate `data` with the analyser's current byte frequency bins, then hand
// the same buffer back so callers can chain off the read.
function fillFrequencyData(analyser, data) {
  analyser.getByteFrequencyData(data);
  return data;
}
|
|
91
|
+
// Summarize one frequency frame into the classic three bands plus a single
// weighted "overall" level (bass 0.5, mid 0.3, high 0.2).
function computeBands(data, ranges) {
  const bass = getRangeValue(data, ranges.bass);
  const mid = getRangeValue(data, ranges.mid);
  const high = getRangeValue(data, ranges.high);
  const overall = bass * 0.5 + mid * 0.3 + high * 0.2;
  return { bass, mid, high, overall };
}
|
|
102
|
+
// Compute the normalized value of every configured custom band for one
// frequency frame, keyed by band name.
function computeCustomBands(data, ranges) {
  const result = {};
  for (const [name, range] of Object.entries(ranges)) {
    result[name] = getRangeValue(data, range);
  }
  return result;
}
|
|
107
|
+
// Shallow copy of the state object so consumers can't mutate internals.
function cloneState(state) {
  return Object.assign({}, state);
}
|
|
110
|
+
// Headless Web Audio analyser. Owns a lazily-created AudioContext plus two
// independent analyser paths: "music" (an <audio> element routed through an
// analyser to the speakers) and "mic" (a getUserMedia stream routed through
// an analyser only — never connected to the destination, so mic input is not
// played back).
var AudioBands = class {
  // options: analyser configs, band ranges, custom bands and
  // lifecycle/error callbacks. Invalid configs throw AudioBandsError here,
  // at construction time, rather than later at first use.
  constructor(options = {}) {
    // Observable state; read via getState(), mutated only through setState().
    this.state = {
      isPlaying: false,
      micActive: false,
      hasTrack: false,
      loadError: null,
      micError: null
    };
    // Audio graph handles — all created lazily in ensureCtx()/enableMic().
    this.ctx = null;
    this.musicAnalyser = null;
    this.musicData = null; // reused Uint8Array for music FFT reads
    this.micAnalyser = null;
    this.micData = null; // reused Uint8Array for mic FFT reads
    this.micWaveformData = null; // reused Uint8Array for mic time-domain reads
    this.audioEl = null;
    this.musicSource = null;
    this.micSource = null;
    this.micStream = null;
    this.destroyed = false;
    this.options = options;
    this.musicConfig = normalizeAnalyserConfig(options.music, DEFAULT_MUSIC_ANALYSER);
    this.micConfig = normalizeAnalyserConfig(options.mic, DEFAULT_MIC_ANALYSER);
    this.classicRanges = normalizeClassicRanges(options.bandRanges);
    this.customBandRanges = normalizeCustomBands(options.customBands);
  }
  // Snapshot of the current state (shallow copy — safe for callers to keep).
  getState() {
    return cloneState(this.state);
  }
  // Normalized values for each configured custom band. When the requested
  // analyser isn't running yet, computes against a 1-byte zero buffer so
  // every band reads 0 instead of returning null.
  getCustomBands(source = "music") {
    const data = this.readFrequencyData(source);
    if (!data) return computeCustomBands(new Uint8Array(1), this.customBandRanges);
    return computeCustomBands(data, this.customBandRanges);
  }
  // Load `url` into a fresh <audio> element (tearing down any current track),
  // wire it into the music analyser and start playback. Rejects with
  // AudioBandsError, which is also reported through onLoadError/onError.
  async load(url) {
    let ctx;
    try {
      ctx = this.ensureCtx();
    } catch (error) {
      throw this.handleError("load", error);
    }
    this.teardownMusic();
    const audio = new Audio();
    // "anonymous" CORS mode — presumably so cross-origin tracks remain
    // analyzable by the Web Audio graph; NOTE(review): confirm.
    audio.crossOrigin = "anonymous";
    audio.src = url;
    audio.loop = true;
    this.audioEl = audio;
    // hasTrack flips true as soon as a source is assigned — it stays true
    // even if play() rejects below (e.g. autoplay policy).
    this.setState({ hasTrack: true, loadError: null });
    const source = ctx.createMediaElementSource(audio);
    source.connect(this.musicAnalyser);
    this.musicSource = source;
    try {
      await audio.play();
      this.setState({ isPlaying: true, loadError: null });
      this.options.onPlay?.();
    } catch (error) {
      throw this.handleError("load", error, "load_error");
    }
  }
  // Toggle the current track between playing and paused. No-op without a track.
  togglePlayPause() {
    const audio = this.audioEl;
    if (!audio) return;
    if (audio.paused) {
      // play() is async: state updates / callbacks fire only once it settles.
      void audio.play().then(() => {
        this.setState({ isPlaying: true, loadError: null });
        this.options.onPlay?.();
      }).catch((error) => {
        // Reported via handleError (sets loadError, fires callbacks);
        // intentionally not rethrown — this is a sync method.
        this.handleError("load", error, "playback_error");
      });
      return;
    }
    audio.pause();
    this.setState({ isPlaying: false });
    this.options.onPause?.();
  }
  // Request microphone access and start mic analysis. No-op while a stream is
  // already active. Rejects with AudioBandsError (also via onMicError/onError).
  async enableMic() {
    let ctx;
    try {
      ctx = this.ensureCtx();
    } catch (error) {
      throw this.handleError("mic", error);
    }
    if (this.micStream) return; // already enabled
    try {
      const stream = await navigator.mediaDevices.getUserMedia({
        audio: true,
        video: false
      });
      this.micStream = stream;
      const analyser = this.createAnalyser(ctx, this.micConfig);
      this.micAnalyser = analyser;
      // frequencyBinCount bins for FFT reads, fftSize samples for waveform.
      this.micData = new Uint8Array(
        analyser.frequencyBinCount
      );
      this.micWaveformData = new Uint8Array(
        analyser.fftSize
      );
      // Mic graph: source -> analyser only. The analyser is never connected
      // to ctx.destination, so mic input does not reach the speakers.
      const source = ctx.createMediaStreamSource(stream);
      source.connect(analyser);
      this.micSource = source;
      this.setState({ micActive: true, micError: null });
      this.options.onMicStart?.();
    } catch (error) {
      throw this.handleError("mic", error, "mic_error");
    }
  }
  // Stop the mic stream, disconnect its node and drop the analyser buffers.
  // Safe to call when the mic was never enabled; onMicStop fires only when
  // there was actually something to tear down.
  disableMic() {
    const hadMic = Boolean(this.micStream || this.micSource || this.micAnalyser);
    this.micStream?.getTracks().forEach((track) => track.stop());
    this.micStream = null;
    try {
      this.micSource?.disconnect();
    } catch {
      // node may already be detached — best-effort cleanup, ignore
    }
    this.micSource = null;
    this.micAnalyser = null;
    this.micData = null;
    this.micWaveformData = null;
    this.setState({ micActive: false });
    if (hadMic) this.options.onMicStop?.();
  }
  // Normalized { bass, mid, high, overall } for "music" (default) or "mic".
  // Returns a zeroed copy when the requested analyser isn't running yet.
  getBands(source = "music") {
    const data = this.readFrequencyData(source);
    if (!data) return { ...ZERO };
    return computeBands(data, this.classicRanges);
  }
  // Raw byte frequency bins (0–255), or null when the analyser isn't running.
  // NOTE: the same underlying Uint8Array is returned on every call — copy it
  // if you need frame-to-frame comparisons.
  getFftData(source = "music") {
    return this.readFrequencyData(source);
  }
  // Mic time-domain bytes (same reused buffer each call), or null when the
  // mic is not active. There is no waveform path for music playback.
  getWaveform() {
    if (!this.micAnalyser || !this.micWaveformData) return null;
    this.micAnalyser.getByteTimeDomainData(this.micWaveformData);
    return this.micWaveformData;
  }
  // Tear everything down: stop playback, release the mic, close the
  // AudioContext and drop user callbacks. Idempotent; afterwards the instance
  // is unusable (ensureCtx() throws "destroyed").
  destroy() {
    if (this.destroyed) return;
    this.teardownMusic();
    this.disableMic();
    void this.ctx?.close(); // close() returns a promise; fire-and-forget
    this.ctx = null;
    this.musicAnalyser = null;
    this.musicData = null;
    this.setState({ isPlaying: false, micActive: false, hasTrack: false });
    // Drop callbacks so nothing user-supplied fires after destroy.
    this.options = {};
    this.destroyed = true;
  }
  // Fill and return the reused FFT buffer for the requested source, or null
  // when that path has no analyser yet.
  readFrequencyData(source) {
    if (source === "mic") {
      if (!this.micAnalyser || !this.micData) return null;
      return fillFrequencyData(this.micAnalyser, this.micData);
    }
    if (!this.musicAnalyser || !this.musicData) return null;
    return fillFrequencyData(this.musicAnalyser, this.musicData);
  }
  // Lazily create the AudioContext and the music analyser chain. Throws
  // AudioBandsError when this instance was destroyed or when the environment
  // has no AudioContext.
  ensureCtx() {
    if (this.destroyed) {
      throw new AudioBandsError(
        "lifecycle",
        "destroyed",
        "This AudioBands instance was destroyed"
      );
    }
    if (this.ctx) return this.ctx;
    // webkitAudioContext: legacy-prefixed fallback (older WebKit browsers).
    const Ctx = window.AudioContext || window.webkitAudioContext;
    if (!Ctx) {
      throw new AudioBandsError(
        "lifecycle",
        "unsupported_audio_context",
        "AudioContext is not supported in this environment"
      );
    }
    const ctx = new Ctx();
    const analyser = this.createAnalyser(ctx, this.musicConfig);
    // Music path is audible: the analyser sits between source and speakers.
    analyser.connect(ctx.destination);
    this.ctx = ctx;
    this.musicAnalyser = analyser;
    this.musicData = new Uint8Array(
      analyser.frequencyBinCount
    );
    return ctx;
  }
  // Build an AnalyserNode from a validated { fftSize, smoothingTimeConstant }.
  createAnalyser(ctx, config) {
    const analyser = ctx.createAnalyser();
    analyser.fftSize = config.fftSize;
    analyser.smoothingTimeConstant = config.smoothingTimeConstant;
    return analyser;
  }
  // Wrap `error` in AudioBandsError (unless it already is one), record it in
  // state, fire the matching onLoadError/onMicError callback plus the generic
  // onError, and return the wrapped error for the caller to (re)throw.
  handleError(kind, error, fallbackCode = kind === "mic" ? "mic_error" : "load_error") {
    const wrapped = error instanceof AudioBandsError ? error : new AudioBandsError(
      kind,
      fallbackCode,
      kind === "mic" ? "Failed to access microphone input" : "Failed to load or play audio track",
      error
    );
    if (kind === "load") {
      this.setState({ isPlaying: false, loadError: wrapped });
      this.options.onLoadError?.(wrapped);
    } else {
      this.setState({ micActive: false, micError: wrapped });
      this.options.onMicError?.(wrapped);
    }
    this.options.onError?.(wrapped);
    return wrapped;
  }
  // Shallow-merge `patch` into state; fire onStateChange only when at least
  // one value actually changed.
  setState(patch) {
    let changed = false;
    for (const [key, value] of Object.entries(patch)) {
      if (this.state[key] !== value) {
        this.state[key] = value;
        changed = true;
      }
    }
    if (changed) this.options.onStateChange?.(this.getState());
  }
  // Stop and detach the current track: pause, clear src and call load()
  // (conventional pattern to make the element release its media —
  // NOTE(review): confirm against target browsers), disconnect the
  // media-element source node and reset playback state.
  teardownMusic() {
    this.audioEl?.pause();
    if (this.audioEl) {
      this.audioEl.src = "";
      this.audioEl.load();
    }
    this.audioEl = null;
    try {
      this.musicSource?.disconnect();
    } catch {
      // already disconnected — ignore
    }
    this.musicSource = null;
    this.setState({ isPlaying: false, hasTrack: false });
  }
};
|
|
339
|
+
|
|
340
|
+
export {
|
|
341
|
+
AudioBandsError,
|
|
342
|
+
AudioBands
|
|
343
|
+
};
|
|
344
|
+
//# sourceMappingURL=chunk-UAMH5Y33.js.map
|