react-audio-wavekit 0.1.4 → 0.1.5
This diff shows the changes between two publicly released versions of the package, as published to a supported registry. It is provided for informational purposes only.
- package/README.md +3 -0
- package/dist/index.d.ts +10 -0
- package/dist/recorder/live-recorder/index.cjs +3 -2
- package/dist/recorder/live-recorder/index.js +3 -2
- package/dist/recorder/live-streaming/use-recording-amplitudes.cjs +9 -3
- package/dist/recorder/live-streaming/use-recording-amplitudes.js +9 -3
- package/dist/waveform/util-audio-decoder.cjs +2 -5
- package/dist/waveform/util-audio-decoder.js +2 -5
- package/package.json +1 -1
package/README.md
CHANGED
@@ -113,6 +113,7 @@ Scrolling timeline waveform (Voice Memos style). Canvas grows horizontally as re
 | `fftSize` | `number` | `2048` | FFT size for frequency analysis |
 | `smoothingTimeConstant` | `number` | `0.8` | Smoothing constant (0-1) |
 | `sampleInterval` | `number` | `50` | Sample interval in ms |
+| `amplitudeScale` | `number` | `1.5` | Amplitude multiplier (lower = quieter waveform) |
 | `appearance` | `LiveStreamingRecorderAppearance` | - | See [Appearance Options](#appearance-options) |
 
 **Canvas Props:**
@@ -143,6 +144,7 @@ Fixed-width waveform where bars compress as recording grows.
 | `fftSize` | `number` | `2048` | FFT size for frequency analysis |
 | `smoothingTimeConstant` | `number` | `0.8` | Smoothing constant (0-1) |
 | `sampleInterval` | `number` | `50` | Sample interval in ms |
+| `amplitudeScale` | `number` | `1.5` | Amplitude multiplier (lower = quieter waveform) |
 | `appearance` | `WaveformAppearance` | - | See [Appearance Options](#appearance-options) |
 
 ### LiveRecorder
@@ -163,6 +165,7 @@ Real-time frequency bars visualization.
 | `fftSize` | `number` | `2048` | FFT size for frequency analysis |
 | `smoothingTimeConstant` | `number` | `0.8` | Smoothing constant (0-1) |
 | `showIdleState` | `boolean` | `true` | Show minimal bars when not recording |
+| `amplitudeScale` | `number` | `1.5` | Amplitude multiplier (lower = quieter waveform) |
 | `appearance` | `WaveformAppearance` | - | See [Appearance Options](#appearance-options) |
 
 ---
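
All three recorder components gain the same `amplitudeScale` prop. A minimal sketch of how it might be passed, using the `LiveRecorder` export named in the README above; the import path, the `mediaRecorder` prop wiring, and the surrounding component are assumptions for illustration, not part of this diff:

```tsx
import { useState } from "react";
import { LiveRecorder } from "react-audio-wavekit"; // assumed import path

export function RecorderDemo() {
  const [mediaRecorder, setMediaRecorder] = useState<MediaRecorder | null>(null);

  async function start() {
    // Ask for a microphone stream and hand the recorder to the visualizer.
    const stream = await navigator.mediaDevices.getUserMedia({ audio: true });
    const recorder = new MediaRecorder(stream);
    recorder.start();
    setMediaRecorder(recorder);
  }

  return (
    <>
      <button onClick={start}>Record</button>
      {/* Values below the 1.5 default draw a quieter waveform; higher values boost it. */}
      <LiveRecorder mediaRecorder={mediaRecorder} amplitudeScale={1.0} width={320} height={80} />
    </>
  );
}
```

`width` and `height` pass through because `LiveRecorderProps` extends `React.CanvasHTMLAttributes<HTMLCanvasElement>` (see the type declarations below).
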
package/dist/index.d.ts
CHANGED
@@ -65,6 +65,11 @@ declare interface LiveRecorderProps extends React.CanvasHTMLAttributes<HTMLCanva
    * @default true
    */
   showIdleState?: boolean;
+  /**
+   * Amplitude multiplier - lower values produce quieter waveforms
+   * @default 1.5
+   */
+  amplitudeScale?: number;
 }
 
 declare interface LiveRecorderRef {
@@ -191,6 +196,11 @@ declare interface UseRecordingAmplitudesOptions {
   smoothingTimeConstant?: number;
   /** Interval in ms for sampling amplitude data */
   sampleInterval?: number;
+  /**
+   * Amplitude multiplier - lower values produce quieter waveforms
+   * @default 1.5
+   */
+  amplitudeScale?: number;
 }
 
 declare interface UseRecordingAmplitudesReturn {
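
The same optional field lands on `UseRecordingAmplitudesOptions`. A typed sketch of the widened options object, assuming the hook is exported as `useRecordingAmplitudes` from the package root; only the option names and defaults come from the declarations above (the hook's defaults, shown in the build output further down, differ slightly from the component defaults):

```ts
import { useRecordingAmplitudes } from "react-audio-wavekit"; // assumed export path

function useQuietAmplitudes(mediaRecorder: MediaRecorder) {
  // The tuning fields are all optional; defaults written out for clarity.
  return useRecordingAmplitudes({
    mediaRecorder,
    fftSize: 2048,              // FFT size for frequency analysis
    smoothingTimeConstant: 0.4, // analyser smoothing, 0-1
    sampleInterval: 50,         // ms between amplitude samples
    amplitudeScale: 1.5,        // new in 0.1.5; lower = quieter waveform
  });
}
```
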
package/dist/recorder/live-recorder/index.cjs
CHANGED
@@ -11,6 +11,7 @@ const LiveRecorder = react.forwardRef(
     fftSize = 2048,
     smoothingTimeConstant = 0.8,
     showIdleState = true,
+    amplitudeScale = 1.5,
     ...props
   }, ref) => {
     const canvasRef = react.useRef(null);
@@ -60,7 +61,7 @@ const LiveRecorder = react.forwardRef(
       for (let i = 0; i < numBars; i++) {
         const dataIndex = Math.floor(i / numBars * bufferLength);
         const value = dataArray[dataIndex] || 0;
-        const amplitude = Math.abs(value - 128) / 128;
+        const amplitude = Math.min(1, Math.abs(value - 128) / 128 * amplitudeScale);
         const barHeight = Math.max(2, amplitude * height * barHeightScale);
         const x = i * totalBarWidth;
         const y = (height - barHeight) / 2;
@@ -90,7 +91,7 @@ const LiveRecorder = react.forwardRef(
         animationRef.current = null;
       }
     };
-  }, [mediaRecorder, appearance, analyserRef, dataArrayRef, bufferLengthRef]);
+  }, [mediaRecorder, appearance, amplitudeScale, analyserRef, dataArrayRef, bufferLengthRef]);
   react.useEffect(() => {
     if (mediaRecorder || !showIdleState || !canvasRef.current) return;
     const canvas = canvasRef.current;
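
The substantive change is in the bar-height math: a time-domain byte (128 = silence) is normalized to 0-1, multiplied by `amplitudeScale`, and clamped so boosted signals cannot overflow the canvas. Note also that `amplitudeScale` is added to the effect's dependency array, so the draw loop restarts when the prop changes. A standalone sketch of the new formula, for illustration only (not the package's API):

```ts
// Mirrors the updated line: bytes from getByteTimeDomainData are centered
// at 128, so |byte - 128| / 128 gives a 0-1 amplitude, which is then
// boosted by amplitudeScale and clamped back into 0-1.
function barAmplitude(byteSample: number, amplitudeScale = 1.5): number {
  return Math.min(1, (Math.abs(byteSample - 128) / 128) * amplitudeScale);
}

console.log(barAmplitude(128));    // 0    (silence)
console.log(barAmplitude(192));    // 0.75 (0.5 boosted by the 1.5 default)
console.log(barAmplitude(255, 3)); // 1    (clamped at full height)
```
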
package/dist/recorder/live-recorder/index.js
CHANGED
@@ -9,6 +9,7 @@ const LiveRecorder = forwardRef(
     fftSize = 2048,
     smoothingTimeConstant = 0.8,
     showIdleState = true,
+    amplitudeScale = 1.5,
     ...props
   }, ref) => {
     const canvasRef = useRef(null);
@@ -58,7 +59,7 @@ const LiveRecorder = forwardRef(
       for (let i = 0; i < numBars; i++) {
         const dataIndex = Math.floor(i / numBars * bufferLength);
         const value = dataArray[dataIndex] || 0;
-        const amplitude = Math.abs(value - 128) / 128;
+        const amplitude = Math.min(1, Math.abs(value - 128) / 128 * amplitudeScale);
         const barHeight = Math.max(2, amplitude * height * barHeightScale);
         const x = i * totalBarWidth;
         const y = (height - barHeight) / 2;
@@ -88,7 +89,7 @@ const LiveRecorder = forwardRef(
         animationRef.current = null;
       }
     };
-  }, [mediaRecorder, appearance, analyserRef, dataArrayRef, bufferLengthRef]);
+  }, [mediaRecorder, appearance, amplitudeScale, analyserRef, dataArrayRef, bufferLengthRef]);
   useEffect(() => {
     if (mediaRecorder || !showIdleState || !canvasRef.current) return;
     const canvas = canvasRef.current;
package/dist/recorder/live-streaming/use-recording-amplitudes.cjs
CHANGED
@@ -3,7 +3,13 @@ Object.defineProperty(exports, Symbol.toStringTag, { value: "Module" });
 const react = require("react");
 const useAudioAnalyser = require("../use-audio-analyser.cjs");
 function useRecordingAmplitudes(options) {
-  const { mediaRecorder, fftSize = 2048, smoothingTimeConstant = 0.4, sampleInterval = 50 } = options;
+  const {
+    mediaRecorder,
+    fftSize = 2048,
+    smoothingTimeConstant = 0.4,
+    sampleInterval = 50,
+    amplitudeScale = 1.5
+  } = options;
   const amplitudeDataRef = react.useRef([]);
   const listenersRef = react.useRef(/* @__PURE__ */ new Set());
   const samplingIntervalRef = react.useRef(null);
@@ -51,7 +57,7 @@ function useRecordingAmplitudes(options) {
       sum += normalized * normalized;
     }
     const rms = Math.sqrt(sum / bufferLength);
-    const amplitude = Math.min(1, rms *
+    const amplitude = Math.min(1, rms * amplitudeScale);
     amplitudeDataRef.current.push(amplitude);
     notifyListeners();
   };
@@ -79,7 +85,7 @@ function useRecordingAmplitudes(options) {
       mediaRecorder.removeEventListener("resume", handleResume);
       stopSampling();
     };
-  }, [mediaRecorder, sampleInterval, analyserRef, dataArrayRef, bufferLengthRef, notifyListeners]);
+  }, [mediaRecorder, sampleInterval, amplitudeScale, analyserRef, dataArrayRef, bufferLengthRef, notifyListeners]);
   return {
     amplitudes,
     audioContext: audioContextRef.current,
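
Here the scaling applies to an RMS value rather than a single sample: the hook squares each normalized sample, averages, takes the square root, then multiplies by `amplitudeScale` and clamps. A self-contained sketch of that sampling step, assuming a Web Audio `AnalyserNode` already wired to the recording stream (the wiring lives in `useAudioAnalyser` and is not part of this diff):

```ts
// One amplitude sample, mirroring the hook's RMS math.
function sampleAmplitude(analyser: AnalyserNode, amplitudeScale = 1.5): number {
  const data = new Uint8Array(analyser.fftSize);
  analyser.getByteTimeDomainData(data); // bytes centered at 128 (silence)

  let sum = 0;
  for (const byte of data) {
    const normalized = (byte - 128) / 128; // map to -1..1
    sum += normalized * normalized;
  }
  const rms = Math.sqrt(sum / data.length);

  // 0.1.5: the multiplier is now the configurable amplitudeScale,
  // still clamped to 1 so downstream renderers get a bounded value.
  return Math.min(1, rms * amplitudeScale);
}
```
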
package/dist/recorder/live-streaming/use-recording-amplitudes.js
CHANGED
@@ -1,7 +1,13 @@
 import { useRef, useCallback, useSyncExternalStore, useEffect } from "react";
 import { useAudioAnalyser } from "../use-audio-analyser.js";
 function useRecordingAmplitudes(options) {
-  const { mediaRecorder, fftSize = 2048, smoothingTimeConstant = 0.4, sampleInterval = 50 } = options;
+  const {
+    mediaRecorder,
+    fftSize = 2048,
+    smoothingTimeConstant = 0.4,
+    sampleInterval = 50,
+    amplitudeScale = 1.5
+  } = options;
   const amplitudeDataRef = useRef([]);
   const listenersRef = useRef(/* @__PURE__ */ new Set());
   const samplingIntervalRef = useRef(null);
@@ -49,7 +55,7 @@ function useRecordingAmplitudes(options) {
       sum += normalized * normalized;
     }
     const rms = Math.sqrt(sum / bufferLength);
-    const amplitude = Math.min(1, rms *
+    const amplitude = Math.min(1, rms * amplitudeScale);
     amplitudeDataRef.current.push(amplitude);
     notifyListeners();
   };
@@ -77,7 +83,7 @@ function useRecordingAmplitudes(options) {
       mediaRecorder.removeEventListener("resume", handleResume);
       stopSampling();
     };
-  }, [mediaRecorder, sampleInterval, analyserRef, dataArrayRef, bufferLengthRef, notifyListeners]);
+  }, [mediaRecorder, sampleInterval, amplitudeScale, analyserRef, dataArrayRef, bufferLengthRef, notifyListeners]);
   return {
     amplitudes,
     audioContext: audioContextRef.current,
package/dist/waveform/util-audio-decoder.cjs
CHANGED
@@ -1,17 +1,15 @@
 "use strict";
 Object.defineProperty(exports, Symbol.toStringTag, { value: "Module" });
 async function decodeAudioBlob(blob, sampleCount) {
-  const audioContext = new AudioContext();
   const arrayBuffer = await blob.arrayBuffer();
   if (arrayBuffer.byteLength === 0) {
-    await audioContext.close();
     throw new Error("Audio blob is empty");
   }
+  const offlineContext = new OfflineAudioContext(1, 1, 44100);
   let audioBuffer;
   try {
-    audioBuffer = await audioContext.decodeAudioData(arrayBuffer);
+    audioBuffer = await offlineContext.decodeAudioData(arrayBuffer);
   } catch {
-    await audioContext.close();
     throw new Error(
       `Unable to decode audio data (type: ${blob.type}, size: ${blob.size} bytes). This may be due to an unsupported audio format or corrupted data.`
     );
@@ -29,7 +27,6 @@ async function decodeAudioBlob(blob, sampleCount) {
   }
   const maxPeak = Math.max(...peaks);
   const normalizedPeaks = maxPeak > 0 ? peaks.map((p) => p / maxPeak) : peaks;
-  await audioContext.close();
   return normalizedPeaks;
 }
 const audioDataCache = /* @__PURE__ */ new WeakMap();
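
The decoder swap: instead of constructing a live `AudioContext`, which must be closed on every exit path (the source of the three removed `close()` calls), decoding now goes through a throwaway `OfflineAudioContext`, which has no `close()` method and does not hold an audio output device open. A minimal sketch of the new decode path, with the peak extraction omitted:

```ts
async function decodeAudio(blob: Blob): Promise<AudioBuffer> {
  const arrayBuffer = await blob.arrayBuffer();
  if (arrayBuffer.byteLength === 0) {
    throw new Error("Audio blob is empty");
  }
  // The (channels=1, length=1, sampleRate=44100) arguments are effectively
  // placeholders: the context is used only for decodeAudioData and is
  // never asked to render anything.
  const offlineContext = new OfflineAudioContext(1, 1, 44100);
  return offlineContext.decodeAudioData(arrayBuffer);
}
```

One side effect worth knowing: `decodeAudioData` resamples the decoded buffer to the context's sample rate, so peaks are computed against 44.1 kHz audio regardless of the source's original rate.
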
package/dist/waveform/util-audio-decoder.js
CHANGED
@@ -1,15 +1,13 @@
 async function decodeAudioBlob(blob, sampleCount) {
-  const audioContext = new AudioContext();
   const arrayBuffer = await blob.arrayBuffer();
   if (arrayBuffer.byteLength === 0) {
-    await audioContext.close();
     throw new Error("Audio blob is empty");
   }
+  const offlineContext = new OfflineAudioContext(1, 1, 44100);
   let audioBuffer;
   try {
-    audioBuffer = await audioContext.decodeAudioData(arrayBuffer);
+    audioBuffer = await offlineContext.decodeAudioData(arrayBuffer);
   } catch {
-    await audioContext.close();
     throw new Error(
       `Unable to decode audio data (type: ${blob.type}, size: ${blob.size} bytes). This may be due to an unsupported audio format or corrupted data.`
     );
@@ -27,7 +25,6 @@ async function decodeAudioBlob(blob, sampleCount) {
   }
   const maxPeak = Math.max(...peaks);
   const normalizedPeaks = maxPeak > 0 ? peaks.map((p) => p / maxPeak) : peaks;
-  await audioContext.close();
   return normalizedPeaks;
 }
 const audioDataCache = /* @__PURE__ */ new WeakMap();