react-audio-wavekit 0.2.1 → 0.2.6
This diff shows the changes between publicly released versions of the package as they appear in their respective public registries, and is provided for informational purposes only.
package/README.md
CHANGED
@@ -38,6 +38,8 @@ Static waveform visualization with playhead and drag-to-seek.
 
 ```tsx
 <AudioWaveform
+  // Also accepts classnames, compatible with tailwind.
+  className=""
   blob={audioBlob}
   currentTime={currentTime}
   duration={duration}
@@ -47,6 +49,11 @@ Static waveform visualization with playhead and drag-to-seek.
     audio.currentTime = time;
     audio.play();
   }}
+  style={{
+    // Or use classNames with Tailwind
+    width: '400px',
+    height: '100px',
+  }}
 />
 ```
 
@@ -63,6 +70,8 @@ Static waveform visualization with playhead and drag-to-seek.
 | `suspense` | `boolean` | `false` | Enable React Suspense mode |
 | `appearance` | `AudioWaveformAppearance` | - | See [Appearance Options](#appearance-options) |
 
+**SSR note:** When `suspense` is `true`, decoding only starts on the client after mount. On the server (and during the first client render), the waveform renders without peaks, so wrap the component in a Suspense boundary to show a fallback during hydration.
+
 ---
 
 ## Recorder
@@ -101,6 +110,8 @@ Headless hook to manage recording state. Use with recorder components below.
 
 
 
+_This is an example image. The component includes only the waveform._
+
 Scrolling timeline waveform (Voice Memos style). Canvas grows horizontally as recording continues.
 
 [▶ Demo](https://react-audio-wavekit.netlify.app/?path=/story/recorder-livestreamingrecorder--default)
@@ -136,6 +147,8 @@ Scrolling timeline waveform (Voice Memos style). Canvas grows horizontally as re
 
 
 
+_This is an example image. The component includes only the waveform._
+
 Fixed-width waveform where bars compress as recording grows.
 
 [▶ Demo](https://react-audio-wavekit.netlify.app/?path=/story/recorder-livestreamingstackrecorder--default)
@@ -161,6 +174,8 @@ Fixed-width waveform where bars compress as recording grows.
 
 
 
+_This is an example image. The component includes only the waveform._
+
 Real-time frequency bars visualization.
 
 [▶ Demo](https://react-audio-wavekit.netlify.app/?path=/story/recorder-liverecorder--default)
@@ -222,6 +237,11 @@ Components accept `className`, `style`, and all standard canvas HTML attributes.
 <AudioWaveform
   blob={blob}
   className="h-32 w-full rounded-lg bg-slate-900 p-4"
+  // or style prop
+  style={{
+    width: "100%",
+    height: "100%",
+  }}
   appearance={{
     barColor: "#3b82f6",
     barWidth: 2,
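The README's new SSR note says to wrap the component in a Suspense boundary, but the diff above does not show one. A minimal sketch of that usage, assuming only the `blob`, `suspense`, and `style` props documented in the README; the fallback element and sizing are placeholders, and the playback props from the earlier example (`currentTime`, `duration`, `onSeek`) are omitted for brevity:

```tsx
import { Suspense } from "react";
import { AudioWaveform } from "react-audio-wavekit";

// Sketch only: with `suspense`, decoding starts on the client after mount,
// so the fallback covers server output, hydration, and the decode itself.
export function WaveformCard({ blob }: { blob: Blob }) {
  return (
    <Suspense fallback={<div style={{ width: "400px", height: "100px" }} />}>
      <AudioWaveform
        blob={blob}
        suspense
        style={{ width: "400px", height: "100px" }}
      />
    </Suspense>
  );
}
```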
package/dist/waveform/index.cjs
CHANGED
@@ -24,9 +24,13 @@ const AudioWaveform = react.forwardRef(function AudioWaveform2({
 }, ref) {
   const [decodedPeaks, setDecodedPeaks] = react.useState(null);
   const [error, setError] = react.useState(null);
+  const [isMounted, setIsMounted] = react.useState(false);
   const blobRef = react.useRef(null);
   const rendererRef = react.useRef(null);
   const sampleCount = react.useMemo(() => getInitialSampleCount(), []);
+  react.useEffect(() => {
+    setIsMounted(true);
+  }, []);
   react.useEffect(() => {
     if (ref && typeof ref === "function") {
       ref({ canvas: rendererRef.current?.canvas || null });
@@ -35,7 +39,7 @@ const AudioWaveform = react.forwardRef(function AudioWaveform2({
     }
   }, [ref]);
   const shouldDecode = !precomputedPeaks && blob;
-  const suspensePeaks = shouldDecode && suspense ? utilSuspense.unwrapPromise(utilAudioDecoder.getAudioData(blob, sampleCount)) : null;
+  const suspensePeaks = shouldDecode && suspense && isMounted ? utilSuspense.unwrapPromise(utilAudioDecoder.getAudioData(blob, sampleCount)) : null;
   react.useEffect(() => {
     if (!shouldDecode || suspense) {
       if (!shouldDecode) {
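The `isMounted` flag added above is a client-only gate: it stays `false` during SSR and the first client render, so the Suspense decode path is never entered until after mount. A condensed sketch of the pattern, with names simplified from the built output; `useIsMounted` is an illustration, not an export of this package:

```tsx
import { useEffect, useState } from "react";

// Illustration only: not part of react-audio-wavekit's public API.
// The flag is false on the server and during hydration, and flips to
// true in an effect, which only runs after the component has mounted.
function useIsMounted(): boolean {
  const [isMounted, setIsMounted] = useState(false);
  useEffect(() => {
    setIsMounted(true);
  }, []);
  return isMounted;
}

// Mirroring the gated expression in the diff: in suspense mode the decode
// promise is only unwrapped (and thus only suspends to the nearest
// Suspense boundary) once isMounted is true.
// const suspensePeaks =
//   shouldDecode && suspense && isMounted
//     ? unwrapPromise(getAudioData(blob, sampleCount))
//     : null;
```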
package/dist/waveform/index.js
CHANGED
@@ -22,9 +22,13 @@ const AudioWaveform = forwardRef(function AudioWaveform2({
 }, ref) {
   const [decodedPeaks, setDecodedPeaks] = useState(null);
   const [error, setError] = useState(null);
+  const [isMounted, setIsMounted] = useState(false);
   const blobRef = useRef(null);
   const rendererRef = useRef(null);
   const sampleCount = useMemo(() => getInitialSampleCount(), []);
+  useEffect(() => {
+    setIsMounted(true);
+  }, []);
   useEffect(() => {
     if (ref && typeof ref === "function") {
       ref({ canvas: rendererRef.current?.canvas || null });
@@ -33,7 +37,7 @@ const AudioWaveform = forwardRef(function AudioWaveform2({
     }
   }, [ref]);
   const shouldDecode = !precomputedPeaks && blob;
-  const suspensePeaks = shouldDecode && suspense ? unwrapPromise(getAudioData(blob, sampleCount)) : null;
+  const suspensePeaks = shouldDecode && suspense && isMounted ? unwrapPromise(getAudioData(blob, sampleCount)) : null;
   useEffect(() => {
     if (!shouldDecode || suspense) {
       if (!shouldDecode) {
@@ -44,7 +44,7 @@ async function decodeAudioBlob(blob, sampleCount) {
     return extractPeaksFromChannelData(channelData, sampleCount);
   } catch {
     throw new Error(
-      `Unable to decode audio data (type: ${blob.type}, size: ${blob.size} bytes). Both native Web Audio API and WASM decoder failed.
+      `Unable to decode audio data (type: ${blob.type}, size: ${blob.size} bytes). Both native Web Audio API and WASM decoder failed. Consider providing pre-decoded 'peaks' to the AudioWaveform component to bypass decoding.`
     );
   }
 }
@@ -42,7 +42,7 @@ async function decodeAudioBlob(blob, sampleCount) {
     return extractPeaksFromChannelData(channelData, sampleCount);
   } catch {
     throw new Error(
-      `Unable to decode audio data (type: ${blob.type}, size: ${blob.size} bytes). Both native Web Audio API and WASM decoder failed.
+      `Unable to decode audio data (type: ${blob.type}, size: ${blob.size} bytes). Both native Web Audio API and WASM decoder failed. Consider providing pre-decoded 'peaks' to the AudioWaveform component to bypass decoding.`
     );
   }
 }
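The reworded decode error now suggests passing pre-decoded peaks to bypass decoding. A hypothetical sketch of that usage: the `peaks` prop name is taken from the error text and the internal `precomputedPeaks` check, and the flat `number[]` shape is an assumption, so confirm both against the package's exported types:

```tsx
import { AudioWaveform } from "react-audio-wavekit";

// Assumption: peaks computed elsewhere (e.g. server-side or cached),
// as normalized amplitudes in whatever shape the component expects.
const cachedPeaks: number[] = [0.12, 0.48, 0.91, 0.56, 0.23, 0.07];

export function PrecomputedWaveform() {
  // With peaks supplied, the decode path (`!precomputedPeaks && blob`
  // in the built output) is skipped, so no Web Audio / WASM decoding runs.
  return <AudioWaveform peaks={cachedPeaks} className="h-24 w-full" />;
}
```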
package/package.json
CHANGED
@@ -1,14 +1,34 @@
 {
   "name": "react-audio-wavekit",
-  "version": "0.2.1",
+  "version": "0.2.6",
+  "description": "React component library for audio waveform visualization and live recording",
   "license": "CC0-1.0",
+  "author": "semanticist21",
+  "repository": {
+    "type": "git",
+    "url": "git+https://github.com/semanticist21/react-audio-waveform.git"
+  },
+  "homepage": "https://react-audio-wavekit.netlify.app",
+  "bugs": {
+    "url": "https://github.com/semanticist21/react-audio-waveform/issues"
+  },
+  "keywords": [
+    "react",
+    "audio",
+    "waveform",
+    "visualization",
+    "recorder",
+    "recording",
+    "canvas",
+    "audio-player",
+    "audio-recorder",
+    "web-audio"
+  ],
   "type": "module",
   "main": "dist/index.cjs",
   "module": "dist/index.js",
   "types": "dist/index.d.ts",
-  "sideEffects": [
-    "**/*.css"
-  ],
+  "sideEffects": false,
   "exports": {
     ".": {
       "import": {
@@ -43,8 +63,8 @@
   "peerDependencies": {
     "react": ">=18.0.0",
     "react-dom": ">=18.0.0",
-    "overlayscrollbars": "^2.
-    "overlayscrollbars-react": "^0.5.
+    "overlayscrollbars": "^2.13.0",
+    "overlayscrollbars-react": "^0.5.6"
   },
   "devDependencies": {
     "@biomejs/biome": "2.3.8",
@@ -52,13 +72,13 @@
     "@storybook/react": "10.1.4",
     "@storybook/react-vite": "10.1.4",
     "@tailwindcss/vite": "4.1.17",
-    "@testing-library/react": "^16.3.
+    "@testing-library/react": "^16.3.1",
     "@types/node": "24.10.1",
     "@types/react": "19.2.7",
     "@types/react-dom": "19.2.3",
     "@typescript/native-preview": "7.0.0-dev.20251205.1",
     "@vitejs/plugin-react": "5.1.1",
-    "jsdom": "^27.
+    "jsdom": "^27.3.0",
     "lucide-react": "0.555.0",
     "react": "19.2.1",
     "react-dom": "19.2.1",
@@ -67,7 +87,7 @@
     "typescript": "5.9.3",
     "vite": "7.2.6",
     "vite-plugin-dts": "4.5.4",
-    "vitest": "^4.0.
+    "vitest": "^4.0.16"
   },
   "dependencies": {
     "mpg123-decoder": "^1.0.3"