react-audio-wavekit 0.1.0
This diff shows the published contents of the package versions as they appear in their public registries. It is provided for informational purposes only.
- package/LICENSE +116 -0
- package/README.md +231 -0
- package/dist/constants.cjs +20 -0
- package/dist/constants.js +20 -0
- package/dist/index.cjs +12 -0
- package/dist/index.d.ts +235 -0
- package/dist/index.js +12 -0
- package/dist/recorder/live-recorder/index.cjs +125 -0
- package/dist/recorder/live-recorder/index.js +125 -0
- package/dist/recorder/live-streaming/recorder/recorder-compound.cjs +244 -0
- package/dist/recorder/live-streaming/recorder/recorder-compound.js +244 -0
- package/dist/recorder/live-streaming/recorder/recorder-context.cjs +20 -0
- package/dist/recorder/live-streaming/recorder/recorder-context.js +20 -0
- package/dist/recorder/live-streaming/stack-recorder/stack-recorder-compound.cjs +126 -0
- package/dist/recorder/live-streaming/stack-recorder/stack-recorder-compound.js +126 -0
- package/dist/recorder/live-streaming/use-recording-amplitudes.cjs +92 -0
- package/dist/recorder/live-streaming/use-recording-amplitudes.js +92 -0
- package/dist/recorder/use-audio-analyser.cjs +59 -0
- package/dist/recorder/use-audio-analyser.js +59 -0
- package/dist/recorder/use-audio-recorder.cjs +139 -0
- package/dist/recorder/use-audio-recorder.js +139 -0
- package/dist/recorder/util-mime-type.cjs +15 -0
- package/dist/recorder/util-mime-type.js +15 -0
- package/dist/waveform/index.cjs +73 -0
- package/dist/waveform/index.js +73 -0
- package/dist/waveform/util-audio-decoder.cjs +45 -0
- package/dist/waveform/util-audio-decoder.js +45 -0
- package/dist/waveform/util-suspense.cjs +24 -0
- package/dist/waveform/util-suspense.js +24 -0
- package/dist/waveform/waveform-renderer.cjs +105 -0
- package/dist/waveform/waveform-renderer.js +105 -0
- package/package.json +74 -0

package/dist/recorder/live-streaming/use-recording-amplitudes.js
@@ -0,0 +1,92 @@
+import { useRef, useCallback, useSyncExternalStore, useEffect } from "react";
+import { useAudioAnalyser } from "../use-audio-analyser.js";
+function useRecordingAmplitudes(options) {
+  const { mediaRecorder, fftSize = 2048, smoothingTimeConstant = 0.4, sampleInterval = 50 } = options;
+  const amplitudeDataRef = useRef([]);
+  const listenersRef = useRef(/* @__PURE__ */ new Set());
+  const samplingIntervalRef = useRef(null);
+  const subscribe = useCallback((onStoreChange) => {
+    listenersRef.current.add(onStoreChange);
+    return () => listenersRef.current.delete(onStoreChange);
+  }, []);
+  const getSnapshot = useCallback(() => amplitudeDataRef.current, []);
+  const notifyListeners = useCallback(() => {
+    for (const listener of listenersRef.current) {
+      listener();
+    }
+  }, []);
+  const amplitudes = useSyncExternalStore(subscribe, getSnapshot, getSnapshot);
+  const { audioContextRef, analyserRef, dataArrayRef, bufferLengthRef } = useAudioAnalyser({
+    mediaRecorder,
+    fftSize,
+    smoothingTimeConstant
+  });
+  const clearAmplitudes = useCallback(() => {
+    amplitudeDataRef.current = [];
+    notifyListeners();
+  }, [notifyListeners]);
+  const prevMediaRecorderRef = useRef(null);
+  useEffect(() => {
+    if (mediaRecorder !== prevMediaRecorderRef.current) {
+      amplitudeDataRef.current = [];
+      notifyListeners();
+      prevMediaRecorderRef.current = mediaRecorder;
+    }
+  }, [mediaRecorder, notifyListeners]);
+  useEffect(() => {
+    if (!mediaRecorder) {
+      return;
+    }
+    const sampleAmplitude = () => {
+      const analyser = analyserRef.current;
+      const dataArray = dataArrayRef.current;
+      const bufferLength = bufferLengthRef.current;
+      if (!analyser || !dataArray) return;
+      analyser.getByteTimeDomainData(dataArray);
+      let sum = 0;
+      for (let i = 0; i < bufferLength; i++) {
+        const normalized = (dataArray[i] - 128) / 128;
+        sum += normalized * normalized;
+      }
+      const rms = Math.sqrt(sum / bufferLength);
+      const amplitude = Math.min(1, rms * 2);
+      amplitudeDataRef.current.push(amplitude);
+      notifyListeners();
+    };
+    const startSampling = () => {
+      if (!samplingIntervalRef.current) {
+        samplingIntervalRef.current = window.setInterval(sampleAmplitude, sampleInterval);
+      }
+    };
+    const stopSampling = () => {
+      if (samplingIntervalRef.current) {
+        clearInterval(samplingIntervalRef.current);
+        samplingIntervalRef.current = null;
+      }
+    };
+    const handlePause = () => stopSampling();
+    const handleResume = () => startSampling();
+    mediaRecorder.addEventListener("pause", handlePause);
+    mediaRecorder.addEventListener("resume", handleResume);
+    const timeoutId = setTimeout(() => {
+      startSampling();
+    }, 50);
+    return () => {
+      clearTimeout(timeoutId);
+      mediaRecorder.removeEventListener("pause", handlePause);
+      mediaRecorder.removeEventListener("resume", handleResume);
+      stopSampling();
+    };
+  }, [mediaRecorder, sampleInterval, analyserRef, dataArrayRef, bufferLengthRef, notifyListeners]);
+  return {
+    amplitudes,
+    audioContext: audioContextRef.current,
+    analyser: analyserRef.current,
+    isRecording: mediaRecorder?.state === "recording",
+    isPaused: mediaRecorder?.state === "paused",
+    clearAmplitudes
+  };
+}
+export {
+  useRecordingAmplitudes
+};
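
For orientation: `useRecordingAmplitudes` samples the analyser every `sampleInterval` ms, converts each byte buffer to an RMS level (time-domain bytes are centered on 128, so `(byte - 128) / 128` normalizes to [-1, 1]), clamps the doubled RMS to [0, 1], and publishes the growing array through `useSyncExternalStore`. A minimal usage sketch, assuming the hook is re-exported from the package root (only the dist path is visible in this diff); the component and markup are illustrative, not part of the package:

```jsx
import { useRecordingAmplitudes } from "react-audio-wavekit"; // assumed root export

// Illustrative component: one bar per sampled amplitude (values in [0, 1]).
function LiveBars({ mediaRecorder }) {
  const { amplitudes, isRecording, clearAmplitudes } = useRecordingAmplitudes({
    mediaRecorder,      // active MediaRecorder, e.g. from useAudioRecorder below
    sampleInterval: 50, // the hook's default: one RMS sample every 50 ms
  });
  return (
    <div>
      <button onClick={clearAmplitudes} disabled={!isRecording}>Reset</button>
      {amplitudes.map((a, i) => (
        <span
          key={i}
          style={{ display: "inline-block", width: 2, marginRight: 1, height: 40 * a, background: "currentColor" }}
        />
      ))}
    </div>
  );
}
```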

package/dist/recorder/use-audio-analyser.cjs
@@ -0,0 +1,59 @@
+"use strict";
+Object.defineProperty(exports, Symbol.toStringTag, { value: "Module" });
+const react = require("react");
+function useAudioAnalyser({
+  mediaRecorder,
+  fftSize = 2048,
+  smoothingTimeConstant = 0.8
+}) {
+  const audioContextRef = react.useRef(null);
+  const analyserRef = react.useRef(null);
+  const sourceRef = react.useRef(null);
+  const dataArrayRef = react.useRef(null);
+  const bufferLengthRef = react.useRef(0);
+  react.useEffect(() => {
+    if (!mediaRecorder) {
+      return;
+    }
+    let audioContext = null;
+    let analyser = null;
+    let source = null;
+    try {
+      audioContext = new AudioContext();
+      analyser = audioContext.createAnalyser();
+      analyser.fftSize = fftSize;
+      analyser.smoothingTimeConstant = smoothingTimeConstant;
+      audioContextRef.current = audioContext;
+      analyserRef.current = analyser;
+      const stream = mediaRecorder.stream;
+      source = audioContext.createMediaStreamSource(stream);
+      source.connect(analyser);
+      sourceRef.current = source;
+      const bufferLength = analyser.frequencyBinCount;
+      bufferLengthRef.current = bufferLength;
+      dataArrayRef.current = new Uint8Array(bufferLength);
+    } catch (error) {
+      console.error("Failed to setup audio analyser:", error);
+    }
+    return () => {
+      if (sourceRef.current) {
+        sourceRef.current.disconnect();
+        sourceRef.current = null;
+      }
+      if (audioContextRef.current && audioContextRef.current.state !== "closed") {
+        audioContextRef.current.close();
+      }
+      audioContextRef.current = null;
+      analyserRef.current = null;
+      dataArrayRef.current = null;
+      bufferLengthRef.current = 0;
+    };
+  }, [mediaRecorder, fftSize, smoothingTimeConstant]);
+  return {
+    audioContextRef,
+    analyserRef,
+    dataArrayRef,
+    bufferLengthRef
+  };
+}
+exports.useAudioAnalyser = useAudioAnalyser;

package/dist/recorder/use-audio-analyser.js
@@ -0,0 +1,59 @@
+import { useRef, useEffect } from "react";
+function useAudioAnalyser({
+  mediaRecorder,
+  fftSize = 2048,
+  smoothingTimeConstant = 0.8
+}) {
+  const audioContextRef = useRef(null);
+  const analyserRef = useRef(null);
+  const sourceRef = useRef(null);
+  const dataArrayRef = useRef(null);
+  const bufferLengthRef = useRef(0);
+  useEffect(() => {
+    if (!mediaRecorder) {
+      return;
+    }
+    let audioContext = null;
+    let analyser = null;
+    let source = null;
+    try {
+      audioContext = new AudioContext();
+      analyser = audioContext.createAnalyser();
+      analyser.fftSize = fftSize;
+      analyser.smoothingTimeConstant = smoothingTimeConstant;
+      audioContextRef.current = audioContext;
+      analyserRef.current = analyser;
+      const stream = mediaRecorder.stream;
+      source = audioContext.createMediaStreamSource(stream);
+      source.connect(analyser);
+      sourceRef.current = source;
+      const bufferLength = analyser.frequencyBinCount;
+      bufferLengthRef.current = bufferLength;
+      dataArrayRef.current = new Uint8Array(bufferLength);
+    } catch (error) {
+      console.error("Failed to setup audio analyser:", error);
+    }
+    return () => {
+      if (sourceRef.current) {
+        sourceRef.current.disconnect();
+        sourceRef.current = null;
+      }
+      if (audioContextRef.current && audioContextRef.current.state !== "closed") {
+        audioContextRef.current.close();
+      }
+      audioContextRef.current = null;
+      analyserRef.current = null;
+      dataArrayRef.current = null;
+      bufferLengthRef.current = 0;
+    };
+  }, [mediaRecorder, fftSize, smoothingTimeConstant]);
+  return {
+    audioContextRef,
+    analyserRef,
+    dataArrayRef,
+    bufferLengthRef
+  };
+}
+export {
+  useAudioAnalyser
+};
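
`useAudioAnalyser` wires the recorder's `MediaStream` into a Web Audio `AnalyserNode` and hands back refs rather than values, so callers can poll on their own schedule without triggering re-renders. A sketch of consuming those refs directly, mirroring the sampling loop in `use-recording-amplitudes` above (the root import is an assumption; this diff shows only the dist path, and `useLevelLogger` is a hypothetical consumer):

```js
import { useEffect } from "react";
import { useAudioAnalyser } from "react-audio-wavekit"; // assumed root export

// Hypothetical hook: logs an RMS level every 250 ms using the returned refs.
function useLevelLogger(mediaRecorder) {
  const { analyserRef, dataArrayRef, bufferLengthRef } = useAudioAnalyser({ mediaRecorder });
  useEffect(() => {
    const id = setInterval(() => {
      const analyser = analyserRef.current;
      const data = dataArrayRef.current;
      if (!analyser || !data) return;
      analyser.getByteTimeDomainData(data); // time-domain bytes; 128 ≈ silence
      let sum = 0;
      for (let i = 0; i < bufferLengthRef.current; i++) {
        const n = (data[i] - 128) / 128; // normalize to [-1, 1]
        sum += n * n;
      }
      console.log("rms:", Math.sqrt(sum / bufferLengthRef.current));
    }, 250);
    return () => clearInterval(id);
  }, [mediaRecorder, analyserRef, dataArrayRef, bufferLengthRef]);
}
```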

package/dist/recorder/use-audio-recorder.cjs
@@ -0,0 +1,139 @@
+"use strict";
+Object.defineProperty(exports, Symbol.toStringTag, { value: "Module" });
+const react = require("react");
+const utilMimeType = require("./util-mime-type.cjs");
+const useAudioRecorder = (config = {}) => {
+  const { mimeType, audioConstraints = true, onRecordingComplete } = config;
+  const resolvedMimeType = typeof mimeType === "function" ? mimeType() : mimeType !== void 0 ? mimeType : utilMimeType.getDefaultMimeType();
+  const [mediaRecorder, setMediaRecorder] = react.useState(null);
+  const [recordingBlob, setRecordingBlob] = react.useState(null);
+  const [recordingTime, setRecordingTime] = react.useState(0);
+  const [isRecording, setIsRecording] = react.useState(false);
+  const [isPaused, setIsPaused] = react.useState(false);
+  const [error, setError] = react.useState(null);
+  const audioChunksRef = react.useRef([]);
+  const timerRef = react.useRef(null);
+  const streamRef = react.useRef(null);
+  const mediaRecorderRef = react.useRef(null);
+  const isRecordingRef = react.useRef(false);
+  const isPausedRef = react.useRef(false);
+  react.useEffect(() => {
+    mediaRecorderRef.current = mediaRecorder;
+    isRecordingRef.current = isRecording;
+    isPausedRef.current = isPaused;
+  }, [mediaRecorder, isRecording, isPaused]);
+  react.useEffect(() => {
+    if (isRecording && !isPaused) {
+      timerRef.current = window.setInterval(() => {
+        setRecordingTime((prev) => prev + 1);
+      }, 1e3);
+    } else if (timerRef.current) {
+      clearInterval(timerRef.current);
+      timerRef.current = null;
+    }
+    return () => {
+      if (timerRef.current) {
+        clearInterval(timerRef.current);
+      }
+    };
+  }, [isRecording, isPaused]);
+  const startRecording = react.useCallback(async () => {
+    try {
+      setError(null);
+      audioChunksRef.current = [];
+      setRecordingBlob(null);
+      setRecordingTime(0);
+      const stream = await navigator.mediaDevices.getUserMedia({
+        audio: audioConstraints
+      });
+      streamRef.current = stream;
+      const recorder = new MediaRecorder(stream, {
+        mimeType: MediaRecorder.isTypeSupported(resolvedMimeType) ? resolvedMimeType : utilMimeType.getDefaultMimeType()
+      });
+      recorder.ondataavailable = (event) => {
+        if (event.data.size > 0) {
+          audioChunksRef.current.push(event.data);
+        }
+      };
+      recorder.onstop = () => {
+        const blob = new Blob(audioChunksRef.current, { type: recorder.mimeType });
+        setRecordingBlob(blob);
+        onRecordingComplete?.(blob);
+        setIsRecording(false);
+        setIsPaused(false);
+        setMediaRecorder(null);
+        if (streamRef.current) {
+          for (const track of streamRef.current.getTracks()) {
+            track.stop();
+          }
+          streamRef.current = null;
+        }
+      };
+      recorder.onerror = (event) => {
+        setError(new Error(`Recording error: ${event}`));
+        setIsRecording(false);
+        setIsPaused(false);
+      };
+      setMediaRecorder(recorder);
+      recorder.start(100);
+      setIsRecording(true);
+      setIsPaused(false);
+    } catch (err) {
+      const error2 = err instanceof Error ? err : new Error(String(err));
+      setError(error2);
+      console.error("Failed to start recording:", error2);
+    }
+  }, [resolvedMimeType, audioConstraints, onRecordingComplete]);
+  const stopRecording = react.useCallback(() => {
+    if (mediaRecorderRef.current && isRecordingRef.current) {
+      mediaRecorderRef.current.stop();
+    }
+  }, []);
+  const pauseRecording = react.useCallback(() => {
+    if (mediaRecorderRef.current && isRecordingRef.current && !isPausedRef.current) {
+      mediaRecorderRef.current.pause();
+      setIsPaused(true);
+    }
+  }, []);
+  const resumeRecording = react.useCallback(() => {
+    if (mediaRecorderRef.current && isRecordingRef.current && isPausedRef.current) {
+      mediaRecorderRef.current.resume();
+      setIsPaused(false);
+    }
+  }, []);
+  const clearRecording = react.useCallback(() => {
+    if (mediaRecorderRef.current && isRecordingRef.current) {
+      mediaRecorderRef.current.stop();
+    }
+    audioChunksRef.current = [];
+    setRecordingBlob(null);
+    setRecordingTime(0);
+    setError(null);
+  }, []);
+  react.useEffect(() => {
+    return () => {
+      if (streamRef.current) {
+        for (const track of streamRef.current.getTracks()) {
+          track.stop();
+        }
+      }
+      if (timerRef.current) {
+        clearInterval(timerRef.current);
+      }
+    };
+  }, []);
+  return {
+    startRecording,
+    stopRecording,
+    pauseRecording,
+    resumeRecording,
+    clearRecording,
+    mediaRecorder,
+    recordingBlob,
+    recordingTime,
+    isRecording,
+    isPaused,
+    error
+  };
+};
+exports.useAudioRecorder = useAudioRecorder;

package/dist/recorder/use-audio-recorder.js
@@ -0,0 +1,139 @@
+import { useState, useRef, useEffect, useCallback } from "react";
+import { getDefaultMimeType } from "./util-mime-type.js";
+const useAudioRecorder = (config = {}) => {
+  const { mimeType, audioConstraints = true, onRecordingComplete } = config;
+  const resolvedMimeType = typeof mimeType === "function" ? mimeType() : mimeType !== void 0 ? mimeType : getDefaultMimeType();
+  const [mediaRecorder, setMediaRecorder] = useState(null);
+  const [recordingBlob, setRecordingBlob] = useState(null);
+  const [recordingTime, setRecordingTime] = useState(0);
+  const [isRecording, setIsRecording] = useState(false);
+  const [isPaused, setIsPaused] = useState(false);
+  const [error, setError] = useState(null);
+  const audioChunksRef = useRef([]);
+  const timerRef = useRef(null);
+  const streamRef = useRef(null);
+  const mediaRecorderRef = useRef(null);
+  const isRecordingRef = useRef(false);
+  const isPausedRef = useRef(false);
+  useEffect(() => {
+    mediaRecorderRef.current = mediaRecorder;
+    isRecordingRef.current = isRecording;
+    isPausedRef.current = isPaused;
+  }, [mediaRecorder, isRecording, isPaused]);
+  useEffect(() => {
+    if (isRecording && !isPaused) {
+      timerRef.current = window.setInterval(() => {
+        setRecordingTime((prev) => prev + 1);
+      }, 1e3);
+    } else if (timerRef.current) {
+      clearInterval(timerRef.current);
+      timerRef.current = null;
+    }
+    return () => {
+      if (timerRef.current) {
+        clearInterval(timerRef.current);
+      }
+    };
+  }, [isRecording, isPaused]);
+  const startRecording = useCallback(async () => {
+    try {
+      setError(null);
+      audioChunksRef.current = [];
+      setRecordingBlob(null);
+      setRecordingTime(0);
+      const stream = await navigator.mediaDevices.getUserMedia({
+        audio: audioConstraints
+      });
+      streamRef.current = stream;
+      const recorder = new MediaRecorder(stream, {
+        mimeType: MediaRecorder.isTypeSupported(resolvedMimeType) ? resolvedMimeType : getDefaultMimeType()
+      });
+      recorder.ondataavailable = (event) => {
+        if (event.data.size > 0) {
+          audioChunksRef.current.push(event.data);
+        }
+      };
+      recorder.onstop = () => {
+        const blob = new Blob(audioChunksRef.current, { type: recorder.mimeType });
+        setRecordingBlob(blob);
+        onRecordingComplete?.(blob);
+        setIsRecording(false);
+        setIsPaused(false);
+        setMediaRecorder(null);
+        if (streamRef.current) {
+          for (const track of streamRef.current.getTracks()) {
+            track.stop();
+          }
+          streamRef.current = null;
+        }
+      };
+      recorder.onerror = (event) => {
+        setError(new Error(`Recording error: ${event}`));
+        setIsRecording(false);
+        setIsPaused(false);
+      };
+      setMediaRecorder(recorder);
+      recorder.start(100);
+      setIsRecording(true);
+      setIsPaused(false);
+    } catch (err) {
+      const error2 = err instanceof Error ? err : new Error(String(err));
+      setError(error2);
+      console.error("Failed to start recording:", error2);
+    }
+  }, [resolvedMimeType, audioConstraints, onRecordingComplete]);
+  const stopRecording = useCallback(() => {
+    if (mediaRecorderRef.current && isRecordingRef.current) {
+      mediaRecorderRef.current.stop();
+    }
+  }, []);
+  const pauseRecording = useCallback(() => {
+    if (mediaRecorderRef.current && isRecordingRef.current && !isPausedRef.current) {
+      mediaRecorderRef.current.pause();
+      setIsPaused(true);
+    }
+  }, []);
+  const resumeRecording = useCallback(() => {
+    if (mediaRecorderRef.current && isRecordingRef.current && isPausedRef.current) {
+      mediaRecorderRef.current.resume();
+      setIsPaused(false);
+    }
+  }, []);
+  const clearRecording = useCallback(() => {
+    if (mediaRecorderRef.current && isRecordingRef.current) {
+      mediaRecorderRef.current.stop();
+    }
+    audioChunksRef.current = [];
+    setRecordingBlob(null);
+    setRecordingTime(0);
+    setError(null);
+  }, []);
+  useEffect(() => {
+    return () => {
+      if (streamRef.current) {
+        for (const track of streamRef.current.getTracks()) {
+          track.stop();
+        }
+      }
+      if (timerRef.current) {
+        clearInterval(timerRef.current);
+      }
+    };
+  }, []);
+  return {
+    startRecording,
+    stopRecording,
+    pauseRecording,
+    resumeRecording,
+    clearRecording,
+    mediaRecorder,
+    recordingBlob,
+    recordingTime,
+    isRecording,
+    isPaused,
+    error
+  };
+};
+export {
+  useAudioRecorder
+};
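
`useAudioRecorder` owns the `getUserMedia` stream, a one-second elapsed-time timer, and the `MediaRecorder` lifecycle (chunks are flushed every 100 ms via `recorder.start(100)` and reassembled into a `Blob` on stop). A minimal controls sketch, again assuming the hook is exported from the package root; the markup is illustrative:

```jsx
import { useAudioRecorder } from "react-audio-wavekit"; // assumed root export

function RecorderControls() {
  const {
    startRecording, stopRecording, pauseRecording, resumeRecording,
    isRecording, isPaused, recordingTime, recordingBlob, error,
  } = useAudioRecorder({
    // Fires from the recorder's onstop handler with the assembled Blob.
    onRecordingComplete: (blob) => console.log("recorded", blob.type, blob.size),
  });
  return (
    <div>
      {!isRecording && <button onClick={startRecording}>Record</button>}
      {isRecording && !isPaused && <button onClick={pauseRecording}>Pause</button>}
      {isRecording && isPaused && <button onClick={resumeRecording}>Resume</button>}
      {isRecording && <button onClick={stopRecording}>Stop ({recordingTime}s)</button>}
      {/* Sketch only: a real app would revoke this object URL when done. */}
      {recordingBlob && <audio controls src={URL.createObjectURL(recordingBlob)} />}
      {error && <p role="alert">{error.message}</p>}
    </div>
  );
}
```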

package/dist/recorder/util-mime-type.cjs
@@ -0,0 +1,15 @@
+"use strict";
+Object.defineProperty(exports, Symbol.toStringTag, { value: "Module" });
+function getDefaultMimeType() {
+  if (MediaRecorder.isTypeSupported("audio/mp4")) {
+    return "audio/mp4";
+  }
+  if (MediaRecorder.isTypeSupported("audio/webm;codecs=opus")) {
+    return "audio/webm;codecs=opus";
+  }
+  if (MediaRecorder.isTypeSupported("audio/webm")) {
+    return "audio/webm";
+  }
+  return "audio/webm";
+}
+exports.getDefaultMimeType = getDefaultMimeType;

package/dist/recorder/util-mime-type.js
@@ -0,0 +1,15 @@
+function getDefaultMimeType() {
+  if (MediaRecorder.isTypeSupported("audio/mp4")) {
+    return "audio/mp4";
+  }
+  if (MediaRecorder.isTypeSupported("audio/webm;codecs=opus")) {
+    return "audio/webm;codecs=opus";
+  }
+  if (MediaRecorder.isTypeSupported("audio/webm")) {
+    return "audio/webm";
+  }
+  return "audio/webm";
+}
+export {
+  getDefaultMimeType
+};
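
`getDefaultMimeType` probes `MediaRecorder.isTypeSupported` in a fixed order, trying `audio/mp4` first (historically the only `MediaRecorder` container Safari accepts) before the WebM variants. Combined with the `mimeType` option in `useAudioRecorder`, the overall resolution order is: function result, then explicit string, then this default. A self-contained sketch restating that logic from the hunks above, rather than importing from the package:

```js
// Restates the resolution logic from use-audio-recorder + util-mime-type above.
function getDefaultMimeType() {
  for (const type of ["audio/mp4", "audio/webm;codecs=opus", "audio/webm"]) {
    if (MediaRecorder.isTypeSupported(type)) return type;
  }
  return "audio/webm"; // last-resort fallback, even if unsupported
}

function resolveMimeType(mimeType) {
  if (typeof mimeType === "function") return mimeType(); // lazily computed
  if (mimeType !== undefined) return mimeType;           // explicit string wins
  return getDefaultMimeType();                           // otherwise probe defaults
}

console.log(resolveMimeType(undefined)); // e.g. "audio/mp4" on Safari
```

Note that `useAudioRecorder` still re-checks `isTypeSupported(resolvedMimeType)` when constructing the recorder and falls back to `getDefaultMimeType()` if the resolved type turns out to be unsupported.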

package/dist/waveform/index.cjs
@@ -0,0 +1,73 @@
+"use strict";
+Object.defineProperties(exports, { __esModule: { value: true }, [Symbol.toStringTag]: { value: "Module" } });
+const jsxRuntime = require("react/jsx-runtime");
+const react = require("react");
+const utilAudioDecoder = require("./util-audio-decoder.cjs");
+const utilSuspense = require("./util-suspense.cjs");
+const waveformRenderer = require("./waveform-renderer.cjs");
+const DEFAULT_SAMPLE_COUNT = 500;
+const AudioWaveform = react.forwardRef(function AudioWaveform2({ blob, appearance, suspense = false, currentTime, duration, onSeek, ...props }, ref) {
+  const [peaks, setPeaks] = react.useState(null);
+  const [error, setError] = react.useState(null);
+  const [sampleCount, setSampleCount] = react.useState(DEFAULT_SAMPLE_COUNT);
+  const sampleCountInitializedRef = react.useRef(false);
+  const blobRef = react.useRef(null);
+  const rendererRef = react.useRef(null);
+  react.useEffect(() => {
+    if (!sampleCountInitializedRef.current) {
+      setSampleCount(Math.max(500, Math.ceil(window.innerWidth)));
+      sampleCountInitializedRef.current = true;
+    }
+  }, []);
+  react.useEffect(() => {
+    if (ref && typeof ref === "function") {
+      ref({ canvas: rendererRef.current?.canvas || null });
+    } else if (ref) {
+      ref.current = { canvas: rendererRef.current?.canvas || null };
+    }
+  }, [ref]);
+  const suspensePeaks = blob && suspense ? utilSuspense.unwrapPromise(utilAudioDecoder.getAudioData(blob, sampleCount)) : null;
+  react.useEffect(() => {
+    if (suspense) return;
+    if (!blob) {
+      setPeaks(null);
+      setError(null);
+      blobRef.current = null;
+      return;
+    }
+    if (blobRef.current === blob) return;
+    blobRef.current = blob;
+    let cancelled = false;
+    setError(null);
+    utilAudioDecoder.decodeAudioBlob(blob, sampleCount).then((data) => {
+      if (!cancelled) {
+        setPeaks(data);
+      }
+    }).catch((err) => {
+      if (!cancelled) {
+        setError(err instanceof Error ? err : new Error(err?.message || "Failed to decode audio"));
+      }
+    });
+    return () => {
+      cancelled = true;
+    };
+  }, [blob, sampleCount, suspense]);
+  if (!suspense && error) {
+    throw error;
+  }
+  const finalPeaks = suspense ? suspensePeaks : peaks;
+  return /* @__PURE__ */ jsxRuntime.jsx(
+    waveformRenderer.WaveformRenderer,
+    {
+      ref: rendererRef,
+      peaks: finalPeaks,
+      appearance,
+      currentTime,
+      duration,
+      onSeek,
+      ...props
+    }
+  );
+});
+exports.AudioWaveform = AudioWaveform;
+exports.default = AudioWaveform;

package/dist/waveform/index.js
@@ -0,0 +1,73 @@
+import { jsx } from "react/jsx-runtime";
+import { forwardRef, useState, useRef, useEffect } from "react";
+import { getAudioData, decodeAudioBlob } from "./util-audio-decoder.js";
+import { unwrapPromise } from "./util-suspense.js";
+import { WaveformRenderer } from "./waveform-renderer.js";
+const DEFAULT_SAMPLE_COUNT = 500;
+const AudioWaveform = forwardRef(function AudioWaveform2({ blob, appearance, suspense = false, currentTime, duration, onSeek, ...props }, ref) {
+  const [peaks, setPeaks] = useState(null);
+  const [error, setError] = useState(null);
+  const [sampleCount, setSampleCount] = useState(DEFAULT_SAMPLE_COUNT);
+  const sampleCountInitializedRef = useRef(false);
+  const blobRef = useRef(null);
+  const rendererRef = useRef(null);
+  useEffect(() => {
+    if (!sampleCountInitializedRef.current) {
+      setSampleCount(Math.max(500, Math.ceil(window.innerWidth)));
+      sampleCountInitializedRef.current = true;
+    }
+  }, []);
+  useEffect(() => {
+    if (ref && typeof ref === "function") {
+      ref({ canvas: rendererRef.current?.canvas || null });
+    } else if (ref) {
+      ref.current = { canvas: rendererRef.current?.canvas || null };
+    }
+  }, [ref]);
+  const suspensePeaks = blob && suspense ? unwrapPromise(getAudioData(blob, sampleCount)) : null;
+  useEffect(() => {
+    if (suspense) return;
+    if (!blob) {
+      setPeaks(null);
+      setError(null);
+      blobRef.current = null;
+      return;
+    }
+    if (blobRef.current === blob) return;
+    blobRef.current = blob;
+    let cancelled = false;
+    setError(null);
+    decodeAudioBlob(blob, sampleCount).then((data) => {
+      if (!cancelled) {
+        setPeaks(data);
+      }
+    }).catch((err) => {
+      if (!cancelled) {
+        setError(err instanceof Error ? err : new Error(err?.message || "Failed to decode audio"));
+      }
+    });
+    return () => {
+      cancelled = true;
+    };
+  }, [blob, sampleCount, suspense]);
+  if (!suspense && error) {
+    throw error;
+  }
+  const finalPeaks = suspense ? suspensePeaks : peaks;
+  return /* @__PURE__ */ jsx(
+    WaveformRenderer,
+    {
+      ref: rendererRef,
+      peaks: finalPeaks,
+      appearance,
+      currentTime,
+      duration,
+      onSeek,
+      ...props
+    }
+  );
+});
+export {
+  AudioWaveform,
+  AudioWaveform as default
+};
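
`AudioWaveform` decodes each blob once per blob identity (tracked via `blobRef`), samples `max(500, window.innerWidth)` peaks, and either rethrows decode errors toward the nearest error boundary or, when `suspense` is set, unwraps a promise for React Suspense. A closing sketch combining it with `useAudioRecorder` (root exports assumed; only dist paths are visible in this diff):

```jsx
import { AudioWaveform, useAudioRecorder } from "react-audio-wavekit"; // assumed root exports

function RecordAndShow() {
  const { startRecording, stopRecording, isRecording, recordingBlob } = useAudioRecorder();
  return (
    <div>
      <button onClick={isRecording ? stopRecording : startRecording}>
        {isRecording ? "Stop" : "Record"}
      </button>
      {/* Decoding happens in an effect; peaks render once decodeAudioBlob resolves. */}
      {recordingBlob && <AudioWaveform blob={recordingBlob} />}
    </div>
  );
}
```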