sera-ai 1.0.11 → 1.0.13
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/index.js +26 -53
- package/dist/index.js.map +1 -1
- package/dist/index.mjs +26 -53
- package/dist/index.mjs.map +1 -1
- package/package.json +1 -1
package/dist/index.js
CHANGED
@@ -2670,7 +2670,6 @@ var createAudioProcessorWorker = () => {
     this._recordingStartTime = Date.now();
     this._initialSilenceThreshold = 44100 * 10;
     this._isInitialPhase = true;
-    this._audioActivityBuffer = []; // Track recent audio activity
     this._bufferSize = 0; // Track total samples in buffer

     this.port.onmessage = (event) => {
@@ -2765,12 +2764,6 @@ var createAudioProcessorWorker = () => {
       this._isInitialPhase = false;
       this._silentSampleCount = 0;
       this._lastAudioTime = Date.now();
-
-      // Track audio activity for chunk validation
-      this._audioActivityBuffer.push(audioLevel);
-      if (this._audioActivityBuffer.length > 1000) {
-        this._audioActivityBuffer.shift(); // Keep only recent activity
-      }
     } else {
       this._silentSampleCount += samples.length;

@@ -2795,43 +2788,36 @@ var createAudioProcessorWorker = () => {

     if (this._uploadChunk && !this._uploadingChunk) {
       this._uploadingChunk = true;
-
-      // Check if buffer has meaningful audio content before uploading
-      const recentActivity = this._audioActivityBuffer.slice(-100); // Last 100 activity measurements
-      const hasRecentAudio = recentActivity.some(level => level > this._audioThreshold * 2);
-
+
       // Properly flatten the buffer by concatenating Float32Arrays
       let totalLength = 0;
       for (let i = 0; i < this._buffer.length; i++) {
         totalLength += this._buffer[i].length;
       }
-
+
       const flat = new Float32Array(totalLength);
       let offset = 0;
       for (let i = 0; i < this._buffer.length; i++) {
         flat.set(this._buffer[i], offset);
         offset += this._buffer[i].length;
       }
-
-      //
-      if (
+
+      // Always send chunks to server - let server handle silence filtering
+      if (this._bufferSize > 0) {
         this.port.postMessage(
           {
             command: "chunk",
             audioBuffer: flat.buffer,
-            hasActivity: hasRecentAudio,
             bufferDuration: this._bufferSize / 44100
           },
           [flat.buffer]
         );
-
+        console.log('[UPLOAD] Sending chunk: ' + (this._bufferSize / 44100).toFixed(1) + 's');
+        // Clear buffer after upload
         this._buffer = [];
         this._bufferSize = 0;
-      } else {
-        console.log('[SKIP] Skipping silent chunk upload (' + (this._bufferSize / 44100).toFixed(1) + 's, no recent activity)');
-        // Don't clear buffer for skipped chunks, keep accumulating
       }
-
+
       this._uploadChunk = false;
       this._uploadingChunk = false;
     }
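The hunk above removes the worker's client-side activity gate but keeps the existing flattening step: the Float32Array blocks accumulated in this._buffer are concatenated into one contiguous buffer before being transferred out of the worker. A standalone sketch of that concatenation pattern (flattenBuffers is a hypothetical helper name, not part of the package):

// Concatenate an array of Float32Array blocks into a single Float32Array.
// Mirrors the flattening step in the hunk above; flattenBuffers is an
// illustrative name, not an export of sera-ai.
function flattenBuffers(buffers) {
  let totalLength = 0;
  for (const block of buffers) {
    totalLength += block.length;
  }
  const flat = new Float32Array(totalLength);
  let offset = 0;
  for (const block of buffers) {
    flat.set(block, offset);
    offset += block.length;
  }
  return flat;
}

// Example: two 3-sample blocks become one 6-sample buffer.
// flattenBuffers([new Float32Array([1, 2, 3]), new Float32Array([4, 5, 6])]).length === 6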
@@ -3098,11 +3084,14 @@ var useAudioRecorder = ({
     processorRef.current?.port.postMessage({ command: "resetUploadChunk" });
     if (audioData && localSessionIdRef.current && !retry) {
       try {
+        console.log(`[DB] Saving chunk ${sequence} to IndexedDB session ${localSessionIdRef.current}`);
         await appendAudioToSession(localSessionIdRef.current, audioData, sequence);
-        console.log(`[
+        console.log(`[DB] \u2713 Successfully saved audio chunk ${sequence} to IndexedDB (${audioData.length} samples)`);
       } catch (error2) {
-        console.error(`[
+        console.error(`[DB] \u2717 Failed to save audio chunk ${sequence} to IndexedDB:`, error2);
       }
+    } else {
+      console.log(`[DB] Skipping IndexedDB save: audioData=${!!audioData}, sessionId=${localSessionIdRef.current}, retry=${retry}`);
     }
     try {
       const data = await pRetry__default.default(
@@ -3119,37 +3108,19 @@ var useAudioRecorder = ({
       console.log(
         `[PROCESSING] Processing audio chunk: ${audioData.length} samples (${(audioData.length / 44100).toFixed(2)}s)`
       );
-      let hasAudio = false;
       let maxAmplitude = 0;
       let nonZeroSamples = 0;
       for (let i = 0; i < audioData.length; i++) {
         const amplitude = Math.abs(audioData[i]);
         if (amplitude > maxAmplitude) maxAmplitude = amplitude;
         if (amplitude > 1e-3) {
-          hasAudio = true;
           nonZeroSamples++;
         }
       }
       const audioPercentage = nonZeroSamples / audioData.length;
       console.log(
-        `[AUDIO] Audio
+        `[AUDIO] Audio stats: maxAmplitude=${maxAmplitude.toFixed(4)}, audioContent=${(audioPercentage * 100).toFixed(2)}%, sequence=${sequence}, isFinal=${isFinalChunk}`
       );
-      if (!hasAudio || maxAmplitude < 1e-3 || audioPercentage < 0.01) {
-        console.log(
-          `[SKIP] Skipping silent chunk (${audioPercentage < 0.01 ? "too little audio content" : "completely silent"})`
-        );
-        if (!isFinalChunk) {
-          console.log(`[OUT] Non-final silent chunk skipped entirely`);
-          return;
-        } else {
-          console.log(`[OUT] Final silent chunk - will send minimal audio to close session`);
-          const minimalAudio = new Float32Array(1e3);
-          for (let i = 0; i < minimalAudio.length; i += 100) {
-            minimalAudio[i] = 1e-3;
-          }
-          audioData = minimalAudio;
-        }
-      }
       const sampleRate = audioContextRef.current?.sampleRate || 16e3;
       const timestamp = Date.now();
       const fileName = `audio-chunk-${timestamp}.wav`;
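The silence-skipping branch is dropped, but the per-chunk measurement it relied on stays in place: maximum absolute amplitude and the fraction of samples above a 1e-3 threshold, which are now only logged. A small standalone sketch of that measurement (computeAudioStats is an illustrative name, not an export of sera-ai):

// Compute max amplitude and the share of samples above a small threshold,
// mirroring the stats loop kept in the hunk above.
function computeAudioStats(audioData, threshold = 1e-3) {
  let maxAmplitude = 0;
  let nonZeroSamples = 0;
  for (let i = 0; i < audioData.length; i++) {
    const amplitude = Math.abs(audioData[i]);
    if (amplitude > maxAmplitude) maxAmplitude = amplitude;
    if (amplitude > threshold) nonZeroSamples++;
  }
  return {
    maxAmplitude,
    audioPercentage: audioData.length ? nonZeroSamples / audioData.length : 0
  };
}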
@@ -3186,11 +3157,8 @@ var useAudioRecorder = ({
         `[OUT] Final file for transcription: ${wavFile.size} bytes, ${wavFile.name}`
       );
       if (wavFile.size < 500) {
-        console.
-          `[
-        );
-        throw new Error(
-          `Audio file too small (${wavFile.size} bytes) - no meaningful audio content`
+        console.warn(
+          `[WARN] Small audio file (${wavFile.size} bytes) - may contain minimal audio data, sending to server anyway`
         );
       }
       const requestData = {
@@ -3516,16 +3484,19 @@ var useAudioRecorder = ({
         totalRecordingTime,
         lastAudioTime
       } = event.data;
-      const sequence = sequenceCounterRef.current++;
       if (command === "finalChunk" && audioBuffer) {
+        const sequence = sequenceCounterRef.current++;
        const audioArray = new Float32Array(audioBuffer);
-        console.log(`
+        console.log(`[RECEIVE] Final chunk: ${audioArray.length} samples, sequence: ${sequence}`);
        enqueueChunk(audioArray, true, sequence);
-      } else if (command === "
+      } else if (command === "chunk" && audioBuffer) {
+        const sequence = sequenceCounterRef.current++;
        const audioArray = new Float32Array(audioBuffer);
+        console.log(`[RECEIVE] Chunk: ${audioArray.length} samples, sequence: ${sequence}`);
        enqueueChunk(audioArray, false, sequence);
      } else if (command === "pauseChunk" && audioBuffer) {
-
+        const sequence = sequenceCounterRef.current++;
+        console.log(`[RECEIVE] Pause chunk with audioBuffer, sequence: ${sequence}`);
        enqueueChunk(new Float32Array(audioBuffer), false, sequence, true);
      } else if (command === "audioLevel") {
        setAudioLevel(level);
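The hunk above moves sequence assignment from the top of the message handler into the individual command branches, so the counter only advances for messages that actually enqueue a chunk. A simplified sketch of that dispatch shape, using a plain counter in place of the hook's sequenceCounterRef (names are illustrative, not part of the package):

// Simplified shape of the worker-message dispatch after the change: the
// sequence counter is bumped per matched branch rather than for every message.
let sequenceCounter = 0;

function handleWorkerMessage(event, enqueueChunk) {
  const { command, audioBuffer } = event.data;
  if (command === "finalChunk" && audioBuffer) {
    enqueueChunk(new Float32Array(audioBuffer), true, sequenceCounter++);
  } else if (command === "chunk" && audioBuffer) {
    enqueueChunk(new Float32Array(audioBuffer), false, sequenceCounter++);
  } else if (command === "pauseChunk" && audioBuffer) {
    enqueueChunk(new Float32Array(audioBuffer), false, sequenceCounter++, true);
  }
  // Messages such as "audioLevel" fall through without consuming a sequence number.
}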
@@ -3679,16 +3650,18 @@ var useAudioRecorder = ({
   const processNextChunkInQueue = React3__namespace.default.useCallback(() => {
     if (isProcessingQueueRef.current || chunkQueueRef.current.length === 0) return;
     const { chunk, isFinal, sequence, isPaused: isPaused2 = false } = chunkQueueRef.current.shift();
+    console.log(`[QUEUE] Processing chunk ${sequence} from queue, remaining: ${chunkQueueRef.current.length}`);
     isProcessingQueueRef.current = true;
     uploadChunkToServer(chunk, isFinal, sequence, false, isPaused2).finally(() => {
+      console.log(`[QUEUE] Finished processing chunk ${sequence}`);
       isProcessingQueueRef.current = false;
       processNextChunkInQueue();
     });
   }, [uploadChunkToServer, isLoaded]);
   const enqueueChunk = React3__namespace.default.useCallback(
     (audioData, isFinalChunk, sequence, isPausedChunk = false) => {
+      console.log(`[QUEUE] Enqueuing ${isFinalChunk ? "FINAL" : isPausedChunk ? "PAUSED" : "regular"} chunk ${sequence}, samples: ${audioData?.length || 0}, queue size: ${chunkQueueRef.current.length}`);
       if (isFinalChunk) {
-        console.log("Enqueuing final chunk:", sequence);
         setIsProcessing(true);
       }
       chunkQueueRef.current.push({