@sage-rsc/talking-head-react 1.0.63 → 1.0.65
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/index.cjs +2 -2
- package/dist/index.js +191 -185
- package/package.json +1 -1
- package/src/components/TalkingHeadAvatar.jsx +61 -47
package/src/components/TalkingHeadAvatar.jsx
CHANGED
@@ -442,22 +442,30 @@ const TalkingHeadAvatar = forwardRef(({
 
       // Save remaining text from speechQueue before clearing it
       let remainingText = '';
-      if (talkingHead.speechQueue && talkingHead.speechQueue.length > 0) {
+      if (talkingHead.speechQueue && talkingHead.speechQueue.length > 0 && pausedSpeechRef.current) {
         // Extract text from remaining queue items
+        // Note: item.text is an array of objects like [{mark: 0, word: "Hello"}, {mark: 1, word: "world"}]
         const remainingParts = talkingHead.speechQueue
-          .filter(item => item.text
-          .map(item =>
+          .filter(item => item && item.text && Array.isArray(item.text) && item.text.length > 0)
+          .map(item => {
+            // Extract words from the text array
+            return item.text
+              .map(wordObj => wordObj.word || '')
+              .filter(word => word.length > 0)
+              .join(' ');
+          })
+          .filter(text => text.length > 0)
           .join(' ');
 
-        if (remainingParts) {
+        if (remainingParts && remainingParts.trim()) {
           remainingText = remainingParts.trim();
         }
       }
 
-      //
-      if (
+      // Always save progress for resume (even if no remaining text, we'll resume from beginning)
+      if (pausedSpeechRef.current) {
         speechProgressRef.current = {
-          remainingText: remainingText,
+          remainingText: remainingText || null,
           originalText: pausedSpeechRef.current.text,
           options: pausedSpeechRef.current.options
         };
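For context, the new chain flattens each queued item's word objects back into plain text before the queue is cleared. The standalone sketch below runs the same filter/map/join logic against a hypothetical speechQueue shaped like the comment in the diff ({mark, word} entries); the queue contents and the console output are illustrative, not taken from the library.

// Hypothetical queue, assumed shape: each item.text is an array of { mark, word } objects.
const speechQueue = [
  { text: [{ mark: 0, word: 'Hello' }, { mark: 1, word: 'world' }] },
  { text: [] },                                  // skipped: empty text array
  { audio: {} },                                 // skipped: no text field
  { text: [{ mark: 2, word: 'again' }] }
];

const remainingParts = speechQueue
  // keep only items with a non-empty text array
  .filter(item => item && item.text && Array.isArray(item.text) && item.text.length > 0)
  // join the words of each item into a sentence fragment
  .map(item => item.text
    .map(wordObj => wordObj.word || '')
    .filter(word => word.length > 0)
    .join(' '))
  // drop items that produced no text, then join the fragments
  .filter(text => text.length > 0)
  .join(' ');

console.log(remainingParts); // "Hello world again"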
@@ -477,49 +485,55 @@ const TalkingHeadAvatar = forwardRef(({
   }, []);
 
   const resumeSpeaking = useCallback(async () => {
-    if (talkingHeadRef.current
-
+    if (!talkingHeadRef.current || !isPaused) {
+      return;
+    }
+
+    // Determine what text to speak - use remaining text if available, otherwise full text
+    let textToSpeak = '';
+    let optionsToUse = {};
+
+    if (speechProgressRef.current && speechProgressRef.current.remainingText) {
+      // Resume from where we paused - speak only the remaining text
+      textToSpeak = speechProgressRef.current.remainingText;
+      optionsToUse = speechProgressRef.current.options || {};
+      // Clear progress after using it
+      speechProgressRef.current = { remainingText: null, originalText: null, options: null };
+    } else if (pausedSpeechRef.current && pausedSpeechRef.current.text) {
+      // Fallback: if no progress tracked, resume from beginning
+      textToSpeak = pausedSpeechRef.current.text;
+      optionsToUse = pausedSpeechRef.current.options || {};
+    } else {
+      // Nothing to resume
+      console.warn('Resume called but no paused speech found');
+      setIsPaused(false);
+      isPausedRef.current = false;
+      return;
+    }
+
+    // Clear pause state before speaking
+    setIsPaused(false);
+    isPausedRef.current = false;
+
+    // Resume audio context
+    await resumeAudioContext();
+
+    // Prepare speak options
+    const speakOptions = {
+      ...optionsToUse,
+      lipsyncLang: optionsToUse.lipsyncLang || defaultAvatarConfig.lipsyncLang || 'en'
+    };
+
+    // Use speakText method which will set up the onSpeechEnd callback again
+    try {
+      await speakText(textToSpeak, speakOptions);
+    } catch (error) {
+      console.error('Error resuming speech:', error);
+      // Reset pause state on error
      setIsPaused(false);
      isPausedRef.current = false;
-
-      // Resume audio context
-      await resumeAudioContext();
-
-      // Determine what text to speak - use remaining text if available, otherwise full text
-      let textToSpeak = '';
-      let optionsToUse = {};
-
-      if (speechProgressRef.current && speechProgressRef.current.remainingText) {
-        // Resume from where we paused - speak only the remaining text
-        textToSpeak = speechProgressRef.current.remainingText;
-        optionsToUse = speechProgressRef.current.options || {};
-        // Clear progress after using it
-        speechProgressRef.current = { remainingText: null, originalText: null, options: null };
-      } else if (pausedSpeechRef.current && pausedSpeechRef.current.text) {
-        // Fallback: if no progress tracked, resume from beginning
-        textToSpeak = pausedSpeechRef.current.text;
-        optionsToUse = pausedSpeechRef.current.options || {};
-      } else {
-        // Nothing to resume
-        return;
-      }
-
-      // Prepare speak options
-      const speakOptions = {
-        ...optionsToUse,
-        lipsyncLang: optionsToUse.lipsyncLang || defaultAvatarConfig.lipsyncLang || 'en'
-      };
-
-      // Use speakText method which will set up the onSpeechEnd callback again
-      if (talkingHeadRef.current.lipsync && Object.keys(talkingHeadRef.current.lipsync).length > 0) {
-        if (talkingHeadRef.current.setSlowdownRate) {
-          talkingHeadRef.current.setSlowdownRate(1.05);
-        }
-        // Call speakText which will handle everything including onSpeechEnd
-        await speakText(textToSpeak, speakOptions);
-      }
    }
-  }, [resumeAudioContext, isPaused, speakText]);
+  }, [resumeAudioContext, isPaused, speakText, defaultAvatarConfig]);
 
   const setMood = useCallback((mood) => {
     if (talkingHeadRef.current) {
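The rewritten resumeSpeaking prefers the tracked remainingText, falls back to the originally paused text, and otherwise resets the pause state. A minimal consumer-side sketch of the pause/resume flow follows; it assumes the named export, the import path, and that speakText/pauseSpeaking/resumeSpeaking are exposed through the forwarded ref (pauseSpeaking in particular is hypothetical here, since only the resume side appears in this diff).

import { useRef } from 'react';
import { TalkingHeadAvatar } from '@sage-rsc/talking-head-react'; // export name assumed

function AvatarDemo() {
  const avatarRef = useRef(null);

  return (
    <div>
      <TalkingHeadAvatar ref={avatarRef} />
      {/* speakText and resumeSpeaking appear in this diff; pauseSpeaking is assumed */}
      <button onClick={() => avatarRef.current?.speakText('Hello world again')}>
        Speak
      </button>
      <button onClick={() => avatarRef.current?.pauseSpeaking()}>Pause</button>
      <button onClick={() => avatarRef.current?.resumeSpeaking()}>Resume</button>
    </div>
  );
}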