@sage-rsc/talking-head-react 1.0.66 → 1.0.68

This diff shows the changes between two publicly released versions of the package as published to a supported registry. It is provided for informational purposes only and reflects the package contents exactly as they appear in the public registry.
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "@sage-rsc/talking-head-react",
3
- "version": "1.0.66",
3
+ "version": "1.0.68",
4
4
  "description": "A reusable React component for 3D talking avatars with lip-sync and text-to-speech",
5
5
  "main": "./dist/index.cjs",
6
6
  "module": "./dist/index.js",
@@ -23,6 +23,12 @@ import TalkingHeadAvatar from './TalkingHeadAvatar';
23
23
  * @param {Function} props.onCurriculumComplete - Callback when curriculum completes
24
24
  * @param {Function} props.onCustomAction - Callback for custom actions (receives action type and data)
25
25
  * - 'teachingComplete': Fired when teaching finishes. Check data.hasQuestions to know if questions are available.
26
+ * - 'codeExampleReady': Fired after teaching if lesson has a code_example. Contains codeExample object with:
27
+ * - code: The code string to type/run in IDE
28
+ * - language: Programming language (default: 'javascript')
29
+ * - description: Optional description of what the code does
30
+ * - autoRun: Whether to auto-run after typing (default: false)
31
+ * - typingSpeed: Typing speed in ms per character (default: 50)
26
32
  * - 'answerFeedbackComplete': Fired when answer feedback finishes. Check data.hasNextQuestion to know if more questions exist.
27
33
  * - 'lessonCompleteFeedbackDone': Fired when lesson completion feedback finishes. Check data.hasNextLesson to know if more lessons exist.
28
34
  * - 'allQuestionsComplete': Fired when all questions in a lesson are done. Parent should call completeLesson() when ready.
@@ -632,6 +638,18 @@ const CurriculumLearning = forwardRef(({
632
638
  lesson: currentLesson,
633
639
  hasQuestions: currentLesson.questions && currentLesson.questions.length > 0
634
640
  });
641
+
642
+ // Check if there's a code example to demonstrate
643
+ if (currentLesson?.code_example) {
644
+ // Trigger code example event - parent can handle IDE typing/running
645
+ callbacksRef.current.onCustomAction({
646
+ type: 'codeExampleReady',
647
+ moduleIndex: stateRef.current.currentModuleIndex,
648
+ lessonIndex: stateRef.current.currentLessonIndex,
649
+ lesson: currentLesson,
650
+ codeExample: currentLesson.code_example
651
+ });
652
+ }
635
653
  }
636
654
  });
637
655
  }
@@ -54,6 +54,8 @@ const TalkingHeadAvatar = forwardRef(({
54
54
  const speechEndIntervalRef = useRef(null); // Track onSpeechEnd polling interval
55
55
  const isPausedRef = useRef(false); // Track pause state for interval checks
56
56
  const speechProgressRef = useRef({ remainingText: null, originalText: null, options: null }); // Track speech progress for resume
57
+ const originalSentencesRef = useRef([]); // Track original text split into sentences
58
+ const processedSentenceCountRef = useRef(0); // Track how many sentences have been processed
57
59
  const [isLoading, setIsLoading] = useState(true);
58
60
  const [error, setError] = useState(null);
59
61
  const [isReady, setIsReady] = useState(false);
@@ -295,6 +297,17 @@ const TalkingHeadAvatar = forwardRef(({
295
297
  pausedSpeechRef.current = { text: textToSpeak, options };
296
298
  // Reset progress tracking when starting new speech
297
299
  speechProgressRef.current = { remainingText: null, originalText: null, options: null };
300
+
301
+ // Split text into sentences for tracking
302
+ // Use the same regex as the library: /[!\.\?\n\p{Extended_Pictographic}]/ug
303
+ const sentenceDividers = /[!\.\?\n\p{Extended_Pictographic}]/ug;
304
+ const sentences = textToSpeak
305
+ .split(sentenceDividers)
306
+ .map(s => s.trim())
307
+ .filter(s => s.length > 0);
308
+ originalSentencesRef.current = sentences;
309
+ processedSentenceCountRef.current = 0;
310
+
298
311
  setIsPaused(false);
299
312
  isPausedRef.current = false;
300
313
 
@@ -440,29 +453,56 @@ const TalkingHeadAvatar = forwardRef(({
440
453
  speechEndIntervalRef.current = null;
441
454
  }
442
455
 
443
- // Save remaining text from speechQueue before clearing it
456
+ // Calculate remaining text by reconstructing from original sentences
457
+ // We need to account for:
458
+ // 1. Currently playing sentence (in audioPlaylist) - gets cleared by pauseSpeaking()
459
+ // 2. Queued sentences (in speechQueue) - not yet sent to TTS
444
460
  let remainingText = '';
445
- if (talkingHead.speechQueue && talkingHead.speechQueue.length > 0 && pausedSpeechRef.current) {
446
- // Extract text from remaining queue items
447
- // Note: item.text is an array of objects like [{mark: 0, word: "Hello"}, {mark: 1, word: "world"}]
448
- const remainingParts = talkingHead.speechQueue
449
- .filter(item => item && item.text && Array.isArray(item.text) && item.text.length > 0)
450
- .map(item => {
451
- // Extract words from the text array
452
- return item.text
453
- .map(wordObj => wordObj.word || '')
454
- .filter(word => word.length > 0)
455
- .join(' ');
456
- })
457
- .filter(text => text.length > 0)
458
- .join(' ');
461
+
462
+ if (pausedSpeechRef.current && originalSentencesRef.current.length > 0) {
463
+ const totalSentences = originalSentencesRef.current.length;
464
+
465
+ // Count sentences currently in speechQueue (not yet sent to TTS)
466
+ const queuedSentenceCount = talkingHead.speechQueue
467
+ ? talkingHead.speechQueue.filter(item => item && item.text && Array.isArray(item.text) && item.text.length > 0).length
468
+ : 0;
469
+
470
+ // Check if there's a sentence currently playing (in audioPlaylist)
471
+ // This will be cleared by pauseSpeaking(), so we need to account for it now
472
+ const hasCurrentlyPlaying = talkingHead.audioPlaylist && talkingHead.audioPlaylist.length > 0;
459
473
 
460
- if (remainingParts && remainingParts.trim()) {
461
- remainingText = remainingParts.trim();
474
+ // Total sentences remaining = queued + currently playing (if any)
475
+ const remainingSentenceCount = queuedSentenceCount + (hasCurrentlyPlaying ? 1 : 0);
476
+
477
+ // Calculate which sentence index we're at
478
+ const currentSentenceIndex = totalSentences - remainingSentenceCount;
479
+
480
+ // If there are remaining sentences, reconstruct the text
481
+ if (remainingSentenceCount > 0 && currentSentenceIndex < totalSentences) {
482
+ const remainingSentences = originalSentencesRef.current.slice(currentSentenceIndex);
483
+ remainingText = remainingSentences.join('. ').trim();
484
+
485
+ // Fallback: if reconstruction didn't work, try extracting from queue directly
486
+ if (!remainingText && queuedSentenceCount > 0 && talkingHead.speechQueue) {
487
+ const remainingParts = talkingHead.speechQueue
488
+ .filter(item => item && item.text && Array.isArray(item.text) && item.text.length > 0)
489
+ .map(item => {
490
+ return item.text
491
+ .map(wordObj => wordObj.word || '')
492
+ .filter(word => word.length > 0)
493
+ .join(' ');
494
+ })
495
+ .filter(text => text.length > 0)
496
+ .join(' ');
497
+
498
+ if (remainingParts && remainingParts.trim()) {
499
+ remainingText = remainingParts.trim();
500
+ }
501
+ }
462
502
  }
463
503
  }
464
504
 
465
- // Always save progress for resume (even if no remaining text, we'll resume from beginning)
505
+ // Always save progress for resume
466
506
  if (pausedSpeechRef.current) {
467
507
  speechProgressRef.current = {
468
508
  remainingText: remainingText || null,