@sage-rsc/talking-head-react 1.0.43 → 1.0.45
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/index.cjs +2 -2
- package/dist/index.js +323 -312
- package/package.json +1 -1
- package/src/components/TalkingHeadAvatar.jsx +59 -17
- package/src/lib/talkinghead.mjs +2 -2
package/src/components/TalkingHeadAvatar.jsx
CHANGED
@@ -282,14 +282,51 @@ const TalkingHeadAvatar = forwardRef(({
       if (options.onSpeechEnd && talkingHeadRef.current) {
         const talkingHead = talkingHeadRef.current;

-        // Store original onAudioEnd if it exists
-        const originalOnAudioEnd = talkingHead.onAudioEnd;
-
         // Set up a polling mechanism to detect when speech finishes
-        //
+        // Wait for audio to actually start playing before checking if it's finished
         let checkInterval = null;
         let checkCount = 0;
-
+        let audioStarted = false;
+        const maxChecks = 1200; // 60 seconds max (50ms intervals)
+        const maxWaitForAudioStart = 10000; // 10 seconds max to wait for audio to start
+
+        // First, wait for audio to actually start playing (API call completes and audio is added to playlist)
+        let waitForAudioStartCount = 0;
+        const waitForAudioStart = setInterval(() => {
+          waitForAudioStartCount++;
+
+          // Check if audio has started playing (audioPlaylist has items OR isAudioPlaying is true)
+          // Also check if isSpeaking is true (indicating API call has started processing)
+          if (talkingHead && talkingHead.isSpeaking && (
+            (talkingHead.audioPlaylist && talkingHead.audioPlaylist.length > 0) ||
+            (talkingHead.isAudioPlaying === true)
+          )) {
+            audioStarted = true;
+            clearInterval(waitForAudioStart);
+
+            // Now start checking if speech has finished
+            checkInterval = setInterval(checkSpeechFinished, 50);
+          }
+
+          // Timeout if audio doesn't start within reasonable time
+          if (waitForAudioStartCount * 50 > maxWaitForAudioStart) {
+            clearInterval(waitForAudioStart);
+            // Check if speech has actually started (isSpeaking should be true)
+            // If isSpeaking is false, the speech might have failed or completed very quickly
+            if (talkingHead && talkingHead.isSpeaking) {
+              // Still waiting for API, but assume it will start soon
+              audioStarted = true;
+              checkInterval = setInterval(checkSpeechFinished, 50);
+            } else {
+              // Speech never started or finished immediately, call callback
+              try {
+                options.onSpeechEnd();
+              } catch (e) {
+                console.error('Error in onSpeechEnd callback:', e);
+              }
+            }
+          }
+        }, 50);

         const checkSpeechFinished = () => {
           checkCount++;
@@ -307,31 +344,36 @@ const TalkingHeadAvatar = forwardRef(({
             return;
           }

-          //
-          if (
-
-
-
+          // Only check if audio has started playing
+          if (!audioStarted) {
+            return;
+          }
+
+          // Check if speech has finished:
+          // 1. Not speaking OR speech queue is empty
+          // 2. Audio playlist is empty (no more audio to play)
+          // 3. Not currently playing audio
+          const isFinished = talkingHead &&
+            (!talkingHead.isSpeaking || talkingHead.isSpeaking === false) &&
+            (!talkingHead.audioPlaylist || talkingHead.audioPlaylist.length === 0) &&
+            (!talkingHead.isAudioPlaying || talkingHead.isAudioPlaying === false);
+
+          if (isFinished) {
             if (checkInterval) {
               clearInterval(checkInterval);
               checkInterval = null;
             }

-            // Small delay to ensure everything is settled
+            // Small delay to ensure everything is settled
             setTimeout(() => {
               try {
                 options.onSpeechEnd();
               } catch (e) {
                 console.error('Error in onSpeechEnd callback:', e);
               }
-            },
+            }, 50);
           }
         };
-
-        // Start checking after a minimal delay (to allow speech to start)
-        setTimeout(() => {
-          checkInterval = setInterval(checkSpeechFinished, 50);
-        }, 100);
       }

       if (talkingHeadRef.current.lipsync && Object.keys(talkingHeadRef.current.lipsync).length > 0) {
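
In short, the TalkingHeadAvatar.jsx change replaces a fixed 100 ms start-up delay with a two-phase poll: first wait until audio is actually playing, then check every 50 ms until speech and audio have drained before firing onSpeechEnd. The standalone sketch below illustrates the same idea; it is not the package's exported API. waitForSpeechEnd, pollMs and maxWaitMs are hypothetical names, and the only assumption is a talkingHead object exposing the isSpeaking, isAudioPlaying and audioPlaylist fields referenced in the diff.

// Illustrative sketch only, not part of @sage-rsc/talking-head-react.
function waitForSpeechEnd(talkingHead, onSpeechEnd, pollMs = 50, maxWaitMs = 10000) {
  // Phase 2: poll until no speech or audio remains, then fire the callback once.
  const pollForEnd = () => {
    const endPoll = setInterval(() => {
      const finished = !talkingHead.isSpeaking &&
        (!talkingHead.audioPlaylist || talkingHead.audioPlaylist.length === 0) &&
        !talkingHead.isAudioPlaying;
      if (finished) {
        clearInterval(endPoll);
        try {
          onSpeechEnd();
        } catch (e) {
          console.error('Error in onSpeechEnd callback:', e);
        }
      }
    }, pollMs);
  };

  // Phase 1: wait until audio has actually started playing.
  let waited = 0;
  const startPoll = setInterval(() => {
    waited += pollMs;
    const started = talkingHead.isSpeaking &&
      ((talkingHead.audioPlaylist && talkingHead.audioPlaylist.length > 0) ||
        talkingHead.isAudioPlaying === true);

    if (started) {
      clearInterval(startPoll);
      pollForEnd();
    } else if (waited >= maxWaitMs) {
      clearInterval(startPoll);
      if (talkingHead.isSpeaking) {
        // The API call is still pending; assume audio will start and poll for the end anyway.
        pollForEnd();
      } else {
        // Speech never started or finished immediately; report the end right away.
        onSpeechEnd();
      }
    }
  }, pollMs);
}

A caller would start it right after triggering speech, for example waitForSpeechEnd(talkingHeadRef.current, () => console.log('speech finished')).
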
package/src/lib/talkinghead.mjs
CHANGED

@@ -168,9 +168,9 @@ class TalkingHead {
       cameraPanEnable: false,
       cameraZoomEnable: false,
       lightAmbientColor: 0xffffff,
-      lightAmbientIntensity:
+      lightAmbientIntensity: 1.25,
       lightDirectColor: 0x8888aa,
-      lightDirectIntensity:
+      lightDirectIntensity: 12,
       lightDirectPhi: 1,
       lightDirectTheta: 2,
       lightSpotIntensity: 0,
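
The talkinghead.mjs change only sets concrete defaults for two lighting options (ambient intensity 1.25, directional intensity 12). If those defaults do not suit a particular scene, they can presumably be overridden where the TalkingHead instance is constructed. The snippet below is a sketch under assumptions: the option names and values are taken from the diff, but the deep import path and the (containerNode, options) constructor shape are not verified against this package.

// Sketch only: import path, named export and constructor shape are assumptions.
import { TalkingHead } from '@sage-rsc/talking-head-react/src/lib/talkinghead.mjs';

const head = new TalkingHead(document.getElementById('avatar'), {
  lightAmbientColor: 0xffffff,
  lightAmbientIntensity: 2,   // brighter than the new 1.25 default
  lightDirectColor: 0x8888aa,
  lightDirectIntensity: 20,   // stronger key light than the new 12 default
});
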