@sage-rsc/talking-head-react 1.0.69 → 1.0.71
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/index.cjs +2 -2
- package/dist/index.js +1146 -931
- package/package.json +1 -1
- package/src/components/SimpleTalkingAvatar.jsx +444 -0
- package/src/index.js +1 -0
package/package.json
CHANGED
package/src/components/SimpleTalkingAvatar.jsx
ADDED
|
@@ -0,0 +1,444 @@
|
|
|
1
|
+
import React, { useEffect, useRef, useState, useCallback, forwardRef, useImperativeHandle } from 'react';
|
|
2
|
+
import { TalkingHead } from '../lib/talkinghead.mjs';
|
|
3
|
+
import { getActiveTTSConfig, ELEVENLABS_CONFIG, DEEPGRAM_CONFIG } from '../config/ttsConfig';
|
|
4
|
+
|
|
5
|
+
/**
|
|
6
|
+
* SimpleTalkingAvatar - A simple React component for 3D talking avatars
|
|
7
|
+
*
|
|
8
|
+
* This component provides all avatar settings and functionality without curriculum features.
|
|
9
|
+
* You can pass text to speak via props or use the ref methods.
|
|
10
|
+
*
|
|
11
|
+
* @param {Object} props
|
|
12
|
+
* @param {string} props.text - Text to speak (optional, can also use speakText method)
|
|
13
|
+
* @param {string} props.avatarUrl - URL/path to the GLB avatar file
|
|
14
|
+
* @param {string} props.avatarBody - Avatar body type ('M' or 'F')
|
|
15
|
+
* @param {string} props.mood - Initial mood ('happy', 'sad', 'neutral', etc.)
|
|
16
|
+
* @param {string} props.ttsLang - Text-to-speech language code
|
|
17
|
+
* @param {string} props.ttsService - TTS service ('edge', 'elevenlabs', 'deepgram', 'google', 'azure', 'browser')
|
|
18
|
+
* @param {string} props.ttsVoice - TTS voice ID
|
|
19
|
+
* @param {string} props.ttsApiKey - TTS API key (overrides config for ElevenLabs, Google Cloud, Azure)
|
|
20
|
+
* @param {string} props.bodyMovement - Initial body movement type
|
|
21
|
+
* @param {number} props.movementIntensity - Movement intensity (0-1)
|
|
22
|
+
* @param {boolean} props.showFullAvatar - Whether to show full body avatar
|
|
23
|
+
* @param {string} props.cameraView - Camera view ('upper', 'full', etc.)
|
|
24
|
+
* @param {Function} props.onReady - Callback when avatar is ready
|
|
25
|
+
* @param {Function} props.onLoading - Callback for loading progress
|
|
26
|
+
* @param {Function} props.onError - Callback for errors
|
|
27
|
+
* @param {Function} props.onSpeechEnd - Callback when speech ends
|
|
28
|
+
* @param {string} props.className - Additional CSS classes
|
|
29
|
+
* @param {Object} props.style - Additional inline styles
|
|
30
|
+
* @param {Object} props.animations - Object mapping animation names to FBX file paths
|
|
31
|
+
* @param {boolean} props.autoSpeak - Whether to automatically speak the text prop when ready
|
|
32
|
+
* @param {Object} ref - Ref to access component methods
|
|
33
|
+
*/
|
|
34
|
+
const SimpleTalkingAvatar = forwardRef(({
|
|
35
|
+
text = null,
|
|
36
|
+
avatarUrl = "/avatars/brunette.glb",
|
|
37
|
+
avatarBody = "F",
|
|
38
|
+
mood = "neutral",
|
|
39
|
+
ttsLang = "en",
|
|
40
|
+
ttsService = null,
|
|
41
|
+
ttsVoice = null,
|
|
42
|
+
ttsApiKey = null,
|
|
43
|
+
bodyMovement = "idle",
|
|
44
|
+
movementIntensity = 0.5,
|
|
45
|
+
showFullAvatar = false,
|
|
46
|
+
cameraView = "upper",
|
|
47
|
+
onReady = () => {},
|
|
48
|
+
onLoading = () => {},
|
|
49
|
+
onError = () => {},
|
|
50
|
+
onSpeechEnd = () => {},
|
|
51
|
+
className = "",
|
|
52
|
+
style = {},
|
|
53
|
+
animations = {},
|
|
54
|
+
autoSpeak = false
|
|
55
|
+
}, ref) => {
|
|
56
|
+
const containerRef = useRef(null);
|
|
57
|
+
const talkingHeadRef = useRef(null);
|
|
58
|
+
const showFullAvatarRef = useRef(showFullAvatar);
|
|
59
|
+
const pausedSpeechRef = useRef(null);
|
|
60
|
+
const speechEndIntervalRef = useRef(null);
|
|
61
|
+
const isPausedRef = useRef(false);
|
|
62
|
+
const speechProgressRef = useRef({ remainingText: null, originalText: null, options: null });
|
|
63
|
+
const originalSentencesRef = useRef([]);
|
|
64
|
+
const [isLoading, setIsLoading] = useState(true);
|
|
65
|
+
const [error, setError] = useState(null);
|
|
66
|
+
const [isReady, setIsReady] = useState(false);
|
|
67
|
+
const [isPaused, setIsPaused] = useState(false);
|
|
68
|
+
|
|
69
|
+
// Keep ref in sync with state
|
|
70
|
+
useEffect(() => {
|
|
71
|
+
isPausedRef.current = isPaused;
|
|
72
|
+
}, [isPaused]);
|
|
73
|
+
|
|
74
|
+
// Update ref when prop changes
|
|
75
|
+
useEffect(() => {
|
|
76
|
+
showFullAvatarRef.current = showFullAvatar;
|
|
77
|
+
}, [showFullAvatar]);
|
|
78
|
+
|
|
79
|
+
// Get TTS configuration
|
|
80
|
+
const ttsConfig = getActiveTTSConfig();
|
|
81
|
+
|
|
82
|
+
// Override TTS service if specified in props
|
|
83
|
+
const effectiveTtsService = ttsService || ttsConfig.service;
|
|
84
|
+
|
|
85
|
+
// Build effective TTS config
|
|
86
|
+
let effectiveTtsConfig;
|
|
87
|
+
|
|
88
|
+
if (effectiveTtsService === 'browser') {
|
|
89
|
+
effectiveTtsConfig = {
|
|
90
|
+
service: 'browser',
|
|
91
|
+
endpoint: '',
|
|
92
|
+
apiKey: null,
|
|
93
|
+
defaultVoice: 'Google US English'
|
|
94
|
+
};
|
|
95
|
+
} else if (effectiveTtsService === 'elevenlabs') {
|
|
96
|
+
const apiKey = ttsApiKey || ttsConfig.apiKey;
|
|
97
|
+
effectiveTtsConfig = {
|
|
98
|
+
service: 'elevenlabs',
|
|
99
|
+
endpoint: 'https://api.elevenlabs.io/v1/text-to-speech',
|
|
100
|
+
apiKey: apiKey,
|
|
101
|
+
defaultVoice: ttsVoice || ttsConfig.defaultVoice || ELEVENLABS_CONFIG.defaultVoice,
|
|
102
|
+
voices: ttsConfig.voices || ELEVENLABS_CONFIG.voices
|
|
103
|
+
};
|
|
104
|
+
} else if (effectiveTtsService === 'deepgram') {
|
|
105
|
+
const apiKey = ttsApiKey || ttsConfig.apiKey;
|
|
106
|
+
effectiveTtsConfig = {
|
|
107
|
+
service: 'deepgram',
|
|
108
|
+
endpoint: 'https://api.deepgram.com/v1/speak',
|
|
109
|
+
apiKey: apiKey,
|
|
110
|
+
defaultVoice: ttsVoice || ttsConfig.defaultVoice || DEEPGRAM_CONFIG.defaultVoice,
|
|
111
|
+
voices: ttsConfig.voices || DEEPGRAM_CONFIG.voices
|
|
112
|
+
};
|
|
113
|
+
} else {
|
|
114
|
+
effectiveTtsConfig = {
|
|
115
|
+
...ttsConfig,
|
|
116
|
+
apiKey: ttsApiKey !== null ? ttsApiKey : ttsConfig.apiKey
|
|
117
|
+
};
|
|
118
|
+
}
|
|
119
|
+
|
|
120
|
+
const defaultAvatarConfig = {
|
|
121
|
+
url: avatarUrl,
|
|
122
|
+
body: avatarBody,
|
|
123
|
+
avatarMood: mood,
|
|
124
|
+
ttsLang: effectiveTtsService === 'browser' ? "en-US" : ttsLang,
|
|
125
|
+
ttsVoice: ttsVoice || effectiveTtsConfig.defaultVoice,
|
|
126
|
+
lipsyncLang: 'en',
|
|
127
|
+
showFullAvatar: showFullAvatar,
|
|
128
|
+
bodyMovement: bodyMovement,
|
|
129
|
+
movementIntensity: movementIntensity,
|
|
130
|
+
};
|
|
131
|
+
|
|
132
|
+
const defaultOptions = {
|
|
133
|
+
ttsEndpoint: effectiveTtsConfig.endpoint,
|
|
134
|
+
ttsApikey: effectiveTtsConfig.apiKey,
|
|
135
|
+
ttsService: effectiveTtsService,
|
|
136
|
+
lipsyncModules: ["en"],
|
|
137
|
+
cameraView: cameraView
|
|
138
|
+
};
|
|
139
|
+
|
|
140
|
+
const initializeTalkingHead = useCallback(async () => {
|
|
141
|
+
if (!containerRef.current || talkingHeadRef.current) return;
|
|
142
|
+
|
|
143
|
+
try {
|
|
144
|
+
setIsLoading(true);
|
|
145
|
+
setError(null);
|
|
146
|
+
|
|
147
|
+
talkingHeadRef.current = new TalkingHead(containerRef.current, defaultOptions);
|
|
148
|
+
|
|
149
|
+
await talkingHeadRef.current.showAvatar(defaultAvatarConfig, (ev) => {
|
|
150
|
+
if (ev.lengthComputable) {
|
|
151
|
+
const progress = Math.min(100, Math.round(ev.loaded / ev.total * 100));
|
|
152
|
+
onLoading(progress);
|
|
153
|
+
}
|
|
154
|
+
});
|
|
155
|
+
|
|
156
|
+
setIsLoading(false);
|
|
157
|
+
setIsReady(true);
|
|
158
|
+
onReady(talkingHeadRef.current);
|
|
159
|
+
|
|
160
|
+
// Handle visibility change
|
|
161
|
+
const handleVisibilityChange = () => {
|
|
162
|
+
if (document.visibilityState === "visible") {
|
|
163
|
+
talkingHeadRef.current?.start();
|
|
164
|
+
} else {
|
|
165
|
+
talkingHeadRef.current?.stop();
|
|
166
|
+
}
|
|
167
|
+
};
|
|
168
|
+
|
|
169
|
+
document.addEventListener("visibilitychange", handleVisibilityChange);
|
|
170
|
+
|
|
171
|
+
return () => {
|
|
172
|
+
document.removeEventListener("visibilitychange", handleVisibilityChange);
|
|
173
|
+
};
|
|
174
|
+
|
|
175
|
+
} catch (err) {
|
|
176
|
+
console.error('Error initializing TalkingHead:', err);
|
|
177
|
+
setError(err.message || 'Failed to initialize avatar');
|
|
178
|
+
setIsLoading(false);
|
|
179
|
+
onError(err);
|
|
180
|
+
}
|
|
181
|
+
}, []); // Empty deps - only initialize once
|
|
182
|
+
|
|
183
|
+
useEffect(() => {
|
|
184
|
+
initializeTalkingHead();
|
|
185
|
+
|
|
186
|
+
return () => {
|
|
187
|
+
if (talkingHeadRef.current) {
|
|
188
|
+
talkingHeadRef.current.stop();
|
|
189
|
+
talkingHeadRef.current.dispose();
|
|
190
|
+
talkingHeadRef.current = null;
|
|
191
|
+
}
|
|
192
|
+
};
|
|
193
|
+
}, [initializeTalkingHead]);
|
|
194
|
+
|
|
195
|
+
// Resume audio context helper
|
|
196
|
+
const resumeAudioContext = useCallback(async () => {
|
|
197
|
+
if (talkingHeadRef.current) {
|
|
198
|
+
try {
|
|
199
|
+
// Try to access audio context through talkingHead instance
|
|
200
|
+
const audioCtx = talkingHeadRef.current.audioCtx || talkingHeadRef.current.audioContext;
|
|
201
|
+
if (audioCtx && (audioCtx.state === 'suspended' || audioCtx.state === 'interrupted')) {
|
|
202
|
+
await audioCtx.resume();
|
|
203
|
+
console.log('Audio context resumed');
|
|
204
|
+
}
|
|
205
|
+
} catch (err) {
|
|
206
|
+
console.warn('Failed to resume audio context:', err);
|
|
207
|
+
}
|
|
208
|
+
}
|
|
209
|
+
}, []);
|
|
210
|
+
|
|
211
|
+
// Auto-speak text when ready and autoSpeak is true
|
|
212
|
+
useEffect(() => {
|
|
213
|
+
if (isReady && text && autoSpeak && talkingHeadRef.current) {
|
|
214
|
+
speakText(text);
|
|
215
|
+
}
|
|
216
|
+
}, [isReady, text, autoSpeak, speakText]);
|
|
217
|
+
|
|
218
|
+
// Speak text with proper callback handling
|
|
219
|
+
const speakText = useCallback(async (textToSpeak, options = {}) => {
|
|
220
|
+
if (!talkingHeadRef.current || !isReady) {
|
|
221
|
+
console.warn('Avatar not ready for speaking');
|
|
222
|
+
return;
|
|
223
|
+
}
|
|
224
|
+
|
|
225
|
+
if (!textToSpeak || textToSpeak.trim() === '') {
|
|
226
|
+
console.warn('No text provided to speak');
|
|
227
|
+
return;
|
|
228
|
+
}
|
|
229
|
+
|
|
230
|
+
// Always resume audio context first (required for user interaction)
|
|
231
|
+
await resumeAudioContext();
|
|
232
|
+
|
|
233
|
+
// Reset speech progress tracking
|
|
234
|
+
speechProgressRef.current = { remainingText: null, originalText: null, options: null };
|
|
235
|
+
originalSentencesRef.current = [];
|
|
236
|
+
|
|
237
|
+
// Store for pause/resume
|
|
238
|
+
pausedSpeechRef.current = { text: textToSpeak, options };
|
|
239
|
+
|
|
240
|
+
// Clear any existing speech end interval
|
|
241
|
+
if (speechEndIntervalRef.current) {
|
|
242
|
+
clearInterval(speechEndIntervalRef.current);
|
|
243
|
+
speechEndIntervalRef.current = null;
|
|
244
|
+
}
|
|
245
|
+
|
|
246
|
+
// Reset pause state
|
|
247
|
+
setIsPaused(false);
|
|
248
|
+
isPausedRef.current = false;
|
|
249
|
+
|
|
250
|
+
// Split text into sentences for tracking
|
|
251
|
+
const sentences = textToSpeak.split(/[.!?]+/).filter(s => s.trim().length > 0);
|
|
252
|
+
originalSentencesRef.current = sentences;
|
|
253
|
+
|
|
254
|
+
const speakOptions = {
|
|
255
|
+
lipsyncLang: options.lipsyncLang || 'en',
|
|
256
|
+
onSpeechEnd: () => {
|
|
257
|
+
// Clear interval
|
|
258
|
+
if (speechEndIntervalRef.current) {
|
|
259
|
+
clearInterval(speechEndIntervalRef.current);
|
|
260
|
+
speechEndIntervalRef.current = null;
|
|
261
|
+
}
|
|
262
|
+
|
|
263
|
+
// Call user's onSpeechEnd callback
|
|
264
|
+
if (options.onSpeechEnd) {
|
|
265
|
+
options.onSpeechEnd();
|
|
266
|
+
}
|
|
267
|
+
onSpeechEnd();
|
|
268
|
+
}
|
|
269
|
+
};
|
|
270
|
+
|
|
271
|
+
try {
|
|
272
|
+
talkingHeadRef.current.speakText(textToSpeak, speakOptions);
|
|
273
|
+
} catch (err) {
|
|
274
|
+
console.error('Error speaking text:', err);
|
|
275
|
+
setError(err.message || 'Failed to speak text');
|
|
276
|
+
}
|
|
277
|
+
}, [isReady, onSpeechEnd, resumeAudioContext]);
|
|
278
|
+
|
|
279
|
+
// Pause speaking
|
|
280
|
+
const pauseSpeaking = useCallback(() => {
|
|
281
|
+
if (!talkingHeadRef.current) return;
|
|
282
|
+
|
|
283
|
+
try {
|
|
284
|
+
// Check if currently speaking
|
|
285
|
+
const isSpeaking = talkingHeadRef.current.isSpeaking || false;
|
|
286
|
+
const audioPlaylist = talkingHeadRef.current.audioPlaylist || [];
|
|
287
|
+
const speechQueue = talkingHeadRef.current.speechQueue || [];
|
|
288
|
+
|
|
289
|
+
if (isSpeaking || audioPlaylist.length > 0 || speechQueue.length > 0) {
|
|
290
|
+
// Clear speech end interval
|
|
291
|
+
if (speechEndIntervalRef.current) {
|
|
292
|
+
clearInterval(speechEndIntervalRef.current);
|
|
293
|
+
speechEndIntervalRef.current = null;
|
|
294
|
+
}
|
|
295
|
+
|
|
296
|
+
// Extract remaining text from speech queue
|
|
297
|
+
let remainingText = '';
|
|
298
|
+
if (speechQueue.length > 0) {
|
|
299
|
+
remainingText = speechQueue.map(item => {
|
|
300
|
+
if (item.text && Array.isArray(item.text)) {
|
|
301
|
+
return item.text.map(wordObj => wordObj.word).join(' ');
|
|
302
|
+
}
|
|
303
|
+
return item.text || '';
|
|
304
|
+
}).join(' ');
|
|
305
|
+
}
|
|
306
|
+
|
|
307
|
+
// Store progress for resume
|
|
308
|
+
speechProgressRef.current = {
|
|
309
|
+
remainingText: remainingText || null,
|
|
310
|
+
originalText: pausedSpeechRef.current?.text || null,
|
|
311
|
+
options: pausedSpeechRef.current?.options || null
|
|
312
|
+
};
|
|
313
|
+
|
|
314
|
+
// Clear speech queue and pause
|
|
315
|
+
talkingHeadRef.current.speechQueue.length = 0;
|
|
316
|
+
talkingHeadRef.current.pauseSpeaking();
|
|
317
|
+
setIsPaused(true);
|
|
318
|
+
isPausedRef.current = true;
|
|
319
|
+
}
|
|
320
|
+
} catch (err) {
|
|
321
|
+
console.warn('Error pausing speech:', err);
|
|
322
|
+
}
|
|
323
|
+
}, []);
|
|
324
|
+
|
|
325
|
+
// Resume speaking
|
|
326
|
+
const resumeSpeaking = useCallback(async () => {
|
|
327
|
+
if (!talkingHeadRef.current || !isPaused) return;
|
|
328
|
+
|
|
329
|
+
try {
|
|
330
|
+
// Resume audio context first
|
|
331
|
+
await resumeAudioContext();
|
|
332
|
+
|
|
333
|
+
setIsPaused(false);
|
|
334
|
+
isPausedRef.current = false;
|
|
335
|
+
|
|
336
|
+
// Determine what text to speak
|
|
337
|
+
const remainingText = speechProgressRef.current?.remainingText;
|
|
338
|
+
const originalText = speechProgressRef.current?.originalText || pausedSpeechRef.current?.text;
|
|
339
|
+
const originalOptions = speechProgressRef.current?.options || pausedSpeechRef.current?.options || {};
|
|
340
|
+
|
|
341
|
+
const textToSpeak = remainingText || originalText;
|
|
342
|
+
|
|
343
|
+
if (textToSpeak) {
|
|
344
|
+
speakText(textToSpeak, originalOptions);
|
|
345
|
+
}
|
|
346
|
+
} catch (err) {
|
|
347
|
+
console.warn('Error resuming speech:', err);
|
|
348
|
+
setIsPaused(false);
|
|
349
|
+
isPausedRef.current = false;
|
|
350
|
+
}
|
|
351
|
+
}, [isPaused, speakText, resumeAudioContext]);
|
|
352
|
+
|
|
353
|
+
// Stop speaking
|
|
354
|
+
const stopSpeaking = useCallback(() => {
|
|
355
|
+
if (talkingHeadRef.current) {
|
|
356
|
+
talkingHeadRef.current.stopSpeaking();
|
|
357
|
+
if (speechEndIntervalRef.current) {
|
|
358
|
+
clearInterval(speechEndIntervalRef.current);
|
|
359
|
+
speechEndIntervalRef.current = null;
|
|
360
|
+
}
|
|
361
|
+
setIsPaused(false);
|
|
362
|
+
isPausedRef.current = false;
|
|
363
|
+
}
|
|
364
|
+
}, []);
|
|
365
|
+
|
|
366
|
+
// Expose methods via ref
|
|
367
|
+
useImperativeHandle(ref, () => ({
|
|
368
|
+
speakText,
|
|
369
|
+
pauseSpeaking,
|
|
370
|
+
resumeSpeaking,
|
|
371
|
+
stopSpeaking,
|
|
372
|
+
resumeAudioContext,
|
|
373
|
+
isPaused: () => isPaused,
|
|
374
|
+
setMood: (mood) => talkingHeadRef.current?.setMood(mood),
|
|
375
|
+
setBodyMovement: (movement) => {
|
|
376
|
+
if (talkingHeadRef.current) {
|
|
377
|
+
talkingHeadRef.current.setBodyMovement(movement);
|
|
378
|
+
}
|
|
379
|
+
},
|
|
380
|
+
playAnimation: (animationName, disablePositionLock = false) => {
|
|
381
|
+
if (talkingHeadRef.current && talkingHeadRef.current.playAnimation) {
|
|
382
|
+
talkingHeadRef.current.playAnimation(animationName, null, 10, 0, 0.01, disablePositionLock);
|
|
383
|
+
}
|
|
384
|
+
},
|
|
385
|
+
playReaction: (reactionType) => talkingHeadRef.current?.playReaction(reactionType),
|
|
386
|
+
playCelebration: () => talkingHeadRef.current?.playCelebration(),
|
|
387
|
+
setShowFullAvatar: (show) => {
|
|
388
|
+
if (talkingHeadRef.current) {
|
|
389
|
+
showFullAvatarRef.current = show;
|
|
390
|
+
talkingHeadRef.current.setShowFullAvatar(show);
|
|
391
|
+
}
|
|
392
|
+
},
|
|
393
|
+
isReady,
|
|
394
|
+
talkingHead: talkingHeadRef.current
|
|
395
|
+
}));
|
|
396
|
+
|
|
397
|
+
return (
|
|
398
|
+
<div className={`simple-talking-avatar-container ${className}`} style={style}>
|
|
399
|
+
<div
|
|
400
|
+
ref={containerRef}
|
|
401
|
+
className="talking-head-viewer"
|
|
402
|
+
style={{
|
|
403
|
+
width: '100%',
|
|
404
|
+
height: '100%',
|
|
405
|
+
minHeight: '400px',
|
|
406
|
+
}}
|
|
407
|
+
/>
|
|
408
|
+
{isLoading && (
|
|
409
|
+
<div className="loading-overlay" style={{
|
|
410
|
+
position: 'absolute',
|
|
411
|
+
top: '50%',
|
|
412
|
+
left: '50%',
|
|
413
|
+
transform: 'translate(-50%, -50%)',
|
|
414
|
+
color: 'white',
|
|
415
|
+
fontSize: '18px',
|
|
416
|
+
zIndex: 10
|
|
417
|
+
}}>
|
|
418
|
+
Loading avatar...
|
|
419
|
+
</div>
|
|
420
|
+
)}
|
|
421
|
+
{error && (
|
|
422
|
+
<div className="error-overlay" style={{
|
|
423
|
+
position: 'absolute',
|
|
424
|
+
top: '50%',
|
|
425
|
+
left: '50%',
|
|
426
|
+
transform: 'translate(-50%, -50%)',
|
|
427
|
+
color: '#ff6b6b',
|
|
428
|
+
fontSize: '16px',
|
|
429
|
+
textAlign: 'center',
|
|
430
|
+
zIndex: 10,
|
|
431
|
+
padding: '20px',
|
|
432
|
+
borderRadius: '8px'
|
|
433
|
+
}}>
|
|
434
|
+
{error}
|
|
435
|
+
</div>
|
|
436
|
+
)}
|
|
437
|
+
</div>
|
|
438
|
+
);
|
|
439
|
+
});
|
|
440
|
+
|
|
441
|
+
SimpleTalkingAvatar.displayName = 'SimpleTalkingAvatar';
|
|
442
|
+
|
|
443
|
+
export default SimpleTalkingAvatar;
|
|
444
|
+
|
package/src/index.js
CHANGED
|
@@ -6,6 +6,7 @@
|
|
|
6
6
|
|
|
7
7
|
export { default as TalkingHeadAvatar } from './components/TalkingHeadAvatar';
|
|
8
8
|
export { default as TalkingHeadComponent } from './components/TalkingHeadComponent';
|
|
9
|
+
export { default as SimpleTalkingAvatar } from './components/SimpleTalkingAvatar';
|
|
9
10
|
export { default as CurriculumLearning } from './components/CurriculumLearning';
|
|
10
11
|
export { getActiveTTSConfig, getVoiceOptions } from './config/ttsConfig';
|
|
11
12
|
export { animations, getAnimation, hasAnimation } from './config/animations';
|