@memori.ai/memori-react 8.0.2 → 8.1.0-rc.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (53):
  1. package/CHANGELOG.md +25 -0
  2. package/dist/components/Chat/Chat.d.ts +0 -2
  3. package/dist/components/Chat/Chat.js +2 -2
  4. package/dist/components/Chat/Chat.js.map +1 -1
  5. package/dist/components/ChatHistoryDrawer/ChatHistory.css +32 -0
  6. package/dist/components/ChatHistoryDrawer/ChatHistory.js +104 -31
  7. package/dist/components/ChatHistoryDrawer/ChatHistory.js.map +1 -1
  8. package/dist/components/ChatInputs/ChatInputs.d.ts +0 -2
  9. package/dist/components/ChatInputs/ChatInputs.js +3 -4
  10. package/dist/components/ChatInputs/ChatInputs.js.map +1 -1
  11. package/dist/components/MemoriWidget/MemoriWidget.js +103 -329
  12. package/dist/components/MemoriWidget/MemoriWidget.js.map +1 -1
  13. package/dist/helpers/stt/useSTT.d.ts +40 -0
  14. package/dist/helpers/stt/useSTT.js +362 -0
  15. package/dist/helpers/stt/useSTT.js.map +1 -0
  16. package/dist/locales/de.json +11 -0
  17. package/dist/locales/en.json +11 -0
  18. package/dist/locales/es.json +11 -0
  19. package/dist/locales/fr.json +11 -0
  20. package/dist/locales/it.json +11 -0
  21. package/esm/components/Chat/Chat.d.ts +0 -2
  22. package/esm/components/Chat/Chat.js +2 -2
  23. package/esm/components/Chat/Chat.js.map +1 -1
  24. package/esm/components/ChatHistoryDrawer/ChatHistory.css +32 -0
  25. package/esm/components/ChatHistoryDrawer/ChatHistory.js +104 -31
  26. package/esm/components/ChatHistoryDrawer/ChatHistory.js.map +1 -1
  27. package/esm/components/ChatInputs/ChatInputs.d.ts +0 -2
  28. package/esm/components/ChatInputs/ChatInputs.js +3 -4
  29. package/esm/components/ChatInputs/ChatInputs.js.map +1 -1
  30. package/esm/components/MemoriWidget/MemoriWidget.js +103 -329
  31. package/esm/components/MemoriWidget/MemoriWidget.js.map +1 -1
  32. package/esm/helpers/stt/useSTT.d.ts +40 -0
  33. package/esm/helpers/stt/useSTT.js +358 -0
  34. package/esm/helpers/stt/useSTT.js.map +1 -0
  35. package/esm/locales/de.json +11 -0
  36. package/esm/locales/en.json +11 -0
  37. package/esm/locales/es.json +11 -0
  38. package/esm/locales/fr.json +11 -0
  39. package/esm/locales/it.json +11 -0
  40. package/package.json +2 -3
  41. package/src/components/Chat/Chat.test.tsx +0 -9
  42. package/src/components/Chat/Chat.tsx +0 -6
  43. package/src/components/ChatHistoryDrawer/ChatHistory.css +32 -0
  44. package/src/components/ChatHistoryDrawer/ChatHistory.tsx +114 -57
  45. package/src/components/ChatInputs/ChatInputs.test.tsx +0 -6
  46. package/src/components/ChatInputs/ChatInputs.tsx +2 -7
  47. package/src/components/MemoriWidget/MemoriWidget.tsx +152 -476
  48. package/src/helpers/stt/useSTT.ts +551 -0
  49. package/src/locales/de.json +11 -0
  50. package/src/locales/en.json +11 -0
  51. package/src/locales/es.json +11 -0
  52. package/src/locales/fr.json +11 -0
  53. package/src/locales/it.json +11 -0
@@ -6,7 +6,6 @@ const react_1 = require("react");
6
6
  const react_i18next_1 = require("react-i18next");
7
7
  const memori_api_client_1 = tslib_1.__importDefault(require("@memori.ai/memori-api-client"));
8
8
  const standardized_audio_context_1 = require("standardized-audio-context");
9
- const speechSdk = tslib_1.__importStar(require("microsoft-cognitiveservices-speech-sdk"));
10
9
  const classnames_1 = tslib_1.__importDefault(require("classnames"));
11
10
  const luxon_1 = require("luxon");
12
11
  const react_hot_toast_1 = tslib_1.__importDefault(require("react-hot-toast"));
@@ -41,6 +40,7 @@ const sanitizer_1 = require("../../helpers/sanitizer");
41
40
  const useTTS_1 = require("../../helpers/tts/useTTS");
42
41
  const Alert_1 = tslib_1.__importDefault(require("../ui/Alert"));
43
42
  const ChatHistory_1 = tslib_1.__importDefault(require("../ChatHistoryDrawer/ChatHistory"));
43
+ const useSTT_1 = require("../../helpers/stt/useSTT");
44
44
  const getMemoriState = (integrationId) => {
45
45
  var _a, _b, _c, _d, _f;
46
46
  let widget = integrationId
@@ -155,9 +155,6 @@ window.getMemoriState = getMemoriState;
155
155
  window.typeMessage = typeMessage;
156
156
  window.typeMessageHidden = typeMessageHidden;
157
157
  window.typeBatchMessages = typeBatchMessages;
158
- let recognizer;
159
- let speechConfig;
160
- let audioDestination;
161
158
  let audioContext;
162
159
  let memoriPassword;
163
160
  let speakerMuted = false;
@@ -386,7 +383,9 @@ const MemoriWidget = ({ memori, memoriConfigs, ownerUserID, ownerUserName, tenan
386
383
  }
387
384
  const mediaDocuments = media === null || media === void 0 ? void 0 : media.filter(m => { var _a; return !m.mediumID && ((_a = m.properties) === null || _a === void 0 ? void 0 : _a.isAttachedFile); });
388
385
  if (mediaDocuments && mediaDocuments.length > 0) {
389
- const documentContents = mediaDocuments.map(doc => doc.content).join(' ');
386
+ const documentContents = mediaDocuments
387
+ .map(doc => doc.content)
388
+ .join(' ');
390
389
  msg = msg + ' ' + documentContents;
391
390
  }
392
391
  const { currentState, ...response } = await postTextEnteredEvent({
@@ -571,6 +570,62 @@ const MemoriWidget = ({ memori, memoriConfigs, ownerUserID, ownerUserName, tenan
571
570
  : 0;
572
571
  const [birthDate, setBirthDate] = (0, react_1.useState)();
573
572
  const [showAgeVerification, setShowAgeVerification] = (0, react_1.useState)(false);
573
+ const getCultureCodeByLanguage = (lang) => {
574
+ var _a, _b;
575
+ let voice = '';
576
+ let voiceLang = (lang ||
577
+ ((_b = (_a = memori.culture) === null || _a === void 0 ? void 0 : _a.split('-')) === null || _b === void 0 ? void 0 : _b[0]) ||
578
+ i18n.language ||
579
+ 'IT').toUpperCase();
580
+ switch (voiceLang) {
581
+ case 'IT':
582
+ voice = 'it-IT';
583
+ break;
584
+ case 'DE':
585
+ voice = 'de-DE';
586
+ break;
587
+ case 'EN':
588
+ voice = 'en-GB';
589
+ break;
590
+ case 'ES':
591
+ voice = 'es-ES';
592
+ break;
593
+ case 'FR':
594
+ voice = 'fr-FR';
595
+ break;
596
+ case 'PT':
597
+ voice = 'pt-PT';
598
+ break;
599
+ case 'UK':
600
+ voice = 'uk-UK';
601
+ break;
602
+ case 'RU':
603
+ voice = 'ru-RU';
604
+ break;
605
+ case 'PL':
606
+ voice = 'pl-PL';
607
+ break;
608
+ case 'FI':
609
+ voice = 'fi-FI';
610
+ break;
611
+ case 'EL':
612
+ voice = 'el-GR';
613
+ break;
614
+ case 'AR':
615
+ voice = 'ar-SA';
616
+ break;
617
+ case 'ZH':
618
+ voice = 'zh-CN';
619
+ break;
620
+ case 'JA':
621
+ voice = 'ja-JP';
622
+ break;
623
+ default:
624
+ voice = 'it-IT';
625
+ break;
626
+ }
627
+ return voice;
628
+ };
574
629
  const [sessionId, setSessionId] = (0, react_1.useState)(initialSessionID);
575
630
  const [currentDialogState, _setCurrentDialogState] = (0, react_1.useState)();
576
631
  const setCurrentDialogState = (state) => {
@@ -1044,17 +1099,43 @@ const MemoriWidget = ({ memori, memoriConfigs, ownerUserID, ownerUserName, tenan
1044
1099
  tenant: tenantID,
1045
1100
  region: 'westeurope',
1046
1101
  voiceType: memori.voiceType,
1047
- layout: selectedLayout
1102
+ layout: selectedLayout,
1048
1103
  });
1049
1104
  }, [ttsProvider, userLang, memori.culture, memori.voiceType]);
1105
+ const sttConfig = (0, react_1.useMemo)(() => ({
1106
+ provider: ttsProvider,
1107
+ language: getCultureCodeByLanguage(userLang),
1108
+ tenant: tenantID,
1109
+ }), [ttsProvider, userLang]);
1050
1110
  const { speak: ttsSpeak, stop: ttsStop, isPlaying: isPlayingAudio, speakerMuted, toggleMute, hasUserActivatedSpeak, setHasUserActivatedSpeak, error, setError, } = (0, useTTS_1.useTTS)(ttsConfig, {
1051
1111
  apiUrl: `${baseUrl}/api/tts`,
1052
1112
  continuousSpeech: continuousSpeech,
1053
- onEndSpeakStartListen: () => {
1054
- console.log('[MemoriWidget] onEndSpeakStartListen called');
1055
- },
1056
1113
  preview: preview,
1057
1114
  }, autoStart, defaultEnableAudio, defaultSpeakerActive);
1115
+ const processSpeechAndSendMessage = (text) => {
1116
+ console.log('processSpeechAndSendMessage', text);
1117
+ if (!text || text.trim().length === 0) {
1118
+ return;
1119
+ }
1120
+ try {
1121
+ const message = (0, utils_1.stripDuplicates)(text);
1122
+ console.debug('Processing speech message:', message);
1123
+ if (message.length > 0) {
1124
+ setUserMessage('');
1125
+ console.debug('Sending message:', message);
1126
+ sendMessage(message);
1127
+ }
1128
+ }
1129
+ catch (error) {
1130
+ console.error('Error in processSpeechAndSendMessage:', error);
1131
+ }
1132
+ };
1133
+ const { isListening, startRecording, stopRecording, } = (0, useSTT_1.useSTT)(sttConfig, processSpeechAndSendMessage, {
1134
+ apiUrl: `${baseUrl}/api/stt`,
1135
+ continuousRecording: continuousSpeech,
1136
+ silenceTimeout: continuousSpeechTimeout,
1137
+ autoStart: autoStart,
1138
+ }, defaultEnableAudio);
1058
1139
  const resetInteractionTimeout = () => {
1059
1140
  clearInteractionTimeout();
1060
1141
  if (!isPlayingAudio && !userMessage.length && !memoriTyping && !listening)
@@ -1156,73 +1237,14 @@ const MemoriWidget = ({ memori, memoriConfigs, ownerUserID, ownerUserName, tenan
1156
1237
  memoriTyping,
1157
1238
  hasUserActivatedSpeak,
1158
1239
  ]);
1159
- const getCultureCodeByLanguage = (lang) => {
1160
- var _a, _b;
1161
- let voice = '';
1162
- let voiceLang = (lang ||
1163
- ((_b = (_a = memori.culture) === null || _a === void 0 ? void 0 : _a.split('-')) === null || _b === void 0 ? void 0 : _b[0]) ||
1164
- i18n.language ||
1165
- 'IT').toUpperCase();
1166
- switch (voiceLang) {
1167
- case 'IT':
1168
- voice = 'it-IT';
1169
- break;
1170
- case 'DE':
1171
- voice = 'de-DE';
1172
- break;
1173
- case 'EN':
1174
- voice = 'en-GB';
1175
- break;
1176
- case 'ES':
1177
- voice = 'es-ES';
1178
- break;
1179
- case 'FR':
1180
- voice = 'fr-FR';
1181
- break;
1182
- case 'PT':
1183
- voice = 'pt-PT';
1184
- break;
1185
- case 'UK':
1186
- voice = 'uk-UK';
1187
- break;
1188
- case 'RU':
1189
- voice = 'ru-RU';
1190
- break;
1191
- case 'PL':
1192
- voice = 'pl-PL';
1193
- break;
1194
- case 'FI':
1195
- voice = 'fi-FI';
1196
- break;
1197
- case 'EL':
1198
- voice = 'el-GR';
1199
- break;
1200
- case 'AR':
1201
- voice = 'ar-SA';
1202
- break;
1203
- case 'ZH':
1204
- voice = 'zh-CN';
1205
- break;
1206
- case 'JA':
1207
- voice = 'ja-JP';
1208
- break;
1209
- default:
1210
- voice = 'it-IT';
1211
- break;
1212
- }
1213
- return voice;
1214
- };
1215
1240
  const handleSpeak = async (text) => {
1216
1241
  if (!text || preview || speakerMuted || !defaultEnableAudio) {
1217
1242
  const e = new CustomEvent('MemoriEndSpeak');
1218
1243
  document.dispatchEvent(e);
1219
- if (continuousSpeech) {
1220
- setListeningTimeout();
1221
- }
1222
1244
  return Promise.resolve();
1223
1245
  }
1224
- if (typeof stopListening === 'function') {
1225
- stopListening();
1246
+ if (typeof stopRecording === 'function') {
1247
+ stopRecording();
1226
1248
  }
1227
1249
  setMemoriTyping(true);
1228
1250
  const processedText = (0, sanitizer_1.sanitizeText)(text);
@@ -1264,25 +1286,6 @@ const MemoriWidget = ({ memori, memoriConfigs, ownerUserID, ownerUserName, tenan
1264
1286
  hasUserActivatedSpeak,
1265
1287
  setHasUserActivatedSpeak,
1266
1288
  ]);
1267
- const handleFallback = (text) => {
1268
- if (defaultEnableAudio) {
1269
- window.speechSynthesis.speak(new SpeechSynthesisUtterance(text));
1270
- }
1271
- cleanup();
1272
- };
1273
- const cleanup = () => {
1274
- if (recognizer) {
1275
- recognizer.stopContinuousRecognitionAsync();
1276
- recognizer.close();
1277
- recognizer = null;
1278
- }
1279
- if (speechSynthesizerRef.current) {
1280
- speechSynthesizerRef.current.close();
1281
- speechSynthesizerRef.current = null;
1282
- }
1283
- setListening(false);
1284
- clearListeningTimeout();
1285
- };
1286
1289
  const stopAudio = (0, react_1.useCallback)(async () => {
1287
1290
  ttsStop();
1288
1291
  }, [ttsStop]);
@@ -1300,230 +1303,8 @@ const MemoriWidget = ({ memori, memoriConfigs, ownerUserID, ownerUserName, tenan
1300
1303
  focusChatInput();
1301
1304
  }
1302
1305
  }, [currentDialogState === null || currentDialogState === void 0 ? void 0 : currentDialogState.emission]);
1303
- const [transcript, setTranscript] = (0, react_1.useState)('');
1304
- const [transcriptTimeout, setTranscriptTimeout] = (0, react_1.useState)(null);
1305
- const [isSpeaking, setIsSpeaking] = (0, react_1.useState)(false);
1306
- const resetTranscript = () => {
1307
- setTranscript('');
1308
- };
1309
- const setListeningTimeout = () => {
1310
- clearListeningTimeout();
1311
- console.debug('Setting speech processing timeout');
1312
- const timeout = setTimeout(() => {
1313
- console.debug('Speech timeout triggered, processing transcript');
1314
- handleTranscriptProcessing();
1315
- }, continuousSpeechTimeout * 1000 + 300);
1316
- setTranscriptTimeout(timeout);
1317
- };
1318
- const clearListeningTimeout = () => {
1319
- if (transcriptTimeout) {
1320
- console.debug('Clearing transcript timeout');
1321
- clearTimeout(transcriptTimeout);
1322
- setTranscriptTimeout(null);
1323
- }
1324
- };
1325
- const resetListeningTimeout = () => {
1326
- clearListeningTimeout();
1327
- if (continuousSpeech && !isProcessingSTT) {
1328
- console.debug('Setting new listening timeout');
1329
- setListeningTimeout();
1330
- }
1331
- };
1332
- (0, react_1.useEffect)(() => {
1333
- if (!isSpeaking && transcript && transcript.length > 0) {
1334
- console.debug('Transcript updated while not speaking, resetting timeout');
1335
- resetListeningTimeout();
1336
- resetInteractionTimeout();
1337
- }
1338
- }, [transcript, isSpeaking]);
1339
- (0, react_1.useEffect)(() => {
1340
- return () => {
1341
- clearListeningTimeout();
1342
- };
1343
- }, []);
1344
- let microphoneStream = null;
1345
- const startListening = async () => {
1346
- console.debug('Starting speech recognition...');
1347
- if (!sessionId) {
1348
- console.error('No session ID available');
1349
- throw new Error('No session ID available');
1350
- }
1351
- if (recognizer) {
1352
- console.debug('Cleaning up existing recognizer...');
1353
- try {
1354
- await new Promise((resolve, _) => {
1355
- recognizer === null || recognizer === void 0 ? void 0 : recognizer.stopContinuousRecognitionAsync(resolve, error => {
1356
- console.error('Error stopping recognition:', error);
1357
- resolve();
1358
- });
1359
- });
1360
- console.debug('Closing existing recognizer...');
1361
- recognizer.close();
1362
- recognizer = null;
1363
- }
1364
- catch (error) {
1365
- console.error('Error during recognizer cleanup:', error);
1366
- }
1367
- }
1368
- console.debug('Resetting transcript and STT state...');
1369
- resetTranscript();
1370
- setIsProcessingSTT(false);
1371
- console.debug('Adding delay for Azure services cleanup...');
1372
- await new Promise(resolve => setTimeout(resolve, 500));
1373
- try {
1374
- console.debug('Requesting microphone access...');
1375
- if (microphoneStream) {
1376
- microphoneStream.getTracks().forEach(track => track.stop());
1377
- microphoneStream = null;
1378
- }
1379
- const stream = await navigator.mediaDevices.getUserMedia({
1380
- audio: true,
1381
- });
1382
- setHasUserActivatedListening(true);
1383
- console.debug('Setting up speech config...');
1384
- console.debug('Creating audio config and recognizer...');
1385
- const audioConfig = speechSdk.AudioConfig.fromDefaultMicrophoneInput();
1386
- recognizer = new speechSdk.SpeechRecognizer(speechConfig, audioConfig);
1387
- console.debug('Setting up recognizer handlers...');
1388
- setupRecognizerHandlers(recognizer);
1389
- console.debug('Starting continuous recognition...');
1390
- await new Promise((resolve, reject) => {
1391
- recognizer === null || recognizer === void 0 ? void 0 : recognizer.startContinuousRecognitionAsync(resolve, error => {
1392
- console.error('Failed to start recognition:', error);
1393
- reject(error);
1394
- });
1395
- });
1396
- console.debug('Speech recognition started successfully');
1397
- setListening(true);
1398
- }
1399
- catch (error) {
1400
- console.error('Error in startListening:', error);
1401
- if (recognizer) {
1402
- console.debug('Cleaning up recognizer after error...');
1403
- recognizer.close();
1404
- recognizer = null;
1405
- }
1406
- setListening(false);
1407
- throw error;
1408
- }
1409
- };
1410
- startListeningRef.current = startListening;
1411
- const onEndSpeakStartListen = (0, react_1.useCallback)((_e) => {
1412
- if (isPlayingAudio && speechSynthesizerRef.current) {
1413
- speechSynthesizerRef.current.close();
1414
- speechSynthesizerRef.current = null;
1415
- }
1416
- if (continuousSpeech &&
1417
- (hasUserActivatedListening || !requestedListening)) {
1418
- setRequestedListening(true);
1419
- if (startListeningRef.current) {
1420
- startListeningRef.current();
1421
- }
1422
- }
1423
- }, [continuousSpeech, hasUserActivatedListening, isPlayingAudio, requestedListening]);
1424
- const setupSpeechConfig = (AZURE_COGNITIVE_SERVICES_TTS_KEY) => {
1425
- console.debug('Creating speech config...');
1426
- speechConfig = speechSdk.SpeechConfig.fromSubscription(AZURE_COGNITIVE_SERVICES_TTS_KEY, 'westeurope');
1427
- console.debug('Setting speech recognition language:', userLang);
1428
- speechConfig.speechRecognitionLanguage = getCultureCodeByLanguage(userLang);
1429
- speechConfig.speechSynthesisLanguage = getCultureCodeByLanguage(userLang);
1430
- speechConfig.speechSynthesisVoiceName = (0, ttsVoiceUtility_1.getTTSVoice)(userLang);
1431
- return speechConfig;
1432
- };
1433
- const [isProcessingSTT, setIsProcessingSTT] = (0, react_1.useState)(false);
1434
- const setupRecognizerHandlers = (recognizer) => {
1435
- if (recognizer) {
1436
- console.debug('Setting up recognizer event handlers...');
1437
- recognizer.recognized = (_, event) => {
1438
- console.debug('Recognition event received');
1439
- handleRecognizedSpeech(event.result.text);
1440
- };
1441
- console.debug('Configuring recognizer properties...');
1442
- recognizer.properties.setProperty('SpeechServiceResponse_JsonResult', 'true');
1443
- recognizer.properties.setProperty('SpeechServiceConnection_NoiseSuppression', 'true');
1444
- recognizer.properties.setProperty('SpeechServiceConnection_SNRThresholdDb', '10.0');
1445
- }
1446
- };
1447
- let isProcessingSpeech = false;
1448
- const processSpeechAndSendMessage = (text) => {
1449
- if (isProcessingSpeech || !text || text.trim().length === 0) {
1450
- console.debug('Skipping speech processing: already processing or empty text');
1451
- return;
1452
- }
1453
- try {
1454
- isProcessingSpeech = true;
1455
- const message = (0, utils_1.stripDuplicates)(text);
1456
- console.debug('Processing speech message:', message);
1457
- if (message.length > 0) {
1458
- setIsProcessingSTT(true);
1459
- setUserMessage('');
1460
- console.debug('Sending message:', message);
1461
- sendMessage(message);
1462
- resetTranscript();
1463
- clearListening();
1464
- }
1465
- }
1466
- finally {
1467
- setTimeout(() => {
1468
- isProcessingSpeech = false;
1469
- }, 1000);
1470
- }
1471
- };
1472
- const handleRecognizedSpeech = (text) => {
1473
- console.debug('Speech recognized:', text);
1474
- setTranscript(text);
1475
- setIsSpeaking(false);
1476
- if (!continuousSpeech) {
1477
- processSpeechAndSendMessage(text);
1478
- }
1479
- };
1480
- const handleTranscriptProcessing = () => {
1481
- if (transcript && transcript.length > 0 && listening) {
1482
- processSpeechAndSendMessage(transcript);
1483
- }
1484
- else if (listening) {
1485
- resetInteractionTimeout();
1486
- }
1487
- };
1488
- const stopListening = async () => {
1489
- console.debug('Stopping speech recognition');
1490
- if (recognizer) {
1491
- try {
1492
- recognizer.stopContinuousRecognitionAsync();
1493
- recognizer.close();
1494
- }
1495
- catch (error) {
1496
- console.error('Error stopping recognizer:', error);
1497
- }
1498
- recognizer = null;
1499
- }
1500
- if (microphoneStream) {
1501
- try {
1502
- microphoneStream.getTracks().forEach(track => track.stop());
1503
- }
1504
- catch (error) {
1505
- console.error('Error stopping microphone stream:', error);
1506
- }
1507
- microphoneStream = null;
1508
- }
1509
- setListening(false);
1510
- };
1511
- const clearListening = () => {
1512
- stopListening();
1513
- clearListeningTimeout();
1514
- setIsSpeaking(false);
1515
- };
1516
- const resetListening = () => {
1517
- if (listening) {
1518
- clearListening();
1519
- resetTranscript();
1520
- setUserMessage('');
1521
- startListening();
1522
- }
1523
- };
1524
1306
  const resetUIEffects = () => {
1525
1307
  try {
1526
- clearListening();
1527
1308
  clearInteractionTimeout();
1528
1309
  setClickedStart(false);
1529
1310
  timeoutRef.current = undefined;
@@ -1544,23 +1325,19 @@ const MemoriWidget = ({ memori, memoriConfigs, ownerUserID, ownerUserName, tenan
1544
1325
  document.removeEventListener('MemoriResetUIEffects', resetUIEffects);
1545
1326
  };
1546
1327
  }, []);
1547
- (0, react_1.useEffect)(() => {
1548
- if ((currentDialogState === null || currentDialogState === void 0 ? void 0 : currentDialogState.state) === 'Z0')
1549
- clearListening();
1550
- }, [currentDialogState === null || currentDialogState === void 0 ? void 0 : currentDialogState.state]);
1551
1328
  (0, react_1.useEffect)(() => {
1552
1329
  if (!isPlayingAudio &&
1553
1330
  continuousSpeech &&
1554
1331
  (hasUserActivatedListening || !requestedListening) &&
1555
1332
  sessionId) {
1556
- startListening();
1333
+ startRecording();
1557
1334
  }
1558
- else if (isPlayingAudio && listening) {
1559
- stopListening();
1335
+ else if (isPlayingAudio && isListening) {
1336
+ stopRecording();
1560
1337
  }
1561
1338
  }, [isPlayingAudio, hasUserActivatedListening]);
1562
1339
  (0, react_1.useEffect)(() => {
1563
- resetListening();
1340
+ stopRecording();
1564
1341
  }, [language]);
1565
1342
  const [sendOnEnter, setSendOnEnter] = (0, react_1.useState)('keypress');
1566
1343
  (0, react_1.useEffect)(() => {
@@ -1664,7 +1441,6 @@ const MemoriWidget = ({ memori, memoriConfigs, ownerUserID, ownerUserName, tenan
1664
1441
  }
1665
1442
  }, [integrationConfig, memori.avatarURL, ogImage]);
1666
1443
  const simulateUserPrompt = (text, translatedText) => {
1667
- stopListening();
1668
1444
  stopAudio();
1669
1445
  sendMessage(text, undefined, undefined, false, translatedText);
1670
1446
  };
@@ -1681,7 +1457,6 @@ const MemoriWidget = ({ memori, memoriConfigs, ownerUserID, ownerUserName, tenan
1681
1457
  }, 1000);
1682
1458
  }
1683
1459
  else {
1684
- stopListening();
1685
1460
  stopAudio();
1686
1461
  sendMessage(text, undefined, undefined, undefined, undefined, hidden, typingText, useLoaderTextAsMsg, hasBatchQueued);
1687
1462
  }
@@ -2110,7 +1885,9 @@ const MemoriWidget = ({ memori, memoriConfigs, ownerUserID, ownerUserName, tenan
2110
1885
  avatar3dVisible,
2111
1886
  setAvatar3dVisible,
2112
1887
  hasUserActivatedSpeak,
2113
- isPlayingAudio: isPlayingAudio && !speakerMuted && ((_y = enableAudio !== null && enableAudio !== void 0 ? enableAudio : integrationConfig === null || integrationConfig === void 0 ? void 0 : integrationConfig.enableAudio) !== null && _y !== void 0 ? _y : true),
1888
+ isPlayingAudio: isPlayingAudio &&
1889
+ !speakerMuted &&
1890
+ ((_y = enableAudio !== null && enableAudio !== void 0 ? enableAudio : integrationConfig === null || integrationConfig === void 0 ? void 0 : integrationConfig.enableAudio) !== null && _y !== void 0 ? _y : true),
2114
1891
  loading: !!memoriTyping,
2115
1892
  baseUrl,
2116
1893
  apiUrl: client.constants.BACKEND_URL,
@@ -2145,7 +1922,6 @@ const MemoriWidget = ({ memori, memoriConfigs, ownerUserID, ownerUserName, tenan
2145
1922
  memori,
2146
1923
  sessionID: sessionId || '',
2147
1924
  tenant,
2148
- provider: ttsProvider,
2149
1925
  translateTo: isMultilanguageEnabled &&
2150
1926
  userLang.toUpperCase() !==
2151
1927
  ((_3 = ((_2 = (_1 = (_0 = (_z = memori.culture) === null || _z === void 0 ? void 0 : _z.split('-')) === null || _0 === void 0 ? void 0 : _0[0]) !== null && _1 !== void 0 ? _1 : i18n.language) !== null && _2 !== void 0 ? _2 : 'IT')) === null || _3 === void 0 ? void 0 : _3.toUpperCase())
@@ -2185,16 +1961,14 @@ const MemoriWidget = ({ memori, memoriConfigs, ownerUserID, ownerUserName, tenan
2185
1961
  onChangeUserMessage,
2186
1962
  sendMessage: (msg, media) => {
2187
1963
  stopAudio();
2188
- stopListening();
1964
+ stopRecording();
2189
1965
  sendMessage(msg, media);
2190
1966
  setUserMessage('');
2191
- resetTranscript();
2192
1967
  },
2193
- stopListening: clearListening,
2194
- startListening,
1968
+ stopListening: stopRecording,
1969
+ startListening: startRecording,
2195
1970
  stopAudio,
2196
- resetTranscript,
2197
- listening,
1971
+ listening: isListening,
2198
1972
  setEnableFocusChatInput,
2199
1973
  isPlayingAudio,
2200
1974
  customMediaRenderer,