dmed-voice-assistant 1.2.5 → 1.2.7
- package/dist/index.js +8 -2
- package/dist/recognition.js +21 -1
- package/dist/recorder.js +3 -6
- package/package.json +1 -1
package/dist/index.js
CHANGED
@@ -23,7 +23,10 @@ const VoiceAssistant = _ref => {
     onNewRecordEvent,
     onRecordDataChange,
     onNewRecognitionEvent,
-    onRecognitionDataChange
+    onRecognitionDataChange,
+    onCloseRecognition,
+    onRecognitionStopEvent,
+    onRealTimeRecognitionCommandEvent
   } = _ref;
   const [mode, setMode] = (0, _react.useState)(!isOnlyRecognitionMode ? "recorder" : "recognition");
   return /*#__PURE__*/(0, _jsxRuntime.jsx)(_material.Box, {
@@ -40,9 +43,12 @@ const VoiceAssistant = _ref => {
       mode: mode,
       setMode: setMode,
       recognitionHistoryList: recognitionListValue,
+      onRealTimeRecognitionCommandEvent: onRealTimeRecognitionCommandEvent,
       onNewRecognitionEvent: onNewRecognitionEvent,
       onRecognitionDataChange: onRecognitionDataChange,
-      isOnlyRecognitionMode: isOnlyRecognitionMode
+      isOnlyRecognitionMode: isOnlyRecognitionMode,
+      onCloseRecognition: onCloseRecognition,
+      onRecognitionStopEvent: onRecognitionStopEvent
     })]
   })
 });
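
Note: VoiceAssistant forwards the three new optional callbacks straight to the Recognition component (second hunk above; recognition.js below). A minimal consumer-side sketch, assuming the package's default export and with hypothetical handler bodies:

import VoiceAssistant from "dmed-voice-assistant";

const App = () => (
  <VoiceAssistant
    isOnlyRecognitionMode={true}
    // Receives an array of words for each final speech-recognition result.
    onRealTimeRecognitionCommandEvent={words => console.log("command:", words)}
    // Fired when the close icon in the recognition view is clicked.
    onCloseRecognition={() => console.log("recognition closed")}
    // Fired when recognition recording is stopped.
    onRecognitionStopEvent={() => console.log("recognition stopped")}
  />
);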
package/dist/recognition.js
CHANGED
@@ -74,7 +74,10 @@ const Recognition = _ref4 => {
     recognitionHistoryList,
     setMode,
     onNewRecognitionEvent,
-    onRecognitionDataChange
+    onRecognitionDataChange,
+    onCloseRecognition,
+    onRealTimeRecognitionCommandEvent,
+    onRecognitionStopEvent
   } = _ref4;
   const [open, setOpen] = (0, _react.useState)(false);
   const [anchorEl, setAnchorEl] = (0, _react.useState)(null);
@@ -133,6 +136,11 @@ const Recognition = _ref4 => {
   const handleLanguageChange = event => {
     setSelectedLanguage(event.target.value);
   };
+  const handleCloseModal = () => {
+    if (onCloseRecognition) {
+      onCloseRecognition();
+    }
+  };
   const startRecording = async () => {
     try {
       if (recognitionRef.current) {
@@ -162,6 +170,9 @@ const Recognition = _ref4 => {
   const stopRecording = () => {
     if (recognitionRef.current && mediaRecorderRef.current) {
       recognitionRef.current.stop();
+      if (onRecognitionStopEvent) {
+        onRecognitionStopEvent();
+      }
       clearInterval(intervalId);
       mediaRecorderRef.current.stop().then(async _ref5 => {
         let {
@@ -220,6 +231,14 @@ const Recognition = _ref4 => {
       }
       return updatedTranscript;
     });
+    for (let i = event.resultIndex; i < event.results.length; i++) {
+      if (event.results[i].isFinal) {
+        const resultArr = event.results[i][0].transcript.split(' ').filter(word => word.trim() !== '');
+        if (onRealTimeRecognitionCommandEvent) {
+          onRealTimeRecognitionCommandEvent(resultArr);
+        }
+      }
+    }
   };
   recognition.onerror = event => {
     console.error('Speech recognition error:', event.error);
@@ -331,6 +350,7 @@ const Recognition = _ref4 => {
         })]
       }), /*#__PURE__*/(0, _jsxRuntime.jsx)(_material.Box, {
         className: "px-[10px] py-[8px] cursor-pointer",
+        onClick: handleCloseModal,
         children: /*#__PURE__*/(0, _jsxRuntime.jsx)(_svgs.CloseIcon, {})
       })]
     }), /*#__PURE__*/(0, _jsxRuntime.jsxs)(_material.Box, {
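
Taken together, the recognition.js hunks wire each new callback to one call site: the close icon's new onClick runs handleCloseModal, which invokes onCloseRecognition; stopRecording invokes onRecognitionStopEvent right after recognition stops and before the media recorder flushes; and the onresult handler splits every final result's transcript into non-empty words before passing the array to onRealTimeRecognitionCommandEvent. A sketch of handlers matching those call sites (handler bodies are hypothetical; only the callback names and argument shapes come from the diff):

const recognitionHandlers = {
  // Invoked with no arguments when the close icon is clicked.
  onCloseRecognition: () => console.log("modal closed"),
  // Invoked with no arguments right after recognitionRef.current.stop().
  onRecognitionStopEvent: () => console.log("recognition stopped"),
  // Invoked once per final result with its words, e.g. ["turn", "on", "the", "lights"].
  onRealTimeRecognitionCommandEvent: resultArr => {
    if (resultArr.includes("stop")) {
      console.log("voice command: stop");
    }
  }
};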
package/dist/recorder.js
CHANGED
@@ -57,12 +57,11 @@ const RecorderBox = _ref => {
   const [voiceList, setVoiceList] = (0, _react.useState)([]);
   const languageList = ['Auto-Detect', 'English', 'Chinese (Simplified)'];
   const [selectedLanguage, setSelectedLanguage] = (0, _react.useState)("");
-  const [recordList, setRecordList] = (0, _react.useState)(
+  const [recordList, setRecordList] = (0, _react.useState)([]);
   const [newRecordFileName, setNewRecordFileName] = (0, _react.useState)("");
   const [newRecordTime, setNewRecordTime] = (0, _react.useState)(0);
   const [isRunning, setIsRunning] = (0, _react.useState)(false);
   const [intervalId, setIntervalId] = (0, _react.useState)(null);
-  const [audioBlob, setAudioBlob] = (0, _react.useState)(null);
   const [audioSize, setAudioSize] = (0, _react.useState)(0);
   const mediaRecorderRef = (0, _react.useRef)(null);
   const handleVoiceChange = event => {
@@ -89,7 +88,6 @@ const RecorderBox = _ref => {
       await initRecorder();
     }
     if (mediaRecorderRef.current) {
-      setAudioBlob(null);
       mediaRecorderRef.current.start();
       setIsStartedRecord(true);
       setNewRecordFileName(getVoiceFileName(new Date()));
@@ -102,7 +100,6 @@ const RecorderBox = _ref => {
       let {
         blob
       } = _ref2;
-      setAudioBlob(blob);
       let temp = [...recordList];
       const newVoice = {
         audioURL: URL.createObjectURL(blob),
@@ -194,8 +191,8 @@ const RecorderBox = _ref => {
     fetchAudioInputDevices();
   }, []);
   (0, _react.useEffect)(() => {
-
-  }, [
+    setRecordList(recordHistoryList);
+  }, [recordHistoryList]);
   return /*#__PURE__*/(0, _jsxRuntime.jsx)(_material.Box, {
     className: "bg-[#0B0B0B] rounded-[5px] border p-[20px] w-[850px]",
     children: /*#__PURE__*/(0, _jsxRuntime.jsxs)(_Grid.default, {