dmed-voice-assistant 1.2.2 → 1.2.3
Sign up to get free protection for your applications and to get access to all the features.
- package/dist/recognition.js +53 -27
- package/dist/recorder.js +2 -5
- package/package.json +1 -1
package/dist/recognition.js
CHANGED
@@ -9,6 +9,8 @@ var _react = _interopRequireWildcard(require("react"));
 var _material = require("@mui/material");
 var _svgs = require("./components/svgs");
 var _RecognitionListItem = _interopRequireDefault(require("./components/RecognitionListItem"));
+var _recorderJs = _interopRequireDefault(require("recorder-js"));
+var _recorder = require("./recorder");
 var _jsxRuntime = require("react/jsx-runtime");
 function _getRequireWildcardCache(e) { if ("function" != typeof WeakMap) return null; var r = new WeakMap(), t = new WeakMap(); return (_getRequireWildcardCache = function (e) { return e ? t : r; })(e); }
 function _interopRequireWildcard(e, r) { if (!r && e && e.__esModule) return e; if (null === e || "object" != typeof e && "function" != typeof e) return { default: e }; var t = _getRequireWildcardCache(r); if (t && t.has(e)) return t.get(e); var n = { __proto__: null }, a = Object.defineProperty && Object.getOwnPropertyDescriptor; for (var u in e) if ("default" !== u && {}.hasOwnProperty.call(e, u)) { var i = a ? Object.getOwnPropertyDescriptor(e, u) : null; i && (i.get || i.set) ? Object.defineProperty(n, u, i) : n[u] = e[u]; } return n.default = e, t && t.set(e, n), n; }
|
@@ -73,8 +75,9 @@ const Recognition = _ref4 => {
   const [selectedVoice, setSelectedVoice] = (0, _react.useState)("");
   const [voiceList, setVoiceList] = (0, _react.useState)([]);
   const languageList = ['Auto-Detect', 'English', 'Chinese (Simplified)'];
-  const [selectedLanguage, setSelectedLanguage] = (0, _react.useState)("");
+  const [selectedLanguage, setSelectedLanguage] = (0, _react.useState)("en-US");
   const recognitionRef = (0, _react.useRef)(null);
+  const mediaRecorderRef = (0, _react.useRef)(null);
   const [result, setResult] = (0, _react.useState)([]);
   const [historyList, setHistoryList] = (0, _react.useState)(recognitionHistoryList);
   const [recordTime, setRecordTime] = (0, _react.useState)(0);
@@ -118,34 +121,57 @@ const Recognition = _ref4 => {
   const handleLanguageChange = event => {
     setSelectedLanguage(event.target.value);
   };
-  const startRecording = () => {
-
-
-
-
-
-
-
-
+  const startRecording = async () => {
+    try {
+      if (recognitionRef.current) {
+        if (!mediaRecorderRef.current) {
+          const stream = await navigator.mediaDevices.getUserMedia({
+            audio: true
+          });
+          const audioContext = new (window.AudioContext || window.webkitAudioContext)();
+          const newRecorder = new _recorderJs.default(audioContext);
+          await newRecorder.init(stream);
+          mediaRecorderRef.current = newRecorder;
+        }
+        mediaRecorderRef.current.start();
+        setResult([]);
+        setRecordTime(0);
+        const id = setInterval(async () => {
+          setRecordTime(prevCount => prevCount + 1);
+        }, 1000);
+        setIntervalId(id);
+        recognitionRef.current.start();
+      }
+    } catch (error) {
+      console.error("Error starting recording:", error);
     }
   };
   const stopRecording = () => {
-    if (recognitionRef.current) {
+    if (recognitionRef.current && mediaRecorderRef.current) {
       recognitionRef.current.stop();
       clearInterval(intervalId);
-
-
-
-
-
-
-
-
-
-
-
-
-
+      mediaRecorderRef.current.stop().then(async _ref5 => {
+        let {
+          blob
+        } = _ref5;
+        const audioBlob = blob;
+        let temp = [...historyList];
+        const newData = {
+          fileName: (0, _recorder.getVoiceFileName)(new Date()),
+          audioBlob,
+          result,
+          lang: selectedLanguage,
+          date: new Date()
+        };
+        temp.push(newData);
+        setHistoryList(temp);
+        if (onNewRecognitionEvent) {
+          onNewRecognitionEvent(newData);
+        }
+        if (onRecognitionDataChange) {
+          onRecognitionDataChange(temp);
+        }
+      });
     }
   };
   const startSpeechRecognition = () => {
@@ -565,13 +591,13 @@ const Recognition = _ref4 => {
           }
         },
         children: [/*#__PURE__*/(0, _jsxRuntime.jsx)(_material.MenuItem, {
-          value:
+          value: "en-US",
          children: "Auto-Detect"
        }), /*#__PURE__*/(0, _jsxRuntime.jsx)(_material.MenuItem, {
-          value:
+          value: "en-US",
          children: "English"
        }), /*#__PURE__*/(0, _jsxRuntime.jsx)(_material.MenuItem, {
-          value:
+          value: "zh-TW",
          children: "Chinese (Simplified)"
        })]
      })]
|
package/dist/recorder.js
CHANGED
@@ -4,7 +4,7 @@ var _interopRequireDefault = require("@babel/runtime/helpers/interopRequireDefau
 Object.defineProperty(exports, "__esModule", {
   value: true
 });
-exports.default = void 0;
+exports.getVoiceFileName = exports.default = void 0;
 var _react = _interopRequireWildcard(require("react"));
 var _material = require("@mui/material");
 var _Grid = _interopRequireDefault(require("@mui/material/Grid2"));
@@ -15,7 +15,6 @@ var _recorderJs = _interopRequireDefault(require("recorder-js"));
 var _jsxRuntime = require("react/jsx-runtime");
 function _getRequireWildcardCache(e) { if ("function" != typeof WeakMap) return null; var r = new WeakMap(), t = new WeakMap(); return (_getRequireWildcardCache = function (e) { return e ? t : r; })(e); }
 function _interopRequireWildcard(e, r) { if (!r && e && e.__esModule) return e; if (null === e || "object" != typeof e && "function" != typeof e) return { default: e }; var t = _getRequireWildcardCache(r); if (t && t.has(e)) return t.get(e); var n = { __proto__: null }, a = Object.defineProperty && Object.getOwnPropertyDescriptor; for (var u in e) if ("default" !== u && {}.hasOwnProperty.call(e, u)) { var i = a ? Object.getOwnPropertyDescriptor(e, u) : null; i && (i.get || i.set) ? Object.defineProperty(n, u, i) : n[u] = e[u]; } return n.default = e, t && t.set(e, n), n; }
-const apiUrl = 'https://api.origintechx.dev/qa/v1/diagnose/voice';
 const getVoiceFileName = date => {
   const year = date.getFullYear(); // Get the full year (YYYY)
   const month = String(date.getMonth() + 1).padStart(2, '0'); // Get the month (MM), pad with leading zero if necessary
@@ -23,6 +22,7 @@ const getVoiceFileName = date => {

   return `Voice${year}${month}${day}.wav`;
 };
+exports.getVoiceFileName = getVoiceFileName;
 const getTimeValues = totalSeconds => {
   const hours = Math.floor(totalSeconds / 3600); // Get hours
   let minutes = Math.floor(totalSeconds % 3600 / 60); // Get minutes
@@ -63,7 +63,6 @@ const RecorderBox = _ref => {
   const [isRunning, setIsRunning] = (0, _react.useState)(false);
   const [intervalId, setIntervalId] = (0, _react.useState)(null);
   const [audioBlob, setAudioBlob] = (0, _react.useState)(null);
-  const [audioUrl, setAudioUrl] = (0, _react.useState)('');
   const [audioSize, setAudioSize] = (0, _react.useState)(0);
   const mediaRecorderRef = (0, _react.useRef)(null);
   const handleVoiceChange = event => {
@@ -104,8 +103,6 @@ const RecorderBox = _ref => {
       blob
     } = _ref2;
     setAudioBlob(blob);
-    setAudioUrl(URL.createObjectURL(blob));
-    console.log(blob);
     let temp = [...recordList];
     const newVoice = {
       audioURL: URL.createObjectURL(blob),
|