@dreamtree-org/twreact-ui 1.1.11 → 1.1.12
This diff reflects the changes between publicly released versions of the package as they appear in their public registry, and is provided for informational purposes only.
- package/dist/index.esm.js +154 -37
- package/dist/index.js +154 -37
- package/package.json +1 -1
package/dist/index.esm.js
CHANGED
@@ -15953,9 +15953,19 @@ var SpeechToText = /*#__PURE__*/forwardRef(function (_ref, ref) {
     _ref$autoStart = _ref.autoStart,
     autoStart = _ref$autoStart === void 0 ? false : _ref$autoStart,
     _ref$disabled = _ref.disabled,
-    disabled = _ref$disabled === void 0 ? false : _ref$disabled
+    disabled = _ref$disabled === void 0 ? false : _ref$disabled,
+    _ref$maxAlternatives = _ref.maxAlternatives,
+    maxAlternatives = _ref$maxAlternatives === void 0 ? 1 : _ref$maxAlternatives,
+    _ref$grammars = _ref.grammars,
+    grammars = _ref$grammars === void 0 ? null : _ref$grammars,
+    _ref$timeout = _ref.timeout,
+    timeout = _ref$timeout === void 0 ? null : _ref$timeout,
+    _ref$clearOnStop = _ref.clearOnStop,
+    clearOnStop = _ref$clearOnStop === void 0 ? false : _ref$clearOnStop;
   var recognitionRef = useRef(null);
   var finalTranscriptRef = useRef("");
+  var timeoutRef = useRef(null);
+  var isMountedRef = useRef(true);
   var _useState = useState(false),
     _useState2 = _slicedToArray(_useState, 2),
     _isListening = _useState2[0],
@@ -15964,34 +15974,48 @@ var SpeechToText = /*#__PURE__*/forwardRef(function (_ref, ref) {
     _useState4 = _slicedToArray(_useState3, 2),
     _isSupported = _useState4[0],
     setIsSupported = _useState4[1];
+  var _useState5 = useState(null),
+    _useState6 = _slicedToArray(_useState5, 2),
+    error = _useState6[0],
+    setError = _useState6[1];
+
+  // Check browser support
   useEffect(function () {
-
-
-
-
+    var supported = "webkitSpeechRecognition" in window || "SpeechRecognition" in window;
+    setIsSupported(supported);
+    if (!supported) {
+      var err = new Error("Speech recognition not supported in this browser");
+      setError(err);
+      onError(err);
     }
-  }, []);
-
-
-
+  }, [onError]);
+
+  // Clear timeout helper
+  var clearInactivityTimeout = useCallback(function () {
+    if (timeoutRef.current) {
+      clearTimeout(timeoutRef.current);
+      timeoutRef.current = null;
     }
-
-
-
-
-
-
-
-
-
+  }, []);
+
+  // Reset inactivity timeout
+  var resetInactivityTimeout = useCallback(function () {
+    if (!timeout) return;
+    clearInactivityTimeout();
+    timeoutRef.current = setTimeout(function () {
+      if (recognitionRef.current && _isListening) {
+        stopListening();
       }
-  };
-  }, [
-
+    }, timeout);
+  }, [timeout, _isListening]);
+
+  // Initialize recognition
+  var initializeRecognition = useCallback(function () {
     if (recognitionRef.current) return recognitionRef.current;
     var SpeechRecognition = window.SpeechRecognition || window.webkitSpeechRecognition;
     if (!SpeechRecognition) {
       var err = new Error("SpeechRecognition API not available");
+      setError(err);
       onError(err);
       return null;
     }
@@ -15999,12 +16023,21 @@ var SpeechToText = /*#__PURE__*/forwardRef(function (_ref, ref) {
     recognition.continuous = !!continuous;
     recognition.interimResults = !!interimResults;
     recognition.lang = lang;
+    recognition.maxAlternatives = maxAlternatives;
+    if (grammars) {
+      recognition.grammars = grammars;
+    }
     recognition.onstart = function () {
+      if (!isMountedRef.current) return;
       finalTranscriptRef.current = finalTranscriptRef.current || "";
       setIsListening(true);
+      setError(null);
+      resetInactivityTimeout();
       onStart();
     };
     recognition.onresult = function (event) {
+      if (!isMountedRef.current) return;
+      resetInactivityTimeout();
       var interim = "";
       var finalPart = "";
       for (var i = event.resultIndex; i < event.results.length; i++) {
@@ -16024,21 +16057,48 @@ var SpeechToText = /*#__PURE__*/forwardRef(function (_ref, ref) {
       }
     };
     recognition.onerror = function (event) {
-
-
+      if (!isMountedRef.current) return;
+      clearInactivityTimeout();
+      var errorMessage = (event === null || event === void 0 ? void 0 : event.error) || "Speech recognition error";
+      var err = new Error(errorMessage);
+      setError(err);
       setIsListening(false);
+      onError(err);
+
+      // Handle specific errors
+      if (errorMessage === "not-allowed" || errorMessage === "service-not-allowed") {
+        console.warn("Microphone permission denied");
+      } else if (errorMessage === "no-speech") {
+        console.warn("No speech detected");
+      } else if (errorMessage === "aborted") {
+        console.warn("Speech recognition aborted");
+      }
     };
     recognition.onend = function () {
+      if (!isMountedRef.current) return;
+      clearInactivityTimeout();
       setIsListening(false);
+      var finalText = (finalTranscriptRef.current || "").trim();
+      onSpeechComplete(finalText);
       onStop();
-
+      if (clearOnStop) {
+        finalTranscriptRef.current = "";
+      }
     };
     recognitionRef.current = recognition;
     return recognition;
-  }
-
+  }, [continuous, interimResults, lang, maxAlternatives, grammars, onStart, onSpeaking, onSpeechComplete, onError, onStop, clearOnStop, resetInactivityTimeout, clearInactivityTimeout]);
+
+  // Start listening
+  var startListening = useCallback(function () {
     if (!_isSupported) {
-
+      var err = new Error("Speech recognition not supported in this browser");
+      setError(err);
+      onError(err);
+      return;
+    }
+    if (_isListening) {
+      console.warn("Already listening");
       return;
     }
     try {
@@ -16046,24 +16106,68 @@ var SpeechToText = /*#__PURE__*/forwardRef(function (_ref, ref) {
       if (!rec) return;
       rec.start();
     } catch (err) {
+      var _err$message;
+      // Handle "already started" error
+      if ((_err$message = err.message) !== null && _err$message !== void 0 && _err$message.includes("already started")) {
+        console.warn("Recognition already in progress");
+        return;
+      }
+      setError(err);
       onError(err);
     }
-  }
-
+  }, [_isSupported, _isListening, initializeRecognition, onError]);
+
+  // Stop listening
+  var stopListening = useCallback(function () {
     if (recognitionRef.current) {
       try {
+        clearInactivityTimeout();
         recognitionRef.current.stop();
-      } catch (
+      } catch (err) {
+        console.error("Error stopping recognition:", err);
+      }
     }
-  }
-
+  }, [clearInactivityTimeout]);
+
+  // Toggle listening
+  var toggleListening = useCallback(function () {
     if (disabled) return;
     if (_isListening) {
       stopListening();
     } else {
       startListening();
     }
-  }
+  }, [disabled, _isListening, startListening, stopListening]);
+
+  // Auto-start effect
+  useEffect(function () {
+    if (autoStart && _isSupported && !disabled) {
+      startListening();
+    }
+  }, [autoStart, _isSupported, disabled, startListening]);
+
+  // Cleanup on unmount
+  useEffect(function () {
+    isMountedRef.current = true;
+    return function () {
+      isMountedRef.current = false;
+      clearInactivityTimeout();
+      if (recognitionRef.current) {
+        try {
+          recognitionRef.current.onresult = null;
+          recognitionRef.current.onerror = null;
+          recognitionRef.current.onend = null;
+          recognitionRef.current.onstart = null;
+          recognitionRef.current.stop();
+        } catch (err) {
+          console.error("Cleanup error:", err);
+        }
+        recognitionRef.current = null;
+      }
+    };
+  }, [clearInactivityTimeout]);
+
+  // Imperative handle
   useImperativeHandle(ref, function () {
     return {
       start: startListening,
@@ -16080,26 +16184,39 @@ var SpeechToText = /*#__PURE__*/forwardRef(function (_ref, ref) {
       },
       clearTranscript: function clearTranscript() {
         finalTranscriptRef.current = "";
+      },
+      getError: function getError() {
+        return error;
       }
     };
-  }, [_isListening, _isSupported]);
+  }, [_isListening, _isSupported, error, startListening, stopListening, toggleListening]);
+
+  // Custom button renderer
   if (renderButton && typeof renderButton === "function") {
     return renderButton({
       isListening: _isListening,
       isSupported: _isSupported,
+      error: error,
       start: startListening,
       stop: stopListening,
       toggle: toggleListening,
-      disabled: disabled
+      disabled: disabled || !_isSupported
     });
   }
+
+  // Default button
   return /*#__PURE__*/React__default.createElement("button", {
     type: "button",
     "aria-pressed": _isListening,
+    "aria-label": _isListening ? "Stop listening" : "Start listening",
     onClick: toggleListening,
-    disabled: disabled || !_isSupported
-
+    disabled: disabled || !_isSupported,
+    style: {
+      cursor: disabled || !_isSupported ? "not-allowed" : "pointer"
+    }
+  }, _isListening ? "🎤 Stop" : "🎤 Start");
 });
+SpeechToText.displayName = "SpeechToText";

 var TextToSpeech = function TextToSpeech(_ref) {
   var _ref$text = _ref.text,
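For orientation, this release extends SpeechToText with maxAlternatives, grammars, timeout, and clearOnStop props, tracks an error state (exposed through the getError() ref method and the renderButton callback), and folds browser support into the button's disabled flag. A minimal usage sketch follows, assuming the component is a named export of the package; the prop and method names come from the diff above, while the handler bodies are illustrative only:

// Hypothetical usage of the 1.1.12 SpeechToText API (import path assumed
// from the package name; handlers are illustrative, not from the source).
import React from "react";
import { SpeechToText } from "@dreamtree-org/twreact-ui";

export function DictationButton() {
  return (
    <SpeechToText
      lang="en-US"
      maxAlternatives={3}   // new in 1.1.12, defaults to 1
      timeout={8000}        // new: auto-stop after 8s without results
      clearOnStop           // new: reset the transcript when recognition ends
      onSpeechComplete={function (text) { console.log("Transcript:", text); }}
      onError={function (err) { console.warn(err.message); }}
      renderButton={function (api) {
        // 1.1.12 adds `error` to this object and folds browser support
        // into `disabled`.
        return (
          <button type="button" onClick={api.toggle} disabled={api.disabled}>
            {api.isListening ? "Stop" : "Dictate"}
          </button>
        );
      }}
    />
  );
}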
package/dist/index.js
CHANGED
@@ -15973,9 +15973,19 @@ var SpeechToText = /*#__PURE__*/React.forwardRef(function (_ref, ref) {
     _ref$autoStart = _ref.autoStart,
     autoStart = _ref$autoStart === void 0 ? false : _ref$autoStart,
     _ref$disabled = _ref.disabled,
-    disabled = _ref$disabled === void 0 ? false : _ref$disabled
+    disabled = _ref$disabled === void 0 ? false : _ref$disabled,
+    _ref$maxAlternatives = _ref.maxAlternatives,
+    maxAlternatives = _ref$maxAlternatives === void 0 ? 1 : _ref$maxAlternatives,
+    _ref$grammars = _ref.grammars,
+    grammars = _ref$grammars === void 0 ? null : _ref$grammars,
+    _ref$timeout = _ref.timeout,
+    timeout = _ref$timeout === void 0 ? null : _ref$timeout,
+    _ref$clearOnStop = _ref.clearOnStop,
+    clearOnStop = _ref$clearOnStop === void 0 ? false : _ref$clearOnStop;
   var recognitionRef = React.useRef(null);
   var finalTranscriptRef = React.useRef("");
+  var timeoutRef = React.useRef(null);
+  var isMountedRef = React.useRef(true);
   var _useState = React.useState(false),
     _useState2 = _slicedToArray(_useState, 2),
     _isListening = _useState2[0],
@@ -15984,34 +15994,48 @@ var SpeechToText = /*#__PURE__*/React.forwardRef(function (_ref, ref) {
     _useState4 = _slicedToArray(_useState3, 2),
     _isSupported = _useState4[0],
     setIsSupported = _useState4[1];
+  var _useState5 = React.useState(null),
+    _useState6 = _slicedToArray(_useState5, 2),
+    error = _useState6[0],
+    setError = _useState6[1];
+
+  // Check browser support
   React.useEffect(function () {
-
-
-
-
+    var supported = "webkitSpeechRecognition" in window || "SpeechRecognition" in window;
+    setIsSupported(supported);
+    if (!supported) {
+      var err = new Error("Speech recognition not supported in this browser");
+      setError(err);
+      onError(err);
     }
-  }, []);
-
-
-
+  }, [onError]);
+
+  // Clear timeout helper
+  var clearInactivityTimeout = React.useCallback(function () {
+    if (timeoutRef.current) {
+      clearTimeout(timeoutRef.current);
+      timeoutRef.current = null;
     }
-
-
-
-
-
-
-
-
-
+  }, []);
+
+  // Reset inactivity timeout
+  var resetInactivityTimeout = React.useCallback(function () {
+    if (!timeout) return;
+    clearInactivityTimeout();
+    timeoutRef.current = setTimeout(function () {
+      if (recognitionRef.current && _isListening) {
+        stopListening();
      }
-  };
-  }, [
-
+    }, timeout);
+  }, [timeout, _isListening]);
+
+  // Initialize recognition
+  var initializeRecognition = React.useCallback(function () {
     if (recognitionRef.current) return recognitionRef.current;
     var SpeechRecognition = window.SpeechRecognition || window.webkitSpeechRecognition;
     if (!SpeechRecognition) {
       var err = new Error("SpeechRecognition API not available");
+      setError(err);
       onError(err);
       return null;
     }
@@ -16019,12 +16043,21 @@ var SpeechToText = /*#__PURE__*/React.forwardRef(function (_ref, ref) {
     recognition.continuous = !!continuous;
     recognition.interimResults = !!interimResults;
     recognition.lang = lang;
+    recognition.maxAlternatives = maxAlternatives;
+    if (grammars) {
+      recognition.grammars = grammars;
+    }
     recognition.onstart = function () {
+      if (!isMountedRef.current) return;
       finalTranscriptRef.current = finalTranscriptRef.current || "";
       setIsListening(true);
+      setError(null);
+      resetInactivityTimeout();
       onStart();
     };
     recognition.onresult = function (event) {
+      if (!isMountedRef.current) return;
+      resetInactivityTimeout();
       var interim = "";
       var finalPart = "";
       for (var i = event.resultIndex; i < event.results.length; i++) {
@@ -16044,21 +16077,48 @@ var SpeechToText = /*#__PURE__*/React.forwardRef(function (_ref, ref) {
       }
     };
     recognition.onerror = function (event) {
-
-
+      if (!isMountedRef.current) return;
+      clearInactivityTimeout();
+      var errorMessage = (event === null || event === void 0 ? void 0 : event.error) || "Speech recognition error";
+      var err = new Error(errorMessage);
+      setError(err);
       setIsListening(false);
+      onError(err);
+
+      // Handle specific errors
+      if (errorMessage === "not-allowed" || errorMessage === "service-not-allowed") {
+        console.warn("Microphone permission denied");
+      } else if (errorMessage === "no-speech") {
+        console.warn("No speech detected");
+      } else if (errorMessage === "aborted") {
+        console.warn("Speech recognition aborted");
+      }
     };
     recognition.onend = function () {
+      if (!isMountedRef.current) return;
+      clearInactivityTimeout();
       setIsListening(false);
+      var finalText = (finalTranscriptRef.current || "").trim();
+      onSpeechComplete(finalText);
       onStop();
-
+      if (clearOnStop) {
+        finalTranscriptRef.current = "";
+      }
     };
     recognitionRef.current = recognition;
     return recognition;
-  }
-
+  }, [continuous, interimResults, lang, maxAlternatives, grammars, onStart, onSpeaking, onSpeechComplete, onError, onStop, clearOnStop, resetInactivityTimeout, clearInactivityTimeout]);
+
+  // Start listening
+  var startListening = React.useCallback(function () {
     if (!_isSupported) {
-
+      var err = new Error("Speech recognition not supported in this browser");
+      setError(err);
+      onError(err);
+      return;
+    }
+    if (_isListening) {
+      console.warn("Already listening");
       return;
     }
     try {
@@ -16066,24 +16126,68 @@ var SpeechToText = /*#__PURE__*/React.forwardRef(function (_ref, ref) {
       if (!rec) return;
       rec.start();
     } catch (err) {
+      var _err$message;
+      // Handle "already started" error
+      if ((_err$message = err.message) !== null && _err$message !== void 0 && _err$message.includes("already started")) {
+        console.warn("Recognition already in progress");
+        return;
+      }
+      setError(err);
       onError(err);
     }
-  }
-
+  }, [_isSupported, _isListening, initializeRecognition, onError]);
+
+  // Stop listening
+  var stopListening = React.useCallback(function () {
     if (recognitionRef.current) {
       try {
+        clearInactivityTimeout();
         recognitionRef.current.stop();
-      } catch (
+      } catch (err) {
+        console.error("Error stopping recognition:", err);
+      }
     }
-  }
-
+  }, [clearInactivityTimeout]);
+
+  // Toggle listening
+  var toggleListening = React.useCallback(function () {
     if (disabled) return;
     if (_isListening) {
       stopListening();
     } else {
       startListening();
     }
-  }
+  }, [disabled, _isListening, startListening, stopListening]);
+
+  // Auto-start effect
+  React.useEffect(function () {
+    if (autoStart && _isSupported && !disabled) {
+      startListening();
+    }
+  }, [autoStart, _isSupported, disabled, startListening]);
+
+  // Cleanup on unmount
+  React.useEffect(function () {
+    isMountedRef.current = true;
+    return function () {
+      isMountedRef.current = false;
+      clearInactivityTimeout();
+      if (recognitionRef.current) {
+        try {
+          recognitionRef.current.onresult = null;
+          recognitionRef.current.onerror = null;
+          recognitionRef.current.onend = null;
+          recognitionRef.current.onstart = null;
+          recognitionRef.current.stop();
+        } catch (err) {
+          console.error("Cleanup error:", err);
+        }
+        recognitionRef.current = null;
+      }
+    };
+  }, [clearInactivityTimeout]);
+
+  // Imperative handle
   React.useImperativeHandle(ref, function () {
     return {
       start: startListening,
@@ -16100,26 +16204,39 @@ var SpeechToText = /*#__PURE__*/React.forwardRef(function (_ref, ref) {
       },
       clearTranscript: function clearTranscript() {
         finalTranscriptRef.current = "";
+      },
+      getError: function getError() {
+        return error;
      }
     };
-  }, [_isListening, _isSupported]);
+  }, [_isListening, _isSupported, error, startListening, stopListening, toggleListening]);
+
+  // Custom button renderer
   if (renderButton && typeof renderButton === "function") {
     return renderButton({
       isListening: _isListening,
       isSupported: _isSupported,
+      error: error,
       start: startListening,
       stop: stopListening,
       toggle: toggleListening,
-      disabled: disabled
+      disabled: disabled || !_isSupported
     });
   }
+
+  // Default button
   return /*#__PURE__*/React.createElement("button", {
     type: "button",
     "aria-pressed": _isListening,
+    "aria-label": _isListening ? "Stop listening" : "Start listening",
     onClick: toggleListening,
-    disabled: disabled || !_isSupported
-
+    disabled: disabled || !_isSupported,
+    style: {
+      cursor: disabled || !_isSupported ? "not-allowed" : "pointer"
+    }
+  }, _isListening ? "🎤 Stop" : "🎤 Start");
 });
+SpeechToText.displayName = "SpeechToText";

 var TextToSpeech = function TextToSpeech(_ref) {
   var _ref$text = _ref.text,