@dreamtree-org/twreact-ui 1.1.14 → 1.1.16

This diff shows the changes between two publicly released versions of this package, as published to a supported public registry. The information is provided for informational purposes only.
package/dist/index.esm.js CHANGED
@@ -15938,6 +15938,10 @@ function LocationPicker(_ref) {
15938
15938
  }, iconButton)));
15939
15939
  }
15940
15940
 
15941
+ var isProbablyMobile = function isProbablyMobile() {
15942
+ if (typeof navigator === "undefined") return false;
15943
+ return /Android|iPhone|iPad|iPod/i.test(navigator.userAgent);
15944
+ };
15941
15945
  var SpeechToText = /*#__PURE__*/forwardRef(function (_ref, ref) {
15942
15946
  var _ref$lang = _ref.lang,
15943
15947
  lang = _ref$lang === void 0 ? "en-US" : _ref$lang,
@@ -15955,24 +15959,17 @@ var SpeechToText = /*#__PURE__*/forwardRef(function (_ref, ref) {
15955
15959
  onStart = _ref$onStart === void 0 ? function () {} : _ref$onStart,
15956
15960
  _ref$onStop = _ref.onStop,
15957
15961
  onStop = _ref$onStop === void 0 ? function () {} : _ref$onStop,
15958
- _ref$renderButton = _ref.renderButton,
15959
- renderButton = _ref$renderButton === void 0 ? null : _ref$renderButton,
15962
+ renderButton = _ref.renderButton,
15960
15963
  _ref$autoStart = _ref.autoStart,
15961
15964
  autoStart = _ref$autoStart === void 0 ? false : _ref$autoStart,
15962
15965
  _ref$disabled = _ref.disabled,
15963
15966
  disabled = _ref$disabled === void 0 ? false : _ref$disabled,
15964
- _ref$maxAlternatives = _ref.maxAlternatives,
15965
- maxAlternatives = _ref$maxAlternatives === void 0 ? 1 : _ref$maxAlternatives,
15966
- _ref$grammars = _ref.grammars,
15967
- grammars = _ref$grammars === void 0 ? null : _ref$grammars,
15968
- _ref$timeout = _ref.timeout,
15969
- timeout = _ref$timeout === void 0 ? null : _ref$timeout,
15970
- _ref$clearOnStop = _ref.clearOnStop,
15971
- clearOnStop = _ref$clearOnStop === void 0 ? false : _ref$clearOnStop;
15967
+ _ref$autoRestart = _ref.autoRestart,
15968
+ autoRestart = _ref$autoRestart === void 0 ? true : _ref$autoRestart;
15972
15969
  var recognitionRef = useRef(null);
15973
- var finalTranscriptRef = useRef("");
15974
- var timeoutRef = useRef(null);
15975
15970
  var isMountedRef = useRef(true);
15971
+ var finalByIndexRef = useRef(new Map());
15972
+ var lastEmittedFinalRef = useRef("");
15976
15973
  var _useState = useState(false),
15977
15974
  _useState2 = _slicedToArray(_useState, 2),
15978
15975
  _isListening = _useState2[0],
@@ -15985,196 +15982,121 @@ var SpeechToText = /*#__PURE__*/forwardRef(function (_ref, ref) {
15985
15982
  _useState6 = _slicedToArray(_useState5, 2),
15986
15983
  error = _useState6[0],
15987
15984
  setError = _useState6[1];
15988
-
15989
- // Check browser support
15990
15985
  useEffect(function () {
15991
- var supported = "webkitSpeechRecognition" in window || "SpeechRecognition" in window;
15986
+ return function () {
15987
+ isMountedRef.current = false;
15988
+ if (recognitionRef.current) {
15989
+ try {
15990
+ recognitionRef.current.stop();
15991
+ } catch (_unused) {}
15992
+ }
15993
+ };
15994
+ }, []);
15995
+ useEffect(function () {
15996
+ var supported = typeof window !== "undefined" && ("SpeechRecognition" in window || "webkitSpeechRecognition" in window);
15992
15997
  setIsSupported(supported);
15993
- if (!supported) {
15994
- var err = new Error("Speech recognition not supported in this browser");
15995
- setError(err);
15996
- onError(err);
15997
- }
15998
- }, [onError]);
15999
-
16000
- // Clear timeout helper
16001
- var clearInactivityTimeout = useCallback(function () {
16002
- if (timeoutRef.current) {
16003
- clearTimeout(timeoutRef.current);
16004
- timeoutRef.current = null;
16005
- }
16006
15998
  }, []);
16007
-
16008
- // Reset inactivity timeout
16009
- var resetInactivityTimeout = useCallback(function () {
16010
- if (!timeout) return;
16011
- clearInactivityTimeout();
16012
- timeoutRef.current = setTimeout(function () {
16013
- if (recognitionRef.current && _isListening) {
16014
- stopListening();
16015
- }
16016
- }, timeout);
16017
- }, [timeout, _isListening]);
16018
-
16019
- // Initialize recognition
15999
+ useEffect(function () {
16000
+ if (autoStart && _isSupported && !disabled) {
16001
+ startListening();
16002
+ }
16003
+ }, [autoStart, _isSupported, disabled]);
16004
+ var buildFinalTranscript = function buildFinalTranscript() {
16005
+ return Array.from(finalByIndexRef.current.entries()).sort(function (a, b) {
16006
+ return a[0] - b[0];
16007
+ }).map(function (_ref2) {
16008
+ var _ref3 = _slicedToArray(_ref2, 2),
16009
+ text = _ref3[1];
16010
+ return text.trim();
16011
+ }).join(" ").trim();
16012
+ };
16020
16013
  var initializeRecognition = useCallback(function () {
16021
16014
  if (recognitionRef.current) return recognitionRef.current;
16022
16015
  var SpeechRecognition = window.SpeechRecognition || window.webkitSpeechRecognition;
16023
16016
  if (!SpeechRecognition) {
16024
- var err = new Error("SpeechRecognition API not available");
16017
+ var err = new Error("SpeechRecognition API not supported");
16025
16018
  setError(err);
16026
16019
  onError(err);
16027
16020
  return null;
16028
16021
  }
16029
16022
  var recognition = new SpeechRecognition();
16030
- recognition.continuous = !!continuous;
16031
- recognition.interimResults = !!interimResults;
16023
+ recognition.continuous = isProbablyMobile() ? false : continuous;
16024
+ recognition.interimResults = interimResults;
16032
16025
  recognition.lang = lang;
16033
- recognition.maxAlternatives = maxAlternatives;
16034
- if (grammars) {
16035
- recognition.grammars = grammars;
16036
- }
16037
16026
  recognition.onstart = function () {
16038
16027
  if (!isMountedRef.current) return;
16039
- finalTranscriptRef.current = finalTranscriptRef.current || "";
16040
16028
  setIsListening(true);
16041
16029
  setError(null);
16042
- resetInactivityTimeout();
16043
16030
  onStart();
16044
16031
  };
16045
16032
  recognition.onresult = function (event) {
16046
16033
  if (!isMountedRef.current) return;
16047
- resetInactivityTimeout();
16048
16034
  var interim = "";
16049
- var finalPart = "";
16050
16035
  for (var i = event.resultIndex; i < event.results.length; i++) {
16051
- var piece = event.results[i][0].transcript;
16052
- if (event.results[i].isFinal) {
16053
- finalPart += piece + " ";
16036
+ var _res$;
16037
+ var res = event.results[i];
16038
+ var text = ((_res$ = res[0]) === null || _res$ === void 0 ? void 0 : _res$.transcript) || "";
16039
+ if (res.isFinal) {
16040
+ finalByIndexRef.current.set(i, text);
16054
16041
  } else {
16055
- interim += piece;
16042
+ interim += text;
16056
16043
  }
16057
16044
  }
16058
- if (interim) {
16059
- onSpeaking(interim);
16060
- }
16061
- if (finalPart) {
16062
- finalTranscriptRef.current = (finalTranscriptRef.current || "") + finalPart;
16063
- onSpeechComplete(finalTranscriptRef.current.trim());
16045
+ if (interim) onSpeaking(interim.trim());
16046
+ var finalTranscript = buildFinalTranscript();
16047
+ if (finalTranscript && finalTranscript !== lastEmittedFinalRef.current) {
16048
+ lastEmittedFinalRef.current = finalTranscript;
16049
+ onSpeechComplete(finalTranscript);
16064
16050
  }
16065
16051
  };
16066
16052
  recognition.onerror = function (event) {
16067
- if (!isMountedRef.current) return;
16068
- clearInactivityTimeout();
16069
- var errorMessage = (event === null || event === void 0 ? void 0 : event.error) || "Speech recognition error";
16070
- var err = new Error(errorMessage);
16053
+ var err = new Error((event === null || event === void 0 ? void 0 : event.error) || "Speech recognition error");
16071
16054
  setError(err);
16072
16055
  setIsListening(false);
16073
16056
  onError(err);
16074
-
16075
- // Handle specific errors
16076
- if (errorMessage === "not-allowed" || errorMessage === "service-not-allowed") {
16077
- console.warn("Microphone permission denied");
16078
- } else if (errorMessage === "no-speech") {
16079
- console.warn("No speech detected");
16080
- } else if (errorMessage === "aborted") {
16081
- console.warn("Speech recognition aborted");
16082
- }
16083
16057
  };
16084
16058
  recognition.onend = function () {
16085
16059
  if (!isMountedRef.current) return;
16086
- clearInactivityTimeout();
16087
16060
  setIsListening(false);
16088
- var finalText = (finalTranscriptRef.current || "").trim();
16089
- onSpeechComplete(finalText);
16061
+ var finalTranscript = buildFinalTranscript();
16062
+ if (finalTranscript && finalTranscript !== lastEmittedFinalRef.current) {
16063
+ lastEmittedFinalRef.current = finalTranscript;
16064
+ onSpeechComplete(finalTranscript);
16065
+ }
16090
16066
  onStop();
16091
- if (clearOnStop) {
16092
- finalTranscriptRef.current = "";
16067
+ if (autoRestart && !disabled && _isSupported && !isProbablyMobile()) {
16068
+ try {
16069
+ recognition.start();
16070
+ setIsListening(true);
16071
+ } catch (_unused2) {}
16093
16072
  }
16094
16073
  };
16095
16074
  recognitionRef.current = recognition;
16096
16075
  return recognition;
16097
- }, [continuous, interimResults, lang, maxAlternatives, grammars, onStart, onSpeaking, onSpeechComplete, onError, onStop, clearOnStop, resetInactivityTimeout, clearInactivityTimeout]);
16098
-
16099
- // Start listening
16076
+ }, [continuous, interimResults, lang, autoRestart, disabled, _isSupported, onStart, onSpeaking, onSpeechComplete, onError, onStop]);
16100
16077
  var startListening = useCallback(function () {
16101
- if (!_isSupported) {
16102
- var err = new Error("Speech recognition not supported in this browser");
16103
- setError(err);
16104
- onError(err);
16105
- return;
16106
- }
16107
- if (_isListening) {
16108
- console.warn("Already listening");
16109
- return;
16110
- }
16078
+ if (!_isSupported || disabled || _isListening) return;
16111
16079
  try {
16112
16080
  var rec = initializeRecognition();
16113
16081
  if (!rec) return;
16082
+ finalByIndexRef.current.clear();
16083
+ lastEmittedFinalRef.current = "";
16114
16084
  rec.start();
16115
16085
  } catch (err) {
16116
- var _err$message;
16117
- // Handle "already started" error
16118
- if ((_err$message = err.message) !== null && _err$message !== void 0 && _err$message.includes("already started")) {
16119
- console.warn("Recognition already in progress");
16120
- return;
16121
- }
16122
16086
  setError(err);
16123
16087
  onError(err);
16124
16088
  }
16125
- }, [_isSupported, _isListening, initializeRecognition, onError]);
16126
-
16127
- // Stop listening
16089
+ }, [_isSupported, disabled, _isListening, initializeRecognition]);
16128
16090
  var stopListening = useCallback(function () {
16129
- if (recognitionRef.current) {
16130
- try {
16131
- clearInactivityTimeout();
16132
- recognitionRef.current.stop();
16133
- } catch (err) {
16134
- console.error("Error stopping recognition:", err);
16135
- }
16136
- }
16137
- }, [clearInactivityTimeout]);
16138
-
16139
- // Toggle listening
16091
+ if (!recognitionRef.current) return;
16092
+ try {
16093
+ recognitionRef.current.stop();
16094
+ } catch (_unused3) {}
16095
+ }, []);
16140
16096
  var toggleListening = useCallback(function () {
16141
16097
  if (disabled) return;
16142
- if (_isListening) {
16143
- stopListening();
16144
- } else {
16145
- startListening();
16146
- }
16147
- }, [disabled, _isListening, startListening, stopListening]);
16148
-
16149
- // Auto-start effect
16150
- useEffect(function () {
16151
- if (autoStart && _isSupported && !disabled) {
16152
- startListening();
16153
- }
16154
- }, [autoStart, _isSupported, disabled, startListening]);
16155
-
16156
- // Cleanup on unmount
16157
- useEffect(function () {
16158
- isMountedRef.current = true;
16159
- return function () {
16160
- isMountedRef.current = false;
16161
- clearInactivityTimeout();
16162
- if (recognitionRef.current) {
16163
- try {
16164
- recognitionRef.current.onresult = null;
16165
- recognitionRef.current.onerror = null;
16166
- recognitionRef.current.onend = null;
16167
- recognitionRef.current.onstart = null;
16168
- recognitionRef.current.stop();
16169
- } catch (err) {
16170
- console.error("Cleanup error:", err);
16171
- }
16172
- recognitionRef.current = null;
16173
- }
16174
- };
16175
- }, [clearInactivityTimeout]);
16176
-
16177
- // Imperative handle
16098
+ _isListening ? stopListening() : startListening();
16099
+ }, [_isListening, disabled, startListening, stopListening]);
16178
16100
  useImperativeHandle(ref, function () {
16179
16101
  return {
16180
16102
  start: startListening,
@@ -16186,20 +16108,17 @@ var SpeechToText = /*#__PURE__*/forwardRef(function (_ref, ref) {
16186
16108
  isSupported: function isSupported() {
16187
16109
  return _isSupported;
16188
16110
  },
16189
- getTranscript: function getTranscript() {
16190
- return (finalTranscriptRef.current || "").trim();
16191
- },
16111
+ getTranscript: buildFinalTranscript,
16192
16112
  clearTranscript: function clearTranscript() {
16193
- finalTranscriptRef.current = "";
16113
+ finalByIndexRef.current.clear();
16114
+ lastEmittedFinalRef.current = "";
16194
16115
  },
16195
16116
  getError: function getError() {
16196
16117
  return error;
16197
16118
  }
16198
16119
  };
16199
- }, [_isListening, _isSupported, error, startListening, stopListening, toggleListening]);
16200
-
16201
- // Custom button renderer
16202
- if (renderButton && typeof renderButton === "function") {
16120
+ }, [_isListening, _isSupported, error]);
16121
+ if (typeof renderButton === "function") {
16203
16122
  return renderButton({
16204
16123
  isListening: _isListening,
16205
16124
  isSupported: _isSupported,
@@ -16210,8 +16129,6 @@ var SpeechToText = /*#__PURE__*/forwardRef(function (_ref, ref) {
16210
16129
  disabled: disabled || !_isSupported
16211
16130
  });
16212
16131
  }
16213
-
16214
- // Default button
16215
16132
  return /*#__PURE__*/React__default.createElement("button", {
16216
16133
  type: "button",
16217
16134
  "aria-pressed": _isListening,
package/dist/index.js CHANGED
@@ -15958,6 +15958,10 @@ function LocationPicker(_ref) {
15958
15958
  }, iconButton)));
15959
15959
  }
15960
15960
 
15961
+ var isProbablyMobile = function isProbablyMobile() {
15962
+ if (typeof navigator === "undefined") return false;
15963
+ return /Android|iPhone|iPad|iPod/i.test(navigator.userAgent);
15964
+ };
15961
15965
  var SpeechToText = /*#__PURE__*/React.forwardRef(function (_ref, ref) {
15962
15966
  var _ref$lang = _ref.lang,
15963
15967
  lang = _ref$lang === void 0 ? "en-US" : _ref$lang,
@@ -15975,24 +15979,17 @@ var SpeechToText = /*#__PURE__*/React.forwardRef(function (_ref, ref) {
15975
15979
  onStart = _ref$onStart === void 0 ? function () {} : _ref$onStart,
15976
15980
  _ref$onStop = _ref.onStop,
15977
15981
  onStop = _ref$onStop === void 0 ? function () {} : _ref$onStop,
15978
- _ref$renderButton = _ref.renderButton,
15979
- renderButton = _ref$renderButton === void 0 ? null : _ref$renderButton,
15982
+ renderButton = _ref.renderButton,
15980
15983
  _ref$autoStart = _ref.autoStart,
15981
15984
  autoStart = _ref$autoStart === void 0 ? false : _ref$autoStart,
15982
15985
  _ref$disabled = _ref.disabled,
15983
15986
  disabled = _ref$disabled === void 0 ? false : _ref$disabled,
15984
- _ref$maxAlternatives = _ref.maxAlternatives,
15985
- maxAlternatives = _ref$maxAlternatives === void 0 ? 1 : _ref$maxAlternatives,
15986
- _ref$grammars = _ref.grammars,
15987
- grammars = _ref$grammars === void 0 ? null : _ref$grammars,
15988
- _ref$timeout = _ref.timeout,
15989
- timeout = _ref$timeout === void 0 ? null : _ref$timeout,
15990
- _ref$clearOnStop = _ref.clearOnStop,
15991
- clearOnStop = _ref$clearOnStop === void 0 ? false : _ref$clearOnStop;
15987
+ _ref$autoRestart = _ref.autoRestart,
15988
+ autoRestart = _ref$autoRestart === void 0 ? true : _ref$autoRestart;
15992
15989
  var recognitionRef = React.useRef(null);
15993
- var finalTranscriptRef = React.useRef("");
15994
- var timeoutRef = React.useRef(null);
15995
15990
  var isMountedRef = React.useRef(true);
15991
+ var finalByIndexRef = React.useRef(new Map());
15992
+ var lastEmittedFinalRef = React.useRef("");
15996
15993
  var _useState = React.useState(false),
15997
15994
  _useState2 = _slicedToArray(_useState, 2),
15998
15995
  _isListening = _useState2[0],
@@ -16005,196 +16002,121 @@ var SpeechToText = /*#__PURE__*/React.forwardRef(function (_ref, ref) {
16005
16002
  _useState6 = _slicedToArray(_useState5, 2),
16006
16003
  error = _useState6[0],
16007
16004
  setError = _useState6[1];
16008
-
16009
- // Check browser support
16010
16005
  React.useEffect(function () {
16011
- var supported = "webkitSpeechRecognition" in window || "SpeechRecognition" in window;
16006
+ return function () {
16007
+ isMountedRef.current = false;
16008
+ if (recognitionRef.current) {
16009
+ try {
16010
+ recognitionRef.current.stop();
16011
+ } catch (_unused) {}
16012
+ }
16013
+ };
16014
+ }, []);
16015
+ React.useEffect(function () {
16016
+ var supported = typeof window !== "undefined" && ("SpeechRecognition" in window || "webkitSpeechRecognition" in window);
16012
16017
  setIsSupported(supported);
16013
- if (!supported) {
16014
- var err = new Error("Speech recognition not supported in this browser");
16015
- setError(err);
16016
- onError(err);
16017
- }
16018
- }, [onError]);
16019
-
16020
- // Clear timeout helper
16021
- var clearInactivityTimeout = React.useCallback(function () {
16022
- if (timeoutRef.current) {
16023
- clearTimeout(timeoutRef.current);
16024
- timeoutRef.current = null;
16025
- }
16026
16018
  }, []);
16027
-
16028
- // Reset inactivity timeout
16029
- var resetInactivityTimeout = React.useCallback(function () {
16030
- if (!timeout) return;
16031
- clearInactivityTimeout();
16032
- timeoutRef.current = setTimeout(function () {
16033
- if (recognitionRef.current && _isListening) {
16034
- stopListening();
16035
- }
16036
- }, timeout);
16037
- }, [timeout, _isListening]);
16038
-
16039
- // Initialize recognition
16019
+ React.useEffect(function () {
16020
+ if (autoStart && _isSupported && !disabled) {
16021
+ startListening();
16022
+ }
16023
+ }, [autoStart, _isSupported, disabled]);
16024
+ var buildFinalTranscript = function buildFinalTranscript() {
16025
+ return Array.from(finalByIndexRef.current.entries()).sort(function (a, b) {
16026
+ return a[0] - b[0];
16027
+ }).map(function (_ref2) {
16028
+ var _ref3 = _slicedToArray(_ref2, 2),
16029
+ text = _ref3[1];
16030
+ return text.trim();
16031
+ }).join(" ").trim();
16032
+ };
16040
16033
  var initializeRecognition = React.useCallback(function () {
16041
16034
  if (recognitionRef.current) return recognitionRef.current;
16042
16035
  var SpeechRecognition = window.SpeechRecognition || window.webkitSpeechRecognition;
16043
16036
  if (!SpeechRecognition) {
16044
- var err = new Error("SpeechRecognition API not available");
16037
+ var err = new Error("SpeechRecognition API not supported");
16045
16038
  setError(err);
16046
16039
  onError(err);
16047
16040
  return null;
16048
16041
  }
16049
16042
  var recognition = new SpeechRecognition();
16050
- recognition.continuous = !!continuous;
16051
- recognition.interimResults = !!interimResults;
16043
+ recognition.continuous = isProbablyMobile() ? false : continuous;
16044
+ recognition.interimResults = interimResults;
16052
16045
  recognition.lang = lang;
16053
- recognition.maxAlternatives = maxAlternatives;
16054
- if (grammars) {
16055
- recognition.grammars = grammars;
16056
- }
16057
16046
  recognition.onstart = function () {
16058
16047
  if (!isMountedRef.current) return;
16059
- finalTranscriptRef.current = finalTranscriptRef.current || "";
16060
16048
  setIsListening(true);
16061
16049
  setError(null);
16062
- resetInactivityTimeout();
16063
16050
  onStart();
16064
16051
  };
16065
16052
  recognition.onresult = function (event) {
16066
16053
  if (!isMountedRef.current) return;
16067
- resetInactivityTimeout();
16068
16054
  var interim = "";
16069
- var finalPart = "";
16070
16055
  for (var i = event.resultIndex; i < event.results.length; i++) {
16071
- var piece = event.results[i][0].transcript;
16072
- if (event.results[i].isFinal) {
16073
- finalPart += piece + " ";
16056
+ var _res$;
16057
+ var res = event.results[i];
16058
+ var text = ((_res$ = res[0]) === null || _res$ === void 0 ? void 0 : _res$.transcript) || "";
16059
+ if (res.isFinal) {
16060
+ finalByIndexRef.current.set(i, text);
16074
16061
  } else {
16075
- interim += piece;
16062
+ interim += text;
16076
16063
  }
16077
16064
  }
16078
- if (interim) {
16079
- onSpeaking(interim);
16080
- }
16081
- if (finalPart) {
16082
- finalTranscriptRef.current = (finalTranscriptRef.current || "") + finalPart;
16083
- onSpeechComplete(finalTranscriptRef.current.trim());
16065
+ if (interim) onSpeaking(interim.trim());
16066
+ var finalTranscript = buildFinalTranscript();
16067
+ if (finalTranscript && finalTranscript !== lastEmittedFinalRef.current) {
16068
+ lastEmittedFinalRef.current = finalTranscript;
16069
+ onSpeechComplete(finalTranscript);
16084
16070
  }
16085
16071
  };
16086
16072
  recognition.onerror = function (event) {
16087
- if (!isMountedRef.current) return;
16088
- clearInactivityTimeout();
16089
- var errorMessage = (event === null || event === void 0 ? void 0 : event.error) || "Speech recognition error";
16090
- var err = new Error(errorMessage);
16073
+ var err = new Error((event === null || event === void 0 ? void 0 : event.error) || "Speech recognition error");
16091
16074
  setError(err);
16092
16075
  setIsListening(false);
16093
16076
  onError(err);
16094
-
16095
- // Handle specific errors
16096
- if (errorMessage === "not-allowed" || errorMessage === "service-not-allowed") {
16097
- console.warn("Microphone permission denied");
16098
- } else if (errorMessage === "no-speech") {
16099
- console.warn("No speech detected");
16100
- } else if (errorMessage === "aborted") {
16101
- console.warn("Speech recognition aborted");
16102
- }
16103
16077
  };
16104
16078
  recognition.onend = function () {
16105
16079
  if (!isMountedRef.current) return;
16106
- clearInactivityTimeout();
16107
16080
  setIsListening(false);
16108
- var finalText = (finalTranscriptRef.current || "").trim();
16109
- onSpeechComplete(finalText);
16081
+ var finalTranscript = buildFinalTranscript();
16082
+ if (finalTranscript && finalTranscript !== lastEmittedFinalRef.current) {
16083
+ lastEmittedFinalRef.current = finalTranscript;
16084
+ onSpeechComplete(finalTranscript);
16085
+ }
16110
16086
  onStop();
16111
- if (clearOnStop) {
16112
- finalTranscriptRef.current = "";
16087
+ if (autoRestart && !disabled && _isSupported && !isProbablyMobile()) {
16088
+ try {
16089
+ recognition.start();
16090
+ setIsListening(true);
16091
+ } catch (_unused2) {}
16113
16092
  }
16114
16093
  };
16115
16094
  recognitionRef.current = recognition;
16116
16095
  return recognition;
16117
- }, [continuous, interimResults, lang, maxAlternatives, grammars, onStart, onSpeaking, onSpeechComplete, onError, onStop, clearOnStop, resetInactivityTimeout, clearInactivityTimeout]);
16118
-
16119
- // Start listening
16096
+ }, [continuous, interimResults, lang, autoRestart, disabled, _isSupported, onStart, onSpeaking, onSpeechComplete, onError, onStop]);
16120
16097
  var startListening = React.useCallback(function () {
16121
- if (!_isSupported) {
16122
- var err = new Error("Speech recognition not supported in this browser");
16123
- setError(err);
16124
- onError(err);
16125
- return;
16126
- }
16127
- if (_isListening) {
16128
- console.warn("Already listening");
16129
- return;
16130
- }
16098
+ if (!_isSupported || disabled || _isListening) return;
16131
16099
  try {
16132
16100
  var rec = initializeRecognition();
16133
16101
  if (!rec) return;
16102
+ finalByIndexRef.current.clear();
16103
+ lastEmittedFinalRef.current = "";
16134
16104
  rec.start();
16135
16105
  } catch (err) {
16136
- var _err$message;
16137
- // Handle "already started" error
16138
- if ((_err$message = err.message) !== null && _err$message !== void 0 && _err$message.includes("already started")) {
16139
- console.warn("Recognition already in progress");
16140
- return;
16141
- }
16142
16106
  setError(err);
16143
16107
  onError(err);
16144
16108
  }
16145
- }, [_isSupported, _isListening, initializeRecognition, onError]);
16146
-
16147
- // Stop listening
16109
+ }, [_isSupported, disabled, _isListening, initializeRecognition]);
16148
16110
  var stopListening = React.useCallback(function () {
16149
- if (recognitionRef.current) {
16150
- try {
16151
- clearInactivityTimeout();
16152
- recognitionRef.current.stop();
16153
- } catch (err) {
16154
- console.error("Error stopping recognition:", err);
16155
- }
16156
- }
16157
- }, [clearInactivityTimeout]);
16158
-
16159
- // Toggle listening
16111
+ if (!recognitionRef.current) return;
16112
+ try {
16113
+ recognitionRef.current.stop();
16114
+ } catch (_unused3) {}
16115
+ }, []);
16160
16116
  var toggleListening = React.useCallback(function () {
16161
16117
  if (disabled) return;
16162
- if (_isListening) {
16163
- stopListening();
16164
- } else {
16165
- startListening();
16166
- }
16167
- }, [disabled, _isListening, startListening, stopListening]);
16168
-
16169
- // Auto-start effect
16170
- React.useEffect(function () {
16171
- if (autoStart && _isSupported && !disabled) {
16172
- startListening();
16173
- }
16174
- }, [autoStart, _isSupported, disabled, startListening]);
16175
-
16176
- // Cleanup on unmount
16177
- React.useEffect(function () {
16178
- isMountedRef.current = true;
16179
- return function () {
16180
- isMountedRef.current = false;
16181
- clearInactivityTimeout();
16182
- if (recognitionRef.current) {
16183
- try {
16184
- recognitionRef.current.onresult = null;
16185
- recognitionRef.current.onerror = null;
16186
- recognitionRef.current.onend = null;
16187
- recognitionRef.current.onstart = null;
16188
- recognitionRef.current.stop();
16189
- } catch (err) {
16190
- console.error("Cleanup error:", err);
16191
- }
16192
- recognitionRef.current = null;
16193
- }
16194
- };
16195
- }, [clearInactivityTimeout]);
16196
-
16197
- // Imperative handle
16118
+ _isListening ? stopListening() : startListening();
16119
+ }, [_isListening, disabled, startListening, stopListening]);
16198
16120
  React.useImperativeHandle(ref, function () {
16199
16121
  return {
16200
16122
  start: startListening,
@@ -16206,20 +16128,17 @@ var SpeechToText = /*#__PURE__*/React.forwardRef(function (_ref, ref) {
16206
16128
  isSupported: function isSupported() {
16207
16129
  return _isSupported;
16208
16130
  },
16209
- getTranscript: function getTranscript() {
16210
- return (finalTranscriptRef.current || "").trim();
16211
- },
16131
+ getTranscript: buildFinalTranscript,
16212
16132
  clearTranscript: function clearTranscript() {
16213
- finalTranscriptRef.current = "";
16133
+ finalByIndexRef.current.clear();
16134
+ lastEmittedFinalRef.current = "";
16214
16135
  },
16215
16136
  getError: function getError() {
16216
16137
  return error;
16217
16138
  }
16218
16139
  };
16219
- }, [_isListening, _isSupported, error, startListening, stopListening, toggleListening]);
16220
-
16221
- // Custom button renderer
16222
- if (renderButton && typeof renderButton === "function") {
16140
+ }, [_isListening, _isSupported, error]);
16141
+ if (typeof renderButton === "function") {
16223
16142
  return renderButton({
16224
16143
  isListening: _isListening,
16225
16144
  isSupported: _isSupported,
@@ -16230,8 +16149,6 @@ var SpeechToText = /*#__PURE__*/React.forwardRef(function (_ref, ref) {
16230
16149
  disabled: disabled || !_isSupported
16231
16150
  });
16232
16151
  }
16233
-
16234
- // Default button
16235
16152
  return /*#__PURE__*/React.createElement("button", {
16236
16153
  type: "button",
16237
16154
  "aria-pressed": _isListening,
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "@dreamtree-org/twreact-ui",
3
- "version": "1.1.14",
3
+ "version": "1.1.16",
4
4
  "description": "A comprehensive React + Tailwind components library for building modern web apps",
5
5
  "author": {
6
6
  "name": "Partha Preetham Krishna",