react-voice-to-speech 1.0.3 → 1.0.4
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/index.cjs +12 -3
- package/dist/index.js +15 -6
- package/package.json +1 -1
package/dist/index.cjs
CHANGED
|
@@ -49,7 +49,10 @@ var OverlayVoiceToSpeech = ({ language, onDataReady, onClose, labels, children }
|
|
|
49
49
|
const streamRef = _react.useRef.call(void 0, null);
|
|
50
50
|
const audioContextRef = _react.useRef.call(void 0, null);
|
|
51
51
|
const timeoutRef = _react.useRef.call(void 0, null);
|
|
52
|
-
const uiLabels = { ...DefaultVoiceToSpeechLabels, ...labels };
|
|
52
|
+
const uiLabels = _react.useMemo.call(void 0, () => ({
|
|
53
|
+
...DefaultVoiceToSpeechLabels,
|
|
54
|
+
...labels
|
|
55
|
+
}), [labels]);
|
|
53
56
|
const handleStop = (e) => {
|
|
54
57
|
e.stopPropagation();
|
|
55
58
|
if (status === "listening" && recognitionRef.current) {
|
|
@@ -254,7 +257,10 @@ var OverlayVoiceToSpeechWithPunctuation = ({ lang, onDataReady, onClose, labels,
|
|
|
254
257
|
const streamRef = _react.useRef.call(void 0, null);
|
|
255
258
|
const audioContextRef = _react.useRef.call(void 0, null);
|
|
256
259
|
const timeoutRef = _react.useRef.call(void 0, null);
|
|
257
|
-
const uiLabels = { ...DefaultVoiceToSpeechLabels, ...labels };
|
|
260
|
+
const uiLabels = _react.useMemo.call(void 0, () => ({
|
|
261
|
+
...DefaultVoiceToSpeechLabels,
|
|
262
|
+
...labels
|
|
263
|
+
}), [labels]);
|
|
258
264
|
const handleStop = (e) => {
|
|
259
265
|
e.stopPropagation();
|
|
260
266
|
if (status === "listening" && recognitionRef.current) {
|
|
@@ -438,7 +444,10 @@ var BasicVoiceToSpeechButton = ({ language = "en", onDataReady, className, id, s
|
|
|
438
444
|
if (typeof onDataReady === "function")
|
|
439
445
|
onDataReady(data);
|
|
440
446
|
};
|
|
441
|
-
const uiLabels = { ...DefaultVoiceToSpeechLabels, ...labels };
|
|
447
|
+
const uiLabels = _react.useMemo.call(void 0, () => ({
|
|
448
|
+
...DefaultVoiceToSpeechLabels,
|
|
449
|
+
...labels
|
|
450
|
+
}), [labels]);
|
|
442
451
|
const onClick = (e) => {
|
|
443
452
|
if (e) {
|
|
444
453
|
e.preventDefault();
|
package/dist/index.js
CHANGED
|
@@ -1,5 +1,5 @@
|
|
|
1
1
|
// src/OverlayVoiceToSpeech.tsx
|
|
2
|
-
import React, { useState, useEffect, useRef } from "react";
|
|
2
|
+
import React, { useState, useEffect, useRef, useMemo } from "react";
|
|
3
3
|
import { createPortal } from "react-dom";
|
|
4
4
|
|
|
5
5
|
// src/soundEffects.ts
|
|
@@ -49,7 +49,10 @@ var OverlayVoiceToSpeech = ({ language, onDataReady, onClose, labels, children }
|
|
|
49
49
|
const streamRef = useRef(null);
|
|
50
50
|
const audioContextRef = useRef(null);
|
|
51
51
|
const timeoutRef = useRef(null);
|
|
52
|
-
const uiLabels = { ...DefaultVoiceToSpeechLabels, ...labels };
|
|
52
|
+
const uiLabels = useMemo(() => ({
|
|
53
|
+
...DefaultVoiceToSpeechLabels,
|
|
54
|
+
...labels
|
|
55
|
+
}), [labels]);
|
|
53
56
|
const handleStop = (e) => {
|
|
54
57
|
e.stopPropagation();
|
|
55
58
|
if (status === "listening" && recognitionRef.current) {
|
|
@@ -195,7 +198,7 @@ var OverlayVoiceToSpeech = ({ language, onDataReady, onClose, labels, children }
|
|
|
195
198
|
};
|
|
196
199
|
|
|
197
200
|
// src/OverlayVoiceToSpeechWithPunctuation.tsx
|
|
198
|
-
import React2, { useState as useState2, useEffect as useEffect2, useRef as useRef2 } from "react";
|
|
201
|
+
import React2, { useState as useState2, useEffect as useEffect2, useRef as useRef2, useMemo as useMemo2 } from "react";
|
|
199
202
|
import { createPortal as createPortal2 } from "react-dom";
|
|
200
203
|
|
|
201
204
|
// src/PunctuationLangMap.ts
|
|
@@ -254,7 +257,10 @@ var OverlayVoiceToSpeechWithPunctuation = ({ lang, onDataReady, onClose, labels,
|
|
|
254
257
|
const streamRef = useRef2(null);
|
|
255
258
|
const audioContextRef = useRef2(null);
|
|
256
259
|
const timeoutRef = useRef2(null);
|
|
257
|
-
const uiLabels = { ...DefaultVoiceToSpeechLabels, ...labels };
|
|
260
|
+
const uiLabels = useMemo2(() => ({
|
|
261
|
+
...DefaultVoiceToSpeechLabels,
|
|
262
|
+
...labels
|
|
263
|
+
}), [labels]);
|
|
258
264
|
const handleStop = (e) => {
|
|
259
265
|
e.stopPropagation();
|
|
260
266
|
if (status === "listening" && recognitionRef.current) {
|
|
@@ -415,7 +421,7 @@ var OverlayVoiceToSpeechWithPunctuation = ({ lang, onDataReady, onClose, labels,
|
|
|
415
421
|
};
|
|
416
422
|
|
|
417
423
|
// src/BasicVoiceToSpeechButton.tsx
|
|
418
|
-
import React3, { useState as useState4 } from "react";
|
|
424
|
+
import React3, { useMemo as useMemo3, useState as useState4 } from "react";
|
|
419
425
|
|
|
420
426
|
// src/useDetectVoiceSupport.ts
|
|
421
427
|
import { useState as useState3 } from "react";
|
|
@@ -438,7 +444,10 @@ var BasicVoiceToSpeechButton = ({ language = "en", onDataReady, className, id, s
|
|
|
438
444
|
if (typeof onDataReady === "function")
|
|
439
445
|
onDataReady(data);
|
|
440
446
|
};
|
|
441
|
-
const uiLabels = { ...DefaultVoiceToSpeechLabels, ...labels };
|
|
447
|
+
const uiLabels = useMemo3(() => ({
|
|
448
|
+
...DefaultVoiceToSpeechLabels,
|
|
449
|
+
...labels
|
|
450
|
+
}), [labels]);
|
|
442
451
|
const onClick = (e) => {
|
|
443
452
|
if (e) {
|
|
444
453
|
e.preventDefault();
|
package/package.json
CHANGED
|
@@ -1,7 +1,7 @@
|
|
|
1
1
|
{
|
|
2
2
|
"name": "react-voice-to-speech",
|
|
3
3
|
"description": "A lightweight, customizable React component and hook for voice-to-text recognition using the Web Speech API. Includes a built-in recording overlay.",
|
|
4
|
-
"version": "1.0.3",
|
|
4
|
+
"version": "1.0.4",
|
|
5
5
|
"type": "module",
|
|
6
6
|
"sideEffects": [
|
|
7
7
|
"**/*.css"
|