react-voice-to-speech 1.0.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/LICENSE ADDED
@@ -0,0 +1,21 @@
+ MIT License
+
+ Copyright (c) 2026 Felipe Carrillo
+
+ Permission is hereby granted, free of charge, to any person obtaining a copy
+ of this software and associated documentation files (the "Software"), to deal
+ in the Software without restriction, including without limitation the rights
+ to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ copies of the Software, and to permit persons to whom the Software is
+ furnished to do so, subject to the following conditions:
+
+ The above copyright notice and this permission notice shall be included in all
+ copies or substantial portions of the Software.
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ SOFTWARE.
package/README.md ADDED
@@ -0,0 +1,154 @@
+ # react-voice-to-speech
+
+ A lightweight, customizable React component and hook for **voice-to-text** input using the Web Speech API.
+
+ ## 🚀 Features
+
+ * **Simple Integration:** Use the pre-built button and style it to your needs.
+ * **Real-time Overlay:** Built-in overlay UI for recording with live feedback.
+ * **SCSS Support:** Easily themeable with CSS variables or custom classes.
+ * **Modern Testing:** 100% test coverage using Vitest Browser Mode.
+
+ ---
+
+ ## 📦 Installation
+
+ ```bash
+ npm install react-voice-to-speech
+ ```
+
+ ## 🛠️ Usage
+
+ ### Basic Component
+
+ The easiest way to get started is using the `BasicVoiceToSpeechButton`.
+
+ ```tsx
+ import { BasicVoiceToSpeechButton } from 'react-voice-to-speech';
+ import type { VoiceResult } from 'react-voice-to-speech';
+ import 'react-voice-to-speech/dist/index.css';
+
+ function App() {
+   const handleText = (data: VoiceResult) => {
+     console.log("Transcribed text:", data.text);
+   };
+
+   return (
+     <BasicVoiceToSpeechButton
+       onDataReady={handleText}
+       lang="en-US"
+     />
+   );
+ }
+ ```
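+
+ The `VoiceResult` object passed to `onDataReady` carries the fields the bundle constructs when a final result arrives (sketched here from `dist/index.cjs`; the published type should match):
+
+ ```tsx
+ // Shape of the result object as built in the published bundle:
+ interface VoiceResult {
+   text: string;        // final transcript from SpeechRecognition
+   confidence: number;  // recognition confidence of the final result
+   timestamp: number;   // Date.now() at the moment of completion
+ }
+ ```
+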
+ ---
+
+ ### Advanced Component
+
+ For full customization, build your own button (styled however you like) and drive the `OverlayVoiceToSpeech` overlay directly.
+
+ ```tsx
+ import React, { useState } from 'react';
+ import { OverlayVoiceToSpeech, useDetectVoiceSupport } from 'react-voice-to-speech';
+ import type { VoiceResult, VoiceToSpeechLabels } from 'react-voice-to-speech';
+ import 'react-voice-to-speech/dist/index.css';
+
+ interface Props {
+   language?: string;
+   onTextReady: (data: VoiceResult) => void;
+   labels?: VoiceToSpeechLabels;
+ }
+
+ export const YourVoiceToSpeechButton: React.FC<Props> = ({ language = 'en', onTextReady, labels }) => {
+   const [isOpen, setIsOpen] = useState(false);
+   const isSupported = useDetectVoiceSupport();
+
+   const handleData = (data: VoiceResult) => {
+     setIsOpen(false);
+     if (typeof onTextReady === "function")
+       onTextReady(data);
+   };
+
+   return (
+     <>
+       { /* Your own button here! */ }
+       <button onClick={() => setIsOpen(true)} disabled={!isSupported}>🎤</button>
+       {isOpen && (
+         <OverlayVoiceToSpeech
+           language={language}
+           onDataReady={handleData}
+           onClose={() => setIsOpen(false)}
+           labels={labels}
+         />
+       )}
+     </>
+   );
+ };
+ ```
+ ---
+
+ ## ⚙️ API Reference
+
+ ### `BasicVoiceToSpeechButton` Props
+
+ | Prop | Type | Default | Description |
+ | --- | --- | --- | --- |
+ | `onDataReady` | `(data: VoiceResult) => void` | **Required** | Callback fired when transcription is complete. |
+ | `lang` | `string` | `'en'` | BCP 47 language tag (e.g., `'en-US'`, `'es-ES'`). |
+ | `className` | `string` | — | Custom class for the button. |
+ | `id` | `string` | — | `id` attribute for the button. |
+ | `style` | `React.CSSProperties` | — | Inline styles for the button. |
+ | `children` | `React.ReactNode` | 🎤 | Custom button content (e.g., your own icon). |
+ | `labels` | `VoiceToSpeechLabels` | `DefaultVoiceToSpeechLabels` | Override the built-in UI labels. |
+
+ ### `OverlayVoiceToSpeech` Props
+
+ | Prop | Type | Default | Description |
+ | --- | --- | --- | --- |
+ | `onDataReady` | `(data: VoiceResult) => void` | **Required** | Callback fired when transcription is complete. |
+ | `onClose` | `() => void` | **Required** | Called when the overlay is dismissed or finishes. |
+ | `language` | `string` | — | BCP 47 language tag (e.g., `'es-ES'`, `'fr-FR'`). |
+ | `labels` | `VoiceToSpeechLabels` | `DefaultVoiceToSpeechLabels` | Override the built-in UI labels. |
+ | `children` | `React.ReactNode` | — | Content rendered inside the mic circle. |
+
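+ The default label values ship in the exported `DefaultVoiceToSpeechLabels` object. A minimal sketch of overriding a couple of them (key names taken from the published bundle):
+
+ ```tsx
+ import React from 'react';
+ import { BasicVoiceToSpeechButton, DefaultVoiceToSpeechLabels } from 'react-voice-to-speech';
+ import type { VoiceResult } from 'react-voice-to-speech';
+
+ // The components merge custom labels over the defaults, so only the
+ // keys you want to change need to be provided.
+ const labels = {
+   ...DefaultVoiceToSpeechLabels,
+   listeningText: 'Speak now...',
+   nothingReceived: 'No speech detected. Try again?',
+ };
+
+ export const LabeledMic = () => (
+   <BasicVoiceToSpeechButton lang="en-US" onDataReady={(d: VoiceResult) => console.log(d.text)} labels={labels} />
+ );
+ ```
+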
+ ---
+
+ ## 🎙️ Browser Support & Permissions
+
+ This library relies on the **Web Speech API**.
+
+ * **Supported:** Chrome, Edge, Safari.
+ * **Not supported:** Firefox (no `SpeechRecognition` by default) and Internet Explorer.
+ * **Permissions:** Browsers require a secure context (**HTTPS** or `localhost`) to access the microphone. The `useDetectVoiceSupport` hook checks for both conditions, as sketched below.
+
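+ A minimal sketch of the same check the hook performs internally (mirroring the logic in the published bundle):
+
+ ```tsx
+ // Mirrors useDetectVoiceSupport: the API must exist (standard or
+ // webkit-prefixed) and the page must be served from a secure context.
+ const hasAPI =
+   typeof window !== 'undefined' &&
+   !!((window as any).SpeechRecognition || (window as any).webkitSpeechRecognition);
+
+ const canUseVoice = hasAPI && window.isSecureContext;
+ ```
+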
+ ---
+
+ ## 🧪 Development & Testing
+
+ We use **Vitest Browser Mode** to test the component in a real Chromium environment.
+
+ ```bash
+ # Run tests
+ npm run test
+
+ # Run tests in watch mode (interactive)
+ npm run test:watch
+ ```
+
+ ### Mocking Speech in Tests
+
+ If you want to test your implementation without a physical microphone, see the `tests/voice.test.tsx` file for examples of mocking the `SpeechRecognition` global object.
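+
+ As a starting point, here is a minimal sketch of such a stub (the class and the `emitFinal` helper are illustrative, not part of the library):
+
+ ```tsx
+ import { vi } from 'vitest';
+
+ // Illustrative stub: just enough of the SpeechRecognition surface for the
+ // overlay to start, plus a helper that pushes a final result into onresult.
+ class FakeSpeechRecognition {
+   lang = '';
+   continuous = false;
+   interimResults = false;
+   onstart: (() => void) | null = null;
+   onresult: ((event: any) => void) | null = null;
+   onerror: ((event: any) => void) | null = null;
+   start() { this.onstart?.(); }
+   stop() {}
+   abort() {}
+   emitFinal(transcript: string) {
+     // A SpeechRecognitionResult is array-like with an isFinal flag.
+     const result = Object.assign([{ transcript, confidence: 0.9 }], { isFinal: true });
+     this.onresult?.({ resultIndex: 0, results: [result] });
+   }
+ }
+
+ vi.stubGlobal('SpeechRecognition', FakeSpeechRecognition);
+ ```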
147
+
148
+ ---
149
+
150
+ ## 📄 License
151
+
152
+ MIT © [Your Name/Organization]
153
+
154
+ ---
package/dist/index.cjs ADDED
@@ -0,0 +1,469 @@
+ "use strict";Object.defineProperty(exports, "__esModule", {value: true}); function _interopRequireDefault(obj) { return obj && obj.__esModule ? obj : { default: obj }; }// src/OverlayVoiceToSpeech.tsx
+ var _react = require('react'); var _react2 = _interopRequireDefault(_react);
+ var _reactdom = require('react-dom');
+
+ // src/soundEffects.ts
+ var playTone = (freq, type, duration) => {
+   const AudioContextClass = window.AudioContext || window.webkitAudioContext;
+   const ctx = new AudioContextClass();
+   const osc = ctx.createOscillator();
+   const gain = ctx.createGain();
+   osc.type = type;
+   osc.frequency.setValueAtTime(freq, ctx.currentTime);
+   gain.gain.setValueAtTime(0, ctx.currentTime);
+   gain.gain.linearRampToValueAtTime(0.2, ctx.currentTime + 0.01);
+   gain.gain.exponentialRampToValueAtTime(0.01, ctx.currentTime + duration);
+   osc.connect(gain);
+   gain.connect(ctx.destination);
+   osc.start();
+   osc.stop(ctx.currentTime + duration);
+ };
+ var playStartSound = () => playTone(523.25, "sine", 0.15);
+ var playSuccessSound = () => {
+   playTone(659.25, "sine", 0.1);
+   setTimeout(() => playTone(880, "sine", 0.2), 100);
+ };
+ var playErrorSound = () => {
+   playTone(220, "triangle", 0.1);
+   setTimeout(() => playTone(220, "triangle", 0.1), 150);
+ };
+
+ // src/commonInterfaces.ts
+ var DefaultVoiceToSpeechLabels = {
+   recordButtonAria: "Start voice recording",
+   nothingReceived: "Didn't catch that. Try again?",
+   listeningText: "Listening...",
+   errorPermission: "Microphone access denied",
+   errorPrefix: "Error",
+   deniedIcon: "\u{1F6AB}",
+   errorIcon: "\u26A0\uFE0F"
+ };
+
+ // src/OverlayVoiceToSpeech.tsx
+ var OverlayVoiceToSpeech = ({ language, onDataReady, onClose, labels, children }) => {
+   const [status, setStatus] = _react.useState.call(void 0, "listening");
+   const [errorMessage, setErrorMessage] = _react.useState.call(void 0, "");
+   const [interim, setInterim] = _react.useState.call(void 0, "");
+   const [volume, setVolume] = _react.useState.call(void 0, 0);
+   const recognitionRef = _react.useRef.call(void 0, null);
+   const streamRef = _react.useRef.call(void 0, null);
+   const audioContextRef = _react.useRef.call(void 0, null);
+   const timeoutRef = _react.useRef.call(void 0, null);
+   const uiLabels = { ...DefaultVoiceToSpeechLabels, ...labels };
+   const handleStop = (e) => {
+     e.stopPropagation();
+     if (status === "listening" && recognitionRef.current) {
+       recognitionRef.current.stop();
+       if (!interim) {
+         onClose();
+       }
+     }
+   };
+   const resetSilenceTimer = () => {
+     if (timeoutRef.current) clearTimeout(timeoutRef.current);
+     timeoutRef.current = setTimeout(() => {
+       setStatus((prevStatus) => {
+         if (prevStatus === "listening") {
+           playErrorSound();
+           setErrorMessage(uiLabels.nothingReceived);
+           if (recognitionRef.current) {
+             recognitionRef.current.onresult = null;
+             recognitionRef.current.abort();
+           }
+           setTimeout(() => onClose(), 2e3);
+           return "error";
+         }
+         return prevStatus;
+       });
+     }, 8e3);
+   };
+   _react.useEffect.call(void 0, () => {
+     let isMounted = true;
+     resetSilenceTimer();
+     const SpeechRecognition = window.SpeechRecognition || window.webkitSpeechRecognition;
+     const recog = new SpeechRecognition();
+     recog.continuous = true;
+     recog.interimResults = true;
+     recog.lang = language;
+     recog.onstart = () => {
+       playStartSound();
+     };
+     recog.onresult = (event) => {
+       resetSilenceTimer();
+       let currentInterim = "";
+       for (let i = event.resultIndex; i < event.results.length; ++i) {
+         if (event.results[i].isFinal) {
+           playSuccessSound();
+           setStatus("success");
+           const result = {
+             text: event.results[i][0].transcript,
+             confidence: event.results[i][0].confidence,
+             timestamp: Date.now()
+           };
+           setTimeout(() => {
+             if (isMounted) onDataReady(result);
+           }, 400);
+           return;
+         } else {
+           currentInterim += event.results[i][0].transcript;
+         }
+       }
+       if (isMounted) setInterim(currentInterim);
+     };
+     recog.onerror = (event) => {
+       if (event.error === "no-speech") return;
+       if (event.error === "aborted") return;
+       if (timeoutRef.current) clearTimeout(timeoutRef.current);
+       if (event.error === "not-allowed") {
+         playErrorSound();
+         setStatus("denied");
+         setErrorMessage(uiLabels.errorPermission);
+         setTimeout(() => {
+           if (isMounted) onClose();
+         }, 2e3);
+       } else {
+         playErrorSound();
+         setStatus("error");
+         setErrorMessage(`${uiLabels.errorPrefix || "Error"}: ${event.error}`);
+         setTimeout(() => {
+           if (isMounted) onClose();
+         }, 2e3);
+       }
+     };
+     recog.start();
+     recognitionRef.current = recog;
+     let animationFrame;
+     navigator.mediaDevices.getUserMedia({ audio: true }).then((stream) => {
+       if (!isMounted) {
+         stream.getTracks().forEach((t) => t.stop());
+         return;
+       }
+       streamRef.current = stream;
+       const ctx = new (window.AudioContext || window.webkitAudioContext)();
+       audioContextRef.current = ctx;
+       const source = ctx.createMediaStreamSource(stream);
+       const analyzer = ctx.createAnalyser();
+       analyzer.fftSize = 64;
+       source.connect(analyzer);
+       const data = new Uint8Array(analyzer.frequencyBinCount);
+       const update = () => {
+         analyzer.getByteFrequencyData(data);
+         const avg = data.reduce((a, b) => a + b) / data.length;
+         if (isMounted) {
+           setVolume(avg);
+           animationFrame = requestAnimationFrame(update);
+         }
+       };
+       update();
+     }).catch(() => {
+       if (isMounted) {
+         setStatus("denied");
+         setErrorMessage(uiLabels.errorPermission);
+         setTimeout(() => {
+           if (isMounted) onClose();
+         }, 2e3);
+       }
+     });
+     return () => {
+       isMounted = false;
+       if (timeoutRef.current) clearTimeout(timeoutRef.current);
+       if (recognitionRef.current) recognitionRef.current.abort();
+       if (streamRef.current) streamRef.current.getTracks().forEach((t) => t.stop());
+       if (audioContextRef.current) audioContextRef.current.close();
+       cancelAnimationFrame(animationFrame);
+     };
+   }, []);
+   return _reactdom.createPortal.call(void 0,
+     /* @__PURE__ */ _react2.default.createElement("div", { className: "voice-overlay-backdrop", onClick: onClose }, /* @__PURE__ */ _react2.default.createElement("div", { className: "voice-modal", onClick: (e) => e.stopPropagation() }, status === "listening" || status === "success" ? /* @__PURE__ */ _react2.default.createElement(_react2.default.Fragment, null, /* @__PURE__ */ _react2.default.createElement("div", { className: "interim-text" }, status === "success" ? "" : interim || uiLabels.listeningText), /* @__PURE__ */ _react2.default.createElement(
+       "div",
+       {
+         className: `mic-section ${status}`,
+         onClick: handleStop,
+         style: { cursor: "pointer" }
+       },
+       /* @__PURE__ */ _react2.default.createElement(
+         "div",
+         {
+           className: "pulse-ring",
+           style: { transform: status === "success" ? "scale(1.2)" : `scale(${1 + volume / 50})` }
+         }
+       ),
+       /* @__PURE__ */ _react2.default.createElement("div", { className: "mic-circle" }, children)
+     )) : /* @__PURE__ */ _react2.default.createElement("div", { className: "status-container" }, /* @__PURE__ */ _react2.default.createElement("span", { className: "status-icon" }, status === "denied" ? uiLabels.deniedIcon : uiLabels.errorIcon), /* @__PURE__ */ _react2.default.createElement("div", { className: "error-message" }, errorMessage)))),
+     document.body
+   );
+ };
+
+ // src/OverlayVoiceToSpeechWithPunctuation.tsx
+
+
+
+ // src/PunctuationLangMap.ts
+ var PunctuationLangMap = {
+   // WESTERN / LATIN
+   en: { "comma": ",", "coma": ",", "period": ".", "full stop": ".", "question mark": "?", "exclamation point": "!", "colon": ":" },
+   es: { "coma": ",", "punto": ".", "signo de interrogaci\xF3n": "?", "interrogaci\xF3n": "?", "signo de exclamaci\xF3n": "!", "exclamaci\xF3n": "!", "dos puntos": ":" },
+   fr: { "virgule": ",", "point": ".", "point d'interrogation": "?", "point d'exclamation": "!", "deux points": ":" },
+   de: { "komma": ",", "punkt": ".", "fragezeichen": "?", "ausrufezeichen": "!", "doppelpunkt": ":" },
+   it: { "virgola": ",", "punto": ".", "punto interrogativo": "?", "punto esclamativo": "!", "due punti": ":" },
+   pt: { "v\xEDrgula": ",", "ponto": ".", "ponto de interroga\xE7\xE3o": "?", "ponto de exclama\xE7\xE3o": "!", "dois pontos": ":" },
+   nl: { "komma": ",", "punt": ".", "vraagteken": "?", "uitroepteken": "!", "dubbelpunt": ":" },
+   sv: { "komma": ",", "punkt": ".", "fr\xE5getecken": "?", "utropstecken": "!", "kolon": ":" },
+   no: { "komma": ",", "punkt": ".", "sp\xF8rsm\xE5lstegn": "?", "utropstegn": "!", "kolon": ":" },
+   da: { "komma": ",", "punkt": ".", "sp\xF8rgsm\xE5lstegn": "?", "udr\xE5bstegn": "!", "kolon": ":" },
+   fi: { "pilkku": ",", "piste": ".", "kysymysmerkki": "?", "huutomerkki": "!", "kaksoispiste": ":" },
+   pl: { "przecinek": ",", "kropka": ".", "znak zapytania": "?", "wykrzyknik": "!", "dwukropek": ":" },
+   cs: { "\u010D\xE1rka": ",", "te\u010Dka": ".", "otazn\xEDk": "?", "vyk\u0159i\u010Dn\xEDk": "!", "dvojte\u010Dka": ":" },
+   sk: { "\u010Diarka": ",", "bodka": ".", "ot\xE1znik": "?", "v\xFDkri\u010Dn\xEDk": "!", "dvojbodka": ":" },
+   ro: { "virgul\u0103": ",", "punct": ".", "semnul \xEEntreb\u0103rii": "?", "semnul exclam\u0103rii": "!", "dou\u0103 puncte": ":" },
+   hu: { "vessz\u0151": ",", "pont": ".", "k\xE9rd\u0151jel": "?", "felki\xE1lt\xF3jel": "!", "kett\u0151spont": ":" },
+   el: { "\u03BA\u03CC\u03BC\u03BC\u03B1": ",", "\u03C4\u03B5\u03BB\u03B5\u03AF\u03B1": ".", "\u03B5\u03C1\u03C9\u03C4\u03B7\u03BC\u03B1\u03C4\u03B9\u03BA\u03CC": ";", "\u03B8\u03B1\u03C5\u03BC\u03B1\u03C3\u03C4\u03B9\u03BA\u03CC": "!", "\u03AC\u03BD\u03C9 \u03BA\u03AC\u03C4\u03C9 \u03C4\u03B5\u03BB\u03B5\u03AF\u03B1": ":" },
+   // SLAVIC
+   ru: { "\u0437\u0430\u043F\u044F\u0442\u0430\u044F": ",", "\u0442\u043E\u0447\u043A\u0430": ".", "\u0432\u043E\u043F\u0440\u043E\u0441\u0438\u0442\u0435\u043B\u044C\u043D\u044B\u0439 \u0437\u043D\u0430\u043A": "?", "\u0432\u043E\u0441\u043A\u043B\u0438\u0446\u0430\u0442\u0435\u043B\u044C\u043D\u044B\u0439 \u0437\u043D\u0430\u043A": "!", "\u0434\u0432\u043E\u0435\u0442\u043E\u0447\u0438\u0435": ":" },
+   uk: { "\u043A\u043E\u043C\u0430": ",", "\u043A\u0440\u0430\u043F\u043A\u0430": ".", "\u0437\u043D\u0430\u043A \u043F\u0438\u0442\u0430\u043D\u043D\u044F": "?", "\u0437\u043D\u0430\u043A \u043E\u043A\u043B\u0438\u043A\u0443": "!", "\u0434\u0432\u043E\u043A\u0440\u0430\u043F\u043A\u0430": ":" },
+   bg: { "\u0437\u0430\u043F\u0435\u0442\u0430\u044F": ",", "\u0442\u043E\u0447\u043A\u0430": ".", "\u0432\u044A\u043F\u0440\u043E\u0441\u0438\u0442\u0435\u043B\u0435\u043D \u0437\u043D\u0430\u043A": "?", "\u0443\u0434\u0438\u0432\u0438\u0442\u0435\u043B\u0435\u043D \u0437\u043D\u0430\u043A": "!", "\u0434\u0432\u043E\u0435\u0442\u043E\u0447\u0438\u0435": ":" },
+   // ASIAN (Using full-width punctuation where applicable)
+   zh: { "\u9017\u53F7": "\uFF0C", "\u53E5\u53F7": "\u3002", "\u95EE\u53F7": "\uFF1F", "\u611F\u53F9\u53F7": "\uFF01", "\u5192\u53F7": "\uFF1A" },
+   ja: { "\u3066\u3093": "\u3001", "\u307E\u308B": "\u3002", "\u306F\u3066\u306A": "\uFF1F", "\u3073\u3063\u304F\u308A": "\uFF01", "\u30B3\u30ED\u30F3": "\uFF1A" },
+   ko: { "\uC27C\uD45C": ",", "\uB9C8\uCE68\uD45C": ".", "\uBB3C\uC74C\uD45C": "?", "\uB290\uB08C\uD45C": "!", "\uCF5C\uB860": ":" },
+   vi: { "d\u1EA5u ph\u1EA9y": ",", "d\u1EA5u ch\u1EA5m": ".", "d\u1EA5u h\u1ECFi": "?", "d\u1EA5u ch\u1EA5m than": "!", "d\u1EA5u hai ch\u1EA5m": ":" },
+   th: { "\u0E08\u0E38\u0E25\u0E20\u0E32\u0E04": ",", "\u0E21\u0E2B\u0E31\u0E1E\u0E20\u0E32\u0E04": ".", "\u0E1B\u0E23\u0E31\u0E28\u0E19\u0E35": "?", "\u0E2D\u0E31\u0E28\u0E40\u0E08\u0E23\u0E35\u0E22\u0E4C": "!", "\u0E17\u0E27\u0E34\u0E20\u0E32\u0E04": ":" },
+   // SEMITIC (RTL Support)
+   ar: { "\u0641\u0627\u0635\u0644\u0629": "\u060C", "\u0646\u0642\u0637\u0629": ".", "\u0639\u0644\u0627\u0645\u0629 \u0627\u0633\u062A\u0641\u0647\u0627\u0645": "\u061F", "\u0639\u0644\u0627\u0645\u0629 \u062A\u0639\u062C\u0628": "!", "\u0646\u0642\u0637\u062A\u0627\u0646": ":" },
+   he: { "\u05E4\u05E1\u05D9\u05E7": ",", "\u05E0\u05E7\u05D5\u05D3\u05D4": ".", "\u05E1\u05D9\u05DE\u05DF \u05E9\u05D0\u05DC\u05D4": "?", "\u05E1\u05D9\u05DE\u05DF \u05E7\u05E8\u05D9\u05D0\u05D4": "!", "\u05E0\u05E7\u05D5\u05D3\u05EA\u05D9\u05D9\u05DD": ":" },
+   fa: { "\u06A9\u0627\u0645\u0627": "\u060C", "\u0646\u0642\u0637\u0647": ".", "\u0639\u0644\u0627\u0645\u0629 \u0633\u0648\u0627\u0644": "\u061F", "\u0639\u0644\u0627\u0645\u0629 \u062A\u0639\u062C\u0628": "!", "\u062F\u0648 \u0646\u0642\u0637\u0647": ":" },
+   // SOUTH ASIAN
+   hi: { "\u0905\u0932\u094D\u092A\u0935\u093F\u0930\u093E\u092E": ",", "\u092A\u0942\u0930\u094D\u0923 \u0935\u093F\u0930\u093E\u092E": ".", "\u092A\u094D\u0930\u0936\u094D\u0928\u0935\u093E\u091A\u0915": "?", "\u0935\u093F\u0938\u094D\u092E\u092F\u093E\u0926\u093F\u092C\u094B\u0927\u0915": "!", "\u0915\u094B\u0932\u0928": ":" },
+   bn: { "\u0995\u09AE\u09BE": ",", "\u09A6\u09BE\u0981\u09A1\u09BC\u09BF": "\u0964", "\u09AA\u09CD\u09B0\u09B6\u09CD\u09A8\u09AC\u09CB\u09A7\u0995 \u099A\u09BF\u09B9\u09CD\u09A8": "?", "\u09AC\u09BF\u09B8\u09CD\u09AE\u09AF\u09BC\u09B8\u09C2\u099A\u0995 \u099A\u09BF\u09B9\u09CD\u09A8": "!", "\u0995\u09CB\u09B2\u09A8": ":" },
+   ta: { "\u0B95\u0BBE\u0BB1\u0BCD\u0BAA\u0BC1\u0BB3\u0BCD\u0BB3\u0BBF": ",", "\u0BAE\u0BC1\u0BB1\u0BCD\u0BB1\u0BC1\u0BAA\u0BCD\u0BAA\u0BC1\u0BB3\u0BCD\u0BB3\u0BBF": ".", "\u0B95\u0BC7\u0BB3\u0BCD\u0BB5\u0BBF\u0B95\u0BCD\u0B95\u0BC1\u0BB1\u0BBF": "?", "\u0BB5\u0BBF\u0BAF\u0BAA\u0BCD\u0BAA\u0BC1\u0B95\u0BCD\u0B95\u0BC1\u0BB1\u0BBF": "!", "\u0BAE\u0BC1\u0B95\u0BCD\u0B95\u0BB1\u0BCD\u0BAA\u0BC1\u0BB3\u0BCD\u0BB3\u0BBF": ":" },
+   te: { "\u0C15\u0C3E\u0C2E\u0C3E": ",", "\u0C2A\u0C41\u0C32\u0C4D\u200C\u0C38\u0C4D\u0C1F\u0C3E\u0C2A\u0C4D": ".", "\u0C2A\u0C4D\u0C30\u0C36\u0C4D\u0C28\u0C3E\u0C30\u0C4D\u0C25\u0C15\u0C02": "?", "\u0C06\u0C36\u0C4D\u0C1A\u0C30\u0C4D\u0C2F\u0C3E\u0C30\u0C4D\u0C25\u0C15\u0C02": "!", "\u0C15\u0C4B\u0C32\u0C28\u0C4D": ":" },
+   // OTHER MAJOR
+   tr: { "virg\xFCl": ",", "nokta": ".", "soru i\u015Fareti": "?", "\xFCnlem i\u015Fareti": "!", "iki nokta": ":" },
+   id: { "koma": ",", "titik": ".", "tanda tanya": "?", "tanda seru": "!", "titik dua": ":" },
+   ms: { "koma": ",", "titik": ".", "tanda soal": "?", "tanda seru": "!", "titik bertindih": ":" },
+   sw: { "mkato": ",", "nukta": ".", "alama ya swali": "?", "alama ya hisia": "!", "nukta mbili": ":" }
+ };
+
+ // src/OverlayVoiceToSpeechWithPunctuation.tsx
+ var OverlayVoiceToSpeechWithPunctuation = ({ lang, onDataReady, onClose, labels, children }) => {
+   const [status, setStatus] = _react.useState.call(void 0, "listening");
+   const [errorMessage, setErrorMessage] = _react.useState.call(void 0, "");
+   const [interim, setInterim] = _react.useState.call(void 0, "");
+   const [volume, setVolume] = _react.useState.call(void 0, 0);
+   const recognitionRef = _react.useRef.call(void 0, null);
+   const streamRef = _react.useRef.call(void 0, null);
+   const audioContextRef = _react.useRef.call(void 0, null);
+   const timeoutRef = _react.useRef.call(void 0, null);
+   const uiLabels = { ...DefaultVoiceToSpeechLabels, ...labels };
+   const handleStop = (e) => {
+     e.stopPropagation();
+     if (status === "listening" && recognitionRef.current) {
+       recognitionRef.current.stop();
+       if (!interim) {
+         onClose();
+       }
+     }
+   };
+   const resetSilenceTimer = () => {
+     if (timeoutRef.current) clearTimeout(timeoutRef.current);
+     timeoutRef.current = setTimeout(() => {
+       setStatus((prevStatus) => {
+         if (prevStatus === "listening") {
+           playErrorSound();
+           setErrorMessage(uiLabels.nothingReceived);
+           if (recognitionRef.current) {
+             recognitionRef.current.onresult = null;
+             recognitionRef.current.abort();
+           }
+           setTimeout(() => onClose(), 2e3);
+           return "error";
+         }
+         return prevStatus;
+       });
+     }, 8e3);
+   };
+   _react.useEffect.call(void 0, () => {
+     let isMounted = true;
+     resetSilenceTimer();
+     const SpeechRecognition = window.SpeechRecognition || window.webkitSpeechRecognition;
+     const recog = new SpeechRecognition();
+     recog.continuous = true;
+     recog.interimResults = true;
+     recog.lang = lang;
+     recog.onstart = () => {
+       playStartSound();
+     };
+     recog.onresult = (event) => {
+       resetSilenceTimer();
+       let currentInterim = "";
+       const shortLang = lang.split("-")[0].toLowerCase();
+       const activeMap = PunctuationLangMap[shortLang] || PunctuationLangMap["en"];
+       const sortedKeys = Object.keys(activeMap).sort((a, b) => b.length - a.length);
+       for (let i = event.resultIndex; i < event.results.length; ++i) {
+         let transcript = event.results[i][0].transcript;
+         sortedKeys.forEach((key) => {
+           const isNoSpaceLang = ["zh", "ja", "th"].includes(shortLang);
+           if (isNoSpaceLang) {
+             const regex = new RegExp(key, "gi");
+             transcript = transcript.replace(regex, activeMap[key]);
+           } else {
+             const regex = new RegExp(`\\s*${key}\\s*`, "gi");
+             transcript = transcript.replace(regex, ` ${activeMap[key]} `);
+           }
+         });
+         transcript = transcript.replace(/\s+/g, " ").replace(/\s([,.!?;:])/g, "$1").trim();
+         if (event.results[i].isFinal) {
+           playSuccessSound();
+           setStatus("success");
+           const result = {
+             text: transcript,
+             confidence: event.results[i][0].confidence,
+             timestamp: Date.now()
+           };
+           setTimeout(() => {
+             if (isMounted) onDataReady(result);
+           }, 400);
+           return;
+         } else {
+           currentInterim += transcript;
+         }
+       }
+       if (isMounted) setInterim(currentInterim);
+     };
+     recog.onerror = (event) => {
+       if (event.error === "no-speech") return;
+       if (event.error === "aborted") return;
+       if (timeoutRef.current) clearTimeout(timeoutRef.current);
+       if (event.error === "not-allowed") {
+         playErrorSound();
+         setStatus("denied");
+         setErrorMessage(uiLabels.errorPermission);
+         setTimeout(() => {
+           if (isMounted) onClose();
+         }, 2e3);
+       } else {
+         playErrorSound();
+         setStatus("error");
+         setErrorMessage(`${uiLabels.errorPrefix || "Error"}: ${event.error}`);
+         setTimeout(() => {
+           if (isMounted) onClose();
+         }, 2e3);
+       }
+     };
+     recog.start();
+     recognitionRef.current = recog;
+     let animationFrame;
+     navigator.mediaDevices.getUserMedia({ audio: true }).then((stream) => {
+       if (!isMounted) {
+         stream.getTracks().forEach((t) => t.stop());
+         return;
+       }
+       streamRef.current = stream;
+       const ctx = new (window.AudioContext || window.webkitAudioContext)();
+       audioContextRef.current = ctx;
+       const source = ctx.createMediaStreamSource(stream);
+       const analyzer = ctx.createAnalyser();
+       analyzer.fftSize = 64;
+       source.connect(analyzer);
+       const data = new Uint8Array(analyzer.frequencyBinCount);
+       const update = () => {
+         analyzer.getByteFrequencyData(data);
+         const avg = data.reduce((a, b) => a + b) / data.length;
+         if (isMounted) {
+           setVolume(avg);
+           animationFrame = requestAnimationFrame(update);
+         }
+       };
+       update();
+     }).catch(() => {
+       if (isMounted) {
+         setStatus("denied");
+         setErrorMessage(uiLabels.errorPermission);
+         setTimeout(() => {
+           if (isMounted) onClose();
+         }, 2e3);
+       }
+     });
+     return () => {
+       isMounted = false;
+       if (timeoutRef.current) clearTimeout(timeoutRef.current);
+       if (recognitionRef.current) recognitionRef.current.abort();
+       if (streamRef.current) streamRef.current.getTracks().forEach((t) => t.stop());
+       if (audioContextRef.current) audioContextRef.current.close();
+       cancelAnimationFrame(animationFrame);
+     };
+   }, [lang]);
+   return _reactdom.createPortal.call(void 0,
+     /* @__PURE__ */ _react2.default.createElement("div", { className: "voice-overlay-backdrop", onClick: onClose }, /* @__PURE__ */ _react2.default.createElement("div", { className: "voice-modal", onClick: (e) => e.stopPropagation() }, status === "listening" || status === "success" ? /* @__PURE__ */ _react2.default.createElement(_react2.default.Fragment, null, /* @__PURE__ */ _react2.default.createElement("div", { className: "interim-text" }, status === "success" ? "" : interim || uiLabels.listeningText), /* @__PURE__ */ _react2.default.createElement(
+       "div",
+       {
+         className: `mic-section ${status}`,
+         onClick: handleStop,
+         style: { cursor: "pointer" }
+       },
+       /* @__PURE__ */ _react2.default.createElement(
+         "div",
+         {
+           className: "pulse-ring",
+           style: { transform: status === "success" ? "scale(1.2)" : `scale(${1 + volume / 50})` }
+         }
+       ),
+       /* @__PURE__ */ _react2.default.createElement("div", { className: "mic-circle" }, children)
+     )) : /* @__PURE__ */ _react2.default.createElement("div", { className: "status-container" }, /* @__PURE__ */ _react2.default.createElement("span", { className: "status-icon" }, status === "denied" ? uiLabels.deniedIcon : uiLabels.errorIcon), /* @__PURE__ */ _react2.default.createElement("div", { className: "error-message" }, errorMessage)))),
+     document.body
+   );
+ };
+
+ // src/BasicVoiceToSpeechButton.tsx
+
+
+ // src/useDetectVoiceSupport.ts
+
+ var useDetectVoiceSupport = () => {
+   const [isSupported] = _react.useState.call(void 0, () => {
+     if (typeof window === "undefined") return false;
+     const hasAPI = !!(window.SpeechRecognition || window.webkitSpeechRecognition);
+     const isSecure = window.isSecureContext;
+     return hasAPI && isSecure;
+   });
+   return isSupported;
+ };
+
+ // src/BasicVoiceToSpeechButton.tsx
+ var BasicVoiceToSpeechButton = ({ lang = "en", onDataReady, className, id, style, children, labels }) => {
+   const [isOpen, setIsOpen] = _react.useState.call(void 0, false);
+   const isSupported = useDetectVoiceSupport();
+   const handleData = (data) => {
+     setIsOpen(false);
+     if (typeof onDataReady === "function")
+       onDataReady(data);
+   };
+   const uiLabels = { ...DefaultVoiceToSpeechLabels, ...labels };
+   return /* @__PURE__ */ _react2.default.createElement(_react2.default.Fragment, null, /* @__PURE__ */ _react2.default.createElement(
+     "button",
+     {
+       onClick: () => setIsOpen(true),
+       disabled: !isSupported,
+       className,
+       id,
+       style,
+       "aria-label": uiLabels.recordButtonAria
+     },
+     children || "\u{1F3A4}"
+   ), isOpen && /* @__PURE__ */ _react2.default.createElement(
+     OverlayVoiceToSpeech,
+     {
+       language: lang,
+       labels,
+       onDataReady: handleData,
+       onClose: () => setIsOpen(false)
+     }
+   ));
+ };
+
+
+
+
+
+
+ exports.BasicVoiceToSpeechButton = BasicVoiceToSpeechButton; exports.DefaultVoiceToSpeechLabels = DefaultVoiceToSpeechLabels; exports.OverlayVoiceToSpeech = OverlayVoiceToSpeech; exports.OverlayVoiceToSpeechWithPunctuation = OverlayVoiceToSpeechWithPunctuation; exports.useDetectVoiceSupport = useDetectVoiceSupport;