react-native-davoice 1.0.4
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +319 -0
- package/TTSRNBridge.podspec +38 -0
- package/android/.gradle/8.9/checksums/checksums.lock +0 -0
- package/android/.gradle/8.9/dependencies-accessors/gc.properties +0 -0
- package/android/.gradle/8.9/fileChanges/last-build.bin +0 -0
- package/android/.gradle/8.9/fileHashes/fileHashes.lock +0 -0
- package/android/.gradle/8.9/gc.properties +0 -0
- package/android/.gradle/buildOutputCleanup/buildOutputCleanup.lock +0 -0
- package/android/.gradle/buildOutputCleanup/cache.properties +2 -0
- package/android/.gradle/vcs-1/gc.properties +0 -0
- package/android/build.gradle +47 -0
- package/android/libs/com/davoice/tts/1.0.0/tts-1.0.0.aar +0 -0
- package/android/libs/com/davoice/tts/1.0.0/tts-1.0.0.aar.md5 +1 -0
- package/android/libs/com/davoice/tts/1.0.0/tts-1.0.0.aar.sha1 +1 -0
- package/android/libs/com/davoice/tts/1.0.0/tts-1.0.0.pom +38 -0
- package/android/libs/com/davoice/tts/1.0.0/tts-1.0.0.pom.md5 +1 -0
- package/android/libs/com/davoice/tts/1.0.0/tts-1.0.0.pom.sha1 +1 -0
- package/android/settings.gradle +2 -0
- package/android/src/main/AndroidManifest.xml +14 -0
- package/android/src/main/java/com/davoice/rn/DaVoicePackage.java +29 -0
- package/android/src/main/java/com/davoice/stt/rn/STTModule.kt +208 -0
- package/android/src/main/java/com/davoice/tts/rn/DaVoiceTTSBridge.java +733 -0
- package/android/src/main/libs/MyLibrary-release.aar +0 -0
- package/app.plugin.js +60 -0
- package/ios/STTRNBridge/STTBridge.h +7 -0
- package/ios/STTRNBridge/STTBridge.m +130 -0
- package/ios/SpeechBridge/SpeechBridge.h +7 -0
- package/ios/SpeechBridge/SpeechBridge.m +761 -0
- package/ios/TTSRNBridge/DaVoiceTTSBridge.h +7 -0
- package/ios/TTSRNBridge/DaVoiceTTSBridge.m +177 -0
- package/ios/TTSRNBridge/DavoiceTTS.xcframework/Info.plist +44 -0
- package/ios/TTSRNBridge/DavoiceTTS.xcframework/ios-arm64/DavoiceTTS.framework/DavoiceTTS +0 -0
- package/ios/TTSRNBridge/DavoiceTTS.xcframework/ios-arm64/DavoiceTTS.framework/Headers/DavoiceTTS-Swift.h +424 -0
- package/ios/TTSRNBridge/DavoiceTTS.xcframework/ios-arm64/DavoiceTTS.framework/Info.plist +0 -0
- package/ios/TTSRNBridge/DavoiceTTS.xcframework/ios-arm64/DavoiceTTS.framework/Modules/DavoiceTTS.swiftmodule/arm64-apple-ios.abi.json +13253 -0
- package/ios/TTSRNBridge/DavoiceTTS.xcframework/ios-arm64/DavoiceTTS.framework/Modules/DavoiceTTS.swiftmodule/arm64-apple-ios.private.swiftinterface +213 -0
- package/ios/TTSRNBridge/DavoiceTTS.xcframework/ios-arm64/DavoiceTTS.framework/Modules/DavoiceTTS.swiftmodule/arm64-apple-ios.swiftdoc +0 -0
- package/ios/TTSRNBridge/DavoiceTTS.xcframework/ios-arm64/DavoiceTTS.framework/Modules/DavoiceTTS.swiftmodule/arm64-apple-ios.swiftinterface +213 -0
- package/ios/TTSRNBridge/DavoiceTTS.xcframework/ios-arm64/DavoiceTTS.framework/Modules/module.modulemap +4 -0
- package/ios/TTSRNBridge/DavoiceTTS.xcframework/ios-arm64_x86_64-simulator/DavoiceTTS.framework/DavoiceTTS +0 -0
- package/ios/TTSRNBridge/DavoiceTTS.xcframework/ios-arm64_x86_64-simulator/DavoiceTTS.framework/Headers/DavoiceTTS-Swift.h +844 -0
- package/ios/TTSRNBridge/DavoiceTTS.xcframework/ios-arm64_x86_64-simulator/DavoiceTTS.framework/Info.plist +0 -0
- package/ios/TTSRNBridge/DavoiceTTS.xcframework/ios-arm64_x86_64-simulator/DavoiceTTS.framework/Modules/DavoiceTTS.swiftmodule/arm64-apple-ios-simulator.abi.json +13253 -0
- package/ios/TTSRNBridge/DavoiceTTS.xcframework/ios-arm64_x86_64-simulator/DavoiceTTS.framework/Modules/DavoiceTTS.swiftmodule/arm64-apple-ios-simulator.private.swiftinterface +213 -0
- package/ios/TTSRNBridge/DavoiceTTS.xcframework/ios-arm64_x86_64-simulator/DavoiceTTS.framework/Modules/DavoiceTTS.swiftmodule/arm64-apple-ios-simulator.swiftdoc +0 -0
- package/ios/TTSRNBridge/DavoiceTTS.xcframework/ios-arm64_x86_64-simulator/DavoiceTTS.framework/Modules/DavoiceTTS.swiftmodule/arm64-apple-ios-simulator.swiftinterface +213 -0
- package/ios/TTSRNBridge/DavoiceTTS.xcframework/ios-arm64_x86_64-simulator/DavoiceTTS.framework/Modules/DavoiceTTS.swiftmodule/x86_64-apple-ios-simulator.abi.json +13253 -0
- package/ios/TTSRNBridge/DavoiceTTS.xcframework/ios-arm64_x86_64-simulator/DavoiceTTS.framework/Modules/DavoiceTTS.swiftmodule/x86_64-apple-ios-simulator.private.swiftinterface +213 -0
- package/ios/TTSRNBridge/DavoiceTTS.xcframework/ios-arm64_x86_64-simulator/DavoiceTTS.framework/Modules/DavoiceTTS.swiftmodule/x86_64-apple-ios-simulator.swiftdoc +0 -0
- package/ios/TTSRNBridge/DavoiceTTS.xcframework/ios-arm64_x86_64-simulator/DavoiceTTS.framework/Modules/DavoiceTTS.swiftmodule/x86_64-apple-ios-simulator.swiftinterface +213 -0
- package/ios/TTSRNBridge/DavoiceTTS.xcframework/ios-arm64_x86_64-simulator/DavoiceTTS.framework/Modules/module.modulemap +4 -0
- package/ios/TTSRNBridge/DavoiceTTS.xcframework/ios-arm64_x86_64-simulator/DavoiceTTS.framework/_CodeSignature/CodeDirectory +0 -0
- package/ios/TTSRNBridge/DavoiceTTS.xcframework/ios-arm64_x86_64-simulator/DavoiceTTS.framework/_CodeSignature/CodeRequirements +0 -0
- package/ios/TTSRNBridge/DavoiceTTS.xcframework/ios-arm64_x86_64-simulator/DavoiceTTS.framework/_CodeSignature/CodeRequirements-1 +0 -0
- package/ios/TTSRNBridge/DavoiceTTS.xcframework/ios-arm64_x86_64-simulator/DavoiceTTS.framework/_CodeSignature/CodeResources +282 -0
- package/ios/TTSRNBridge/DavoiceTTS.xcframework/ios-arm64_x86_64-simulator/DavoiceTTS.framework/_CodeSignature/CodeSignature +0 -0
- package/ios/TTSRNBridge/libphonemes.a +0 -0
- package/ios/TTSRNBridge/libucd.a +0 -0
- package/package.json +46 -0
- package/react-native.config.js +10 -0
- package/speech/index.ts +1055 -0
- package/stt/index.d.ts +54 -0
- package/stt/index.ts +222 -0
- package/tts/DaVoiceTTSBridge.d.ts +18 -0
- package/tts/DaVoiceTTSBridge.js +71 -0
- package/tts/index.d.ts +3 -0
- package/tts/index.js +4 -0
package/speech/index.ts
ADDED
|
@@ -0,0 +1,1055 @@
|
|
|
1
|
+
// speech/index.ts
|
|
2
|
+
import { NativeModules, NativeEventEmitter, DeviceEventEmitter, Platform } from 'react-native';
|
|
3
|
+
import resolveAssetSource from 'react-native/Libraries/Image/resolveAssetSource';
|
|
4
|
+
// ✅ Needed to get original extension ("dm") from require() assets in release.
|
|
5
|
+
// eslint-disable-next-line @typescript-eslint/no-var-requires
|
|
6
|
+
const AssetRegistry = require('react-native/Libraries/Image/AssetRegistry');
|
|
7
|
+
|
|
8
|
+
// -------------------- VERBOSE LOGGING --------------------
|
|
9
|
+
const VERBOSE = true;
|
|
10
|
+
const PFX = '[SpeechJS]';
|
|
11
|
+
function ts() {
|
|
12
|
+
const d = new Date();
|
|
13
|
+
return `${d.toISOString()}`;
|
|
14
|
+
}
|
|
15
|
+
function dbg(...args: any[]) {
|
|
16
|
+
if (!VERBOSE) return;
|
|
17
|
+
// eslint-disable-next-line no-console
|
|
18
|
+
console.log(ts(), PFX, ...args);
|
|
19
|
+
}
|
|
20
|
+
function dbgErr(...args: any[]) {
|
|
21
|
+
// eslint-disable-next-line no-console
|
|
22
|
+
console.log(ts(), PFX, '❌', ...args);
|
|
23
|
+
}
|
|
24
|
+
|
|
25
|
+
function safeJson(x: any) {
|
|
26
|
+
try {
|
|
27
|
+
return JSON.stringify(x);
|
|
28
|
+
} catch {
|
|
29
|
+
return String(x);
|
|
30
|
+
}
|
|
31
|
+
}
|
|
32
|
+
|
|
33
|
+
function sleep(ms: number) {
|
|
34
|
+
return new Promise<void>((r) => setTimeout(r, ms));
|
|
35
|
+
}
|
|
36
|
+
|
|
37
|
+
// If you use typed-array -> base64, Buffer is convenient (works in RN)
|
|
38
|
+
let toBase64: (u8: Uint8Array) => string;
|
|
39
|
+
try {
|
|
40
|
+
// eslint-disable-next-line @typescript-eslint/no-var-requires
|
|
41
|
+
const { Buffer } = require('buffer');
|
|
42
|
+
toBase64 = (u8) => Buffer.from(u8.buffer, u8.byteOffset, u8.byteLength).toString('base64');
|
|
43
|
+
} catch {
|
|
44
|
+
// very rare fallback
|
|
45
|
+
toBase64 = (u8) => globalThis.btoa(String.fromCharCode(...u8));
|
|
46
|
+
}
|
|
47
|
+
|
|
48
|
+
// Native handles.
// NativeSpeech is the unified iOS bridge (STT + TTS in one module) when the
// host app linked it; NativeSTT is the first STT bridge found under any of
// its historical registration names; NativeTTS is the standalone TTS bridge.
const NativeSpeech = NativeModules.SpeechBridge; // iOS unified (if present)
const NativeSTT =
  NativeModules.STT ||
  NativeModules.RCTSTT ||
  NativeModules.Voice ||
  NativeModules.RCTVoice;
const NativeTTS = NativeModules.DaVoiceTTSBridge;

// Print what we actually have at module load (helps diagnose linking issues).
dbg('Platform=', Platform.OS);
dbg('NativeSpeech (SpeechBridge) exists?', !!NativeSpeech, 'keys=', NativeSpeech ? Object.keys(NativeSpeech) : null);
dbg('NativeSTT exists?', !!NativeSTT, 'keys=', NativeSTT ? Object.keys(NativeSTT) : null);
dbg('NativeTTS (DaVoiceTTSBridge) exists?', !!NativeTTS, 'keys=', NativeTTS ? Object.keys(NativeTTS) : null);
|
|
62
|
+
|
|
63
|
+
// ---- Types ----
// Event payload shapes forwarded from the native bridges to JS handlers.
export type SpeechStartEvent = {};
export type SpeechEndEvent = {};
export type SpeechRecognizedEvent = { isFinal: boolean };
export type SpeechErrorEvent = { error: { code?: string; message?: string } };
export type SpeechResultsEvent = { value: string[] };
export type SpeechVolumeChangeEvent = { value: number };
export type NewSpeechWAVEvent = { path: string };

// Allow passing require() asset, uri string, etc.
export type RNAssetLike = any; // keep permissive (Metro asset numbers/objects vary)
export type ModelRef = string | RNAssetLike;

// Optional app-level callbacks for every native event this module forwards.
export type UnifiedEvents = {
  // STT
  onSpeechStart?: (e: SpeechStartEvent) => void;
  onSpeechRecognized?: (e: SpeechRecognizedEvent) => void;
  onSpeechEnd?: (e: SpeechEndEvent) => void;
  onSpeechError?: (e: SpeechErrorEvent) => void;
  onSpeechResults?: (e: SpeechResultsEvent) => void;
  onSpeechPartialResults?: (e: SpeechResultsEvent) => void;
  onSpeechVolumeChanged?: (e: SpeechVolumeChangeEvent) => void;
  /** Android-only: emitted when native MIC+VAD saves a full utterance WAV */
  onNewSpeechWAV?: (e: NewSpeechWAVEvent) => void;
  // TTS
  onFinishedSpeaking?: () => void;
};

// Names of the native events as emitted by the bridges (must match native side).
type NativeEventName =
  | 'onSpeechStart'
  | 'onSpeechRecognized'
  | 'onSpeechEnd'
  | 'onSpeechError'
  | 'onSpeechResults'
  | 'onSpeechPartialResults'
  | 'onSpeechVolumeChanged'
  | 'onNewSpeechWAV'
  | 'onFinishedSpeaking';

// --- NEW: descriptor for external PCM payloads ---
export type ExternalPCM = {
  /** base64 of raw PCM payload */
  base64: string;
  /** sample rate of payload (e.g., 16000, 22050, 24000, 44100, 48000) */
  sampleRate: number;
  /** number of channels in payload (default 1) */
  channels?: number;
  /** whether payload is interleaved (default true) */
  interleaved?: boolean;
  /** 'i16' for 16-bit signed integer, 'f32' for 32-bit float */
  format: 'i16' | 'f32';
  /** whether this item should trigger onFinishedSpeaking when done (default true) */
  markAsLast?: boolean;
};
|
|
117
|
+
|
|
118
|
+
class Speech {
|
|
119
|
+
  // ---- MIN: serialize TTS + wait-for-finished ----
  // ttsChain serializes speak requests; ttsPendingResolve/ttsPendingTimeout
  // hold the resolver and safety timer for the currently-awaited utterance
  // (released by onFinishedSpeaking or by the timeout, whichever fires first).
  private ttsChain: Promise<void> = Promise.resolve();
  private ttsPendingResolve: (() => void) | null = null;
  private ttsPendingTimeout: any = null;
|
|
123
|
+
|
|
124
|
+
  /**
   * Debug-only: dump everything resolveAssetSource() reports about a
   * require() asset (uri, and the dev-server locations when present).
   * Never throws — failures are logged via dbgErr.
   */
  private dbgAsset(label: string, x: any) {
    try {
      const a = resolveAssetSource(x);
      dbg(`[ASSETDBG ${label}] inputType=${typeof x} input=`, x);
      dbg(`[ASSETDBG ${label}] resolved keys=`, a ? Object.keys(a) : null);
      dbg(`[ASSETDBG ${label}] resolved=`, a);
      dbg(`[ASSETDBG ${label}] resolved.uri=`, a?.uri);
      // These often exist in dev:
      dbg(`[ASSETDBG ${label}] httpServerLocation=`, (a as any)?.httpServerLocation);
      dbg(`[ASSETDBG ${label}] fileSystemLocation=`, (a as any)?.fileSystemLocation);
    } catch (e) {
      dbgErr(`[ASSETDBG ${label}] resolveAssetSource threw`, String(e));
    }
  }
|
|
138
|
+
|
|
139
|
+
|
|
140
|
+
  /**
   * Native "finished speaking" event sink. First invokes the app's
   * onFinishedSpeaking handler (errors swallowed so the app can't break the
   * internal wait), then releases the _speakAndWait() promise if one is armed.
   */
  private _onNativeFinishedSpeaking() {
    dbg('[EVENT onFinishedSpeaking]');
    // 1) let app callback run
    try { this.handlers.onFinishedSpeaking(); } catch (e) { dbgErr('onFinishedSpeaking handler error', String(e)); }

    // 2) resolve the internal await (if any)
    if (this.ttsPendingTimeout) { clearTimeout(this.ttsPendingTimeout); this.ttsPendingTimeout = null; }
    // Clear the resolver BEFORE calling it so a re-entrant event can't double-resolve.
    const r = this.ttsPendingResolve;
    this.ttsPendingResolve = null;
    if (r) r();
  }
|
|
151
|
+
|
|
152
|
+
  /**
   * Dispatch a speak request to whichever native bridge is available:
   * iOS unified SpeechBridge first, otherwise the standalone TTS bridge.
   * Throws when no bridge exposes speak(). `s` is forwarded verbatim —
   * presumably a speed/rate parameter; confirm against the native signature.
   */
  private _nativeSpeak(text: string, speakerId: number, s: number) {
    if (Platform.OS === 'ios' && NativeSpeech?.speak) return (NativeSpeech as any).speak(text, speakerId, s);
    if (!NativeTTS?.speak) throw new Error('TTS speak not available');
    return (NativeTTS as any).speak(text, speakerId, s);
  }
|
|
157
|
+
|
|
158
|
+
  /**
   * Speak `text` and resolve when native fires onFinishedSpeaking, or after
   * `timeoutMs` (default 10 min) as a safety valve — the timeout RESOLVES
   * rather than rejects, so callers are never wedged by a lost native event.
   * Rejects only if the native speak() call itself throws synchronously.
   * NOTE: arms a single shared pending slot; concurrent calls would clobber
   * each other — callers are expected to serialize via ttsChain.
   */
  private _speakAndWait(text: string, speakerId: number, s: number, timeoutMs = 600000) {
    return new Promise<void>((resolve, reject) => {
      this.ttsPendingResolve = resolve;
      // safety: never hang forever
      this.ttsPendingTimeout = setTimeout(() => {
        dbg('Timeout waiting for onFinishedSpeaking — releasing wait');
        if (this.ttsPendingTimeout) { clearTimeout(this.ttsPendingTimeout); this.ttsPendingTimeout = null; }
        const r = this.ttsPendingResolve;
        this.ttsPendingResolve = null;
        if (r) r(); // ✅ accept after timeout
      }, timeoutMs);
      try {
        this._nativeSpeak(text, speakerId, s);
      } catch (e) {
        // Synchronous native failure: disarm the pending slot and reject.
        if (this.ttsPendingTimeout) { clearTimeout(this.ttsPendingTimeout); this.ttsPendingTimeout = null; }
        this.ttsPendingResolve = null;
        reject(e as any);
      }
    });
  }
|
|
178
|
+
|
|
179
|
+
  // Event plumbing: exactly one of unifiedEmitter (iOS SpeechBridge) or the
  // sttEmitter/ttsEmitter pair is active at a time; `subs` holds the live
  // subscriptions so teardown can remove them all.
  private sttEmitter: NativeEventEmitter | null = null;
  private ttsEmitter: NativeEventEmitter | typeof DeviceEventEmitter | null = null;
  private unifiedEmitter: NativeEventEmitter | null = null;
  private subs: Array<{ remove: () => void }> = [];
  private handlers: Required<UnifiedEvents>;
  // top of file (new state)
  // Last-used init parameters, kept for potential re-init.
  private lastLocale: string | null = null;
  private lastModel: string | null = null;
  private iosTtsOnly = false; // when true, use NativeTTS directly on iOS
|
|
188
|
+
|
|
189
|
+
|
|
190
|
+
  /** Debug helper: log a public API entry point with its (JSON-safe) payload. */
  private logCall(name: string, payload?: any) {
    dbg(`[CALL ${name}]`, payload !== undefined ? safeJson(payload) : '');
  }
|
|
193
|
+
|
|
194
|
+
|
|
195
|
+
  // ✅ NEW: best-effort extract extension for require() assets (e.g. ".dm")
  /**
   * Three strategies, in order: (1) Metro AssetRegistry `type` field for
   * numeric asset IDs, (2) extension parsed out of resolveAssetSource().uri,
   * (3) extension parsed out of a plain string path. Returns undefined when
   * none applies. All lookups are wrapped in try/catch — this must never throw.
   */
  private resolveModelExt(model: ModelRef): string | undefined {
    try {
      // Metro assets are usually numeric IDs on native platforms
      if (typeof model === 'number' && AssetRegistry?.getAssetByID) {
        const a = AssetRegistry.getAssetByID(model);
        const t = a?.type;
        // `type` is the original extension as registered by Metro (no dot).
        if (typeof t === 'string' && t.length) return t;
      }
    } catch {}
    try {
      const uri = resolveAssetSource(model as any)?.uri;
      if (typeof uri === 'string') {
        // Grab the extension before any query string or fragment.
        const m = uri.match(/\.([a-zA-Z0-9]+)(?:\?|#|$)/);
        if (m && m[1]) return m[1].toLowerCase();
      }
    } catch {}
    if (typeof model === 'string') {
      const m = model.match(/\.([a-zA-Z0-9]+)(?:\?|#|$)/);
      if (m && m[1]) return m[1].toLowerCase();
    }
    return undefined;
  }
|
|
218
|
+
|
|
219
|
+
// ✅ NEW: resolve require() assets to a usable URI/path string
|
|
220
|
+
private resolveModelToPath(model: ModelRef): string {
|
|
221
|
+
// ✅ Backward compatible: plain strings are passed through unchanged
|
|
222
|
+
if (typeof model === 'string') return model;
|
|
223
|
+
|
|
224
|
+
try {
|
|
225
|
+
const asset = resolveAssetSource(model);
|
|
226
|
+
dbg('[resolveModelToPath] resolveAssetSource ->', asset);
|
|
227
|
+
|
|
228
|
+
const uri = asset?.uri;
|
|
229
|
+
if (uri) return String(uri);
|
|
230
|
+
} catch {
|
|
231
|
+
// ignore and fall through
|
|
232
|
+
}
|
|
233
|
+
return typeof model === 'string' ? model : String(model);
|
|
234
|
+
}
|
|
235
|
+
|
|
236
|
+
  /**
   * Installs no-op handlers for every event, then picks emitters per platform:
   * iOS with the unified SpeechBridge gets one NativeEventEmitter; otherwise
   * STT gets its own emitter and TTS uses DeviceEventEmitter on Android
   * (the Android native module emits device-level events) or a per-module
   * emitter on iOS fallback. Web gets no emitters at all.
   */
  constructor() {
    this.handlers = {
      onSpeechStart: () => {},
      onSpeechRecognized: () => {},
      onSpeechEnd: () => {},
      onSpeechError: () => {},
      onSpeechResults: () => {},
      onSpeechPartialResults: () => {},
      onSpeechVolumeChanged: () => {},
      onNewSpeechWAV: () => {},
      onFinishedSpeaking: () => {},
    };

    // Emitters per-platform
    if (Platform.OS !== 'web') {
      if (Platform.OS === 'ios' && NativeSpeech) {
        this.unifiedEmitter = new NativeEventEmitter(NativeSpeech);
        dbg('[constructor] iOS unifiedEmitter created');
      } else {
        // Android (and iOS fallback): separate modules
        if (NativeSTT) {
          this.sttEmitter = new NativeEventEmitter(NativeSTT);
          dbg('[constructor] sttEmitter created');
        }
        // ANDROID: Native module emits through DeviceEventEmitter
        if (Platform.OS === 'android') {
          this.ttsEmitter = DeviceEventEmitter;
          dbg('[constructor] android ttsEmitter=DeviceEventEmitter');
        } else {
          // non-unified iOS fallback (if ever used)
          if (NativeTTS) {
            this.ttsEmitter = new NativeEventEmitter(NativeTTS);
            dbg('[constructor] iOS fallback ttsEmitter created');
          }
        }
      }
    }
  }
|
|
274
|
+
|
|
275
|
+
  // NEW: tiny helper to (re)wire listeners depending on mode
  /**
   * Tears down all subscriptions, re-creates the appropriate emitters for the
   * current mode (unified iOS unless iosTtsOnly is set; otherwise split
   * STT/TTS emitters), and re-attaches listeners via ensureListeners().
   */
  private rewireListenersForMode() {
    this.teardownListeners();
    // if iOS unified + NOT tts-only -> use unified emitter
    if (Platform.OS === 'ios' && NativeSpeech && !this.iosTtsOnly) {
      this.unifiedEmitter = new NativeEventEmitter(NativeSpeech);
      // unified handles both STT + TTS events
    } else {
      // fallback: separate emitters
      if (NativeSTT) this.sttEmitter = new NativeEventEmitter(NativeSTT);
      if (Platform.OS === 'android') this.ttsEmitter = DeviceEventEmitter;
      else if (NativeTTS) this.ttsEmitter = new NativeEventEmitter(NativeTTS);
    }
    this.ensureListeners();
  }
|
|
290
|
+
|
|
291
|
+
  // ---------- Init / Destroy ----------
  /**
   * ANDROID ONLY: Initialize remote capture (MIC + VAD) that saves utterances to WAV
   * and emits 'onNewSpeechWAV' with { path }. No-op on iOS (throws).
   *
   * Accepts either a bare ModelRef or an options object `{ model, onboardingJsonPath }`.
   * The model is resolved and used only for the TTS init that follows —
   * startRemoteSpeech() itself takes no model argument; presumably the native
   * side configures capture on its own (TODO confirm against STTModule.kt).
   */
  async initAllRemoteSTT(modelOrOpts: ModelRef | { model: ModelRef; onboardingJsonPath?: string }): Promise<void> {
    if (Platform.OS !== 'android') {
      throw new Error('initAllRemoteSTT is Android-only.');
    }
    if (!NativeSTT?.startRemoteSpeech) {
      throw new Error('Native STT module missing startRemoteSpeech()');
    }
    this.ensureListeners();
    // Normalize both call shapes into { model, ... }.
    const cfg =
      modelOrOpts && typeof modelOrOpts === 'object' && 'model' in (modelOrOpts as any)
        ? (modelOrOpts as any)
        : { model: modelOrOpts as ModelRef };
    const modelPath = this.resolveModelToPath(cfg.model);

    // Start native remote capture; callback signals (err | success).
    await new Promise<void>((resolve, reject) => {
      try {
        NativeSTT.startRemoteSpeech((err: string) => (err ? reject(new Error(err)) : resolve()));
      } catch (e) {
        reject(e as any);
      }
    });
    // NOTE(review): NativeSTT was already verified above; this re-check mainly
    // guards NativeTTS before the TTS init below.
    if (!NativeSTT || !NativeTTS) {
      throw new Error('Missing native bridges (STT/TTS).');
    }
    // Init TTS
    const modelExt = this.resolveModelExt(cfg.model);
    console.log('[MODELDBG] initAllRemoteSTT.modelPath (resolved)=', modelPath);
    console.log('[MODELDBG] initAllRemoteSTT.modelExt (resolved)=', modelExt);
    await NativeTTS.initTTS({ model: modelPath, modelExt });
    // Grace period after native TTS init — presumably lets the engine settle; TODO confirm.
    await sleep(500);
  }
|
|
327
|
+
|
|
328
|
+
  /**
   * ANDROID ONLY: Start remote STT and initialize TTS in playback-only mode (no model/ONNX init).
   * Accepts the same argument shape as initAllRemoteSTT() for API compatibility.
   * model/onboardingJsonPath are intentionally ignored on Android for now.
   */
  async initAllRemoteSTTAndTTS(
    _modelOrOpts?: ModelRef | { model: ModelRef; onboardingJsonPath?: string },
  ): Promise<void> {
    if (Platform.OS !== 'android') {
      throw new Error('initAllRemoteSTTAndTTS is Android-only.');
    }
    if (!NativeSTT?.startRemoteSpeech) {
      throw new Error('Native STT module missing startRemoteSpeech()');
    }
    this.ensureListeners();

    // Start native remote capture; callback signals (err | success).
    await new Promise<void>((resolve, reject) => {
      try {
        NativeSTT.startRemoteSpeech((err: string) => (err ? reject(new Error(err)) : resolve()));
      } catch (e) {
        reject(e as any);
      }
    });

    if (!NativeTTS?.initTTSPlaybackOnly) {
      throw new Error('Native TTS module missing initTTSPlaybackOnly()');
    }
    await NativeTTS.initTTSPlaybackOnly();
    // Grace period after native init — presumably lets the player settle; TODO confirm.
    await sleep(500);
  }
|
|
358
|
+
|
|
359
|
+
  /**
   * Debug-only: log a ModelRef's raw value and what resolveAssetSource()
   * makes of it. Fully guarded — never throws, never affects control flow.
   */
  dbgModel(label: string, model: any) {
    try {
      console.log(`[MODELDBG] ${label} typeof=`, typeof model, ' value=', model);
      try {
        const asset = resolveAssetSource(model);
        console.log(`[MODELDBG] ${label} resolveAssetSource=`, asset);
        if (asset?.uri) console.log(`[MODELDBG] ${label} asset.uri=`, asset.uri);
      } catch (e) {
        console.log(`[MODELDBG] ${label} resolveAssetSource threw:`, String(e));
      }
    } catch {}
  }
|
|
371
|
+
|
|
372
|
+
  // ---------- Init / Destroy ----------
  /**
   * iOS: initialize STT then TTS via native SpeechBridge if available.
   * Android: no special init needed; optionally preload TTS (if you want).
   *
   * Resets the TTS wait state and serialization chains, records locale/model
   * for later use, then either delegates to the unified iOS bridge or runs the
   * fallback sequence: start STT (3-arg Android signature with extras, falling
   * back to the 2-arg form), then initialize TTS with the resolved model.
   */
  async initAll(opts: { locale: string; model: ModelRef; timeoutMs?: number; onboardingJsonPath?: string }) {
    this.dbgModel('initAll.opts.model (raw)', opts.model);
    this.dbgAsset('initAll.model', opts.model);
    // Drop any in-flight speak wait and restart the serialization chains.
    this._cancelTtsWait('initAll');
    this.ttsChain = Promise.resolve();
    this.wavChain = Promise.resolve();

    const modelPath = this.resolveModelToPath(opts.model);
    console.log('[MODELDBG] initAll.modelPath (resolved)=', modelPath);

    this.lastLocale = opts.locale;
    this.lastModel = modelPath;

    if (Platform.OS === 'ios' && NativeSpeech?.initAll) {
      this.iosTtsOnly = false; // full unified mode
      this.teardownListeners(); // re-wire listeners for unified
      const r = await NativeSpeech.initAll({ ...opts, model: modelPath });
      this.ensureListeners();
      return r;
    }

    // Fallback (Android or iOS w/o SpeechBridge):
    // 1) Start STT (engine hot will happen internally); 2) init TTS.
    if (!NativeSTT || !NativeTTS) {
      throw new Error('Missing native bridges (STT/TTS).');
    }

    // Start STT (best-effort; no-op if already running)
    await new Promise<void>((resolve, reject) => {
      try {
        // iOS fallback signature: (locale, cb)
        // Android signature: (locale, extras, cb)
        if (Platform.OS === 'android') {
          // Always try Android 3-arg signature first, then fall back
          try {
            NativeSTT.startSpeech(
              opts.locale,
              {
                EXTRA_LANGUAGE_MODEL: 'LANGUAGE_MODEL_FREE_FORM',
                EXTRA_MAX_RESULTS: 5,
                EXTRA_PARTIAL_RESULTS: true,
                REQUEST_PERMISSIONS_AUTO: true,
                onboardingJsonPath: opts.onboardingJsonPath ?? null,
              },
              (err: string) => (err ? reject(new Error(err)) : resolve())
            );
          } catch {
            // Fallback to 2-arg (some RN voice bridges use this)
            NativeSTT.startSpeech(opts.locale, (err: string) =>
              err ? reject(new Error(err)) : resolve()
            );
          }
        } else {
          NativeSTT.startSpeech(opts.locale, (err: string) =>
            err ? reject(new Error(err)) : resolve(),
          );
        }
      } catch (e) {
        reject(e as any);
      }
    });

    // Init TTS
    const modelExt = this.resolveModelExt(opts.model);
    console.log('[MODELDBG] initAll.modelExt (resolved)=', modelExt);
    await NativeTTS.initTTS({ model: modelPath, modelExt });
  }
|
|
444
|
+
|
|
445
|
+
  /**
   * Tear everything down: cancel any pending speak wait, reset the chains,
   * then destroy natively — unified iOS path when available, otherwise
   * best-effort TTS destroy followed by STT destroy (both failures swallowed).
   * Always removes JS listeners. Returns the native result or 'Destroyed'.
   */
  async destroyAll() {
    this._cancelTtsWait('destroyAll');
    this.ttsChain = Promise.resolve();
    this.wavChain = Promise.resolve();

    // iOS unified
    if (Platform.OS === 'ios' && NativeSpeech?.destroyAll) {
      const r = await NativeSpeech.destroyAll();
      this.iosTtsOnly = false;
      // NOTE(review): this assignment is a no-op (x = x ?? null keeps x);
      // presumably lastLocale is intentionally preserved across destroy.
      this.lastLocale = this.lastLocale ?? null;
      this.teardownListeners();
      return r;
    }
    // Fallback: destroy TTS -> STT
    try { await NativeTTS?.destroy?.(); } catch {}
    try {
      await new Promise<void>((res) => {
        if (!NativeSTT?.destroySpeech) return res();
        NativeSTT.destroySpeech(() => res());
      });
    } catch {}
    this.teardownListeners();
    return 'Destroyed';
  }
|
|
469
|
+
|
|
470
|
+
  // ---------- STT ----------
  /**
   * Start speech recognition for `locale`. Prefers the unified iOS bridge;
   * otherwise calls the STT bridge, trying the Android 3-arg signature
   * (locale, extras, cb) with default extras merged with `options`, falling
   * back to the 2-arg (locale, cb) form. Resolves when the native callback
   * reports no error.
   */
  async start(locale: string, options: Record<string, any> = {}) {
    this.ensureListeners();
    // Prefer unified on iOS
    if (Platform.OS === 'ios' && NativeSpeech?.startSpeech) {
      return new Promise<void>((resolve) => NativeSpeech.startSpeech(locale, () => resolve()));
    }

    // Android + iOS fallback
    return new Promise<void>((resolve, reject) => {
      if (!NativeSTT?.startSpeech) return reject(new Error('startSpeech not available'));

      if (Platform.OS === 'android') {
        try {
          NativeSTT.startSpeech(
            locale,
            {
              EXTRA_LANGUAGE_MODEL: 'LANGUAGE_MODEL_FREE_FORM',
              EXTRA_MAX_RESULTS: 5,
              EXTRA_PARTIAL_RESULTS: true,
              REQUEST_PERMISSIONS_AUTO: true,
              ...options,
            },
            (err: string) => (err ? reject(new Error(err)) : resolve())
          );
        } catch {
          // Fallback to 2-arg
          NativeSTT.startSpeech(locale, (err: string) =>
            err ? reject(new Error(err)) : resolve()
          );
        }
      } else {
        NativeSTT.startSpeech(locale, (err: string) =>
          err ? reject(new Error(err)) : resolve(),
        );
      }
    });
  }
|
|
508
|
+
|
|
509
|
+
  /**
   * Start recognition with a speaker-verification onboarding JSON.
   * Uses the dedicated unified-iOS entry point when present; on all other
   * configurations falls back to plain start() (the onboarding path is
   * silently dropped there).
   */
  async startWithSVOnboardingJson(locale: string, onboardingJsonPath: string): Promise<void> {
    this.ensureListeners();
    if (Platform.OS === 'ios' && NativeSpeech?.startSpeechWithSVOnboardingJson) {
      return new Promise<void>((resolve) =>
        NativeSpeech.startSpeechWithSVOnboardingJson(locale, onboardingJsonPath, () => resolve()),
      );
    }
    return this.start(locale);
  }
|
|
518
|
+
|
|
519
|
+
  /**
   * Pause recognition via the native `pauseSpeechRecognitionLite` callback
   * API (unified bridge on iOS, STT bridge otherwise). Resolves even when the
   * native side reports `ok === false` (only logged); missing native method
   * is a logged no-op. On Android, waits 500 ms after the callback before
   * resolving — presumably to let native audio state settle; TODO confirm.
   */
  pauseSpeechRecognition(): Promise<void> {
    this.logCall('pauseSpeechRecognitionLite');

    const mod: any = Platform.OS === 'ios' ? NativeSpeech : NativeSTT;
    const fn = mod?.pauseSpeechRecognitionLite;

    if (!fn) {
      dbg(`pauseSpeechRecognitionLite not available on ${Platform.OS === 'ios' ? 'NativeSpeech' : 'NativeSTT'}`);
      return Promise.resolve();
    }
    if (Platform.OS === 'ios') {
      return new Promise((resolve, reject) => {
        try {
          fn.call(mod, (ok: boolean) => {
            if (!ok) dbgErr('pauseSpeechRecognitionLite returned false');
            resolve();
          });
        } catch (e) {
          reject(e as any);
        }
      });
    }

    return new Promise<void>((resolve, reject) => {
      try {
        fn.call(mod, async (ok: boolean) => {
          if (!ok) dbgErr('pauseSpeechRecognitionLite returned false');
          // ✅ ANDROID: small delay before resolving
          if (Platform.OS === 'android') await sleep(500);
          resolve();
        });
      } catch (e) {
        reject(e as any);
      }
    });
  }
|
|
555
|
+
|
|
556
|
+
  /**
   * Resume recognition via the native `unPauseSpeechRecognitionLite(times)`
   * callback API. Mirrors pauseSpeechRecognition(): missing native method is
   * a logged no-op, `ok === false` is only logged, and Android adds a 500 ms
   * settle delay before resolving. The meaning of `times` is defined by the
   * native side — presumably a retry/step count; TODO confirm.
   */
  unPauseSpeechRecognition(times: number): Promise<void> {
    this.logCall('unPauseSpeechRecognitionLite', { times });

    const mod: any = Platform.OS === 'ios' ? NativeSpeech : NativeSTT;
    const fn = mod?.unPauseSpeechRecognitionLite;

    if (!fn) {
      dbg(`unPauseSpeechRecognitionLite(times) not available on ${Platform.OS === 'ios' ? 'NativeSpeech' : 'NativeSTT'}`);
      return Promise.resolve();
    }

    if (Platform.OS === 'ios') {
      return new Promise((resolve, reject) => {
        try {
          fn.call(mod, times, (ok: boolean) => {
            if (!ok) dbgErr('unPauseSpeechRecognitionLite(times) returned false');
            resolve();
          });
        } catch (e) {
          reject(e as any);
        }
      });
    }
    return new Promise<void>((resolve, reject) => {
      try {
        fn.call(mod, times, async (ok: boolean) => {
          if (!ok) dbgErr('unPauseSpeechRecognitionLite(times) returned false');
          // ✅ ANDROID: small delay before resolving
          if (Platform.OS === 'android') await sleep(500);
          resolve();
        });
      } catch (e) {
        reject(e as any);
      }
    });
  }
|
|
592
|
+
  /** Pause mic/STT (Android native; iOS unified if present) */
  // Resolution order: iOS promise-based pauseMicrophoneAsync(1000) first
  // (errors rethrown), then iOS callback-based pauseMicrophone, then the
  // Android/fallback STT bridge; a missing native method resolves silently.
  // The 1000 argument is forwarded to native — presumably a timeout in ms;
  // TODO confirm against SpeechBridge.m.
  async pauseMicrophone(): Promise<void> {
    console.log('[pauseMicrophone] called');
    this.logCall('pauseMicrophone');
    // iOS: prefer async first, fallback to callback if missing
    if (Platform.OS === 'ios' && (NativeSpeech as any)?.pauseMicrophoneAsync) {
      dbg('IOS [pauseMicrophone] using NativeSpeech.pauseMicrophoneAsync()');
      try {
        const r = await (NativeSpeech as any).pauseMicrophoneAsync(1000);
        dbg('pauseMicrophoneAsync result', r);
        if (r?.ok === false) dbgErr('pauseMicrophoneAsync failed', r?.reason);
        return;
      } catch (e) {
        dbgErr('IOS [pauseMicrophone] NativeSpeech.pauseMicrophoneAsync() ERROR:', String(e));
        throw e;
      }
    }
    if (Platform.OS === 'ios' && NativeSpeech?.pauseMicrophone) {
      console.log('IOS [pauseMicrophone] called');
      return new Promise((resolve, reject) => {
        try { (NativeSpeech as any).pauseMicrophone(() => resolve()); }
        catch (e) { reject(e as any); }
      });
    }
    // Android / generic fallback via the STT bridge.
    if (!(NativeSTT as any)?.pauseMicrophone) return Promise.resolve();
    return new Promise((resolve, reject) => {
      try { (NativeSTT as any).pauseMicrophone(() => resolve()); }
      catch (e) { reject(e as any); }
    });
  }
|
|
622
|
+
|
|
623
|
+
/** Resume mic/STT (Android native; iOS unified if present) */
|
|
624
|
+
async unPauseMicrophone(): Promise<void> {
|
|
625
|
+
this.logCall('unPauseMicrophone');
|
|
626
|
+
// iOS: prefer async first, fallback to callback if missing
|
|
627
|
+
if (Platform.OS === 'ios' && (NativeSpeech as any)?.unPauseMicrophoneAsync) {
|
|
628
|
+
dbg('IOS [unPauseMicrophone] using NativeSpeech.unPauseMicrophoneAsync()');
|
|
629
|
+
try {
|
|
630
|
+
const r = await (NativeSpeech as any).unPauseMicrophoneAsync(1000);
|
|
631
|
+
if (r?.ok === false) dbgErr('unPauseMicrophone failed', r?.reason);
|
|
632
|
+
dbg('IOS [unPauseMicrophone] NativeSpeech.unPauseMicrophoneAsync() DONE');
|
|
633
|
+
return;
|
|
634
|
+
} catch (e) {
|
|
635
|
+
dbgErr('IOS [unPauseMicrophone] NativeSpeech.unPauseMicrophoneAsync() ERROR:', String(e));
|
|
636
|
+
throw e;
|
|
637
|
+
}
|
|
638
|
+
}
|
|
639
|
+
|
|
640
|
+
if (Platform.OS === 'ios' && NativeSpeech?.unPauseMicrophone) {
|
|
641
|
+
console.log('IOS [unPauseMicrophone] called');
|
|
642
|
+
return new Promise((resolve, reject) => {
|
|
643
|
+
try { (NativeSpeech as any).unPauseMicrophone(() => resolve()); }
|
|
644
|
+
catch (e) { reject(e as any); }
|
|
645
|
+
});
|
|
646
|
+
}
|
|
647
|
+
if (Platform.OS === 'ios')
|
|
648
|
+
console.log('IOS [unPauseMicrophone] called without native support');
|
|
649
|
+
if (!(NativeSTT as any)?.unPauseMicrophone) return Promise.resolve();
|
|
650
|
+
return new Promise((resolve, reject) => {
|
|
651
|
+
try { (NativeSTT as any).unPauseMicrophone(() => resolve()); }
|
|
652
|
+
catch (e) { reject(e as any); }
|
|
653
|
+
});
|
|
654
|
+
}
|
|
655
|
+
|
|
656
|
+
stop(): Promise<void> {
|
|
657
|
+
if (Platform.OS === 'ios' && NativeSpeech?.stopSpeech) {
|
|
658
|
+
return new Promise((res) => NativeSpeech.stopSpeech(() => res()));
|
|
659
|
+
}
|
|
660
|
+
if (!NativeSTT?.stopSpeech) return Promise.resolve();
|
|
661
|
+
return new Promise((res) => NativeSTT.stopSpeech(() => res()));
|
|
662
|
+
}
|
|
663
|
+
|
|
664
|
+
cancel(): Promise<void> {
|
|
665
|
+
if (Platform.OS === 'ios' && NativeSpeech?.cancelSpeech) {
|
|
666
|
+
return new Promise((res) => NativeSpeech.cancelSpeech(() => res()));
|
|
667
|
+
}
|
|
668
|
+
if (!NativeSTT?.cancelSpeech) return Promise.resolve();
|
|
669
|
+
return new Promise((res) => NativeSTT.cancelSpeech(() => res()));
|
|
670
|
+
}
|
|
671
|
+
|
|
672
|
+
isAvailable(): Promise<0 | 1> {
|
|
673
|
+
// Prefer unified
|
|
674
|
+
if (Platform.OS === 'ios' && NativeSpeech?.isSpeechAvailable) {
|
|
675
|
+
return new Promise((resolve, reject) =>
|
|
676
|
+
NativeSpeech.isSpeechAvailable((ok: 0 | 1, err: string) =>
|
|
677
|
+
err ? reject(new Error(err)) : resolve(ok),
|
|
678
|
+
),
|
|
679
|
+
);
|
|
680
|
+
}
|
|
681
|
+
if (NativeSTT?.isSpeechAvailable) {
|
|
682
|
+
return new Promise((resolve) =>
|
|
683
|
+
NativeSTT.isSpeechAvailable((ok: 0 | 1) => resolve(ok)),
|
|
684
|
+
);
|
|
685
|
+
}
|
|
686
|
+
return Promise.resolve(1);
|
|
687
|
+
}
|
|
688
|
+
|
|
689
|
+
isRecognizing(): Promise<0 | 1> {
|
|
690
|
+
if (Platform.OS === 'ios' && NativeSpeech?.isRecognizing) {
|
|
691
|
+
return new Promise((resolve) =>
|
|
692
|
+
NativeSpeech.isRecognizing((v: 0 | 1) => resolve(v)),
|
|
693
|
+
);
|
|
694
|
+
}
|
|
695
|
+
if (NativeSTT?.isRecognizing) {
|
|
696
|
+
return new Promise((resolve) =>
|
|
697
|
+
NativeSTT.isRecognizing((v: 0 | 1) => resolve(v)),
|
|
698
|
+
);
|
|
699
|
+
}
|
|
700
|
+
return Promise.resolve(0);
|
|
701
|
+
}
|
|
702
|
+
|
|
703
|
+
async hasIOSMicPermissions(): Promise<boolean> {
|
|
704
|
+
if (Platform.OS !== 'ios') return false;
|
|
705
|
+
if (!NativeSpeech?.hasMicPermissions) return false;
|
|
706
|
+
return !!(await NativeSpeech.hasMicPermissions());
|
|
707
|
+
}
|
|
708
|
+
|
|
709
|
+
async requestIOSMicPermissions(wait_timeout: number): Promise<boolean> {
|
|
710
|
+
if (Platform.OS !== 'ios') return false;
|
|
711
|
+
if (!NativeSpeech?.requestMicPermissions) return false;
|
|
712
|
+
return !!(await NativeSpeech.requestMicPermissions(wait_timeout));
|
|
713
|
+
}
|
|
714
|
+
|
|
715
|
+
async hasIOSSpeechRecognitionPermissions(): Promise<boolean> {
|
|
716
|
+
if (Platform.OS !== 'ios') return false;
|
|
717
|
+
if (!NativeSpeech?.hasSpeechRecognitionPermissions) return false;
|
|
718
|
+
return !!(await NativeSpeech.hasSpeechRecognitionPermissions());
|
|
719
|
+
}
|
|
720
|
+
|
|
721
|
+
async requestIOSSpeechRecognitionPermissions(wait_timeout: number): Promise<boolean> {
|
|
722
|
+
if (Platform.OS !== 'ios') return false;
|
|
723
|
+
if (!NativeSpeech?.requestSpeechRecognitionPermissions) return false;
|
|
724
|
+
return !!(await NativeSpeech.requestSpeechRecognitionPermissions(wait_timeout));
|
|
725
|
+
}
|
|
726
|
+
|
|
727
|
+
async setLicense(licenseKey: string): Promise<boolean> {
|
|
728
|
+
if (!licenseKey) throw new Error('setLicense: missing licenseKey');
|
|
729
|
+
|
|
730
|
+
if (Platform.OS === 'ios' && NativeSpeech?.setLicense) {
|
|
731
|
+
return !!(await NativeSpeech.setLicense(licenseKey));
|
|
732
|
+
}
|
|
733
|
+
|
|
734
|
+
const results = await Promise.all([
|
|
735
|
+
NativeTTS?.setLicense ? NativeTTS.setLicense(licenseKey) : Promise.resolve(false),
|
|
736
|
+
NativeSTT?.setLicense ? NativeSTT.setLicense(licenseKey) : Promise.resolve(false),
|
|
737
|
+
]);
|
|
738
|
+
return results.every(Boolean);
|
|
739
|
+
}
|
|
740
|
+
|
|
741
|
+
async isLicenseValid(licenseKey: string): Promise<boolean> {
|
|
742
|
+
if (!licenseKey) throw new Error('isLicenseValid: missing licenseKey');
|
|
743
|
+
|
|
744
|
+
if (Platform.OS === 'ios' && NativeSpeech?.isLicenseValid) {
|
|
745
|
+
return !!(await NativeSpeech.isLicenseValid(licenseKey));
|
|
746
|
+
}
|
|
747
|
+
|
|
748
|
+
const results = await Promise.all([
|
|
749
|
+
NativeTTS?.isLicenseValid ? NativeTTS.isLicenseValid(licenseKey) : Promise.resolve(false),
|
|
750
|
+
NativeSTT?.isLicenseValid ? NativeSTT.isLicenseValid(licenseKey) : Promise.resolve(false),
|
|
751
|
+
]);
|
|
752
|
+
return results.every(Boolean);
|
|
753
|
+
}
|
|
754
|
+
|
|
755
|
+
// ---------- TTS ----------
|
|
756
|
+
/**
 * Initialize the TTS engine.
 *
 * Accepts either a bare model reference or a `{ model }` config object;
 * the model is resolved to a concrete path and cached in `lastModel`.
 *
 * @throws Error when no model is supplied or the native TTS bridge is
 *   missing (previously this dereferenced `NativeTTS.initTTS` unchecked,
 *   producing an opaque TypeError — every sibling method guards first).
 */
async initTTS(modelOrConfig: ModelRef | { model: ModelRef }) {
  // Normalize both accepted call shapes to { model }.
  const cfg =
    modelOrConfig && typeof modelOrConfig === 'object' && 'model' in (modelOrConfig as any)
      ? (modelOrConfig as any)
      : { model: modelOrConfig as any };
  if (!cfg?.model) throw new Error("initTTS: missing 'model'");
  if (!NativeTTS?.initTTS) {
    throw new Error('initTTS: native TTS bridge not available on this platform.');
  }
  const modelPath = this.resolveModelToPath(cfg.model);
  this.lastModel = modelPath;
  return NativeTTS.initTTS({ model: modelPath });
}
|
|
770
|
+
|
|
771
|
+
/**
 * Queue an utterance. Calls are serialized on `ttsChain` and resolve
 * when native playback finishes (event-driven via _speakAndWait).
 *
 * @param text      Text to synthesize.
 * @param speakerId Voice index (default 0).
 * @param speed     Playback speed; native expects a *length scale*, so
 *                  the value is inverted (NaN/0/undefined → 1.0).
 */
async speak(text: string, speakerId = 0, speed = 1.0) {
  // Sanitize and invert: speed → length scale, guarding NaN/0.
  const lengthScale =
    Number.isFinite(speed as number) && speed !== 0 ? 1.0 / (speed as number) : 1.0;
  this.ensureListeners();
  // Serialize utterances. BUGFIX: swallow a prior rejection before
  // chaining — otherwise one failed utterance leaves ttsChain rejected
  // forever and every subsequent speak() fails immediately.
  this.ttsChain = this.ttsChain
    .catch(() => {})
    .then(() => this._speakAndWait(text, speakerId, lengthScale));
  return this.ttsChain;
}
|
|
780
|
+
|
|
781
|
+
// --- NEW: cancel any pending "wait for finished" immediately ---
|
|
782
|
+
// Cancel any pending "wait for onFinishedSpeaking" immediately.
private _cancelTtsWait(reason = 'stopSpeaking') {
  dbg(`[TTS cancel wait] reason=${reason}`);

  // Disarm the long-running safety timer, if one is pending.
  const timer = this.ttsPendingTimeout;
  if (timer) {
    clearTimeout(timer);
    this.ttsPendingTimeout = null;
  }

  // Release whoever is awaiting completion (clear the slot first so a
  // late native event cannot resolve twice).
  const release = this.ttsPendingResolve;
  this.ttsPendingResolve = null;
  if (release) release();

  // Deliberately NOT invoking this.handlers.onFinishedSpeaking():
  // a cancel must never masquerade as a natural "finished" event.
}
|
|
799
|
+
|
|
800
|
+
/**
 * Stop TTS playback. JS-side waiters and both playback queues are
 * released *before* the native call, so callers are never left blocked
 * even if the native bridge throws (that failure is only logged).
 */
async stopSpeaking() {
  // 1) Unblock JS immediately and reset the serialized queues so the
  //    next speak()/playWav() starts from a clean chain.
  this._cancelTtsWait('stopSpeaking');
  this.ttsChain = Promise.resolve();
  this.wavChain = Promise.resolve();

  // 2) Pick whichever native stop is available, then invoke it.
  const stopNative =
    Platform.OS === 'ios' && NativeSpeech?.stopSpeaking
      ? () => NativeSpeech.stopSpeaking()
      : NativeTTS?.stopSpeaking
        ? () => NativeTTS.stopSpeaking()
        : null;
  if (!stopNative) return;

  try {
    return await stopNative();
  } catch (e) {
    // JS state is already unblocked; just record the native failure.
    dbgErr('stopSpeaking native error', String(e));
  }
}
|
|
821
|
+
|
|
822
|
+
// ADD near your other TTS queue fields (DO NOT remove existing ones)
// Serializes waitable playWav() calls; independent of the speak() queue (ttsChain).
private wavChain: Promise<void> = Promise.resolve();

// ADD helper (minimal, uses existing onFinishedSpeaking event)
// Starts native WAV playback and resolves when the shared
// onFinishedSpeaking event fires (it calls ttsPendingResolve), or after
// timeoutMs as a safety valve. Rejects only if *starting* playback throws.
// NOTE(review): ttsPendingResolve/ttsPendingTimeout are shared with the
// speak() pipeline — callers must keep playback serialized (wavChain does
// this) so the two never overwrite each other's pending wait.
private _playWavAndWait(realPath: string, markAsLast: boolean, timeoutMs = 600000) {
  return new Promise<void>((resolve, reject) => {
    this.ttsPendingResolve = resolve; // reuse existing resolver + event
    // Safety timeout: never leave a caller awaiting forever if the
    // native side fails to emit onFinishedSpeaking.
    this.ttsPendingTimeout = setTimeout(() => {
      dbg('Timeout waiting for onFinishedSpeaking — releasing wait');
      if (this.ttsPendingTimeout) { clearTimeout(this.ttsPendingTimeout); this.ttsPendingTimeout = null; }
      const r = this.ttsPendingResolve;
      this.ttsPendingResolve = null;
      if (r) r(); // ✅ accept after timeout
    }, timeoutMs);

    try {
      // Prefer unified iOS bridge if present
      if (Platform.OS === 'ios' && NativeSpeech?.playWav) {
        (NativeSpeech as any).playWav(realPath, markAsLast);
        return; // resolution happens later via the event or the timeout
      }
      if (!NativeTTS?.playWav) throw new Error('playWav not available on this platform.');
      (NativeTTS as any).playWav(realPath, markAsLast);
    } catch (e) {
      // Startup failed: undo the pending-wait bookkeeping before rejecting.
      if (this.ttsPendingTimeout) { clearTimeout(this.ttsPendingTimeout); this.ttsPendingTimeout = null; }
      this.ttsPendingResolve = null;
      reject(e as any);
    }
  });
}
|
|
852
|
+
|
|
853
|
+
/**
 * Play a WAV file (local path, file:// URL, or RN bundled asset).
 *
 * @param pathOrURL  Path/URL string or a `require(...)` asset reference.
 * @param markAsLast true (default): waitable — serialized on `wavChain`
 *                   and resolved when native signals completion.
 *                   false: fire-and-forget — native emits no completion
 *                   event, so the call cannot be awaited meaningfully.
 */
async playWav(pathOrURL: any, markAsLast = true) {
  console.log('[Speech.playWav] called with:', pathOrURL, '| type:', typeof pathOrURL);
  this.dbgAsset('playWav.arg', pathOrURL);

  // Resolve RN bundled assets (require('./x.wav')) to a usable URI/path.
  // On Android the raw resource entry name passes straight through and
  // native resolves it via res/raw.
  const asset = resolveAssetSource(pathOrURL);
  console.log('[Speech.playWav] resolveAssetSource ->', asset);

  let realPath = asset?.uri ?? pathOrURL;
  if (typeof realPath !== 'string') {
    realPath = String(realPath);
    console.log('[Speech.playWav] converted ?? realPath:', realPath);
  }
  console.log('[Speech.playWav] resolved realPath:', realPath);

  this.ensureListeners();

  if (markAsLast) {
    // Waitable: serialize on the WAV queue (separate from speak()).
    // BUGFIX: swallow a prior rejection before chaining — otherwise one
    // failed playback leaves wavChain rejected forever and every
    // subsequent playWav() fails immediately.
    this.wavChain = this.wavChain
      .catch(() => {})
      .then(() => this._playWavAndWait(realPath, true));
    return this.wavChain;
  }

  // Fire-and-forget path.
  if (Platform.OS === 'ios' && NativeSpeech?.playWav) {
    return (NativeSpeech as any).playWav(realPath, false);
  }
  if (!NativeTTS?.playWav) {
    console.log('[Speech.playWav] NativeTTS:', NativeTTS);
    throw new Error('playWav not available on this platform.');
  }
  return (NativeTTS as any).playWav(realPath, false);
}
|
|
901
|
+
|
|
902
|
+
|
|
903
|
+
// /** Queue a WAV file (local path or file:// URL). Routed via AEC path, queued with speak(). */
|
|
904
|
+
// async playWav(pathOrURL: string, markAsLast = true) {
|
|
905
|
+
// // Prefer unified iOS bridge if present
|
|
906
|
+
// if (Platform.OS === 'ios' && NativeSpeech?.playWav) {
|
|
907
|
+
// return NativeSpeech.playWav(pathOrURL, markAsLast);
|
|
908
|
+
// }
|
|
909
|
+
// // Fallback: direct TTS bridge (Android + iOS fallback)
|
|
910
|
+
// if (!NativeTTS?.playWav) throw new Error('playWav not available on this platform.');
|
|
911
|
+
// return NativeTTS.playWav(pathOrURL, markAsLast);
|
|
912
|
+
// }
|
|
913
|
+
|
|
914
|
+
/**
|
|
915
|
+
* Convenience: queue a typed array (Int16Array | Float32Array | ArrayBuffer) as PCM.
|
|
916
|
+
* We’ll base64 it and pass through to native with the right metadata.
|
|
917
|
+
*/
|
|
918
|
+
async playPCM(
|
|
919
|
+
data: ArrayBuffer | Int16Array | Float32Array,
|
|
920
|
+
opts: {
|
|
921
|
+
sampleRate: number;
|
|
922
|
+
channels?: number;
|
|
923
|
+
interleaved?: boolean;
|
|
924
|
+
/** If data is Int16Array → 'i16' (default); if Float32Array → 'f32' (default) */
|
|
925
|
+
format?: 'i16' | 'f32';
|
|
926
|
+
markAsLast?: boolean;
|
|
927
|
+
}
|
|
928
|
+
) {
|
|
929
|
+
let u8: Uint8Array;
|
|
930
|
+
let format: 'i16' | 'f32' = opts.format ?? 'i16';
|
|
931
|
+
|
|
932
|
+
if (data instanceof ArrayBuffer) {
|
|
933
|
+
// assume Int16 unless caller specified
|
|
934
|
+
u8 = new Uint8Array(data);
|
|
935
|
+
} else if (data instanceof Int16Array) {
|
|
936
|
+
u8 = new Uint8Array(data.buffer, data.byteOffset, data.byteLength);
|
|
937
|
+
format = opts.format ?? 'i16';
|
|
938
|
+
} else if (data instanceof Float32Array) {
|
|
939
|
+
u8 = new Uint8Array(data.buffer, data.byteOffset, data.byteLength);
|
|
940
|
+
format = opts.format ?? 'f32';
|
|
941
|
+
} else {
|
|
942
|
+
throw new Error('Unsupported PCM container');
|
|
943
|
+
}
|
|
944
|
+
|
|
945
|
+
const base64 = toBase64(u8);
|
|
946
|
+
return this.playBuffer({
|
|
947
|
+
base64,
|
|
948
|
+
sampleRate: opts.sampleRate,
|
|
949
|
+
channels: opts.channels ?? 1,
|
|
950
|
+
interleaved: opts.interleaved ?? true,
|
|
951
|
+
format,
|
|
952
|
+
markAsLast: opts.markAsLast ?? true,
|
|
953
|
+
});
|
|
954
|
+
}
|
|
955
|
+
|
|
956
|
+
|
|
957
|
+
|
|
958
|
+
/**
|
|
959
|
+
* Queue raw PCM buffer from other TTS providers (base64 payload).
|
|
960
|
+
* Use ExternalPCM for full control of metadata.
|
|
961
|
+
*/
|
|
962
|
+
async playBuffer(desc: ExternalPCM) {
|
|
963
|
+
const payload = {
|
|
964
|
+
base64: desc.base64,
|
|
965
|
+
sampleRate: desc.sampleRate,
|
|
966
|
+
channels: desc.channels ?? 1,
|
|
967
|
+
interleaved: desc.interleaved ?? true,
|
|
968
|
+
format: desc.format,
|
|
969
|
+
markAsLast: desc.markAsLast ?? true,
|
|
970
|
+
};
|
|
971
|
+
if (Platform.OS === 'ios' && NativeSpeech?.playBuffer) {
|
|
972
|
+
return NativeSpeech.playBuffer(payload);
|
|
973
|
+
}
|
|
974
|
+
if (!NativeTTS?.playBuffer) throw new Error('playBuffer not available on this platform.');
|
|
975
|
+
return NativeTTS.playBuffer(payload);
|
|
976
|
+
}
|
|
977
|
+
|
|
978
|
+
|
|
979
|
+
// ---------- Events ----------
|
|
980
|
+
// Wire native event emitters to the handler table exactly once.
// Idempotent: bails out if subscriptions already exist. On iOS with the
// unified bridge, a single emitter carries every event; otherwise the
// STT and TTS emitters are subscribed separately.
private ensureListeners() {
  if (this.subs.length) return;

  // iOS unified: subscribe once on the unified emitter
  if (Platform.OS === 'ios' && this.unifiedEmitter) {
    const map: Partial<Record<NativeEventName, (...args: any[]) => void>> = {
      onSpeechStart: (e) => this.handlers.onSpeechStart(e),
      onSpeechRecognized: (e) => this.handlers.onSpeechRecognized(e),
      onSpeechEnd: (e) => this.handlers.onSpeechEnd(e),
      onSpeechError: (e) => this.handlers.onSpeechError(e),
      onSpeechResults: (e) => this.handlers.onSpeechResults(e),
      onSpeechPartialResults: (e) => this.handlers.onSpeechPartialResults(e),
      onSpeechVolumeChanged: (e) => this.handlers.onSpeechVolumeChanged(e),
      // Routed through _onNativeFinishedSpeaking so the TTS wait state
      // (pending resolver/timeout) is released before the app callback.
      onFinishedSpeaking: () => this._onNativeFinishedSpeaking(),
    };
    (Object.keys(map) as NativeEventName[]).forEach((name) => {
      try {
        const handler = map[name];
        if (!handler) return;
        const sub = this.unifiedEmitter!.addListener(name, handler);
        this.subs.push(sub);
      } catch {}
    });
    return;
  }

  // Android (and iOS fallback): subscribe to both STT and TTS emitters
  if (this.sttEmitter) {
    const sttMap = {
      onSpeechStart: (e: any) => this.handlers.onSpeechStart(e),
      onSpeechRecognized: (e: any) => this.handlers.onSpeechRecognized(e),
      onSpeechEnd: (e: any) => this.handlers.onSpeechEnd(e),
      onSpeechError: (e: any) => this.handlers.onSpeechError(e),
      onSpeechResults: (e: any) => this.handlers.onSpeechResults(e),
      onSpeechPartialResults: (e: any) => this.handlers.onSpeechPartialResults(e),
      onSpeechVolumeChanged: (e: any) => this.handlers.onSpeechVolumeChanged(e),
      // WAV capture event only exists on the STT emitter path.
      onNewSpeechWAV: (e: any) => this.handlers.onNewSpeechWAV(e),
    };
    (Object.keys(sttMap) as (keyof typeof sttMap)[]).forEach((name) => {
      try {
        const sub = this.sttEmitter!.addListener(name, sttMap[name]);
        this.subs.push(sub);
      } catch {}
    });
  }
  if (this.ttsEmitter) {
    try {
      // MIN: prevent duplicate listeners across Fast Refresh / reload
      // by stashing the subscription on globalThis and removing any
      // previous instance's subscription before adding a fresh one.
      const g: any = globalThis as any;
      try { g.__SpeechJS_finishedSub?.remove?.(); } catch {}
      const sub = this.ttsEmitter.addListener('onFinishedSpeaking', () => this._onNativeFinishedSpeaking());
      g.__SpeechJS_finishedSub = sub;
      this.subs.push(sub);
    } catch {}
  }
}
|
|
1036
|
+
|
|
1037
|
+
/** Remove every active native event subscription and clear the registry. */
private teardownListeners() {
  for (const sub of this.subs) {
    try {
      sub.remove();
    } catch {}
  }
  this.subs = [];
}
|
|
1041
|
+
|
|
1042
|
+
// ---------- Friendly setters ----------
|
|
1043
|
+
// Assigning any handler also (re)attaches native listeners lazily.
set onSpeechStart(fn: (e: SpeechStartEvent) => void) {
  this.handlers.onSpeechStart = fn;
  this.ensureListeners();
}
set onSpeechRecognized(fn: (e: SpeechRecognizedEvent) => void) {
  this.handlers.onSpeechRecognized = fn;
  this.ensureListeners();
}
set onSpeechEnd(fn: (e: SpeechEndEvent) => void) {
  this.handlers.onSpeechEnd = fn;
  this.ensureListeners();
}
set onSpeechError(fn: (e: SpeechErrorEvent) => void) {
  this.handlers.onSpeechError = fn;
  this.ensureListeners();
}
set onSpeechResults(fn: (e: SpeechResultsEvent) => void) {
  this.handlers.onSpeechResults = fn;
  this.ensureListeners();
}
set onSpeechPartialResults(fn: (e: SpeechResultsEvent) => void) {
  this.handlers.onSpeechPartialResults = fn;
  this.ensureListeners();
}
set onSpeechVolumeChanged(fn: (e: SpeechVolumeChangeEvent) => void) {
  this.handlers.onSpeechVolumeChanged = fn;
  this.ensureListeners();
}
set onNewSpeechWAV(fn: (e: NewSpeechWAVEvent) => void) {
  this.handlers.onNewSpeechWAV = fn;
  this.ensureListeners();
}
set onFinishedSpeaking(fn: () => void) {
  this.handlers.onFinishedSpeaking = fn;
  this.ensureListeners();
}
|
|
1052
|
+
}
|
|
1053
|
+
|
|
1054
|
+
// Module-level singleton: every importer shares one Speech facade, so
// handler assignments and native listener state are app-wide.
const SpeechInstance = new Speech();
export default SpeechInstance;
|