react-native-voice-ts 1.0.0
- package/.nvmrc +1 -0
- package/.prettierrc +5 -0
- package/.releaserc +15 -0
- package/CONTRIBUTING.md +293 -0
- package/LICENSE +21 -0
- package/MIGRATION_SUMMARY.md +510 -0
- package/README.md +576 -0
- package/android/build.gradle +126 -0
- package/android/gradle.properties +5 -0
- package/android/src/main/AndroidManifest.xml +8 -0
- package/android/src/main/VoiceSpec.kt +55 -0
- package/android/src/main/java/com/wenkesj/voice/Voice.kt +343 -0
- package/android/src/main/java/com/wenkesj/voice/VoiceModule.kt +63 -0
- package/android/src/main/java/com/wenkesj/voice/VoicePackage.kt +35 -0
- package/android/src/newarch/VoiceSpec.kt +55 -0
- package/android/src/oldarch/VoiceSpec.kt +30 -0
- package/app.plugin.js +1 -0
- package/dist/NativeVoiceAndroid.d.ts +22 -0
- package/dist/NativeVoiceAndroid.d.ts.map +1 -0
- package/dist/NativeVoiceAndroid.js +3 -0
- package/dist/NativeVoiceAndroid.js.map +1 -0
- package/dist/NativeVoiceIOS.d.ts +18 -0
- package/dist/NativeVoiceIOS.d.ts.map +1 -0
- package/dist/NativeVoiceIOS.js +3 -0
- package/dist/NativeVoiceIOS.js.map +1 -0
- package/dist/VoiceModuleTypes.d.ts +54 -0
- package/dist/VoiceModuleTypes.d.ts.map +1 -0
- package/dist/VoiceModuleTypes.js +2 -0
- package/dist/VoiceModuleTypes.js.map +1 -0
- package/dist/VoiceUtilTypes.d.ts +43 -0
- package/dist/VoiceUtilTypes.d.ts.map +1 -0
- package/dist/VoiceUtilTypes.js +8 -0
- package/dist/VoiceUtilTypes.js.map +1 -0
- package/dist/index.d.ts +72 -0
- package/dist/index.d.ts.map +1 -0
- package/dist/index.js +395 -0
- package/dist/index.js.map +1 -0
- package/ios/Voice/Voice.h +14 -0
- package/ios/Voice/Voice.mm +672 -0
- package/ios/Voice.xcodeproj/project.pbxproj +272 -0
- package/ios/Voice.xcodeproj/project.xcworkspace/contents.xcworkspacedata +7 -0
- package/package.json +101 -0
- package/plugin/build/withVoice.d.ts +13 -0
- package/plugin/build/withVoice.js +47 -0
- package/plugin/tsconfig.tsbuildinfo +1 -0
- package/react-native-voice.podspec +46 -0
- package/src/NativeVoiceAndroid.ts +28 -0
- package/src/NativeVoiceIOS.ts +24 -0
- package/src/VoiceModuleTypes.ts +64 -0
- package/src/VoiceUtilTypes.ts +46 -0
- package/src/index.ts +500 -0
package/src/index.ts
ADDED
@@ -0,0 +1,500 @@
import {
  NativeModules,
  NativeEventEmitter,
  Platform,
  PermissionsAndroid,
  type EventSubscription,
} from 'react-native';
import invariant from 'invariant';
import {
  type SpeechEvents,
  type TranscriptionEvents,
  type TranscriptionEndEvent,
  type TranscriptionErrorEvent,
  type TranscriptionStartEvent,
  type SpeechRecognizedEvent,
  type SpeechErrorEvent,
  type SpeechResultsEvent,
  type SpeechStartEvent,
  type SpeechEndEvent,
  type SpeechVolumeChangeEvent,
  type TranscriptionResultsEvent,
} from './VoiceModuleTypes';

const LINKING_ERROR =
  `The package 'react-native-voice-ts' doesn't seem to be linked. Make sure: \n\n` +
  Platform.select({ ios: "- You have run 'pod install'\n", default: '' }) +
  '- You rebuilt the app after installing the package\n' +
  '- You are not using Expo Go\n';

// @ts-ignore - turboModuleProxy is not in React Native types
const isTurboModuleEnabled = global.__turboModuleProxy != null;

const VoiceNativeModule = isTurboModuleEnabled
  ? Platform.OS === 'android'
    ? require('./NativeVoiceAndroid').default
    : require('./NativeVoiceIOS').default
  : NativeModules.Voice;

// Fall back to a Proxy that throws a descriptive linking error on any
// member access when the native module could not be resolved.
const Voice = VoiceNativeModule
  ? VoiceNativeModule
  : new Proxy(
      {},
      {
        get() {
          throw new Error(LINKING_ERROR);
        },
      },
    );

// NativeEventEmitter is only available on React Native platforms, so this conditional is used to avoid import conflicts in the browser/server
const voiceEmitter =
  Platform.OS !== 'web' ? new NativeEventEmitter(Voice) : null;
type SpeechEvent = keyof SpeechEvents;
type TranscriptionEvent = keyof TranscriptionEvents;
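
// Illustrative note (not part of the original source): when the native
// module is missing, every native call made by the exported instance below
// reaches this Proxy's get() trap, so a call such as
//
//   try {
//     await Voice.isAvailable();
//   } catch (e) {
//     console.warn((e as Error).message); // prints the LINKING_ERROR checklist
//   }
//
// fails loudly on first use rather than silently at import time.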

class RCTVoice {
  private _loaded: boolean;
  private _listeners: EventSubscription[];
  private _events: Required<SpeechEvents> & Required<TranscriptionEvents>;
  private _isRecognizing: boolean;
  private _debounceTimeout: NodeJS.Timeout | null;
  private _lastResults: string[];
  private _recognitionStartTime: number;

  constructor() {
    this._loaded = false;
    this._listeners = [];
    this._isRecognizing = false;
    this._debounceTimeout = null;
    this._lastResults = [];
    this._recognitionStartTime = 0;
    this._events = {
      onSpeechStart: () => {},
      onSpeechRecognized: () => {},
      onSpeechEnd: () => {},
      onSpeechError: () => {},
      onSpeechResults: () => {},
      onSpeechPartialResults: () => {},
      onSpeechVolumeChanged: () => {},
      onTranscriptionStart: () => {},
      onTranscriptionEnd: () => {},
      onTranscriptionError: () => {},
      onTranscriptionResults: () => {},
    };
  }

  removeAllListeners() {
    if (this._listeners && this._listeners.length > 0) {
      this._listeners.forEach((listener) => {
        if (listener?.remove) {
          listener.remove();
        }
      });
      this._listeners = [];
    }
    if (this._debounceTimeout) {
      clearTimeout(this._debounceTimeout);
      this._debounceTimeout = null;
    }
    this._isRecognizing = false;
    this._lastResults = [];
  }

  destroy() {
    if (!this._loaded && this._listeners.length === 0) {
      return Promise.resolve();
    }
    return new Promise<void>((resolve, reject) => {
      Voice.destroySpeech((error: string) => {
        if (error) {
          reject(new Error(error));
        } else {
          this.removeAllListeners();
          this._loaded = false;
          resolve();
        }
      });
    });
  }
  destroyTranscription() {
    if (!this._loaded && this._listeners.length === 0) {
      return Promise.resolve();
    }
    return new Promise<void>((resolve, reject) => {
      Voice.destroyTranscription((error: string) => {
        if (error) {
          reject(new Error(error));
        } else {
          if (this._listeners?.length > 0) {
            this._listeners.forEach((listener) => listener.remove());
            this._listeners = [];
          }
          this._loaded = false;
          resolve();
        }
      });
    });
  }

  start(locale: string, options = {}) {
    // Lazily attach the native event listeners on first use.
    if (
      !this._loaded &&
      this._listeners.length === 0 &&
      voiceEmitter !== null
    ) {
      this._listeners = (Object.keys(this._events) as SpeechEvent[]).map(
        (key: SpeechEvent) => voiceEmitter.addListener(key, this._events[key]),
      );
    }

    this._recognitionStartTime = Date.now();
    this._isRecognizing = true;

    return new Promise<void>((resolve, reject) => {
      const callback = (error: string) => {
        if (error) {
          this._isRecognizing = false;
          this._recognitionStartTime = 0;
          reject(new Error(error));
        } else {
          resolve();
        }
      };
      if (Platform.OS === 'android') {
        Voice.startSpeech(
          locale,
          Object.assign(
            {
              EXTRA_LANGUAGE_MODEL: 'LANGUAGE_MODEL_FREE_FORM',
              EXTRA_MAX_RESULTS: 5,
              EXTRA_PARTIAL_RESULTS: true,
              REQUEST_PERMISSIONS_AUTO: true,
            },
            options,
          ),
          callback,
        );
      } else {
        Voice.startSpeech(locale, callback);
      }
    });
  }
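
  // Example (illustrative) for start(): assign handlers via the setters
  // defined below, then kick off recognition; on Android the second
  // argument merges over the defaults shown above.
  //
  //   Voice.onSpeechResults = (e) => console.log('final:', e.value);
  //   Voice.onSpeechPartialResults = (e) => console.log('partial:', e.value);
  //   await Voice.start('en-US', { EXTRA_MAX_RESULTS: 3 });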
  startTranscription(url: string, locale: string, options = {}) {
    if (!this._loaded && this._listeners.length === 0 && voiceEmitter !== null) {
      this._listeners = (Object.keys(this._events) as TranscriptionEvent[]).map(
        (key: TranscriptionEvent) =>
          voiceEmitter.addListener(key, this._events[key]),
      );
    }

    return new Promise<void>((resolve, reject) => {
      const callback = (error: string) => {
        if (error) {
          reject(new Error(error));
        } else {
          resolve();
        }
      };
      if (Platform.OS === 'android') {
        Voice.startTranscription(
          url,
          locale,
          Object.assign(
            {
              EXTRA_LANGUAGE_MODEL: 'LANGUAGE_MODEL_FREE_FORM',
              EXTRA_MAX_RESULTS: 5,
              EXTRA_PARTIAL_RESULTS: true,
              REQUEST_PERMISSIONS_AUTO: true,
            },
            options,
          ),
          callback,
        );
      } else {
        Voice.startTranscription(url, locale, callback);
      }
    });
  }
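
  // Example (illustrative) for startTranscription(): `fileUrl` is a
  // placeholder for a local audio file URL.
  //
  //   Voice.onTranscriptionResults = (e) => console.log(e);
  //   await Voice.startTranscription(fileUrl, 'en-US');
  //   // ...later, or on teardown:
  //   await Voice.stopTranscription();
  //   await Voice.destroyTranscription();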
  stop() {
    if (!this._loaded && this._listeners.length === 0) {
      return Promise.resolve();
    }
    this._isRecognizing = false;
    return new Promise<void>((resolve, reject) => {
      Voice.stopSpeech((error?: string) => {
        if (error) {
          reject(new Error(error));
        } else {
          resolve();
        }
      });
    });
  }
  stopTranscription() {
    if (!this._loaded && this._listeners.length === 0) {
      return Promise.resolve();
    }
    return new Promise<void>((resolve, reject) => {
      Voice.stopTranscription((error?: string) => {
        if (error) {
          reject(new Error(error));
        } else {
          resolve();
        }
      });
    });
  }
  cancel() {
    if (!this._loaded && this._listeners.length === 0) {
      return Promise.resolve();
    }
    return new Promise<void>((resolve, reject) => {
      Voice.cancelSpeech((error?: string) => {
        if (error) {
          reject(new Error(error));
        } else {
          resolve();
        }
      });
    });
  }
  cancelTranscription() {
    if (!this._loaded && this._listeners.length === 0) {
      return Promise.resolve();
    }
    return new Promise<void>((resolve, reject) => {
      Voice.cancelTranscription((error?: string) => {
        if (error) {
          reject(new Error(error));
        } else {
          resolve();
        }
      });
    });
  }
  isAvailable(): Promise<0 | 1> {
    return new Promise((resolve, reject) => {
      Voice.isSpeechAvailable((isAvailable: 0 | 1, error: string) => {
        if (error) {
          reject(new Error(error));
        } else {
          resolve(isAvailable);
        }
      });
    });
  }

  /**
   * (Android) Get a list of the speech recognition engines available on the device
   */
  getSpeechRecognitionServices() {
    if (Platform.OS !== 'android') {
      invariant(
        Voice,
        'Speech recognition services can be queried only on Android',
      );
      return;
    }

    return Voice.getSpeechRecognitionServices();
  }

  isRecognizing(): Promise<0 | 1> {
    return new Promise((resolve) => {
      Voice.isRecognizing((isRecognizing: 0 | 1) => {
        this._isRecognizing = isRecognizing === 1;
        resolve(isRecognizing);
      });
    });
  }

  /**
   * Check if speech recognition is currently in progress (synchronous)
   */
  get recognizing(): boolean {
    return this._isRecognizing;
  }

  /**
   * Get the last recognition results without triggering a new recognition
   */
  getLastResults(): string[] {
    return [...this._lastResults];
  }

  /**
   * Get recognition duration in milliseconds
   */
  getRecognitionDuration(): number {
    if (this._recognitionStartTime === 0) return 0;
    return Date.now() - this._recognitionStartTime;
  }

  /**
   * Request microphone permission (Android)
   */
  async requestMicrophonePermission(): Promise<boolean> {
    if (Platform.OS !== 'android') {
      return true; // iOS handles permissions automatically
    }

    try {
      const permission = PermissionsAndroid.PERMISSIONS.RECORD_AUDIO;
      if (!permission) {
        return false;
      }
      const granted = await PermissionsAndroid.request(permission, {
        title: 'Microphone Permission',
        message:
          'This app needs access to your microphone for voice recognition',
        buttonNeutral: 'Ask Me Later',
        buttonNegative: 'Cancel',
        buttonPositive: 'OK',
      });
      return granted === PermissionsAndroid.RESULTS.GRANTED;
    } catch (err) {
      console.warn('Error requesting microphone permission:', err);
      return false;
    }
  }

  /**
   * Check microphone permission status (Android)
   */
  async checkMicrophonePermission(): Promise<boolean> {
    if (Platform.OS !== 'android') {
      return true;
    }

    try {
      const permission = PermissionsAndroid.PERMISSIONS.RECORD_AUDIO;
      if (!permission) {
        return false;
      }
      const granted = await PermissionsAndroid.check(permission);
      return granted;
    } catch (err) {
      console.warn('Error checking microphone permission:', err);
      return false;
    }
  }
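
  // Example (illustrative): gating start() behind the permission helpers
  // above on Android.
  //
  //   const ok =
  //     (await Voice.checkMicrophonePermission()) ||
  //     (await Voice.requestMicrophonePermission());
  //   if (ok) await Voice.start('en-US');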

  /**
   * Get supported languages for speech recognition
   * (currently returns an empty list; requires native support)
   */
  async getSupportedLanguages(): Promise<string[]> {
    try {
      if (Platform.OS === 'android') {
        // Android implementation would need native support
        return [];
      }
      // iOS implementation would need native support
      return [];
    } catch (error) {
      console.warn('Error getting supported languages:', error);
      return [];
    }
  }

  /**
   * Debounce results to avoid excessive updates
   * Reserved for future use
   * @private
   */
  private debounceResults(
    callback: (results: string[]) => void,
    results: string[],
    delay: number = 300,
  ): void {
    if (this._debounceTimeout) {
      clearTimeout(this._debounceTimeout);
    }
    this._debounceTimeout = setTimeout(() => {
      callback(results);
      this._debounceTimeout = null;
    }, delay);
  }

  set onSpeechStart(fn: (e: SpeechStartEvent) => void) {
    this._events.onSpeechStart = (e: SpeechStartEvent) => {
      this._recognitionStartTime = Date.now();
      this._isRecognizing = true;
      fn(e);
    };
  }

  set onTranscriptionStart(fn: (e: TranscriptionStartEvent) => void) {
    this._events.onTranscriptionStart = fn;
  }

  set onSpeechRecognized(fn: (e: SpeechRecognizedEvent) => void) {
    this._events.onSpeechRecognized = fn;
  }

  set onSpeechEnd(fn: (e: SpeechEndEvent) => void) {
    this._events.onSpeechEnd = (e: SpeechEndEvent) => {
      this._isRecognizing = false;
      fn(e);
    };
  }

  set onTranscriptionEnd(fn: (e: TranscriptionEndEvent) => void) {
    this._events.onTranscriptionEnd = fn;
  }
  set onSpeechError(fn: (e: SpeechErrorEvent) => void) {
    this._events.onSpeechError = (e: SpeechErrorEvent) => {
      this._isRecognizing = false;
      fn(e);
    };
  }

  set onTranscriptionError(fn: (e: TranscriptionErrorEvent) => void) {
    this._events.onTranscriptionError = fn;
  }

  set onSpeechResults(fn: (e: SpeechResultsEvent) => void) {
    this._events.onSpeechResults = (e: SpeechResultsEvent) => {
      if (e.value) {
        this._lastResults = e.value;
      }
      this._isRecognizing = false;
      fn(e);
    };
  }

  set onTranscriptionResults(fn: (e: TranscriptionResultsEvent) => void) {
    this._events.onTranscriptionResults = fn;
  }

  set onSpeechPartialResults(fn: (e: SpeechResultsEvent) => void) {
    this._events.onSpeechPartialResults = fn;
  }
  set onSpeechVolumeChanged(fn: (e: SpeechVolumeChangeEvent) => void) {
    this._events.onSpeechVolumeChanged = fn;
  }
}

export type {
  SpeechEndEvent,
  SpeechErrorEvent,
  SpeechEvents,
  SpeechStartEvent,
  SpeechRecognizedEvent,
  SpeechResultsEvent,
  SpeechVolumeChangeEvent,
  TranscriptionEndEvent,
  TranscriptionErrorEvent,
  TranscriptionEvents,
  TranscriptionStartEvent,
  TranscriptionResultsEvent,
};

export type {
  VoiceOptions,
  RecognitionStats,
  PermissionResult,
  Language,
} from './VoiceUtilTypes';

export default new RCTVoice();
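
For orientation, a minimal consumer of the exported singleton might look like the sketch below. The component, its state, and the 'en-US' locale are illustrative assumptions, not part of the package; only the Voice methods and the SpeechResultsEvent type come from the file above.

import React, { useEffect, useState } from 'react';
import { Button, Text, View } from 'react-native';
import Voice, { type SpeechResultsEvent } from 'react-native-voice-ts';

export function DictationScreen() {
  const [words, setWords] = useState<string[]>([]);

  useEffect(() => {
    // Final results arrive as an array of candidate strings.
    Voice.onSpeechResults = (e: SpeechResultsEvent) => setWords(e.value ?? []);
    // Release native resources and detach listeners on unmount.
    return () => {
      Voice.destroy().catch(() => {});
    };
  }, []);

  return (
    <View>
      <Button title="Start" onPress={() => Voice.start('en-US')} />
      <Button title="Stop" onPress={() => Voice.stop()} />
      <Text>{words.join(' ')}</Text>
    </View>
  );
}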