@iternio/react-native-tts 4.1.2 → 4.1.3

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/android/src/main/java/net/no_mad/tts/TextToSpeechModule.java CHANGED
@@ -1,19 +1,28 @@
  package net.no_mad.tts;

+ import android.content.ActivityNotFoundException;
+ import android.content.Intent;
+ import android.content.pm.PackageInfo;
+ import android.content.pm.PackageManager;
+ import android.content.pm.PackageManager.NameNotFoundException;
+ import android.media.AudioAttributes;
+ import android.media.AudioFocusRequest;
  import android.media.AudioManager;
+ import android.net.Uri;
  import android.os.Build;
  import android.os.Bundle;
- import android.content.Intent;
- import android.content.ActivityNotFoundException;
- import android.app.Activity;
- import android.net.Uri;
  import android.speech.tts.TextToSpeech;
  import android.speech.tts.UtteranceProgressListener;
  import android.speech.tts.Voice;
- import android.content.pm.PackageInfo;
- import android.content.pm.PackageManager;
- import android.content.pm.PackageManager.NameNotFoundException;
- import com.facebook.react.bridge.*;
+
+ import com.facebook.react.bridge.Arguments;
+ import com.facebook.react.bridge.Promise;
+ import com.facebook.react.bridge.ReactApplicationContext;
+ import com.facebook.react.bridge.ReactContextBaseJavaModule;
+ import com.facebook.react.bridge.ReactMethod;
+ import com.facebook.react.bridge.ReadableMap;
+ import com.facebook.react.bridge.WritableArray;
+ import com.facebook.react.bridge.WritableMap;
  import com.facebook.react.modules.core.DeviceEventManagerModule;

  import java.util.ArrayList;
@@ -29,14 +38,21 @@ public class TextToSpeechModule extends ReactContextBaseJavaModule {

  private boolean ducking = false;
  private AudioManager audioManager;
- private AudioManager.OnAudioFocusChangeListener afChangeListener;
+ private AudioManager.OnAudioFocusChangeListener afChangeListener = i -> {};

  private Map<String, Locale> localeCountryMap;
  private Map<String, Locale> localeLanguageMap;

+ private AudioAttributes audioAttributes;
+
  public TextToSpeechModule(ReactApplicationContext reactContext) {
      super(reactContext);
      audioManager = (AudioManager) reactContext.getApplicationContext().getSystemService(reactContext.AUDIO_SERVICE);
+     audioAttributes = new AudioAttributes.Builder()
+         .setContentType(AudioAttributes.CONTENT_TYPE_SPEECH)
+         .setUsage(AudioAttributes.USAGE_ASSISTANCE_NAVIGATION_GUIDANCE)
+         .build();
+
      initStatusPromises = new ArrayList<Promise>();
      //initialize ISO3, ISO2 languague country code mapping.
      initCountryLanguageCodeMapping();
@@ -97,6 +113,7 @@ public class TextToSpeechModule extends ReactContextBaseJavaModule {
      params.putInt("start", start);
      params.putInt("end", end);
      params.putInt("frame", frame);
+     params.putInt("length", end - start);
      sendEvent("tts-progress", params);
  }
  });
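
The progress callback now also forwards a precomputed span length alongside start/end/frame. A minimal consumer sketch in TypeScript, assuming a default import named Tts from this package and the addListener API inherited from RN.NativeEventEmitter; the field names are taken from the params written above:

    import Tts from '@iternio/react-native-tts';

    // Track speaking progress; `length` is simply end - start, computed natively.
    Tts.addListener('tts-progress', (event: { start: number; end: number; frame: number; length: number }) => {
      console.log(`speaking chars ${event.start}-${event.end} (${event.length} long)`);
    });
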
@@ -210,12 +227,23 @@ public class TextToSpeechModule extends ReactContextBaseJavaModule {
      if(notReady(promise)) return;

      if(ducking) {
+         int amResult;
          // Request audio focus for playback
-         int amResult = audioManager.requestAudioFocus(afChangeListener,
-             // Use the music stream.
-             AudioManager.STREAM_MUSIC,
-             // Request permanent focus.
-             AudioManager.AUDIOFOCUS_GAIN_TRANSIENT_MAY_DUCK);
+         if (android.os.Build.VERSION.SDK_INT >= android.os.Build.VERSION_CODES.O) {
+             AudioFocusRequest audioFocusRequest = new AudioFocusRequest.Builder(AudioManager.AUDIOFOCUS_GAIN_TRANSIENT_MAY_DUCK)
+                 .setAudioAttributes(audioAttributes)
+                 .setAcceptsDelayedFocusGain(false)
+                 .setOnAudioFocusChangeListener(afChangeListener)
+                 .build();
+
+             amResult = audioManager.requestAudioFocus(audioFocusRequest);
+         } else {
+             amResult = audioManager.requestAudioFocus(afChangeListener,
+                 // Use the music stream.
+                 AudioManager.STREAM_MUSIC,
+                 // Request permanent focus.
+                 AudioManager.AUDIOFOCUS_GAIN_TRANSIENT_MAY_DUCK);
+         }

      if(amResult != AudioManager.AUDIOFOCUS_REQUEST_GRANTED) {
          promise.reject("Android AudioManager error, failed to request audio focus");
@@ -311,6 +339,18 @@ public class TextToSpeechModule extends ReactContextBaseJavaModule {
      }
  }

+ @ReactMethod
+ public void getDefaultVoiceIdentifier(String language, Promise promise) {
+     if(notReady(promise)) return;
+
+     Voice currentVoice = tts.getVoice();
+     if (currentVoice == null) {
+         promise.reject("not_found", "Language not found");
+         return;
+     }
+     promise.resolve(currentVoice.getName());
+ }
+
  @ReactMethod
  public void voices(Promise promise) {
      if(notReady(promise)) return;
@@ -498,6 +538,8 @@ public class TextToSpeechModule extends ReactContextBaseJavaModule {
          audioStreamType = AudioManager.USE_DEFAULT_STREAM_TYPE;
      }

+     tts.setAudioAttributes(audioAttributes);
+
      if (Build.VERSION.SDK_INT >= 21) {
          Bundle params = new Bundle();
          params.putInt(TextToSpeech.Engine.KEY_PARAM_STREAM, audioStreamType);
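
Taken together, the Android changes route speech through navigation-guidance AudioAttributes and, on API 26+, request focus via AudioFocusRequest instead of the deprecated stream-based call. Nothing changes on the JavaScript side; ducking is still opted into through the existing API. A hedged usage sketch (speakOverMusic is a hypothetical helper; setDucking and speak are used as typed in this package):

    import Tts from '@iternio/react-native-tts';

    // With ducking on, the module asks for AUDIOFOCUS_GAIN_TRANSIENT_MAY_DUCK
    // before each utterance, so background music is lowered rather than stopped.
    async function speakOverMusic(prompt: string) {
      await Tts.setDucking(true); // opt in once, e.g. at app startup
      return Tts.speak(prompt);   // resolves with the utterance id
    }
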
package/index.d.ts CHANGED
@@ -62,7 +62,7 @@ export type Engine = {

  export type AndroidOptions = {
    /** Parameter key to specify the audio stream type to be used when speaking text or playing back a file */
-   KEY_PARAM_STREAM:
+   KEY_PARAM_STREAM?:
      | "STREAM_VOICE_CALL"
      | "STREAM_SYSTEM"
      | "STREAM_RING"
@@ -72,17 +72,17 @@ export type AndroidOptions = {
      | "STREAM_DTMF"
      | "STREAM_ACCESSIBILITY";
    /** Parameter key to specify the speech volume relative to the current stream type volume used when speaking text. Volume is specified as a float ranging from 0 to 1 where 0 is silence, and 1 is the maximum volume (the default behavior). */
-   KEY_PARAM_VOLUME: number;
+   KEY_PARAM_VOLUME?: number;
    /** Parameter key to specify how the speech is panned from left to right when speaking text. Pan is specified as a float ranging from -1 to +1 where -1 maps to a hard-left pan, 0 to center (the default behavior), and +1 to hard-right. */
-   KEY_PARAM_PAN: number;
+   KEY_PARAM_PAN?: number;
  };

  export type Options =
    | string
    | {
-       iosVoiceId: string;
-       rate: number;
-       androidParams: AndroidOptions;
+       iosVoiceId?: string;
+       rate?: number;
+       androidParams?: AndroidOptions;
      };

  export class ReactNativeTts extends RN.NativeEventEmitter {
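
With every field of Options and AndroidOptions now optional, callers can pass just the parameters they care about; previously a bare partial object would not type-check. For example:

    import Tts from '@iternio/react-native-tts';

    // Any subset of the options object now type-checks.
    Tts.speak('Turn left in 200 meters', {
      rate: 0.5,                                // iosVoiceId omitted
      androidParams: { KEY_PARAM_VOLUME: 0.9 }, // other KEY_PARAM_* keys omitted
    });
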
@@ -97,9 +97,10 @@ export class ReactNativeTts extends RN.NativeEventEmitter {
    setDefaultLanguage: (language: string) => Promise<"success">;
    setIgnoreSilentSwitch: (ignoreSilentSwitch: IOSSilentSwitchBehavior) => Promise<boolean>;
    voices: () => Promise<Voice[]>;
+   getDefaultVoiceIdentifier: (language: String) => Promise<String>;
    engines: () => Promise<Engine[]>;
    /** Read the sentence and return an id for the task. */
-   speak: (utterance: string, options?: Options) => string | number;
+   speak: (utterance: string, options?: Options) => Promise<string | number>;
    stop: (onWordBoundary?: boolean) => Promise<boolean>;
    pause: (onWordBoundary?: boolean) => Promise<boolean>;
    resume: () => Promise<boolean>;
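
The speak typing now matches the asynchronous native call: it resolves with the task id rather than returning one synchronously. A small sketch, assuming the promise resolves once the utterance has been handed to the engine:

    import Tts from '@iternio/react-native-tts';

    async function speakAndTrack(text: string) {
      const utteranceId = await Tts.speak(text); // string | number
      console.log('queued utterance', utteranceId);
      return utteranceId;
    }
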
package/index.js CHANGED
@@ -69,6 +69,10 @@ class Tts extends NativeEventEmitter {
      return TextToSpeech.voices();
    }

+   getDefaultVoiceIdentifier(language) {
+     return TextToSpeech.getDefaultVoiceIdentifier(language);
+   }
+
    engines() {
      if (Platform.OS === 'ios' || Platform.OS === 'windows') {
        return Promise.resolve([]);
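
getDefaultVoiceIdentifier is now exposed on both platforms: iOS resolves the identifier of the default voice for the requested language, while the Android implementation above ignores the language argument and reports the engine's current voice name; both reject with "not_found" when nothing matches. A hedged sketch (pinDefaultVoice is a hypothetical helper, and setDefaultVoice is assumed from this package's existing API):

    import Tts from '@iternio/react-native-tts';

    async function pinDefaultVoice(language: string) {
      try {
        const voiceId = await Tts.getDefaultVoiceIdentifier(language);
        await Tts.setDefaultVoice(voiceId); // keep using this voice explicitly
        return voiceId;
      } catch {
        return null; // rejected with "not_found"
      }
    }
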
package/ios/TextToSpeech/TextToSpeech.m CHANGED
@@ -141,14 +141,14 @@ RCT_EXPORT_METHOD(setDucking:(bool *)ducking
      if(ducking) {
          AVAudioSession *session = [AVAudioSession sharedInstance];
          [session setCategory:AVAudioSessionCategoryPlayback
-                  withOptions:AVAudioSessionCategoryOptionDuckOthers
+                         mode:AVAudioSessionModeVoicePrompt
+                      options:(AVAudioSessionCategoryOptionDuckOthers|AVAudioSessionCategoryOptionInterruptSpokenAudioAndMixWithOthers)
                        error:nil];
      }

      resolve(@"success");
  }

-
  RCT_EXPORT_METHOD(setDefaultLanguage:(NSString *)language
                    resolve:(RCTPromiseResolveBlock)resolve
                    reject:(RCTPromiseRejectBlock)reject)
@@ -212,18 +212,40 @@ RCT_EXPORT_METHOD(setIgnoreSilentSwitch:(NSString *)ignoreSilentSwitch
      }
  }

+ RCT_EXPORT_METHOD(getDefaultVoiceIdentifier:(NSString *)language
+                   resolve:(RCTPromiseResolveBlock)resolve
+                   reject:(RCTPromiseRejectBlock)reject)
+ {
+     AVSpeechSynthesisVoice *voice = [AVSpeechSynthesisVoice voiceWithLanguage:language];
+
+     if(voice) {
+         resolve(voice.identifier);
+     } else {
+         reject(@"not_found", @"Language not found", nil);
+     }
+ }
+
  RCT_EXPORT_METHOD(voices:(RCTPromiseResolveBlock)resolve
                    reject:(__unused RCTPromiseRejectBlock)reject)
  {
      NSMutableArray *voices = [NSMutableArray new];

      for (AVSpeechSynthesisVoice *voice in [AVSpeechSynthesisVoice speechVoices]) {
-         [voices addObject:@{
-             @"id": voice.identifier,
-             @"name": voice.name,
-             @"language": voice.language,
-             @"quality": (voice.quality == AVSpeechSynthesisVoiceQualityEnhanced) ? @500 : @300
-         }];
+         if (@available(iOS 16.0, *)) {
+             [voices addObject:@{
+                 @"id": voice.identifier,
+                 @"name": voice.name,
+                 @"language": voice.language,
+                 @"quality": (voice.quality == AVSpeechSynthesisVoiceQualityEnhanced) ? @500 : (voice.quality == AVSpeechSynthesisVoiceQualityPremium) ? @800 : @300
+             }];
+         } else {
+             [voices addObject:@{
+                 @"id": voice.identifier,
+                 @"name": voice.name,
+                 @"language": voice.language,
+                 @"quality": (voice.quality == AVSpeechSynthesisVoiceQualityEnhanced) ? @500 : @300
+             }];
+         }
      }

      resolve(voices);
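
On iOS 16+, premium voices are now reported with quality 800, above enhanced (500) and default (300), so consumers can rank voices numerically. A sketch, assuming the Voice shape returned by voices() includes the language and quality fields built above:

    import Tts from '@iternio/react-native-tts';

    // Pick the highest-quality voice for a language (premium > enhanced > default).
    async function bestVoiceFor(language: string) {
      const voices = await Tts.voices();
      return voices
        .filter((v) => v.language === language)
        .sort((a, b) => b.quality - a.quality)[0]; // undefined if none match
    }
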
@@ -232,7 +254,9 @@ RCT_EXPORT_METHOD(voices:(RCTPromiseResolveBlock)resolve
  -(void)speechSynthesizer:(AVSpeechSynthesizer *)synthesizer didStartSpeechUtterance:(AVSpeechUtterance *)utterance
  {
      if(_ducking) {
-         [[AVAudioSession sharedInstance] setActive:true error:nil];
+         dispatch_async(dispatch_get_global_queue(DISPATCH_QUEUE_PRIORITY_DEFAULT, 0), ^{
+             [[AVAudioSession sharedInstance] setActive:true error:nil];
+         });
      }

      [self sendEventWithName:@"tts-start" body:@{@"utteranceId":[NSNumber numberWithUnsignedLong:utterance.hash]}];
@@ -241,7 +265,9 @@ RCT_EXPORT_METHOD(voices:(RCTPromiseResolveBlock)resolve
  -(void)speechSynthesizer:(AVSpeechSynthesizer *)synthesizer didFinishSpeechUtterance:(AVSpeechUtterance *)utterance
  {
      if(_ducking) {
-         [[AVAudioSession sharedInstance] setActive:false error:nil];
+         dispatch_async(dispatch_get_global_queue(DISPATCH_QUEUE_PRIORITY_DEFAULT, 0), ^{
+             [[AVAudioSession sharedInstance] setActive:false error:nil];
+         });
      }

      [self sendEventWithName:@"tts-finish" body:@{@"utteranceId":[NSNumber numberWithUnsignedLong:utterance.hash]}];
@@ -250,7 +276,9 @@ RCT_EXPORT_METHOD(voices:(RCTPromiseResolveBlock)resolve
  -(void)speechSynthesizer:(AVSpeechSynthesizer *)synthesizer didPauseSpeechUtterance:(AVSpeechUtterance *)utterance
  {
      if(_ducking) {
-         [[AVAudioSession sharedInstance] setActive:false error:nil];
+         dispatch_async(dispatch_get_global_queue(DISPATCH_QUEUE_PRIORITY_DEFAULT, 0), ^{
+             [[AVAudioSession sharedInstance] setActive:false error:nil];
+         });
      }

      [self sendEventWithName:@"tts-pause" body:@{@"utteranceId":[NSNumber numberWithUnsignedLong:utterance.hash]}];
@@ -259,7 +287,9 @@ RCT_EXPORT_METHOD(voices:(RCTPromiseResolveBlock)resolve
  -(void)speechSynthesizer:(AVSpeechSynthesizer *)synthesizer didContinueSpeechUtterance:(AVSpeechUtterance *)utterance
  {
      if(_ducking) {
-         [[AVAudioSession sharedInstance] setActive:true error:nil];
+         dispatch_async(dispatch_get_global_queue(DISPATCH_QUEUE_PRIORITY_DEFAULT, 0), ^{
+             [[AVAudioSession sharedInstance] setActive:true error:nil];
+         });
      }

      [self sendEventWithName:@"tts-resume" body:@{@"utteranceId":[NSNumber numberWithUnsignedLong:utterance.hash]}];
@@ -276,7 +306,9 @@ RCT_EXPORT_METHOD(voices:(RCTPromiseResolveBlock)resolve
  -(void)speechSynthesizer:(AVSpeechSynthesizer *)synthesizer didCancelSpeechUtterance:(AVSpeechUtterance *)utterance
  {
      if(_ducking) {
-         [[AVAudioSession sharedInstance] setActive:false error:nil];
+         dispatch_async(dispatch_get_global_queue(DISPATCH_QUEUE_PRIORITY_DEFAULT, 0), ^{
+             [[AVAudioSession sharedInstance] setActive:false error:nil];
+         });
      }

      [self sendEventWithName:@"tts-cancel" body:@{@"utteranceId":[NSNumber numberWithUnsignedLong:utterance.hash]}];
package/package.json CHANGED
@@ -1,6 +1,6 @@
  {
      "name": "@iternio/react-native-tts",
-     "version": "4.1.2",
+     "version": "4.1.3",
      "description": "React Native Text-To-Speech module for Android and iOS",
      "main": "index.js",
      "repository": {