react-native-davoice-tts 1.0.345 → 1.0.346

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (22)
  1. package/TTSRNBridge.podspec +1 -1
  2. package/android/libs/com/davoice/tts/1.0.0/tts-1.0.0.aar +0 -0
  3. package/android/libs/com/davoice/tts/1.0.0/tts-1.0.0.aar.md5 +1 -1
  4. package/android/libs/com/davoice/tts/1.0.0/tts-1.0.0.aar.sha1 +1 -1
  5. package/android/src/main/java/com/davoice/stt/rn/STTModule.kt +3 -0
  6. package/ios/TTSRNBridge/DaVoiceTTSBridge.m +24 -1
  7. package/ios/TTSRNBridge/DavoiceTTS.xcframework/ios-arm64/DavoiceTTS.framework/DavoiceTTS +0 -0
  8. package/ios/TTSRNBridge/DavoiceTTS.xcframework/ios-arm64/DavoiceTTS.framework/Modules/DavoiceTTS.swiftmodule/arm64-apple-ios.abi.json +8276 -8256
  9. package/ios/TTSRNBridge/DavoiceTTS.xcframework/ios-arm64/DavoiceTTS.framework/Modules/DavoiceTTS.swiftmodule/arm64-apple-ios.private.swiftinterface +57 -57
  10. package/ios/TTSRNBridge/DavoiceTTS.xcframework/ios-arm64/DavoiceTTS.framework/Modules/DavoiceTTS.swiftmodule/arm64-apple-ios.swiftinterface +57 -57
  11. package/ios/TTSRNBridge/DavoiceTTS.xcframework/ios-arm64_x86_64-simulator/DavoiceTTS.framework/DavoiceTTS +0 -0
  12. package/ios/TTSRNBridge/DavoiceTTS.xcframework/ios-arm64_x86_64-simulator/DavoiceTTS.framework/Modules/DavoiceTTS.swiftmodule/arm64-apple-ios-simulator.abi.json +5766 -5746
  13. package/ios/TTSRNBridge/DavoiceTTS.xcframework/ios-arm64_x86_64-simulator/DavoiceTTS.framework/Modules/DavoiceTTS.swiftmodule/arm64-apple-ios-simulator.private.swiftinterface +63 -63
  14. package/ios/TTSRNBridge/DavoiceTTS.xcframework/ios-arm64_x86_64-simulator/DavoiceTTS.framework/Modules/DavoiceTTS.swiftmodule/arm64-apple-ios-simulator.swiftinterface +63 -63
  15. package/ios/TTSRNBridge/DavoiceTTS.xcframework/ios-arm64_x86_64-simulator/DavoiceTTS.framework/Modules/DavoiceTTS.swiftmodule/x86_64-apple-ios-simulator.abi.json +5766 -5746
  16. package/ios/TTSRNBridge/DavoiceTTS.xcframework/ios-arm64_x86_64-simulator/DavoiceTTS.framework/Modules/DavoiceTTS.swiftmodule/x86_64-apple-ios-simulator.private.swiftinterface +63 -63
  17. package/ios/TTSRNBridge/DavoiceTTS.xcframework/ios-arm64_x86_64-simulator/DavoiceTTS.framework/Modules/DavoiceTTS.swiftmodule/x86_64-apple-ios-simulator.swiftinterface +63 -63
  18. package/ios/TTSRNBridge/DavoiceTTS.xcframework/ios-arm64_x86_64-simulator/DavoiceTTS.framework/_CodeSignature/CodeDirectory +0 -0
  19. package/ios/TTSRNBridge/DavoiceTTS.xcframework/ios-arm64_x86_64-simulator/DavoiceTTS.framework/_CodeSignature/CodeRequirements-1 +0 -0
  20. package/ios/TTSRNBridge/DavoiceTTS.xcframework/ios-arm64_x86_64-simulator/DavoiceTTS.framework/_CodeSignature/CodeResources +24 -24
  21. package/package.json +1 -1
  22. package/speech/index.ts +83 -17
package/speech/index.ts CHANGED
@@ -35,6 +35,7 @@ function sleep(ms: number) {
35
35
  }
36
36
 
37
37
  let speechOpSeq = 0;
38
+ const PROBABLY_GPT_MISTAKE = false;
38
39
  function nextSpeechOpId(prefix: string) {
39
40
  speechOpSeq += 1;
40
41
  return `${prefix}-${speechOpSeq}-${Date.now()}`;
@@ -157,9 +158,22 @@ class Speech {
157
158
  }
158
159
 
159
160
  private _nativeSpeak(text: string, speakerId: number, s: number) {
160
- if (Platform.OS === 'ios' && NativeSpeech?.speak) return (NativeSpeech as any).speak(text, speakerId, s);
161
- if (!NativeTTS?.speak) throw new Error('TTS speak not available');
162
- return (NativeTTS as any).speak(text, speakerId, s);
161
+ const nativeTtsSpeak = () => {
162
+ if (!NativeTTS?.speak) throw new Error('TTS speak not available');
163
+ if (Platform.OS === 'ios') {
164
+ if ((NativeTTS as any).speakWithSpeed) return (NativeTTS as any).speakWithSpeed(text, speakerId, s);
165
+ return (NativeTTS as any).speak(text, speakerId);
166
+ }
167
+ return (NativeTTS as any).speak(text, speakerId, s);
168
+ };
169
+
170
+ if (Platform.OS === 'ios') {
171
+ if (this.iosTtsOnly && NativeTTS?.speak) return nativeTtsSpeak();
172
+ if (this.hasCompletedFullInit && NativeSpeech?.speak) return (NativeSpeech as any).speak(text, speakerId, s);
173
+ if (this.hasCompletedTtsInit && NativeTTS?.speak) return nativeTtsSpeak();
174
+ if (NativeSpeech?.speak) return (NativeSpeech as any).speak(text, speakerId, s);
175
+ }
176
+ return nativeTtsSpeak();
163
177
  }
164
178
 
165
179
  private enqueueSttTransition(opId: string, kind: 'pause' | 'unpause', work: () => Promise<void>): Promise<void> {
@@ -209,6 +223,7 @@ class Speech {
209
223
  private lastModel: string | null = null;
210
224
  private lastOnboardingJsonPath: string | null = null;
211
225
  private hasCompletedFullInit = false;
226
+ private hasCompletedTtsInit = false;
212
227
  private iosTtsOnly = false; // when true, use NativeTTS directly on iOS
213
228
 
214
229
 
@@ -303,9 +318,12 @@ class Speech {
303
318
  // if iOS unified + NOT tts-only -> use unified emitter
304
319
  if (Platform.OS === 'ios' && NativeSpeech && !this.iosTtsOnly) {
305
320
  this.unifiedEmitter = new NativeEventEmitter(NativeSpeech);
321
+ this.sttEmitter = null;
322
+ this.ttsEmitter = null;
306
323
  // unified handles both STT + TTS events
307
324
  } else {
308
325
  // fallback: separate emitters
326
+ this.unifiedEmitter = null;
309
327
  if (NativeSTT) this.sttEmitter = new NativeEventEmitter(NativeSTT);
310
328
  if (Platform.OS === 'android') this.ttsEmitter = DeviceEventEmitter;
311
329
  else if (NativeTTS) this.ttsEmitter = new NativeEventEmitter(NativeTTS);
@@ -414,11 +432,15 @@ class Speech {
414
432
  this.lastOnboardingJsonPath = opts.onboardingJsonPath ?? null;
415
433
 
416
434
  if (Platform.OS === 'ios' && NativeSpeech?.initAll) {
435
+ if (this.iosTtsOnly) {
436
+ try { await NativeTTS?.destroy?.(); } catch {}
437
+ this.hasCompletedTtsInit = false;
438
+ }
417
439
  this.iosTtsOnly = false; // full unified mode
418
- this.teardownListeners(); // re-wire listeners for unified
419
440
  const r = await NativeSpeech.initAll({ ...opts, model: modelPath });
420
441
  this.hasCompletedFullInit = true;
421
- this.ensureListeners();
442
+ this.hasCompletedTtsInit = true;
443
+ this.rewireListenersForMode();
422
444
  return r;
423
445
  }
424
446
 
@@ -468,6 +490,7 @@ class Speech {
468
490
  console.log('[MODELDBG] initAll.modelExt (resolved)=', modelExt);
469
491
  await NativeTTS.initTTS({ model: modelPath, modelExt });
470
492
  this.hasCompletedFullInit = true;
493
+ this.hasCompletedTtsInit = true;
471
494
  }
472
495
 
473
496
  async destroyAll() {
@@ -475,10 +498,21 @@ class Speech {
475
498
  this.ttsChain = Promise.resolve();
476
499
  this.wavChain = Promise.resolve();
477
500
 
501
+ // iOS TTS-only mode: initTTS() used the standalone TTS bridge, so do not
502
+ // tear down SpeechBridge/STT state that was never started by this path.
503
+ if (Platform.OS === 'ios' && this.iosTtsOnly && !this.hasCompletedFullInit) {
504
+ try { await NativeTTS?.destroy?.(); } catch {}
505
+ this.hasCompletedTtsInit = false;
506
+ this.iosTtsOnly = false;
507
+ this.teardownListeners();
508
+ return 'Destroyed';
509
+ }
510
+
478
511
  // iOS unified
479
512
  if (Platform.OS === 'ios' && NativeSpeech?.destroyAll) {
480
513
  const r = await NativeSpeech.destroyAll();
481
514
  this.hasCompletedFullInit = false;
515
+ this.hasCompletedTtsInit = false;
482
516
  this.iosTtsOnly = false;
483
517
  this.lastLocale = this.lastLocale ?? null;
484
518
  this.teardownListeners();
@@ -493,6 +527,7 @@ class Speech {
493
527
  });
494
528
  } catch {}
495
529
  this.hasCompletedFullInit = false;
530
+ this.hasCompletedTtsInit = false;
496
531
  this.teardownListeners();
497
532
  return 'Destroyed';
498
533
  }
@@ -643,22 +678,21 @@ class Speech {
643
678
  // TODO: CHECK THE NATIVE SIDE DOES NOT REALLY AWAITS
644
679
  async pauseSpeechRecognition(): Promise<void> {
645
680
  this.logCall('pauseSpeechRecognitionLite');
646
- const opId = nextSpeechOpId('pause-stt');
647
681
  const startedAt = Date.now();
648
682
 
649
683
  const mod: any = Platform.OS === 'ios' ? NativeSpeech : NativeSTT;
650
684
  const fn = mod?.pauseSpeechRecognitionLite;
651
685
 
652
- await this.enqueueSttTransition(opId, 'pause', async () => {
686
+ const work = async (opId?: string) => {
653
687
  if (!fn) {
654
688
  dbg(`pauseSpeechRecognitionLite not available on ${Platform.OS === 'ios' ? 'NativeSpeech' : 'NativeSTT'}`);
655
689
  return;
656
690
  }
657
691
 
658
692
  if (Platform.OS === 'ios' && typeof mod?.pauseSpeechRecognitionLiteAsync === 'function') {
659
- dbg('[pauseSpeechRecognitionLiteAsync] begin', { opId, timeoutMs: 1500 });
693
+ dbg('[pauseSpeechRecognitionLiteAsync] begin', { ...(opId ? { opId } : {}), timeoutMs: 1500 });
660
694
  const result = await mod.pauseSpeechRecognitionLiteAsync(1500);
661
- dbg('[pauseSpeechRecognitionLiteAsync] resolved', { opId, elapsedMs: Date.now() - startedAt, result });
695
+ dbg('[pauseSpeechRecognitionLiteAsync] resolved', { ...(opId ? { opId } : {}), elapsedMs: Date.now() - startedAt, result });
662
696
  if (result?.ok === false) dbgErr('pauseSpeechRecognitionLiteAsync failed', result?.reason);
663
697
  return;
664
698
  }
@@ -688,7 +722,15 @@ class Speech {
688
722
  reject(e as any);
689
723
  }
690
724
  });
691
- });
725
+ };
726
+
727
+ if (PROBABLY_GPT_MISTAKE) {
728
+ const opId = nextSpeechOpId('pause-stt');
729
+ await this.enqueueSttTransition(opId, 'pause', () => work(opId));
730
+ return;
731
+ }
732
+
733
+ await work();
692
734
  }
693
735
 
694
736
  async unPauseSpeechRecognition(
@@ -697,23 +739,22 @@ class Speech {
697
739
  timeoutMs: number = 2500,
698
740
  ): Promise<void> {
699
741
  this.logCall('unPauseSpeechRecognitionLite', { times, preFetchMs });
700
- const opId = nextSpeechOpId('unpause-stt');
701
742
  const startedAt = Date.now();
702
743
 
703
744
  const mod: any = Platform.OS === 'ios' ? NativeSpeech : NativeSTT;
704
745
  const fn = mod?.unPauseSpeechRecognitionLite;
705
746
 
706
- await this.enqueueSttTransition(opId, 'unpause', async () => {
747
+ const work = async (opId?: string) => {
707
748
  if (!fn) {
708
749
  dbg(`unPauseSpeechRecognitionLite(times) not available on ${Platform.OS === 'ios' ? 'NativeSpeech' : 'NativeSTT'}`);
709
750
  return;
710
751
  }
711
752
 
712
753
  if (Platform.OS === 'ios' && typeof mod?.unPauseSpeechRecognitionLiteAsync === 'function') {
713
- dbg('[unPauseSpeechRecognitionLiteAsync] begin', { opId, times, preFetchMs, timeoutMs });
754
+ dbg('[unPauseSpeechRecognitionLiteAsync] begin', { ...(opId ? { opId } : {}), times, preFetchMs, timeoutMs });
714
755
  const result = await mod.unPauseSpeechRecognitionLiteAsync(times, preFetchMs, timeoutMs);
715
756
  dbg('[unPauseSpeechRecognitionLiteAsync] resolved', {
716
- opId,
757
+ ...(opId ? { opId } : {}),
717
758
  times,
718
759
  preFetchMs,
719
760
  elapsedMs: Date.now() - startedAt,
@@ -763,7 +804,15 @@ class Speech {
763
804
  reject(e as any);
764
805
  }
765
806
  });
766
- });
807
+ };
808
+
809
+ if (PROBABLY_GPT_MISTAKE) {
810
+ const opId = nextSpeechOpId('unpause-stt');
811
+ await this.enqueueSttTransition(opId, 'unpause', () => work(opId));
812
+ return;
813
+ }
814
+
815
+ await work();
767
816
  }
768
817
  /** Pause mic/STT (Android native; iOS unified if present) */
769
818
  async pauseMicrophone(): Promise<void> {
@@ -956,9 +1005,23 @@ class Speech {
956
1005
  // throw new Error('Use initAll() on iOS unified bridge.');
957
1006
  // }
958
1007
  if (!cfg?.model) throw new Error("initTTS: missing 'model'");
1008
+ if (Platform.OS === 'ios' && this.hasCompletedFullInit) {
1009
+ this.iosTtsOnly = false;
1010
+ this.rewireListenersForMode();
1011
+ return 'TTS already initialized by initAll';
1012
+ }
1013
+ if (!NativeTTS?.initTTS) throw new Error('Native TTS module missing initTTS()');
959
1014
  const modelPath = this.resolveModelToPath(cfg.model);
960
1015
  this.lastModel = modelPath;
961
- return NativeTTS.initTTS({ model: modelPath });
1016
+ const r = await NativeTTS.initTTS({ model: modelPath });
1017
+ this.hasCompletedTtsInit = true;
1018
+ if (Platform.OS === 'ios' && !this.hasCompletedFullInit) {
1019
+ this.iosTtsOnly = true;
1020
+ this.rewireListenersForMode();
1021
+ } else {
1022
+ this.ensureListeners();
1023
+ }
1024
+ return r;
962
1025
  }
963
1026
 
964
1027
  async speak(text: string, speakerId = 0, speed = 1.0) {
@@ -1000,6 +1063,9 @@ class Speech {
1000
1063
 
1001
1064
  // 2) tell native to stop
1002
1065
  try {
1066
+ if (Platform.OS === 'ios' && this.iosTtsOnly && NativeTTS?.stopSpeaking) {
1067
+ return await NativeTTS.stopSpeaking();
1068
+ }
1003
1069
  if (Platform.OS === 'ios' && NativeSpeech?.stopSpeaking) {
1004
1070
  return await NativeSpeech.stopSpeaking();
1005
1071
  }
@@ -1174,7 +1240,7 @@ class Speech {
1174
1240
  if (this.subs.length) return;
1175
1241
 
1176
1242
  // iOS unified: subscribe once on the unified emitter
1177
- if (Platform.OS === 'ios' && this.unifiedEmitter) {
1243
+ if (Platform.OS === 'ios' && !this.iosTtsOnly && this.unifiedEmitter) {
1178
1244
  const map: Partial<Record<NativeEventName, (...args: any[]) => void>> = {
1179
1245
  onSpeechStart: (e) => this.handlers.onSpeechStart(e),
1180
1246
  onSpeechRecognized: (e) => this.handlers.onSpeechRecognized(e),