react-native-davoice-tts 1.0.309 → 1.0.311

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/README.md CHANGED
@@ -1,16 +1,319 @@
1
- New tts package
1
+ # react-native-davoice
2
2
 
3
- example usage:
3
+ React Native on-device speech package for:
4
4
 
5
- import { DaVoiceTTSInstance } from 'react-native-davoice-tts';
5
+ - text to speech
6
+ - speech to text
7
+ - unified speech flows that coordinate STT and TTS together
8
+
9
+ It supports iOS and Android and is designed for apps that need a local voice pipeline with native audio-session handling.
10
+
11
+ This package pairs well with `react-native-wakeword`, which covers wake-word, keyword spotting, and related always-listening flows.
12
+
13
+ ## Features
14
+
15
+ - On-device TTS for React Native
16
+ - On-device STT for React Native
17
+ - Unified `speech/` API that can coordinate STT and TTS together
18
+ - Native event support for speech results, partials, volume, and TTS completion
19
+ - License activation and validation APIs
20
+ - Local model paths, bundled assets, and asset `require(...)` support
21
+ - WAV / audio playback helpers in the unified speech API
22
+ - iOS microphone and speech-recognition permission helpers
23
+
24
+ ## Package Layout
25
+
26
+ The package exposes three entry points:
27
+
28
+ - `react-native-davoice-tts`
29
+ Default TTS entry point.
30
+ - `react-native-davoice-tts/stt`
31
+ Standalone speech-to-text API.
32
+ - `react-native-davoice-tts/speech`
33
+ Unified API for apps that use both TTS and STT together.
34
+
35
+ ## Installation
36
+
37
+ ```bash
38
+ npm install react-native-davoice-tts
39
+ ```
40
+
41
+ or
42
+
43
+ ```bash
44
+ yarn add react-native-davoice-tts
45
+ ```
46
+
47
+ For iOS:
48
+
49
+ ```bash
50
+ cd ios
51
+ pod install
52
+ ```
53
+
54
+ React Native autolinking is supported.
55
+
56
+ ## When To Use Which API
57
+
58
+ Use `react-native-davoice-tts/speech` if your app uses both STT and TTS and you want one bridge managing the flow.
59
+
60
+ Use `react-native-davoice-tts` if you only need TTS.
61
+
62
+ Use `react-native-davoice-tts/stt` if you only need speech recognition.
63
+
64
+ ## Quick Start
65
+
66
+ ### Unified Speech API
67
+
68
+ ```ts
69
+ import Speech from 'react-native-davoice-tts/speech';
70
+
71
+ const model = require('./assets/models/model_ex_ariana.dm');
72
+
73
+ await Speech.setLicense('YOUR_LICENSE_KEY');
74
+ await Speech.initAll({
75
+ locale: 'en-US',
76
+ model,
77
+ });
78
+
79
+ Speech.onSpeechResults = (event) => {
80
+ console.log('results', event.value);
81
+ };
82
+
83
+ Speech.onSpeechPartialResults = (event) => {
84
+ console.log('partial', event.value);
85
+ };
86
+
87
+ Speech.onFinishedSpeaking = () => {
88
+ console.log('finished speaking');
89
+ };
90
+
91
+ await Speech.start('en-US');
92
+ await Speech.speak('Hello from DaVoice', 0, 1.0);
93
+ ```
94
+
95
+ ### TTS Only
96
+
97
+ ```ts
98
+ import { DaVoiceTTSInstance } from 'react-native-davoice-tts';
6
99
 
7
100
  const tts = new DaVoiceTTSInstance();
8
101
 
102
+ await tts.setLicense('YOUR_LICENSE_KEY');
9
103
  await tts.initTTS({
10
- model: '/path/to/model.onnx',
11
- tokens: '/path/to/tokens.json',
12
- espeak: '/path/to/phonemes',
13
- voice: 'en_US',
104
+ model: require('./assets/models/model_ex_ariana.dm'),
14
105
  });
15
106
 
16
- await tts.speak('Hello world!', 0);
107
+ tts.onFinishedSpeaking(() => {
108
+ console.log('done');
109
+ });
110
+
111
+ await tts.speak('Hello world', 0);
112
+ ```
113
+
114
+ ### STT Only
115
+
116
+ ```ts
117
+ import STT from 'react-native-davoice-tts/stt';
118
+
119
+ await STT.setLicense('YOUR_LICENSE_KEY');
120
+
121
+ STT.onSpeechResults = (event) => {
122
+ console.log(event.value);
123
+ };
124
+
125
+ await STT.start('en-US');
126
+ ```
127
+
128
+ ## Unified Speech API
129
+
130
+ The unified API is intended for real voice flows where STT and TTS are part of the same experience.
131
+
132
+ Common methods:
133
+
134
+ - `initAll({ locale, model, timeoutMs?, onboardingJsonPath? })`
135
+ - `destroyAll()`
136
+ - `start(locale, options?)`
137
+ - `stop()`
138
+ - `cancel()`
139
+ - `speak(text, speakerId?, speed?)`
140
+ - `stopSpeaking()`
141
+ - `playWav(pathOrURL, markAsLast?)`
142
+ - `playPCM(data, { sampleRate, channels?, interleaved?, format?, markAsLast? })`
143
+ - `playBuffer({ base64, sampleRate, channels?, interleaved?, format, markAsLast? })`
144
+ - `pauseMicrophone()`
145
+ - `unPauseMicrophone()`
146
+ - `pauseSpeechRecognition()`
147
+ - `unPauseSpeechRecognition(times)`
148
+ - `isAvailable()`
149
+ - `isRecognizing()`
150
+ - `setLicense(licenseKey)`
151
+ - `isLicenseValid(licenseKey)`
152
+
153
+ Unified events:
154
+
155
+ - `onSpeechStart`
156
+ - `onSpeechRecognized`
157
+ - `onSpeechEnd`
158
+ - `onSpeechError`
159
+ - `onSpeechResults`
160
+ - `onSpeechPartialResults`
161
+ - `onSpeechVolumeChanged`
162
+ - `onFinishedSpeaking`
163
+ - `onNewSpeechWAV`
164
+ Android-only remote STT flow event.
165
+
166
+ ### iOS Permission Helpers
167
+
168
+ The unified API also exposes iOS permission helpers:
169
+
170
+ - `hasIOSMicPermissions()`
171
+ - `requestIOSMicPermissions(waitTimeout)`
172
+ - `hasIOSSpeechRecognitionPermissions()`
173
+ - `requestIOSSpeechRecognitionPermissions(waitTimeout)`
174
+
175
+ ## TTS API
176
+
177
+ TTS is exposed from the package root.
178
+
179
+ ```ts
180
+ import { DaVoiceTTSInstance } from 'react-native-davoice-tts';
181
+ ```
182
+
183
+ Available methods:
184
+
185
+ - `initTTS({ model })`
186
+ - `setLicense(licenseKey)`
187
+ - `isLicenseValid(licenseKey)`
188
+ - `speak(text, speakerId?)`
189
+ - `stopSpeaking()`
190
+ - `destroy()`
191
+ - `onFinishedSpeaking(callback)`
192
+
193
+ ## STT API
194
+
195
+ ```ts
196
+ import STT from 'react-native-davoice-tts/stt';
197
+ ```
198
+
199
+ Available methods:
200
+
201
+ - `start(locale, options?)`
202
+ - `stop()`
203
+ - `cancel()`
204
+ - `destroy()`
205
+ - `isAvailable()`
206
+ - `isRecognizing()`
207
+ - `setLicense(licenseKey)`
208
+ - `isLicenseValid(licenseKey)`
209
+
210
+ Available event handlers:
211
+
212
+ - `onSpeechStart`
213
+ - `onSpeechRecognized`
214
+ - `onSpeechEnd`
215
+ - `onSpeechError`
216
+ - `onSpeechResults`
217
+ - `onSpeechPartialResults`
218
+ - `onSpeechVolumeChanged`
219
+
220
+ ## License API
221
+
222
+ All entry points expose the same license helpers:
223
+
224
+ ```ts
225
+ await Speech.setLicense(key);
226
+ await Speech.isLicenseValid(key);
227
+
228
+ await tts.setLicense(key);
229
+ await tts.isLicenseValid(key);
230
+
231
+ await STT.setLicense(key);
232
+ await STT.isLicenseValid(key);
233
+ ```
234
+
235
+ For the unified `speech/` entry point, `setLicense` applies the license to both STT and TTS under the hood.
236
+
237
+ ## Models And Assets
238
+
239
+ Model arguments can be provided as:
240
+
241
+ - a local file path
242
+ - a bundled asset via `require(...)`
243
+ - a file URL
244
+ - in some APIs, a remote URL
245
+
246
+ Typical model formats used by this package include `.dm` and `.onnx`.
247
+
248
+ If your app bundles model files with Metro, make sure your React Native asset configuration includes the extensions you use.
249
+
250
+ ## Works Well With react-native-wakeword
251
+
252
+ If your product needs wake word, keyword spotting, or an always-listening front end, pair this package with `react-native-wakeword`.
253
+
254
+ A common production setup is:
255
+
256
+ 1. `react-native-wakeword` for wake word and wake-phase audio control.
257
+ 2. `react-native-davoice/speech` for STT, TTS, and the active voice session.
258
+
259
+ That separation works well for assistants, hands-free flows, and full on-device voice UX.
260
+
261
+ ## Platform Notes
262
+
263
+ ### iOS
264
+
265
+ - Run `pod install` after adding or updating the package.
266
+ - Microphone permission is required.
267
+ - Speech recognition permission may also be required for STT flows.
268
+
269
+ ### Android
270
+
271
+ - Autolinking is supported.
272
+ - The package includes native Android bridge registration.
273
+ - `onNewSpeechWAV` is Android-only.
274
+
275
+ ## Example
276
+
277
+ This README is aligned with the companion DaVoice React Native example app, which demonstrates:
278
+
279
+ - TTS model selection
280
+ - STT flows
281
+ - combined speech orchestration
282
+ - integration alongside `react-native-wakeword`
283
+
284
+ ## Troubleshooting
285
+
286
+ ### Native module not found
287
+
288
+ Make sure:
289
+
290
+ - the package is installed in `node_modules`
291
+ - iOS pods are installed
292
+ - the app was rebuilt after installation
293
+
294
+ ### Model file cannot be resolved
295
+
296
+ Check:
297
+
298
+ - the model path is correct
299
+ - the asset was bundled correctly
300
+ - Metro is configured to include the model extension
301
+
302
+ ### TTS or STT fails to initialize
303
+
304
+ Check:
305
+
306
+ - the license was set before initialization
307
+ - the model file exists on device
308
+ - required permissions were granted
309
+
310
+ ## Support
311
+
312
+ For licensing, production integration, or custom deployments:
313
+
314
+ - Website: [https://davoice.io](https://davoice.io)
315
+ - Email: `info@davoice.io`
316
+
317
+ ## License
318
+
319
+ MIT for the React Native wrapper. Native model/runtime licensing may require a commercial DaVoice license depending on your deployment.
@@ -2,7 +2,7 @@ require 'json'
2
2
 
3
3
  Pod::Spec.new do |s|
4
4
  s.name = "TTSRNBridge"
5
- s.version = "1.0.182" # Update to your package version
5
+ s.version = "1.0.185" # Update to your package version
6
6
  s.summary = "TTS for React Native."
7
7
  s.description = <<-DESC
8
8
  A React Native module for tts .
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "react-native-davoice-tts",
3
- "version": "1.0.309",
3
+ "version": "1.0.311",
4
4
  "description": "tts library for React Native",
5
5
  "main": "tts/index.js",
6
6
  "types": "tts/index.d.ts",
package/speech/index.ts CHANGED
@@ -982,7 +982,7 @@ class Speech {
982
982
 
983
983
  // iOS unified: subscribe once on the unified emitter
984
984
  if (Platform.OS === 'ios' && this.unifiedEmitter) {
985
- const map: Record<NativeEventName, (...args: any[]) => void> = {
985
+ const map: Partial<Record<NativeEventName, (...args: any[]) => void>> = {
986
986
  onSpeechStart: (e) => this.handlers.onSpeechStart(e),
987
987
  onSpeechRecognized: (e) => this.handlers.onSpeechRecognized(e),
988
988
  onSpeechEnd: (e) => this.handlers.onSpeechEnd(e),
@@ -990,12 +990,13 @@ class Speech {
990
990
  onSpeechResults: (e) => this.handlers.onSpeechResults(e),
991
991
  onSpeechPartialResults: (e) => this.handlers.onSpeechPartialResults(e),
992
992
  onSpeechVolumeChanged: (e) => this.handlers.onSpeechVolumeChanged(e),
993
- onNewSpeechWAV: (e) => this.handlers.onNewSpeechWAV(e),
994
993
  onFinishedSpeaking: () => this._onNativeFinishedSpeaking(),
995
994
  };
996
995
  (Object.keys(map) as NativeEventName[]).forEach((name) => {
997
996
  try {
998
- const sub = this.unifiedEmitter!.addListener(name, map[name]);
997
+ const handler = map[name];
998
+ if (!handler) return;
999
+ const sub = this.unifiedEmitter!.addListener(name, handler);
999
1000
  this.subs.push(sub);
1000
1001
  } catch {}
1001
1002
  });