react-native-voice-ts 1.0.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/.nvmrc +1 -0
- package/.prettierrc +5 -0
- package/.releaserc +15 -0
- package/CONTRIBUTING.md +293 -0
- package/LICENSE +21 -0
- package/MIGRATION_SUMMARY.md +510 -0
- package/README.md +576 -0
- package/android/build.gradle +126 -0
- package/android/gradle.properties +5 -0
- package/android/src/main/AndroidManifest.xml +8 -0
- package/android/src/main/VoiceSpec.kt +55 -0
- package/android/src/main/java/com/wenkesj/voice/Voice.kt +343 -0
- package/android/src/main/java/com/wenkesj/voice/VoiceModule.kt +63 -0
- package/android/src/main/java/com/wenkesj/voice/VoicePackage.kt +35 -0
- package/android/src/newarch/VoiceSpec.kt +55 -0
- package/android/src/oldarch/VoiceSpec.kt +30 -0
- package/app.plugin.js +1 -0
- package/dist/NativeVoiceAndroid.d.ts +22 -0
- package/dist/NativeVoiceAndroid.d.ts.map +1 -0
- package/dist/NativeVoiceAndroid.js +3 -0
- package/dist/NativeVoiceAndroid.js.map +1 -0
- package/dist/NativeVoiceIOS.d.ts +18 -0
- package/dist/NativeVoiceIOS.d.ts.map +1 -0
- package/dist/NativeVoiceIOS.js +3 -0
- package/dist/NativeVoiceIOS.js.map +1 -0
- package/dist/VoiceModuleTypes.d.ts +54 -0
- package/dist/VoiceModuleTypes.d.ts.map +1 -0
- package/dist/VoiceModuleTypes.js +2 -0
- package/dist/VoiceModuleTypes.js.map +1 -0
- package/dist/VoiceUtilTypes.d.ts +43 -0
- package/dist/VoiceUtilTypes.d.ts.map +1 -0
- package/dist/VoiceUtilTypes.js +8 -0
- package/dist/VoiceUtilTypes.js.map +1 -0
- package/dist/index.d.ts +72 -0
- package/dist/index.d.ts.map +1 -0
- package/dist/index.js +395 -0
- package/dist/index.js.map +1 -0
- package/ios/Voice/Voice.h +14 -0
- package/ios/Voice/Voice.mm +672 -0
- package/ios/Voice.xcodeproj/project.pbxproj +272 -0
- package/ios/Voice.xcodeproj/project.xcworkspace/contents.xcworkspacedata +7 -0
- package/package.json +101 -0
- package/plugin/build/withVoice.d.ts +13 -0
- package/plugin/build/withVoice.js +47 -0
- package/plugin/tsconfig.tsbuildinfo +1 -0
- package/react-native-voice.podspec +46 -0
- package/src/NativeVoiceAndroid.ts +28 -0
- package/src/NativeVoiceIOS.ts +24 -0
- package/src/VoiceModuleTypes.ts +64 -0
- package/src/VoiceUtilTypes.ts +46 -0
- package/src/index.ts +500 -0

package/android/src/main/VoiceSpec.kt
ADDED

@@ -0,0 +1,55 @@

package com.wenkesj.voice

import com.facebook.react.bridge.Callback
import com.facebook.react.bridge.Promise
import com.facebook.react.bridge.ReactApplicationContext
import com.facebook.react.bridge.ReadableMap

abstract class VoiceSpec internal constructor(context: ReactApplicationContext) :
  NativeVoiceAndroidSpec(context) {
  private val voice = Voice(context)

  override fun destroySpeech(callback: Callback) {
    voice.destroySpeech(callback)
  }

  override fun startSpeech(locale: String, opts: ReadableMap, callback: Callback) {
    voice.startSpeech(locale, opts, callback)
  }

  override fun stopSpeech(callback: Callback) {
    voice.stopSpeech(callback)
  }

  override fun cancelSpeech(callback: Callback) {
    voice.cancelSpeech(callback)
  }

  override fun isSpeechAvailable(callback: Callback) {
    voice.isSpeechAvailable(callback)
  }

  override fun getSpeechRecognitionServices(promise: Promise) {
    voice.getSpeechRecognitionServices(promise)
  }

  override fun isRecognizing(callback: Callback) {
    voice.isRecognizing(callback)
  }

  override fun addListener(eventType: String) {
  }

  override fun removeListeners(count: Double) {
  }

  override fun getName(): String {
    return NAME
  }

  companion object {
    const val NAME = "Voice"
  }
}

package/android/src/main/java/com/wenkesj/voice/Voice.kt
ADDED

@@ -0,0 +1,343 @@

package com.wenkesj.voice

import android.Manifest
import android.annotation.SuppressLint
import android.content.ComponentName
import android.content.Intent
import android.content.pm.PackageManager
import android.os.Bundle
import android.os.Handler
import android.speech.RecognitionListener
import android.speech.RecognitionService
import android.speech.RecognizerIntent
import android.speech.SpeechRecognizer
import android.util.Log
import androidx.annotation.Nullable
import com.facebook.react.bridge.Arguments
import com.facebook.react.bridge.Callback
import com.facebook.react.bridge.Promise
import com.facebook.react.bridge.ReactApplicationContext
import com.facebook.react.bridge.ReactContext.RCTDeviceEventEmitter
import com.facebook.react.bridge.ReadableMap
import com.facebook.react.bridge.WritableMap
import com.facebook.react.modules.core.PermissionAwareActivity
import java.util.Locale


class Voice (context: ReactApplicationContext) : RecognitionListener {
  val reactContext: ReactApplicationContext = context
  private var speech: SpeechRecognizer? = null
  private var isRecognizing = false
  private var locale: String? = null

  private fun getLocale(locale: String?): String {
    if (locale != null && locale != "") {
      return locale
    }
    return Locale.getDefault().toString()
  }

  private fun startListening(opts: ReadableMap) {
    if (speech != null) {
      speech?.destroy()
      speech = null
    }

    speech = if (opts.hasKey("RECOGNIZER_ENGINE")) {
      when (opts.getString("RECOGNIZER_ENGINE")) {
        "GOOGLE" -> {
          SpeechRecognizer.createSpeechRecognizer(
            this.reactContext,
            ComponentName.unflattenFromString("com.google.android.googlequicksearchbox/com.google.android.voicesearch.serviceapi.GoogleRecognitionService")
          )
        }

        else -> SpeechRecognizer.createSpeechRecognizer(this.reactContext)
      }
    } else {
      SpeechRecognizer.createSpeechRecognizer(this.reactContext)
    }

    speech?.setRecognitionListener(this)

    val intent = Intent(RecognizerIntent.ACTION_RECOGNIZE_SPEECH)

    // Load the intent with options from JS
    val iterator = opts.keySetIterator()
    while (iterator.hasNextKey()) {
      val key = iterator.nextKey()
      when (key) {
        "EXTRA_LANGUAGE_MODEL" -> when (opts.getString(key)) {
          "LANGUAGE_MODEL_FREE_FORM" -> intent.putExtra(
            RecognizerIntent.EXTRA_LANGUAGE_MODEL,
            RecognizerIntent.LANGUAGE_MODEL_FREE_FORM
          )

          "LANGUAGE_MODEL_WEB_SEARCH" -> intent.putExtra(
            RecognizerIntent.EXTRA_LANGUAGE_MODEL,
            RecognizerIntent.LANGUAGE_MODEL_WEB_SEARCH
          )

          else -> intent.putExtra(
            RecognizerIntent.EXTRA_LANGUAGE_MODEL,
            RecognizerIntent.LANGUAGE_MODEL_FREE_FORM
          )
        }

        "EXTRA_MAX_RESULTS" -> {
          val extras = opts.getDouble(key)
          intent.putExtra(RecognizerIntent.EXTRA_MAX_RESULTS, extras.toInt())
        }

        "EXTRA_PARTIAL_RESULTS" -> {
          intent.putExtra(RecognizerIntent.EXTRA_PARTIAL_RESULTS, opts.getBoolean(key))
        }

        "EXTRA_SPEECH_INPUT_MINIMUM_LENGTH_MILLIS" -> {
          val extras = opts.getDouble(key)
          intent.putExtra(RecognizerIntent.EXTRA_SPEECH_INPUT_MINIMUM_LENGTH_MILLIS, extras.toInt())
        }

        "EXTRA_SPEECH_INPUT_COMPLETE_SILENCE_LENGTH_MILLIS" -> {
          val extras = opts.getDouble(key)
          intent.putExtra(
            RecognizerIntent.EXTRA_SPEECH_INPUT_COMPLETE_SILENCE_LENGTH_MILLIS,
            extras.toInt()
          )
        }

        "EXTRA_SPEECH_INPUT_POSSIBLY_COMPLETE_SILENCE_LENGTH_MILLIS" -> {
          val extras = opts.getDouble(key)
          intent.putExtra(
            RecognizerIntent.EXTRA_SPEECH_INPUT_POSSIBLY_COMPLETE_SILENCE_LENGTH_MILLIS,
            extras.toInt()
          )
        }
      }
    }

    intent.putExtra(RecognizerIntent.EXTRA_LANGUAGE, getLocale(this.locale))
    speech?.startListening(intent)
  }

  private fun startSpeechWithPermissions(locale: String, opts: ReadableMap, callback: Callback) {
    this.locale = locale

    val mainHandler = Handler(reactContext.mainLooper)
    mainHandler.post {
      try {
        startListening(opts)
        isRecognizing = true
        callback.invoke(false)
      } catch (e: Exception) {
        callback.invoke(e.message)
      }
    }
  }

  fun startSpeech(locale: String?, opts: ReadableMap, callback: Callback?) {
    if (!isPermissionGranted() && opts.getBoolean("REQUEST_PERMISSIONS_AUTO")) {
      val PERMISSIONS = arrayOf<String>(Manifest.permission.RECORD_AUDIO)
      if (reactContext.currentActivity != null) {
        (reactContext.currentActivity as PermissionAwareActivity).requestPermissions(
          PERMISSIONS, 1
        ) { requestCode, permissions, grantResults ->
          var permissionsGranted = true
          for (i in permissions.indices) {
            val granted = grantResults[i] == PackageManager.PERMISSION_GRANTED
            permissionsGranted = permissionsGranted && granted
          }
          startSpeechWithPermissions(locale!!, opts, callback!!)
          permissionsGranted
        }
      }
      return
    }
    startSpeechWithPermissions(locale!!, opts, callback!!)
  }


  fun stopSpeech(callback: Callback) {
    val mainHandler = Handler(reactContext.mainLooper)
    mainHandler.post {
      try {
        if (speech != null) {
          speech!!.stopListening()
        }
        isRecognizing = false
        callback.invoke(false)
      } catch (e: java.lang.Exception) {
        callback.invoke(e.message)
      }
    }
  }

  fun cancelSpeech(callback: Callback) {
    val mainHandler = Handler(reactContext.mainLooper)
    mainHandler.post {
      try {
        if (speech != null) {
          speech!!.cancel()
        }
        isRecognizing = false
        callback.invoke(false)
      } catch (e: java.lang.Exception) {
        callback.invoke(e.message)
      }
    }
  }

  fun destroySpeech(callback: Callback) {
    val mainHandler = Handler(reactContext.mainLooper)
    mainHandler.post {
      try {
        if (speech != null) {
          speech!!.destroy()
        }
        speech = null
        isRecognizing = false
        callback.invoke(false)
      } catch (e: java.lang.Exception) {
        callback.invoke(e.message)
      }
    }
  }

  fun isSpeechAvailable(callback: Callback) {
    val self: Voice = this
    val mainHandler = Handler(reactContext.mainLooper)
    mainHandler.post {
      try {
        val isSpeechAvailable = SpeechRecognizer.isRecognitionAvailable(self.reactContext)
        callback.invoke(isSpeechAvailable, false)
      } catch (e: java.lang.Exception) {
        callback.invoke(false, e.message)
      }
    }
  }

  fun getSpeechRecognitionServices(promise: Promise) {
    val services = reactContext.packageManager
      .queryIntentServices(Intent(RecognitionService.SERVICE_INTERFACE), 0)
    val serviceNames = Arguments.createArray()
    for (service in services) {
      serviceNames.pushString(service.serviceInfo.packageName)
    }

    promise.resolve(serviceNames)
  }

  private fun isPermissionGranted(): Boolean {
    val permission = Manifest.permission.RECORD_AUDIO
    val res: Int = reactContext.checkCallingOrSelfPermission(permission)
    return res == PackageManager.PERMISSION_GRANTED
  }

  fun isRecognizing(callback: Callback) {
    callback.invoke(isRecognizing)
  }

  private fun sendEvent(eventName: String, params: WritableMap) {
    reactContext
      .getJSModule(RCTDeviceEventEmitter::class.java)
      .emit(eventName, params)
  }

  private fun getErrorText(errorCode: Int): String {
    val message = when (errorCode) {
      SpeechRecognizer.ERROR_AUDIO -> "Audio recording error"
      SpeechRecognizer.ERROR_CLIENT -> "Client side error"
      SpeechRecognizer.ERROR_INSUFFICIENT_PERMISSIONS -> "Insufficient permissions"
      SpeechRecognizer.ERROR_NETWORK -> "Network error"
      SpeechRecognizer.ERROR_NETWORK_TIMEOUT -> "Network timeout"
      SpeechRecognizer.ERROR_NO_MATCH -> "No match"
      SpeechRecognizer.ERROR_RECOGNIZER_BUSY -> "RecognitionService busy"
      SpeechRecognizer.ERROR_SERVER -> "error from server"
      SpeechRecognizer.ERROR_SPEECH_TIMEOUT -> "No speech input"
      else -> "Didn't understand, please try again."
    }
    return message
  }


  override fun onReadyForSpeech(params: Bundle?) {
    val event = Arguments.createMap()
    event.putBoolean("error", false)
    sendEvent("onSpeechStart", event)
    Log.d("ASR", "onReadyForSpeech()")
  }

  override fun onBeginningOfSpeech() {
    val event = Arguments.createMap()
    event.putBoolean("error", false)
    sendEvent("onSpeechStart", event)
    Log.d("ASR", "onBeginningOfSpeech()")
  }

  override fun onRmsChanged(rmsdB: Float) {
    val event = Arguments.createMap()
    event.putDouble("value", rmsdB.toDouble())
    sendEvent("onSpeechVolumeChanged", event)
  }

  override fun onBufferReceived(buffer: ByteArray?) {
    val event = Arguments.createMap()
    event.putBoolean("error", false)
    sendEvent("onSpeechRecognized", event)
    Log.d("ASR", "onBufferReceived()")
  }

  override fun onEndOfSpeech() {
    val event = Arguments.createMap()
    event.putBoolean("error", false)
    sendEvent("onSpeechEnd", event)
    Log.d("ASR", "onEndOfSpeech()")
    isRecognizing = false
  }


  override fun onError(error: Int) {
    val errorMessage = String.format("%d/%s", error, getErrorText(error))
    val errorData = Arguments.createMap()
    errorData.putString("message", errorMessage)
    errorData.putString("code", java.lang.String.valueOf(errorMessage))
    val event = Arguments.createMap()
    event.putMap("error", errorData)
    sendEvent("onSpeechError", event)
    Log.d("ASR", "onError() - $errorMessage")
  }

  override fun onResults(results: Bundle?) {
    val arr = Arguments.createArray()

    val matches = results!!.getStringArrayList(SpeechRecognizer.RESULTS_RECOGNITION)
    if (matches != null) {
      for (result in matches) {
        arr.pushString(result)
      }
    }
    val event = Arguments.createMap()
    event.putArray("value", arr)
    sendEvent("onSpeechResults", event)
    Log.d("ASR", "onResults()")
  }

  override fun onPartialResults(partialResults: Bundle?) {
    val arr = Arguments.createArray()

    val matches = partialResults?.getStringArrayList(SpeechRecognizer.RESULTS_RECOGNITION)
    matches?.let {
      for (result in it) {
        arr.pushString(result)
      }
    }

    val event = Arguments.createMap()
    event.putArray("value", arr)
    sendEvent("onSpeechPartialResults", event)
  }

  override fun onEvent(eventType: Int, params: Bundle?) {
    TODO("Not yet implemented")
  }
}

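The RecognitionListener overrides above are what actually reach JavaScript: each one forwards a WritableMap through RCTDeviceEventEmitter, so the event names and payload shapes are fixed by this Kotlin file rather than by the TypeScript layer. A minimal TypeScript sketch of listening to those events, assuming they are consumed directly with React Native's DeviceEventEmitter (the package's index.ts wrapper, not shown in this hunk, is the supported way to attach handlers):

import { DeviceEventEmitter } from 'react-native';

// Payload shapes as emitted by Voice.kt's sendEvent() calls.
type SpeechResultsEvent = { value: string[] };                           // onSpeechResults, onSpeechPartialResults
type SpeechErrorEvent = { error: { message: string; code: string } };    // onSpeechError
type SpeechVolumeEvent = { value: number };                              // onSpeechVolumeChanged (RMS in dB)

const subscriptions = [
  DeviceEventEmitter.addListener('onSpeechResults', (e: SpeechResultsEvent) => {
    console.log('final results:', e.value);
  }),
  DeviceEventEmitter.addListener('onSpeechPartialResults', (e: SpeechResultsEvent) => {
    console.log('partial results:', e.value);
  }),
  DeviceEventEmitter.addListener('onSpeechError', (e: SpeechErrorEvent) => {
    console.warn('speech error:', e.error.message);
  }),
];

// Remove the listeners when the consumer unmounts to avoid leaks.
subscriptions.forEach((s) => s.remove());
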
package/android/src/main/java/com/wenkesj/voice/VoiceModule.kt
ADDED

@@ -0,0 +1,63 @@

package com.wenkesj.voice

import com.facebook.react.bridge.Callback
import com.facebook.react.bridge.ReactApplicationContext
import com.facebook.react.bridge.Promise
import com.facebook.react.bridge.ReadableMap
import com.facebook.react.bridge.ReactMethod

class VoiceModule internal constructor(context: ReactApplicationContext) :
  VoiceSpec(context) {
  private val voice = Voice(context)

  @ReactMethod
  override fun destroySpeech(callback: Callback) {
    voice.destroySpeech(callback)
  }

  @ReactMethod
  override fun startSpeech(locale: String, opts: ReadableMap, callback: Callback) {
    voice.startSpeech(locale, opts, callback)
  }

  @ReactMethod
  override fun stopSpeech(callback: Callback) {
    voice.stopSpeech(callback)
  }

  @ReactMethod
  override fun cancelSpeech(callback: Callback) {
    voice.cancelSpeech(callback)
  }

  @ReactMethod
  override fun isSpeechAvailable(callback: Callback) {
    voice.isSpeechAvailable(callback)
  }

  @ReactMethod
  override fun getSpeechRecognitionServices(promise: Promise) {
    voice.getSpeechRecognitionServices(promise)
  }

  @ReactMethod
  override fun isRecognizing(callback: Callback) {
    voice.isRecognizing(callback)
  }

  override fun addListener(eventType: String) {
  }

  override fun removeListeners(count: Double) {
  }

  override fun getName(): String {
    return NAME
  }

  companion object {
    const val NAME = "Voice"
  }
}

package/android/src/main/java/com/wenkesj/voice/VoicePackage.kt
ADDED

@@ -0,0 +1,35 @@

package com.wenkesj.voice

import com.facebook.react.TurboReactPackage
import com.facebook.react.bridge.ReactApplicationContext
import com.facebook.react.bridge.NativeModule
import com.facebook.react.module.model.ReactModuleInfoProvider
import com.facebook.react.module.model.ReactModuleInfo
import java.util.HashMap

class VoicePackage : TurboReactPackage() {
  override fun getModule(name: String, reactContext: ReactApplicationContext): NativeModule? {
    return if (name == VoiceModule.NAME) {
      VoiceModule(reactContext)
    } else {
      null
    }
  }

  override fun getReactModuleInfoProvider(): ReactModuleInfoProvider {
    return ReactModuleInfoProvider {
      val moduleInfos: MutableMap<String, ReactModuleInfo> = HashMap()
      val isTurboModule: Boolean = BuildConfig.IS_NEW_ARCHITECTURE_ENABLED
      moduleInfos[VoiceModule.NAME] = ReactModuleInfo(
        VoiceModule.NAME,
        VoiceModule.NAME,
        false, // canOverrideExistingModule
        false, // needsEagerInit
        true, // hasConstants
        false, // isCxxModule
        isTurboModule // isTurboModule
      )
      moduleInfos
    }
  }
}

package/android/src/newarch/VoiceSpec.kt
ADDED

@@ -0,0 +1,55 @@

package com.wenkesj.voice

import com.facebook.react.bridge.Callback
import com.facebook.react.bridge.Promise
import com.facebook.react.bridge.ReactApplicationContext
import com.facebook.react.bridge.ReadableMap

abstract class VoiceSpec internal constructor(context: ReactApplicationContext) :
  NativeVoiceAndroidSpec(context) {
  private val voice = Voice(context)

  override fun destroySpeech(callback: Callback) {
    voice.destroySpeech(callback)
  }

  override fun startSpeech(locale: String, opts: ReadableMap, callback: Callback) {
    voice.startSpeech(locale, opts, callback)
  }

  override fun stopSpeech(callback: Callback) {
    voice.stopSpeech(callback)
  }

  override fun cancelSpeech(callback: Callback) {
    voice.cancelSpeech(callback)
  }

  override fun isSpeechAvailable(callback: Callback) {
    voice.isSpeechAvailable(callback)
  }

  override fun getSpeechRecognitionServices(promise: Promise) {
    voice.getSpeechRecognitionServices(promise)
  }

  override fun isRecognizing(callback: Callback) {
    voice.isRecognizing(callback)
  }

  override fun addListener(eventType: String) {
  }

  override fun removeListeners(count: Double) {
  }

  override fun getName(): String {
    return NAME
  }

  companion object {
    const val NAME = "Voice"
  }
}

package/android/src/oldarch/VoiceSpec.kt
ADDED

@@ -0,0 +1,30 @@

package com.wenkesj.voice

import com.facebook.react.bridge.Callback
import com.facebook.react.bridge.Promise
import com.facebook.react.bridge.ReactApplicationContext
import com.facebook.react.bridge.ReactContextBaseJavaModule
import com.facebook.react.bridge.ReadableMap


abstract class VoiceSpec internal constructor(context: ReactApplicationContext) :
  ReactContextBaseJavaModule(context) {

  abstract fun destroySpeech(callback: Callback)

  abstract fun startSpeech(locale: String, opts: ReadableMap, callback: Callback)

  abstract fun stopSpeech(callback: Callback)

  abstract fun cancelSpeech(callback: Callback)

  abstract fun isSpeechAvailable(callback: Callback)

  abstract fun getSpeechRecognitionServices(promise: Promise)

  abstract fun isRecognizing(callback: Callback)

  abstract fun addListener(eventType: String)

  abstract fun removeListeners(count: Double)
}

package/app.plugin.js
ADDED

@@ -0,0 +1 @@

module.exports = require('./plugin/build/withVoice');

package/dist/NativeVoiceAndroid.d.ts
ADDED

@@ -0,0 +1,22 @@

import type { TurboModule } from 'react-native';
type SpeechType = {
  EXTRA_LANGUAGE_MODEL: string;
  EXTRA_MAX_RESULTS: string;
  EXTRA_PARTIAL_RESULTS: string;
  REQUEST_PERMISSIONS_AUTO: string;
  RECOGNIZER_ENGINE: string;
};
export interface Spec extends TurboModule {
  destroySpeech: (callback: (error: string) => void) => void;
  startSpeech: (locale: string, opts: SpeechType, callback: (error: string) => void) => void;
  stopSpeech: (callback: (error: string) => void) => void;
  cancelSpeech: (callback: (error: string) => void) => void;
  isSpeechAvailable: (callback: (isAvailable: boolean, error: string) => void) => void;
  getSpeechRecognitionServices(): Promise<string[]>;
  isRecognizing: (callback: (Recognizing: boolean) => void) => void;
  addListener: (eventType: string) => void;
  removeListeners: (count: number) => void;
}
declare const _default: Spec;
export default _default;
//# sourceMappingURL=NativeVoiceAndroid.d.ts.map

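Two things stand out in this generated Android spec. First, every method except getSpeechRecognitionServices is callback-based, with the error delivered as the callback's argument. Second, SpeechType declares every option field as string, while Voice.kt reads EXTRA_MAX_RESULTS with getDouble and EXTRA_PARTIAL_RESULTS / REQUEST_PERMISSIONS_AUTO with getBoolean, so the declared option types and the native reads do not line up. A short usage sketch against the unambiguous methods, assuming the generated module can be imported directly (illustrative path; consumers normally go through the package's index.ts wrapper):

// Sketch only: exercising the callback-style Android spec directly.
import NativeVoiceAndroid from 'react-native-voice-ts/dist/NativeVoiceAndroid';

NativeVoiceAndroid.isSpeechAvailable((isAvailable, error) => {
  if (error) {
    console.warn('availability check failed:', error);
    return;
  }
  console.log('recognition available:', isAvailable);
});

// The one Promise-returning method: resolves to the package names of
// installed RecognitionService implementations (see Voice.getSpeechRecognitionServices).
NativeVoiceAndroid.getSpeechRecognitionServices().then((services) => {
  console.log('recognition services:', services);
});
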
package/dist/NativeVoiceAndroid.d.ts.map
ADDED

@@ -0,0 +1 @@

{"version":3,"file":"NativeVoiceAndroid.d.ts","sourceRoot":"","sources":["../src/NativeVoiceAndroid.ts"],"names":[],"mappings":"AAAA,OAAO,KAAK,EAAE,WAAW,EAAE,MAAM,cAAc,CAAC;AAEhD,KAAK,UAAU,GAAG;IAChB,oBAAoB,EAAE,MAAM,CAAC;IAC7B,iBAAiB,EAAE,MAAM,CAAC;IAC1B,qBAAqB,EAAE,MAAM,CAAC;IAC9B,wBAAwB,EAAE,MAAM,CAAC;IACjC,iBAAiB,EAAE,MAAM,CAAC;CAC3B,CAAC;AACF,MAAM,WAAW,IAAK,SAAQ,WAAW;IACvC,aAAa,EAAE,CAAC,QAAQ,EAAE,CAAC,KAAK,EAAE,MAAM,KAAK,IAAI,KAAK,IAAI,CAAC;IAC3D,WAAW,EAAE,CACX,MAAM,EAAE,MAAM,EACd,IAAI,EAAE,UAAU,EAChB,QAAQ,EAAE,CAAC,KAAK,EAAE,MAAM,KAAK,IAAI,KAC9B,IAAI,CAAC;IACV,UAAU,EAAE,CAAC,QAAQ,EAAE,CAAC,KAAK,EAAE,MAAM,KAAK,IAAI,KAAK,IAAI,CAAC;IACxD,YAAY,EAAE,CAAC,QAAQ,EAAE,CAAC,KAAK,EAAE,MAAM,KAAK,IAAI,KAAK,IAAI,CAAC;IAC1D,iBAAiB,EAAE,CACjB,QAAQ,EAAE,CAAC,WAAW,EAAE,OAAO,EAAE,KAAK,EAAE,MAAM,KAAK,IAAI,KACpD,IAAI,CAAC;IACV,4BAA4B,IAAI,OAAO,CAAC,MAAM,EAAE,CAAC,CAAC;IAClD,aAAa,EAAE,CAAC,QAAQ,EAAE,CAAC,WAAW,EAAE,OAAO,KAAK,IAAI,KAAK,IAAI,CAAC;IAClE,WAAW,EAAE,CAAC,SAAS,EAAE,MAAM,KAAK,IAAI,CAAC;IACzC,eAAe,EAAE,CAAC,KAAK,EAAE,MAAM,KAAK,IAAI,CAAC;CAC1C;;AAED,wBAA+D"}

package/dist/NativeVoiceAndroid.js.map
ADDED

@@ -0,0 +1 @@

{"version":3,"file":"NativeVoiceAndroid.js","sourceRoot":"","sources":["../src/NativeVoiceAndroid.ts"],"names":[],"mappings":"AACA,OAAO,EAAE,mBAAmB,EAAE,MAAM,cAAc,CAAC;AA0BnD,eAAe,mBAAmB,CAAC,YAAY,CAAO,OAAO,CAAC,CAAC"}

package/dist/NativeVoiceIOS.d.ts
ADDED

@@ -0,0 +1,18 @@

import type { TurboModule } from 'react-native';
export interface Spec extends TurboModule {
  destroySpeech: (callback: (error: string) => void) => void;
  startSpeech: (locale: string, callback: (error: string) => void) => void;
  startTranscription: (locale: string, callback: (error: string) => void) => void;
  stopSpeech: (callback: (error: string) => void) => void;
  stopTranscription: (callback: (error: string) => void) => void;
  cancelSpeech: (callback: (error: string) => void) => void;
  cancelTranscription: (callback: (error: string) => void) => void;
  isSpeechAvailable: (callback: (isAvailable: boolean, error: string) => void) => void;
  isRecognizing: (callback: (Recognizing: boolean) => void) => void;
  addListener: (eventType: string) => void;
  removeListeners: (count: number) => void;
  destroyTranscription: (callback: (error: string) => void) => void;
}
declare const _default: Spec;
export default _default;
//# sourceMappingURL=NativeVoiceIOS.d.ts.map

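Compared with the Android spec, the iOS declaration drops the opts map from startSpeech and adds a transcription family (startTranscription, stopTranscription, cancelTranscription, destroyTranscription) with no Android counterpart. A minimal sketch of the iOS-only calls, again with an illustrative import path rather than the package's public wrapper:

// Sketch only: iOS-only transcription controls; callbacks receive an error string on failure.
import NativeVoiceIOS from 'react-native-voice-ts/dist/NativeVoiceIOS';

NativeVoiceIOS.startTranscription('en-US', (error) => {
  if (error) console.warn('could not start transcription:', error);
});

NativeVoiceIOS.stopTranscription((error) => {
  if (error) console.warn('could not stop transcription:', error);
});

NativeVoiceIOS.destroyTranscription((error) => {
  if (error) console.warn('could not destroy transcription:', error);
});
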
package/dist/NativeVoiceIOS.d.ts.map
ADDED

@@ -0,0 +1 @@

{"version":3,"file":"NativeVoiceIOS.d.ts","sourceRoot":"","sources":["../src/NativeVoiceIOS.ts"],"names":[],"mappings":"AAAA,OAAO,KAAK,EAAE,WAAW,EAAE,MAAM,cAAc,CAAC;AAGhD,MAAM,WAAW,IAAK,SAAQ,WAAW;IACvC,aAAa,EAAE,CAAC,QAAQ,EAAE,CAAC,KAAK,EAAE,MAAM,KAAK,IAAI,KAAK,IAAI,CAAC;IAC3D,WAAW,EAAE,CAAC,MAAM,EAAE,MAAM,EAAE,QAAQ,EAAE,CAAC,KAAK,EAAE,MAAM,KAAK,IAAI,KAAK,IAAI,CAAC;IACzE,kBAAkB,EAAE,CAClB,MAAM,EAAE,MAAM,EACd,QAAQ,EAAE,CAAC,KAAK,EAAE,MAAM,KAAK,IAAI,KAC9B,IAAI,CAAC;IACV,UAAU,EAAE,CAAC,QAAQ,EAAE,CAAC,KAAK,EAAE,MAAM,KAAK,IAAI,KAAK,IAAI,CAAC;IACxD,iBAAiB,EAAE,CAAC,QAAQ,EAAE,CAAC,KAAK,EAAE,MAAM,KAAK,IAAI,KAAK,IAAI,CAAC;IAC/D,YAAY,EAAE,CAAC,QAAQ,EAAE,CAAC,KAAK,EAAE,MAAM,KAAK,IAAI,KAAK,IAAI,CAAC;IAC1D,mBAAmB,EAAE,CAAC,QAAQ,EAAE,CAAC,KAAK,EAAE,MAAM,KAAK,IAAI,KAAK,IAAI,CAAC;IACjE,iBAAiB,EAAE,CACjB,QAAQ,EAAE,CAAC,WAAW,EAAE,OAAO,EAAE,KAAK,EAAE,MAAM,KAAK,IAAI,KACpD,IAAI,CAAC;IACV,aAAa,EAAE,CAAC,QAAQ,EAAE,CAAC,WAAW,EAAE,OAAO,KAAK,IAAI,KAAK,IAAI,CAAC;IAClE,WAAW,EAAE,CAAC,SAAS,EAAE,MAAM,KAAK,IAAI,CAAC;IACzC,eAAe,EAAE,CAAC,KAAK,EAAE,MAAM,KAAK,IAAI,CAAC;IACzC,oBAAoB,EAAE,CAAC,QAAQ,EAAE,CAAC,KAAK,EAAE,MAAM,KAAK,IAAI,KAAK,IAAI,CAAC;CACnE;;AAED,wBAA+D"}

package/dist/NativeVoiceIOS.js.map
ADDED

@@ -0,0 +1 @@

{"version":3,"file":"NativeVoiceIOS.js","sourceRoot":"","sources":["../src/NativeVoiceIOS.ts"],"names":[],"mappings":"AACA,OAAO,EAAE,mBAAmB,EAAE,MAAM,cAAc,CAAC;AAsBnD,eAAe,mBAAmB,CAAC,YAAY,CAAO,OAAO,CAAC,CAAC"}