capacitor-microphone 0.0.3 → 0.0.5
This diff compares publicly available package versions as released to a supported registry. It is provided for informational purposes only and reflects the changes between versions exactly as they appear in the public registry.
Android plugin (Java):

@@ -11,6 +11,14 @@ import com.getcapacitor.annotation.Permission;
 import com.getcapacitor.annotation.PermissionCallback;
 import com.getcapacitor.PermissionState;
 
+import android.content.Intent;
+import android.os.Bundle;
+import android.speech.RecognitionListener;
+import android.speech.RecognizerIntent;
+import android.speech.SpeechRecognizer;
+
+import java.util.ArrayList;
+
 @CapacitorPlugin(
     name = "CapacitorMicrophone",
     permissions = {
@@ -22,6 +30,9 @@ import com.getcapacitor.PermissionState;
 )
 public class CapacitorMicrophonePlugin extends Plugin {
 
+    private SpeechRecognizer speechRecognizer;
+    private PluginCall currentCall;
+
     @PluginMethod
     public void checkPermission(PluginCall call) {
         PermissionState state = getPermissionState("microphone");
@@ -89,4 +100,120 @@ public class CapacitorMicrophonePlugin extends Plugin {
     }
 
 
+    @PluginMethod
+    public void startListening(PluginCall call) {
+
+        if (speechRecognizer != null) {
+            call.reject("Speech recognition already running");
+            return;
+        }
+
+        if (getPermissionState("microphone") != PermissionState.GRANTED) {
+            call.reject("Microphone permission not granted");
+            return;
+        }
+
+        if (!SpeechRecognizer.isRecognitionAvailable(getContext())) {
+            call.reject("Speech recognition not available on this device");
+            return;
+        }
+
+        currentCall = call;
+
+        speechRecognizer = SpeechRecognizer.createSpeechRecognizer(getContext());
+
+        Intent intent = new Intent(RecognizerIntent.ACTION_RECOGNIZE_SPEECH);
+        intent.putExtra(
+            RecognizerIntent.EXTRA_LANGUAGE_MODEL,
+            RecognizerIntent.LANGUAGE_MODEL_FREE_FORM
+        );
+        intent.putExtra(RecognizerIntent.EXTRA_LANGUAGE, "es-MX");
+        intent.putExtra(RecognizerIntent.EXTRA_PARTIAL_RESULTS, true);
+
+        speechRecognizer.setRecognitionListener(new RecognitionListener() {
+
+            @Override
+            public void onResults(Bundle results) {
+                ArrayList<String> matches =
+                    results.getStringArrayList(SpeechRecognizer.RESULTS_RECOGNITION);
+
+                JSObject ret = new JSObject();
+                ret.put("text", matches != null && !matches.isEmpty() ? matches.get(0) : "");
+                ret.put("isFinal", true);
+                if (currentCall != null) {
+                    currentCall.resolve(ret);
+                    currentCall = null;
+                }
+            }
+
+            @Override
+            public void onPartialResults(Bundle partialResults) {
+                ArrayList<String> matches =
+                    partialResults.getStringArrayList(SpeechRecognizer.RESULTS_RECOGNITION);
+
+                if (matches != null && !matches.isEmpty()) {
+                    JSObject ret = new JSObject();
+                    ret.put("text", matches.get(0));
+                    ret.put("isFinal", false);
+
+                    notifyListeners("partialResult", ret);
+                }
+            }
+
+            @Override public void onError(int error) {
+                if (currentCall != null) {
+                    currentCall.reject(mapError(error));
+
+                    currentCall = null;
+                }
+            }
+
+            // Required RecognitionListener methods (intentionally empty)
+            @Override public void onReadyForSpeech(Bundle params) {}
+            @Override public void onBeginningOfSpeech() {}
+            @Override public void onRmsChanged(float rmsdB) {}
+            @Override public void onBufferReceived(byte[] buffer) {}
+            @Override public void onEndOfSpeech() {}
+            @Override public void onEvent(int eventType, Bundle params) {}
+        });
+
+        speechRecognizer.startListening(intent);
+    }
+
+    @PluginMethod
+    public void stopListening(PluginCall call) {
+        if (speechRecognizer != null) {
+            speechRecognizer.stopListening();
+            speechRecognizer.destroy();
+            speechRecognizer = null;
+        }
+
+        JSObject ret = new JSObject();
+        ret.put("stopped", true);
+        call.resolve(ret);
+    }
+
+    @Override
+    protected void handleOnDestroy() {
+        if (speechRecognizer != null) {
+            speechRecognizer.destroy();
+            speechRecognizer = null;
+        }
+    }
+
+    private String mapError(int error) {
+        switch (error) {
+            case SpeechRecognizer.ERROR_AUDIO: return "Audio error";
+            case SpeechRecognizer.ERROR_NETWORK: return "Network error";
+            case SpeechRecognizer.ERROR_NO_MATCH: return "No speech recognized";
+            case SpeechRecognizer.ERROR_SPEECH_TIMEOUT: return "Speech timeout";
+            default: return "Unknown error";
+        }
+    }
+
+
 }
+
+
+
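The Android changes above resolve startListening with a { text, isFinal } object and stream interim hypotheses to the web layer through a "partialResult" event. The plugin's TypeScript definitions are not part of this diff; purely as a sketch, the web-side surface these native methods imply might look like the following (every name, path, and option shape here is an assumption, not the package's published API):

// Hypothetical TypeScript surface for the methods added above; the plugin's
// actual definitions are not included in this diff excerpt.
import { registerPlugin, type PluginListenerHandle } from '@capacitor/core';

// Shape of both the startListening() result and each partialResult event,
// matching the JSObject built in onResults()/onPartialResults() above.
export interface SpeechResult {
  text: string;
  isFinal: boolean;
}

export interface CapacitorMicrophonePlugin {
  // Resolves once a final transcription arrives, or rejects on error.
  // The iOS implementation reads an optional `lang`; the Android
  // implementation in this version hardcodes "es-MX".
  startListening(options?: { lang?: string }): Promise<SpeechResult>;
  stopListening(): Promise<{ stopped: boolean }>;
  addListener(
    eventName: 'partialResult',
    listener: (result: SpeechResult) => void
  ): Promise<PluginListenerHandle>;
}

export const CapacitorMicrophone =
  registerPlugin<CapacitorMicrophonePlugin>('CapacitorMicrophone');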
iOS plugin (Swift):

@@ -1,17 +1,27 @@
 import Foundation
 import Capacitor
 import AVFoundation
+import Speech
+
 
 @objc(CapacitorMicrophonePlugin)
 public class CapacitorMicrophonePlugin: CAPPlugin, CAPBridgedPlugin {
 
+    private let audioEngine = AVAudioEngine()
+    private var recognitionRequest: SFSpeechAudioBufferRecognitionRequest?
+    private var recognitionTask: SFSpeechRecognitionTask?
+    private var speechRecognizer: SFSpeechRecognizer?
+    private var currentCall: CAPPluginCall?
+
     public let identifier = "CapacitorMicrophonePlugin"
     public let jsName = "CapacitorMicrophone"
 
     public let pluginMethods: [CAPPluginMethod] = [
         CAPPluginMethod(name: "checkPermission", returnType: CAPPluginReturnPromise),
         CAPPluginMethod(name: "requestPermission", returnType: CAPPluginReturnPromise),
-        CAPPluginMethod(name: "checkRequestPermission", returnType: CAPPluginReturnPromise)
+        CAPPluginMethod(name: "checkRequestPermission", returnType: CAPPluginReturnPromise),
+        CAPPluginMethod(name: "startListening", returnType: CAPPluginReturnPromise),
+        CAPPluginMethod(name: "stopListening", returnType: CAPPluginReturnPromise)
     ]
 
     @objc func checkPermission(_ call: CAPPluginCall) {
@@ -71,4 +81,116 @@ public class CapacitorMicrophonePlugin: CAPPlugin, CAPBridgedPlugin {
             ])
         }
     }
+
+    @objc func startListening(_ call: CAPPluginCall) {
+
+        if audioEngine.isRunning {
+            call.reject("Speech recognition already running")
+            return
+        }
+
+        self.currentCall = call
+
+        let lang = call.getString("lang") ?? "es-MX"
+        speechRecognizer = SFSpeechRecognizer(locale: Locale(identifier: lang))
+
+        SFSpeechRecognizer.requestAuthorization { authStatus in
+            if authStatus != .authorized {
+                self.currentCall?.reject("Speech recognition not authorized")
+                self.currentCall = nil
+                return
+            }
+
+            DispatchQueue.main.async {
+                self.startRecognition()
+            }
+        }
+
+    }
+
+    private func startRecognition() {
+
+        recognitionRequest = SFSpeechAudioBufferRecognitionRequest()
+        guard let recognitionRequest = recognitionRequest else {
+            self.currentCall?.reject("Unable to create recognition request")
+            self.currentCall = nil
+            return
+        }
+
+        recognitionRequest.shouldReportPartialResults = true
+
+        let inputNode = audioEngine.inputNode
+
+        recognitionTask = speechRecognizer?.recognitionTask(with: recognitionRequest) { result, error in
+            if let result = result {
+
+                self.notifyListeners("partialResult", data: [
+                    "text": result.bestTranscription.formattedString,
+                    "isFinal": result.isFinal
+                ])
+
+                if result.isFinal, let currentCall = self.currentCall {
+                    currentCall.resolve([
+                        "text": result.bestTranscription.formattedString,
+                        "isFinal": true
+                    ])
+                    self.currentCall = nil
+                    self.stopAudio()
+                }
+            }
+
+            if let error = error, let currentCall = self.currentCall {
+                currentCall.reject(error.localizedDescription)
+                self.currentCall = nil
+                self.stopAudio()
+            }
+        }
+
+        let recordingFormat = inputNode.outputFormat(forBus: 0)
+        inputNode.installTap(onBus: 0, bufferSize: 1024, format: recordingFormat) {
+            buffer, _ in
+            recognitionRequest.append(buffer)
+        }
+
+        audioEngine.prepare()
+
+        do {
+            let audioSession = AVAudioSession.sharedInstance()
+            try audioSession.setCategory(.record, mode: .measurement, options: .duckOthers)
+            try audioSession.setActive(true, options: .notifyOthersOnDeactivation)
+            try audioEngine.start()
+        } catch {
+            if let currentCall = self.currentCall {
+                currentCall.reject("Audio engine could not start")
+                self.currentCall = nil
+            }
+        }
+    }
+
+
+    @objc func stopListening(_ call: CAPPluginCall) {
+        stopAudio()
+        call.resolve(["stopped": true])
+    }
+
+    private func stopAudio() {
+        if audioEngine.isRunning {
+            audioEngine.stop()
+        }
+        if audioEngine.inputNode.numberOfInputs > 0 {
+            audioEngine.inputNode.removeTap(onBus: 0)
+        }
+        recognitionRequest?.endAudio()
+
+        recognitionTask?.cancel()
+        recognitionTask = nil
+        recognitionRequest = nil
+    }
+
+    override public func handleOnDestroy() {
+        stopAudio()
+    }
+
+
+
 }
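And a usage sketch against that hypothetical interface, again an illustration rather than the package's documented API. Note that only the iOS implementation reads the lang option; the Android implementation in this version always recognizes "es-MX".

// Usage sketch, assuming the hypothetical definitions sketched earlier.
// The module path below is illustrative, not the package's real entry point.
import { CapacitorMicrophone, type SpeechResult } from './capacitor-microphone';

async function transcribeOnce(): Promise<string> {
  // Stream interim hypotheses while recognition runs.
  const handle = await CapacitorMicrophone.addListener(
    'partialResult',
    (result: SpeechResult) => console.log('partial:', result.text)
  );

  try {
    // iOS honors `lang`; Android in this version ignores it.
    const final = await CapacitorMicrophone.startListening({ lang: 'es-MX' });
    return final.text;
  } finally {
    await CapacitorMicrophone.stopListening();
    await handle.remove();
  }
}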