capacitor-native-speech-recognition 1.0.1

This diff shows the contents of package versions that have been publicly released to a supported registry. It is provided for informational purposes only and reflects the changes between versions as they appear in that registry.
package/dist/plugin.cjs.js ADDED
@@ -0,0 +1,42 @@
+ 'use strict';
+
+ var core = require('@capacitor/core');
+
+ const SpeechRecognition = core.registerPlugin('SpeechRecognition', {
+     web: () => Promise.resolve().then(function () { return web; }).then((m) => new m.SpeechRecognitionWeb()),
+ });
+
+ class SpeechRecognitionWeb extends core.WebPlugin {
+     available() {
+         throw this.unimplemented('Speech recognition is not available on the web.');
+     }
+     start(_options) {
+         throw this.unimplemented('Speech recognition is not available on the web.');
+     }
+     stop() {
+         throw this.unimplemented('Speech recognition is not available on the web.');
+     }
+     getSupportedLanguages() {
+         throw this.unimplemented('Speech recognition is not available on the web.');
+     }
+     isListening() {
+         throw this.unimplemented('Speech recognition is not available on the web.');
+     }
+     checkPermissions() {
+         throw this.unimplemented('Speech recognition permissions are not handled on the web.');
+     }
+     requestPermissions() {
+         throw this.unimplemented('Speech recognition permissions are not handled on the web.');
+     }
+     async getPluginVersion() {
+         return { version: 'web' };
+     }
+ }
+
+ var web = /*#__PURE__*/Object.freeze({
+     __proto__: null,
+     SpeechRecognitionWeb: SpeechRecognitionWeb
+ });
+
+ exports.SpeechRecognition = SpeechRecognition;
+ //# sourceMappingURL=plugin.cjs.js.map
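For orientation: this CommonJS bundle is what require('capacitor-native-speech-recognition') resolves to (see the "exports" map in the package.json at the end of this diff). Below is a minimal usage sketch; the option and result shapes are inferred from the web stub above and the iOS implementation later in this diff, since the plugin's TypeScript definitions are not part of this diff.

import { SpeechRecognition } from 'capacitor-native-speech-recognition';

// Hedged sketch: with partialResults left at its default of false,
// start() resolves once with the final matches (see the iOS source below).
async function transcribeOnce(): Promise<string | undefined> {
  const { available } = await SpeechRecognition.available({ language: 'en-US' });
  if (!available) {
    return undefined;
  }
  const { matches } = await SpeechRecognition.start({
    language: 'en-US',
    maxResults: 3,
    addPunctuation: true,
  });
  return matches?.[0]; // transcriptions are ordered by confidence in the iOS buildMatches()
}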
package/dist/plugin.cjs.js.map ADDED
@@ -0,0 +1 @@
+ {"version":3,"file":"plugin.cjs.js","sources":["esm/index.js","esm/web.js"],"sourcesContent":["import { registerPlugin } from '@capacitor/core';\nconst SpeechRecognition = registerPlugin('SpeechRecognition', {\n web: () => import('./web').then((m) => new m.SpeechRecognitionWeb()),\n});\nexport * from './definitions';\nexport { SpeechRecognition };\n//# sourceMappingURL=index.js.map","import { WebPlugin } from '@capacitor/core';\nexport class SpeechRecognitionWeb extends WebPlugin {\n available() {\n throw this.unimplemented('Speech recognition is not available on the web.');\n }\n start(_options) {\n throw this.unimplemented('Speech recognition is not available on the web.');\n }\n stop() {\n throw this.unimplemented('Speech recognition is not available on the web.');\n }\n getSupportedLanguages() {\n throw this.unimplemented('Speech recognition is not available on the web.');\n }\n isListening() {\n throw this.unimplemented('Speech recognition is not available on the web.');\n }\n checkPermissions() {\n throw this.unimplemented('Speech recognition permissions are not handled on the web.');\n }\n requestPermissions() {\n throw this.unimplemented('Speech recognition permissions are not handled on the web.');\n }\n async getPluginVersion() {\n return { version: 'web' };\n }\n}\n//# sourceMappingURL=web.js.map"],"names":["registerPlugin","WebPlugin"],"mappings":";;;;AACK,MAAC,iBAAiB,GAAGA,mBAAc,CAAC,mBAAmB,EAAE;AAC9D,IAAI,GAAG,EAAE,MAAM,mDAAe,CAAC,IAAI,CAAC,CAAC,CAAC,KAAK,IAAI,CAAC,CAAC,oBAAoB,EAAE,CAAC;AACxE,CAAC;;ACFM,MAAM,oBAAoB,SAASC,cAAS,CAAC;AACpD,IAAI,SAAS,GAAG;AAChB,QAAQ,MAAM,IAAI,CAAC,aAAa,CAAC,iDAAiD,CAAC;AACnF,IAAI;AACJ,IAAI,KAAK,CAAC,QAAQ,EAAE;AACpB,QAAQ,MAAM,IAAI,CAAC,aAAa,CAAC,iDAAiD,CAAC;AACnF,IAAI;AACJ,IAAI,IAAI,GAAG;AACX,QAAQ,MAAM,IAAI,CAAC,aAAa,CAAC,iDAAiD,CAAC;AACnF,IAAI;AACJ,IAAI,qBAAqB,GAAG;AAC5B,QAAQ,MAAM,IAAI,CAAC,aAAa,CAAC,iDAAiD,CAAC;AACnF,IAAI;AACJ,IAAI,WAAW,GAAG;AAClB,QAAQ,MAAM,IAAI,CAAC,aAAa,CAAC,iDAAiD,CAAC;AACnF,IAAI;AACJ,IAAI,gBAAgB,GAAG;AACvB,QAAQ,MAAM,IAAI,CAAC,aAAa,CAAC,4DAA4D,CAAC;AAC9F,IAAI;AACJ,IAAI,kBAAkB,GAAG;AACzB,QAAQ,MAAM,IAAI,CAAC,aAAa,CAAC,4DAA4D,CAAC;AAC9F,IAAI;AACJ,IAAI,MAAM,gBAAgB,GAAG;AAC7B,QAAQ,OAAO,EAAE,OAAO,EAAE,KAAK,EAAE;AACjC,IAAI;AACJ;;;;;;;;;"}
package/dist/plugin.js ADDED
@@ -0,0 +1,45 @@
+ var capacitorCapacitorSpeechRecognition = (function (exports, core) {
+     'use strict';
+
+     const SpeechRecognition = core.registerPlugin('SpeechRecognition', {
+         web: () => Promise.resolve().then(function () { return web; }).then((m) => new m.SpeechRecognitionWeb()),
+     });
+
+     class SpeechRecognitionWeb extends core.WebPlugin {
+         available() {
+             throw this.unimplemented('Speech recognition is not available on the web.');
+         }
+         start(_options) {
+             throw this.unimplemented('Speech recognition is not available on the web.');
+         }
+         stop() {
+             throw this.unimplemented('Speech recognition is not available on the web.');
+         }
+         getSupportedLanguages() {
+             throw this.unimplemented('Speech recognition is not available on the web.');
+         }
+         isListening() {
+             throw this.unimplemented('Speech recognition is not available on the web.');
+         }
+         checkPermissions() {
+             throw this.unimplemented('Speech recognition permissions are not handled on the web.');
+         }
+         requestPermissions() {
+             throw this.unimplemented('Speech recognition permissions are not handled on the web.');
+         }
+         async getPluginVersion() {
+             return { version: 'web' };
+         }
+     }
+
+     var web = /*#__PURE__*/Object.freeze({
+         __proto__: null,
+         SpeechRecognitionWeb: SpeechRecognitionWeb
+     });
+
+     exports.SpeechRecognition = SpeechRecognition;
+
+     return exports;
+
+ })({}, capacitorExports);
+ //# sourceMappingURL=plugin.js.map
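The plugin.js build above is the same code wrapped in an IIFE for script-tag use; it is the "unpkg" entry in the package.json below, and its footer consumes the capacitorExports global that Capacitor's own capacitor.js bundle defines. A sketch of bundler-less access under that assumption:

// Assumes capacitor.js and plugin.js were loaded via <script> tags, in that
// order, so the IIFE above has already assigned the global declared here.
declare const capacitorCapacitorSpeechRecognition: {
  SpeechRecognition: { isListening(): Promise<{ listening: boolean }> };
};

const { SpeechRecognition } = capacitorCapacitorSpeechRecognition;
SpeechRecognition.isListening().then(({ listening }) => {
  console.log('listening:', listening); // shape taken from the iOS isListening() below
});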
package/dist/plugin.js.map ADDED
@@ -0,0 +1 @@
+ {"version":3,"file":"plugin.js","sources":["esm/index.js","esm/web.js"],"sourcesContent":["import { registerPlugin } from '@capacitor/core';\nconst SpeechRecognition = registerPlugin('SpeechRecognition', {\n web: () => import('./web').then((m) => new m.SpeechRecognitionWeb()),\n});\nexport * from './definitions';\nexport { SpeechRecognition };\n//# sourceMappingURL=index.js.map","import { WebPlugin } from '@capacitor/core';\nexport class SpeechRecognitionWeb extends WebPlugin {\n available() {\n throw this.unimplemented('Speech recognition is not available on the web.');\n }\n start(_options) {\n throw this.unimplemented('Speech recognition is not available on the web.');\n }\n stop() {\n throw this.unimplemented('Speech recognition is not available on the web.');\n }\n getSupportedLanguages() {\n throw this.unimplemented('Speech recognition is not available on the web.');\n }\n isListening() {\n throw this.unimplemented('Speech recognition is not available on the web.');\n }\n checkPermissions() {\n throw this.unimplemented('Speech recognition permissions are not handled on the web.');\n }\n requestPermissions() {\n throw this.unimplemented('Speech recognition permissions are not handled on the web.');\n }\n async getPluginVersion() {\n return { version: 'web' };\n }\n}\n//# sourceMappingURL=web.js.map"],"names":["registerPlugin","WebPlugin"],"mappings":";;;AACK,UAAC,iBAAiB,GAAGA,mBAAc,CAAC,mBAAmB,EAAE;IAC9D,IAAI,GAAG,EAAE,MAAM,mDAAe,CAAC,IAAI,CAAC,CAAC,CAAC,KAAK,IAAI,CAAC,CAAC,oBAAoB,EAAE,CAAC;IACxE,CAAC;;ICFM,MAAM,oBAAoB,SAASC,cAAS,CAAC;IACpD,IAAI,SAAS,GAAG;IAChB,QAAQ,MAAM,IAAI,CAAC,aAAa,CAAC,iDAAiD,CAAC;IACnF,IAAI;IACJ,IAAI,KAAK,CAAC,QAAQ,EAAE;IACpB,QAAQ,MAAM,IAAI,CAAC,aAAa,CAAC,iDAAiD,CAAC;IACnF,IAAI;IACJ,IAAI,IAAI,GAAG;IACX,QAAQ,MAAM,IAAI,CAAC,aAAa,CAAC,iDAAiD,CAAC;IACnF,IAAI;IACJ,IAAI,qBAAqB,GAAG;IAC5B,QAAQ,MAAM,IAAI,CAAC,aAAa,CAAC,iDAAiD,CAAC;IACnF,IAAI;IACJ,IAAI,WAAW,GAAG;IAClB,QAAQ,MAAM,IAAI,CAAC,aAAa,CAAC,iDAAiD,CAAC;IACnF,IAAI;IACJ,IAAI,gBAAgB,GAAG;IACvB,QAAQ,MAAM,IAAI,CAAC,aAAa,CAAC,4DAA4D,CAAC;IAC9F,IAAI;IACJ,IAAI,kBAAkB,GAAG;IACzB,QAAQ,MAAM,IAAI,CAAC,aAAa,CAAC,4DAA4D,CAAC;IAC9F,IAAI;IACJ,IAAI,MAAM,gBAAgB,GAAG;IAC7B,QAAQ,OAAO,EAAE,OAAO,EAAE,KAAK,EAAE;IACjC,IAAI;IACJ;;;;;;;;;;;;;;;"}
@@ -0,0 +1,295 @@
+ import AVFoundation
+ import Capacitor
+ import Foundation
+ import Speech
+
+ private enum PermissionState: String {
+     case granted
+     case denied
+     case prompt
+ }
+
+ @objc(SpeechRecognitionPlugin)
+ public final class SpeechRecognitionPlugin: CAPPlugin, CAPBridgedPlugin {
+     private let pluginVersion: String = "8.0.3"
+     public let identifier = "SpeechRecognitionPlugin"
+     public let jsName = "SpeechRecognition"
+     public let pluginMethods: [CAPPluginMethod] = [
+         CAPPluginMethod(name: "available", returnType: CAPPluginReturnPromise),
+         CAPPluginMethod(name: "start", returnType: CAPPluginReturnPromise),
+         CAPPluginMethod(name: "stop", returnType: CAPPluginReturnPromise),
+         CAPPluginMethod(name: "getSupportedLanguages", returnType: CAPPluginReturnPromise),
+         CAPPluginMethod(name: "isListening", returnType: CAPPluginReturnPromise),
+         CAPPluginMethod(name: "checkPermissions", returnType: CAPPluginReturnPromise),
+         CAPPluginMethod(name: "requestPermissions", returnType: CAPPluginReturnPromise),
+         CAPPluginMethod(name: "getPluginVersion", returnType: CAPPluginReturnPromise)
+     ]
+
+     private let audioEngine = AVAudioEngine()
+     private var recognitionRequest: SFSpeechAudioBufferRecognitionRequest?
+     private var recognitionTask: SFSpeechRecognitionTask?
+     private var speechRecognizer: SFSpeechRecognizer?
+     private var activeCall: CAPPluginCall?
+     private var currentOptions: RecognitionOptions?
+     private var hasInstalledTap = false
+
+     private let maxDefaultResults = 5
+
+     @objc func available(_ call: CAPPluginCall) {
+         let locale = Locale(identifier: call.getString("language") ?? Locale.current.identifier)
+         let recognizer = SFSpeechRecognizer(locale: locale)
+         call.resolve(["available": recognizer?.isAvailable ?? false])
+     }
+
+     @objc func start(_ call: CAPPluginCall) {
+         if self.audioEngine.isRunning || recognitionTask != nil {
+             CAPLog.print("[SpeechRecognition] Attempted to start while already running")
+             call.reject("Speech recognition is already running.")
+             return
+         }
+
+         guard isSpeechPermissionGranted else {
+             CAPLog.print("[SpeechRecognition] Missing speech permission, rejecting start()")
+             call.reject("Missing speech recognition permission.")
+             return
+         }
+
+         let options = RecognitionOptions(
+             language: call.getString("language") ?? Locale.current.identifier,
+             maxResults: call.getInt("maxResults") ?? maxDefaultResults,
+             partialResults: call.getBool("partialResults") ?? false,
+             addPunctuation: call.getBool("addPunctuation") ?? false
+         )
+
+         self.activeCall = call
+         self.currentOptions = options
+         CAPLog.print("[SpeechRecognition] Starting session | language=\(options.language) partialResults=\(options.partialResults) punctuation=\(options.addPunctuation)")
+
+         AVAudioSession.sharedInstance().requestRecordPermission { granted in
+             guard granted else {
+                 CAPLog.print("[SpeechRecognition] Microphone permission denied by user")
+                 DispatchQueue.main.async {
+                     call.reject("User denied microphone access.")
+                     self.cleanupRecognition(notifyStop: false)
+                 }
+                 return
+             }
+
+             DispatchQueue.main.async {
+                 self.beginRecognition(call: call, options: options)
+             }
+         }
+     }
+
+     @objc func stop(_ call: CAPPluginCall) {
+         CAPLog.print("[SpeechRecognition] stop() invoked")
+         cleanupRecognition(notifyStop: true)
+         call.resolve()
+     }
+
+     @objc func isListening(_ call: CAPPluginCall) {
+         call.resolve(["listening": audioEngine.isRunning])
+     }
+
+     @objc func getSupportedLanguages(_ call: CAPPluginCall) {
+         let identifiers = SFSpeechRecognizer
+             .supportedLocales()
+             .map { $0.identifier }
+             .sorted()
+         call.resolve(["languages": identifiers])
+     }
+
+     @objc override public func checkPermissions(_ call: CAPPluginCall) {
+         call.resolve(["speechRecognition": permissionState.rawValue])
+     }
+
+     @objc override public func requestPermissions(_ call: CAPPluginCall) {
+         SFSpeechRecognizer.requestAuthorization { status in
+             switch status {
+             case .authorized:
+                 AVAudioSession.sharedInstance().requestRecordPermission { granted in
+                     DispatchQueue.main.async {
+                         let result: PermissionState = granted ? .granted : .denied
+                         call.resolve(["speechRecognition": result.rawValue])
+                     }
+                 }
+             case .denied, .restricted:
+                 DispatchQueue.main.async {
+                     call.resolve(["speechRecognition": PermissionState.denied.rawValue])
+                 }
+             case .notDetermined:
+                 DispatchQueue.main.async {
+                     call.resolve(["speechRecognition": PermissionState.prompt.rawValue])
+                 }
+             @unknown default:
+                 DispatchQueue.main.async {
+                     call.resolve(["speechRecognition": PermissionState.prompt.rawValue])
+                 }
+             }
+         }
+     }
+
+     private func beginRecognition(call: CAPPluginCall, options: RecognitionOptions) {
+         guard let recognizer = SFSpeechRecognizer(locale: Locale(identifier: options.language)) else {
+             call.reject("Unsupported locale: \(options.language)")
+             cleanupRecognition(notifyStop: false)
+             return
+         }
+
+         guard recognizer.isAvailable else {
+             call.reject("Speech recognizer is currently unavailable.")
+             cleanupRecognition(notifyStop: false)
+             return
+         }
+
+         speechRecognizer = recognizer
+
+         do {
+             try configureAudioSession()
+         } catch {
+             call.reject("Failed to configure audio session: \(error.localizedDescription)")
+             cleanupRecognition(notifyStop: false)
+             return
+         }
+
+         let recognitionRequest = SFSpeechAudioBufferRecognitionRequest()
+         recognitionRequest.shouldReportPartialResults = options.partialResults
+         if #available(iOS 16.0, *) {
+             recognitionRequest.addsPunctuation = options.addPunctuation
+         }
+         self.recognitionRequest = recognitionRequest
+
+         let inputNode = audioEngine.inputNode
+         let recordingFormat = inputNode.outputFormat(forBus: 0)
+         inputNode.removeTap(onBus: 0)
+         inputNode.installTap(onBus: 0, bufferSize: 1024, format: recordingFormat) { [weak self] buffer, _ in
+             self?.recognitionRequest?.append(buffer)
+         }
+         hasInstalledTap = true
+
+         audioEngine.prepare()
+
+         do {
+             try audioEngine.start()
+             notifyListeners("listeningState", data: ["status": "started"])
+         } catch {
+             call.reject("Unable to start audio engine: \(error.localizedDescription)")
+             cleanupRecognition(notifyStop: false)
+             return
+         }
+
+         if options.partialResults {
+             call.resolve()
+         }
+
+         recognitionTask = recognizer.recognitionTask(with: recognitionRequest) { [weak self] result, error in
+             guard let self else { return }
+             if let result {
+                 let matches = self.buildMatches(from: result, maxResults: options.maxResults)
+                 if options.partialResults {
+                     DispatchQueue.main.async {
+                         self.notifyListeners("partialResults", data: ["matches": matches])
+                     }
+                 } else if result.isFinal {
+                     DispatchQueue.main.async {
+                         self.activeCall?.resolve(["matches": matches])
+                     }
+                 }
+
+                 if result.isFinal {
+                     self.cleanupRecognition(notifyStop: true)
+                 }
+             }
+
+             if let error {
+                 self.handleRecognitionError(error)
+             }
+         }
+     }
+
+     private func configureAudioSession() throws {
+         let session = AVAudioSession.sharedInstance()
+         try session.setCategory(.playAndRecord, options: [.defaultToSpeaker, .duckOthers])
+         try session.setMode(.measurement)
+         try session.setActive(true, options: .notifyOthersOnDeactivation)
+     }
+
+     private func cleanupRecognition(notifyStop: Bool) {
+         DispatchQueue.main.async {
+             CAPLog.print("[SpeechRecognition] Cleaning up recognition resources")
+             if self.audioEngine.isRunning {
+                 self.audioEngine.stop()
+             }
+
+             if self.hasInstalledTap {
+                 self.audioEngine.inputNode.removeTap(onBus: 0)
+                 self.hasInstalledTap = false
+             }
+
+             self.recognitionRequest?.endAudio()
+             self.recognitionRequest = nil
+             self.recognitionTask?.cancel()
+             self.recognitionTask = nil
+             self.speechRecognizer = nil
+             self.currentOptions = nil
+             self.activeCall = nil
+
+             if notifyStop {
+                 self.notifyListeners("listeningState", data: ["status": "stopped"])
+             }
+         }
+     }
+
+     private func handleRecognitionError(_ error: Error) {
+         DispatchQueue.main.async {
+             CAPLog.print("[SpeechRecognition] Error from recognizer: \(error.localizedDescription)")
+             self.cleanupRecognition(notifyStop: true)
+             self.activeCall?.reject(error.localizedDescription)
+         }
+     }
+
+     private func buildMatches(from result: SFSpeechRecognitionResult, maxResults: Int) -> [String] {
+         var matches: [String] = []
+         for transcription in result.transcriptions where matches.count < maxResults {
+             matches.append(transcription.formattedString)
+         }
+         return matches
+     }
+
+     private var isSpeechPermissionGranted: Bool {
+         switch SFSpeechRecognizer.authorizationStatus() {
+         case .authorized:
+             return true
+         case .notDetermined, .denied, .restricted:
+             return false
+         @unknown default:
+             return false
+         }
+     }
+
+     private var permissionState: PermissionState {
+         let speechStatus = SFSpeechRecognizer.authorizationStatus()
+         let micStatus = AVAudioSession.sharedInstance().recordPermission
+
+         if speechStatus == .denied || speechStatus == .restricted || micStatus == .denied {
+             return .denied
+         }
+
+         if speechStatus == .notDetermined || micStatus == .undetermined {
+             return .prompt
+         }
+
+         return .granted
+     }
+
+     @objc func getPluginVersion(_ call: CAPPluginCall) {
+         call.resolve(["version": pluginVersion])
+     }
+ }
+
+ private struct RecognitionOptions {
+     let language: String
+     let maxResults: Int
+     let partialResults: Bool
+     let addPunctuation: Bool
+ }
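A point worth noting in the Swift above: when partialResults is true, start() resolves as soon as the audio engine is running, and transcriptions stream through the partialResults event instead of the promise; listeningState fires "started"/"stopped" around the session. A hedged sketch of the matching JavaScript wiring, with event payload shapes copied from the notifyListeners calls above and the standard Capacitor addListener API assumed:

import { SpeechRecognition } from 'capacitor-native-speech-recognition';

async function startLiveTranscription(onText: (text: string) => void): Promise<void> {
  // Payloads mirror notifyListeners("partialResults", ...) and
  // notifyListeners("listeningState", ...) in the Swift source above.
  await SpeechRecognition.addListener('partialResults', (event: { matches: string[] }) => {
    if (event.matches.length > 0) {
      onText(event.matches[0]);
    }
  });
  await SpeechRecognition.addListener('listeningState', (event: { status: 'started' | 'stopped' }) => {
    console.log('listening state:', event.status);
  });

  // With partialResults: true, this resolves once the audio engine starts.
  await SpeechRecognition.start({ language: 'en-US', partialResults: true });
}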
@@ -0,0 +1,7 @@
+ import XCTest
+
+ final class SpeechRecognitionPluginTests: XCTestCase {
+     func testExample() {
+         XCTAssertTrue(true)
+     }
+ }
package/package.json ADDED
@@ -0,0 +1,102 @@
+ {
+   "name": "capacitor-native-speech-recognition",
+   "version": "1.0.1",
+   "description": "Capacitor plugin for comprehensive on-device speech recognition with live partial results.",
+   "main": "dist/plugin.cjs.js",
+   "module": "dist/esm/index.js",
+   "types": "dist/esm/index.d.ts",
+   "unpkg": "dist/plugin.js",
+   "exports": {
+     ".": {
+       "types": "./dist/esm/index.d.ts",
+       "import": "./dist/esm/index.js",
+       "require": "./dist/plugin.cjs.js",
+       "default": "./dist/esm/index.js"
+     },
+     "./package.json": "./package.json"
+   },
+   "publishConfig": {
+     "access": "public"
+   },
+   "sideEffects": false,
+   "files": [
+     "android/src/main/",
+     "android/build.gradle",
+     "dist/",
+     "ios/Sources",
+     "ios/Tests",
+     "Package.swift",
+     "CapgoCapacitorSpeechRecognition.podspec"
+   ],
+   "author": "Morteza Rahmani <rmorteza3000@gmail.com>",
+   "license": "MPL-2.0",
+   "repository": {
+     "type": "git",
+     "url": "git+https://github.com/rahmanimorteza/capgo-speech-recognition.git#package"
+   },
+   "bugs": {
+     "url": "https://github.com/rahmanimorteza/capgo-speech-recognition/issues"
+   },
+   "homepage": "https://github.com/rahmanimorteza/capgo-speech-recognition#readme",
+   "keywords": [
+     "capacitor",
+     "plugin",
+     "speech",
+     "recognition",
+     "voice",
+     "transcription"
+   ],
+   "scripts": {
+     "verify": "bun run verify:ios && bun run verify:android && bun run verify:web",
+     "verify:ios": "xcodebuild -scheme CapgoCapacitorSpeechRecognition -destination generic/platform=iOS",
+     "verify:android": "cd android && ./gradlew clean build test && cd ..",
+     "verify:web": "bun run build",
+     "lint": "bun run eslint && bun run prettier -- --check && bun run swiftlint -- lint",
+     "fmt": "bun run eslint -- --fix && bun run prettier -- --write && bun run swiftlint -- --fix --format",
+     "eslint": "eslint . --ext ts",
+     "prettier": "prettier-pretty-check \"**/*.{css,html,ts,js,java}\" --plugin=prettier-plugin-java",
+     "swiftlint": "node-swiftlint",
+     "docgen": "docgen --api SpeechRecognitionPlugin --output-readme README.md --output-json dist/docs.json",
+     "build": "bun run clean && bun run docgen && tsc && rollup -c rollup.config.mjs",
+     "clean": "rimraf ./dist",
+     "watch": "tsc --watch",
+     "prepublishOnly": "bun run build"
+   },
+   "devDependencies": {
+     "@capacitor/android": "^8.0.0",
+     "@capacitor/cli": "^8.0.0",
+     "@capacitor/core": "^8.0.0",
+     "@capacitor/docgen": "^0.3.1",
+     "@capacitor/ios": "^8.0.0",
+     "@ionic/eslint-config": "^0.4.0",
+     "@ionic/prettier-config": "^4.0.0",
+     "@ionic/swiftlint-config": "^2.0.0",
+     "@types/node": "^24.10.1",
+     "eslint": "^8.57.1",
+     "eslint-plugin-import": "^2.31.0",
+     "husky": "^9.1.7",
+     "prettier": "^3.6.2",
+     "prettier-plugin-java": "^2.7.7",
+     "rimraf": "^6.1.0",
+     "rollup": "^4.53.2",
+     "swiftlint": "^2.0.0",
+     "typescript": "^5.9.3",
+     "prettier-pretty-check": "^0.2.0"
+   },
+   "peerDependencies": {
+     "@capacitor/core": ">=8.0.0"
+   },
+   "eslintConfig": {
+     "extends": "@ionic/eslint-config/recommended"
+   },
+   "prettier": "@ionic/prettier-config",
+   "swiftlint": "@ionic/swiftlint-config",
+   "capacitor": {
+     "ios": {
+       "src": "ios"
+     },
+     "android": {
+       "src": "android"
+     }
+   }
+ }
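Taken together, the "exports" map routes every module system to a matching build: "import" resolves to dist/esm/index.js, "require" to the dist/plugin.cjs.js shown at the top of this diff, and the "unpkg" field points CDNs at the IIFE dist/plugin.js. A final ESM consumer sketch, with the result shape taken from getSupportedLanguages() in the iOS source:

import { SpeechRecognition } from 'capacitor-native-speech-recognition';

// Resolved via the "import" condition to dist/esm/index.js. The sorted
// locale identifiers come from SFSpeechRecognizer.supportedLocales() on iOS;
// the web stub rejects this method as unimplemented.
export async function supportedLanguages(): Promise<string[]> {
  const { languages } = await SpeechRecognition.getSupportedLanguages();
  return languages;
}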