@capgo/capacitor-speech-synthesis 7.0.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/CapgoCapacitorSpeechSynthesis.podspec +17 -0
- package/LICENSE +21 -0
- package/Package.swift +28 -0
- package/README.md +507 -0
- package/android/build.gradle +58 -0
- package/android/src/main/AndroidManifest.xml +2 -0
- package/android/src/main/java/ee/forgr/plugin/speechsynthesis/SpeechSynthesisPlugin.java +438 -0
- package/dist/docs.json +1089 -0
- package/dist/esm/definitions.d.ts +519 -0
- package/dist/esm/definitions.js +2 -0
- package/dist/esm/definitions.js.map +1 -0
- package/dist/esm/index.d.ts +4 -0
- package/dist/esm/index.js +7 -0
- package/dist/esm/index.js.map +1 -0
- package/dist/esm/web.d.ts +35 -0
- package/dist/esm/web.js +153 -0
- package/dist/esm/web.js.map +1 -0
- package/dist/plugin.cjs.js +167 -0
- package/dist/plugin.cjs.js.map +1 -0
- package/dist/plugin.js +170 -0
- package/dist/plugin.js.map +1 -0
- package/ios/Sources/SpeechSynthesisPlugin/SpeechSynthesisPlugin.swift +338 -0
- package/ios/Tests/SpeechSynthesisPluginTests/SpeechSynthesisPluginTests.swift +10 -0
- package/package.json +86 -0
|
@@ -0,0 +1,338 @@
|
|
|
1
|
+
import Foundation
|
|
2
|
+
import Capacitor
|
|
3
|
+
import AVFoundation
|
|
4
|
+
|
|
5
|
+
/**
 * Speech Synthesis Plugin for iOS using AVSpeechSynthesizer.
 *
 * Bridges Capacitor JS calls to AVSpeechSynthesizer. Each utterance gets a
 * generated id and is tracked in `utteranceMap` so the synthesizer delegate
 * callbacks can be reported back to JS listeners ("start", "end", "boundary",
 * "error") with the matching id.
 */
@objc(SpeechSynthesisPlugin)
public class SpeechSynthesisPlugin: CAPPlugin, CAPBridgedPlugin, AVSpeechSynthesizerDelegate {
    private let pluginVersion: String = "7.0.0"
    public let identifier = "SpeechSynthesisPlugin"
    public let jsName = "SpeechSynthesis"
    public let pluginMethods: [CAPPluginMethod] = [
        CAPPluginMethod(name: "speak", returnType: CAPPluginReturnPromise),
        CAPPluginMethod(name: "synthesizeToFile", returnType: CAPPluginReturnPromise),
        CAPPluginMethod(name: "cancel", returnType: CAPPluginReturnPromise),
        CAPPluginMethod(name: "pause", returnType: CAPPluginReturnPromise),
        CAPPluginMethod(name: "resume", returnType: CAPPluginReturnPromise),
        CAPPluginMethod(name: "isSpeaking", returnType: CAPPluginReturnPromise),
        CAPPluginMethod(name: "isAvailable", returnType: CAPPluginReturnPromise),
        CAPPluginMethod(name: "getVoices", returnType: CAPPluginReturnPromise),
        CAPPluginMethod(name: "getLanguages", returnType: CAPPluginReturnPromise),
        CAPPluginMethod(name: "isLanguageAvailable", returnType: CAPPluginReturnPromise),
        CAPPluginMethod(name: "isVoiceAvailable", returnType: CAPPluginReturnPromise),
        CAPPluginMethod(name: "initialize", returnType: CAPPluginReturnPromise),
        CAPPluginMethod(name: "activateAudioSession", returnType: CAPPluginReturnPromise),
        CAPPluginMethod(name: "deactivateAudioSession", returnType: CAPPluginReturnPromise),
        CAPPluginMethod(name: "getPluginVersion", returnType: CAPPluginReturnPromise)
    ]

    private var synthesizer: AVSpeechSynthesizer?
    // Monotonic counter used to mint unique utterance ids.
    private var utteranceIdCounter: Int = 0
    // Maps generated ids to live utterances; delegate callbacks do a reverse
    // lookup via getUtteranceId(for:). NOTE(review): mutated from plugin calls
    // and from delegate callbacks — assumed both arrive on the same thread
    // (main); confirm against the Capacitor bridge's threading model.
    private var utteranceMap: [String: AVSpeechUtterance] = [:]

    override public func load() {
        super.load()
        synthesizer = AVSpeechSynthesizer()
        synthesizer?.delegate = self
    }

    /// Builds an utterance from the common call options.
    ///
    /// Voice selection: an explicit `voiceId` wins; otherwise a `language`
    /// (BCP-47) picks a default voice for that locale. Unknown ids/languages
    /// silently fall back to the system default voice, matching the original
    /// behavior. `pitch` is clamped to [0.5, 2.0], `volume` to [0.0, 1.0], and
    /// `rate` is a multiplier of the platform default rate clamped to
    /// AVFoundation's supported range.
    private func makeUtterance(from call: CAPPluginCall, text: String) -> AVSpeechUtterance {
        let utterance = AVSpeechUtterance(string: text)

        if let voiceId = call.getString("voiceId"), let voice = AVSpeechSynthesisVoice(identifier: voiceId) {
            utterance.voice = voice
        } else if let language = call.getString("language"), let voice = AVSpeechSynthesisVoice(language: language) {
            utterance.voice = voice
        }

        if let pitch = call.getFloat("pitch") {
            utterance.pitchMultiplier = max(0.5, min(2.0, pitch))
        }
        if let rate = call.getFloat("rate") {
            // JS-side rate is relative to the default speaking rate.
            utterance.rate = max(AVSpeechUtteranceMinimumSpeechRate, min(AVSpeechUtteranceMaximumSpeechRate, rate * AVSpeechUtteranceDefaultSpeechRate))
        }
        if let volume = call.getFloat("volume") {
            utterance.volume = max(0.0, min(1.0, volume))
        }

        return utterance
    }

    /// Speaks `text`, resolving immediately with the generated `utteranceId`.
    /// Completion/progress are reported asynchronously via listener events.
    @objc func speak(_ call: CAPPluginCall) {
        guard let text = call.getString("text") else {
            call.reject("Text is required")
            return
        }

        let utteranceId = "ios-utterance-\(utteranceIdCounter)"
        utteranceIdCounter += 1

        let utterance = makeUtterance(from: call, text: text)

        // "Flush" discards anything queued or currently speaking before
        // enqueuing this utterance; the default "Add" appends to the queue.
        let queueStrategy = call.getString("queueStrategy") ?? "Add"
        if queueStrategy == "Flush" {
            synthesizer?.stopSpeaking(at: .immediate)
            utteranceMap.removeAll()
        }

        // Fix: the original also did `setValue(_:forKey: "currentUtteranceId")`,
        // which is KVC against an undefined key and raises
        // NSUnknownKeyException at runtime. The value was never read — delegate
        // callbacks resolve ids through utteranceMap — so it is removed.
        utteranceMap[utteranceId] = utterance
        synthesizer?.speak(utterance)

        call.resolve([
            "utteranceId": utteranceId
        ])
    }

    /// Renders `text` to a .caf file in the app's Documents directory and
    /// resolves with `filePath` and `utteranceId` when synthesis completes.
    @objc func synthesizeToFile(_ call: CAPPluginCall) {
        guard let text = call.getString("text") else {
            call.reject("Text is required")
            return
        }

        guard #available(iOS 13.0, *) else {
            call.reject("synthesizeToFile requires iOS 13.0 or later")
            return
        }

        let utteranceId = "ios-file-\(utteranceIdCounter)"
        utteranceIdCounter += 1

        let utterance = makeUtterance(from: call, text: text)

        let documentsPath = FileManager.default.urls(for: .documentDirectory, in: .userDomainMask)[0]
        let audioFilename = documentsPath.appendingPathComponent("\(utteranceId).caf")

        // Fix: AVSpeechSynthesizer.write(_:toBufferCallback:) invokes the
        // callback once PER BUFFER and finally with an empty (frameLength == 0)
        // buffer to signal completion. The original opened a new AVAudioFile on
        // every buffer (overwriting earlier audio), resolved the call on every
        // buffer (double-resolve), and rejected on the terminal empty buffer.
        // Open the file once, append each buffer, and settle the call exactly once.
        var audioFile: AVAudioFile?
        var settled = false

        synthesizer?.write(utterance) { (buffer: AVAudioBuffer) in
            guard !settled else { return }

            guard let pcmBuffer = buffer as? AVAudioPCMBuffer else {
                settled = true
                call.reject("Failed to get PCM buffer")
                return
            }

            if pcmBuffer.frameLength == 0 {
                // Zero-length buffer marks the end of synthesis.
                settled = true
                call.resolve([
                    "filePath": audioFilename.path,
                    "utteranceId": utteranceId
                ])
                return
            }

            do {
                if audioFile == nil {
                    audioFile = try AVAudioFile(forWriting: audioFilename, settings: pcmBuffer.format.settings)
                }
                try audioFile?.write(from: pcmBuffer)
            } catch {
                settled = true
                call.reject("Failed to write audio file: \(error.localizedDescription)")
            }
        }
    }

    /// Stops all speech immediately and clears the utterance registry.
    @objc func cancel(_ call: CAPPluginCall) {
        synthesizer?.stopSpeaking(at: .immediate)
        utteranceMap.removeAll()
        call.resolve()
    }

    /// Pauses speech immediately (resumable via `resume`).
    @objc func pause(_ call: CAPPluginCall) {
        synthesizer?.pauseSpeaking(at: .immediate)
        call.resolve()
    }

    /// Resumes speech previously paused with `pause`.
    @objc func resume(_ call: CAPPluginCall) {
        synthesizer?.continueSpeaking()
        call.resolve()
    }

    /// Resolves with whether the synthesizer is currently speaking.
    /// Note: AVSpeechSynthesizer.isSpeaking stays true while paused.
    @objc func isSpeaking(_ call: CAPPluginCall) {
        let isSpeaking = synthesizer?.isSpeaking ?? false
        call.resolve([
            "isSpeaking": isSpeaking
        ])
    }

    /// Speech synthesis is always available on iOS.
    @objc func isAvailable(_ call: CAPPluginCall) {
        call.resolve([
            "isAvailable": true
        ])
    }

    /// Resolves with all installed voices: id, name, language, plus gender and
    /// a network-requirement heuristic on iOS 13+.
    @objc func getVoices(_ call: CAPPluginCall) {
        let voices = AVSpeechSynthesisVoice.speechVoices()
        let voiceInfos = voices.map { voice -> [String: Any] in
            var info: [String: Any] = [
                "id": voice.identifier,
                "name": voice.name,
                "language": voice.language
            ]

            if #available(iOS 13.0, *) {
                // Gender (unspecified maps to "neutral").
                switch voice.gender {
                case .male:
                    info["gender"] = "male"
                case .female:
                    info["gender"] = "female"
                default:
                    info["gender"] = "neutral"
                }

                // Heuristic: enhanced/premium voices (quality rawValue >= 2)
                // are downloaded on-device and typically need no network;
                // default-quality voices are reported as network-dependent.
                // NOTE(review): this is an approximation — AVFoundation exposes
                // no direct "requires network" flag for voices.
                info["isNetworkConnectionRequired"] = voice.quality.rawValue < 2
            }

            return info
        }

        call.resolve([
            "voices": voiceInfos
        ])
    }

    /// Resolves with the sorted, de-duplicated list of languages that have at
    /// least one installed voice.
    @objc func getLanguages(_ call: CAPPluginCall) {
        let voices = AVSpeechSynthesisVoice.speechVoices()
        let languages = Array(Set(voices.map { $0.language })).sorted()

        call.resolve([
            "languages": languages
        ])
    }

    /// Resolves with whether any installed voice exactly matches `language`.
    @objc func isLanguageAvailable(_ call: CAPPluginCall) {
        guard let language = call.getString("language") else {
            call.reject("Language is required")
            return
        }

        let voices = AVSpeechSynthesisVoice.speechVoices()
        let isAvailable = voices.contains { $0.language == language }

        call.resolve([
            "isAvailable": isAvailable
        ])
    }

    /// Resolves with whether a voice with the given identifier exists.
    @objc func isVoiceAvailable(_ call: CAPPluginCall) {
        guard let voiceId = call.getString("voiceId") else {
            call.reject("Voice ID is required")
            return
        }

        let isAvailable = AVSpeechSynthesisVoice(identifier: voiceId) != nil

        call.resolve([
            "isAvailable": isAvailable
        ])
    }

    /// Pre-warms the synthesizer. Safe to call multiple times; a no-op when
    /// `load()` already created it.
    @objc func initialize(_ call: CAPPluginCall) {
        if synthesizer == nil {
            synthesizer = AVSpeechSynthesizer()
            synthesizer?.delegate = self
        }
        call.resolve()
    }

    /// Activates the shared audio session. `category` may be "Ambient"
    /// (mixes with other audio, silenced by the mute switch); anything else —
    /// including the default "Playback" — uses the playback category.
    @objc func activateAudioSession(_ call: CAPPluginCall) {
        let category = call.getString("category") ?? "Playback"

        let audioSession = AVAudioSession.sharedInstance()
        do {
            if category == "Ambient" {
                try audioSession.setCategory(.ambient, mode: .default)
            } else {
                try audioSession.setCategory(.playback, mode: .default)
            }
            try audioSession.setActive(true)
            call.resolve()
        } catch {
            call.reject("Failed to activate audio session: \(error.localizedDescription)")
        }
    }

    /// Deactivates the shared audio session, letting interrupted apps resume.
    @objc func deactivateAudioSession(_ call: CAPPluginCall) {
        let audioSession = AVAudioSession.sharedInstance()
        do {
            try audioSession.setActive(false, options: .notifyOthersOnDeactivation)
            call.resolve()
        } catch {
            call.reject("Failed to deactivate audio session: \(error.localizedDescription)")
        }
    }

    /// Resolves with the plugin's own semantic version string.
    @objc func getPluginVersion(_ call: CAPPluginCall) {
        call.resolve(["version": self.pluginVersion])
    }

    // MARK: - AVSpeechSynthesizerDelegate

    public func speechSynthesizer(_ synthesizer: AVSpeechSynthesizer, didStart utterance: AVSpeechUtterance) {
        if let utteranceId = getUtteranceId(for: utterance) {
            notifyListeners("start", data: [
                "utteranceId": utteranceId
            ])
        }
    }

    public func speechSynthesizer(_ synthesizer: AVSpeechSynthesizer, didFinish utterance: AVSpeechUtterance) {
        if let utteranceId = getUtteranceId(for: utterance) {
            utteranceMap.removeValue(forKey: utteranceId)
            notifyListeners("end", data: [
                "utteranceId": utteranceId
            ])
        }
    }

    /// Word-boundary progress: forwards the character range about to be spoken.
    public func speechSynthesizer(_ synthesizer: AVSpeechSynthesizer, willSpeakRangeOfSpeechString characterRange: NSRange, utterance: AVSpeechUtterance) {
        if let utteranceId = getUtteranceId(for: utterance) {
            notifyListeners("boundary", data: [
                "utteranceId": utteranceId,
                "charIndex": characterRange.location,
                "charLength": characterRange.length
            ])
        }
    }

    /// Cancellation is surfaced to JS as an "error" event, matching the
    /// plugin's web implementation contract.
    public func speechSynthesizer(_ synthesizer: AVSpeechSynthesizer, didCancel utterance: AVSpeechUtterance) {
        if let utteranceId = getUtteranceId(for: utterance) {
            utteranceMap.removeValue(forKey: utteranceId)
            notifyListeners("error", data: [
                "utteranceId": utteranceId,
                "error": "Speech was cancelled"
            ])
        }
    }

    /// Reverse lookup: finds the generated id for a delegate-supplied utterance
    /// (NSObject equality — reference identity for AVSpeechUtterance).
    private func getUtteranceId(for utterance: AVSpeechUtterance) -> String? {
        return utteranceMap.first(where: { $0.value == utterance })?.key
    }
}
|
package/package.json
ADDED
|
@@ -0,0 +1,86 @@
|
|
|
1
|
+
{
|
|
2
|
+
"name": "@capgo/capacitor-speech-synthesis",
|
|
3
|
+
"version": "7.0.0",
|
|
4
|
+
"description": "Synthesize speech from text with full control over language, voice, pitch, rate, and volume.",
|
|
5
|
+
"main": "dist/plugin.cjs.js",
|
|
6
|
+
"module": "dist/esm/index.js",
|
|
7
|
+
"types": "dist/esm/index.d.ts",
|
|
8
|
+
"unpkg": "dist/plugin.js",
|
|
9
|
+
"files": [
|
|
10
|
+
"android/src/main/",
|
|
11
|
+
"android/build.gradle",
|
|
12
|
+
"dist/",
|
|
13
|
+
"ios/Sources",
|
|
14
|
+
"ios/Tests",
|
|
15
|
+
"Package.swift",
|
|
16
|
+
"CapgoCapacitorSpeechSynthesis.podspec"
|
|
17
|
+
],
|
|
18
|
+
"author": "Martin Donadieu <martin@capgo.app>",
|
|
19
|
+
"license": "MIT",
|
|
20
|
+
"repository": {
|
|
21
|
+
"type": "git",
|
|
22
|
+
"url": "git+https://github.com/Cap-go/capacitor-speech-synthesis.git"
|
|
23
|
+
},
|
|
24
|
+
"bugs": {
|
|
25
|
+
"url": "https://github.com/Cap-go/capacitor-speech-synthesis/issues"
|
|
26
|
+
},
|
|
27
|
+
"keywords": [
|
|
28
|
+
"capacitor",
|
|
29
|
+
"speech",
|
|
30
|
+
"synthesis",
|
|
31
|
+
"tts",
|
|
32
|
+
"text-to-speech",
|
|
33
|
+
"voice",
|
|
34
|
+
"audio",
|
|
35
|
+
"plugin",
|
|
36
|
+
"native"
|
|
37
|
+
],
|
|
38
|
+
"scripts": {
|
|
39
|
+
"verify": "npm run verify:ios && npm run verify:android && npm run verify:web",
|
|
40
|
+
"verify:ios": "xcodebuild -scheme CapgoCapacitorSpeechSynthesis -destination generic/platform=iOS",
|
|
41
|
+
"verify:android": "cd android && ./gradlew clean build test && cd ..",
|
|
42
|
+
"verify:web": "npm run build",
|
|
43
|
+
"lint": "npm run eslint && npm run prettier -- --check && npm run swiftlint -- lint",
|
|
44
|
+
"fmt": "npm run eslint -- --fix && npm run prettier -- --write && npm run swiftlint -- --fix --format",
|
|
45
|
+
"eslint": "eslint . --ext ts",
|
|
46
|
+
"prettier": "prettier \"**/*.{css,html,ts,js,java}\" --plugin=prettier-plugin-java",
|
|
47
|
+
"swiftlint": "node-swiftlint",
|
|
48
|
+
"docgen": "docgen --api SpeechSynthesisPlugin --output-readme README.md --output-json dist/docs.json",
|
|
49
|
+
"build": "npm run clean && npm run docgen && tsc && rollup -c rollup.config.mjs",
|
|
50
|
+
"clean": "rimraf ./dist",
|
|
51
|
+
"watch": "tsc --watch",
|
|
52
|
+
"prepublishOnly": "npm run build"
|
|
53
|
+
},
|
|
54
|
+
"devDependencies": {
|
|
55
|
+
"@capacitor/android": "^7.0.0",
|
|
56
|
+
"@capacitor/core": "^7.0.0",
|
|
57
|
+
"@capacitor/docgen": "^0.3.0",
|
|
58
|
+
"@capacitor/ios": "^7.0.0",
|
|
59
|
+
"@ionic/eslint-config": "^0.4.0",
|
|
60
|
+
"@ionic/prettier-config": "^4.0.0",
|
|
61
|
+
"@ionic/swiftlint-config": "^2.0.0",
|
|
62
|
+
"eslint": "^8.57.0",
|
|
63
|
+
"prettier": "^3.4.2",
|
|
64
|
+
"prettier-plugin-java": "^2.6.6",
|
|
65
|
+
"rimraf": "^6.0.1",
|
|
66
|
+
"rollup": "^4.30.1",
|
|
67
|
+
"swiftlint": "^2.0.0",
|
|
68
|
+
"typescript": "~4.1.5"
|
|
69
|
+
},
|
|
70
|
+
"peerDependencies": {
|
|
71
|
+
"@capacitor/core": ">=7.0.0"
|
|
72
|
+
},
|
|
73
|
+
"prettier": "@ionic/prettier-config",
|
|
74
|
+
"swiftlint": "@ionic/swiftlint-config",
|
|
75
|
+
"eslintConfig": {
|
|
76
|
+
"extends": "@ionic/eslint-config/recommended"
|
|
77
|
+
},
|
|
78
|
+
"capacitor": {
|
|
79
|
+
"ios": {
|
|
80
|
+
"src": "ios"
|
|
81
|
+
},
|
|
82
|
+
"android": {
|
|
83
|
+
"src": "android"
|
|
84
|
+
}
|
|
85
|
+
}
|
|
86
|
+
}
|