@capacitor-community/text-to-speech 1.0.0 → 1.1.2-dev.aaa3396.1649088226
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/CapacitorCommunityTextToSpeech.podspec +17 -17
- package/LICENSE +21 -21
- package/README.md +121 -119
- package/android/build.gradle +58 -58
- package/android/src/main/AndroidManifest.xml +3 -3
- package/android/src/main/java/com/getcapacitor/community/tts/SpeakResultCallback.java +6 -6
- package/android/src/main/java/com/getcapacitor/community/tts/TextToSpeech.java +162 -154
- package/android/src/main/java/com/getcapacitor/community/tts/TextToSpeechPlugin.java +130 -112
- package/dist/docs.json +27 -11
- package/dist/esm/definitions.d.ts +112 -104
- package/dist/esm/definitions.js +1 -1
- package/dist/esm/definitions.js.map +1 -1
- package/dist/esm/index.d.ts +4 -4
- package/dist/esm/index.js +10 -10
- package/dist/esm/index.js.map +1 -1
- package/dist/esm/web.d.ts +25 -20
- package/dist/esm/web.js +91 -83
- package/dist/esm/web.js.map +1 -1
- package/dist/plugin.cjs.js +91 -83
- package/dist/plugin.cjs.js.map +1 -1
- package/dist/plugin.js +91 -83
- package/dist/plugin.js.map +1 -1
- package/ios/Plugin/Info.plist +24 -24
- package/ios/Plugin/TextToSpeech.swift +81 -67
- package/ios/Plugin/TextToSpeechPlugin.h +10 -10
- package/ios/Plugin/TextToSpeechPlugin.m +13 -12
- package/ios/Plugin/TextToSpeechPlugin.swift +64 -57
- package/package.json +81 -80
- package/CHANGELOG.md +0 -63
package/dist/docs.json
CHANGED
@@ -39,7 +39,7 @@
         "parameters": [],
         "returns": "Promise<{ languages: string[]; }>",
         "tags": [],
-        "docs": "Returns a list of supported
+        "docs": "Returns a list of supported BCP 47 language tags.",
         "complexTypes": [],
         "slug": "getsupportedlanguages"
       },
@@ -55,13 +55,29 @@
         ],
         "slug": "getsupportedvoices"
       },
+      {
+        "name": "isLanguageSupported",
+        "signature": "(options: { lang: string; }) => Promise<{ supported: boolean; }>",
+        "parameters": [
+          {
+            "name": "options",
+            "docs": "",
+            "type": "{ lang: string; }"
+          }
+        ],
+        "returns": "Promise<{ supported: boolean; }>",
+        "tags": [],
+        "docs": "Checks if a specific BCP 47 language tag is supported.",
+        "complexTypes": [],
+        "slug": "islanguagesupported"
+      },
       {
         "name": "openInstall",
         "signature": "() => Promise<void>",
         "parameters": [],
         "returns": "Promise<void>",
         "tags": [],
-        "docs": "Verifies proper installation and availability of resource files on the system.\
+        "docs": "Verifies proper installation and availability of resource files on the system.\n\nOnly available for Android.",
         "complexTypes": [],
         "slug": "openinstall"
       }
@@ -86,42 +102,42 @@
       {
         "name": "lang",
         "tags": [],
-        "docs": "The language of the utterance.\
+        "docs": "The language of the utterance.\nPossible languages can be queried using `getSupportedLanguages`.\n\nDefault: `en-US`.",
         "complexTypes": [],
         "type": "string | undefined"
       },
       {
         "name": "rate",
         "tags": [],
-        "docs": "The speed at which the utterance will be spoken at.\
+        "docs": "The speed at which the utterance will be spoken at.\n\nDefault: `1.0`.",
         "complexTypes": [],
         "type": "number | undefined"
       },
       {
         "name": "pitch",
         "tags": [],
-        "docs": "The pitch at which the utterance will be spoken at.\
+        "docs": "The pitch at which the utterance will be spoken at.\n\nDefault: `1.0`.",
         "complexTypes": [],
         "type": "number | undefined"
       },
       {
         "name": "volume",
         "tags": [],
-        "docs": "The volume that the utterance will be spoken at.\
+        "docs": "The volume that the utterance will be spoken at.\n\nDefault: `1.0`.",
         "complexTypes": [],
         "type": "number | undefined"
       },
       {
         "name": "voice",
         "tags": [],
-        "docs": "The index of the selected voice that will be used to speak the utterance.\
+        "docs": "The index of the selected voice that will be used to speak the utterance.\nPossible voices can be queried using `getSupportedVoices`.\n\nOnly available for Web.",
         "complexTypes": [],
         "type": "number | undefined"
       },
       {
         "name": "category",
         "tags": [],
-        "docs": "Select the iOS Audio session category.\
+        "docs": "Select the iOS Audio session category.\nPossible values: `ambient` and `playback`.\nUse `playback` to play audio even when the app is in the background.\n\nOnly available for iOS.\n\nDefault: `ambient`.",
         "complexTypes": [],
         "type": "string | undefined"
       }
@@ -144,7 +160,7 @@
       {
         "name": "lang",
         "tags": [],
-        "docs": "BCP 47 language tag indicating the language of the voice.\
+        "docs": "BCP 47 language tag indicating the language of the voice.\nExample: `en-US`.",
         "complexTypes": [],
         "type": "string"
       },
@@ -158,14 +174,14 @@
       {
         "name": "name",
         "tags": [],
-        "docs": "Human-readable name that represents the voice.\
+        "docs": "Human-readable name that represents the voice.\nExample: `Microsoft Zira Desktop - English (United States)`.",
         "complexTypes": [],
         "type": "string"
       },
       {
         "name": "voiceURI",
         "tags": [],
-        "docs": "Type of URI and location of the speech synthesis service for this voice.\
+        "docs": "Type of URI and location of the speech synthesis service for this voice.\nExample: `urn:moz-tts:sapi:Microsoft Zira Desktop - English (United States)?en-US`.",
         "complexTypes": [],
         "type": "string"
       }
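The headline API addition documented above is the new `isLanguageSupported` method. As a minimal sketch (the helper function and the fallback logic are illustrative assumptions, not part of the package), it can be paired with `speak` to fall back to the documented default language:

```ts
import { TextToSpeech } from '@capacitor-community/text-to-speech';

// Illustrative helper, not shipped by the plugin: speak in the requested
// language if the platform reports it as supported, otherwise fall back
// to the documented default of 'en-US'.
async function speakIn(lang: string, text: string): Promise<void> {
  const { supported } = await TextToSpeech.isLanguageSupported({ lang });
  await TextToSpeech.speak({
    text,
    lang: supported ? lang : 'en-US',
  });
}
```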
package/dist/esm/definitions.d.ts
CHANGED
@@ -1,104 +1,112 @@
-export interface TextToSpeechPlugin {
-    /**
-     * Starts the TTS engine and plays the desired text.
-     */
-    speak(options: TTSOptions): Promise<void>;
-    /**
-     * Stops the TTS engine.
-     */
-    stop(): Promise<void>;
-    /**
-     * Returns a list of supported
-     */
-    getSupportedLanguages(): Promise<{
-        languages: string[];
-    }>;
-    /**
-     * Returns a list of supported voices.
-     */
-    getSupportedVoices(): Promise<{
-        voices: SpeechSynthesisVoice[];
-    }>;
-    /**
-     *
- … (removed lines 24-104 truncated in source)
+export interface TextToSpeechPlugin {
+    /**
+     * Starts the TTS engine and plays the desired text.
+     */
+    speak(options: TTSOptions): Promise<void>;
+    /**
+     * Stops the TTS engine.
+     */
+    stop(): Promise<void>;
+    /**
+     * Returns a list of supported BCP 47 language tags.
+     */
+    getSupportedLanguages(): Promise<{
+        languages: string[];
+    }>;
+    /**
+     * Returns a list of supported voices.
+     */
+    getSupportedVoices(): Promise<{
+        voices: SpeechSynthesisVoice[];
+    }>;
+    /**
+     * Checks if a specific BCP 47 language tag is supported.
+     */
+    isLanguageSupported(options: {
+        lang: string;
+    }): Promise<{
+        supported: boolean;
+    }>;
+    /**
+     * Verifies proper installation and availability of resource files on the system.
+     *
+     * Only available for Android.
+     */
+    openInstall(): Promise<void>;
+}
+export interface TTSOptions {
+    /**
+     * The text that will be synthesised when the utterance is spoken.
+     */
+    text: string;
+    /**
+     * The language of the utterance.
+     * Possible languages can be queried using `getSupportedLanguages`.
+     *
+     * Default: `en-US`.
+     */
+    lang?: string;
+    /**
+     * The speed at which the utterance will be spoken at.
+     *
+     * Default: `1.0`.
+     */
+    rate?: number;
+    /**
+     * The pitch at which the utterance will be spoken at.
+     *
+     * Default: `1.0`.
+     */
+    pitch?: number;
+    /**
+     * The volume that the utterance will be spoken at.
+     *
+     * Default: `1.0`.
+     */
+    volume?: number;
+    /**
+     * The index of the selected voice that will be used to speak the utterance.
+     * Possible voices can be queried using `getSupportedVoices`.
+     *
+     * Only available for Web.
+     */
+    voice?: number;
+    /**
+     * Select the iOS Audio session category.
+     * Possible values: `ambient` and `playback`.
+     * Use `playback` to play audio even when the app is in the background.
+     *
+     * Only available for iOS.
+     *
+     * Default: `ambient`.
+     */
+    category?: string;
+}
+/**
+ * The SpeechSynthesisVoice interface represents a voice that the system supports.
+ */
+export interface SpeechSynthesisVoice {
+    /**
+     * Specifies whether the voice is the default voice for the current app (`true`) or not (`false`).
+     */
+    default: boolean;
+    /**
+     * BCP 47 language tag indicating the language of the voice.
+     * Example: `en-US`.
+     */
+    lang: string;
+    /**
+     * Specifies whether the voice is supplied by a local (`true`) or remote (`false`) speech synthesizer service.
+     */
+    localService: boolean;
+    /**
+     * Human-readable name that represents the voice.
+     * Example: `Microsoft Zira Desktop - English (United States)`.
+     */
+    name: string;
+    /**
+     * Type of URI and location of the speech synthesis service for this voice.
+     * Example: `urn:moz-tts:sapi:Microsoft Zira Desktop - English (United States)?en-US`.
+     */
+    voiceURI: string;
+}
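The rewritten definitions above spell out each `TTSOptions` field, its default, and its platform scope. A hedged sketch of a call that sets every option explicitly (the values are arbitrary examples, not recommendations):

```ts
import { TextToSpeech } from '@capacitor-community/text-to-speech';

async function demoSpeak(): Promise<void> {
  await TextToSpeech.speak({
    text: 'Hello from the text-to-speech plugin.',
    lang: 'en-US',       // default per the definitions above
    rate: 1.0,           // default 1.0
    pitch: 1.0,          // default 1.0
    volume: 1.0,         // default 1.0
    voice: 0,            // index into getSupportedVoices(); Web only
    category: 'ambient', // iOS audio session category; 'playback' keeps audio alive in background
  });
}
```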
package/dist/esm/definitions.js
CHANGED
@@ -1,2 +1,2 @@
-export {};
+export {};
 //# sourceMappingURL=definitions.js.map
package/dist/esm/definitions.js.map
CHANGED
@@ -1 +1 @@
-{"version":3,"file":"definitions.js","sourceRoot":"","sources":["../../src/definitions.ts"],"names":[],"mappings":""}
+{"version":3,"file":"definitions.js","sourceRoot":"","sources":["../../src/definitions.ts"],"names":[],"mappings":"","sourcesContent":["export interface TextToSpeechPlugin {\n /**\n * Starts the TTS engine and plays the desired text.\n */\n speak(options: TTSOptions): Promise<void>;\n /**\n * Stops the TTS engine.\n */\n stop(): Promise<void>;\n /**\n * Returns a list of supported BCP 47 language tags.\n */\n getSupportedLanguages(): Promise<{ languages: string[] }>;\n /**\n * Returns a list of supported voices.\n */\n getSupportedVoices(): Promise<{ voices: SpeechSynthesisVoice[] }>;\n /**\n * Checks if a specific BCP 47 language tag is supported.\n */\n isLanguageSupported(options: {\n lang: string;\n }): Promise<{ supported: boolean }>;\n /**\n * Verifies proper installation and availability of resource files on the system.\n *\n * Only available for Android.\n */\n openInstall(): Promise<void>;\n}\n\nexport interface TTSOptions {\n /**\n * The text that will be synthesised when the utterance is spoken.\n */\n text: string;\n /**\n * The language of the utterance.\n * Possible languages can be queried using `getSupportedLanguages`.\n *\n * Default: `en-US`.\n */\n lang?: string;\n /**\n * The speed at which the utterance will be spoken at.\n *\n * Default: `1.0`.\n */\n rate?: number;\n /**\n * The pitch at which the utterance will be spoken at.\n *\n * Default: `1.0`.\n */\n pitch?: number;\n /**\n * The volume that the utterance will be spoken at.\n *\n * Default: `1.0`.\n */\n volume?: number;\n /**\n * The index of the selected voice that will be used to speak the utterance.\n * Possible voices can be queried using `getSupportedVoices`.\n *\n * Only available for Web.\n */\n voice?: number;\n /**\n * Select the iOS Audio session category.\n * Possible values: `ambient` and `playback`.\n * Use `playback` to play audio even when the app is in the background.\n *\n * Only available for iOS.\n *\n * Default: `ambient`.\n */\n category?: string; // iOS only\n}\n\n/**\n * The SpeechSynthesisVoice interface represents a voice that the system supports.\n */\nexport interface SpeechSynthesisVoice {\n /**\n * Specifies whether the voice is the default voice for the current app (`true`) or not (`false`).\n */\n default: boolean;\n /**\n * BCP 47 language tag indicating the language of the voice.\n * Example: `en-US`.\n */\n lang: string;\n /**\n * Specifies whether the voice is supplied by a local (`true`) or remote (`false`) speech synthesizer service.\n */\n localService: boolean;\n /**\n * Human-readable name that represents the voice.\n * Example: `Microsoft Zira Desktop - English (United States)`.\n */\n name: string;\n /**\n * Type of URI and location of the speech synthesis service for this voice.\n * Example: `urn:moz-tts:sapi:Microsoft Zira Desktop - English (United States)?en-US`.\n */\n voiceURI: string;\n}\n"]}
package/dist/esm/index.d.ts
CHANGED
@@ -1,4 +1,4 @@
-import type { TextToSpeechPlugin } from './definitions';
-declare const TextToSpeech: TextToSpeechPlugin;
-export * from './definitions';
-export { TextToSpeech };
+import type { TextToSpeechPlugin } from './definitions';
+declare const TextToSpeech: TextToSpeechPlugin;
+export * from './definitions';
+export { TextToSpeech };
package/dist/esm/index.js
CHANGED
@@ -1,11 +1,11 @@
-import { registerPlugin } from '@capacitor/core';
-const TextToSpeech = registerPlugin('TextToSpeech', {
-    web: () => import('./web').then(m => new m.TextToSpeechWeb()),
-});
-// Warm up
-if ('speechSynthesis' in window) {
-    window.speechSynthesis;
-}
-export * from './definitions';
-export { TextToSpeech };
+import { registerPlugin } from '@capacitor/core';
+const TextToSpeech = registerPlugin('TextToSpeech', {
+    web: () => import('./web').then(m => new m.TextToSpeechWeb()),
+});
+// Warm up
+if ('speechSynthesis' in window) {
+    window.speechSynthesis;
+}
+export * from './definitions';
+export { TextToSpeech };
 //# sourceMappingURL=index.js.map
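The entry point above registers the plugin with `registerPlugin` and imports the web implementation lazily, so `TextToSpeechWeb` is only loaded the first time the plugin is used in a browser. A small illustrative sketch (the logging helper is an assumption, not package code) of querying the language and voice lists through this entry point:

```ts
import { TextToSpeech } from '@capacitor-community/text-to-speech';

// Illustrative only: print what the current platform reports as available.
async function logSpeechCapabilities(): Promise<void> {
  const { languages } = await TextToSpeech.getSupportedLanguages();
  const { voices } = await TextToSpeech.getSupportedVoices();
  console.log('languages:', languages.join(', '));
  console.log('voices:', voices.map(voice => voice.name).join(', '));
}
```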
package/dist/esm/index.js.map
CHANGED
@@ -1 +1 @@
-{"version":3,"file":"index.js","sourceRoot":"","sources":["../../src/index.ts"],"names":[],"mappings":"AAAA,OAAO,EAAE,cAAc,EAAE,MAAM,iBAAiB,CAAC;AAIjD,MAAM,YAAY,GAAG,cAAc,CAAqB,cAAc,EAAE;IACtE,GAAG,EAAE,GAAG,EAAE,CAAC,MAAM,CAAC,OAAO,CAAC,CAAC,IAAI,CAAC,CAAC,CAAC,EAAE,CAAC,IAAI,CAAC,CAAC,eAAe,EAAE,CAAC;CAC9D,CAAC,CAAC;AAEH,UAAU;AACV,IAAI,iBAAiB,IAAI,MAAM,EAAE;IAC/B,MAAM,CAAC,eAAe,CAAC;CACxB;AAED,cAAc,eAAe,CAAC;AAC9B,OAAO,EAAE,YAAY,EAAE,CAAC"}
+{"version":3,"file":"index.js","sourceRoot":"","sources":["../../src/index.ts"],"names":[],"mappings":"AAAA,OAAO,EAAE,cAAc,EAAE,MAAM,iBAAiB,CAAC;AAIjD,MAAM,YAAY,GAAG,cAAc,CAAqB,cAAc,EAAE;IACtE,GAAG,EAAE,GAAG,EAAE,CAAC,MAAM,CAAC,OAAO,CAAC,CAAC,IAAI,CAAC,CAAC,CAAC,EAAE,CAAC,IAAI,CAAC,CAAC,eAAe,EAAE,CAAC;CAC9D,CAAC,CAAC;AAEH,UAAU;AACV,IAAI,iBAAiB,IAAI,MAAM,EAAE;IAC/B,MAAM,CAAC,eAAe,CAAC;CACxB;AAED,cAAc,eAAe,CAAC;AAC9B,OAAO,EAAE,YAAY,EAAE,CAAC","sourcesContent":["import { registerPlugin } from '@capacitor/core';\n\nimport type { TextToSpeechPlugin } from './definitions';\n\nconst TextToSpeech = registerPlugin<TextToSpeechPlugin>('TextToSpeech', {\n web: () => import('./web').then(m => new m.TextToSpeechWeb()),\n});\n\n// Warm up\nif ('speechSynthesis' in window) {\n window.speechSynthesis;\n}\n\nexport * from './definitions';\nexport { TextToSpeech };\n"]}
package/dist/esm/web.d.ts
CHANGED
@@ -1,20 +1,25 @@
-import { WebPlugin } from '@capacitor/core';
-import type { TextToSpeechPlugin, TTSOptions } from './definitions';
-export declare class TextToSpeechWeb extends WebPlugin implements TextToSpeechPlugin {
-    private speechSynthesis;
-    private supportedVoices;
-    constructor();
-    speak(options: TTSOptions): Promise<void>;
-    stop(): Promise<void>;
-    getSupportedLanguages(): Promise<{
-        languages: string[];
-    }>;
-    getSupportedVoices(): Promise<{
-        voices: SpeechSynthesisVoice[];
-    }>;
- … (removed lines 15-20 truncated in source)
+import { WebPlugin } from '@capacitor/core';
+import type { TextToSpeechPlugin, TTSOptions } from './definitions';
+export declare class TextToSpeechWeb extends WebPlugin implements TextToSpeechPlugin {
+    private speechSynthesis;
+    private supportedVoices;
+    constructor();
+    speak(options: TTSOptions): Promise<void>;
+    stop(): Promise<void>;
+    getSupportedLanguages(): Promise<{
+        languages: string[];
+    }>;
+    getSupportedVoices(): Promise<{
+        voices: SpeechSynthesisVoice[];
+    }>;
+    isLanguageSupported(options: {
+        lang: string;
+    }): Promise<{
+        supported: boolean;
+    }>;
+    openInstall(): Promise<void>;
+    private createSpeechSynthesisUtterance;
+    private getSpeechSynthesisVoices;
+    private throwUnsupportedError;
+    private throwUnimplementedError;
+}
package/dist/esm/web.js
CHANGED
@@ -1,84 +1,92 @@
-import { WebPlugin } from '@capacitor/core';
-export class TextToSpeechWeb extends WebPlugin {
-    constructor() {
-        super();
-        this.speechSynthesis = null;
-        if ('speechSynthesis' in window) {
-            this.speechSynthesis = window.speechSynthesis;
- … (removed lines 8-83 truncated in source)
+import { WebPlugin } from '@capacitor/core';
+export class TextToSpeechWeb extends WebPlugin {
+    constructor() {
+        super();
+        this.speechSynthesis = null;
+        if ('speechSynthesis' in window) {
+            this.speechSynthesis = window.speechSynthesis;
+            window.addEventListener("beforeunload", () => {
+                this.stop();
+            });
+        }
+    }
+    async speak(options) {
+        if (!this.speechSynthesis) {
+            this.throwUnsupportedError();
+        }
+        await this.stop();
+        const speechSynthesis = this.speechSynthesis;
+        const utterance = this.createSpeechSynthesisUtterance(options);
+        return new Promise((resolve, reject) => {
+            utterance.onend = () => {
+                resolve();
+            };
+            utterance.onerror = (event) => {
+                reject(event);
+            };
+            speechSynthesis.speak(utterance);
+        });
+    }
+    async stop() {
+        if (!this.speechSynthesis) {
+            this.throwUnsupportedError();
+        }
+        this.speechSynthesis.cancel();
+    }
+    async getSupportedLanguages() {
+        const voices = this.getSpeechSynthesisVoices();
+        const languages = voices.map(voice => voice.lang);
+        const filteredLanguages = languages.filter((v, i, a) => a.indexOf(v) == i);
+        return { languages: filteredLanguages };
+    }
+    async getSupportedVoices() {
+        const voices = this.getSpeechSynthesisVoices();
+        return { voices };
+    }
+    async isLanguageSupported(options) {
+        const result = await this.getSupportedLanguages();
+        const isLanguageSupported = result.languages.includes(options.lang);
+        return { supported: isLanguageSupported };
+    }
+    async openInstall() {
+        this.throwUnimplementedError();
+    }
+    createSpeechSynthesisUtterance(options) {
+        const voices = this.getSpeechSynthesisVoices();
+        const utterance = new SpeechSynthesisUtterance();
+        const { text, lang, rate, pitch, volume, voice } = options;
+        if (voice) {
+            utterance.voice = voices[voice];
+        }
+        if (volume) {
+            utterance.volume = volume >= 0 && volume <= 1 ? volume : 1;
+        }
+        if (rate) {
+            utterance.rate = rate >= 0.1 && rate <= 10 ? rate : 1;
+        }
+        if (pitch) {
+            utterance.pitch = pitch >= 0 && pitch <= 2 ? pitch : 2;
+        }
+        if (lang) {
+            utterance.lang = lang;
+        }
+        utterance.text = text;
+        return utterance;
+    }
+    getSpeechSynthesisVoices() {
+        if (!this.speechSynthesis) {
+            this.throwUnsupportedError();
+        }
+        if (!this.supportedVoices || this.supportedVoices.length < 1) {
+            this.supportedVoices = this.speechSynthesis.getVoices();
+        }
+        return this.supportedVoices;
+    }
+    throwUnsupportedError() {
+        throw this.unavailable('SpeechSynthesis API not available in this browser.');
+    }
+    throwUnimplementedError() {
+        throw this.unimplemented('Not implemented on web.');
+    }
+}
 //# sourceMappingURL=web.js.map
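The web implementation above wraps the browser SpeechSynthesis API: `speak()` cancels anything currently playing, then resolves when the utterance's `onend` fires and rejects on `onerror`, and the voice list is cached after the first `getVoices()` call. A standalone sketch of that same promisified pattern, written against the browser API directly rather than the plugin (assumes a browser environment where `window.speechSynthesis` exists):

```ts
// Browser-only sketch, not the plugin's own code: promisify a single utterance.
function speakOnce(text: string, lang = 'en-US'): Promise<void> {
  return new Promise((resolve, reject) => {
    const utterance = new SpeechSynthesisUtterance(text);
    utterance.lang = lang;
    utterance.onend = () => resolve();
    utterance.onerror = event => reject(event);
    window.speechSynthesis.cancel(); // stop anything already queued, as the plugin's stop() does
    window.speechSynthesis.speak(utterance);
  });
}
```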
package/dist/esm/web.js.map
CHANGED
@@ -1 +1 @@
-{"version":3,"file":"web.js","sourceRoot":"","sources":["../../src/web.ts"],"names":[],"mappings":"AAAA,OAAO,EAAE,SAAS,EAAE,MAAM,iBAAiB,CAAC;AAI5C,MAAM,OAAO,eAAgB,SAAQ,SAAS;IAI5C;QACE,KAAK,EAAE,CAAC;QAJF,oBAAe,GAA2B,IAAI,CAAC;QAKrD,IAAI,iBAAiB,IAAI,MAAM,EAAE;YAC/B,IAAI,CAAC,eAAe,GAAG,MAAM,CAAC,eAAe,CAAC;
+{"version":3,"file":"web.js","sourceRoot":"","sources":["../../src/web.ts"],"names":[],"mappings":"AAAA,OAAO,EAAE,SAAS,EAAE,MAAM,iBAAiB,CAAC;AAI5C,MAAM,OAAO,eAAgB,SAAQ,SAAS;IAI5C;QACE,KAAK,EAAE,CAAC;QAJF,oBAAe,GAA2B,IAAI,CAAC;QAKrD,IAAI,iBAAiB,IAAI,MAAM,EAAE;YAC/B,IAAI,CAAC,eAAe,GAAG,MAAM,CAAC,eAAe,CAAC;YAC9C,MAAM,CAAC,gBAAgB,CAAC,cAAc,EAAE,GAAG,EAAE;gBACzC,IAAI,CAAC,IAAI,EAAE,CAAC;YAChB,CAAC,CAAC,CAAC;SACJ;IACH,CAAC;IAEM,KAAK,CAAC,KAAK,CAAC,OAAmB;QACpC,IAAI,CAAC,IAAI,CAAC,eAAe,EAAE;YACzB,IAAI,CAAC,qBAAqB,EAAE,CAAC;SAC9B;QACD,MAAM,IAAI,CAAC,IAAI,EAAE,CAAC;QAClB,MAAM,eAAe,GAAG,IAAI,CAAC,eAAe,CAAC;QAC7C,MAAM,SAAS,GAAG,IAAI,CAAC,8BAA8B,CAAC,OAAO,CAAC,CAAC;QAC/D,OAAO,IAAI,OAAO,CAAC,CAAC,OAAO,EAAE,MAAM,EAAE,EAAE;YACrC,SAAS,CAAC,KAAK,GAAG,GAAG,EAAE;gBACrB,OAAO,EAAE,CAAC;YACZ,CAAC,CAAC;YACF,SAAS,CAAC,OAAO,GAAG,CAAC,KAAU,EAAE,EAAE;gBACjC,MAAM,CAAC,KAAK,CAAC,CAAC;YAChB,CAAC,CAAC;YACF,eAAe,CAAC,KAAK,CAAC,SAAS,CAAC,CAAC;QACnC,CAAC,CAAC,CAAC;IACL,CAAC;IAEM,KAAK,CAAC,IAAI;QACf,IAAI,CAAC,IAAI,CAAC,eAAe,EAAE;YACzB,IAAI,CAAC,qBAAqB,EAAE,CAAC;SAC9B;QACD,IAAI,CAAC,eAAe,CAAC,MAAM,EAAE,CAAC;IAChC,CAAC;IAEM,KAAK,CAAC,qBAAqB;QAChC,MAAM,MAAM,GAAG,IAAI,CAAC,wBAAwB,EAAE,CAAC;QAC/C,MAAM,SAAS,GAAG,MAAM,CAAC,GAAG,CAAC,KAAK,CAAC,EAAE,CAAC,KAAK,CAAC,IAAI,CAAC,CAAC;QAClD,MAAM,iBAAiB,GAAG,SAAS,CAAC,MAAM,CAAC,CAAC,CAAC,EAAE,CAAC,EAAE,CAAC,EAAE,EAAE,CAAC,CAAC,CAAC,OAAO,CAAC,CAAC,CAAC,IAAI,CAAC,CAAC,CAAC;QAC3E,OAAO,EAAE,SAAS,EAAE,iBAAiB,EAAE,CAAC;IAC1C,CAAC;IAEM,KAAK,CAAC,kBAAkB;QAG7B,MAAM,MAAM,GAAG,IAAI,CAAC,wBAAwB,EAAE,CAAC;QAC/C,OAAO,EAAE,MAAM,EAAE,CAAC;IACpB,CAAC;IAEM,KAAK,CAAC,mBAAmB,CAAC,OAEhC;QACC,MAAM,MAAM,GAAG,MAAM,IAAI,CAAC,qBAAqB,EAAE,CAAC;QAClD,MAAM,mBAAmB,GAAG,MAAM,CAAC,SAAS,CAAC,QAAQ,CAAC,OAAO,CAAC,IAAI,CAAC,CAAC;QACpE,OAAO,EAAE,SAAS,EAAE,mBAAmB,EAAE,CAAC;IAC5C,CAAC;IAEM,KAAK,CAAC,WAAW;QACtB,IAAI,CAAC,uBAAuB,EAAE,CAAC;IACjC,CAAC;IAEO,8BAA8B,CACpC,OAAmB;QAEnB,MAAM,MAAM,GAAG,IAAI,CAAC,wBAAwB,EAAE,CAAC;QAC/C,MAAM,SAAS,GAAG,IAAI,wBAAwB,EAAE,CAAC;QACjD,MAAM,EAAE,IAAI,EAAE,IAAI,EAAE,IAAI,EAAE,KAAK,EAAE,MAAM,EAAE,KAAK,EAAE,GAAG,OAAO,CAAC;QAC3D,IAAI,KAAK,EAAE;YACT,SAAS,CAAC,KAAK,GAAG,MAAM,CAAC,KAAK,CAAC,CAAC;SACjC;QACD,IAAI,MAAM,EAAE;YACV,SAAS,CAAC,MAAM,GAAG,MAAM,IAAI,CAAC,IAAI,MAAM,IAAI,CAAC,CAAC,CAAC,CAAC,MAAM,CAAC,CAAC,CAAC,CAAC,CAAC;SAC5D;QACD,IAAI,IAAI,EAAE;YACR,SAAS,CAAC,IAAI,GAAG,IAAI,IAAI,GAAG,IAAI,IAAI,IAAI,EAAE,CAAC,CAAC,CAAC,IAAI,CAAC,CAAC,CAAC,CAAC,CAAC;SACvD;QACD,IAAI,KAAK,EAAE;YACT,SAAS,CAAC,KAAK,GAAG,KAAK,IAAI,CAAC,IAAI,KAAK,IAAI,CAAC,CAAC,CAAC,CAAC,KAAK,CAAC,CAAC,CAAC,CAAC,CAAC;SACxD;QACD,IAAI,IAAI,EAAE;YACR,SAAS,CAAC,IAAI,GAAG,IAAI,CAAC;SACvB;QACD,SAAS,CAAC,IAAI,GAAG,IAAI,CAAC;QACtB,OAAO,SAAS,CAAC;IACnB,CAAC;IAEO,wBAAwB;QAC9B,IAAI,CAAC,IAAI,CAAC,eAAe,EAAE;YACzB,IAAI,CAAC,qBAAqB,EAAE,CAAC;SAC9B;QACD,IAAI,CAAC,IAAI,CAAC,eAAe,IAAI,IAAI,CAAC,eAAe,CAAC,MAAM,GAAG,CAAC,EAAE;YAC5D,IAAI,CAAC,eAAe,GAAG,IAAI,CAAC,eAAe,CAAC,SAAS,EAAE,CAAC;SACzD;QACD,OAAO,IAAI,CAAC,eAAe,CAAC;IAC9B,CAAC;IAEO,qBAAqB;QAC3B,MAAM,IAAI,CAAC,WAAW,CACpB,oDAAoD,CACrD,CAAC;IACJ,CAAC;IAEO,uBAAuB;QAC7B,MAAM,IAAI,CAAC,aAAa,CAAC,yBAAyB,CAAC,CAAC;IACtD,CAAC;CACF","sourcesContent":["import { WebPlugin } from '@capacitor/core';\n\nimport type { TextToSpeechPlugin, TTSOptions } from './definitions';\n\nexport class TextToSpeechWeb extends WebPlugin implements TextToSpeechPlugin {\n private speechSynthesis: SpeechSynthesis | null = null;\n private supportedVoices: SpeechSynthesisVoice[] | undefined;\n\n constructor() {\n super();\n if ('speechSynthesis' in window) {\n this.speechSynthesis = window.speechSynthesis;\n window.addEventListener(\"beforeunload\", () => {\n this.stop();\n });\n }\n }\n\n public async speak(options: TTSOptions): Promise<void> {\n if (!this.speechSynthesis) {\n this.throwUnsupportedError();\n }\n await this.stop();\n const speechSynthesis = this.speechSynthesis;\n const utterance = this.createSpeechSynthesisUtterance(options);\n return new Promise((resolve, reject) => {\n utterance.onend = () => {\n resolve();\n };\n utterance.onerror = (event: any) => {\n reject(event);\n };\n speechSynthesis.speak(utterance);\n });\n }\n\n public async stop(): Promise<void> {\n if (!this.speechSynthesis) {\n this.throwUnsupportedError();\n }\n this.speechSynthesis.cancel();\n }\n\n public async getSupportedLanguages(): Promise<{ languages: string[] }> {\n const voices = this.getSpeechSynthesisVoices();\n const languages = voices.map(voice => voice.lang);\n const filteredLanguages = languages.filter((v, i, a) => a.indexOf(v) == i);\n return { languages: filteredLanguages };\n }\n\n public async getSupportedVoices(): Promise<{\n voices: SpeechSynthesisVoice[];\n }> {\n const voices = this.getSpeechSynthesisVoices();\n return { voices };\n }\n\n public async isLanguageSupported(options: {\n lang: string;\n }): Promise<{ supported: boolean }> {\n const result = await this.getSupportedLanguages();\n const isLanguageSupported = result.languages.includes(options.lang);\n return { supported: isLanguageSupported };\n }\n\n public async openInstall(): Promise<void> {\n this.throwUnimplementedError();\n }\n\n private createSpeechSynthesisUtterance(\n options: TTSOptions,\n ): SpeechSynthesisUtterance {\n const voices = this.getSpeechSynthesisVoices();\n const utterance = new SpeechSynthesisUtterance();\n const { text, lang, rate, pitch, volume, voice } = options;\n if (voice) {\n utterance.voice = voices[voice];\n }\n if (volume) {\n utterance.volume = volume >= 0 && volume <= 1 ? volume : 1;\n }\n if (rate) {\n utterance.rate = rate >= 0.1 && rate <= 10 ? rate : 1;\n }\n if (pitch) {\n utterance.pitch = pitch >= 0 && pitch <= 2 ? pitch : 2;\n }\n if (lang) {\n utterance.lang = lang;\n }\n utterance.text = text;\n return utterance;\n }\n\n private getSpeechSynthesisVoices(): SpeechSynthesisVoice[] {\n if (!this.speechSynthesis) {\n this.throwUnsupportedError();\n }\n if (!this.supportedVoices || this.supportedVoices.length < 1) {\n this.supportedVoices = this.speechSynthesis.getVoices();\n }\n return this.supportedVoices;\n }\n\n private throwUnsupportedError(): never {\n throw this.unavailable(\n 'SpeechSynthesis API not available in this browser.',\n );\n }\n\n private throwUnimplementedError(): never {\n throw this.unimplemented('Not implemented on web.');\n }\n}\n"]}