web-speech-cognitive-services 7.1.4-master.151bc9b → 8.0.0-main.15b930d

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (100)
  1. package/dist/web-speech-cognitive-services.d.mts +329 -0
  2. package/dist/web-speech-cognitive-services.d.ts +329 -0
  3. package/dist/web-speech-cognitive-services.development.js +25746 -0
  4. package/dist/web-speech-cognitive-services.development.js.map +1 -0
  5. package/dist/web-speech-cognitive-services.js +1409 -0
  6. package/dist/web-speech-cognitive-services.js.map +1 -0
  7. package/dist/web-speech-cognitive-services.mjs +1374 -0
  8. package/dist/web-speech-cognitive-services.mjs.map +1 -0
  9. package/dist/web-speech-cognitive-services.production.min.js +31 -0
  10. package/dist/web-speech-cognitive-services.production.min.js.map +1 -0
  11. package/package.json +67 -47
  12. package/CHANGELOG.md +0 -372
  13. package/lib/BingSpeech/SpeechToText/SpeechGrammarList.js +0 -94
  14. package/lib/BingSpeech/SpeechToText/SpeechGrammarList.js.map +0 -1
  15. package/lib/BingSpeech/SpeechToText/createSpeechRecognitionPonyfill.js +0 -483
  16. package/lib/BingSpeech/SpeechToText/createSpeechRecognitionPonyfill.js.map +0 -1
  17. package/lib/BingSpeech/SpeechToText.js +0 -14
  18. package/lib/BingSpeech/SpeechToText.js.map +0 -1
  19. package/lib/BingSpeech/TextToSpeech/AudioContextConsumer.js +0 -122
  20. package/lib/BingSpeech/TextToSpeech/AudioContextConsumer.js.map +0 -1
  21. package/lib/BingSpeech/TextToSpeech/AudioContextQueue.js +0 -104
  22. package/lib/BingSpeech/TextToSpeech/AudioContextQueue.js.map +0 -1
  23. package/lib/BingSpeech/TextToSpeech/SpeechSynthesisUtterance.js +0 -264
  24. package/lib/BingSpeech/TextToSpeech/SpeechSynthesisUtterance.js.map +0 -1
  25. package/lib/BingSpeech/TextToSpeech/SpeechSynthesisVoice.js +0 -61
  26. package/lib/BingSpeech/TextToSpeech/SpeechSynthesisVoice.js.map +0 -1
  27. package/lib/BingSpeech/TextToSpeech/buildSSML.js +0 -32
  28. package/lib/BingSpeech/TextToSpeech/buildSSML.js.map +0 -1
  29. package/lib/BingSpeech/TextToSpeech/createSpeechSynthesisPonyfill.js +0 -220
  30. package/lib/BingSpeech/TextToSpeech/createSpeechSynthesisPonyfill.js.map +0 -1
  31. package/lib/BingSpeech/TextToSpeech/fetchSpeechData.js +0 -74
  32. package/lib/BingSpeech/TextToSpeech/fetchSpeechData.js.map +0 -1
  33. package/lib/BingSpeech/TextToSpeech/fetchVoices.js +0 -335
  34. package/lib/BingSpeech/TextToSpeech/fetchVoices.js.map +0 -1
  35. package/lib/BingSpeech/TextToSpeech/isSSML.js +0 -13
  36. package/lib/BingSpeech/TextToSpeech/isSSML.js.map +0 -1
  37. package/lib/BingSpeech/TextToSpeech/subscribeEvent.js +0 -14
  38. package/lib/BingSpeech/TextToSpeech/subscribeEvent.js.map +0 -1
  39. package/lib/BingSpeech/TextToSpeech.js +0 -14
  40. package/lib/BingSpeech/TextToSpeech.js.map +0 -1
  41. package/lib/BingSpeech/Util/DOMEventEmitter.js +0 -61
  42. package/lib/BingSpeech/Util/DOMEventEmitter.js.map +0 -1
  43. package/lib/BingSpeech/Util/createFetchTokenUsingSubscriptionKey.js +0 -41
  44. package/lib/BingSpeech/Util/createFetchTokenUsingSubscriptionKey.js.map +0 -1
  45. package/lib/BingSpeech/fetchAuthorizationToken.js +0 -57
  46. package/lib/BingSpeech/fetchAuthorizationToken.js.map +0 -1
  47. package/lib/BingSpeech/index.js +0 -84
  48. package/lib/BingSpeech/index.js.map +0 -1
  49. package/lib/SpeechServices/SpeechSDK.js +0 -19
  50. package/lib/SpeechServices/SpeechSDK.js.map +0 -1
  51. package/lib/SpeechServices/SpeechToText/SpeechGrammarList.js +0 -45
  52. package/lib/SpeechServices/SpeechToText/SpeechGrammarList.js.map +0 -1
  53. package/lib/SpeechServices/SpeechToText/cognitiveServiceEventResultToWebSpeechRecognitionResultList.js +0 -56
  54. package/lib/SpeechServices/SpeechToText/cognitiveServiceEventResultToWebSpeechRecognitionResultList.js.map +0 -1
  55. package/lib/SpeechServices/SpeechToText/createSpeechRecognitionPonyfill.js +0 -984
  56. package/lib/SpeechServices/SpeechToText/createSpeechRecognitionPonyfill.js.map +0 -1
  57. package/lib/SpeechServices/SpeechToText.js +0 -24
  58. package/lib/SpeechServices/SpeechToText.js.map +0 -1
  59. package/lib/SpeechServices/TextToSpeech/AudioContextConsumer.js +0 -92
  60. package/lib/SpeechServices/TextToSpeech/AudioContextConsumer.js.map +0 -1
  61. package/lib/SpeechServices/TextToSpeech/AudioContextQueue.js +0 -111
  62. package/lib/SpeechServices/TextToSpeech/AudioContextQueue.js.map +0 -1
  63. package/lib/SpeechServices/TextToSpeech/SpeechSynthesisEvent.js +0 -40
  64. package/lib/SpeechServices/TextToSpeech/SpeechSynthesisEvent.js.map +0 -1
  65. package/lib/SpeechServices/TextToSpeech/SpeechSynthesisUtterance.js +0 -283
  66. package/lib/SpeechServices/TextToSpeech/SpeechSynthesisUtterance.js.map +0 -1
  67. package/lib/SpeechServices/TextToSpeech/SpeechSynthesisVoice.js +0 -63
  68. package/lib/SpeechServices/TextToSpeech/SpeechSynthesisVoice.js.map +0 -1
  69. package/lib/SpeechServices/TextToSpeech/buildSSML.js +0 -32
  70. package/lib/SpeechServices/TextToSpeech/buildSSML.js.map +0 -1
  71. package/lib/SpeechServices/TextToSpeech/createSpeechSynthesisPonyfill.js +0 -282
  72. package/lib/SpeechServices/TextToSpeech/createSpeechSynthesisPonyfill.js.map +0 -1
  73. package/lib/SpeechServices/TextToSpeech/fetchCustomVoices.js +0 -110
  74. package/lib/SpeechServices/TextToSpeech/fetchCustomVoices.js.map +0 -1
  75. package/lib/SpeechServices/TextToSpeech/fetchSpeechData.js +0 -127
  76. package/lib/SpeechServices/TextToSpeech/fetchSpeechData.js.map +0 -1
  77. package/lib/SpeechServices/TextToSpeech/fetchVoices.js +0 -87
  78. package/lib/SpeechServices/TextToSpeech/fetchVoices.js.map +0 -1
  79. package/lib/SpeechServices/TextToSpeech/isSSML.js +0 -13
  80. package/lib/SpeechServices/TextToSpeech/isSSML.js.map +0 -1
  81. package/lib/SpeechServices/TextToSpeech/subscribeEvent.js +0 -14
  82. package/lib/SpeechServices/TextToSpeech/subscribeEvent.js.map +0 -1
  83. package/lib/SpeechServices/TextToSpeech.js +0 -14
  84. package/lib/SpeechServices/TextToSpeech.js.map +0 -1
  85. package/lib/SpeechServices/fetchAuthorizationToken.js +0 -58
  86. package/lib/SpeechServices/fetchAuthorizationToken.js.map +0 -1
  87. package/lib/SpeechServices/patchOptions.js +0 -213
  88. package/lib/SpeechServices/patchOptions.js.map +0 -1
  89. package/lib/SpeechServices/resolveFunctionOrReturnValue.js +0 -11
  90. package/lib/SpeechServices/resolveFunctionOrReturnValue.js.map +0 -1
  91. package/lib/SpeechServices.js +0 -73
  92. package/lib/SpeechServices.js.map +0 -1
  93. package/lib/Util/arrayToMap.js +0 -28
  94. package/lib/Util/arrayToMap.js.map +0 -1
  95. package/lib/Util/createPromiseQueue.js +0 -40
  96. package/lib/Util/createPromiseQueue.js.map +0 -1
  97. package/lib/index.js +0 -14
  98. package/lib/index.js.map +0 -1
  99. package/umd/web-speech-cognitive-services.development.js +0 -4740
  100. package/umd/web-speech-cognitive-services.production.min.js +0 -2
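
The list above shows the Babel-compiled lib/ tree and the umd/ bundles being dropped in favor of prebuilt dist/ bundles with paired CommonJS (.js), ESM (.mjs), development/production, and TypeScript declaration (.d.ts/.d.mts) outputs, with package.json reworked to match. A minimal consumption sketch against the new layout; the createSpeechServicesPonyfill export name and the credentials option shape are assumed to carry over from the 7.x API and are not confirmed by this diff:

  // Hypothetical usage of the 8.0.0 dist bundles. Export name and option shape
  // are assumptions based on the 7.x API, not confirmed by this diff.
  import { createSpeechServicesPonyfill } from 'web-speech-cognitive-services';

  const { speechSynthesis, SpeechSynthesisUtterance } = createSpeechServicesPonyfill({
    credentials: { region: 'westus', subscriptionKey: 'YOUR_SUBSCRIPTION_KEY' }
  });

  speechSynthesis.speak(new SpeechSynthesisUtterance('Hello, world!'));
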
package/lib/SpeechServices/TextToSpeech/createSpeechSynthesisPonyfill.js
@@ -1,282 +0,0 @@
1
- "use strict";
2
-
3
- var _interopRequireDefault = require("@babel/runtime/helpers/interopRequireDefault");
4
-
5
- Object.defineProperty(exports, "__esModule", {
6
- value: true
7
- });
8
- exports.default = void 0;
9
-
10
- var _regenerator = _interopRequireDefault(require("@babel/runtime/regenerator"));
11
-
12
- var _asyncToGenerator2 = _interopRequireDefault(require("@babel/runtime/helpers/asyncToGenerator"));
13
-
14
- var _classCallCheck2 = _interopRequireDefault(require("@babel/runtime/helpers/classCallCheck"));
15
-
16
- var _createClass2 = _interopRequireDefault(require("@babel/runtime/helpers/createClass"));
17
-
18
- var _inherits2 = _interopRequireDefault(require("@babel/runtime/helpers/inherits"));
19
-
20
- var _possibleConstructorReturn2 = _interopRequireDefault(require("@babel/runtime/helpers/possibleConstructorReturn"));
21
-
22
- var _getPrototypeOf2 = _interopRequireDefault(require("@babel/runtime/helpers/getPrototypeOf"));
23
-
24
- var _es = require("event-target-shim/es5");
25
-
26
- var _pDeferEs = _interopRequireDefault(require("p-defer-es5"));
27
-
28
- var _onErrorResumeNext = _interopRequireDefault(require("on-error-resume-next"));
29
-
30
- var _AudioContextQueue = _interopRequireDefault(require("./AudioContextQueue"));
31
-
32
- var _fetchCustomVoices = _interopRequireDefault(require("./fetchCustomVoices"));
33
-
34
- var _fetchVoices = _interopRequireDefault(require("./fetchVoices"));
35
-
36
- var _patchOptions2 = _interopRequireDefault(require("../patchOptions"));
37
-
38
- var _SpeechSynthesisEvent = _interopRequireDefault(require("./SpeechSynthesisEvent"));
39
-
40
- var _SpeechSynthesisUtterance = _interopRequireDefault(require("./SpeechSynthesisUtterance"));
41
-
42
- function _createSuper(Derived) { var hasNativeReflectConstruct = _isNativeReflectConstruct(); return function _createSuperInternal() { var Super = (0, _getPrototypeOf2.default)(Derived), result; if (hasNativeReflectConstruct) { var NewTarget = (0, _getPrototypeOf2.default)(this).constructor; result = Reflect.construct(Super, arguments, NewTarget); } else { result = Super.apply(this, arguments); } return (0, _possibleConstructorReturn2.default)(this, result); }; }
43
-
44
- function _isNativeReflectConstruct() { if (typeof Reflect === "undefined" || !Reflect.construct) return false; if (Reflect.construct.sham) return false; if (typeof Proxy === "function") return true; try { Boolean.prototype.valueOf.call(Reflect.construct(Boolean, [], function () {})); return true; } catch (e) { return false; } }
45
-
46
- // Supported output format can be found at https://docs.microsoft.com/en-us/azure/cognitive-services/speech-service/rest-text-to-speech#audio-outputs
47
- var DEFAULT_OUTPUT_FORMAT = 'audio-24khz-160kbitrate-mono-mp3';
48
- var EMPTY_ARRAY = [];
49
-
50
- var _default = function _default(options) {
51
- var _patchOptions = (0, _patchOptions2.default)(options),
52
- audioContext = _patchOptions.audioContext,
53
- fetchCredentials = _patchOptions.fetchCredentials,
54
- _patchOptions$ponyfil = _patchOptions.ponyfill,
55
- ponyfill = _patchOptions$ponyfil === void 0 ? {
56
- AudioContext: window.AudioContext || window.webkitAudioContext
57
- } : _patchOptions$ponyfil,
58
- speechSynthesisDeploymentId = _patchOptions.speechSynthesisDeploymentId,
59
- _patchOptions$speechS = _patchOptions.speechSynthesisOutputFormat,
60
- speechSynthesisOutputFormat = _patchOptions$speechS === void 0 ? DEFAULT_OUTPUT_FORMAT : _patchOptions$speechS;
61
-
62
- if (!audioContext && !ponyfill.AudioContext) {
63
- console.warn('web-speech-cognitive-services: This browser does not support Web Audio and it will not work with Cognitive Services Speech Services.');
64
- return {};
65
- }
66
-
67
- var SpeechSynthesis = /*#__PURE__*/function (_EventTarget) {
68
- (0, _inherits2.default)(SpeechSynthesis, _EventTarget);
69
-
70
- var _super = _createSuper(SpeechSynthesis);
71
-
72
- function SpeechSynthesis() {
73
- var _this;
74
-
75
- (0, _classCallCheck2.default)(this, SpeechSynthesis);
76
- _this = _super.call(this);
77
- _this.queue = new _AudioContextQueue.default({
78
- audioContext: audioContext,
79
- ponyfill: ponyfill
80
- });
81
-
82
- _this.updateVoices();
83
-
84
- return _this;
85
- }
86
-
87
- (0, _createClass2.default)(SpeechSynthesis, [{
88
- key: "cancel",
89
- value: function cancel() {
90
- this.queue.stop();
91
- }
92
- }, {
93
- key: "getVoices",
94
- value: function getVoices() {
95
- return EMPTY_ARRAY;
96
- }
97
- }, {
98
- key: "onvoiceschanged",
99
- get: function get() {
100
- return (0, _es.getEventAttributeValue)(this, 'voiceschanged');
101
- },
102
- set: function set(value) {
103
- (0, _es.setEventAttributeValue)(this, 'voiceschanged', value);
104
- }
105
- }, {
106
- key: "pause",
107
- value: function pause() {
108
- this.queue.pause();
109
- }
110
- }, {
111
- key: "resume",
112
- value: function resume() {
113
- this.queue.resume();
114
- }
115
- }, {
116
- key: "speak",
117
- value: function speak(utterance) {
118
- if (!(utterance instanceof _SpeechSynthesisUtterance.default)) {
119
- throw new Error('invalid utterance');
120
- }
121
-
122
- var _createDeferred = (0, _pDeferEs.default)(),
123
- reject = _createDeferred.reject,
124
- resolve = _createDeferred.resolve,
125
- promise = _createDeferred.promise;
126
-
127
- var handleError = function handleError(_ref) {
128
- var errorCode = _ref.error,
129
- message = _ref.message;
130
- var error = new Error(errorCode);
131
- error.stack = message;
132
- reject(error);
133
- };
134
-
135
- utterance.addEventListener('end', resolve);
136
- utterance.addEventListener('error', handleError);
137
- utterance.preload({
138
- deploymentId: speechSynthesisDeploymentId,
139
- fetchCredentials: fetchCredentials,
140
- outputFormat: speechSynthesisOutputFormat
141
- });
142
- this.queue.push(utterance);
143
- return promise.finally(function () {
144
- utterance.removeEventListener('end', resolve);
145
- utterance.removeEventListener('error', handleError);
146
- });
147
- }
148
- }, {
149
- key: "speaking",
150
- get: function get() {
151
- return this.queue.speaking;
152
- }
153
- }, {
154
- key: "updateVoices",
155
- value: function () {
156
- var _updateVoices = (0, _asyncToGenerator2.default)( /*#__PURE__*/_regenerator.default.mark(function _callee3() {
157
- var _this2 = this;
158
-
159
- var _yield$fetchCredentia, customVoiceHostname, region, speechSynthesisHostname, subscriptionKey;
160
-
161
- return _regenerator.default.wrap(function _callee3$(_context3) {
162
- while (1) {
163
- switch (_context3.prev = _context3.next) {
164
- case 0:
165
- _context3.next = 2;
166
- return fetchCredentials();
167
-
168
- case 2:
169
- _yield$fetchCredentia = _context3.sent;
170
- customVoiceHostname = _yield$fetchCredentia.customVoiceHostname;
171
- region = _yield$fetchCredentia.region;
172
- speechSynthesisHostname = _yield$fetchCredentia.speechSynthesisHostname;
173
- subscriptionKey = _yield$fetchCredentia.subscriptionKey;
174
-
175
- if (!speechSynthesisDeploymentId) {
176
- _context3.next = 14;
177
- break;
178
- }
179
-
180
- if (!subscriptionKey) {
181
- _context3.next = 12;
182
- break;
183
- }
184
-
185
- console.warn('web-speech-cognitive-services: Listing of custom voice models are only available when using subscription key.');
186
- _context3.next = 12;
187
- return (0, _onErrorResumeNext.default)( /*#__PURE__*/(0, _asyncToGenerator2.default)( /*#__PURE__*/_regenerator.default.mark(function _callee() {
188
- var voices;
189
- return _regenerator.default.wrap(function _callee$(_context) {
190
- while (1) {
191
- switch (_context.prev = _context.next) {
192
- case 0:
193
- _context.next = 2;
194
- return (0, _fetchCustomVoices.default)({
195
- customVoiceHostname: customVoiceHostname,
196
- deploymentId: speechSynthesisDeploymentId,
197
- region: region,
198
- speechSynthesisHostname: speechSynthesisHostname,
199
- subscriptionKey: subscriptionKey
200
- });
201
-
202
- case 2:
203
- voices = _context.sent;
204
-
205
- _this2.getVoices = function () {
206
- return voices;
207
- };
208
-
209
- case 4:
210
- case "end":
211
- return _context.stop();
212
- }
213
- }
214
- }, _callee);
215
- })));
216
-
217
- case 12:
218
- _context3.next = 16;
219
- break;
220
-
221
- case 14:
222
- _context3.next = 16;
223
- return (0, _onErrorResumeNext.default)( /*#__PURE__*/(0, _asyncToGenerator2.default)( /*#__PURE__*/_regenerator.default.mark(function _callee2() {
224
- var voices;
225
- return _regenerator.default.wrap(function _callee2$(_context2) {
226
- while (1) {
227
- switch (_context2.prev = _context2.next) {
228
- case 0:
229
- _context2.t0 = _fetchVoices.default;
230
- _context2.next = 3;
231
- return fetchCredentials();
232
-
233
- case 3:
234
- _context2.t1 = _context2.sent;
235
- _context2.next = 6;
236
- return (0, _context2.t0)(_context2.t1);
237
-
238
- case 6:
239
- voices = _context2.sent;
240
-
241
- _this2.getVoices = function () {
242
- return voices;
243
- };
244
-
245
- case 8:
246
- case "end":
247
- return _context2.stop();
248
- }
249
- }
250
- }, _callee2);
251
- })));
252
-
253
- case 16:
254
- this.dispatchEvent(new _SpeechSynthesisEvent.default('voiceschanged'));
255
-
256
- case 17:
257
- case "end":
258
- return _context3.stop();
259
- }
260
- }
261
- }, _callee3, this);
262
- }));
263
-
264
- function updateVoices() {
265
- return _updateVoices.apply(this, arguments);
266
- }
267
-
268
- return updateVoices;
269
- }()
270
- }]);
271
- return SpeechSynthesis;
272
- }(_es.EventTarget);
273
-
274
- return {
275
- speechSynthesis: new SpeechSynthesis(),
276
- SpeechSynthesisEvent: _SpeechSynthesisEvent.default,
277
- SpeechSynthesisUtterance: _SpeechSynthesisUtterance.default
278
- };
279
- };
280
-
281
- exports.default = _default;
282
- //# sourceMappingURL=createSpeechSynthesisPonyfill.js.map
package/lib/SpeechServices/TextToSpeech/createSpeechSynthesisPonyfill.js.map
@@ -1 +0,0 @@
1
- {"version":3,"file":"createSpeechSynthesisPonyfill.js","names":["DEFAULT_OUTPUT_FORMAT","EMPTY_ARRAY","options","patchOptions","audioContext","fetchCredentials","ponyfill","AudioContext","window","webkitAudioContext","speechSynthesisDeploymentId","speechSynthesisOutputFormat","console","warn","SpeechSynthesis","queue","AudioContextQueue","updateVoices","stop","getEventAttributeValue","value","setEventAttributeValue","pause","resume","utterance","SpeechSynthesisUtterance","Error","createDeferred","reject","resolve","promise","handleError","errorCode","error","message","stack","addEventListener","preload","deploymentId","outputFormat","push","finally","removeEventListener","speaking","customVoiceHostname","region","speechSynthesisHostname","subscriptionKey","onErrorResumeNext","fetchCustomVoices","voices","getVoices","fetchVoices","dispatchEvent","SpeechSynthesisEvent","EventTarget","speechSynthesis"],"sources":["../../../src/SpeechServices/TextToSpeech/createSpeechSynthesisPonyfill.js"],"sourcesContent":["/* eslint class-methods-use-this: 0 */\n\nimport { EventTarget, getEventAttributeValue, setEventAttributeValue } from 'event-target-shim/es5';\nimport createDeferred from 'p-defer-es5';\nimport onErrorResumeNext from 'on-error-resume-next';\n\nimport AudioContextQueue from './AudioContextQueue';\nimport fetchCustomVoices from './fetchCustomVoices';\nimport fetchVoices from './fetchVoices';\nimport patchOptions from '../patchOptions';\nimport SpeechSynthesisEvent from './SpeechSynthesisEvent';\nimport SpeechSynthesisUtterance from './SpeechSynthesisUtterance';\n\n// Supported output format can be found at https://docs.microsoft.com/en-us/azure/cognitive-services/speech-service/rest-text-to-speech#audio-outputs\nconst DEFAULT_OUTPUT_FORMAT = 'audio-24khz-160kbitrate-mono-mp3';\nconst EMPTY_ARRAY = [];\n\nexport default options => {\n const {\n audioContext,\n fetchCredentials,\n ponyfill = {\n AudioContext: window.AudioContext || window.webkitAudioContext\n },\n speechSynthesisDeploymentId,\n speechSynthesisOutputFormat = DEFAULT_OUTPUT_FORMAT\n } = patchOptions(options);\n\n if (!audioContext && !ponyfill.AudioContext) {\n console.warn(\n 'web-speech-cognitive-services: This browser does not support Web Audio and it will not work with Cognitive Services Speech Services.'\n );\n\n return {};\n }\n\n class SpeechSynthesis extends EventTarget {\n constructor() {\n super();\n\n this.queue = new AudioContextQueue({ audioContext, ponyfill });\n\n this.updateVoices();\n }\n\n cancel() {\n this.queue.stop();\n }\n\n getVoices() {\n return EMPTY_ARRAY;\n }\n\n get onvoiceschanged() {\n return getEventAttributeValue(this, 'voiceschanged');\n }\n\n set onvoiceschanged(value) {\n setEventAttributeValue(this, 'voiceschanged', value);\n }\n\n pause() {\n this.queue.pause();\n }\n\n resume() {\n this.queue.resume();\n }\n\n speak(utterance) {\n if (!(utterance instanceof SpeechSynthesisUtterance)) {\n throw new Error('invalid utterance');\n }\n\n const { reject, resolve, promise } = createDeferred();\n const handleError = ({ error: errorCode, message }) => {\n const error = new Error(errorCode);\n\n error.stack = message;\n\n reject(error);\n };\n\n utterance.addEventListener('end', resolve);\n utterance.addEventListener('error', handleError);\n\n utterance.preload({\n deploymentId: speechSynthesisDeploymentId,\n fetchCredentials,\n outputFormat: speechSynthesisOutputFormat\n });\n\n this.queue.push(utterance);\n\n return promise.finally(() => {\n utterance.removeEventListener('end', resolve);\n 
utterance.removeEventListener('error', handleError);\n });\n }\n\n get speaking() {\n return this.queue.speaking;\n }\n\n async updateVoices() {\n const { customVoiceHostname, region, speechSynthesisHostname, subscriptionKey } = await fetchCredentials();\n\n if (speechSynthesisDeploymentId) {\n if (subscriptionKey) {\n console.warn(\n 'web-speech-cognitive-services: Listing of custom voice models are only available when using subscription key.'\n );\n\n await onErrorResumeNext(async () => {\n const voices = await fetchCustomVoices({\n customVoiceHostname,\n deploymentId: speechSynthesisDeploymentId,\n region,\n speechSynthesisHostname,\n subscriptionKey\n });\n\n this.getVoices = () => voices;\n });\n }\n } else {\n // If fetch voice list failed, we will not emit \"voiceschanged\" event.\n // In the spec, there is no \"error\" event.\n\n await onErrorResumeNext(async () => {\n const voices = await fetchVoices(await fetchCredentials());\n\n this.getVoices = () => voices;\n });\n }\n\n this.dispatchEvent(new SpeechSynthesisEvent('voiceschanged'));\n }\n }\n\n return {\n speechSynthesis: new SpeechSynthesis(),\n SpeechSynthesisEvent,\n SpeechSynthesisUtterance\n };\n};\n"],"mappings":";;;;;;;;;;;;;;;;;;;;;;;AAEA;;AACA;;AACA;;AAEA;;AACA;;AACA;;AACA;;AACA;;AACA;;;;;;AAEA;AACA,IAAMA,qBAAqB,GAAG,kCAA9B;AACA,IAAMC,WAAW,GAAG,EAApB;;eAEe,kBAAAC,OAAO,EAAI;EACxB,oBAQI,IAAAC,sBAAA,EAAaD,OAAb,CARJ;EAAA,IACEE,YADF,iBACEA,YADF;EAAA,IAEEC,gBAFF,iBAEEA,gBAFF;EAAA,0CAGEC,QAHF;EAAA,IAGEA,QAHF,sCAGa;IACTC,YAAY,EAAEC,MAAM,CAACD,YAAP,IAAuBC,MAAM,CAACC;EADnC,CAHb;EAAA,IAMEC,2BANF,iBAMEA,2BANF;EAAA,0CAOEC,2BAPF;EAAA,IAOEA,2BAPF,sCAOgCX,qBAPhC;;EAUA,IAAI,CAACI,YAAD,IAAiB,CAACE,QAAQ,CAACC,YAA/B,EAA6C;IAC3CK,OAAO,CAACC,IAAR,CACE,sIADF;IAIA,OAAO,EAAP;EACD;;EAjBuB,IAmBlBC,eAnBkB;IAAA;;IAAA;;IAoBtB,2BAAc;MAAA;;MAAA;MACZ;MAEA,MAAKC,KAAL,GAAa,IAAIC,0BAAJ,CAAsB;QAAEZ,YAAY,EAAZA,YAAF;QAAgBE,QAAQ,EAARA;MAAhB,CAAtB,CAAb;;MAEA,MAAKW,YAAL;;MALY;IAMb;;IA1BqB;MAAA;MAAA,OA4BtB,kBAAS;QACP,KAAKF,KAAL,CAAWG,IAAX;MACD;IA9BqB;MAAA;MAAA,OAgCtB,qBAAY;QACV,OAAOjB,WAAP;MACD;IAlCqB;MAAA;MAAA,KAoCtB,eAAsB;QACpB,OAAO,IAAAkB,0BAAA,EAAuB,IAAvB,EAA6B,eAA7B,CAAP;MACD,CAtCqB;MAAA,KAwCtB,aAAoBC,KAApB,EAA2B;QACzB,IAAAC,0BAAA,EAAuB,IAAvB,EAA6B,eAA7B,EAA8CD,KAA9C;MACD;IA1CqB;MAAA;MAAA,OA4CtB,iBAAQ;QACN,KAAKL,KAAL,CAAWO,KAAX;MACD;IA9CqB;MAAA;MAAA,OAgDtB,kBAAS;QACP,KAAKP,KAAL,CAAWQ,MAAX;MACD;IAlDqB;MAAA;MAAA,OAoDtB,eAAMC,SAAN,EAAiB;QACf,IAAI,EAAEA,SAAS,YAAYC,iCAAvB,CAAJ,EAAsD;UACpD,MAAM,IAAIC,KAAJ,CAAU,mBAAV,CAAN;QACD;;QAED,sBAAqC,IAAAC,iBAAA,GAArC;QAAA,IAAQC,MAAR,mBAAQA,MAAR;QAAA,IAAgBC,OAAhB,mBAAgBA,OAAhB;QAAA,IAAyBC,OAAzB,mBAAyBA,OAAzB;;QACA,IAAMC,WAAW,GAAG,SAAdA,WAAc,OAAmC;UAAA,IAAzBC,SAAyB,QAAhCC,KAAgC;UAAA,IAAdC,OAAc,QAAdA,OAAc;UACrD,IAAMD,KAAK,GAAG,IAAIP,KAAJ,CAAUM,SAAV,CAAd;UAEAC,KAAK,CAACE,KAAN,GAAcD,OAAd;UAEAN,MAAM,CAACK,KAAD,CAAN;QACD,CAND;;QAQAT,SAAS,CAACY,gBAAV,CAA2B,KAA3B,EAAkCP,OAAlC;QACAL,SAAS,CAACY,gBAAV,CAA2B,OAA3B,EAAoCL,WAApC;QAEAP,SAAS,CAACa,OAAV,CAAkB;UAChBC,YAAY,EAAE5B,2BADE;UAEhBL,gBAAgB,EAAhBA,gBAFgB;UAGhBkC,YAAY,EAAE5B;QAHE,CAAlB;QAMA,KAAKI,KAAL,CAAWyB,IAAX,CAAgBhB,SAAhB;QAEA,OAAOM,OAAO,CAACW,OAAR,CAAgB,YAAM;UAC3BjB,SAAS,CAACkB,mBAAV,CAA8B,KAA9B,EAAqCb,OAArC;UACAL,SAAS,CAACkB,mBAAV,CAA8B,OAA9B,EAAuCX,WAAvC;QACD,CAHM,CAAP;MAID;IAjFqB;MAAA;MAAA,KAmFtB,eAAe;QACb,OAAO,KAAKhB,KAAL,CAAW4B,QAAlB;MACD;IArFqB;MAAA;MAAA;QAAA,4FAuFtB;UAAA;;UAAA;;UAAA;YAAA;cAAA;gBAAA;kBAAA;kBAAA,OAC0FtC,gBAAgB,EAD1G;;gBAAA;kBAAA;kBACUuC,mBADV,yBACUA,mBADV;kBAC+BC,MAD/B,yBAC+BA,MAD/B;kBACuCC,uBADvC,yBACuCA,uBADvC;kBACgEC,eADhE,yBACgEA,eADhE;;kBAAA,KAGMrC,2BA
HN;oBAAA;oBAAA;kBAAA;;kBAAA,KAIQqC,eAJR;oBAAA;oBAAA;kBAAA;;kBAKMnC,OAAO,CAACC,IAAR,CACE,+GADF;kBALN;kBAAA,OASY,IAAAmC,0BAAA,wFAAkB;oBAAA;oBAAA;sBAAA;wBAAA;0BAAA;4BAAA;4BAAA,OACD,IAAAC,0BAAA,EAAkB;8BACrCL,mBAAmB,EAAnBA,mBADqC;8BAErCN,YAAY,EAAE5B,2BAFuB;8BAGrCmC,MAAM,EAANA,MAHqC;8BAIrCC,uBAAuB,EAAvBA,uBAJqC;8BAKrCC,eAAe,EAAfA;4BALqC,CAAlB,CADC;;0BAAA;4BAChBG,MADgB;;4BAStB,MAAI,CAACC,SAAL,GAAiB;8BAAA,OAAMD,MAAN;4BAAA,CAAjB;;0BATsB;0BAAA;4BAAA;wBAAA;sBAAA;oBAAA;kBAAA,CAAlB,GATZ;;gBAAA;kBAAA;kBAAA;;gBAAA;kBAAA;kBAAA,OAyBU,IAAAF,0BAAA,wFAAkB;oBAAA;oBAAA;sBAAA;wBAAA;0BAAA;4BAAA,eACDI,oBADC;4BAAA;4BAAA,OACiB/C,gBAAgB,EADjC;;0BAAA;4BAAA;4BAAA;4BAAA;;0BAAA;4BAChB6C,MADgB;;4BAGtB,MAAI,CAACC,SAAL,GAAiB;8BAAA,OAAMD,MAAN;4BAAA,CAAjB;;0BAHsB;0BAAA;4BAAA;wBAAA;sBAAA;oBAAA;kBAAA,CAAlB,GAzBV;;gBAAA;kBAgCE,KAAKG,aAAL,CAAmB,IAAIC,6BAAJ,CAAyB,eAAzB,CAAnB;;gBAhCF;gBAAA;kBAAA;cAAA;YAAA;UAAA;QAAA,CAvFsB;;QAAA;UAAA;QAAA;;QAAA;MAAA;IAAA;IAAA;EAAA,EAmBMC,eAnBN;;EA2HxB,OAAO;IACLC,eAAe,EAAE,IAAI1C,eAAJ,EADZ;IAELwC,oBAAoB,EAApBA,6BAFK;IAGL7B,wBAAwB,EAAxBA;EAHK,CAAP;AAKD,C"}
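
The two hunks above remove the compiled 7.x speech synthesis ponyfill and its source map. The original ES source embedded in the map's sourcesContent shows the factory taking patched options, warning when Web Audio is unavailable, and returning a { speechSynthesis, SpeechSynthesisEvent, SpeechSynthesisUtterance } triple whose speak() accepts only its own utterance class and resolves a promise when playback ends. A usage sketch against that removed surface; the deep lib/ require path and the credentials option shape are assumptions based on the 7.x layout, shown only to keep the sketch self-contained:

  // Sketch of driving the removed 7.x ponyfill factory (hypothetical deep require;
  // consumers normally go through the package's public entry point).
  const createSpeechSynthesisPonyfill =
    require('web-speech-cognitive-services/lib/SpeechServices/TextToSpeech/createSpeechSynthesisPonyfill').default;

  const { speechSynthesis, SpeechSynthesisUtterance } = createSpeechSynthesisPonyfill({
    credentials: { region: 'westus', subscriptionKey: 'YOUR_SUBSCRIPTION_KEY' }
  });

  // getVoices() starts out empty and is swapped in once the voice list has been fetched.
  speechSynthesis.addEventListener('voiceschanged', () => {
    console.log(speechSynthesis.getVoices().map(({ lang, voiceURI }) => `${lang}: ${voiceURI}`));
  });

  // speak() queues the utterance and returns a promise that settles when playback ends or errors.
  speechSynthesis
    .speak(new SpeechSynthesisUtterance('Hello, world!'))
    .then(() => console.log('done'), err => console.error(err));
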
package/lib/SpeechServices/TextToSpeech/fetchCustomVoices.js
@@ -1,110 +0,0 @@
1
- "use strict";
2
-
3
- var _interopRequireDefault = require("@babel/runtime/helpers/interopRequireDefault");
4
-
5
- Object.defineProperty(exports, "__esModule", {
6
- value: true
7
- });
8
- exports.default = _default;
9
-
10
- var _regenerator = _interopRequireDefault(require("@babel/runtime/regenerator"));
11
-
12
- var _asyncToGenerator2 = _interopRequireDefault(require("@babel/runtime/helpers/asyncToGenerator"));
13
-
14
- var _SpeechSynthesisVoice = _interopRequireDefault(require("./SpeechSynthesisVoice"));
15
-
16
- /* eslint no-magic-numbers: ["error", { "ignore": [0, 1, -1] }] */
17
- function fetchCustomVoices(_x) {
18
- return _fetchCustomVoices.apply(this, arguments);
19
- }
20
-
21
- function _fetchCustomVoices() {
22
- _fetchCustomVoices = (0, _asyncToGenerator2.default)( /*#__PURE__*/_regenerator.default.mark(function _callee(_ref) {
23
- var customVoiceHostname, deploymentId, region, subscriptionKey, hostname, res;
24
- return _regenerator.default.wrap(function _callee$(_context) {
25
- while (1) {
26
- switch (_context.prev = _context.next) {
27
- case 0:
28
- customVoiceHostname = _ref.customVoiceHostname, deploymentId = _ref.deploymentId, region = _ref.region, subscriptionKey = _ref.subscriptionKey;
29
- hostname = customVoiceHostname || "".concat(region, ".customvoice.api.speech.microsoft.com"); // Although encodeURI on a hostname doesn't work as expected for hostname, at least, it will fail peacefully.
30
-
31
- _context.next = 4;
32
- return fetch("https://".concat(encodeURI(hostname), "/api/texttospeech/v2.0/endpoints/").concat(encodeURIComponent(deploymentId)), {
33
- headers: {
34
- accept: 'application/json',
35
- 'ocp-apim-subscription-key': subscriptionKey
36
- }
37
- });
38
-
39
- case 4:
40
- res = _context.sent;
41
-
42
- if (res.ok) {
43
- _context.next = 7;
44
- break;
45
- }
46
-
47
- throw new Error('Failed to fetch custom voices');
48
-
49
- case 7:
50
- return _context.abrupt("return", res.json());
51
-
52
- case 8:
53
- case "end":
54
- return _context.stop();
55
- }
56
- }
57
- }, _callee);
58
- }));
59
- return _fetchCustomVoices.apply(this, arguments);
60
- }
61
-
62
- function _default(_x2) {
63
- return _ref3.apply(this, arguments);
64
- }
65
-
66
- function _ref3() {
67
- _ref3 = (0, _asyncToGenerator2.default)( /*#__PURE__*/_regenerator.default.mark(function _callee2(_ref2) {
68
- var customVoiceHostname, deploymentId, region, subscriptionKey, _yield$fetchCustomVoi, models;
69
-
70
- return _regenerator.default.wrap(function _callee2$(_context2) {
71
- while (1) {
72
- switch (_context2.prev = _context2.next) {
73
- case 0:
74
- customVoiceHostname = _ref2.customVoiceHostname, deploymentId = _ref2.deploymentId, region = _ref2.region, subscriptionKey = _ref2.subscriptionKey;
75
- _context2.next = 3;
76
- return fetchCustomVoices({
77
- customVoiceHostname: customVoiceHostname,
78
- deploymentId: deploymentId,
79
- region: region,
80
- subscriptionKey: subscriptionKey
81
- });
82
-
83
- case 3:
84
- _yield$fetchCustomVoi = _context2.sent;
85
- models = _yield$fetchCustomVoi.models;
86
- return _context2.abrupt("return", models.map(function (_ref4) {
87
- var gender = _ref4.properties.Gender,
88
- lang = _ref4.locale,
89
- voiceURI = _ref4.name;
90
- return new _SpeechSynthesisVoice.default({
91
- gender: gender,
92
- lang: lang,
93
- voiceURI: voiceURI
94
- });
95
- }).sort(function (_ref5, _ref6) {
96
- var x = _ref5.name;
97
- var y = _ref6.name;
98
- return x > y ? 1 : x < y ? -1 : 0;
99
- }));
100
-
101
- case 6:
102
- case "end":
103
- return _context2.stop();
104
- }
105
- }
106
- }, _callee2);
107
- }));
108
- return _ref3.apply(this, arguments);
109
- }
110
- //# sourceMappingURL=fetchCustomVoices.js.map
package/lib/SpeechServices/TextToSpeech/fetchCustomVoices.js.map
@@ -1 +0,0 @@
1
- {"version":3,"file":"fetchCustomVoices.js","names":["fetchCustomVoices","customVoiceHostname","deploymentId","region","subscriptionKey","hostname","fetch","encodeURI","encodeURIComponent","headers","accept","res","ok","Error","json","models","map","gender","properties","Gender","lang","locale","voiceURI","name","SpeechSynthesisVoice","sort","x","y"],"sources":["../../../src/SpeechServices/TextToSpeech/fetchCustomVoices.js"],"sourcesContent":["/* eslint no-magic-numbers: [\"error\", { \"ignore\": [0, 1, -1] }] */\n\nimport SpeechSynthesisVoice from './SpeechSynthesisVoice';\n\nasync function fetchCustomVoices({ customVoiceHostname, deploymentId, region, subscriptionKey }) {\n const hostname = customVoiceHostname || `${ region }.customvoice.api.speech.microsoft.com`;\n\n // Although encodeURI on a hostname doesn't work as expected for hostname, at least, it will fail peacefully.\n const res = await fetch(\n `https://${ encodeURI(hostname) }/api/texttospeech/v2.0/endpoints/${ encodeURIComponent(deploymentId) }`,\n {\n headers: {\n accept: 'application/json',\n 'ocp-apim-subscription-key': subscriptionKey\n }\n }\n );\n\n if (!res.ok) {\n throw new Error('Failed to fetch custom voices');\n }\n\n return res.json();\n}\n\nexport default async function({ customVoiceHostname, deploymentId, region, subscriptionKey }) {\n const { models } = await fetchCustomVoices({ customVoiceHostname, deploymentId, region, subscriptionKey });\n\n return models\n .map(\n ({ properties: { Gender: gender }, locale: lang, name: voiceURI }) =>\n new SpeechSynthesisVoice({ gender, lang, voiceURI })\n )\n .sort(({ name: x }, { name: y }) => (x > y ? 1 : x < y ? -1 : 0));\n}\n"],"mappings":";;;;;;;;;;;;;AAEA;;AAFA;SAIeA,iB;;;;;+FAAf;IAAA;IAAA;MAAA;QAAA;UAAA;YAAmCC,mBAAnC,QAAmCA,mBAAnC,EAAwDC,YAAxD,QAAwDA,YAAxD,EAAsEC,MAAtE,QAAsEA,MAAtE,EAA8EC,eAA9E,QAA8EA,eAA9E;YACQC,QADR,GACmBJ,mBAAmB,cAAQE,MAAR,0CADtC,EAGE;;YAHF;YAAA,OAIoBG,KAAK,mBACTC,SAAS,CAACF,QAAD,CADA,8CACgDG,kBAAkB,CAACN,YAAD,CADlE,GAErB;cACEO,OAAO,EAAE;gBACPC,MAAM,EAAE,kBADD;gBAEP,6BAA6BN;cAFtB;YADX,CAFqB,CAJzB;;UAAA;YAIQO,GAJR;;YAAA,IAcOA,GAAG,CAACC,EAdX;cAAA;cAAA;YAAA;;YAAA,MAeU,IAAIC,KAAJ,CAAU,+BAAV,CAfV;;UAAA;YAAA,iCAkBSF,GAAG,CAACG,IAAJ,EAlBT;;UAAA;UAAA;YAAA;QAAA;MAAA;IAAA;EAAA,C;;;;;;;;;kFAqBe;IAAA;;IAAA;MAAA;QAAA;UAAA;YAAiBb,mBAAjB,SAAiBA,mBAAjB,EAAsCC,YAAtC,SAAsCA,YAAtC,EAAoDC,MAApD,SAAoDA,MAApD,EAA4DC,eAA5D,SAA4DA,eAA5D;YAAA;YAAA,OACYJ,iBAAiB,CAAC;cAAEC,mBAAmB,EAAnBA,mBAAF;cAAuBC,YAAY,EAAZA,YAAvB;cAAqCC,MAAM,EAANA,MAArC;cAA6CC,eAAe,EAAfA;YAA7C,CAAD,CAD7B;;UAAA;YAAA;YACLW,MADK,yBACLA,MADK;YAAA,kCAGNA,MAAM,CACVC,GADI,CAEH;cAAA,IAAyBC,MAAzB,SAAGC,UAAH,CAAiBC,MAAjB;cAAA,IAA2CC,IAA3C,SAAmCC,MAAnC;cAAA,IAAuDC,QAAvD,SAAiDC,IAAjD;cAAA,OACE,IAAIC,6BAAJ,CAAyB;gBAAEP,MAAM,EAANA,MAAF;gBAAUG,IAAI,EAAJA,IAAV;gBAAgBE,QAAQ,EAARA;cAAhB,CAAzB,CADF;YAAA,CAFG,EAKJG,IALI,CAKC;cAAA,IAASC,CAAT,SAAGH,IAAH;cAAA,IAAsBI,CAAtB,SAAgBJ,IAAhB;cAAA,OAA+BG,CAAC,GAAGC,CAAJ,GAAQ,CAAR,GAAYD,CAAC,GAAGC,CAAJ,GAAQ,CAAC,CAAT,GAAa,CAAxD;YAAA,CALD,CAHM;;UAAA;UAAA;YAAA;QAAA;MAAA;IAAA;EAAA,C"}
package/lib/SpeechServices/TextToSpeech/fetchSpeechData.js
@@ -1,127 +0,0 @@
1
- "use strict";
2
-
3
- var _interopRequireDefault = require("@babel/runtime/helpers/interopRequireDefault");
4
-
5
- Object.defineProperty(exports, "__esModule", {
6
- value: true
7
- });
8
- exports.default = _default;
9
-
10
- var _regenerator = _interopRequireDefault(require("@babel/runtime/regenerator"));
11
-
12
- var _defineProperty2 = _interopRequireDefault(require("@babel/runtime/helpers/defineProperty"));
13
-
14
- var _asyncToGenerator2 = _interopRequireDefault(require("@babel/runtime/helpers/asyncToGenerator"));
15
-
16
- var _base64Arraybuffer = require("base64-arraybuffer");
17
-
18
- var _buildSSML = _interopRequireDefault(require("./buildSSML"));
19
-
20
- var _isSSML = _interopRequireDefault(require("./isSSML"));
21
-
22
- function ownKeys(object, enumerableOnly) { var keys = Object.keys(object); if (Object.getOwnPropertySymbols) { var symbols = Object.getOwnPropertySymbols(object); enumerableOnly && (symbols = symbols.filter(function (sym) { return Object.getOwnPropertyDescriptor(object, sym).enumerable; })), keys.push.apply(keys, symbols); } return keys; }
23
-
24
- function _objectSpread(target) { for (var i = 1; i < arguments.length; i++) { var source = null != arguments[i] ? arguments[i] : {}; i % 2 ? ownKeys(Object(source), !0).forEach(function (key) { (0, _defineProperty2.default)(target, key, source[key]); }) : Object.getOwnPropertyDescriptors ? Object.defineProperties(target, Object.getOwnPropertyDescriptors(source)) : ownKeys(Object(source)).forEach(function (key) { Object.defineProperty(target, key, Object.getOwnPropertyDescriptor(source, key)); }); } return target; }
25
-
26
- var DEFAULT_LANGUAGE = 'en-US';
27
- var DEFAULT_OUTPUT_FORMAT = 'riff-16khz-16bit-mono-pcm';
28
- var DEFAULT_VOICE = 'Microsoft Server Speech Text to Speech Voice (en-US, AriaNeural)';
29
- var EMPTY_MP3_BASE64 = 'SUQzBAAAAAAAI1RTU0UAAAAPAAADTGF2ZjU3LjU2LjEwMQAAAAAAAAAAAAAA//tAwAAAAAAAAAAAAAAAAAAAAAAASW5mbwAAAA8AAAACAAABhgC7u7u7u7u7u7u7u7u7u7u7u7u7u7u7u7u7u7u7u7u7u7u7u7u7u7u7u7u7u7u7u7u7//////////////////////////////////////////////////////////////////8AAAAATGF2YzU3LjY0AAAAAAAAAAAAAAAAJAUHAAAAAAAAAYYoRBqpAAAAAAD/+xDEAAPAAAGkAAAAIAAANIAAAARMQU1FMy45OS41VVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVf/7EMQpg8AAAaQAAAAgAAA0gAAABFVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVV';
30
-
31
- function _default(_x) {
32
- return _ref2.apply(this, arguments);
33
- }
34
-
35
- function _ref2() {
36
- _ref2 = (0, _asyncToGenerator2.default)( /*#__PURE__*/_regenerator.default.mark(function _callee(_ref) {
37
- var deploymentId, fetchCredentials, _ref$lang, lang, _ref$outputFormat, outputFormat, pitch, rate, text, _ref$voice, voice, volume, _yield$fetchCredentia, authorizationToken, region, speechSynthesisHostname, subscriptionKey, ssml, hostname, search, url, res;
38
-
39
- return _regenerator.default.wrap(function _callee$(_context) {
40
- while (1) {
41
- switch (_context.prev = _context.next) {
42
- case 0:
43
- deploymentId = _ref.deploymentId, fetchCredentials = _ref.fetchCredentials, _ref$lang = _ref.lang, lang = _ref$lang === void 0 ? DEFAULT_LANGUAGE : _ref$lang, _ref$outputFormat = _ref.outputFormat, outputFormat = _ref$outputFormat === void 0 ? DEFAULT_OUTPUT_FORMAT : _ref$outputFormat, pitch = _ref.pitch, rate = _ref.rate, text = _ref.text, _ref$voice = _ref.voice, voice = _ref$voice === void 0 ? DEFAULT_VOICE : _ref$voice, volume = _ref.volume;
44
-
45
- if (text) {
46
- _context.next = 3;
47
- break;
48
- }
49
-
50
- return _context.abrupt("return", (0, _base64Arraybuffer.decode)(EMPTY_MP3_BASE64));
51
-
52
- case 3:
53
- _context.next = 5;
54
- return fetchCredentials();
55
-
56
- case 5:
57
- _yield$fetchCredentia = _context.sent;
58
- authorizationToken = _yield$fetchCredentia.authorizationToken;
59
- region = _yield$fetchCredentia.region;
60
- speechSynthesisHostname = _yield$fetchCredentia.speechSynthesisHostname;
61
- subscriptionKey = _yield$fetchCredentia.subscriptionKey;
62
-
63
- if (!(authorizationToken && subscriptionKey || !authorizationToken && !subscriptionKey)) {
64
- _context.next = 14;
65
- break;
66
- }
67
-
68
- throw new Error('Only "authorizationToken" or "subscriptionKey" should be set.');
69
-
70
- case 14:
71
- if (!(region && speechSynthesisHostname || !region && !speechSynthesisHostname)) {
72
- _context.next = 16;
73
- break;
74
- }
75
-
76
- throw new Error('Only "region" or "speechSynthesisHostnamename" should be set.');
77
-
78
- case 16:
79
- ssml = (0, _isSSML.default)(text) ? text : (0, _buildSSML.default)({
80
- lang: lang,
81
- pitch: pitch,
82
- rate: rate,
83
- text: text,
84
- voice: voice,
85
- volume: volume
86
- }); // Although calling encodeURI on hostname does not actually works, it fails faster and safer.
87
-
88
- hostname = speechSynthesisHostname || (deploymentId ? "".concat(encodeURI(region), ".voice.speech.microsoft.com") : "".concat(encodeURI(region), ".tts.speech.microsoft.com"));
89
- search = deploymentId ? "?deploymentId=".concat(encodeURI(deploymentId)) : '';
90
- url = "https://".concat(hostname, "/cognitiveservices/v1").concat(search);
91
- _context.next = 22;
92
- return fetch(url, {
93
- headers: _objectSpread({
94
- 'Content-Type': 'application/ssml+xml',
95
- 'X-Microsoft-OutputFormat': outputFormat
96
- }, authorizationToken ? {
97
- Authorization: "Bearer ".concat(authorizationToken)
98
- } : {
99
- 'Ocp-Apim-Subscription-Key': subscriptionKey
100
- }),
101
- method: 'POST',
102
- body: ssml
103
- });
104
-
105
- case 22:
106
- res = _context.sent;
107
-
108
- if (res.ok) {
109
- _context.next = 25;
110
- break;
111
- }
112
-
113
- throw new Error("web-speech-cognitive-services: Failed to syntheis speech, server returned ".concat(res.status));
114
-
115
- case 25:
116
- return _context.abrupt("return", res.arrayBuffer());
117
-
118
- case 26:
119
- case "end":
120
- return _context.stop();
121
- }
122
- }
123
- }, _callee);
124
- }));
125
- return _ref2.apply(this, arguments);
126
- }
127
- //# sourceMappingURL=fetchSpeechData.js.map
package/lib/SpeechServices/TextToSpeech/fetchSpeechData.js.map
@@ -1 +0,0 @@
1
- {"version":3,"file":"fetchSpeechData.js","names":["DEFAULT_LANGUAGE","DEFAULT_OUTPUT_FORMAT","DEFAULT_VOICE","EMPTY_MP3_BASE64","deploymentId","fetchCredentials","lang","outputFormat","pitch","rate","text","voice","volume","decode","authorizationToken","region","speechSynthesisHostname","subscriptionKey","Error","ssml","isSSML","buildSSML","hostname","encodeURI","search","url","fetch","headers","Authorization","method","body","res","ok","status","arrayBuffer"],"sources":["../../../src/SpeechServices/TextToSpeech/fetchSpeechData.js"],"sourcesContent":["import { decode } from 'base64-arraybuffer';\nimport buildSSML from './buildSSML';\nimport isSSML from './isSSML';\n\nconst DEFAULT_LANGUAGE = 'en-US';\nconst DEFAULT_OUTPUT_FORMAT = 'riff-16khz-16bit-mono-pcm';\nconst DEFAULT_VOICE = 'Microsoft Server Speech Text to Speech Voice (en-US, AriaNeural)';\nconst EMPTY_MP3_BASE64 =\n 'SUQzBAAAAAAAI1RTU0UAAAAPAAADTGF2ZjU3LjU2LjEwMQAAAAAAAAAAAAAA//tAwAAAAAAAAAAAAAAAAAAAAAAASW5mbwAAAA8AAAACAAABhgC7u7u7u7u7u7u7u7u7u7u7u7u7u7u7u7u7u7u7u7u7u7u7u7u7u7u7u7u7u7u7u7u7//////////////////////////////////////////////////////////////////8AAAAATGF2YzU3LjY0AAAAAAAAAAAAAAAAJAUHAAAAAAAAAYYoRBqpAAAAAAD/+xDEAAPAAAGkAAAAIAAANIAAAARMQU1FMy45OS41VVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVf/7EMQpg8AAAaQAAAAgAAA0gAAABFVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVV';\n\nexport default async function({\n deploymentId,\n fetchCredentials,\n lang = DEFAULT_LANGUAGE,\n outputFormat = DEFAULT_OUTPUT_FORMAT,\n pitch,\n rate,\n text,\n voice = DEFAULT_VOICE,\n volume\n}) {\n if (!text) {\n // If text is empty, play a short audio clip. This allows developers to easily prime the AudioContext object by playing an empty string.\n return decode(EMPTY_MP3_BASE64);\n }\n\n const { authorizationToken, region, speechSynthesisHostname, subscriptionKey } = await fetchCredentials();\n\n if ((authorizationToken && subscriptionKey) || (!authorizationToken && !subscriptionKey)) {\n throw new Error('Only \"authorizationToken\" or \"subscriptionKey\" should be set.');\n } else if ((region && speechSynthesisHostname) || (!region && !speechSynthesisHostname)) {\n throw new Error('Only \"region\" or \"speechSynthesisHostnamename\" should be set.');\n }\n\n const ssml = isSSML(text) ? text : buildSSML({ lang, pitch, rate, text, voice, volume });\n\n // Although calling encodeURI on hostname does not actually works, it fails faster and safer.\n const hostname =\n speechSynthesisHostname ||\n (deploymentId\n ? `${ encodeURI(region) }.voice.speech.microsoft.com`\n : `${ encodeURI(region) }.tts.speech.microsoft.com`);\n const search = deploymentId ? `?deploymentId=${ encodeURI(deploymentId) }` : '';\n const url = `https://${ hostname }/cognitiveservices/v1${ search }`;\n\n const res = await fetch(url, {\n headers: {\n 'Content-Type': 'application/ssml+xml',\n 'X-Microsoft-OutputFormat': outputFormat,\n ...(authorizationToken\n ? 
{\n Authorization: `Bearer ${ authorizationToken }`\n }\n : {\n 'Ocp-Apim-Subscription-Key': subscriptionKey\n })\n },\n method: 'POST',\n body: ssml\n });\n\n if (!res.ok) {\n throw new Error(`web-speech-cognitive-services: Failed to syntheis speech, server returned ${ res.status }`);\n }\n\n return res.arrayBuffer();\n}\n"],"mappings":";;;;;;;;;;;;;;;AAAA;;AACA;;AACA;;;;;;AAEA,IAAMA,gBAAgB,GAAG,OAAzB;AACA,IAAMC,qBAAqB,GAAG,2BAA9B;AACA,IAAMC,aAAa,GAAG,kEAAtB;AACA,IAAMC,gBAAgB,GACpB,skBADF;;;;;;;kFAGe;IAAA;;IAAA;MAAA;QAAA;UAAA;YACbC,YADa,QACbA,YADa,EAEbC,gBAFa,QAEbA,gBAFa,mBAGbC,IAHa,EAGbA,IAHa,0BAGNN,gBAHM,uCAIbO,YAJa,EAIbA,YAJa,kCAIEN,qBAJF,sBAKbO,KALa,QAKbA,KALa,EAMbC,IANa,QAMbA,IANa,EAObC,IAPa,QAObA,IAPa,oBAQbC,KARa,EAQbA,KARa,2BAQLT,aARK,eASbU,MATa,QASbA,MATa;;YAAA,IAWRF,IAXQ;cAAA;cAAA;YAAA;;YAAA,iCAaJ,IAAAG,yBAAA,EAAOV,gBAAP,CAbI;;UAAA;YAAA;YAAA,OAgB0EE,gBAAgB,EAhB1F;;UAAA;YAAA;YAgBLS,kBAhBK,yBAgBLA,kBAhBK;YAgBeC,MAhBf,yBAgBeA,MAhBf;YAgBuBC,uBAhBvB,yBAgBuBA,uBAhBvB;YAgBgDC,eAhBhD,yBAgBgDA,eAhBhD;;YAAA,MAkBRH,kBAAkB,IAAIG,eAAvB,IAA4C,CAACH,kBAAD,IAAuB,CAACG,eAlB3D;cAAA;cAAA;YAAA;;YAAA,MAmBL,IAAIC,KAAJ,CAAU,+DAAV,CAnBK;;UAAA;YAAA,MAoBDH,MAAM,IAAIC,uBAAX,IAAwC,CAACD,MAAD,IAAW,CAACC,uBApBlD;cAAA;cAAA;YAAA;;YAAA,MAqBL,IAAIE,KAAJ,CAAU,+DAAV,CArBK;;UAAA;YAwBPC,IAxBO,GAwBA,IAAAC,eAAA,EAAOV,IAAP,IAAeA,IAAf,GAAsB,IAAAW,kBAAA,EAAU;cAAEf,IAAI,EAAJA,IAAF;cAAQE,KAAK,EAALA,KAAR;cAAeC,IAAI,EAAJA,IAAf;cAAqBC,IAAI,EAAJA,IAArB;cAA2BC,KAAK,EAALA,KAA3B;cAAkCC,MAAM,EAANA;YAAlC,CAAV,CAxBtB,EA0Bb;;YACMU,QA3BO,GA4BXN,uBAAuB,KACtBZ,YAAY,aACLmB,SAAS,CAACR,MAAD,CADJ,6CAELQ,SAAS,CAACR,MAAD,CAFJ,8BADU,CA5BZ;YAgCPS,MAhCO,GAgCEpB,YAAY,2BAAqBmB,SAAS,CAACnB,YAAD,CAA9B,IAAkD,EAhChE;YAiCPqB,GAjCO,qBAiCWH,QAjCX,kCAiC6CE,MAjC7C;YAAA;YAAA,OAmCKE,KAAK,CAACD,GAAD,EAAM;cAC3BE,OAAO;gBACL,gBAAgB,sBADX;gBAEL,4BAA4BpB;cAFvB,GAGDO,kBAAkB,GAClB;gBACEc,aAAa,mBAAad,kBAAb;cADf,CADkB,GAIlB;gBACE,6BAA6BG;cAD/B,CAPC,CADoB;cAY3BY,MAAM,EAAE,MAZmB;cAa3BC,IAAI,EAAEX;YAbqB,CAAN,CAnCV;;UAAA;YAmCPY,GAnCO;;YAAA,IAmDRA,GAAG,CAACC,EAnDI;cAAA;cAAA;YAAA;;YAAA,MAoDL,IAAId,KAAJ,qFAAwFa,GAAG,CAACE,MAA5F,EApDK;;UAAA;YAAA,iCAuDNF,GAAG,CAACG,WAAJ,EAvDM;;UAAA;UAAA;YAAA;QAAA;MAAA;IAAA;EAAA,C"}
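
The removed fetchSpeechData.js above is the REST half of synthesis: it wraps plain text in SSML (unless SSML is passed in), POSTs it to /cognitiveservices/v1 on the regional TTS host (or the .voice.speech.microsoft.com host when a custom-voice deploymentId is set), and returns the audio as an ArrayBuffer for the AudioContext queue. A condensed sketch of the core request, subscription-key variant only:

  // Condensed from the removed fetchSpeechData helper: POST SSML, receive audio bytes.
  async function fetchSpeechData({ region, subscriptionKey, ssml, outputFormat = 'riff-16khz-16bit-mono-pcm' }) {
    const res = await fetch(`https://${encodeURI(region)}.tts.speech.microsoft.com/cognitiveservices/v1`, {
      method: 'POST',
      headers: {
        'Content-Type': 'application/ssml+xml',
        'X-Microsoft-OutputFormat': outputFormat,
        // The original alternatively sends `Authorization: Bearer <authorizationToken>`.
        'Ocp-Apim-Subscription-Key': subscriptionKey
      },
      body: ssml
    });

    if (!res.ok) {
      throw new Error(`Failed to synthesize speech, server returned ${res.status}`);
    }

    return res.arrayBuffer();
  }
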
package/lib/SpeechServices/TextToSpeech/fetchVoices.js
@@ -1,87 +0,0 @@
1
- "use strict";
2
-
3
- var _interopRequireDefault = require("@babel/runtime/helpers/interopRequireDefault");
4
-
5
- Object.defineProperty(exports, "__esModule", {
6
- value: true
7
- });
8
- exports.default = fetchVoices;
9
-
10
- var _regenerator = _interopRequireDefault(require("@babel/runtime/regenerator"));
11
-
12
- var _defineProperty2 = _interopRequireDefault(require("@babel/runtime/helpers/defineProperty"));
13
-
14
- var _asyncToGenerator2 = _interopRequireDefault(require("@babel/runtime/helpers/asyncToGenerator"));
15
-
16
- var _SpeechSynthesisVoice = _interopRequireDefault(require("./SpeechSynthesisVoice"));
17
-
18
- function ownKeys(object, enumerableOnly) { var keys = Object.keys(object); if (Object.getOwnPropertySymbols) { var symbols = Object.getOwnPropertySymbols(object); enumerableOnly && (symbols = symbols.filter(function (sym) { return Object.getOwnPropertyDescriptor(object, sym).enumerable; })), keys.push.apply(keys, symbols); } return keys; }
19
-
20
- function _objectSpread(target) { for (var i = 1; i < arguments.length; i++) { var source = null != arguments[i] ? arguments[i] : {}; i % 2 ? ownKeys(Object(source), !0).forEach(function (key) { (0, _defineProperty2.default)(target, key, source[key]); }) : Object.getOwnPropertyDescriptors ? Object.defineProperties(target, Object.getOwnPropertyDescriptors(source)) : ownKeys(Object(source)).forEach(function (key) { Object.defineProperty(target, key, Object.getOwnPropertyDescriptor(source, key)); }); } return target; }
21
-
22
- function fetchVoices(_x) {
23
- return _fetchVoices.apply(this, arguments);
24
- }
25
-
26
- function _fetchVoices() {
27
- _fetchVoices = (0, _asyncToGenerator2.default)( /*#__PURE__*/_regenerator.default.mark(function _callee(_ref) {
28
- var authorizationToken, region, speechSynthesisHostname, subscriptionKey, hostname, res, voices;
29
- return _regenerator.default.wrap(function _callee$(_context) {
30
- while (1) {
31
- switch (_context.prev = _context.next) {
32
- case 0:
33
- authorizationToken = _ref.authorizationToken, region = _ref.region, speechSynthesisHostname = _ref.speechSynthesisHostname, subscriptionKey = _ref.subscriptionKey;
34
- // Although encodeURI on a hostname doesn't work as expected for hostname, at least, it will fail peacefully.
35
- hostname = speechSynthesisHostname || "".concat(encodeURI(region), ".tts.speech.microsoft.com");
36
- _context.next = 4;
37
- return fetch("https://".concat(hostname, "/cognitiveservices/voices/list"), {
38
- headers: _objectSpread({
39
- 'content-type': 'application/json'
40
- }, authorizationToken ? {
41
- authorization: "Bearer ".concat(authorizationToken)
42
- } : {
43
- 'Ocp-Apim-Subscription-Key': subscriptionKey
44
- })
45
- });
46
-
47
- case 4:
48
- res = _context.sent;
49
-
50
- if (res.ok) {
51
- _context.next = 7;
52
- break;
53
- }
54
-
55
- throw new Error('Failed to fetch voices');
56
-
57
- case 7:
58
- _context.next = 9;
59
- return res.json();
60
-
61
- case 9:
62
- voices = _context.sent;
63
- return _context.abrupt("return", voices.map(function (_ref2) {
64
- var gender = _ref2.Gender,
65
- lang = _ref2.Locale,
66
- voiceURI = _ref2.Name;
67
- return new _SpeechSynthesisVoice.default({
68
- gender: gender,
69
- lang: lang,
70
- voiceURI: voiceURI
71
- });
72
- }).sort(function (_ref3, _ref4) {
73
- var x = _ref3.name;
74
- var y = _ref4.name;
75
- return x > y ? 1 : x < y ? -1 : 0;
76
- }));
77
-
78
- case 11:
79
- case "end":
80
- return _context.stop();
81
- }
82
- }
83
- }, _callee);
84
- }));
85
- return _fetchVoices.apply(this, arguments);
86
- }
87
- //# sourceMappingURL=fetchVoices.js.map
package/lib/SpeechServices/TextToSpeech/fetchVoices.js.map
@@ -1 +0,0 @@
1
- {"version":3,"file":"fetchVoices.js","names":["fetchVoices","authorizationToken","region","speechSynthesisHostname","subscriptionKey","hostname","encodeURI","fetch","headers","authorization","res","ok","Error","json","voices","map","gender","Gender","lang","Locale","voiceURI","Name","SpeechSynthesisVoice","sort","x","name","y"],"sources":["../../../src/SpeechServices/TextToSpeech/fetchVoices.js"],"sourcesContent":["/* eslint no-magic-numbers: [\"error\", { \"ignore\": [0, 1, -1] }] */\n\nimport SpeechSynthesisVoice from './SpeechSynthesisVoice';\n\nexport default async function fetchVoices({ authorizationToken, region, speechSynthesisHostname, subscriptionKey }) {\n // Although encodeURI on a hostname doesn't work as expected for hostname, at least, it will fail peacefully.\n const hostname = speechSynthesisHostname || `${ encodeURI(region) }.tts.speech.microsoft.com`;\n const res = await fetch(`https://${ hostname }/cognitiveservices/voices/list`, {\n headers: {\n 'content-type': 'application/json',\n ...(authorizationToken\n ? {\n authorization: `Bearer ${ authorizationToken }`\n }\n : {\n 'Ocp-Apim-Subscription-Key': subscriptionKey\n })\n }\n });\n\n if (!res.ok) {\n throw new Error('Failed to fetch voices');\n }\n\n const voices = await res.json();\n\n return voices\n .map(({ Gender: gender, Locale: lang, Name: voiceURI }) => new SpeechSynthesisVoice({ gender, lang, voiceURI }))\n .sort(({ name: x }, { name: y }) => (x > y ? 1 : x < y ? -1 : 0));\n}\n"],"mappings":";;;;;;;;;;;;;;;AAEA;;;;;;SAE8BA,W;;;;;yFAAf;IAAA;IAAA;MAAA;QAAA;UAAA;YAA6BC,kBAA7B,QAA6BA,kBAA7B,EAAiDC,MAAjD,QAAiDA,MAAjD,EAAyDC,uBAAzD,QAAyDA,uBAAzD,EAAkFC,eAAlF,QAAkFA,eAAlF;YACb;YACMC,QAFO,GAEIF,uBAAuB,cAAQG,SAAS,CAACJ,MAAD,CAAjB,8BAF3B;YAAA;YAAA,OAGKK,KAAK,mBAAaF,QAAb,qCAAwD;cAC7EG,OAAO;gBACL,gBAAgB;cADX,GAEDP,kBAAkB,GAClB;gBACEQ,aAAa,mBAAaR,kBAAb;cADf,CADkB,GAIlB;gBACE,6BAA6BG;cAD/B,CANC;YADsE,CAAxD,CAHV;;UAAA;YAGPM,GAHO;;YAAA,IAgBRA,GAAG,CAACC,EAhBI;cAAA;cAAA;YAAA;;YAAA,MAiBL,IAAIC,KAAJ,CAAU,wBAAV,CAjBK;;UAAA;YAAA;YAAA,OAoBQF,GAAG,CAACG,IAAJ,EApBR;;UAAA;YAoBPC,MApBO;YAAA,iCAsBNA,MAAM,CACVC,GADI,CACA;cAAA,IAAWC,MAAX,SAAGC,MAAH;cAAA,IAA2BC,IAA3B,SAAmBC,MAAnB;cAAA,IAAuCC,QAAvC,SAAiCC,IAAjC;cAAA,OAAsD,IAAIC,6BAAJ,CAAyB;gBAAEN,MAAM,EAANA,MAAF;gBAAUE,IAAI,EAAJA,IAAV;gBAAgBE,QAAQ,EAARA;cAAhB,CAAzB,CAAtD;YAAA,CADA,EAEJG,IAFI,CAEC;cAAA,IAASC,CAAT,SAAGC,IAAH;cAAA,IAAsBC,CAAtB,SAAgBD,IAAhB;cAAA,OAA+BD,CAAC,GAAGE,CAAJ,GAAQ,CAAR,GAAYF,CAAC,GAAGE,CAAJ,GAAQ,CAAC,CAAT,GAAa,CAAxD;YAAA,CAFD,CAtBM;;UAAA;UAAA;YAAA;QAAA;MAAA;IAAA;EAAA,C"}