web-speech-cognitive-services 7.1.4-master.151bc9b → 8.0.0-main.181f814

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (100)
  1. package/dist/web-speech-cognitive-services.d.mts +283 -0
  2. package/dist/web-speech-cognitive-services.d.ts +283 -0
  3. package/dist/web-speech-cognitive-services.development.js +25696 -0
  4. package/dist/web-speech-cognitive-services.development.js.map +1 -0
  5. package/dist/web-speech-cognitive-services.js +1356 -0
  6. package/dist/web-speech-cognitive-services.js.map +1 -0
  7. package/dist/web-speech-cognitive-services.mjs +1321 -0
  8. package/dist/web-speech-cognitive-services.mjs.map +1 -0
  9. package/dist/web-speech-cognitive-services.production.min.js +31 -0
  10. package/dist/web-speech-cognitive-services.production.min.js.map +1 -0
  11. package/package.json +67 -47
  12. package/CHANGELOG.md +0 -372
  13. package/lib/BingSpeech/SpeechToText/SpeechGrammarList.js +0 -94
  14. package/lib/BingSpeech/SpeechToText/SpeechGrammarList.js.map +0 -1
  15. package/lib/BingSpeech/SpeechToText/createSpeechRecognitionPonyfill.js +0 -483
  16. package/lib/BingSpeech/SpeechToText/createSpeechRecognitionPonyfill.js.map +0 -1
  17. package/lib/BingSpeech/SpeechToText.js +0 -14
  18. package/lib/BingSpeech/SpeechToText.js.map +0 -1
  19. package/lib/BingSpeech/TextToSpeech/AudioContextConsumer.js +0 -122
  20. package/lib/BingSpeech/TextToSpeech/AudioContextConsumer.js.map +0 -1
  21. package/lib/BingSpeech/TextToSpeech/AudioContextQueue.js +0 -104
  22. package/lib/BingSpeech/TextToSpeech/AudioContextQueue.js.map +0 -1
  23. package/lib/BingSpeech/TextToSpeech/SpeechSynthesisUtterance.js +0 -264
  24. package/lib/BingSpeech/TextToSpeech/SpeechSynthesisUtterance.js.map +0 -1
  25. package/lib/BingSpeech/TextToSpeech/SpeechSynthesisVoice.js +0 -61
  26. package/lib/BingSpeech/TextToSpeech/SpeechSynthesisVoice.js.map +0 -1
  27. package/lib/BingSpeech/TextToSpeech/buildSSML.js +0 -32
  28. package/lib/BingSpeech/TextToSpeech/buildSSML.js.map +0 -1
  29. package/lib/BingSpeech/TextToSpeech/createSpeechSynthesisPonyfill.js +0 -220
  30. package/lib/BingSpeech/TextToSpeech/createSpeechSynthesisPonyfill.js.map +0 -1
  31. package/lib/BingSpeech/TextToSpeech/fetchSpeechData.js +0 -74
  32. package/lib/BingSpeech/TextToSpeech/fetchSpeechData.js.map +0 -1
  33. package/lib/BingSpeech/TextToSpeech/fetchVoices.js +0 -335
  34. package/lib/BingSpeech/TextToSpeech/fetchVoices.js.map +0 -1
  35. package/lib/BingSpeech/TextToSpeech/isSSML.js +0 -13
  36. package/lib/BingSpeech/TextToSpeech/isSSML.js.map +0 -1
  37. package/lib/BingSpeech/TextToSpeech/subscribeEvent.js +0 -14
  38. package/lib/BingSpeech/TextToSpeech/subscribeEvent.js.map +0 -1
  39. package/lib/BingSpeech/TextToSpeech.js +0 -14
  40. package/lib/BingSpeech/TextToSpeech.js.map +0 -1
  41. package/lib/BingSpeech/Util/DOMEventEmitter.js +0 -61
  42. package/lib/BingSpeech/Util/DOMEventEmitter.js.map +0 -1
  43. package/lib/BingSpeech/Util/createFetchTokenUsingSubscriptionKey.js +0 -41
  44. package/lib/BingSpeech/Util/createFetchTokenUsingSubscriptionKey.js.map +0 -1
  45. package/lib/BingSpeech/fetchAuthorizationToken.js +0 -57
  46. package/lib/BingSpeech/fetchAuthorizationToken.js.map +0 -1
  47. package/lib/BingSpeech/index.js +0 -84
  48. package/lib/BingSpeech/index.js.map +0 -1
  49. package/lib/SpeechServices/SpeechSDK.js +0 -19
  50. package/lib/SpeechServices/SpeechSDK.js.map +0 -1
  51. package/lib/SpeechServices/SpeechToText/SpeechGrammarList.js +0 -45
  52. package/lib/SpeechServices/SpeechToText/SpeechGrammarList.js.map +0 -1
  53. package/lib/SpeechServices/SpeechToText/cognitiveServiceEventResultToWebSpeechRecognitionResultList.js +0 -56
  54. package/lib/SpeechServices/SpeechToText/cognitiveServiceEventResultToWebSpeechRecognitionResultList.js.map +0 -1
  55. package/lib/SpeechServices/SpeechToText/createSpeechRecognitionPonyfill.js +0 -984
  56. package/lib/SpeechServices/SpeechToText/createSpeechRecognitionPonyfill.js.map +0 -1
  57. package/lib/SpeechServices/SpeechToText.js +0 -24
  58. package/lib/SpeechServices/SpeechToText.js.map +0 -1
  59. package/lib/SpeechServices/TextToSpeech/AudioContextConsumer.js +0 -92
  60. package/lib/SpeechServices/TextToSpeech/AudioContextConsumer.js.map +0 -1
  61. package/lib/SpeechServices/TextToSpeech/AudioContextQueue.js +0 -111
  62. package/lib/SpeechServices/TextToSpeech/AudioContextQueue.js.map +0 -1
  63. package/lib/SpeechServices/TextToSpeech/SpeechSynthesisEvent.js +0 -40
  64. package/lib/SpeechServices/TextToSpeech/SpeechSynthesisEvent.js.map +0 -1
  65. package/lib/SpeechServices/TextToSpeech/SpeechSynthesisUtterance.js +0 -283
  66. package/lib/SpeechServices/TextToSpeech/SpeechSynthesisUtterance.js.map +0 -1
  67. package/lib/SpeechServices/TextToSpeech/SpeechSynthesisVoice.js +0 -63
  68. package/lib/SpeechServices/TextToSpeech/SpeechSynthesisVoice.js.map +0 -1
  69. package/lib/SpeechServices/TextToSpeech/buildSSML.js +0 -32
  70. package/lib/SpeechServices/TextToSpeech/buildSSML.js.map +0 -1
  71. package/lib/SpeechServices/TextToSpeech/createSpeechSynthesisPonyfill.js +0 -282
  72. package/lib/SpeechServices/TextToSpeech/createSpeechSynthesisPonyfill.js.map +0 -1
  73. package/lib/SpeechServices/TextToSpeech/fetchCustomVoices.js +0 -110
  74. package/lib/SpeechServices/TextToSpeech/fetchCustomVoices.js.map +0 -1
  75. package/lib/SpeechServices/TextToSpeech/fetchSpeechData.js +0 -127
  76. package/lib/SpeechServices/TextToSpeech/fetchSpeechData.js.map +0 -1
  77. package/lib/SpeechServices/TextToSpeech/fetchVoices.js +0 -87
  78. package/lib/SpeechServices/TextToSpeech/fetchVoices.js.map +0 -1
  79. package/lib/SpeechServices/TextToSpeech/isSSML.js +0 -13
  80. package/lib/SpeechServices/TextToSpeech/isSSML.js.map +0 -1
  81. package/lib/SpeechServices/TextToSpeech/subscribeEvent.js +0 -14
  82. package/lib/SpeechServices/TextToSpeech/subscribeEvent.js.map +0 -1
  83. package/lib/SpeechServices/TextToSpeech.js +0 -14
  84. package/lib/SpeechServices/TextToSpeech.js.map +0 -1
  85. package/lib/SpeechServices/fetchAuthorizationToken.js +0 -58
  86. package/lib/SpeechServices/fetchAuthorizationToken.js.map +0 -1
  87. package/lib/SpeechServices/patchOptions.js +0 -213
  88. package/lib/SpeechServices/patchOptions.js.map +0 -1
  89. package/lib/SpeechServices/resolveFunctionOrReturnValue.js +0 -11
  90. package/lib/SpeechServices/resolveFunctionOrReturnValue.js.map +0 -1
  91. package/lib/SpeechServices.js +0 -73
  92. package/lib/SpeechServices.js.map +0 -1
  93. package/lib/Util/arrayToMap.js +0 -28
  94. package/lib/Util/arrayToMap.js.map +0 -1
  95. package/lib/Util/createPromiseQueue.js +0 -40
  96. package/lib/Util/createPromiseQueue.js.map +0 -1
  97. package/lib/index.js +0 -14
  98. package/lib/index.js.map +0 -1
  99. package/umd/web-speech-cognitive-services.development.js +0 -4740
  100. package/umd/web-speech-cognitive-services.production.min.js +0 -2
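Note on the restructure shown above: the 8.0.0 release drops the transpiled lib/ (CommonJS) and umd/ outputs in favour of bundled dist/ builds plus TypeScript declarations (.d.ts / .d.mts). For orientation, the sketch below shows how the speech recognition ponyfill built by the removed file is typically consumed. The import name and the credentials shape are assumptions for illustration and are not taken from this diff; the SpeechRecognition properties and events (lang, interimResults, onresult, onerror, start) do appear in the removed source that follows.

// Hypothetical usage sketch (not part of either package version).
// Assumes the package exposes createSpeechRecognitionPonyfill and accepts
// a credentials object carrying region + subscriptionKey.
import { createSpeechRecognitionPonyfill } from 'web-speech-cognitive-services';

const { SpeechRecognition } = createSpeechRecognitionPonyfill({
  credentials: { region: 'westus', subscriptionKey: 'YOUR_SUBSCRIPTION_KEY' }
});

const recognition = new SpeechRecognition();

recognition.lang = 'en-US';
recognition.interimResults = true;

// Each entry in "results" is a result list; the first alternative carries the transcript.
recognition.onresult = ({ results }) => console.log(results[0][0].transcript);
recognition.onerror = ({ error }) => console.error(error);

recognition.start();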
package/lib/SpeechServices/SpeechToText/createSpeechRecognitionPonyfill.js
@@ -1,984 +0,0 @@
- "use strict";
-
- var _interopRequireDefault = require("@babel/runtime/helpers/interopRequireDefault");
-
- Object.defineProperty(exports, "__esModule", {
- value: true
- });
- exports.createSpeechRecognitionPonyfillFromRecognizer = createSpeechRecognitionPonyfillFromRecognizer;
- exports.default = void 0;
-
- var _regenerator = _interopRequireDefault(require("@babel/runtime/regenerator"));
-
- var _toConsumableArray2 = _interopRequireDefault(require("@babel/runtime/helpers/toConsumableArray"));
-
- var _defineProperty2 = _interopRequireDefault(require("@babel/runtime/helpers/defineProperty"));
-
- var _asyncToGenerator2 = _interopRequireDefault(require("@babel/runtime/helpers/asyncToGenerator"));
-
- var _createClass2 = _interopRequireDefault(require("@babel/runtime/helpers/createClass"));
-
- var _classCallCheck2 = _interopRequireDefault(require("@babel/runtime/helpers/classCallCheck"));
-
- var _inherits2 = _interopRequireDefault(require("@babel/runtime/helpers/inherits"));
-
- var _possibleConstructorReturn2 = _interopRequireDefault(require("@babel/runtime/helpers/possibleConstructorReturn"));
-
- var _getPrototypeOf2 = _interopRequireDefault(require("@babel/runtime/helpers/getPrototypeOf"));
-
- var _es = require("event-target-shim/es5");
-
- var _cognitiveServiceEventResultToWebSpeechRecognitionResultList = _interopRequireDefault(require("./cognitiveServiceEventResultToWebSpeechRecognitionResultList"));
-
- var _createPromiseQueue = _interopRequireDefault(require("../../Util/createPromiseQueue"));
-
- var _patchOptions2 = _interopRequireDefault(require("../patchOptions"));
-
- var _SpeechGrammarList = _interopRequireDefault(require("./SpeechGrammarList"));
-
- var _SpeechSDK = _interopRequireDefault(require("../SpeechSDK"));
-
- function ownKeys(object, enumerableOnly) { var keys = Object.keys(object); if (Object.getOwnPropertySymbols) { var symbols = Object.getOwnPropertySymbols(object); enumerableOnly && (symbols = symbols.filter(function (sym) { return Object.getOwnPropertyDescriptor(object, sym).enumerable; })), keys.push.apply(keys, symbols); } return keys; }
-
- function _objectSpread(target) { for (var i = 1; i < arguments.length; i++) { var source = null != arguments[i] ? arguments[i] : {}; i % 2 ? ownKeys(Object(source), !0).forEach(function (key) { (0, _defineProperty2.default)(target, key, source[key]); }) : Object.getOwnPropertyDescriptors ? Object.defineProperties(target, Object.getOwnPropertyDescriptors(source)) : ownKeys(Object(source)).forEach(function (key) { Object.defineProperty(target, key, Object.getOwnPropertyDescriptor(source, key)); }); } return target; }
-
- function _createSuper(Derived) { var hasNativeReflectConstruct = _isNativeReflectConstruct(); return function _createSuperInternal() { var Super = (0, _getPrototypeOf2.default)(Derived), result; if (hasNativeReflectConstruct) { var NewTarget = (0, _getPrototypeOf2.default)(this).constructor; result = Reflect.construct(Super, arguments, NewTarget); } else { result = Super.apply(this, arguments); } return (0, _possibleConstructorReturn2.default)(this, result); }; }
-
- function _isNativeReflectConstruct() { if (typeof Reflect === "undefined" || !Reflect.construct) return false; if (Reflect.construct.sham) return false; if (typeof Proxy === "function") return true; try { Boolean.prototype.valueOf.call(Reflect.construct(Boolean, [], function () {})); return true; } catch (e) { return false; } }
-
- // https://docs.microsoft.com/en-us/javascript/api/microsoft-cognitiveservices-speech-sdk/speechconfig?view=azure-node-latest#outputformat
- // {
- // "RecognitionStatus": "Success",
- // "Offset": 900000,
- // "Duration": 49000000,
- // "NBest": [
- // {
- // "Confidence": 0.738919,
- // "Lexical": "second",
- // "ITN": "second",
- // "MaskedITN": "second",
- // "Display": "Second."
- // }
- // ]
- // }
- // {
- // "RecognitionStatus": "InitialSilenceTimeout",
- // "Offset": 50000000,
- // "Duration": 0
- // }
- var AudioConfig = _SpeechSDK.default.AudioConfig,
- OutputFormat = _SpeechSDK.default.OutputFormat,
- ResultReason = _SpeechSDK.default.ResultReason,
- SpeechConfig = _SpeechSDK.default.SpeechConfig,
- SpeechRecognizer = _SpeechSDK.default.SpeechRecognizer;
-
- function serializeRecognitionResult(_ref) {
- var duration = _ref.duration,
- errorDetails = _ref.errorDetails,
- json = _ref.json,
- offset = _ref.offset,
- properties = _ref.properties,
- reason = _ref.reason,
- resultId = _ref.resultId,
- text = _ref.text;
- return {
- duration: duration,
- errorDetails: errorDetails,
- json: JSON.parse(json),
- offset: offset,
- properties: properties,
- reason: reason,
- resultId: resultId,
- text: text
- };
- }
-
- function averageAmplitude(arrayBuffer) {
- var array = new Int16Array(arrayBuffer);
- return [].reduce.call(array, function (averageAmplitude, amplitude) {
- return averageAmplitude + Math.abs(amplitude);
- }, 0) / array.length;
- }
-
- function cognitiveServicesAsyncToPromise(fn) {
- return function () {
- for (var _len = arguments.length, args = new Array(_len), _key = 0; _key < _len; _key++) {
- args[_key] = arguments[_key];
- }
-
- return new Promise(function (resolve, reject) {
- return fn.apply(void 0, args.concat([resolve, reject]));
- });
- };
- }
-
- var SpeechRecognitionEvent = /*#__PURE__*/function (_Event) {
- (0, _inherits2.default)(SpeechRecognitionEvent, _Event);
-
- var _super = _createSuper(SpeechRecognitionEvent);
-
- function SpeechRecognitionEvent(type) {
- var _this;
-
- var _ref2 = arguments.length > 1 && arguments[1] !== undefined ? arguments[1] : {},
- data = _ref2.data,
- emma = _ref2.emma,
- interpretation = _ref2.interpretation,
- resultIndex = _ref2.resultIndex,
- results = _ref2.results;
-
- (0, _classCallCheck2.default)(this, SpeechRecognitionEvent);
- _this = _super.call(this, type);
- _this.data = data;
- _this.emma = emma;
- _this.interpretation = interpretation;
- _this.resultIndex = resultIndex;
- _this.results = results;
- return _this;
- }
-
- return (0, _createClass2.default)(SpeechRecognitionEvent);
- }(_es.Event);
-
- function prepareAudioConfig(audioConfig) {
- var originalAttach = audioConfig.attach;
- var boundOriginalAttach = audioConfig.attach.bind(audioConfig);
- var firstChunk;
- var muted; // We modify "attach" function and detect when audible chunk is read.
- // We will only modify "attach" function once.
-
- audioConfig.attach = /*#__PURE__*/(0, _asyncToGenerator2.default)( /*#__PURE__*/_regenerator.default.mark(function _callee2() {
- var reader;
- return _regenerator.default.wrap(function _callee2$(_context2) {
- while (1) {
- switch (_context2.prev = _context2.next) {
- case 0:
- _context2.next = 2;
- return boundOriginalAttach();
-
- case 2:
- reader = _context2.sent;
- return _context2.abrupt("return", _objectSpread(_objectSpread({}, reader), {}, {
- read: function () {
- var _read = (0, _asyncToGenerator2.default)( /*#__PURE__*/_regenerator.default.mark(function _callee() {
- var chunk;
- return _regenerator.default.wrap(function _callee$(_context) {
- while (1) {
- switch (_context.prev = _context.next) {
- case 0:
- _context.next = 2;
- return reader.read();
-
- case 2:
- chunk = _context.sent;
-
- // The magic number 150 is measured by:
- // 1. Set microphone volume to 0
- // 2. Observe the amplitude (100-110) for the first few chunks
- // (There is a short static caught when turning on the microphone)
- // 3. Set the number a bit higher than the observation
- if (!firstChunk && averageAmplitude(chunk.buffer) > 150) {
- audioConfig.events.onEvent({
- name: 'FirstAudibleChunk'
- });
- firstChunk = true;
- }
-
- if (!muted) {
- _context.next = 6;
- break;
- }
-
- return _context.abrupt("return", {
- buffer: new ArrayBuffer(0),
- isEnd: true,
- timeReceived: Date.now()
- });
-
- case 6:
- return _context.abrupt("return", chunk);
-
- case 7:
- case "end":
- return _context.stop();
- }
- }
- }, _callee);
- }));
-
- function read() {
- return _read.apply(this, arguments);
- }
-
- return read;
- }()
- }));
-
- case 4:
- case "end":
- return _context2.stop();
- }
- }
- }, _callee2);
- }));
- return {
- audioConfig: audioConfig,
- pause: function pause() {
- muted = true;
- },
- unprepare: function unprepare() {
- audioConfig.attach = originalAttach;
- }
- };
- }
-
- function createSpeechRecognitionPonyfillFromRecognizer(_ref4) {
- var createRecognizer = _ref4.createRecognizer,
- enableTelemetry = _ref4.enableTelemetry,
- looseEvents = _ref4.looseEvents,
- referenceGrammars = _ref4.referenceGrammars,
- textNormalization = _ref4.textNormalization;
- // If enableTelemetry is set to null or non-boolean, we will default to true.
- SpeechRecognizer.enableTelemetry(enableTelemetry !== false);
-
- var SpeechRecognition = /*#__PURE__*/function (_EventTarget) {
- (0, _inherits2.default)(SpeechRecognition, _EventTarget);
-
- var _super2 = _createSuper(SpeechRecognition);
-
- function SpeechRecognition() {
- var _this2;
-
- (0, _classCallCheck2.default)(this, SpeechRecognition);
- _this2 = _super2.call(this);
- _this2._continuous = false;
- _this2._interimResults = false;
- _this2._lang = typeof window !== 'undefined' ? window.document.documentElement.getAttribute('lang') || window.navigator.language : 'en-US';
- _this2._grammars = new _SpeechGrammarList.default();
- _this2._maxAlternatives = 1;
- return _this2;
- }
-
- (0, _createClass2.default)(SpeechRecognition, [{
- key: "emitCognitiveServices",
- value: function emitCognitiveServices(type, event) {
- this.dispatchEvent(new SpeechRecognitionEvent('cognitiveservices', {
- data: _objectSpread(_objectSpread({}, event), {}, {
- type: type
- })
- }));
- }
- }, {
- key: "continuous",
- get: function get() {
- return this._continuous;
- },
- set: function set(value) {
- this._continuous = value;
- }
- }, {
- key: "grammars",
- get: function get() {
- return this._grammars;
- },
- set: function set(value) {
- if (value instanceof _SpeechGrammarList.default) {
- this._grammars = value;
- } else {
- throw new Error("The provided value is not of type 'SpeechGrammarList'");
- }
- }
- }, {
- key: "interimResults",
- get: function get() {
- return this._interimResults;
- },
- set: function set(value) {
- this._interimResults = value;
- }
- }, {
- key: "maxAlternatives",
- get: function get() {
- return this._maxAlternatives;
- },
- set: function set(value) {
- this._maxAlternatives = value;
- }
- }, {
- key: "lang",
- get: function get() {
- return this._lang;
- },
- set: function set(value) {
- this._lang = value;
- }
- }, {
- key: "onaudioend",
- get: function get() {
- return (0, _es.getEventAttributeValue)(this, 'audioend');
- },
- set: function set(value) {
- (0, _es.setEventAttributeValue)(this, 'audioend', value);
- }
- }, {
- key: "onaudiostart",
- get: function get() {
- return (0, _es.getEventAttributeValue)(this, 'audiostart');
- },
- set: function set(value) {
- (0, _es.setEventAttributeValue)(this, 'audiostart', value);
- }
- }, {
- key: "oncognitiveservices",
- get: function get() {
- return (0, _es.getEventAttributeValue)(this, 'cognitiveservices');
- },
- set: function set(value) {
- (0, _es.setEventAttributeValue)(this, 'cognitiveservices', value);
- }
- }, {
- key: "onend",
- get: function get() {
- return (0, _es.getEventAttributeValue)(this, 'end');
- },
- set: function set(value) {
- (0, _es.setEventAttributeValue)(this, 'end', value);
- }
- }, {
- key: "onerror",
- get: function get() {
- return (0, _es.getEventAttributeValue)(this, 'error');
- },
- set: function set(value) {
- (0, _es.setEventAttributeValue)(this, 'error', value);
- }
- }, {
- key: "onresult",
- get: function get() {
- return (0, _es.getEventAttributeValue)(this, 'result');
- },
- set: function set(value) {
- (0, _es.setEventAttributeValue)(this, 'result', value);
- }
- }, {
- key: "onsoundend",
- get: function get() {
- return (0, _es.getEventAttributeValue)(this, 'soundend');
- },
- set: function set(value) {
- (0, _es.setEventAttributeValue)(this, 'soundend', value);
- }
- }, {
- key: "onsoundstart",
- get: function get() {
- return (0, _es.getEventAttributeValue)(this, 'soundstart');
- },
- set: function set(value) {
- (0, _es.setEventAttributeValue)(this, 'soundstart', value);
- }
- }, {
- key: "onspeechend",
- get: function get() {
- return (0, _es.getEventAttributeValue)(this, 'speechend');
- },
- set: function set(value) {
- (0, _es.setEventAttributeValue)(this, 'speechend', value);
- }
- }, {
- key: "onspeechstart",
- get: function get() {
- return (0, _es.getEventAttributeValue)(this, 'speechstart');
- },
- set: function set(value) {
- (0, _es.setEventAttributeValue)(this, 'speechstart', value);
- }
- }, {
- key: "onstart",
- get: function get() {
- return (0, _es.getEventAttributeValue)(this, 'start');
- },
- set: function set(value) {
- (0, _es.setEventAttributeValue)(this, 'start', value);
- }
- }, {
- key: "start",
- value: function start() {
- var _this3 = this;
-
- this._startOnce().catch(function (err) {
- _this3.dispatchEvent(new ErrorEvent('error', {
- error: err,
- message: err && (err.stack || err.message)
- }));
- });
- }
- }, {
- key: "_startOnce",
- value: function () {
- var _startOnce2 = (0, _asyncToGenerator2.default)( /*#__PURE__*/_regenerator.default.mark(function _callee3() {
- var _this4 = this;
-
- var recognizer, _prepareAudioConfig, pause, unprepare, queue, soundStarted, speechStarted, stopping, _recognizer$audioConf, detachAudioConfigEvent, phrases, dynamicGrammar, audioStarted, finalEvent, finalizedResults, _loop, loop, _ret;
-
- return _regenerator.default.wrap(function _callee3$(_context4) {
- while (1) {
- switch (_context4.prev = _context4.next) {
- case 0:
- _context4.next = 2;
- return createRecognizer(this.lang);
-
- case 2:
- recognizer = _context4.sent;
- _prepareAudioConfig = prepareAudioConfig(recognizer.audioConfig), pause = _prepareAudioConfig.pause, unprepare = _prepareAudioConfig.unprepare;
- _context4.prev = 4;
- queue = (0, _createPromiseQueue.default)();
- _recognizer$audioConf = recognizer.audioConfig.events.attach(function (event) {
- var name = event.name;
-
- if (name === 'AudioSourceReadyEvent') {
- queue.push({
- audioSourceReady: {}
- });
- } else if (name === 'AudioSourceOffEvent') {
- queue.push({
- audioSourceOff: {}
- });
- } else if (name === 'FirstAudibleChunk') {
- queue.push({
- firstAudibleChunk: {}
- });
- }
- }), detachAudioConfigEvent = _recognizer$audioConf.detach;
-
- recognizer.canceled = function (_, _ref5) {
- var errorDetails = _ref5.errorDetails,
- offset = _ref5.offset,
- reason = _ref5.reason,
- sessionId = _ref5.sessionId;
- queue.push({
- canceled: {
- errorDetails: errorDetails,
- offset: offset,
- reason: reason,
- sessionId: sessionId
- }
- });
- };
-
- recognizer.recognized = function (_, _ref6) {
- var offset = _ref6.offset,
- result = _ref6.result,
- sessionId = _ref6.sessionId;
- queue.push({
- recognized: {
- offset: offset,
- result: serializeRecognitionResult(result),
- sessionId: sessionId
- }
- });
- };
-
- recognizer.recognizing = function (_, _ref7) {
- var offset = _ref7.offset,
- result = _ref7.result,
- sessionId = _ref7.sessionId;
- queue.push({
- recognizing: {
- offset: offset,
- result: serializeRecognitionResult(result),
- sessionId: sessionId
- }
- });
- };
-
- recognizer.sessionStarted = function (_, _ref8) {
- var sessionId = _ref8.sessionId;
- queue.push({
- sessionStarted: {
- sessionId: sessionId
- }
- });
- };
-
- recognizer.sessionStopped = function (_, _ref9) {
- var sessionId = _ref9.sessionId;
- // "sessionStopped" is never fired, probably because we are using startContinuousRecognitionAsync instead of recognizeOnceAsync.
- queue.push({
- sessionStopped: {
- sessionId: sessionId
- }
- });
- };
-
- recognizer.speechStartDetected = function (_, _ref10) {
- var offset = _ref10.offset,
- sessionId = _ref10.sessionId;
- queue.push({
- speechStartDetected: {
- offset: offset,
- sessionId: sessionId
- }
- });
- };
-
- recognizer.speechEndDetected = function (_, _ref11) {
- var sessionId = _ref11.sessionId;
- // "speechEndDetected" is never fired, probably because we are using startContinuousRecognitionAsync instead of recognizeOnceAsync.
- // Update: "speechEndDetected" is fired for DLSpeech.listenOnceAsync()
- queue.push({
- speechEndDetected: {
- sessionId: sessionId
- }
- });
- };
-
- phrases = this.grammars.phrases; // HACK: We are using the internal of SpeechRecognizer because they did not expose it
-
- dynamicGrammar = recognizer.privReco.dynamicGrammar;
- referenceGrammars && referenceGrammars.length && dynamicGrammar.addReferenceGrammar(referenceGrammars);
- phrases && phrases.length && dynamicGrammar.addPhrase(phrases);
- _context4.next = 20;
- return cognitiveServicesAsyncToPromise(recognizer.startContinuousRecognitionAsync.bind(recognizer))();
-
- case 20:
- if (recognizer.stopContinuousRecognitionAsync) {
- this.abort = function () {
- return queue.push({
- abort: {}
- });
- };
-
- this.stop = function () {
- return queue.push({
- stop: {}
- });
- };
- } else {
- this.abort = this.stop = undefined;
- }
-
- finalizedResults = [];
- _loop = /*#__PURE__*/_regenerator.default.mark(function _loop(loop) {
- var event, abort, audioSourceOff, audioSourceReady, canceled, firstAudibleChunk, recognized, recognizing, stop, errorMessage, result, recognizable;
- return _regenerator.default.wrap(function _loop$(_context3) {
- while (1) {
- switch (_context3.prev = _context3.next) {
- case 0:
- _context3.next = 2;
- return queue.shift();
-
- case 2:
- event = _context3.sent;
- abort = event.abort, audioSourceOff = event.audioSourceOff, audioSourceReady = event.audioSourceReady, canceled = event.canceled, firstAudibleChunk = event.firstAudibleChunk, recognized = event.recognized, recognizing = event.recognizing, stop = event.stop; // We are emitting event "cognitiveservices" for debugging purpose.
-
- Object.keys(event).forEach(function (name) {
- return _this4.emitCognitiveServices(name, event[name]);
- });
- errorMessage = canceled && canceled.errorDetails;
-
- if (!/Permission[\t-\r \xA0\u1680\u2000-\u200A\u2028\u2029\u202F\u205F\u3000\uFEFF]denied/.test(errorMessage || '')) {
- _context3.next = 9;
- break;
- }
-
- // If microphone is not allowed, we should not emit "start" event.
- finalEvent = {
- error: 'not-allowed',
- type: 'error'
- };
- return _context3.abrupt("return", "break");
-
- case 9:
- if (!loop) {
- _this4.dispatchEvent(new SpeechRecognitionEvent('start'));
- }
-
- if (!errorMessage) {
- _context3.next = 15;
- break;
- }
-
- if (/1006/.test(errorMessage)) {
- if (!audioStarted) {
- _this4.dispatchEvent(new SpeechRecognitionEvent('audiostart'));
-
- _this4.dispatchEvent(new SpeechRecognitionEvent('audioend'));
- }
-
- finalEvent = {
- error: 'network',
- type: 'error'
- };
- } else {
- finalEvent = {
- error: 'unknown',
- type: 'error'
- };
- }
-
- return _context3.abrupt("return", "break");
-
- case 15:
- if (!(abort || stop)) {
- _context3.next = 22;
- break;
- }
-
- if (abort) {
- finalEvent = {
- error: 'aborted',
- type: 'error'
- }; // If we are aborting, we will ignore lingering recognizing/recognized events. But if we are stopping, we need them.
-
- stopping = 'abort';
- } else {
- // When we pause, we will send { isEnd: true }, Speech Services will send us "recognized" event.
- pause();
- stopping = 'stop';
- } // Abort should not be dispatched without support of "stopContinuousRecognitionAsync".
- // But for defensive purpose, we make sure "stopContinuousRecognitionAsync" is available before we can call.
-
-
- if (!(abort && recognizer.stopContinuousRecognitionAsync)) {
- _context3.next = 20;
- break;
- }
-
- _context3.next = 20;
- return cognitiveServicesAsyncToPromise(recognizer.stopContinuousRecognitionAsync.bind(recognizer))();
-
- case 20:
- _context3.next = 61;
- break;
-
- case 22:
- if (!audioSourceReady) {
- _context3.next = 27;
- break;
- }
-
- _this4.dispatchEvent(new SpeechRecognitionEvent('audiostart'));
-
- audioStarted = true;
- _context3.next = 61;
- break;
-
- case 27:
- if (!firstAudibleChunk) {
- _context3.next = 32;
- break;
- }
-
- _this4.dispatchEvent(new SpeechRecognitionEvent('soundstart'));
-
- soundStarted = true;
- _context3.next = 61;
- break;
-
- case 32:
- if (!audioSourceOff) {
- _context3.next = 40;
- break;
- }
-
- // Looks like we don't need this line and all the tests are still working.
- // Guessing probably stopping is already truthy.
- // stopping = true;
- speechStarted && _this4.dispatchEvent(new SpeechRecognitionEvent('speechend'));
- soundStarted && _this4.dispatchEvent(new SpeechRecognitionEvent('soundend'));
- audioStarted && _this4.dispatchEvent(new SpeechRecognitionEvent('audioend'));
- audioStarted = soundStarted = speechStarted = false;
- return _context3.abrupt("return", "break");
-
- case 40:
- if (!(stopping !== 'abort')) {
- _context3.next = 61;
- break;
- }
-
- if (!(recognized && recognized.result && recognized.result.reason === ResultReason.NoMatch)) {
- _context3.next = 45;
- break;
- }
-
- finalEvent = {
- error: 'no-speech',
- type: 'error'
- };
- _context3.next = 61;
- break;
-
- case 45:
- if (!(recognized || recognizing)) {
- _context3.next = 61;
- break;
- }
-
- if (!audioStarted) {
- // Unconfirmed prevention of quirks
- _this4.dispatchEvent(new SpeechRecognitionEvent('audiostart'));
-
- audioStarted = true;
- }
-
- if (!soundStarted) {
- _this4.dispatchEvent(new SpeechRecognitionEvent('soundstart'));
-
- soundStarted = true;
- }
-
- if (!speechStarted) {
- _this4.dispatchEvent(new SpeechRecognitionEvent('speechstart'));
-
- speechStarted = true;
- }
-
- if (!recognized) {
- _context3.next = 60;
- break;
- }
-
- result = (0, _cognitiveServiceEventResultToWebSpeechRecognitionResultList.default)(recognized.result, {
- maxAlternatives: _this4.maxAlternatives,
- textNormalization: textNormalization
- });
- recognizable = !!result[0].transcript;
-
- if (recognizable) {
- finalizedResults = [].concat((0, _toConsumableArray2.default)(finalizedResults), [result]);
- _this4.continuous && _this4.dispatchEvent(new SpeechRecognitionEvent('result', {
- results: finalizedResults
- }));
- } // If it is continuous, we just sent the finalized results. So we don't need to send it again after "audioend" event.
-
-
- if (_this4.continuous && recognizable) {
- finalEvent = null;
- } else {
- finalEvent = {
- results: finalizedResults,
- type: 'result'
- };
- }
-
- if (!(!_this4.continuous && recognizer.stopContinuousRecognitionAsync)) {
- _context3.next = 57;
- break;
- }
-
- _context3.next = 57;
- return cognitiveServicesAsyncToPromise(recognizer.stopContinuousRecognitionAsync.bind(recognizer))();
-
- case 57:
- // If event order can be loosened, we can send the recognized event as soon as we receive it.
- // 1. If it is not recognizable (no-speech), we should send an "error" event just before "end" event. We will not loosen "error" events.
- if (looseEvents && finalEvent && recognizable) {
- _this4.dispatchEvent(new SpeechRecognitionEvent(finalEvent.type, finalEvent));
-
- finalEvent = null;
- }
-
- _context3.next = 61;
- break;
-
- case 60:
- if (recognizing) {
- _this4.interimResults && _this4.dispatchEvent(new SpeechRecognitionEvent('result', {
- results: [].concat((0, _toConsumableArray2.default)(finalizedResults), [(0, _cognitiveServiceEventResultToWebSpeechRecognitionResultList.default)(recognizing.result, {
- maxAlternatives: _this4.maxAlternatives,
- textNormalization: textNormalization
- })])
- }));
- }
-
- case 61:
- case "end":
- return _context3.stop();
- }
- }
- }, _loop);
- });
- loop = 0;
-
- case 24:
- if (!(!stopping || audioStarted)) {
- _context4.next = 32;
- break;
- }
-
- return _context4.delegateYield(_loop(loop), "t0", 26);
-
- case 26:
- _ret = _context4.t0;
-
- if (!(_ret === "break")) {
- _context4.next = 29;
- break;
- }
-
- return _context4.abrupt("break", 32);
-
- case 29:
- loop++;
- _context4.next = 24;
- break;
-
- case 32:
- if (speechStarted) {
- this.dispatchEvent(new SpeechRecognitionEvent('speechend'));
- }
-
- if (soundStarted) {
- this.dispatchEvent(new SpeechRecognitionEvent('soundend'));
- }
-
- if (audioStarted) {
- this.dispatchEvent(new SpeechRecognitionEvent('audioend'));
- }
-
- if (finalEvent) {
- if (finalEvent.type === 'result' && !finalEvent.results.length) {
- finalEvent = {
- error: 'no-speech',
- type: 'error'
- };
- }
-
- if (finalEvent.type === 'error') {
- this.dispatchEvent(new ErrorEvent('error', finalEvent));
- } else {
- this.dispatchEvent(new SpeechRecognitionEvent(finalEvent.type, finalEvent));
- }
- } // Even though there is no "start" event emitted, we will still emit "end" event
- // This is mainly for "microphone blocked" story.
-
-
- this.dispatchEvent(new SpeechRecognitionEvent('end'));
- detachAudioConfigEvent();
- _context4.next = 44;
- break;
-
- case 40:
- _context4.prev = 40;
- _context4.t1 = _context4["catch"](4);
- // Logging out the erorr because Speech SDK would fail silently.
- console.error(_context4.t1);
- throw _context4.t1;
-
- case 44:
- _context4.prev = 44;
- unprepare();
- recognizer.dispose();
- return _context4.finish(44);
-
- case 48:
- case "end":
- return _context4.stop();
- }
- }
- }, _callee3, this, [[4, 40, 44, 48]]);
- }));
-
- function _startOnce() {
- return _startOnce2.apply(this, arguments);
- }
-
- return _startOnce;
- }()
- }]);
- return SpeechRecognition;
- }(_es.EventTarget);
-
- return {
- SpeechGrammarList: _SpeechGrammarList.default,
- SpeechRecognition: SpeechRecognition,
- SpeechRecognitionEvent: SpeechRecognitionEvent
- };
- }
-
- var _default = function _default(options) {
- var _patchOptions = (0, _patchOptions2.default)(options),
- _patchOptions$audioCo = _patchOptions.audioConfig,
- audioConfig = _patchOptions$audioCo === void 0 ? AudioConfig.fromDefaultMicrophoneInput() : _patchOptions$audioCo,
- _patchOptions$enableT = _patchOptions.enableTelemetry,
- enableTelemetry = _patchOptions$enableT === void 0 ? true : _patchOptions$enableT,
- fetchCredentials = _patchOptions.fetchCredentials,
- looseEvents = _patchOptions.looseEvents,
- referenceGrammars = _patchOptions.referenceGrammars,
- speechRecognitionEndpointId = _patchOptions.speechRecognitionEndpointId,
- _patchOptions$textNor = _patchOptions.textNormalization,
- textNormalization = _patchOptions$textNor === void 0 ? 'display' : _patchOptions$textNor;
-
- if (!audioConfig && (!window.navigator.mediaDevices || !window.navigator.mediaDevices.getUserMedia)) {
- console.warn('web-speech-cognitive-services: This browser does not support WebRTC and it will not work with Cognitive Services Speech Services.');
- return {};
- }
-
- var createRecognizer = /*#__PURE__*/function () {
- var _ref12 = (0, _asyncToGenerator2.default)( /*#__PURE__*/_regenerator.default.mark(function _callee4(lang) {
- var _yield$fetchCredentia, authorizationToken, region, speechRecognitionHostname, subscriptionKey, speechConfig, host;
-
- return _regenerator.default.wrap(function _callee4$(_context5) {
- while (1) {
- switch (_context5.prev = _context5.next) {
- case 0:
- _context5.next = 2;
- return fetchCredentials();
-
- case 2:
- _yield$fetchCredentia = _context5.sent;
- authorizationToken = _yield$fetchCredentia.authorizationToken;
- region = _yield$fetchCredentia.region;
- speechRecognitionHostname = _yield$fetchCredentia.speechRecognitionHostname;
- subscriptionKey = _yield$fetchCredentia.subscriptionKey;
-
- if (speechRecognitionHostname) {
- host = {
- hostname: speechRecognitionHostname,
- port: 443,
- protocol: 'wss:'
- };
-
- if (authorizationToken) {
- speechConfig = SpeechConfig.fromHost(host);
- speechConfig.authorizationToken = authorizationToken;
- } else {
- speechConfig = SpeechConfig.fromHost(host, subscriptionKey);
- }
- } else {
- speechConfig = authorizationToken ? SpeechConfig.fromAuthorizationToken(authorizationToken, region) : SpeechConfig.fromSubscription(subscriptionKey, region);
- }
-
- if (speechRecognitionEndpointId) {
- speechConfig.endpointId = speechRecognitionEndpointId;
- }
-
- speechConfig.outputFormat = OutputFormat.Detailed;
- speechConfig.speechRecognitionLanguage = lang || 'en-US';
- return _context5.abrupt("return", new SpeechRecognizer(speechConfig, audioConfig));
-
- case 12:
- case "end":
- return _context5.stop();
-
- }
- }, _callee4);
- }));
-
- return function createRecognizer(_x) {
- return _ref12.apply(this, arguments);
- };
- }();
-
- return createSpeechRecognitionPonyfillFromRecognizer({
- audioConfig: audioConfig,
- createRecognizer: createRecognizer,
- enableTelemetry: enableTelemetry,
- looseEvents: looseEvents,
- referenceGrammars: referenceGrammars,
- textNormalization: textNormalization
- });
- };
-
- exports.default = _default;
- //# sourceMappingURL=createSpeechRecognitionPonyfill.js.map
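For readability, the two small helpers near the top of the removed file correspond to the following modern JavaScript. This is an illustrative back-translation of the Babel output above, not code shipped in either version.

// Mean absolute sample value of a 16-bit PCM chunk; the caller in the removed
// file treats anything above ~150 as the first audible chunk.
function averageAmplitude(arrayBuffer) {
  const array = new Int16Array(arrayBuffer);

  return array.reduce((sum, amplitude) => sum + Math.abs(amplitude), 0) / array.length;
}

// Adapts the Speech SDK's callback-style async methods (success and error
// callbacks appended to the argument list) into Promise-returning functions.
function cognitiveServicesAsyncToPromise(fn) {
  return (...args) => new Promise((resolve, reject) => fn(...args, resolve, reject));
}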