web-speech-cognitive-services 8.0.0-main.428d2a8 → 8.0.0-main.5903868
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/web-speech-cognitive-services.d.mts +123 -26
- package/dist/web-speech-cognitive-services.d.ts +123 -26
- package/dist/web-speech-cognitive-services.development.js +1339 -1251
- package/dist/web-speech-cognitive-services.development.js.map +1 -1
- package/dist/web-speech-cognitive-services.js +225 -140
- package/dist/web-speech-cognitive-services.js.map +1 -1
- package/dist/web-speech-cognitive-services.mjs +220 -135
- package/dist/web-speech-cognitive-services.mjs.map +1 -1
- package/dist/web-speech-cognitive-services.production.min.js +12 -12
- package/dist/web-speech-cognitive-services.production.min.js.map +1 -1
- package/package.json +2 -2
```diff
@@ -38,9 +38,6 @@ __export(src_exports, {
 });
 module.exports = __toCommonJS(src_exports);
 
-// src/SpeechServices/SpeechToText/createSpeechRecognitionPonyfill.js
-var import_event_target_shim = require("event-target-shim");
-
 // ../../node_modules/p-defer/index.js
 function pDefer() {
   const deferred = {};
```
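Note: the deleted `event-target-shim` import is not re-added anywhere on the speech-to-text side; later hunks show `SpeechRecognition` extending the platform's built-in `EventTarget` instead. A minimal sketch of the native pattern the new bundle relies on (the `Recognizer` name is a hypothetical stand-in, not from the package):

```js
// Native EventTarget/Event, available in modern browsers and Node.js 15+.
// "Recognizer" is an illustrative name only.
class Recognizer extends EventTarget {}

const recognizer = new Recognizer();

recognizer.addEventListener('done', event => console.log(event.type)); // "done"
recognizer.dispatchEvent(new Event('done'));
```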
```diff
@@ -169,53 +166,111 @@ var SpeechSDK_default = {
   SpeechRecognizer: import_microsoft_cognitiveservices_speech.SpeechRecognizer
 };
 
-// src/
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+// src/SpeechServices/SpeechToText/SpeechRecognitionAlternative.ts
+var SpeechRecognitionAlternative = class {
+  constructor({ confidence, transcript }) {
+    this.#confidence = confidence;
+    this.#transcript = transcript;
+  }
+  #confidence;
+  #transcript;
+  get confidence() {
+    return this.#confidence;
+  }
+  get transcript() {
+    return this.#transcript;
+  }
+};
+
+// src/SpeechServices/SpeechToText/FakeArray.ts
+var FakeArray = class {
+  constructor(array) {
+    if (!array) {
+      throw new Error("array must be set.");
+    }
+    this.#array = array;
+    for (const key in array) {
+      Object.defineProperty(this, key, {
+        enumerable: true,
+        get() {
+          return array[key];
+        }
+      });
+    }
+  }
+  #array;
+  [Symbol.iterator]() {
+    return this.#array[Symbol.iterator]();
+  }
+  get length() {
+    return this.#array.length;
+  }
+};
+
+// src/SpeechServices/SpeechToText/SpeechRecognitionResult.ts
+var SpeechRecognitionResult = class extends FakeArray {
+  constructor(init) {
+    super(init.results);
+    this.#isFinal = init.isFinal;
+  }
+  #isFinal;
+  get isFinal() {
+    return this.#isFinal;
+  }
+};
 
-// src/SpeechServices/SpeechToText/
+// src/SpeechServices/SpeechToText/cognitiveServiceEventResultToWebSpeechRecognitionResult.ts
 var {
   ResultReason: { RecognizingSpeech, RecognizedSpeech }
 } = SpeechSDK_default;
-function
-
-
-
-
-
-
-
-
-
-
-
+function cognitiveServiceEventResultToWebSpeechRecognitionResult_default(result, init) {
+  const { maxAlternatives = Infinity, textNormalization = "display" } = init || {};
+  const json = typeof result.json === "string" ? JSON.parse(result.json) : result.json;
+  if (result.reason === RecognizingSpeech || result.reason === RecognizedSpeech && !json.NBest) {
+    return new SpeechRecognitionResult({
+      isFinal: result.reason === RecognizedSpeech,
+      results: [
+        new SpeechRecognitionAlternative({
+          confidence: 0.5,
+          transcript: result.text
+        })
+      ]
+    });
   } else if (result.reason === RecognizedSpeech) {
-
-
-
-
-
-
-
-
+    return new SpeechRecognitionResult({
+      isFinal: true,
+      results: (json.NBest || []).slice(0, maxAlternatives).map(
+        ({ Confidence: confidence, Display: display, ITN: itn, Lexical: lexical, MaskedITN: maskedITN }) => new SpeechRecognitionAlternative({
+          confidence,
+          transcript: textNormalization === "itn" ? itn : textNormalization === "lexical" ? lexical : textNormalization === "maskeditn" ? maskedITN : display
+        })
+      )
+    });
   }
-  return [];
+  return new SpeechRecognitionResult({ isFinal: false, results: [] });
 }
 
+// src/SpeechServices/SpeechToText/EventListenerMap.ts
+var EventListenerMap = class {
+  constructor(eventTarget) {
+    this.#eventTarget = eventTarget;
+    this.#propertyMap = {};
+  }
+  #eventTarget;
+  #propertyMap;
+  getProperty(name) {
+    return this.#propertyMap[name];
+  }
+  setProperty(name, value) {
+    const existing = this.#propertyMap[name];
+    existing && this.#eventTarget.removeEventListener(name, existing);
+    if (value) {
+      this.#eventTarget.addEventListener(name, value);
+    }
+    this.#propertyMap[name] = value;
+  }
+};
+
 // src/SpeechServices/SpeechToText/SpeechGrammarList.js
 var SpeechGrammarList_default = class {
   constructor() {
```
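The new `cognitiveServiceEventResultToWebSpeechRecognitionResult_default` maps a Speech SDK result into W3C-shaped objects: interim results become a single alternative with a fixed 0.5 confidence, while final results expand the `NBest` list (capped by `maxAlternatives`) and pick the transcript field according to `textNormalization`. A standalone sketch of the `NBest` mapping, with illustrative data:

```js
// Sketch of the NBest mapping performed above; the input shape mirrors the
// Speech SDK's "detailed" output format. All data here is illustrative.
const json = {
  NBest: [
    { Confidence: 0.94, Display: 'Hello, World!', ITN: 'hello world', Lexical: 'hello world', MaskedITN: 'hello world' },
    { Confidence: 0.61, Display: 'Hello, word!', ITN: 'hello word', Lexical: 'hello word', MaskedITN: 'hello word' }
  ]
};

const maxAlternatives = 1;
const textNormalization = 'display'; // or 'itn' | 'lexical' | 'maskeditn'

const alternatives = (json.NBest || []).slice(0, maxAlternatives).map(
  ({ Confidence: confidence, Display: display, ITN: itn, Lexical: lexical, MaskedITN: maskedITN }) => ({
    confidence,
    transcript:
      textNormalization === 'itn' ? itn :
      textNormalization === 'lexical' ? lexical :
      textNormalization === 'maskeditn' ? maskedITN : display
  })
);

console.log(alternatives); // [{ confidence: 0.94, transcript: 'Hello, World!' }]
```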
```diff
@@ -238,6 +293,53 @@ var SpeechGrammarList_default = class {
   }
 };
 
+// src/SpeechServices/SpeechToText/SpeechRecognitionErrorEvent.ts
+var SpeechRecognitionErrorEvent = class extends Event {
+  constructor(type, { error, message }) {
+    super(type);
+    this.#error = error;
+    this.#message = message;
+  }
+  #error;
+  #message;
+  get error() {
+    return this.#error;
+  }
+  get message() {
+    return this.#message;
+  }
+};
+
+// src/SpeechServices/SpeechToText/SpeechRecognitionResultList.ts
+var SpeechRecognitionResultList = class extends FakeArray {
+  constructor(result) {
+    super(result);
+  }
+};
+
+// src/SpeechServices/SpeechToText/SpeechRecognitionEvent.ts
+var SpeechRecognitionEvent = class extends Event {
+  constructor(type, { data, resultIndex, results } = {}) {
+    super(type);
+    this.#data = data;
+    this.#resultIndex = resultIndex;
+    this.#results = results || new SpeechRecognitionResultList([]);
+  }
+  #data;
+  // TODO: "resultIndex" should be set.
+  #resultIndex;
+  #results;
+  get data() {
+    return this.#data;
+  }
+  get resultIndex() {
+    return this.#resultIndex;
+  }
+  get results() {
+    return this.#results;
+  }
+};
+
 // src/SpeechServices/SpeechToText/createSpeechRecognitionPonyfill.js
 var { AudioConfig: AudioConfig2, OutputFormat: OutputFormat2, ResultReason: ResultReason2, SpeechConfig: SpeechConfig2, SpeechRecognizer: SpeechRecognizer2 } = SpeechSDK_default;
 function serializeRecognitionResult({ duration, errorDetails, json, offset, properties, reason, resultId, text }) {
```
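`FakeArray` is what lets `SpeechRecognitionResult` and `SpeechRecognitionResultList` behave like the read-only array-likes the Web Speech API specifies, without exposing a mutable backing array. A trimmed sketch of the technique:

```js
// Sketch of the FakeArray technique used above: expose index access, length,
// and iteration over a private backing array. Mirrors the diff, trimmed for brevity.
class FakeArray {
  #array;
  constructor(array) {
    this.#array = array;
    for (const key in array) {
      // One read-only getter per index, closing over the backing array.
      Object.defineProperty(this, key, { enumerable: true, get: () => array[key] });
    }
  }
  [Symbol.iterator]() {
    return this.#array[Symbol.iterator]();
  }
  get length() {
    return this.#array.length;
  }
}

const list = new FakeArray(['a', 'b']);

console.log(list[0], list.length, [...list]); // 'a' 2 [ 'a', 'b' ]
```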
```diff
@@ -259,16 +361,6 @@ function averageAmplitude(arrayBuffer) {
 function cognitiveServicesAsyncToPromise(fn) {
   return (...args) => new Promise((resolve, reject) => fn(...args, resolve, reject));
 }
-var SpeechRecognitionEvent = class extends import_event_target_shim.Event {
-  constructor(type, { data, emma, interpretation, resultIndex, results } = {}) {
-    super(type);
-    this.data = data;
-    this.emma = emma;
-    this.interpretation = interpretation;
-    this.resultIndex = resultIndex;
-    this.results = results;
-  }
-};
 function prepareAudioConfig(audioConfig) {
   const originalAttach = audioConfig.attach;
   const boundOriginalAttach = audioConfig.attach.bind(audioConfig);
```
```diff
@@ -309,7 +401,7 @@ function createSpeechRecognitionPonyfillFromRecognizer({
   textNormalization
 }) {
   SpeechRecognizer2.enableTelemetry(enableTelemetry !== false);
-  class SpeechRecognition extends
+  class SpeechRecognition extends EventTarget {
     constructor() {
       super();
       this._continuous = false;
```
```diff
@@ -317,7 +409,10 @@ function createSpeechRecognitionPonyfillFromRecognizer({
       this._lang = typeof window !== "undefined" ? window.document.documentElement.getAttribute("lang") || window.navigator.language : "en-US";
       this._grammars = new SpeechGrammarList_default();
       this._maxAlternatives = 1;
+      this.#eventListenerMap = new EventListenerMap(this);
     }
+    /** @type { import('./SpeechRecognitionEventListenerMap').SpeechRecognitionEventListenerMap } */
+    #eventListenerMap;
     emitCognitiveServices(type, event) {
       this.dispatchEvent(
         new SpeechRecognitionEvent("cognitiveservices", {
```
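`EventListenerMap` (introduced in an earlier hunk) backs the `on*` handler properties: assigning a new handler unregisters the previous one, so property assignment keeps at most one listener per event name. A standalone sketch of that contract:

```js
// Sketch of the EventListenerMap contract: each named property holds at most
// one listener on the target. Mirrors the getProperty/setProperty logic above.
class EventListenerMap {
  #eventTarget;
  #propertyMap = {};
  constructor(eventTarget) {
    this.#eventTarget = eventTarget;
  }
  getProperty(name) {
    return this.#propertyMap[name];
  }
  setProperty(name, value) {
    const existing = this.#propertyMap[name];
    existing && this.#eventTarget.removeEventListener(name, existing);
    value && this.#eventTarget.addEventListener(name, value);
    this.#propertyMap[name] = value;
  }
}

const target = new EventTarget();
const map = new EventListenerMap(target);

map.setProperty('result', () => console.log('first'));
map.setProperty('result', () => console.log('second'));
target.dispatchEvent(new Event('result')); // logs "second" only
```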
```diff
@@ -362,75 +457,88 @@ function createSpeechRecognitionPonyfillFromRecognizer({
     set lang(value) {
       this._lang = value;
     }
+    /** @type { ((event: SpeechRecognitionEvent<'audioend'>) => void) | undefined } */
     get onaudioend() {
-      return
+      return this.#eventListenerMap.getProperty("audioend");
     }
     set onaudioend(value) {
-
+      this.#eventListenerMap.setProperty("audioend", value);
     }
+    /** @type { ((event: SpeechRecognitionEvent<'audiostart'>) => void) | undefined } */
     get onaudiostart() {
-      return
+      return this.#eventListenerMap.getProperty("audiostart");
     }
     set onaudiostart(value) {
-
+      this.#eventListenerMap.setProperty("audiostart", value);
     }
+    /** @type { ((event: SpeechRecognitionEvent<'cognitiveservices'>) => void) | undefined } */
     get oncognitiveservices() {
-      return
+      return this.#eventListenerMap.getProperty("cognitiveservices");
     }
     set oncognitiveservices(value) {
-
+      this.#eventListenerMap.setProperty("cognitiveservices", value);
     }
+    /** @type { ((event: SpeechRecognitionEvent<'end'>) => void) | undefined } */
     get onend() {
-      return
+      return this.#eventListenerMap.getProperty("end");
     }
     set onend(value) {
-
+      this.#eventListenerMap.setProperty("end", value);
     }
+    /** @type { ((event: SpeechRecognitionEvent<'error'>) => void) | undefined } */
     get onerror() {
-      return
+      return this.#eventListenerMap.getProperty("error");
     }
     set onerror(value) {
-
+      this.#eventListenerMap.setProperty("error", value);
     }
+    /** @type { ((event: SpeechRecognitionEvent<'result'>) => void) | undefined } */
     get onresult() {
-      return
+      return this.#eventListenerMap.getProperty("result");
     }
     set onresult(value) {
-
+      this.#eventListenerMap.setProperty("result", value);
     }
+    /** @type { ((event: SpeechRecognitionEvent<'soundend'>) => void) | undefined } */
     get onsoundend() {
-      return
+      return this.#eventListenerMap.getProperty("soundend");
     }
     set onsoundend(value) {
-
+      this.#eventListenerMap.setProperty("soundend", value);
     }
+    /** @type { ((event: SpeechRecognitionEvent<'soundstart'>) => void) | undefined } */
     get onsoundstart() {
-      return
+      return this.#eventListenerMap.getProperty("soundstart");
     }
     set onsoundstart(value) {
-
+      this.#eventListenerMap.setProperty("soundstart", value);
     }
+    /** @type { ((event: SpeechRecognitionEvent<'speechend'>) => void) | undefined } */
     get onspeechend() {
-      return
+      return this.#eventListenerMap.getProperty("speechend");
     }
     set onspeechend(value) {
-
+      this.#eventListenerMap.setProperty("speechend", value);
     }
+    /** @type { ((event: SpeechRecognitionEvent<'speechstart'>) => void) | undefined } */
     get onspeechstart() {
-      return
+      return this.#eventListenerMap.getProperty("speechstart");
     }
     set onspeechstart(value) {
-
+      this.#eventListenerMap.setProperty("speechstart", value);
     }
+    /** @type { ((event: SpeechRecognitionEvent<'start'>) => void) | undefined } */
     get onstart() {
-      return
+      return this.#eventListenerMap.getProperty("start");
     }
     set onstart(value) {
-
+      this.#eventListenerMap.setProperty("start", value);
     }
     start() {
       this._startOnce().catch((err) => {
-        this.dispatchEvent(
+        this.dispatchEvent(
+          new SpeechRecognitionErrorEvent("error", { error: err, message: err && (err.stack || err.message) })
+        );
       });
     }
     async _startOnce() {
```
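With every `on*` accessor delegating to `EventListenerMap`, handler properties and `addEventListener` are now interchangeable, matching the browser's Web Speech API surface. A hedged usage sketch, assuming the package's `createSpeechServicesPonyfill` entry point (it appears in a later hunk) and placeholder credentials:

```js
// Usage sketch; the credentials shape follows the package's documented options
// and the values are placeholders.
const { createSpeechServicesPonyfill } = require('web-speech-cognitive-services');

const { SpeechRecognition } = createSpeechServicesPonyfill({
  credentials: { region: 'westus', subscriptionKey: 'YOUR_SUBSCRIPTION_KEY' }
});

const recognition = new SpeechRecognition();

// Property assignment...
recognition.onresult = ({ results }) => {
  for (const result of results) {
    result.isFinal && console.log(result[0].transcript);
  }
};

// ...and addEventListener are equivalent registrations.
recognition.addEventListener('end', () => console.log('done'));

recognition.start();
```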
```diff
@@ -520,10 +628,7 @@ function createSpeechRecognitionPonyfillFromRecognizer({
         Object.keys(event).forEach((name) => this.emitCognitiveServices(name, event[name]));
         const errorMessage = canceled && canceled.errorDetails;
         if (/Permission\sdenied/u.test(errorMessage || "")) {
-          finalEvent = {
-            error: "not-allowed",
-            type: "error"
-          };
+          finalEvent = new SpeechRecognitionErrorEvent("error", { error: "not-allowed" });
           break;
         }
         if (!loop) {
```
```diff
@@ -535,23 +640,14 @@ function createSpeechRecognitionPonyfillFromRecognizer({
           this.dispatchEvent(new SpeechRecognitionEvent("audiostart"));
           this.dispatchEvent(new SpeechRecognitionEvent("audioend"));
         }
-        finalEvent = {
-          error: "network",
-          type: "error"
-        };
+        finalEvent = new SpeechRecognitionErrorEvent("error", { error: "network" });
       } else {
-        finalEvent = {
-          error: "unknown",
-          type: "error"
-        };
+        finalEvent = new SpeechRecognitionErrorEvent("error", { error: "unknown" });
       }
       break;
     } else if (abort || stop) {
       if (abort) {
-        finalEvent = {
-          error: "aborted",
-          type: "error"
-        };
+        finalEvent = new SpeechRecognitionErrorEvent("error", { error: "aborted" });
         stopping = "abort";
       } else {
         pause();
```
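Error reporting now uses real `SpeechRecognitionErrorEvent` instances instead of plain `{ error, type }` literals, so the W3C-style codes (`not-allowed` in the previous hunk, `network`, `unknown`, `aborted` here, `no-speech` in a later hunk) arrive on a dispatchable event. A listener sketch, assuming a `recognition` instance from this ponyfill:

```js
// Sketch: handling the W3C-style error codes emitted above.
recognition.addEventListener('error', event => {
  switch (event.error) {
    case 'not-allowed': // microphone permission denied
    case 'network':     // connection or audio source failure
    case 'aborted':     // abort() was called
    case 'no-speech':   // recognition finished without any result
      console.warn(`Recognition failed: ${event.error}`, event.message);
      break;
    default:
      console.warn('Recognition failed with an unrecognized code', event.error);
  }
});
```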
```diff
@@ -575,10 +671,7 @@ function createSpeechRecognitionPonyfillFromRecognizer({
       } else if (stopping !== "abort") {
         if (recognized && recognized.result && recognized.result.reason === ResultReason2.NoMatch) {
           if (!this.continuous || stopping === "stop") {
-            finalEvent = {
-              results: [],
-              type: "result"
-            };
+            finalEvent = new SpeechRecognitionEvent("result", { results: finalizedResults });
             recognizer.stopContinuousRecognitionAsync && await cognitiveServicesAsyncToPromise(recognizer.stopContinuousRecognitionAsync.bind(recognizer))();
             break;
           }
```
```diff
@@ -596,7 +689,7 @@ function createSpeechRecognitionPonyfillFromRecognizer({
           speechStarted = true;
         }
         if (recognized) {
-          const result =
+          const result = cognitiveServiceEventResultToWebSpeechRecognitionResult_default(recognized.result, {
             maxAlternatives: this.maxAlternatives,
             textNormalization
           });
```
```diff
@@ -605,35 +698,34 @@ function createSpeechRecognitionPonyfillFromRecognizer({
             finalizedResults = [...finalizedResults, result];
             this.continuous && this.dispatchEvent(
               new SpeechRecognitionEvent("result", {
-                results: finalizedResults
+                results: new SpeechRecognitionResultList(finalizedResults)
               })
             );
           }
           if (this.continuous && recognizable) {
-            finalEvent =
+            finalEvent = void 0;
           } else {
-            finalEvent = {
-              results: finalizedResults
-
-            };
+            finalEvent = new SpeechRecognitionEvent("result", {
+              results: new SpeechRecognitionResultList(finalizedResults)
+            });
           }
           if ((!this.continuous || stopping === "stop") && recognizer.stopContinuousRecognitionAsync) {
             await cognitiveServicesAsyncToPromise(recognizer.stopContinuousRecognitionAsync.bind(recognizer))();
           }
           if (looseEvents && finalEvent && recognizable) {
-            this.dispatchEvent(
-            finalEvent =
+            this.dispatchEvent(finalEvent);
+            finalEvent = void 0;
           }
         } else if (recognizing) {
           this.interimResults && this.dispatchEvent(
             new SpeechRecognitionEvent("result", {
-              results: [
+              results: new SpeechRecognitionResultList([
                 ...finalizedResults,
-
+                cognitiveServiceEventResultToWebSpeechRecognitionResult_default(recognizing.result, {
                   maxAlternatives: this.maxAlternatives,
                   textNormalization
                 })
-              ]
+              ])
             })
           );
         }
```
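Interim dispatches wrap results in `SpeechRecognitionResultList` as well, so consumers see one array-like type whether an event carries finalized results, the in-progress tail, or both. A shape sketch, again assuming a `recognition` instance:

```js
// Shape sketch: what a "result" event carries after this change. Finalized
// results come first; during interim dispatch the last entry has isFinal: false.
recognition.interimResults = true;
recognition.onresult = ({ results }) => {
  const transcript = [...results].map(result => result[0].transcript).join(' ');
  const done = results.length && results[results.length - 1].isFinal;

  console.log(done ? `Final: ${transcript}` : `Interim: ${transcript}`);
};
```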
```diff
@@ -651,16 +743,9 @@ function createSpeechRecognitionPonyfillFromRecognizer({
       }
       if (finalEvent) {
         if (finalEvent.type === "result" && !finalEvent.results.length) {
-          finalEvent = {
-            error: "no-speech",
-            type: "error"
-          };
-        }
-        if (finalEvent.type === "error") {
-          this.dispatchEvent(new ErrorEvent("error", finalEvent));
-        } else {
-          this.dispatchEvent(new SpeechRecognitionEvent(finalEvent.type, finalEvent));
+          finalEvent = new SpeechRecognitionErrorEvent("error", { error: "no-speech" });
         }
+        this.dispatchEvent(finalEvent);
       }
       this.dispatchEvent(new SpeechRecognitionEvent("end"));
       detachAudioConfigEvent();
```
```diff
@@ -732,7 +817,7 @@ var createSpeechRecognitionPonyfill_default = (options) => {
 var SpeechToText_default = createSpeechRecognitionPonyfill_default;
 
 // src/SpeechServices/TextToSpeech/createSpeechSynthesisPonyfill.js
-var
+var import_event_target_shim3 = require("event-target-shim");
 var import_async = require("on-error-resume-next/async");
 
 // src/SpeechServices/TextToSpeech/AudioContextQueue.js
```
```diff
@@ -808,8 +893,8 @@ var AudioContextQueue_default = class {
 };
 
 // src/SpeechServices/TextToSpeech/SpeechSynthesisEvent.js
-var
-var SpeechSynthesisEvent = class extends
+var import_event_target_shim = require("event-target-shim");
+var SpeechSynthesisEvent = class extends import_event_target_shim.Event {
   constructor(type) {
     super(type);
   }
```
```diff
@@ -817,7 +902,7 @@ var SpeechSynthesisEvent = class extends import_event_target_shim2.Event {
 
 // src/SpeechServices/TextToSpeech/SpeechSynthesisUtterance.js
 var import_event_as_promise = require("event-as-promise");
-var
+var import_event_target_shim2 = require("event-target-shim");
 
 // src/SpeechServices/TextToSpeech/fetchSpeechData.js
 var import_base64_arraybuffer = require("base64-arraybuffer");
```
```diff
@@ -932,7 +1017,7 @@ function playDecoded(audioContext, audioBuffer, source) {
     }
   });
 }
-var SpeechSynthesisUtterance = class extends
+var SpeechSynthesisUtterance = class extends import_event_target_shim2.EventTarget {
   constructor(text) {
     super();
     this._lang = null;
```
```diff
@@ -956,46 +1041,46 @@ var SpeechSynthesisUtterance = class extends import_event_target_shim3.EventTarget {
     this._lang = value;
   }
   get onboundary() {
-    return (0,
+    return (0, import_event_target_shim2.getEventAttributeValue)(this, "boundary");
   }
   set onboundary(value) {
-    (0,
+    (0, import_event_target_shim2.setEventAttributeValue)(this, "boundary", value);
   }
   get onend() {
-    return (0,
+    return (0, import_event_target_shim2.getEventAttributeValue)(this, "end");
   }
   set onend(value) {
-    (0,
+    (0, import_event_target_shim2.setEventAttributeValue)(this, "end", value);
   }
   get onerror() {
-    return (0,
+    return (0, import_event_target_shim2.getEventAttributeValue)(this, "error");
   }
   set onerror(value) {
-    (0,
+    (0, import_event_target_shim2.setEventAttributeValue)(this, "error", value);
   }
   get onmark() {
-    return (0,
+    return (0, import_event_target_shim2.getEventAttributeValue)(this, "mark");
   }
   set onmark(value) {
-    (0,
+    (0, import_event_target_shim2.setEventAttributeValue)(this, "mark", value);
   }
   get onpause() {
-    return (0,
+    return (0, import_event_target_shim2.getEventAttributeValue)(this, "pause");
   }
   set onpause(value) {
-    (0,
+    (0, import_event_target_shim2.setEventAttributeValue)(this, "pause", value);
   }
   get onresume() {
-    return (0,
+    return (0, import_event_target_shim2.getEventAttributeValue)(this, "resume");
   }
   set onresume(value) {
-    (0,
+    (0, import_event_target_shim2.setEventAttributeValue)(this, "resume", value);
   }
   get onstart() {
-    return (0,
+    return (0, import_event_target_shim2.getEventAttributeValue)(this, "start");
   }
   set onstart(value) {
-    (0,
+    (0, import_event_target_shim2.setEventAttributeValue)(this, "start", value);
   }
   get pitch() {
     return this._pitch;
```
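Unlike the speech-to-text side, text-to-speech stays on `event-target-shim`; `getEventAttributeValue` and `setEventAttributeValue` are that library's helpers for implementing `on*` attributes, used here with renumbered import bindings. A minimal sketch of the pattern:

```js
// Sketch of the event-target-shim attribute helpers used above.
const { Event, EventTarget, getEventAttributeValue, setEventAttributeValue } = require('event-target-shim');

// "Utterance" is an illustrative class name, not the package's.
class Utterance extends EventTarget {
  get onend() {
    return getEventAttributeValue(this, 'end');
  }
  set onend(value) {
    setEventAttributeValue(this, 'end', value);
  }
}

const utterance = new Utterance();

utterance.onend = () => console.log('ended');
utterance.dispatchEvent(new Event('end')); // logs "ended"
```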
```diff
@@ -1147,7 +1232,7 @@ var createSpeechSynthesisPonyfill_default = (options) => {
     );
     return {};
   }
-  class SpeechSynthesis extends
+  class SpeechSynthesis extends import_event_target_shim3.EventTarget {
     constructor() {
       super();
       this.queue = new AudioContextQueue_default({ audioContext, ponyfill });
```
```diff
@@ -1160,10 +1245,10 @@ var createSpeechSynthesisPonyfill_default = (options) => {
       return EMPTY_ARRAY;
     }
     get onvoiceschanged() {
-      return (0,
+      return (0, import_event_target_shim3.getEventAttributeValue)(this, "voiceschanged");
     }
     set onvoiceschanged(value) {
-      (0,
+      (0, import_event_target_shim3.setEventAttributeValue)(this, "voiceschanged", value);
     }
     pause() {
       this.queue.pause();
```
```diff
@@ -1258,7 +1343,7 @@ function createSpeechServicesPonyfill(options = {}, ...args) {
 }
 var meta = document.createElement("meta");
 meta.setAttribute("name", "web-speech-cognitive-services");
-meta.setAttribute("content", `version=${"8.0.0-main.428d2a8"}`);
+meta.setAttribute("content", `version=${"8.0.0-main.5903868"}`);
 document.head.appendChild(meta);
 // Annotate the CommonJS export names for ESM import in node:
 0 && (module.exports = {
```