web-speech-cognitive-services 8.0.0-main.478b2e9 → 8.0.0-main.5903868

This diff compares the published contents of two versions of the package as released to a supported public registry. It is provided for informational purposes only and reflects the packages exactly as they appear in their registries.
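
In summary: the speech-to-text ponyfill replaces its plain-object events and results (along with the event-target-shim attribute helpers and the arrayToMap utility) with proper classes — SpeechRecognitionAlternative, SpeechRecognitionResult, SpeechRecognitionResultList, SpeechRecognitionEvent, and SpeechRecognitionErrorEvent, backed by a FakeArray base class and an EventListenerMap — while the text-to-speech side switches its on-error-resume-next and event-as-promise imports to subpath/named exports. A minimal consumer-side sketch of the observable difference (the wiring below is illustrative, not taken from the package):

  // Hypothetical usage; credential plumbing omitted.
  const { SpeechRecognition } = createSpeechRecognitionPonyfill({ credentials });
  const recognition = new SpeechRecognition();
  recognition.onresult = ({ results }) => {
    // "results" is now an iterable, array-like SpeechRecognitionResultList of
    // SpeechRecognitionResult instances instead of plain arrays and maps.
    for (const result of results) {
      result.isFinal && console.log(result[0].transcript);
    }
  };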
@@ -38,66 +38,6 @@ __export(src_exports, {
  });
  module.exports = __toCommonJS(src_exports);
 
- // src/SpeechServices/SpeechToText/createSpeechRecognitionPonyfill.js
- var import_event_target_shim = require("event-target-shim");
-
- // src/Util/arrayToMap.js
- function arrayToMap_default(array, extras) {
- const map = {
- ...[].reduce.call(
- array,
- (map2, value, index) => {
- map2[index] = value;
- return map2;
- },
- {}
- ),
- ...extras,
- length: array.length,
- [Symbol.iterator]: () => [].slice.call(map)[Symbol.iterator]()
- };
- return map;
- }
-
- // src/SpeechServices/SpeechSDK.js
- var import_microsoft_cognitiveservices_speech = require("microsoft-cognitiveservices-speech-sdk/distrib/lib/microsoft.cognitiveservices.speech.sdk");
- var SpeechSDK_default = {
- AudioConfig: import_microsoft_cognitiveservices_speech.AudioConfig,
- OutputFormat: import_microsoft_cognitiveservices_speech.OutputFormat,
- ResultReason: import_microsoft_cognitiveservices_speech.ResultReason,
- SpeechConfig: import_microsoft_cognitiveservices_speech.SpeechConfig,
- SpeechRecognizer: import_microsoft_cognitiveservices_speech.SpeechRecognizer
- };
-
- // src/SpeechServices/SpeechToText/cognitiveServiceEventResultToWebSpeechRecognitionResultList.js
- var {
- ResultReason: { RecognizingSpeech, RecognizedSpeech }
- } = SpeechSDK_default;
- function cognitiveServiceEventResultToWebSpeechRecognitionResultList_default(result, { maxAlternatives = Infinity, textNormalization = "display" } = {}) {
- if (result.reason === RecognizingSpeech || result.reason === RecognizedSpeech && !result.json.NBest) {
- const resultList = [
- {
- confidence: 0.5,
- transcript: result.text
- }
- ];
- if (result.reason === RecognizedSpeech) {
- resultList.isFinal = true;
- }
- return resultList;
- } else if (result.reason === RecognizedSpeech) {
- const resultList = arrayToMap_default(
- (result.json.NBest || []).slice(0, maxAlternatives).map(({ Confidence: confidence, Display: display, ITN: itn, Lexical: lexical, MaskedITN: maskedITN }) => ({
- confidence,
- transcript: textNormalization === "itn" ? itn : textNormalization === "lexical" ? lexical : textNormalization === "maskeditn" ? maskedITN : display
- })),
- { isFinal: true }
- );
- return resultList;
- }
- return [];
- }
-
  // ../../node_modules/p-defer/index.js
  function pDefer() {
  const deferred = {};
@@ -216,6 +156,121 @@ function patchOptions({
  };
  }
 
+ // src/SpeechServices/SpeechSDK.js
+ var import_microsoft_cognitiveservices_speech = require("microsoft-cognitiveservices-speech-sdk/distrib/lib/microsoft.cognitiveservices.speech.sdk");
+ var SpeechSDK_default = {
+ AudioConfig: import_microsoft_cognitiveservices_speech.AudioConfig,
+ OutputFormat: import_microsoft_cognitiveservices_speech.OutputFormat,
+ ResultReason: import_microsoft_cognitiveservices_speech.ResultReason,
+ SpeechConfig: import_microsoft_cognitiveservices_speech.SpeechConfig,
+ SpeechRecognizer: import_microsoft_cognitiveservices_speech.SpeechRecognizer
+ };
+
+ // src/SpeechServices/SpeechToText/SpeechRecognitionAlternative.ts
+ var SpeechRecognitionAlternative = class {
+ constructor({ confidence, transcript }) {
+ this.#confidence = confidence;
+ this.#transcript = transcript;
+ }
+ #confidence;
+ #transcript;
+ get confidence() {
+ return this.#confidence;
+ }
+ get transcript() {
+ return this.#transcript;
+ }
+ };
+
+ // src/SpeechServices/SpeechToText/FakeArray.ts
+ var FakeArray = class {
+ constructor(array) {
+ if (!array) {
+ throw new Error("array must be set.");
+ }
+ this.#array = array;
+ for (const key in array) {
+ Object.defineProperty(this, key, {
+ enumerable: true,
+ get() {
+ return array[key];
+ }
+ });
+ }
+ }
+ #array;
+ [Symbol.iterator]() {
+ return this.#array[Symbol.iterator]();
+ }
+ get length() {
+ return this.#array.length;
+ }
+ };
+
+ // src/SpeechServices/SpeechToText/SpeechRecognitionResult.ts
+ var SpeechRecognitionResult = class extends FakeArray {
+ constructor(init) {
+ super(init.results);
+ this.#isFinal = init.isFinal;
+ }
+ #isFinal;
+ get isFinal() {
+ return this.#isFinal;
+ }
+ };
+
+ // src/SpeechServices/SpeechToText/cognitiveServiceEventResultToWebSpeechRecognitionResult.ts
+ var {
+ ResultReason: { RecognizingSpeech, RecognizedSpeech }
+ } = SpeechSDK_default;
+ function cognitiveServiceEventResultToWebSpeechRecognitionResult_default(result, init) {
+ const { maxAlternatives = Infinity, textNormalization = "display" } = init || {};
+ const json = typeof result.json === "string" ? JSON.parse(result.json) : result.json;
+ if (result.reason === RecognizingSpeech || result.reason === RecognizedSpeech && !json.NBest) {
+ return new SpeechRecognitionResult({
+ isFinal: result.reason === RecognizedSpeech,
+ results: [
+ new SpeechRecognitionAlternative({
+ confidence: 0.5,
+ transcript: result.text
+ })
+ ]
+ });
+ } else if (result.reason === RecognizedSpeech) {
+ return new SpeechRecognitionResult({
+ isFinal: true,
+ results: (json.NBest || []).slice(0, maxAlternatives).map(
+ ({ Confidence: confidence, Display: display, ITN: itn, Lexical: lexical, MaskedITN: maskedITN }) => new SpeechRecognitionAlternative({
+ confidence,
+ transcript: textNormalization === "itn" ? itn : textNormalization === "lexical" ? lexical : textNormalization === "maskeditn" ? maskedITN : display
+ })
+ )
+ });
+ }
+ return new SpeechRecognitionResult({ isFinal: false, results: [] });
+ }
+
+ // src/SpeechServices/SpeechToText/EventListenerMap.ts
+ var EventListenerMap = class {
+ constructor(eventTarget) {
+ this.#eventTarget = eventTarget;
+ this.#propertyMap = {};
+ }
+ #eventTarget;
+ #propertyMap;
+ getProperty(name) {
+ return this.#propertyMap[name];
+ }
+ setProperty(name, value) {
+ const existing = this.#propertyMap[name];
+ existing && this.#eventTarget.removeEventListener(name, existing);
+ if (value) {
+ this.#eventTarget.addEventListener(name, value);
+ }
+ this.#propertyMap[name] = value;
+ }
+ };
+
  // src/SpeechServices/SpeechToText/SpeechGrammarList.js
  var SpeechGrammarList_default = class {
  constructor() {
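
FakeArray exposes each key of the backing array as an enumerable getter, plus length and a forwarded Symbol.iterator, which is how the new result classes imitate the read-only array-likes of the Web Speech API. A standalone illustration of the intended behavior (FakeArray is internal to the bundle; this snippet is not part of the package):

  const fake = new FakeArray(['a', 'b']);
  fake.length; // 2
  fake[1];     // 'b', served by a getter over the backing array
  [...fake];   // ['a', 'b'], via the forwarded Symbol.iterator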
@@ -238,6 +293,53 @@ var SpeechGrammarList_default = class {
  }
  };
 
+ // src/SpeechServices/SpeechToText/SpeechRecognitionErrorEvent.ts
+ var SpeechRecognitionErrorEvent = class extends Event {
+ constructor(type, { error, message }) {
+ super(type);
+ this.#error = error;
+ this.#message = message;
+ }
+ #error;
+ #message;
+ get error() {
+ return this.#error;
+ }
+ get message() {
+ return this.#message;
+ }
+ };
+
+ // src/SpeechServices/SpeechToText/SpeechRecognitionResultList.ts
+ var SpeechRecognitionResultList = class extends FakeArray {
+ constructor(result) {
+ super(result);
+ }
+ };
+
+ // src/SpeechServices/SpeechToText/SpeechRecognitionEvent.ts
+ var SpeechRecognitionEvent = class extends Event {
+ constructor(type, { data, resultIndex, results } = {}) {
+ super(type);
+ this.#data = data;
+ this.#resultIndex = resultIndex;
+ this.#results = results || new SpeechRecognitionResultList([]);
+ }
+ #data;
+ // TODO: "resultIndex" should be set.
+ #resultIndex;
+ #results;
+ get data() {
+ return this.#data;
+ }
+ get resultIndex() {
+ return this.#resultIndex;
+ }
+ get results() {
+ return this.#results;
+ }
+ };
+
  // src/SpeechServices/SpeechToText/createSpeechRecognitionPonyfill.js
  var { AudioConfig: AudioConfig2, OutputFormat: OutputFormat2, ResultReason: ResultReason2, SpeechConfig: SpeechConfig2, SpeechRecognizer: SpeechRecognizer2 } = SpeechSDK_default;
  function serializeRecognitionResult({ duration, errorDetails, json, offset, properties, reason, resultId, text }) {
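
SpeechRecognitionEvent and SpeechRecognitionErrorEvent now extend the global Event (the emma and interpretation fields of the old shim-based event are gone), so instances can be dispatched through any standards-compliant EventTarget. For illustration, with assumed field values:

  const event = new SpeechRecognitionErrorEvent('error', {
    error: 'not-allowed',
    message: 'Permission denied'
  });
  event.type;    // 'error'
  event.error;   // 'not-allowed', exposed as a read-only getter
  event.message; // 'Permission denied', also read-only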
@@ -259,16 +361,6 @@ function averageAmplitude(arrayBuffer) {
  function cognitiveServicesAsyncToPromise(fn) {
  return (...args) => new Promise((resolve, reject) => fn(...args, resolve, reject));
  }
- var SpeechRecognitionEvent = class extends import_event_target_shim.Event {
- constructor(type, { data, emma, interpretation, resultIndex, results } = {}) {
- super(type);
- this.data = data;
- this.emma = emma;
- this.interpretation = interpretation;
- this.resultIndex = resultIndex;
- this.results = results;
- }
- };
  function prepareAudioConfig(audioConfig) {
  const originalAttach = audioConfig.attach;
  const boundOriginalAttach = audioConfig.attach.bind(audioConfig);
@@ -309,7 +401,7 @@ function createSpeechRecognitionPonyfillFromRecognizer({
  textNormalization
  }) {
  SpeechRecognizer2.enableTelemetry(enableTelemetry !== false);
- class SpeechRecognition extends import_event_target_shim.EventTarget {
+ class SpeechRecognition extends EventTarget {
  constructor() {
  super();
  this._continuous = false;
@@ -317,7 +409,10 @@ function createSpeechRecognitionPonyfillFromRecognizer({
  this._lang = typeof window !== "undefined" ? window.document.documentElement.getAttribute("lang") || window.navigator.language : "en-US";
  this._grammars = new SpeechGrammarList_default();
  this._maxAlternatives = 1;
+ this.#eventListenerMap = new EventListenerMap(this);
  }
+ /** @type { import('./SpeechRecognitionEventListenerMap').SpeechRecognitionEventListenerMap } */
+ #eventListenerMap;
  emitCognitiveServices(type, event) {
  this.dispatchEvent(
  new SpeechRecognitionEvent("cognitiveservices", {
@@ -362,75 +457,88 @@ function createSpeechRecognitionPonyfillFromRecognizer({
  set lang(value) {
  this._lang = value;
  }
+ /** @type { ((event: SpeechRecognitionEvent<'audioend'>) => void) | undefined } */
  get onaudioend() {
- return (0, import_event_target_shim.getEventAttributeValue)(this, "audioend");
+ return this.#eventListenerMap.getProperty("audioend");
  }
  set onaudioend(value) {
- (0, import_event_target_shim.setEventAttributeValue)(this, "audioend", value);
+ this.#eventListenerMap.setProperty("audioend", value);
  }
+ /** @type { ((event: SpeechRecognitionEvent<'audiostart'>) => void) | undefined } */
  get onaudiostart() {
- return (0, import_event_target_shim.getEventAttributeValue)(this, "audiostart");
+ return this.#eventListenerMap.getProperty("audiostart");
  }
  set onaudiostart(value) {
- (0, import_event_target_shim.setEventAttributeValue)(this, "audiostart", value);
+ this.#eventListenerMap.setProperty("audiostart", value);
  }
+ /** @type { ((event: SpeechRecognitionEvent<'cognitiveservices'>) => void) | undefined } */
  get oncognitiveservices() {
- return (0, import_event_target_shim.getEventAttributeValue)(this, "cognitiveservices");
+ return this.#eventListenerMap.getProperty("cognitiveservices");
  }
  set oncognitiveservices(value) {
- (0, import_event_target_shim.setEventAttributeValue)(this, "cognitiveservices", value);
+ this.#eventListenerMap.setProperty("cognitiveservices", value);
  }
+ /** @type { ((event: SpeechRecognitionEvent<'end'>) => void) | undefined } */
  get onend() {
- return (0, import_event_target_shim.getEventAttributeValue)(this, "end");
+ return this.#eventListenerMap.getProperty("end");
  }
  set onend(value) {
- (0, import_event_target_shim.setEventAttributeValue)(this, "end", value);
+ this.#eventListenerMap.setProperty("end", value);
  }
+ /** @type { ((event: SpeechRecognitionEvent<'error'>) => void) | undefined } */
  get onerror() {
- return (0, import_event_target_shim.getEventAttributeValue)(this, "error");
+ return this.#eventListenerMap.getProperty("error");
  }
  set onerror(value) {
- (0, import_event_target_shim.setEventAttributeValue)(this, "error", value);
+ this.#eventListenerMap.setProperty("error", value);
  }
+ /** @type { ((event: SpeechRecognitionEvent<'result'>) => void) | undefined } */
  get onresult() {
- return (0, import_event_target_shim.getEventAttributeValue)(this, "result");
+ return this.#eventListenerMap.getProperty("result");
  }
  set onresult(value) {
- (0, import_event_target_shim.setEventAttributeValue)(this, "result", value);
+ this.#eventListenerMap.setProperty("result", value);
  }
+ /** @type { ((event: SpeechRecognitionEvent<'soundend'>) => void) | undefined } */
  get onsoundend() {
- return (0, import_event_target_shim.getEventAttributeValue)(this, "soundend");
+ return this.#eventListenerMap.getProperty("soundend");
  }
  set onsoundend(value) {
- (0, import_event_target_shim.setEventAttributeValue)(this, "soundend", value);
+ this.#eventListenerMap.setProperty("soundend", value);
  }
+ /** @type { ((event: SpeechRecognitionEvent<'soundstart'>) => void) | undefined } */
  get onsoundstart() {
- return (0, import_event_target_shim.getEventAttributeValue)(this, "soundstart");
+ return this.#eventListenerMap.getProperty("soundstart");
  }
  set onsoundstart(value) {
- (0, import_event_target_shim.setEventAttributeValue)(this, "soundstart", value);
+ this.#eventListenerMap.setProperty("soundstart", value);
  }
+ /** @type { ((event: SpeechRecognitionEvent<'speechend'>) => void) | undefined } */
  get onspeechend() {
- return (0, import_event_target_shim.getEventAttributeValue)(this, "speechend");
+ return this.#eventListenerMap.getProperty("speechend");
  }
  set onspeechend(value) {
- (0, import_event_target_shim.setEventAttributeValue)(this, "speechend", value);
+ this.#eventListenerMap.setProperty("speechend", value);
  }
+ /** @type { ((event: SpeechRecognitionEvent<'speechstart'>) => void) | undefined } */
  get onspeechstart() {
- return (0, import_event_target_shim.getEventAttributeValue)(this, "speechstart");
+ return this.#eventListenerMap.getProperty("speechstart");
  }
  set onspeechstart(value) {
- (0, import_event_target_shim.setEventAttributeValue)(this, "speechstart", value);
+ this.#eventListenerMap.setProperty("speechstart", value);
  }
+ /** @type { ((event: SpeechRecognitionEvent<'start'>) => void) | undefined } */
  get onstart() {
- return (0, import_event_target_shim.getEventAttributeValue)(this, "start");
+ return this.#eventListenerMap.getProperty("start");
  }
  set onstart(value) {
- (0, import_event_target_shim.setEventAttributeValue)(this, "start", value);
+ this.#eventListenerMap.setProperty("start", value);
  }
  start() {
  this._startOnce().catch((err) => {
- this.dispatchEvent(new ErrorEvent("error", { error: err, message: err && (err.stack || err.message) }));
+ this.dispatchEvent(
+ new SpeechRecognitionErrorEvent("error", { error: err, message: err && (err.stack || err.message) })
+ );
  });
  }
  async _startOnce() {
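
Each on* attribute is now routed through EventListenerMap, so assigning a handler registers a real listener and unregisters any previously assigned one. Roughly, and only as a sketch of the setProperty logic above:

  // recognition.onresult = handleResult; now behaves approximately like:
  previousHandler && recognition.removeEventListener('result', previousHandler);
  handleResult && recognition.addEventListener('result', handleResult);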
@@ -520,10 +628,7 @@ function createSpeechRecognitionPonyfillFromRecognizer({
  Object.keys(event).forEach((name) => this.emitCognitiveServices(name, event[name]));
  const errorMessage = canceled && canceled.errorDetails;
  if (/Permission\sdenied/u.test(errorMessage || "")) {
- finalEvent = {
- error: "not-allowed",
- type: "error"
- };
+ finalEvent = new SpeechRecognitionErrorEvent("error", { error: "not-allowed" });
  break;
  }
  if (!loop) {
@@ -535,23 +640,14 @@ function createSpeechRecognitionPonyfillFromRecognizer({
  this.dispatchEvent(new SpeechRecognitionEvent("audiostart"));
  this.dispatchEvent(new SpeechRecognitionEvent("audioend"));
  }
- finalEvent = {
- error: "network",
- type: "error"
- };
+ finalEvent = new SpeechRecognitionErrorEvent("error", { error: "network" });
  } else {
- finalEvent = {
- error: "unknown",
- type: "error"
- };
+ finalEvent = new SpeechRecognitionErrorEvent("error", { error: "unknown" });
  }
  break;
  } else if (abort || stop) {
  if (abort) {
- finalEvent = {
- error: "aborted",
- type: "error"
- };
+ finalEvent = new SpeechRecognitionErrorEvent("error", { error: "aborted" });
  stopping = "abort";
  } else {
  pause();
@@ -574,10 +670,11 @@ function createSpeechRecognitionPonyfillFromRecognizer({
  break;
  } else if (stopping !== "abort") {
  if (recognized && recognized.result && recognized.result.reason === ResultReason2.NoMatch) {
- finalEvent = {
- error: "no-speech",
- type: "error"
- };
+ if (!this.continuous || stopping === "stop") {
+ finalEvent = new SpeechRecognitionEvent("result", { results: finalizedResults });
+ recognizer.stopContinuousRecognitionAsync && await cognitiveServicesAsyncToPromise(recognizer.stopContinuousRecognitionAsync.bind(recognizer))();
+ break;
+ }
  } else if (recognized || recognizing) {
  if (!audioStarted) {
  this.dispatchEvent(new SpeechRecognitionEvent("audiostart"));
@@ -592,7 +689,7 @@ function createSpeechRecognitionPonyfillFromRecognizer({
  speechStarted = true;
  }
  if (recognized) {
- const result = cognitiveServiceEventResultToWebSpeechRecognitionResultList_default(recognized.result, {
+ const result = cognitiveServiceEventResultToWebSpeechRecognitionResult_default(recognized.result, {
  maxAlternatives: this.maxAlternatives,
  textNormalization
  });
@@ -601,35 +698,34 @@ function createSpeechRecognitionPonyfillFromRecognizer({
  finalizedResults = [...finalizedResults, result];
  this.continuous && this.dispatchEvent(
  new SpeechRecognitionEvent("result", {
- results: finalizedResults
+ results: new SpeechRecognitionResultList(finalizedResults)
  })
  );
  }
  if (this.continuous && recognizable) {
- finalEvent = null;
+ finalEvent = void 0;
  } else {
- finalEvent = {
- results: finalizedResults,
- type: "result"
- };
+ finalEvent = new SpeechRecognitionEvent("result", {
+ results: new SpeechRecognitionResultList(finalizedResults)
+ });
  }
- if (!this.continuous && recognizer.stopContinuousRecognitionAsync) {
+ if ((!this.continuous || stopping === "stop") && recognizer.stopContinuousRecognitionAsync) {
  await cognitiveServicesAsyncToPromise(recognizer.stopContinuousRecognitionAsync.bind(recognizer))();
  }
  if (looseEvents && finalEvent && recognizable) {
- this.dispatchEvent(new SpeechRecognitionEvent(finalEvent.type, finalEvent));
- finalEvent = null;
+ this.dispatchEvent(finalEvent);
+ finalEvent = void 0;
  }
  } else if (recognizing) {
  this.interimResults && this.dispatchEvent(
  new SpeechRecognitionEvent("result", {
- results: [
+ results: new SpeechRecognitionResultList([
  ...finalizedResults,
- cognitiveServiceEventResultToWebSpeechRecognitionResultList_default(recognizing.result, {
+ cognitiveServiceEventResultToWebSpeechRecognitionResult_default(recognizing.result, {
  maxAlternatives: this.maxAlternatives,
  textNormalization
  })
- ]
+ ])
  })
  );
  }
@@ -647,16 +743,9 @@ function createSpeechRecognitionPonyfillFromRecognizer({
  }
  if (finalEvent) {
  if (finalEvent.type === "result" && !finalEvent.results.length) {
- finalEvent = {
- error: "no-speech",
- type: "error"
- };
- }
- if (finalEvent.type === "error") {
- this.dispatchEvent(new ErrorEvent("error", finalEvent));
- } else {
- this.dispatchEvent(new SpeechRecognitionEvent(finalEvent.type, finalEvent));
+ finalEvent = new SpeechRecognitionErrorEvent("error", { error: "no-speech" });
  }
+ this.dispatchEvent(finalEvent);
  }
  this.dispatchEvent(new SpeechRecognitionEvent("end"));
  detachAudioConfigEvent();
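
The terminal paths above now dispatch typed event instances directly instead of re-wrapping plain { type, ... } objects at the end of the loop. A hypothetical consumer-side handler:

  recognition.addEventListener('error', (event) => {
    // event is a SpeechRecognitionErrorEvent
    if (event.error === 'no-speech') {
      // recognition ended without producing any finalized result
    }
  });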
@@ -728,8 +817,8 @@ var createSpeechRecognitionPonyfill_default = (options) => {
  var SpeechToText_default = createSpeechRecognitionPonyfill_default;
 
  // src/SpeechServices/TextToSpeech/createSpeechSynthesisPonyfill.js
- var import_event_target_shim4 = require("event-target-shim");
- var import_on_error_resume_next = __toESM(require("on-error-resume-next"));
+ var import_event_target_shim3 = require("event-target-shim");
+ var import_async = require("on-error-resume-next/async");
 
  // src/SpeechServices/TextToSpeech/AudioContextQueue.js
  var import_memoize_one = __toESM(require("memoize-one"));
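
on-error-resume-next is now consumed as a named export from its async subpath entry point rather than as a __toESM-wrapped default export. Sketch of the call-site difference, assuming the package's documented v2 API:

  // Before: const onErrorResumeNext = require('on-error-resume-next');
  const { onErrorResumeNext } = require('on-error-resume-next/async');
  await onErrorResumeNext(async () => {
    // a rejection here is swallowed; the awaited promise resolves to undefined
  });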
@@ -803,91 +892,17 @@ var AudioContextQueue_default = class {
  }
  };
 
- // src/SpeechServices/TextToSpeech/SpeechSynthesisVoice.js
- var SpeechSynthesisVoice_default = class {
- constructor({ gender, lang, voiceURI }) {
- this._default = false;
- this._gender = gender;
- this._lang = lang;
- this._localService = false;
- this._name = voiceURI;
- this._voiceURI = voiceURI;
- }
- get default() {
- return this._default;
- }
- get gender() {
- return this._gender;
- }
- get lang() {
- return this._lang;
- }
- get localService() {
- return this._localService;
- }
- get name() {
- return this._name;
- }
- get voiceURI() {
- return this._voiceURI;
- }
- };
-
- // src/SpeechServices/TextToSpeech/fetchCustomVoices.js
- async function fetchCustomVoices({ customVoiceHostname, deploymentId, region, subscriptionKey }) {
- const hostname = customVoiceHostname || `${region}.customvoice.api.speech.microsoft.com`;
- const res = await fetch(
- `https://${encodeURI(hostname)}/api/texttospeech/v2.0/endpoints/${encodeURIComponent(deploymentId)}`,
- {
- headers: {
- accept: "application/json",
- "ocp-apim-subscription-key": subscriptionKey
- }
- }
- );
- if (!res.ok) {
- throw new Error("Failed to fetch custom voices");
- }
- return res.json();
- }
- async function fetchCustomVoices_default({ customVoiceHostname, deploymentId, region, subscriptionKey }) {
- const { models } = await fetchCustomVoices({ customVoiceHostname, deploymentId, region, subscriptionKey });
- return models.map(
- ({ properties: { Gender: gender }, locale: lang, name: voiceURI }) => new SpeechSynthesisVoice_default({ gender, lang, voiceURI })
- ).sort(({ name: x }, { name: y }) => x > y ? 1 : x < y ? -1 : 0);
- }
-
- // src/SpeechServices/TextToSpeech/fetchVoices.js
- async function fetchVoices({ authorizationToken, region, speechSynthesisHostname, subscriptionKey }) {
- const hostname = speechSynthesisHostname || `${encodeURI(region)}.tts.speech.microsoft.com`;
- const res = await fetch(`https://${hostname}/cognitiveservices/voices/list`, {
- headers: {
- "content-type": "application/json",
- ...authorizationToken ? {
- authorization: `Bearer ${authorizationToken}`
- } : {
- "Ocp-Apim-Subscription-Key": subscriptionKey
- }
- }
- });
- if (!res.ok) {
- throw new Error("Failed to fetch voices");
- }
- const voices = await res.json();
- return voices.map(({ Gender: gender, Locale: lang, Name: voiceURI }) => new SpeechSynthesisVoice_default({ gender, lang, voiceURI })).sort(({ name: x }, { name: y }) => x > y ? 1 : x < y ? -1 : 0);
- }
-
  // src/SpeechServices/TextToSpeech/SpeechSynthesisEvent.js
- var import_event_target_shim2 = require("event-target-shim");
- var SpeechSynthesisEvent = class extends import_event_target_shim2.Event {
+ var import_event_target_shim = require("event-target-shim");
+ var SpeechSynthesisEvent = class extends import_event_target_shim.Event {
  constructor(type) {
  super(type);
  }
  };
 
  // src/SpeechServices/TextToSpeech/SpeechSynthesisUtterance.js
- var import_event_target_shim3 = require("event-target-shim");
- var import_event_as_promise = __toESM(require("event-as-promise"));
+ var import_event_as_promise = require("event-as-promise");
+ var import_event_target_shim2 = require("event-target-shim");
 
  // src/SpeechServices/TextToSpeech/fetchSpeechData.js
  var import_base64_arraybuffer = require("base64-arraybuffer");
@@ -982,8 +997,8 @@ function asyncDecodeAudioData(audioContext, arrayBuffer) {
  }
  function playDecoded(audioContext, audioBuffer, source) {
  return new Promise((resolve, reject) => {
- const audioContextClosed = new import_event_as_promise.default();
- const sourceEnded = new import_event_as_promise.default();
+ const audioContextClosed = new import_event_as_promise.EventAsPromise();
+ const sourceEnded = new import_event_as_promise.EventAsPromise();
  const unsubscribe = subscribeEvent(
  audioContext,
  "statechange",
@@ -1002,7 +1017,7 @@ function playDecoded(audioContext, audioBuffer, source) {
  }
  });
  }
- var SpeechSynthesisUtterance = class extends import_event_target_shim3.EventTarget {
+ var SpeechSynthesisUtterance = class extends import_event_target_shim2.EventTarget {
  constructor(text) {
  super();
  this._lang = null;
@@ -1026,46 +1041,46 @@ var SpeechSynthesisUtterance = class extends import_event_target_shim3.EventTarg
  this._lang = value;
  }
  get onboundary() {
- return (0, import_event_target_shim3.getEventAttributeValue)(this, "boundary");
+ return (0, import_event_target_shim2.getEventAttributeValue)(this, "boundary");
  }
  set onboundary(value) {
- (0, import_event_target_shim3.setEventAttributeValue)(this, "boundary", value);
+ (0, import_event_target_shim2.setEventAttributeValue)(this, "boundary", value);
  }
  get onend() {
- return (0, import_event_target_shim3.getEventAttributeValue)(this, "end");
+ return (0, import_event_target_shim2.getEventAttributeValue)(this, "end");
  }
  set onend(value) {
- (0, import_event_target_shim3.setEventAttributeValue)(this, "end", value);
+ (0, import_event_target_shim2.setEventAttributeValue)(this, "end", value);
  }
  get onerror() {
- return (0, import_event_target_shim3.getEventAttributeValue)(this, "error");
+ return (0, import_event_target_shim2.getEventAttributeValue)(this, "error");
  }
  set onerror(value) {
- (0, import_event_target_shim3.setEventAttributeValue)(this, "error", value);
+ (0, import_event_target_shim2.setEventAttributeValue)(this, "error", value);
  }
  get onmark() {
- return (0, import_event_target_shim3.getEventAttributeValue)(this, "mark");
+ return (0, import_event_target_shim2.getEventAttributeValue)(this, "mark");
  }
  set onmark(value) {
- (0, import_event_target_shim3.setEventAttributeValue)(this, "mark", value);
+ (0, import_event_target_shim2.setEventAttributeValue)(this, "mark", value);
  }
  get onpause() {
- return (0, import_event_target_shim3.getEventAttributeValue)(this, "pause");
+ return (0, import_event_target_shim2.getEventAttributeValue)(this, "pause");
  }
  set onpause(value) {
- (0, import_event_target_shim3.setEventAttributeValue)(this, "pause", value);
+ (0, import_event_target_shim2.setEventAttributeValue)(this, "pause", value);
  }
  get onresume() {
- return (0, import_event_target_shim3.getEventAttributeValue)(this, "resume");
+ return (0, import_event_target_shim2.getEventAttributeValue)(this, "resume");
  }
  set onresume(value) {
- (0, import_event_target_shim3.setEventAttributeValue)(this, "resume", value);
+ (0, import_event_target_shim2.setEventAttributeValue)(this, "resume", value);
  }
  get onstart() {
- return (0, import_event_target_shim3.getEventAttributeValue)(this, "start");
+ return (0, import_event_target_shim2.getEventAttributeValue)(this, "start");
  }
  set onstart(value) {
- (0, import_event_target_shim3.setEventAttributeValue)(this, "start", value);
+ (0, import_event_target_shim2.setEventAttributeValue)(this, "start", value);
  }
  get pitch() {
  return this._pitch;
@@ -1124,6 +1139,80 @@ var SpeechSynthesisUtterance = class extends import_event_target_shim3.EventTarg
  };
  var SpeechSynthesisUtterance_default = SpeechSynthesisUtterance;
 
+ // src/SpeechServices/TextToSpeech/SpeechSynthesisVoice.js
+ var SpeechSynthesisVoice_default = class {
+ constructor({ gender, lang, voiceURI }) {
+ this._default = false;
+ this._gender = gender;
+ this._lang = lang;
+ this._localService = false;
+ this._name = voiceURI;
+ this._voiceURI = voiceURI;
+ }
+ get default() {
+ return this._default;
+ }
+ get gender() {
+ return this._gender;
+ }
+ get lang() {
+ return this._lang;
+ }
+ get localService() {
+ return this._localService;
+ }
+ get name() {
+ return this._name;
+ }
+ get voiceURI() {
+ return this._voiceURI;
+ }
+ };
+
+ // src/SpeechServices/TextToSpeech/fetchCustomVoices.js
+ async function fetchCustomVoices({ customVoiceHostname, deploymentId, region, subscriptionKey }) {
+ const hostname = customVoiceHostname || `${region}.customvoice.api.speech.microsoft.com`;
+ const res = await fetch(
+ `https://${encodeURI(hostname)}/api/texttospeech/v2.0/endpoints/${encodeURIComponent(deploymentId)}`,
+ {
+ headers: {
+ accept: "application/json",
+ "ocp-apim-subscription-key": subscriptionKey
+ }
+ }
+ );
+ if (!res.ok) {
+ throw new Error("Failed to fetch custom voices");
+ }
+ return res.json();
+ }
+ async function fetchCustomVoices_default({ customVoiceHostname, deploymentId, region, subscriptionKey }) {
+ const { models } = await fetchCustomVoices({ customVoiceHostname, deploymentId, region, subscriptionKey });
+ return models.map(
+ ({ properties: { Gender: gender }, locale: lang, name: voiceURI }) => new SpeechSynthesisVoice_default({ gender, lang, voiceURI })
+ ).sort(({ name: x }, { name: y }) => x > y ? 1 : x < y ? -1 : 0);
+ }
+
+ // src/SpeechServices/TextToSpeech/fetchVoices.js
+ async function fetchVoices({ authorizationToken, region, speechSynthesisHostname, subscriptionKey }) {
+ const hostname = speechSynthesisHostname || `${encodeURI(region)}.tts.speech.microsoft.com`;
+ const res = await fetch(`https://${hostname}/cognitiveservices/voices/list`, {
+ headers: {
+ "content-type": "application/json",
+ ...authorizationToken ? {
+ authorization: `Bearer ${authorizationToken}`
+ } : {
+ "Ocp-Apim-Subscription-Key": subscriptionKey
+ }
+ }
+ });
+ if (!res.ok) {
+ throw new Error("Failed to fetch voices");
+ }
+ const voices = await res.json();
+ return voices.map(({ Gender: gender, Locale: lang, Name: voiceURI }) => new SpeechSynthesisVoice_default({ gender, lang, voiceURI })).sort(({ name: x }, { name: y }) => x > y ? 1 : x < y ? -1 : 0);
+ }
+
  // src/SpeechServices/TextToSpeech/createSpeechSynthesisPonyfill.js
  var DEFAULT_OUTPUT_FORMAT2 = "audio-24khz-160kbitrate-mono-mp3";
  var EMPTY_ARRAY = [];
@@ -1143,7 +1232,7 @@ var createSpeechSynthesisPonyfill_default = (options) => {
  );
  return {};
  }
- class SpeechSynthesis extends import_event_target_shim4.EventTarget {
+ class SpeechSynthesis extends import_event_target_shim3.EventTarget {
  constructor() {
  super();
  this.queue = new AudioContextQueue_default({ audioContext, ponyfill });
@@ -1156,10 +1245,10 @@ var createSpeechSynthesisPonyfill_default = (options) => {
  return EMPTY_ARRAY;
  }
  get onvoiceschanged() {
- return (0, import_event_target_shim4.getEventAttributeValue)(this, "voiceschanged");
+ return (0, import_event_target_shim3.getEventAttributeValue)(this, "voiceschanged");
  }
  set onvoiceschanged(value) {
- (0, import_event_target_shim4.setEventAttributeValue)(this, "voiceschanged", value);
+ (0, import_event_target_shim3.setEventAttributeValue)(this, "voiceschanged", value);
  }
  pause() {
  this.queue.pause();
@@ -1200,7 +1289,7 @@ var createSpeechSynthesisPonyfill_default = (options) => {
  console.warn(
  "web-speech-cognitive-services: Listing of custom voice models are only available when using subscription key."
  );
- await (0, import_on_error_resume_next.default)(async () => {
+ await (0, import_async.onErrorResumeNext)(async () => {
  const voices = await fetchCustomVoices_default({
  customVoiceHostname,
  deploymentId: speechSynthesisDeploymentId,
@@ -1212,7 +1301,7 @@ var createSpeechSynthesisPonyfill_default = (options) => {
  });
  }
  } else {
- await (0, import_on_error_resume_next.default)(async () => {
+ await (0, import_async.onErrorResumeNext)(async () => {
  const voices = await fetchVoices(await fetchCredentials());
  this.getVoices = () => voices;
  });
@@ -1254,7 +1343,7 @@ function createSpeechServicesPonyfill(options = {}, ...args) {
  }
  var meta = document.createElement("meta");
  meta.setAttribute("name", "web-speech-cognitive-services");
- meta.setAttribute("content", `version=${"8.0.0-main.478b2e9"}`);
+ meta.setAttribute("content", `version=${"8.0.0-main.5903868"}`);
  document.head.appendChild(meta);
  // Annotate the CommonJS export names for ESM import in node:
  0 && (module.exports = {