web-speech-cognitive-services 8.0.0-main.ccf35da → 8.0.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -23440,17 +23440,21 @@
  return isFunction(fnOrValue) ? fnOrValue() : fnOrValue;
  }

- // src/SpeechServices/patchOptions.js
+ // src/SpeechServices/patchOptions.ts
  var shouldWarnOnSubscriptionKey = true;
- function patchOptions({
- authorizationToken,
- credentials,
- looseEvent,
- looseEvents,
- region = "westus",
- subscriptionKey,
- ...otherOptions
- } = {}) {
+ function patchOptions(init) {
+ const {
+ audioConfig,
+ authorizationToken,
+ enableTelemetry,
+ looseEvent,
+ referenceGrammars,
+ region = "westus",
+ speechRecognitionEndpointId,
+ subscriptionKey,
+ textNormalization
+ } = init;
+ let { credentials, looseEvents } = init;
  if (typeof looseEvent !== "undefined") {
  console.warn('web-speech-cognitive-services: The option "looseEvent" should be named as "looseEvents".');
  looseEvents = looseEvent;
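
Note: the rewrite above replaces parameter destructuring with an explicit read from a single init argument, so the accepted fields are now closed (the old ...otherOptions passthrough is gone). As orientation, the option bag now looks roughly like the following sketch; the TypeScript types are assumptions inferred from this bundled JavaScript, not the package's published declarations.

    // Hypothetical shape of the options patchOptions destructures above.
    // Field names come from the diff; the types are assumptions.
    type Credentials = {
      authorizationToken?: string;
      subscriptionKey?: string;
      region?: string;
      customVoiceHostname?: string;
      speechRecognitionHostname?: string;
      speechSynthesisHostname?: string;
    };

    interface PatchOptionsInit {
      audioConfig?: unknown;                                     // Speech SDK AudioConfig
      authorizationToken?: string | (() => Promise<string>);     // deprecated
      credentials?: Credentials | (() => Promise<Credentials>);
      enableTelemetry?: boolean;
      looseEvent?: boolean;                                      // deprecated alias of looseEvents
      looseEvents?: boolean;
      referenceGrammars?: readonly string[];
      region?: string;                                           // defaults to "westus"
      speechRecognitionEndpointId?: string;
      subscriptionKey?: string | (() => Promise<string>);        // deprecated
      textNormalization?: string;
    }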
@@ -23462,11 +23466,12 @@
  console.warn(
  "web-speech-cognitive-services: We are deprecating authorizationToken, region, and subscriptionKey. Please use credentials instead. The deprecated option will be removed on or after 2020-11-14."
  );
- credentials = async () => authorizationToken ? { authorizationToken: await resolveFunctionOrReturnValue(authorizationToken), region } : { region, subscriptionKey: await resolveFunctionOrReturnValue(subscriptionKey) };
+ credentials = async () => typeof init.authorizationToken !== "undefined" ? { authorizationToken: await resolveFunctionOrReturnValue(init.authorizationToken), region } : { region, subscriptionKey: await resolveFunctionOrReturnValue(init.subscriptionKey) };
  }
  }
- return {
- ...otherOptions,
+ return Object.freeze({
+ audioConfig,
+ enableTelemetry,
  fetchCredentials: async () => {
  const {
  authorizationToken: authorizationToken2,
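
The deprecation branch above converts the legacy authorizationToken/region/subscriptionKey fields into a credentials callback. On the non-deprecated path a caller supplies credentials directly; a minimal sketch, with a placeholder subscription key:

    // Preferred: pass credentials instead of the deprecated trio. The
    // callback form lets you refresh tokens on every credential fetch.
    const ponyfill = createSpeechServicesPonyfill({
      credentials: async () => ({
        region: 'westus',
        subscriptionKey: 'YOUR-SUBSCRIPTION-KEY' // placeholder
      })
    });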
@@ -23501,21 +23506,23 @@
  );
  shouldWarnOnSubscriptionKey = false;
  }
- const resolvedCredentials = authorizationToken2 ? { authorizationToken: authorizationToken2 } : { subscriptionKey: subscriptionKey2 };
- if (region2) {
- resolvedCredentials.region = region2;
- } else {
- resolvedCredentials.customVoiceHostname = customVoiceHostname;
- resolvedCredentials.speechRecognitionHostname = speechRecognitionHostname;
- resolvedCredentials.speechSynthesisHostname = speechSynthesisHostname;
- }
- return resolvedCredentials;
+ return {
+ ...typeof authorizationToken2 !== "undefined" ? { authorizationToken: authorizationToken2 } : { subscriptionKey: subscriptionKey2 },
+ ...typeof region2 !== "undefined" ? { region: region2 } : {
+ customVoiceHostname,
+ speechRecognitionHostname,
+ speechSynthesisHostname
+ }
+ };
  },
- looseEvents
- };
+ looseEvents: !!looseEvents,
+ referenceGrammars: referenceGrammars && Object.freeze([...referenceGrammars]),
+ speechRecognitionEndpointId,
+ textNormalization
+ });
  }

- // src/SpeechServices/SpeechSDK.js
+ // src/SpeechServices/SpeechSDK.ts
  var import_microsoft_cognitiveservices_speech = __toESM(require_microsoft_cognitiveservices_speech_sdk());
  var SpeechSDK_default = {
  AudioConfig: import_microsoft_cognitiveservices_speech.AudioConfig,
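
fetchCredentials now builds its result in a single expression: a token or a subscription key, plus either a region or the three hostnames, never both. Two illustrative resolved values (all secrets and hostnames are placeholders):

    // Region-based credentials: region wins, hostnames are omitted.
    const regional = {
      subscriptionKey: 'YOUR-SUBSCRIPTION-KEY', // placeholder
      region: 'westus'
    };

    // Host-based credentials (e.g. a private or sovereign-cloud endpoint):
    // no region, so all three hostnames are returned instead.
    const hosted = {
      authorizationToken: 'eyJ...',                        // placeholder token
      customVoiceHostname: 'example.customvoice.invalid',  // placeholder
      speechRecognitionHostname: 'example.stt.invalid',    // placeholder
      speechSynthesisHostname: 'example.tts.invalid'       // placeholder
    };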
@@ -23652,82 +23659,6 @@
  );
  }

- // src/SpeechServices/SpeechToText/SpeechGrammarList.ts
- var SpeechGrammarList = class {
- constructor() {
- this.#phrases = [];
- }
- addFromString() {
- throw new Error("JSGF is not supported");
- }
- #phrases;
- get phrases() {
- return this.#phrases;
- }
- set phrases(value) {
- if (Array.isArray(value)) {
- this.#phrases = Object.freeze([...value]);
- } else if (typeof value === "string") {
- this.#phrases = Object.freeze([value]);
- } else {
- throw new Error(`The provided value is not an array or of type 'string'`);
- }
- }
- };
-
- // src/SpeechServices/SpeechToText/SpeechRecognitionErrorEvent.ts
- var SpeechRecognitionErrorEvent = class extends Event {
- constructor(type, { error, message }) {
- super(type);
- this.#error = error;
- this.#message = message;
- }
- #error;
- #message;
- get error() {
- return this.#error;
- }
- get message() {
- return this.#message;
- }
- get type() {
- return "error";
- }
- };
-
- // src/SpeechServices/SpeechToText/SpeechRecognitionResultList.ts
- var SpeechRecognitionResultList = class extends FakeArray {
- constructor(result) {
- super(result);
- }
- };
-
- // src/SpeechServices/SpeechToText/SpeechRecognitionEvent.ts
- var SpeechRecognitionEvent = class extends Event {
- constructor(type, { data, resultIndex, results } = {}) {
- super(type);
- this.#data = data;
- this.#resultIndex = resultIndex;
- this.#results = results || new SpeechRecognitionResultList([]);
- }
- #data;
- // TODO: "resultIndex" should be set.
- #resultIndex;
- #results;
- get data() {
- return this.#data;
- }
- get resultIndex() {
- return this.#resultIndex;
- }
- get results() {
- return this.#results;
- }
- get type() {
- return super.type;
- }
- };
-
  // src/SpeechServices/SpeechToText/private/EventListenerMap.ts
  var EventListenerMap = class {
  constructor(eventTarget) {
@@ -23816,6 +23747,91 @@
  });
  }

+ // src/SpeechServices/SpeechToText/SpeechGrammarList.ts
+ var SpeechGrammarList = class {
+ constructor() {
+ this.#phrases = [];
+ }
+ addFromString() {
+ throw new Error("JSGF is not supported");
+ }
+ addFromURI() {
+ throw new Error("JSGF is not supported");
+ }
+ item() {
+ throw new Error("JSGF is not supported");
+ }
+ get length() {
+ throw new Error("JSGF is not supported");
+ }
+ #phrases;
+ get phrases() {
+ return this.#phrases;
+ }
+ set phrases(value) {
+ if (Array.isArray(value)) {
+ this.#phrases = Object.freeze([...value]);
+ } else if (typeof value === "string") {
+ this.#phrases = Object.freeze([value]);
+ } else {
+ throw new Error(`The provided value is not an array or of type 'string'`);
+ }
+ }
+ };
+
+ // src/SpeechServices/SpeechToText/SpeechRecognitionErrorEvent.ts
+ var SpeechRecognitionErrorEvent = class extends Event {
+ constructor(type, { error, message }) {
+ super(type);
+ this.#error = error;
+ this.#message = message;
+ }
+ #error;
+ #message;
+ get error() {
+ return this.#error;
+ }
+ get message() {
+ return this.#message;
+ }
+ get type() {
+ return "error";
+ }
+ };
+
+ // src/SpeechServices/SpeechToText/SpeechRecognitionResultList.ts
+ var SpeechRecognitionResultList = class extends FakeArray {
+ constructor(result) {
+ super(result);
+ }
+ };
+
+ // src/SpeechServices/SpeechToText/SpeechRecognitionEvent.ts
+ var SpeechRecognitionEvent = class extends Event {
+ constructor(type, { data, resultIndex, results } = {}) {
+ super(type);
+ this.#data = data;
+ this.#resultIndex = resultIndex;
+ this.#results = results || new SpeechRecognitionResultList([]);
+ }
+ #data;
+ // TODO: "resultIndex" should be set.
+ #resultIndex;
+ #results;
+ get data() {
+ return this.#data;
+ }
+ get resultIndex() {
+ return this.#resultIndex;
+ }
+ get results() {
+ return this.#results;
+ }
+ get type() {
+ return super.type;
+ }
+ };
+
  // src/SpeechServices/SpeechToText/createSpeechRecognitionPonyfillFromRecognizer.ts
  var { ResultReason: ResultReason2, SpeechRecognizer: SpeechRecognizer2 } = SpeechSDK_default;
  function createSpeechRecognitionPonyfillFromRecognizer({
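
The four classes deleted in the earlier hunk reappear here, relocated after EventListenerMap; SpeechGrammarList additionally gains addFromURI, item, and length stubs. Only the non-standard phrases property does real work, and every JSGF entry point throws. A short usage sketch against the code as shown:

    const grammars = new SpeechGrammarList();

    // The phrases setter accepts an array or a single string and stores a
    // frozen copy, so later mutation of the source array has no effect.
    grammars.phrases = ['Tuen Mun', 'Yuen Long'];
    console.log(grammars.phrases); // ['Tuen Mun', 'Yuen Long']

    grammars.phrases = 'Kowloon';
    console.log(grammars.phrases); // ['Kowloon']

    // JSGF is not implemented; these all throw "JSGF is not supported".
    // grammars.addFromString('#JSGF V1.0; ...', 1);
    // grammars.item(0);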
@@ -24025,7 +24041,7 @@
  };
  const { phrases } = this.grammars;
  const { dynamicGrammar } = recognizer["privReco"];
- referenceGrammars && referenceGrammars.length && dynamicGrammar.addReferenceGrammar(referenceGrammars);
+ referenceGrammars && referenceGrammars.length && dynamicGrammar.addReferenceGrammar([...referenceGrammars]);
  phrases && phrases.length && dynamicGrammar.addPhrase([...phrases]);
  await cognitiveServicesAsyncToPromise(recognizer.startContinuousRecognitionAsync, recognizer)();
  if (typeof recognizer.stopContinuousRecognitionAsync === "function") {
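
The only change in this hunk is spreading referenceGrammars into a fresh array before handing it to dynamicGrammar.addReferenceGrammar. A plausible reason, given that patchOptions now freezes referenceGrammars, is to keep a mutating callee from tripping over the frozen original; that motive is an assumption, illustrated by this sketch:

    'use strict';
    // patchOptions now returns a frozen copy of referenceGrammars.
    const frozen = Object.freeze(['grammar-id-1']);

    // A callee that mutates its argument would break on the frozen array:
    // frozen.push('grammar-id-2'); // TypeError in strict mode

    // Spreading first hands the callee a mutable copy instead.
    const copy = [...frozen];
    copy.push('grammar-id-2'); // fine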
@@ -24194,7 +24210,7 @@
  };
  }

- // src/SpeechServices/SpeechToText/createSpeechRecognitionPonyfill.js
+ // src/SpeechServices/SpeechToText/createSpeechRecognitionPonyfill.ts
  var { AudioConfig: AudioConfig2, OutputFormat: OutputFormat2, SpeechConfig: SpeechConfig2, SpeechRecognizer: SpeechRecognizer3 } = SpeechSDK_default;
  function createSpeechRecognitionPonyfill(options) {
  const {
@@ -24209,24 +24225,24 @@
  textNormalization = "display"
  } = patchOptions(options);
  if (!audioConfig && (!window.navigator.mediaDevices || !window.navigator.mediaDevices.getUserMedia)) {
- console.warn(
- "web-speech-cognitive-services: This browser does not support WebRTC and it will not work with Cognitive Services Speech Services."
+ throw new Error(
+ "web-speech-cognitive-services: This browser does not support Media Capture and Streams API and it will not work with Cognitive Services Speech Services."
  );
- return {};
  }
  const createRecognizer = async (lang) => {
- const { authorizationToken, region, speechRecognitionHostname, subscriptionKey } = await fetchCredentials();
+ const credentials = await fetchCredentials();
  let speechConfig;
- if (speechRecognitionHostname) {
- const host = { hostname: speechRecognitionHostname, port: 443, protocol: "wss:" };
- if (authorizationToken) {
+ if (typeof credentials.speechRecognitionHostname !== "undefined") {
+ const host = new URL("wss://hostname:443");
+ host.hostname = credentials.speechRecognitionHostname;
+ if (credentials.authorizationToken) {
  speechConfig = SpeechConfig2.fromHost(host);
- speechConfig.authorizationToken = authorizationToken;
+ speechConfig.authorizationToken = credentials.authorizationToken;
  } else {
- speechConfig = SpeechConfig2.fromHost(host, subscriptionKey);
+ speechConfig = SpeechConfig2.fromHost(host, credentials.subscriptionKey);
  }
  } else {
- speechConfig = authorizationToken ? SpeechConfig2.fromAuthorizationToken(authorizationToken, region) : SpeechConfig2.fromSubscription(subscriptionKey, region);
+ speechConfig = typeof credentials.authorizationToken !== "undefined" ? SpeechConfig2.fromAuthorizationToken(credentials.authorizationToken, credentials.region) : SpeechConfig2.fromSubscription(credentials.subscriptionKey, credentials.region);
  }
  if (speechRecognitionEndpointId) {
  speechConfig.endpointId = speechRecognitionEndpointId;
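
Two behavioral changes land in this hunk: an unsupported browser now throws instead of warning and returning an empty object, and the custom host is built with the WHATWG URL API instead of a plain object literal. The URL construction works like this sketch (hostname is a placeholder):

    // Build a wss:// host the way the bundle now does: start from a dummy
    // URL and overwrite hostname, keeping protocol and port intact.
    const host = new URL('wss://hostname:443');
    host.hostname = 'example.stt.speech.invalid'; // placeholder hostname
    console.log(host.href);
    // "wss://example.stt.speech.invalid/" (443 is the wss default, so URL omits it)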
@@ -24236,7 +24252,6 @@
  return new SpeechRecognizer3(speechConfig, audioConfig);
  };
  return createSpeechRecognitionPonyfillFromRecognizer({
- audioConfig,
  createRecognizer,
  enableTelemetry,
  looseEvents,
@@ -24245,9 +24260,6 @@
  });
  }

- // src/SpeechServices/SpeechToText.js
- var SpeechToText_default = createSpeechRecognitionPonyfill;
-
  // ../../node_modules/event-target-shim/index.mjs
  function assertType(condition, message, ...args) {
  if (!condition) {
@@ -25713,16 +25725,16 @@
  // src/SpeechServices/TextToSpeech.js
  var TextToSpeech_default = createSpeechSynthesisPonyfill_default;

- // src/SpeechServices.js
- function createSpeechServicesPonyfill(options = {}, ...args) {
+ // src/SpeechServices.ts
+ function createSpeechServicesPonyfill(options = {}) {
  return {
- ...SpeechToText_default(options, ...args),
- ...TextToSpeech_default(options, ...args)
+ ...createSpeechRecognitionPonyfill(options),
+ ...TextToSpeech_default(options)
  };
  }
  var meta = document.createElement("meta");
  meta.setAttribute("name", "web-speech-cognitive-services");
- meta.setAttribute("content", `version=${"8.0.0-main.ccf35da"}`);
+ meta.setAttribute("content", `version=${"8.0.0"}`);
  document.head.appendChild(meta);

  // src/index.umd.js
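
createSpeechServicesPonyfill now takes a single options argument (the extra ...args passthrough is gone) and merges the speech-recognition and speech-synthesis ponyfills. A hedged end-to-end sketch; the destructured names follow the Web Speech API surface this package ponyfills, and the credential values are placeholders:

    const {
      SpeechGrammarList,
      SpeechRecognition,
      speechSynthesis,
      SpeechSynthesisUtterance
    } = createSpeechServicesPonyfill({
      credentials: { region: 'westus', subscriptionKey: 'YOUR-SUBSCRIPTION-KEY' } // placeholder
    });

    const recognition = new SpeechRecognition();
    recognition.lang = 'en-US';
    recognition.onresult = ({ results }) => console.log(results[0][0].transcript);
    recognition.start();

    // The bundle also stamps its version into <head> as
    // <meta name="web-speech-cognitive-services" content="version=8.0.0">.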