web-speech-cognitive-services 8.1.3-main.6ed2e3d → 8.1.3-main.b33949a

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -735,7 +735,7 @@
  Object.defineProperty(exports, "__esModule", { value: true });
  exports.createNoDashGuid = exports.createGuid = void 0;
  var uuid_1 = require_commonjs_browser();
- var createGuid = () => uuid_1.v4();
+ var createGuid = () => (0, uuid_1.v4)();
  exports.createGuid = createGuid;
  var createNoDashGuid = () => createGuid().replace(new RegExp("-", "g"), "").toUpperCase();
  exports.createNoDashGuid = createNoDashGuid;
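
Most of the mechanical churn in this diff is the bundler switching bare namespace calls such as uuid_1.v4() to the comma-operator form (0, uuid_1.v4)(). A minimal sketch of what that changes (the names are hypothetical, not from the package):

    "use strict";
    // Stand-in for a CommonJS namespace record like uuid_1.
    var ns = {
      whoAmI: function () {
        // Called as ns.whoAmI(), `this` is ns; via (0, ns.whoAmI)(),
        // `this` is undefined, matching ES module call semantics.
        return this === undefined ? "detached" : "bound to namespace";
      }
    };
    console.log(ns.whoAmI());      // "bound to namespace"
    console.log((0, ns.whoAmI)()); // "detached"
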
@@ -760,7 +760,7 @@
  var PlatformEvent = class {
  constructor(eventName, eventType) {
  this.privName = eventName;
- this.privEventId = Guid_js_1.createNoDashGuid();
+ this.privEventId = (0, Guid_js_1.createNoDashGuid)();
  this.privEventTime = (/* @__PURE__ */ new Date()).toISOString();
  this.privEventType = eventType;
  this.privMetadata = {};
@@ -876,7 +876,7 @@
  "../../node_modules/microsoft-cognitiveservices-speech-sdk/distrib/lib/src/common/ConnectionEvents.js"(exports) {
  "use strict";
  Object.defineProperty(exports, "__esModule", { value: true });
- exports.ConnectionMessageSentEvent = exports.ConnectionMessageReceivedEvent = exports.ConnectionEstablishErrorEvent = exports.ConnectionErrorEvent = exports.ConnectionClosedEvent = exports.ConnectionEstablishedEvent = exports.ConnectionStartEvent = exports.ConnectionEvent = exports.ServiceEvent = void 0;
+ exports.ConnectionRedirectEvent = exports.ConnectionMessageSentEvent = exports.ConnectionMessageReceivedEvent = exports.ConnectionEstablishErrorEvent = exports.ConnectionErrorEvent = exports.ConnectionClosedEvent = exports.ConnectionEstablishedEvent = exports.ConnectionStartEvent = exports.ConnectionEvent = exports.ServiceEvent = void 0;
  var PlatformEvent_js_1 = require_PlatformEvent();
  var ServiceEvent = class extends PlatformEvent_js_1.PlatformEvent {
  constructor(eventName, jsonstring, eventType = PlatformEvent_js_1.EventType.Info) {
@@ -988,6 +988,24 @@
  }
  };
  exports.ConnectionMessageSentEvent = ConnectionMessageSentEvent;
+ var ConnectionRedirectEvent = class extends ConnectionEvent {
+ constructor(connectionId, redirectUrl, originalUrl, context) {
+ super("ConnectionRedirectEvent", connectionId, PlatformEvent_js_1.EventType.Info);
+ this.privRedirectUrl = redirectUrl;
+ this.privOriginalUrl = originalUrl;
+ this.privContext = context;
+ }
+ get redirectUrl() {
+ return this.privRedirectUrl;
+ }
+ get originalUrl() {
+ return this.privOriginalUrl;
+ }
+ get context() {
+ return this.privContext;
+ }
+ };
+ exports.ConnectionRedirectEvent = ConnectionRedirectEvent;
  }
  });

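The new ConnectionRedirectEvent (raised from the Node WebSocket path later in this diff) exposes redirectUrl, originalUrl, and context as read-only getters. A hedged sketch of watching for it on the SDK's global event stream; Events.instance and the attach(onEventCallback) hook shown in a later hunk are internal API, so treat both as assumptions:

    // `Events` is the bundle-internal singleton this diff publishes to via
    // Events.instance.onEvent(event); `name` is assumed to be the getter
    // backed by privName.
    const subscription = Events.instance.attach((event) => {
      if (event.name === "ConnectionRedirectEvent") {
        console.log(`redirected: ${event.originalUrl} -> ${event.redirectUrl}`);
      }
    });
    // subscription.detach() when done, per the attach() hunk below.
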
@@ -1071,7 +1089,7 @@
  this.privMessageType = messageType;
  this.privBody = body;
  this.privHeaders = headers ? headers : {};
- this.privId = id ? id : Guid_js_1.createNoDashGuid();
+ this.privId = id ? id : (0, Guid_js_1.createNoDashGuid)();
  switch (this.messageType) {
  case MessageType.Binary:
  this.privSize = this.binaryBody !== null ? this.binaryBody.byteLength : 0;
@@ -1224,7 +1242,7 @@
  }
  }
  attach(onEventCallback) {
- const id = Guid_js_1.createNoDashGuid();
+ const id = (0, Guid_js_1.createNoDashGuid)();
  this.privEventListeners[id] = onEventCallback;
  return {
  detach: () => {
@@ -1937,7 +1955,7 @@
  }
  this.privMessageType = messageType;
  this.privPayload = payload;
- this.privId = id ? id : Guid_js_1.createNoDashGuid();
+ this.privId = id ? id : (0, Guid_js_1.createNoDashGuid)();
  }
  get messageType() {
  return this.privMessageType;
@@ -2040,7 +2058,7 @@
  constructor(streamId) {
  this.privIsWriteEnded = false;
  this.privIsReadEnded = false;
- this.privId = streamId ? streamId : Guid_js_1.createNoDashGuid();
+ this.privId = streamId ? streamId : (0, Guid_js_1.createNoDashGuid)();
  this.privReaderQueue = new Queue_js_1.Queue();
  }
  get isClosed() {
@@ -2122,7 +2140,7 @@
  var ChunkedArrayBufferStream = class extends Exports_js_1.Stream {
  constructor(targetChunkSize, streamId) {
  super(streamId);
- this.privTargetChunkSize = targetChunkSize;
+ this.privTargetChunkSize = Math.round(targetChunkSize);
  this.privNextBufferReadyBytes = 0;
  }
  writeStreamChunk(chunk) {
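
ChunkedArrayBufferStream now rounds its target chunk size. Callers elsewhere in this bundle derive the size as avgBytesPerSec / 10, which need not be a whole number of bytes; a fractional count would corrupt the chunking math. The AMR-WB format added later in this diff advertises 3052 bytes/sec, for instance:

    console.log(3052 / 10);             // 305.2 — not a usable byte count
    console.log(Math.round(3052 / 10)); // 305  — what the stream now stores
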
@@ -2419,22 +2437,19 @@
  "../../node_modules/microsoft-cognitiveservices-speech-sdk/distrib/lib/src/common/Exports.js"(exports) {
  "use strict";
  var __createBinding = exports && exports.__createBinding || (Object.create ? function(o, m, k, k2) {
- if (k2 === void 0)
- k2 = k;
+ if (k2 === void 0) k2 = k;
  Object.defineProperty(o, k2, { enumerable: true, get: function() {
  return m[k];
  } });
  } : function(o, m, k, k2) {
- if (k2 === void 0)
- k2 = k;
+ if (k2 === void 0) k2 = k;
  o[k2] = m[k];
  });
  var __exportStar = exports && exports.__exportStar || function(m, exports2) {
- for (var p in m)
- if (p !== "default" && !exports2.hasOwnProperty(p))
- __createBinding(exports2, m, p);
+ for (var p in m) if (p !== "default" && !Object.prototype.hasOwnProperty.call(exports2, p)) __createBinding(exports2, m, p);
  };
  Object.defineProperty(exports, "__esModule", { value: true });
+ exports.TranslationStatus = void 0;
  __exportStar(require_AudioSourceEvents(), exports);
  __exportStar(require_ConnectionEvents(), exports);
  __exportStar(require_ConnectionMessage(), exports);
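
The updated TypeScript helpers also swap exports2.hasOwnProperty(p) for Object.prototype.hasOwnProperty.call(exports2, p), which stays correct even when the target object has no prototype or shadows hasOwnProperty. A small illustration:

    // An object with no prototype has no .hasOwnProperty method at all.
    const bare = Object.create(null);
    bare.key = 1;
    // bare.hasOwnProperty("key")  // would throw TypeError
    console.log(Object.prototype.hasOwnProperty.call(bare, "key")); // true

    // A shadowed hasOwnProperty would silently lie; .call sidesteps it.
    const shady = { hasOwnProperty: () => false, key: 1 };
    console.log(shady.hasOwnProperty("key"));                        // false (wrong)
    console.log(Object.prototype.hasOwnProperty.call(shady, "key")); // true
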
@@ -2690,6 +2705,12 @@
  throw new Error("throwIfNotUndefined:" + name);
  }
  }
+ static throwIfNumberOutOfRange(value, name, rangeStart, rangeEnd) {
+ _Contracts.throwIfNullOrUndefined(value, name);
+ if (value < rangeStart || value > rangeEnd) {
+ throw new Error("throwIfNumberOutOfRange:" + name + " (must be between " + rangeStart.toString() + " and " + rangeEnd.toString() + ")");
+ }
+ }
  };
  exports.Contracts = Contracts;
  }
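
The new Contracts.throwIfNumberOutOfRange guard follows the existing pattern: validate, then throw with the check's name baked into the message. Hypothetical usage (the parameter name is invented for illustration):

    // Accept a playback rate only within [0.5, 2].
    Contracts.throwIfNumberOutOfRange(rate, "rate", 0.5, 2);
    // rate = 3 would throw:
    // Error: throwIfNumberOutOfRange:rate (must be between 0.5 and 2)
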
@@ -2700,14 +2721,12 @@
  "../../node_modules/microsoft-cognitiveservices-speech-sdk/distrib/lib/src/common.browser/ConsoleLoggingListener.js"(exports) {
  "use strict";
  var __createBinding = exports && exports.__createBinding || (Object.create ? function(o, m, k, k2) {
- if (k2 === void 0)
- k2 = k;
+ if (k2 === void 0) k2 = k;
  Object.defineProperty(o, k2, { enumerable: true, get: function() {
  return m[k];
  } });
  } : function(o, m, k, k2) {
- if (k2 === void 0)
- k2 = k;
+ if (k2 === void 0) k2 = k;
  o[k2] = m[k];
  });
  var __setModuleDefault = exports && exports.__setModuleDefault || (Object.create ? function(o, v) {
@@ -2716,13 +2735,10 @@
  o["default"] = v;
  });
  var __importStar = exports && exports.__importStar || function(mod) {
- if (mod && mod.__esModule)
- return mod;
+ if (mod && mod.__esModule) return mod;
  var result = {};
  if (mod != null) {
- for (var k in mod)
- if (k !== "default" && Object.hasOwnProperty.call(mod, k))
- __createBinding(result, mod, k);
+ for (var k in mod) if (k !== "default" && Object.prototype.hasOwnProperty.call(mod, k)) __createBinding(result, mod, k);
  }
  __setModuleDefault(result, mod);
  return result;
@@ -2828,6 +2844,8 @@
  AudioFormatTag2[AudioFormatTag2["ALaw"] = 8] = "ALaw";
  AudioFormatTag2[AudioFormatTag2["FLAC"] = 9] = "FLAC";
  AudioFormatTag2[AudioFormatTag2["OPUS"] = 10] = "OPUS";
+ AudioFormatTag2[AudioFormatTag2["AMR_WB"] = 11] = "AMR_WB";
+ AudioFormatTag2[AudioFormatTag2["G722"] = 12] = "G722";
  })(AudioFormatTag = exports.AudioFormatTag || (exports.AudioFormatTag = {}));
  var AudioStreamFormat = class {
  /**
@@ -2988,7 +3006,7 @@
  this.deviceId = deviceId;
  this.privStreams = {};
  this.privOutputChunkSize = _MicAudioSource.AUDIOFORMAT.avgBytesPerSec / 10;
- this.privId = audioSourceId ? audioSourceId : Exports_js_2.createNoDashGuid();
+ this.privId = audioSourceId ? audioSourceId : (0, Exports_js_2.createNoDashGuid)();
  this.privEvents = new Exports_js_2.EventSource();
  this.privMediaStream = mediaStream || null;
  this.privIsClosing = false;
@@ -3205,7 +3223,7 @@
  constructor(file, filename, audioSourceId) {
  this.privStreams = {};
  this.privHeaderEnd = 44;
- this.privId = audioSourceId ? audioSourceId : Exports_js_2.createNoDashGuid();
+ this.privId = audioSourceId ? audioSourceId : (0, Exports_js_2.createNoDashGuid)();
  this.privEvents = new Exports_js_2.EventSource();
  this.privSource = file;
  if (typeof window !== "undefined" && typeof Blob !== "undefined" && this.privSource instanceof Blob) {
@@ -3522,9 +3540,9 @@
  }
  });

- // (disabled):../../node_modules/microsoft-cognitiveservices-speech-sdk/node_modules/ws/browser.js
+ // (disabled):../../node_modules/ws/browser.js
  var require_browser = __commonJS({
- "(disabled):../../node_modules/microsoft-cognitiveservices-speech-sdk/node_modules/ws/browser.js"() {
+ "(disabled):../../node_modules/ws/browser.js"() {
  }
  });

@@ -3533,14 +3551,12 @@
  "../../node_modules/microsoft-cognitiveservices-speech-sdk/distrib/lib/src/common.browser/WebsocketMessageAdapter.js"(exports) {
  "use strict";
  var __createBinding = exports && exports.__createBinding || (Object.create ? function(o, m, k, k2) {
- if (k2 === void 0)
- k2 = k;
+ if (k2 === void 0) k2 = k;
  Object.defineProperty(o, k2, { enumerable: true, get: function() {
  return m[k];
  } });
  } : function(o, m, k, k2) {
- if (k2 === void 0)
- k2 = k;
+ if (k2 === void 0) k2 = k;
  o[k2] = m[k];
  });
  var __setModuleDefault = exports && exports.__setModuleDefault || (Object.create ? function(o, v) {
@@ -3549,13 +3565,10 @@
  o["default"] = v;
  });
  var __importStar = exports && exports.__importStar || function(mod) {
- if (mod && mod.__esModule)
- return mod;
+ if (mod && mod.__esModule) return mod;
  var result = {};
  if (mod != null) {
- for (var k in mod)
- if (k !== "default" && Object.hasOwnProperty.call(mod, k))
- __createBinding(result, mod, k);
+ for (var k in mod) if (k !== "default" && Object.prototype.hasOwnProperty.call(mod, k)) __createBinding(result, mod, k);
  }
  __setModuleDefault(result, mod);
  return result;
@@ -3589,6 +3602,7 @@
  this.privHeaders = headers;
  this.privEnableCompression = enableCompression;
  this.privHeaders[HeaderNames_js_1.HeaderNames.ConnectionId] = this.privConnectionId;
+ this.privHeaders.connectionId = this.privConnectionId;
  this.privLastErrorReceived = "";
  }
  get state() {
@@ -3609,9 +3623,6 @@
  this.privCertificateValidatedDeferral.resolve();
  this.privWebsocketClient = new WebSocket(this.privUri);
  } else {
- const options = { headers: this.privHeaders, perMessageDeflate: this.privEnableCompression };
- this.privCertificateValidatedDeferral.resolve();
- options.agent = this.getAgent();
  const uri = new URL(this.privUri);
  let protocol = uri.protocol;
  if (protocol?.toLocaleLowerCase() === "wss:") {
@@ -3619,8 +3630,15 @@
  } else if (protocol?.toLocaleLowerCase() === "ws:") {
  protocol = "http:";
  }
+ const options = { headers: this.privHeaders, perMessageDeflate: this.privEnableCompression, followRedirects: protocol.toLocaleLowerCase() === "https:" };
+ this.privCertificateValidatedDeferral.resolve();
+ options.agent = this.getAgent();
  options.agent.protocol = protocol;
  this.privWebsocketClient = new ws_1.default(this.privUri, options);
+ this.privWebsocketClient.on("redirect", (redirectUrl) => {
+ const event = new Exports_js_1.ConnectionRedirectEvent(this.privConnectionId, redirectUrl, this.privUri, `Getting redirect URL from endpoint ${this.privUri} with redirect URL '${redirectUrl}'`);
+ Exports_js_1.Events.instance.onEvent(event);
+ });
  }
  this.privWebsocketClient.binaryType = "arraybuffer";
  this.privReceivingMessageQueue = new Exports_js_1.Queue();
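
On Node, the connection now enables ws's followRedirects for secure upgrades and republishes each hop as the ConnectionRedirectEvent added earlier. A standalone sketch of the underlying ws behavior (the endpoint is a placeholder):

    const WebSocket = require("ws");

    // followRedirects tells ws to follow HTTP 3xx responses during the
    // upgrade handshake rather than failing the connection attempt.
    const socket = new WebSocket("wss://example.invalid/speech", { followRedirects: true });

    // ws emits "redirect" once per followed hop, before "open".
    socket.on("redirect", (url) => console.log("handshake redirected to", url));
    socket.on("open", () => socket.close());
    socket.on("error", (err) => console.error(err.message));
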
@@ -3783,7 +3801,7 @@
  };
  if (!!proxyInfo.UserName) {
  httpProxyOptions.headers = {
- "Proxy-Authentication": "Basic " + new Buffer(`${proxyInfo.UserName}:${proxyInfo.Password === void 0 ? "" : proxyInfo.Password}`).toString("base64")
+ "Proxy-Authentication": "Basic " + Buffer.from(`${proxyInfo.UserName}:${proxyInfo.Password === void 0 ? "" : proxyInfo.Password}`).toString("base64")
  };
  } else {
  httpProxyOptions.headers = {};
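
new Buffer(string) has been deprecated since Node 6 (DEP0005) because the same constructor also accepted a number and returned uninitialized memory; Buffer.from is the unambiguous replacement and produces identical bytes for string input:

    // Same base64 output, without the deprecated constructor.
    const credentials = "user:secret"; // hypothetical proxy credentials
    console.log(Buffer.from(credentials).toString("base64")); // "dXNlcjpzZWNyZXQ="

    // The hazard the deprecation removed: new Buffer(10) allocated 10
    // *uninitialized* bytes, while Buffer.from(10) throws a TypeError.
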
@@ -3877,7 +3895,7 @@
  }
  }
  this.privUri = uri + queryParams;
- this.privId = connectionId ? connectionId : Exports_js_1.createNoDashGuid();
+ this.privId = connectionId ? connectionId : (0, Exports_js_1.createNoDashGuid)();
  this.privConnectionMessageAdapter = new WebsocketMessageAdapter_js_1.WebsocketMessageAdapter(this.privUri, this.id, this.privMessageFormatter, proxyInfo, headers, enableCompression);
  }
  async dispose() {
@@ -3966,7 +3984,7 @@
  }
  }
  return this.privAudioNode.read().then((result) => {
- if (result && result.buffer) {
+ if (result && result.buffer && this.privBuffers) {
  this.privBuffers.push(new BufferEntry(result, this.privBufferSerial++, this.privBufferedBytes));
  this.privBufferedBytes += result.buffer.byteLength;
  }
@@ -4031,14 +4049,12 @@
  "../../node_modules/microsoft-cognitiveservices-speech-sdk/distrib/lib/src/sdk/Audio/AudioFileWriter.js"(exports) {
  "use strict";
  var __createBinding = exports && exports.__createBinding || (Object.create ? function(o, m, k, k2) {
- if (k2 === void 0)
- k2 = k;
+ if (k2 === void 0) k2 = k;
  Object.defineProperty(o, k2, { enumerable: true, get: function() {
  return m[k];
  } });
  } : function(o, m, k, k2) {
- if (k2 === void 0)
- k2 = k;
+ if (k2 === void 0) k2 = k;
  o[k2] = m[k];
  });
  var __setModuleDefault = exports && exports.__setModuleDefault || (Object.create ? function(o, v) {
@@ -4047,13 +4063,10 @@
  o["default"] = v;
  });
  var __importStar = exports && exports.__importStar || function(mod) {
- if (mod && mod.__esModule)
- return mod;
+ if (mod && mod.__esModule) return mod;
  var result = {};
  if (mod != null) {
- for (var k in mod)
- if (k !== "default" && Object.hasOwnProperty.call(mod, k))
- __createBinding(result, mod, k);
+ for (var k in mod) if (k !== "default" && Object.prototype.hasOwnProperty.call(mod, k)) __createBinding(result, mod, k);
  }
  __setModuleDefault(result, mod);
  return result;
@@ -4182,7 +4195,7 @@
  this.privFormat = format2;
  }
  this.privEvents = new Exports_js_2.EventSource();
- this.privId = Guid_js_1.createNoDashGuid();
+ this.privId = (0, Guid_js_1.createNoDashGuid)();
  this.privStream = new Exports_js_2.ChunkedArrayBufferStream(this.privFormat.avgBytesPerSec / 10);
  }
  /**
@@ -4313,7 +4326,7 @@
  this.privFormat = format2;
  }
  this.privEvents = new Exports_js_2.EventSource();
- this.privId = Guid_js_1.createNoDashGuid();
+ this.privId = (0, Guid_js_1.createNoDashGuid)();
  this.privCallback = callback;
  this.privIsClosed = false;
  this.privBufferSize = this.privFormat.avgBytesPerSec / 10;
@@ -4452,6 +4465,8 @@
  SpeechSynthesisOutputFormat2[SpeechSynthesisOutputFormat2["Riff22050Hz16BitMonoPcm"] = 34] = "Riff22050Hz16BitMonoPcm";
  SpeechSynthesisOutputFormat2[SpeechSynthesisOutputFormat2["Raw44100Hz16BitMonoPcm"] = 35] = "Raw44100Hz16BitMonoPcm";
  SpeechSynthesisOutputFormat2[SpeechSynthesisOutputFormat2["Riff44100Hz16BitMonoPcm"] = 36] = "Riff44100Hz16BitMonoPcm";
+ SpeechSynthesisOutputFormat2[SpeechSynthesisOutputFormat2["AmrWb16000Hz"] = 37] = "AmrWb16000Hz";
+ SpeechSynthesisOutputFormat2[SpeechSynthesisOutputFormat2["G72216Khz64Kbps"] = 38] = "G72216Khz64Kbps";
  })(SpeechSynthesisOutputFormat = exports.SpeechSynthesisOutputFormat || (exports.SpeechSynthesisOutputFormat = {}));
  }
  });
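
Two new synthesis output formats land in the public enum: AMR-WB at 16 kHz and G.722 at 64 kbps. Selecting one is the standard config assignment; a hedged sketch (key/region placeholders, and assuming the underlying Speech SDK is addressable directly rather than through this bundle):

    const sdk = require("microsoft-cognitiveservices-speech-sdk");
    const config = sdk.SpeechConfig.fromSubscription("<key>", "<region>");
    // New member; the table later in this diff maps it to "amr-wb-16000hz".
    config.speechSynthesisOutputFormat = sdk.SpeechSynthesisOutputFormat.AmrWb16000Hz;
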
@@ -4571,6 +4586,10 @@
  return new _AudioOutputFormatImpl(AudioStreamFormat_js_1.AudioFormatTag.PCM, 1, 44100, 88200, 2, 16, speechSynthesisOutputFormatString, speechSynthesisOutputFormatString, false);
  case "riff-44100hz-16bit-mono-pcm":
  return new _AudioOutputFormatImpl(AudioStreamFormat_js_1.AudioFormatTag.PCM, 1, 44100, 88200, 2, 16, speechSynthesisOutputFormatString, "raw-44100hz-16bit-mono-pcm", true);
+ case "amr-wb-16000h":
+ return new _AudioOutputFormatImpl(AudioStreamFormat_js_1.AudioFormatTag.AMR_WB, 1, 16e3, 3052, 2, 16, speechSynthesisOutputFormatString, speechSynthesisOutputFormatString, false);
+ case "g722-16khz-64kbps":
+ return new _AudioOutputFormatImpl(AudioStreamFormat_js_1.AudioFormatTag.G722, 1, 16e3, 8e3, 2, 16, speechSynthesisOutputFormatString, speechSynthesisOutputFormatString, false);
  case "riff-16khz-16bit-mono-pcm":
  default:
  return new _AudioOutputFormatImpl(AudioStreamFormat_js_1.AudioFormatTag.PCM, 1, 16e3, 32e3, 2, 16, "riff-16khz-16bit-mono-pcm", "raw-16khz-16bit-mono-pcm", true);
@@ -4677,7 +4696,9 @@
  [SpeechSynthesisOutputFormat_js_1.SpeechSynthesisOutputFormat.Raw22050Hz16BitMonoPcm]: "raw-22050hz-16bit-mono-pcm",
  [SpeechSynthesisOutputFormat_js_1.SpeechSynthesisOutputFormat.Riff22050Hz16BitMonoPcm]: "riff-22050hz-16bit-mono-pcm",
  [SpeechSynthesisOutputFormat_js_1.SpeechSynthesisOutputFormat.Raw44100Hz16BitMonoPcm]: "raw-44100hz-16bit-mono-pcm",
- [SpeechSynthesisOutputFormat_js_1.SpeechSynthesisOutputFormat.Riff44100Hz16BitMonoPcm]: "riff-44100hz-16bit-mono-pcm"
+ [SpeechSynthesisOutputFormat_js_1.SpeechSynthesisOutputFormat.Riff44100Hz16BitMonoPcm]: "riff-44100hz-16bit-mono-pcm",
+ [SpeechSynthesisOutputFormat_js_1.SpeechSynthesisOutputFormat.AmrWb16000Hz]: "amr-wb-16000hz",
+ [SpeechSynthesisOutputFormat_js_1.SpeechSynthesisOutputFormat.G72216Khz64Kbps]: "g722-16khz-64kbps"
  };
  }
  });
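
One detail worth flagging between these two new tables: the construction switch above matches the string "amr-wb-16000h", while the enum-to-string map emits "amr-wb-16000hz" for AmrWb16000Hz. As written, the AMR-WB string would miss its case and fall through to the riff-16khz PCM default. A reduced illustration of the lookup chain:

    const enumToString = { 37: "amr-wb-16000hz" };              // table in this hunk
    const switchCases = ["amr-wb-16000h", "g722-16khz-64kbps"]; // cases added above
    console.log(switchCases.includes(enumToString[37]));        // false -> default case
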
@@ -4731,7 +4752,7 @@
  */
  constructor() {
  super();
- this.privId = Exports_js_1.createNoDashGuid();
+ this.privId = (0, Exports_js_1.createNoDashGuid)();
  this.privStream = new Exports_js_1.Stream();
  }
  /**
@@ -4866,7 +4887,7 @@
  */
  constructor(callback) {
  super();
- this.privId = Exports_js_1.createNoDashGuid();
+ this.privId = (0, Exports_js_1.createNoDashGuid)();
  this.privCallback = callback;
  }
  // eslint-disable-next-line @typescript-eslint/no-empty-function
@@ -6043,31 +6064,30 @@
  return speechImpl;
  }
  /**
- * Creates an instance of the speech config with specified endpoint and subscription key.
- * This method is intended only for users who use a non-standard service endpoint or parameters.
- * Note: Please use your LanguageUnderstanding subscription key in case you want to use the Intent recognizer.
- * Note: The query parameters specified in the endpoint URL are not changed, even if they are set by any other APIs.
- * For example, if language is defined in the uri as query parameter "language=de-DE", and also set by
- * SpeechConfig.speechRecognitionLanguage = "en-US", the language setting in uri takes precedence,
- * and the effective language is "de-DE". Only the parameters that are not specified in the
- * endpoint URL can be set by other APIs.
- * Note: To use authorization token with fromEndpoint, pass an empty string to the subscriptionKey in the
- * fromEndpoint method, and then set authorizationToken="token" on the created SpeechConfig instance to
- * use the authorization token.
- * @member SpeechConfig.fromEndpoint
- * @function
- * @public
- * @param {URL} endpoint - The service endpoint to connect to.
- * @param {string} subscriptionKey - The subscription key. If a subscription key is not specified, an authorization token must be set.
- * @returns {SpeechConfig} A speech factory instance.
+ * Internal implementation of fromEndpoint() overloads. Accepts either a subscription key or a TokenCredential.
+ * @private
  */
- static fromEndpoint(endpoint, subscriptionKey) {
+ static fromEndpoint(endpoint, auth) {
  Contracts_js_1.Contracts.throwIfNull(endpoint, "endpoint");
- const speechImpl = new SpeechConfigImpl();
- speechImpl.setProperty(Exports_js_2.PropertyId.SpeechServiceConnection_Endpoint, endpoint.href);
- if (void 0 !== subscriptionKey) {
- speechImpl.setProperty(Exports_js_2.PropertyId.SpeechServiceConnection_Key, subscriptionKey);
+ const isValidString = typeof auth === "string" && auth.trim().length > 0;
+ const isTokenCredential = typeof auth === "object" && auth !== null && typeof auth.getToken === "function";
+ const isKeyCredential = typeof auth === "object" && auth !== null && typeof auth.key === "string";
+ if (auth !== void 0 && !isValidString && !isTokenCredential && !isKeyCredential) {
+ throw new Error("Invalid 'auth' parameter: expected a non-empty API key string, a TokenCredential, or a KeyCredential.");
+ }
+ let speechImpl;
+ if (typeof auth === "string") {
+ speechImpl = new SpeechConfigImpl();
+ speechImpl.setProperty(Exports_js_2.PropertyId.SpeechServiceConnection_Key, auth);
+ } else if (typeof auth === "object" && typeof auth.getToken === "function") {
+ speechImpl = new SpeechConfigImpl(auth);
+ } else if (typeof auth === "object" && typeof auth.key === "string") {
+ speechImpl = new SpeechConfigImpl();
+ speechImpl.setProperty(Exports_js_2.PropertyId.SpeechServiceConnection_Key, auth.key);
+ } else {
+ speechImpl = new SpeechConfigImpl();
  }
+ speechImpl.setProperty(Exports_js_2.PropertyId.SpeechServiceConnection_Endpoint, endpoint.href);
  return speechImpl;
  }
  /**
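
fromEndpoint now accepts, besides a key string, anything shaped like an Azure TokenCredential (a getToken method) or KeyCredential (a key property). A hedged sketch with @azure/identity; the endpoint URL is a placeholder:

    const sdk = require("microsoft-cognitiveservices-speech-sdk");
    const { DefaultAzureCredential } = require("@azure/identity");

    // The credential is detected via its getToken() method; the recognizer
    // later requests the "https://cognitiveservices.azure.com/.default" scope.
    const config = sdk.SpeechConfig.fromEndpoint(
      new URL("wss://<your-resource>.cognitiveservices.azure.com/stt/speech"), // placeholder
      new DefaultAzureCredential()
    );
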
@@ -6089,6 +6109,7 @@
  Contracts_js_1.Contracts.throwIfNull(hostName, "hostName");
  const speechImpl = new SpeechConfigImpl();
  speechImpl.setProperty(Exports_js_2.PropertyId.SpeechServiceConnection_Host, hostName.protocol + "//" + hostName.hostname + (hostName.port === "" ? "" : ":" + hostName.port));
+ speechImpl.setProperty(Exports_js_2.PropertyId.SpeechServiceConnection_RecognitionEndpointVersion, "1");
  if (void 0 !== subscriptionKey) {
  speechImpl.setProperty(Exports_js_2.PropertyId.SpeechServiceConnection_Key, subscriptionKey);
  }
@@ -6131,11 +6152,12 @@
  };
  exports.SpeechConfig = SpeechConfig3;
  var SpeechConfigImpl = class _SpeechConfigImpl extends SpeechConfig3 {
- constructor() {
+ constructor(tokenCredential) {
  super();
  this.privProperties = new Exports_js_2.PropertyCollection();
  this.speechRecognitionLanguage = "en-US";
  this.outputFormat = Exports_js_2.OutputFormat.Simple;
+ this.privTokenCredential = tokenCredential;
  }
  get properties() {
  return this.privProperties;
@@ -6179,6 +6201,9 @@
  set endpointId(value) {
  this.privProperties.setProperty(Exports_js_2.PropertyId.SpeechServiceConnection_EndpointId, value);
  }
+ get tokenCredential() {
+ return this.privTokenCredential;
+ }
  setProperty(name, value) {
  Contracts_js_1.Contracts.throwIfNull(value, "value");
  this.privProperties.setProperty(name, value);
@@ -6205,12 +6230,13 @@
  }
  requestWordLevelTimestamps() {
  this.privProperties.setProperty(Exports_js_2.PropertyId.SpeechServiceResponse_RequestWordLevelTimestamps, "true");
+ this.privProperties.setProperty(Exports_js_1.OutputFormatPropertyName, Exports_js_2.OutputFormat[Exports_js_2.OutputFormat.Detailed]);
  }
  enableDictation() {
  this.privProperties.setProperty(Exports_js_1.ForceDictationPropertyName, "true");
  }
  clone() {
- const ret = new _SpeechConfigImpl();
+ const ret = new _SpeechConfigImpl(this.tokenCredential);
  ret.privProperties = this.privProperties.clone();
  return ret;
  }
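
requestWordLevelTimestamps() now also forces the output format property to Detailed, which word-level timing data requires, so callers no longer need to set the format separately:

    const sdk = require("microsoft-cognitiveservices-speech-sdk");
    const config = sdk.SpeechConfig.fromSubscription("<key>", "<region>");
    config.requestWordLevelTimestamps(); // now also implies Detailed output format
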
@@ -6320,38 +6346,40 @@
  return speechImpl;
  }
  /**
- * Creates an instance of the speech translation config with specified endpoint and subscription key.
- * This method is intended only for users who use a non-standard service endpoint or paramters.
- * Note: The query properties specified in the endpoint URL are not changed, even if they are
- * set by any other APIs. For example, if language is defined in the uri as query parameter
- * "language=de-DE", and also set by the speechRecognitionLanguage property, the language
- * setting in uri takes precedence, and the effective language is "de-DE".
- * Only the properties that are not specified in the endpoint URL can be set by other APIs.
- * Note: To use authorization token with fromEndpoint, pass an empty string to the subscriptionKey in the
- * fromEndpoint method, and then set authorizationToken="token" on the created SpeechConfig instance to
- * use the authorization token.
- * @member SpeechTranslationConfig.fromEndpoint
- * @function
- * @public
- * @param {URL} endpoint - The service endpoint to connect to.
- * @param {string} subscriptionKey - The subscription key.
- * @returns {SpeechTranslationConfig} A speech config instance.
+ * Internal implementation of fromEndpoint() overloads. Accepts either a subscription key or a TokenCredential.
+ * @private
  */
- static fromEndpoint(endpoint, subscriptionKey) {
+ static fromEndpoint(endpoint, auth) {
  Contracts_js_1.Contracts.throwIfNull(endpoint, "endpoint");
- Contracts_js_1.Contracts.throwIfNull(subscriptionKey, "subscriptionKey");
- const ret = new SpeechTranslationConfigImpl();
- ret.properties.setProperty(Exports_js_2.PropertyId.SpeechServiceConnection_Endpoint, endpoint.href);
- ret.properties.setProperty(Exports_js_2.PropertyId.SpeechServiceConnection_Key, subscriptionKey);
- return ret;
+ const isValidString = typeof auth === "string" && auth.trim().length > 0;
+ const isTokenCredential = typeof auth === "object" && auth !== null && typeof auth.getToken === "function";
+ const isKeyCredential = typeof auth === "object" && auth !== null && typeof auth.key === "string";
+ if (auth !== void 0 && !isValidString && !isTokenCredential && !isKeyCredential) {
+ throw new Error("Invalid 'auth' parameter: expected a non-empty API key string, a TokenCredential, or a KeyCredential.");
+ }
+ let speechImpl;
+ if (typeof auth === "string") {
+ speechImpl = new SpeechTranslationConfigImpl();
+ speechImpl.setProperty(Exports_js_2.PropertyId.SpeechServiceConnection_Key, auth);
+ } else if (typeof auth === "object" && typeof auth.getToken === "function") {
+ speechImpl = new SpeechTranslationConfigImpl(auth);
+ } else if (typeof auth === "object" && typeof auth.key === "string") {
+ speechImpl = new SpeechTranslationConfigImpl();
+ speechImpl.setProperty(Exports_js_2.PropertyId.SpeechServiceConnection_Key, auth.key);
+ } else {
+ speechImpl = new SpeechTranslationConfigImpl();
+ }
+ speechImpl.setProperty(Exports_js_2.PropertyId.SpeechServiceConnection_Endpoint, endpoint.href);
+ return speechImpl;
  }
  };
  exports.SpeechTranslationConfig = SpeechTranslationConfig;
  var SpeechTranslationConfigImpl = class extends SpeechTranslationConfig {
- constructor() {
+ constructor(tokenCredential) {
  super();
  this.privSpeechProperties = new Exports_js_2.PropertyCollection();
  this.outputFormat = Exports_js_2.OutputFormat.Simple;
+ this.privTokenCredential = tokenCredential;
  }
  /**
  * Gets/Sets the authorization token.
@@ -6490,6 +6518,9 @@
  get region() {
  return this.privSpeechProperties.getProperty(Exports_js_2.PropertyId.SpeechServiceConnection_Region);
  }
+ get tokenCredential() {
+ return this.privTokenCredential;
+ }
  setProxy(proxyHostName, proxyPort, proxyUserName, proxyPassword) {
  this.setProperty(Exports_js_2.PropertyId[Exports_js_2.PropertyId.SpeechServiceConnection_ProxyHostName], proxyHostName);
  this.setProperty(Exports_js_2.PropertyId[Exports_js_2.PropertyId.SpeechServiceConnection_ProxyPort], proxyPort);
@@ -6705,66 +6736,70 @@
  PropertyId3[PropertyId3["SpeechServiceConnection_TranslationToLanguages"] = 6] = "SpeechServiceConnection_TranslationToLanguages";
  PropertyId3[PropertyId3["SpeechServiceConnection_TranslationVoice"] = 7] = "SpeechServiceConnection_TranslationVoice";
  PropertyId3[PropertyId3["SpeechServiceConnection_TranslationFeatures"] = 8] = "SpeechServiceConnection_TranslationFeatures";
- PropertyId3[PropertyId3["SpeechServiceConnection_IntentRegion"] = 9] = "SpeechServiceConnection_IntentRegion";
- PropertyId3[PropertyId3["SpeechServiceConnection_ProxyHostName"] = 10] = "SpeechServiceConnection_ProxyHostName";
- PropertyId3[PropertyId3["SpeechServiceConnection_ProxyPort"] = 11] = "SpeechServiceConnection_ProxyPort";
- PropertyId3[PropertyId3["SpeechServiceConnection_ProxyUserName"] = 12] = "SpeechServiceConnection_ProxyUserName";
- PropertyId3[PropertyId3["SpeechServiceConnection_ProxyPassword"] = 13] = "SpeechServiceConnection_ProxyPassword";
- PropertyId3[PropertyId3["SpeechServiceConnection_RecoMode"] = 14] = "SpeechServiceConnection_RecoMode";
- PropertyId3[PropertyId3["SpeechServiceConnection_RecoLanguage"] = 15] = "SpeechServiceConnection_RecoLanguage";
- PropertyId3[PropertyId3["Speech_SessionId"] = 16] = "Speech_SessionId";
- PropertyId3[PropertyId3["SpeechServiceConnection_SynthLanguage"] = 17] = "SpeechServiceConnection_SynthLanguage";
- PropertyId3[PropertyId3["SpeechServiceConnection_SynthVoice"] = 18] = "SpeechServiceConnection_SynthVoice";
- PropertyId3[PropertyId3["SpeechServiceConnection_SynthOutputFormat"] = 19] = "SpeechServiceConnection_SynthOutputFormat";
- PropertyId3[PropertyId3["SpeechServiceConnection_AutoDetectSourceLanguages"] = 20] = "SpeechServiceConnection_AutoDetectSourceLanguages";
- PropertyId3[PropertyId3["SpeechServiceResponse_RequestDetailedResultTrueFalse"] = 21] = "SpeechServiceResponse_RequestDetailedResultTrueFalse";
- PropertyId3[PropertyId3["SpeechServiceResponse_RequestProfanityFilterTrueFalse"] = 22] = "SpeechServiceResponse_RequestProfanityFilterTrueFalse";
- PropertyId3[PropertyId3["SpeechServiceResponse_JsonResult"] = 23] = "SpeechServiceResponse_JsonResult";
- PropertyId3[PropertyId3["SpeechServiceResponse_JsonErrorDetails"] = 24] = "SpeechServiceResponse_JsonErrorDetails";
- PropertyId3[PropertyId3["CancellationDetails_Reason"] = 25] = "CancellationDetails_Reason";
- PropertyId3[PropertyId3["CancellationDetails_ReasonText"] = 26] = "CancellationDetails_ReasonText";
- PropertyId3[PropertyId3["CancellationDetails_ReasonDetailedText"] = 27] = "CancellationDetails_ReasonDetailedText";
- PropertyId3[PropertyId3["LanguageUnderstandingServiceResponse_JsonResult"] = 28] = "LanguageUnderstandingServiceResponse_JsonResult";
- PropertyId3[PropertyId3["SpeechServiceConnection_Url"] = 29] = "SpeechServiceConnection_Url";
- PropertyId3[PropertyId3["SpeechServiceConnection_InitialSilenceTimeoutMs"] = 30] = "SpeechServiceConnection_InitialSilenceTimeoutMs";
- PropertyId3[PropertyId3["SpeechServiceConnection_EndSilenceTimeoutMs"] = 31] = "SpeechServiceConnection_EndSilenceTimeoutMs";
- PropertyId3[PropertyId3["Speech_SegmentationSilenceTimeoutMs"] = 32] = "Speech_SegmentationSilenceTimeoutMs";
- PropertyId3[PropertyId3["SpeechServiceConnection_EnableAudioLogging"] = 33] = "SpeechServiceConnection_EnableAudioLogging";
- PropertyId3[PropertyId3["SpeechServiceConnection_LanguageIdMode"] = 34] = "SpeechServiceConnection_LanguageIdMode";
- PropertyId3[PropertyId3["SpeechServiceConnection_RecognitionEndpointVersion"] = 35] = "SpeechServiceConnection_RecognitionEndpointVersion";
- PropertyId3[PropertyId3["SpeechServiceConnection_SpeakerIdMode"] = 36] = "SpeechServiceConnection_SpeakerIdMode";
- PropertyId3[PropertyId3["SpeechServiceResponse_ProfanityOption"] = 37] = "SpeechServiceResponse_ProfanityOption";
- PropertyId3[PropertyId3["SpeechServiceResponse_PostProcessingOption"] = 38] = "SpeechServiceResponse_PostProcessingOption";
- PropertyId3[PropertyId3["SpeechServiceResponse_RequestWordLevelTimestamps"] = 39] = "SpeechServiceResponse_RequestWordLevelTimestamps";
- PropertyId3[PropertyId3["SpeechServiceResponse_StablePartialResultThreshold"] = 40] = "SpeechServiceResponse_StablePartialResultThreshold";
- PropertyId3[PropertyId3["SpeechServiceResponse_OutputFormatOption"] = 41] = "SpeechServiceResponse_OutputFormatOption";
- PropertyId3[PropertyId3["SpeechServiceResponse_TranslationRequestStablePartialResult"] = 42] = "SpeechServiceResponse_TranslationRequestStablePartialResult";
- PropertyId3[PropertyId3["SpeechServiceResponse_RequestWordBoundary"] = 43] = "SpeechServiceResponse_RequestWordBoundary";
- PropertyId3[PropertyId3["SpeechServiceResponse_RequestPunctuationBoundary"] = 44] = "SpeechServiceResponse_RequestPunctuationBoundary";
- PropertyId3[PropertyId3["SpeechServiceResponse_RequestSentenceBoundary"] = 45] = "SpeechServiceResponse_RequestSentenceBoundary";
- PropertyId3[PropertyId3["Conversation_ApplicationId"] = 46] = "Conversation_ApplicationId";
- PropertyId3[PropertyId3["Conversation_DialogType"] = 47] = "Conversation_DialogType";
- PropertyId3[PropertyId3["Conversation_Initial_Silence_Timeout"] = 48] = "Conversation_Initial_Silence_Timeout";
- PropertyId3[PropertyId3["Conversation_From_Id"] = 49] = "Conversation_From_Id";
- PropertyId3[PropertyId3["Conversation_Conversation_Id"] = 50] = "Conversation_Conversation_Id";
- PropertyId3[PropertyId3["Conversation_Custom_Voice_Deployment_Ids"] = 51] = "Conversation_Custom_Voice_Deployment_Ids";
- PropertyId3[PropertyId3["Conversation_Speech_Activity_Template"] = 52] = "Conversation_Speech_Activity_Template";
- PropertyId3[PropertyId3["Conversation_Request_Bot_Status_Messages"] = 53] = "Conversation_Request_Bot_Status_Messages";
- PropertyId3[PropertyId3["Conversation_Agent_Connection_Id"] = 54] = "Conversation_Agent_Connection_Id";
- PropertyId3[PropertyId3["SpeechServiceConnection_Host"] = 55] = "SpeechServiceConnection_Host";
- PropertyId3[PropertyId3["ConversationTranslator_Host"] = 56] = "ConversationTranslator_Host";
- PropertyId3[PropertyId3["ConversationTranslator_Name"] = 57] = "ConversationTranslator_Name";
- PropertyId3[PropertyId3["ConversationTranslator_CorrelationId"] = 58] = "ConversationTranslator_CorrelationId";
- PropertyId3[PropertyId3["ConversationTranslator_Token"] = 59] = "ConversationTranslator_Token";
- PropertyId3[PropertyId3["PronunciationAssessment_ReferenceText"] = 60] = "PronunciationAssessment_ReferenceText";
- PropertyId3[PropertyId3["PronunciationAssessment_GradingSystem"] = 61] = "PronunciationAssessment_GradingSystem";
- PropertyId3[PropertyId3["PronunciationAssessment_Granularity"] = 62] = "PronunciationAssessment_Granularity";
- PropertyId3[PropertyId3["PronunciationAssessment_EnableMiscue"] = 63] = "PronunciationAssessment_EnableMiscue";
- PropertyId3[PropertyId3["PronunciationAssessment_Json"] = 64] = "PronunciationAssessment_Json";
- PropertyId3[PropertyId3["PronunciationAssessment_Params"] = 65] = "PronunciationAssessment_Params";
- PropertyId3[PropertyId3["SpeakerRecognition_Api_Version"] = 66] = "SpeakerRecognition_Api_Version";
- PropertyId3[PropertyId3["WebWorkerLoadType"] = 67] = "WebWorkerLoadType";
- PropertyId3[PropertyId3["TalkingAvatarService_WebRTC_SDP"] = 68] = "TalkingAvatarService_WebRTC_SDP";
+ PropertyId3[PropertyId3["SpeechServiceConnection_TranslationCategoryId"] = 9] = "SpeechServiceConnection_TranslationCategoryId";
+ PropertyId3[PropertyId3["SpeechServiceConnection_IntentRegion"] = 10] = "SpeechServiceConnection_IntentRegion";
+ PropertyId3[PropertyId3["SpeechServiceConnection_ProxyHostName"] = 11] = "SpeechServiceConnection_ProxyHostName";
+ PropertyId3[PropertyId3["SpeechServiceConnection_ProxyPort"] = 12] = "SpeechServiceConnection_ProxyPort";
+ PropertyId3[PropertyId3["SpeechServiceConnection_ProxyUserName"] = 13] = "SpeechServiceConnection_ProxyUserName";
+ PropertyId3[PropertyId3["SpeechServiceConnection_ProxyPassword"] = 14] = "SpeechServiceConnection_ProxyPassword";
+ PropertyId3[PropertyId3["SpeechServiceConnection_RecoMode"] = 15] = "SpeechServiceConnection_RecoMode";
+ PropertyId3[PropertyId3["SpeechServiceConnection_RecoLanguage"] = 16] = "SpeechServiceConnection_RecoLanguage";
+ PropertyId3[PropertyId3["Speech_SessionId"] = 17] = "Speech_SessionId";
+ PropertyId3[PropertyId3["SpeechServiceConnection_SynthLanguage"] = 18] = "SpeechServiceConnection_SynthLanguage";
+ PropertyId3[PropertyId3["SpeechServiceConnection_SynthVoice"] = 19] = "SpeechServiceConnection_SynthVoice";
+ PropertyId3[PropertyId3["SpeechServiceConnection_SynthOutputFormat"] = 20] = "SpeechServiceConnection_SynthOutputFormat";
+ PropertyId3[PropertyId3["SpeechServiceConnection_AutoDetectSourceLanguages"] = 21] = "SpeechServiceConnection_AutoDetectSourceLanguages";
+ PropertyId3[PropertyId3["SpeechServiceResponse_RequestDetailedResultTrueFalse"] = 22] = "SpeechServiceResponse_RequestDetailedResultTrueFalse";
+ PropertyId3[PropertyId3["SpeechServiceResponse_RequestProfanityFilterTrueFalse"] = 23] = "SpeechServiceResponse_RequestProfanityFilterTrueFalse";
+ PropertyId3[PropertyId3["SpeechServiceResponse_JsonResult"] = 24] = "SpeechServiceResponse_JsonResult";
+ PropertyId3[PropertyId3["SpeechServiceResponse_JsonErrorDetails"] = 25] = "SpeechServiceResponse_JsonErrorDetails";
+ PropertyId3[PropertyId3["CancellationDetails_Reason"] = 26] = "CancellationDetails_Reason";
+ PropertyId3[PropertyId3["CancellationDetails_ReasonText"] = 27] = "CancellationDetails_ReasonText";
+ PropertyId3[PropertyId3["CancellationDetails_ReasonDetailedText"] = 28] = "CancellationDetails_ReasonDetailedText";
+ PropertyId3[PropertyId3["LanguageUnderstandingServiceResponse_JsonResult"] = 29] = "LanguageUnderstandingServiceResponse_JsonResult";
+ PropertyId3[PropertyId3["SpeechServiceConnection_Url"] = 30] = "SpeechServiceConnection_Url";
+ PropertyId3[PropertyId3["SpeechServiceConnection_InitialSilenceTimeoutMs"] = 31] = "SpeechServiceConnection_InitialSilenceTimeoutMs";
+ PropertyId3[PropertyId3["SpeechServiceConnection_EndSilenceTimeoutMs"] = 32] = "SpeechServiceConnection_EndSilenceTimeoutMs";
+ PropertyId3[PropertyId3["Speech_SegmentationSilenceTimeoutMs"] = 33] = "Speech_SegmentationSilenceTimeoutMs";
+ PropertyId3[PropertyId3["Speech_SegmentationMaximumTimeMs"] = 34] = "Speech_SegmentationMaximumTimeMs";
+ PropertyId3[PropertyId3["Speech_SegmentationStrategy"] = 35] = "Speech_SegmentationStrategy";
+ PropertyId3[PropertyId3["SpeechServiceConnection_EnableAudioLogging"] = 36] = "SpeechServiceConnection_EnableAudioLogging";
+ PropertyId3[PropertyId3["SpeechServiceConnection_LanguageIdMode"] = 37] = "SpeechServiceConnection_LanguageIdMode";
+ PropertyId3[PropertyId3["SpeechServiceConnection_RecognitionEndpointVersion"] = 38] = "SpeechServiceConnection_RecognitionEndpointVersion";
+ PropertyId3[PropertyId3["SpeechServiceConnection_SpeakerIdMode"] = 39] = "SpeechServiceConnection_SpeakerIdMode";
+ PropertyId3[PropertyId3["SpeechServiceResponse_ProfanityOption"] = 40] = "SpeechServiceResponse_ProfanityOption";
+ PropertyId3[PropertyId3["SpeechServiceResponse_PostProcessingOption"] = 41] = "SpeechServiceResponse_PostProcessingOption";
+ PropertyId3[PropertyId3["SpeechServiceResponse_RequestWordLevelTimestamps"] = 42] = "SpeechServiceResponse_RequestWordLevelTimestamps";
+ PropertyId3[PropertyId3["SpeechServiceResponse_StablePartialResultThreshold"] = 43] = "SpeechServiceResponse_StablePartialResultThreshold";
+ PropertyId3[PropertyId3["SpeechServiceResponse_OutputFormatOption"] = 44] = "SpeechServiceResponse_OutputFormatOption";
+ PropertyId3[PropertyId3["SpeechServiceResponse_TranslationRequestStablePartialResult"] = 45] = "SpeechServiceResponse_TranslationRequestStablePartialResult";
+ PropertyId3[PropertyId3["SpeechServiceResponse_RequestWordBoundary"] = 46] = "SpeechServiceResponse_RequestWordBoundary";
+ PropertyId3[PropertyId3["SpeechServiceResponse_RequestPunctuationBoundary"] = 47] = "SpeechServiceResponse_RequestPunctuationBoundary";
+ PropertyId3[PropertyId3["SpeechServiceResponse_RequestSentenceBoundary"] = 48] = "SpeechServiceResponse_RequestSentenceBoundary";
+ PropertyId3[PropertyId3["SpeechServiceResponse_DiarizeIntermediateResults"] = 49] = "SpeechServiceResponse_DiarizeIntermediateResults";
+ PropertyId3[PropertyId3["Conversation_ApplicationId"] = 50] = "Conversation_ApplicationId";
+ PropertyId3[PropertyId3["Conversation_DialogType"] = 51] = "Conversation_DialogType";
+ PropertyId3[PropertyId3["Conversation_Initial_Silence_Timeout"] = 52] = "Conversation_Initial_Silence_Timeout";
+ PropertyId3[PropertyId3["Conversation_From_Id"] = 53] = "Conversation_From_Id";
+ PropertyId3[PropertyId3["Conversation_Conversation_Id"] = 54] = "Conversation_Conversation_Id";
+ PropertyId3[PropertyId3["Conversation_Custom_Voice_Deployment_Ids"] = 55] = "Conversation_Custom_Voice_Deployment_Ids";
+ PropertyId3[PropertyId3["Conversation_Speech_Activity_Template"] = 56] = "Conversation_Speech_Activity_Template";
+ PropertyId3[PropertyId3["Conversation_Request_Bot_Status_Messages"] = 57] = "Conversation_Request_Bot_Status_Messages";
+ PropertyId3[PropertyId3["Conversation_Agent_Connection_Id"] = 58] = "Conversation_Agent_Connection_Id";
+ PropertyId3[PropertyId3["SpeechServiceConnection_Host"] = 59] = "SpeechServiceConnection_Host";
+ PropertyId3[PropertyId3["ConversationTranslator_Host"] = 60] = "ConversationTranslator_Host";
+ PropertyId3[PropertyId3["ConversationTranslator_Name"] = 61] = "ConversationTranslator_Name";
+ PropertyId3[PropertyId3["ConversationTranslator_CorrelationId"] = 62] = "ConversationTranslator_CorrelationId";
+ PropertyId3[PropertyId3["ConversationTranslator_Token"] = 63] = "ConversationTranslator_Token";
+ PropertyId3[PropertyId3["PronunciationAssessment_ReferenceText"] = 64] = "PronunciationAssessment_ReferenceText";
+ PropertyId3[PropertyId3["PronunciationAssessment_GradingSystem"] = 65] = "PronunciationAssessment_GradingSystem";
+ PropertyId3[PropertyId3["PronunciationAssessment_Granularity"] = 66] = "PronunciationAssessment_Granularity";
+ PropertyId3[PropertyId3["PronunciationAssessment_EnableMiscue"] = 67] = "PronunciationAssessment_EnableMiscue";
+ PropertyId3[PropertyId3["PronunciationAssessment_Json"] = 68] = "PronunciationAssessment_Json";
+ PropertyId3[PropertyId3["PronunciationAssessment_Params"] = 69] = "PronunciationAssessment_Params";
+ PropertyId3[PropertyId3["SpeakerRecognition_Api_Version"] = 70] = "SpeakerRecognition_Api_Version";
+ PropertyId3[PropertyId3["WebWorkerLoadType"] = 71] = "WebWorkerLoadType";
+ PropertyId3[PropertyId3["TalkingAvatarService_WebRTC_SDP"] = 72] = "TalkingAvatarService_WebRTC_SDP";
  })(PropertyId2 = exports.PropertyId || (exports.PropertyId = {}));
  }
  });
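
Four PropertyId members are new (SpeechServiceConnection_TranslationCategoryId, Speech_SegmentationMaximumTimeMs, Speech_SegmentationStrategy, SpeechServiceResponse_DiarizeIntermediateResults), and each insertion shifts the numeric value of every later member — IntentRegion moves from 9 to 10, TalkingAvatarService_WebRTC_SDP from 68 to 72. Code that persists or compares the raw numbers across versions will mis-resolve; name-based access is unaffected:

    // Assumes the underlying Speech SDK is addressable directly.
    const { PropertyId } = require("microsoft-cognitiveservices-speech-sdk");
    console.log(PropertyId.SpeechServiceConnection_ProxyHostName);             // 10 before, 11 after this bump
    console.log(PropertyId[PropertyId.SpeechServiceConnection_ProxyHostName]); // name round-trips either way
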
@@ -6787,11 +6822,12 @@
  * @param {PropertyCollection} properties - A set of properties to set on the recognizer
  * @param {IConnectionFactory} connectionFactory - The factory class used to create a custom IConnection for the recognizer
  */
- constructor(audioConfig, properties, connectionFactory) {
+ constructor(audioConfig, properties, connectionFactory, tokenCredential) {
  this.audioConfig = audioConfig !== void 0 ? audioConfig : Exports_js_3.AudioConfig.fromDefaultMicrophoneInput();
  this.privDisposed = false;
  this.privProperties = properties.clone();
  this.privConnectionFactory = connectionFactory;
+ this.tokenCredential = tokenCredential;
  this.implCommonRecognizerSetup();
  }
  /**
@@ -6802,7 +6838,7 @@
  */
  close(cb, errorCb) {
  Contracts_js_1.Contracts.throwIfDisposed(this.privDisposed);
- Exports_js_2.marshalPromiseToCallbacks(this.dispose(true), cb, errorCb);
+ (0, Exports_js_2.marshalPromiseToCallbacks)(this.dispose(true), cb, errorCb);
  }
  /**
  * @Internal
@@ -6868,7 +6904,7 @@
  osVersion = navigator.appVersion;
  }
  const recognizerConfig = this.createRecognizerConfig(new Exports_js_1.SpeechServiceConfig(new Exports_js_1.Context(new Exports_js_1.OS(osPlatform, osName, osVersion))));
- this.privReco = this.createServiceRecognizer(_Recognizer.getAuthFromProperties(this.privProperties), this.privConnectionFactory, this.audioConfig, recognizerConfig);
+ this.privReco = this.createServiceRecognizer(_Recognizer.getAuth(this.privProperties, this.tokenCredential), this.privConnectionFactory, this.audioConfig, recognizerConfig);
  }
  async recognizeOnceAsyncImpl(recognitionMode) {
  Contracts_js_1.Contracts.throwIfDisposed(this.privDisposed);
@@ -6894,22 +6930,57 @@
  }
  return;
  }
- static getAuthFromProperties(properties) {
+ static getAuth(properties, tokenCredential) {
  const subscriptionKey = properties.getProperty(Exports_js_3.PropertyId.SpeechServiceConnection_Key, void 0);
- const authentication = subscriptionKey && subscriptionKey !== "" ? new Exports_js_1.CognitiveSubscriptionKeyAuthentication(subscriptionKey) : new Exports_js_1.CognitiveTokenAuthentication(() => {
+ if (subscriptionKey && subscriptionKey !== "") {
+ return new Exports_js_1.CognitiveSubscriptionKeyAuthentication(subscriptionKey);
+ }
+ if (tokenCredential) {
+ return new Exports_js_1.CognitiveTokenAuthentication(async () => {
+ try {
+ const tokenResponse = await tokenCredential.getToken("https://cognitiveservices.azure.com/.default");
+ return tokenResponse?.token ?? "";
+ } catch (err) {
+ throw err;
+ }
+ }, async () => {
+ try {
+ const tokenResponse = await tokenCredential.getToken("https://cognitiveservices.azure.com/.default");
+ return tokenResponse?.token ?? "";
+ } catch (err) {
+ throw err;
+ }
+ });
+ }
+ return new Exports_js_1.CognitiveTokenAuthentication(() => {
  const authorizationToken = properties.getProperty(Exports_js_3.PropertyId.SpeechServiceAuthorization_Token, void 0);
  return Promise.resolve(authorizationToken);
  }, () => {
  const authorizationToken = properties.getProperty(Exports_js_3.PropertyId.SpeechServiceAuthorization_Token, void 0);
  return Promise.resolve(authorizationToken);
  });
- return authentication;
  }
  };
  exports.Recognizer = Recognizer;
  }
  });

+ // ../../node_modules/microsoft-cognitiveservices-speech-sdk/distrib/lib/src/common.speech/ServiceMessages/PhraseDetection/PhraseDetectionContext.js
+ var require_PhraseDetectionContext = __commonJS({
+ "../../node_modules/microsoft-cognitiveservices-speech-sdk/distrib/lib/src/common.speech/ServiceMessages/PhraseDetection/PhraseDetectionContext.js"(exports) {
+ "use strict";
+ Object.defineProperty(exports, "__esModule", { value: true });
+ exports.RecognitionMode = void 0;
+ var RecognitionMode;
+ (function(RecognitionMode2) {
+ RecognitionMode2["Interactive"] = "Interactive";
+ RecognitionMode2["Dictation"] = "Dictation";
+ RecognitionMode2["Conversation"] = "Conversation";
+ RecognitionMode2["None"] = "None";
+ })(RecognitionMode = exports.RecognitionMode || (exports.RecognitionMode = {}));
+ }
+ });
+
  // ../../node_modules/microsoft-cognitiveservices-speech-sdk/distrib/lib/src/sdk/SpeechRecognizer.js
  var require_SpeechRecognizer = __commonJS({
  "../../node_modules/microsoft-cognitiveservices-speech-sdk/distrib/lib/src/sdk/SpeechRecognizer.js"(exports) {
@@ -6917,6 +6988,7 @@
  Object.defineProperty(exports, "__esModule", { value: true });
  exports.SpeechRecognizer = void 0;
  var Exports_js_1 = require_Exports7();
+ var PhraseDetectionContext_js_1 = require_PhraseDetectionContext();
  var Exports_js_2 = require_Exports();
  var Contracts_js_1 = require_Contracts();
  var Exports_js_3 = require_Exports3();
@@ -6931,7 +7003,7 @@
  const speechConfigImpl = speechConfig;
  Contracts_js_1.Contracts.throwIfNull(speechConfigImpl, "speechConfig");
  Contracts_js_1.Contracts.throwIfNullOrWhitespace(speechConfigImpl.properties.getProperty(Exports_js_3.PropertyId.SpeechServiceConnection_RecoLanguage), Exports_js_3.PropertyId[Exports_js_3.PropertyId.SpeechServiceConnection_RecoLanguage]);
- super(audioConfig, speechConfigImpl.properties, new Exports_js_1.SpeechConnectionFactory());
+ super(audioConfig, speechConfigImpl.properties, new Exports_js_1.SpeechConnectionFactory(), speechConfig.tokenCredential);
  this.privDisposedRecognizer = false;
  }
  /**
@@ -7028,7 +7100,7 @@
  * @param err - Callback invoked in case of an error.
  */
  recognizeOnceAsync(cb, err) {
- Exports_js_2.marshalPromiseToCallbacks(this.recognizeOnceAsyncImpl(Exports_js_1.RecognitionMode.Interactive), cb, err);
+ (0, Exports_js_2.marshalPromiseToCallbacks)(this.recognizeOnceAsyncImpl(PhraseDetectionContext_js_1.RecognitionMode.Interactive), cb, err);
  }
  /**
  * Starts speech recognition, until stopContinuousRecognitionAsync() is called.
@@ -7040,7 +7112,7 @@
  * @param err - Callback invoked in case of an error.
  */
  startContinuousRecognitionAsync(cb, err) {
- Exports_js_2.marshalPromiseToCallbacks(this.startContinuousRecognitionAsyncImpl(Exports_js_1.RecognitionMode.Conversation), cb, err);
+ (0, Exports_js_2.marshalPromiseToCallbacks)(this.startContinuousRecognitionAsyncImpl(this.properties.getProperty(Exports_js_1.ForceDictationPropertyName, void 0) === void 0 ? PhraseDetectionContext_js_1.RecognitionMode.Conversation : PhraseDetectionContext_js_1.RecognitionMode.Dictation), cb, err);
  }
  /**
  * Stops continuous speech recognition.
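
startContinuousRecognitionAsync previously always started in Conversation mode; it now checks the forced-dictation flag that enableDictation() sets and starts in Dictation mode when present:

    const sdk = require("microsoft-cognitiveservices-speech-sdk");
    const config = sdk.SpeechConfig.fromSubscription("<key>", "<region>");
    config.enableDictation(); // sets the ForceDictationPropertyName flag
    const recognizer = new sdk.SpeechRecognizer(config);
    recognizer.startContinuousRecognitionAsync(); // now starts in Dictation mode
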
@@ -7051,7 +7123,7 @@
7051
7123
  * @param err - Callback invoked in case of an error.
7052
7124
  */
7053
7125
  stopContinuousRecognitionAsync(cb, err) {
7054
- Exports_js_2.marshalPromiseToCallbacks(this.stopContinuousRecognitionAsyncImpl(), cb, err);
7126
+ (0, Exports_js_2.marshalPromiseToCallbacks)(this.stopContinuousRecognitionAsyncImpl(), cb, err);
7055
7127
  }
7056
7128
  /**
7057
7129
  * Starts speech recognition with keyword spotting, until
@@ -7096,7 +7168,7 @@
7096
7168
  */
7097
7169
  close(cb, errorCb) {
7098
7170
  Contracts_js_1.Contracts.throwIfDisposed(this.privDisposedRecognizer);
7099
- Exports_js_2.marshalPromiseToCallbacks(this.dispose(true), cb, errorCb);
7171
+ (0, Exports_js_2.marshalPromiseToCallbacks)(this.dispose(true), cb, errorCb);
7100
7172
  }
7101
7173
  /**
7102
7174
  * Disposes any resources held by the object.
@@ -7134,6 +7206,7 @@
7134
7206
  Object.defineProperty(exports, "__esModule", { value: true });
7135
7207
  exports.IntentRecognizer = void 0;
7136
7208
  var Exports_js_1 = require_Exports7();
7209
+ var PhraseDetectionContext_js_1 = require_PhraseDetectionContext();
7137
7210
  var Exports_js_2 = require_Exports();
7138
7211
  var Contracts_js_1 = require_Contracts();
7139
7212
  var Exports_js_3 = require_Exports3();
@@ -7214,12 +7287,12 @@
  Contracts_js_1.Contracts.throwIfDisposed(this.privDisposedIntentRecognizer);
  if (Object.keys(this.privAddedLmIntents).length !== 0 || void 0 !== this.privUmbrellaIntent) {
  const context = this.buildSpeechContext();
- this.privReco.speechContext.setSection("intent", context.Intent);
+ this.privReco.speechContext.getContext().intent = context.Intent;
  this.privReco.dynamicGrammar.addReferenceGrammar(context.ReferenceGrammars);
  const intentReco = this.privReco;
  intentReco.setIntents(this.privAddedLmIntents, this.privUmbrellaIntent);
  }
- Exports_js_2.marshalPromiseToCallbacks(this.recognizeOnceAsyncImpl(Exports_js_1.RecognitionMode.Interactive), cb, err);
+ (0, Exports_js_2.marshalPromiseToCallbacks)(this.recognizeOnceAsyncImpl(PhraseDetectionContext_js_1.RecognitionMode.Interactive), cb, err);
  }
  /**
  * Starts speech recognition, until stopContinuousRecognitionAsync() is called.
@@ -7233,12 +7306,12 @@
  startContinuousRecognitionAsync(cb, err) {
  if (Object.keys(this.privAddedLmIntents).length !== 0 || void 0 !== this.privUmbrellaIntent) {
  const context = this.buildSpeechContext();
- this.privReco.speechContext.setSection("intent", context.Intent);
+ this.privReco.speechContext.getContext().intent = context.Intent;
  this.privReco.dynamicGrammar.addReferenceGrammar(context.ReferenceGrammars);
  const intentReco = this.privReco;
  intentReco.setIntents(this.privAddedLmIntents, this.privUmbrellaIntent);
  }
- Exports_js_2.marshalPromiseToCallbacks(this.startContinuousRecognitionAsyncImpl(Exports_js_1.RecognitionMode.Conversation), cb, err);
+ (0, Exports_js_2.marshalPromiseToCallbacks)(this.startContinuousRecognitionAsyncImpl(PhraseDetectionContext_js_1.RecognitionMode.Conversation), cb, err);
  }
  /**
  * Stops continuous intent recognition.
@@ -7249,7 +7322,7 @@
  * @param err - Callback invoked in case of an error.
  */
  stopContinuousRecognitionAsync(cb, err) {
- Exports_js_2.marshalPromiseToCallbacks(this.stopContinuousRecognitionAsyncImpl(), cb, err);
+ (0, Exports_js_2.marshalPromiseToCallbacks)(this.stopContinuousRecognitionAsyncImpl(), cb, err);
  }
  /**
  * Starts speech recognition with keyword spotting, until stopKeywordRecognitionAsync() is called.
@@ -7347,7 +7420,7 @@
  */
  close(cb, errorCb) {
  Contracts_js_1.Contracts.throwIfDisposed(this.privDisposedIntentRecognizer);
- Exports_js_2.marshalPromiseToCallbacks(this.dispose(true), cb, errorCb);
+ (0, Exports_js_2.marshalPromiseToCallbacks)(this.dispose(true), cb, errorCb);
  }
  createRecognizerConfig(speechConfig) {
  return new Exports_js_1.RecognizerConfig(speechConfig, this.privProperties);
@@ -7551,7 +7624,7 @@
  * be notified when the connection is established.
  */
  openConnection(cb, err) {
- Exports_js_2.marshalPromiseToCallbacks(this.privInternalData.connect(), cb, err);
+ (0, Exports_js_2.marshalPromiseToCallbacks)(this.privInternalData.connect(), cb, err);
  }
  /**
  * Closes the connection the service.
@@ -7563,7 +7636,7 @@
  if (this.privInternalData instanceof Exports_js_1.SynthesisAdapterBase) {
  throw new Error("Disconnecting a synthesizer's connection is currently not supported");
  } else {
- Exports_js_2.marshalPromiseToCallbacks(this.privInternalData.disconnect(), cb, err);
+ (0, Exports_js_2.marshalPromiseToCallbacks)(this.privInternalData.disconnect(), cb, err);
  }
  }
  /**
@@ -7579,11 +7652,18 @@
  if (path.toLowerCase() !== "speech.context") {
  throw new Error("Only speech.context message property sets are currently supported for recognizer");
  } else {
- this.privInternalData.speechContext.setSection(propertyName, propertyValue);
+ const context = this.privInternalData.speechContext.getContext();
+ context[propertyName] = propertyValue;
  }
  } else if (this.privInternalData instanceof Exports_js_1.SynthesisAdapterBase) {
- if (path.toLowerCase() !== "synthesis.context") {
- throw new Error("Only synthesis.context message property sets are currently supported for synthesizer");
+ if (path.toLowerCase() !== "speech.config" && path.toLowerCase() !== "synthesis.context") {
+ throw new Error("Only speech.config and synthesis.context message paths are currently supported for synthesizer");
+ } else if (path.toLowerCase() === "speech.config") {
+ if (propertyName.toLowerCase() !== "context") {
+ throw new Error("Only context property is currently supported for speech.config message path for synthesizer");
+ } else {
+ this.privInternalData.synthesizerConfig.setContextFromJson(propertyValue);
+ }
  } else {
  this.privInternalData.synthesisContext.setSection(propertyName, propertyValue);
  }
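
Example — the widened setMessageProperty surface. The hunk above lets a synthesizer connection accept the "speech.config" path (restricted to the single property name "context", whose value is forwarded to setContextFromJson) in addition to "synthesis.context". A minimal sketch against the public Connection helper, assuming the standard microsoft-cognitiveservices-speech-sdk entry points:

```ts
import * as sdk from "microsoft-cognitiveservices-speech-sdk";

const speechConfig = sdk.SpeechConfig.fromSubscription("<key>", "<region>");
const synthesizer = new sdk.SpeechSynthesizer(speechConfig);

// Reach the underlying service connection of the synthesizer.
const connection = sdk.Connection.fromSynthesizer(synthesizer);

// Newly allowed: inject a context payload into the speech.config message.
// Only the "context" property name is accepted on this path; the value is
// handed to setContextFromJson, so it should be JSON.
connection.setMessageProperty("speech.config", "context", JSON.stringify({ custom: "value" }));

// Still allowed, as before: arbitrary sections on synthesis.context.
connection.setMessageProperty("synthesis.context", "customSection", { enabled: true });
```
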
@@ -7598,7 +7678,7 @@
  * @param error A callback to indicate an error.
  */
  sendMessageAsync(path, payload, success, error) {
- Exports_js_2.marshalPromiseToCallbacks(this.privInternalData.sendNetworkMessage(path, payload), success, error);
+ (0, Exports_js_2.marshalPromiseToCallbacks)(this.privInternalData.sendNetworkMessage(path, payload), success, error);
  }
  /**
  * Dispose of associated resources.
@@ -7643,6 +7723,7 @@
  Object.defineProperty(exports, "__esModule", { value: true });
  exports.TranslationRecognizer = void 0;
  var Exports_js_1 = require_Exports7();
+ var PhraseDetectionContext_js_1 = require_PhraseDetectionContext();
  var Exports_js_2 = require_Exports();
  var Connection_js_1 = require_Connection();
  var Contracts_js_1 = require_Contracts();
@@ -7658,7 +7739,7 @@
  constructor(speechConfig, audioConfig, connectionFactory) {
  const configImpl = speechConfig;
  Contracts_js_1.Contracts.throwIfNull(configImpl, "speechConfig");
- super(audioConfig, configImpl.properties, connectionFactory || new Exports_js_1.TranslationConnectionFactory());
+ super(audioConfig, configImpl.properties, connectionFactory || new Exports_js_1.TranslationConnectionFactory(), speechConfig.tokenCredential);
  this.privDisposedTranslationRecognizer = false;
  if (this.properties.getProperty(Exports_js_3.PropertyId.SpeechServiceConnection_TranslationVoice, void 0) !== void 0) {
  Contracts_js_1.Contracts.throwIfNullOrWhitespace(this.properties.getProperty(Exports_js_3.PropertyId.SpeechServiceConnection_TranslationVoice), Exports_js_3.PropertyId[Exports_js_3.PropertyId.SpeechServiceConnection_TranslationVoice]);
@@ -7676,6 +7757,9 @@
  static FromConfig(speechTranslationConfig, autoDetectSourceLanguageConfig, audioConfig) {
  const speechTranslationConfigImpl = speechTranslationConfig;
  autoDetectSourceLanguageConfig.properties.mergeTo(speechTranslationConfigImpl.properties);
+ if (autoDetectSourceLanguageConfig.properties.getProperty(Exports_js_3.PropertyId.SpeechServiceConnection_AutoDetectSourceLanguages, void 0) === Exports_js_1.AutoDetectSourceLanguagesOpenRangeOptionName) {
+ speechTranslationConfigImpl.properties.setProperty(Exports_js_3.PropertyId.SpeechServiceConnection_RecoLanguage, "en-US");
+ }
  return new _TranslationRecognizer(speechTranslationConfig, audioConfig);
  }
  /**
@@ -7756,7 +7840,7 @@
  */
  recognizeOnceAsync(cb, err) {
  Contracts_js_1.Contracts.throwIfDisposed(this.privDisposedTranslationRecognizer);
- Exports_js_2.marshalPromiseToCallbacks(this.recognizeOnceAsyncImpl(Exports_js_1.RecognitionMode.Interactive), cb, err);
+ (0, Exports_js_2.marshalPromiseToCallbacks)(this.recognizeOnceAsyncImpl(PhraseDetectionContext_js_1.RecognitionMode.Interactive), cb, err);
  }
  /**
  * Starts recognition and translation, until stopContinuousRecognitionAsync() is called.
@@ -7768,7 +7852,7 @@
  * @param err - Callback invoked in case of an error.
  */
  startContinuousRecognitionAsync(cb, err) {
- Exports_js_2.marshalPromiseToCallbacks(this.startContinuousRecognitionAsyncImpl(Exports_js_1.RecognitionMode.Conversation), cb, err);
+ (0, Exports_js_2.marshalPromiseToCallbacks)(this.startContinuousRecognitionAsyncImpl(PhraseDetectionContext_js_1.RecognitionMode.Conversation), cb, err);
  }
  /**
  * Stops continuous recognition and translation.
@@ -7779,7 +7863,7 @@
  * @param err - Callback invoked in case of an error.
  */
  stopContinuousRecognitionAsync(cb, err) {
- Exports_js_2.marshalPromiseToCallbacks(this.stopContinuousRecognitionAsyncImpl(), cb, err);
+ (0, Exports_js_2.marshalPromiseToCallbacks)(this.stopContinuousRecognitionAsyncImpl(), cb, err);
  }
  /**
  * dynamically remove a language from list of target language
@@ -7832,7 +7916,7 @@
  */
  close(cb, errorCb) {
  Contracts_js_1.Contracts.throwIfDisposed(this.privDisposedTranslationRecognizer);
- Exports_js_2.marshalPromiseToCallbacks(this.dispose(true), cb, errorCb);
+ (0, Exports_js_2.marshalPromiseToCallbacks)(this.dispose(true), cb, errorCb);
  }
  /**
  * handles ConnectionEstablishedEvent for conversation translation scenarios.
@@ -7969,7 +8053,7 @@
  * @returns {NoMatchDetails} The no match details object being created.
  */
  static fromResult(result) {
- const simpleSpeech = Exports_js_1.SimpleSpeechPhrase.fromJSON(result.json);
+ const simpleSpeech = Exports_js_1.SimpleSpeechPhrase.fromJSON(result.json, 0);
  let reason = Exports_js_2.NoMatchReason.NotRecognized;
  switch (simpleSpeech.RecognitionStatus) {
  case Exports_js_1.RecognitionStatus.BabbleTimeout:
@@ -8204,7 +8288,7 @@
  let reason = Exports_js_2.CancellationReason.Error;
  let errorCode = Exports_js_2.CancellationErrorCode.NoError;
  if (result instanceof Exports_js_2.RecognitionResult && !!result.json) {
- const simpleSpeech = Exports_js_1.SimpleSpeechPhrase.fromJSON(result.json);
+ const simpleSpeech = Exports_js_1.SimpleSpeechPhrase.fromJSON(result.json, 0);
  reason = Exports_js_1.EnumTranslation.implTranslateCancelResult(simpleSpeech.RecognitionStatus);
  }
  if (!!result.properties) {
@@ -8286,6 +8370,7 @@
  "use strict";
  Object.defineProperty(exports, "__esModule", { value: true });
  exports.PhraseListGrammar = void 0;
+ var Contracts_js_1 = require_Contracts();
  var PhraseListGrammar = class _PhraseListGrammar {
  constructor(recogBase) {
  this.privGrammerBuilder = recogBase.dynamicGrammar;
@@ -8318,6 +8403,16 @@
  clear() {
  this.privGrammerBuilder.clearPhrases();
  }
+ /**
+ * Sets the phrase list grammar biasing weight.
+ * The allowed range is [0.0, 2.0].
+ * The default weight is 1.0. Value zero disables the phrase list.
+ * @param weight Phrase list grammar biasing weight.
+ */
+ setWeight(weight) {
+ Contracts_js_1.Contracts.throwIfNumberOutOfRange(weight, "weight", 0, 2);
+ this.privGrammerBuilder.setWeight(weight);
+ }
  };
  exports.PhraseListGrammar = PhraseListGrammar;
  }
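
Example — phrase list biasing weight. The new setWeight validates its argument with Contracts.throwIfNumberOutOfRange(weight, "weight", 0, 2) before handing it to the dynamic grammar builder. A short sketch of the intended call pattern, assuming the usual PhraseListGrammar.fromRecognizer entry point:

```ts
import * as sdk from "microsoft-cognitiveservices-speech-sdk";

const speechConfig = sdk.SpeechConfig.fromSubscription("<key>", "<region>");
const recognizer = new sdk.SpeechRecognizer(speechConfig);

const phraseList = sdk.PhraseListGrammar.fromRecognizer(recognizer);
phraseList.addPhrase("Contoso"); // bias recognition toward this phrase
phraseList.setWeight(1.5);       // allowed range [0.0, 2.0]; 1.0 is the default, 0 disables the list
// phraseList.setWeight(2.5) would throw, since the contract rejects values outside [0, 2].
```
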
@@ -8422,6 +8517,7 @@
  * @returns {string} The current value, or provided default, of the given property.
  */
  getProperty(name, def) {
+ void def;
  return this.privSpeechConfig.getProperty(name);
  }
  /**
@@ -8444,6 +8540,7 @@
  }
  }
  setServiceProperty(name, value, channel) {
+ void channel;
  this.privSpeechConfig.setServiceProperty(name, value);
  }
  /**
@@ -8550,6 +8647,7 @@
  * @returns {BotFrameworkConfig} A new bot framework configuration instance.
  */
  static fromHost(host, subscriptionKey, botId) {
+ void botId;
  Contracts_js_1.Contracts.throwIfNullOrUndefined(host, "host");
  const resolvedHost = host instanceof URL ? host : new URL(`wss://${host}.convai.speech.azure.us`);
  Contracts_js_1.Contracts.throwIfNullOrUndefined(resolvedHost, "resolvedHost");
@@ -8698,6 +8796,8 @@
  QueryParameterNames.EnableWordLevelTimestamps = "wordLevelTimestamps";
  QueryParameterNames.EndSilenceTimeoutMs = "endSilenceTimeoutMs";
  QueryParameterNames.SegmentationSilenceTimeoutMs = "segmentationSilenceTimeoutMs";
+ QueryParameterNames.SegmentationMaximumTimeMs = "segmentationMaximumTimeMs";
+ QueryParameterNames.SegmentationStrategy = "segmentationStrategy";
  QueryParameterNames.Format = "format";
  QueryParameterNames.InitialSilenceTimeoutMs = "initialSilenceTimeoutMs";
  QueryParameterNames.Language = "language";
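
Example — the new segmentation query parameters. The two added names surface on the connection URL as ?segmentationMaximumTimeMs=...&segmentationStrategy=...; only the silence timeout has a PropertyId visible in this diff. The property keys for the two new parameters below are illustrative assumptions, not names confirmed by this diff:

```ts
import * as sdk from "microsoft-cognitiveservices-speech-sdk";

const speechConfig = sdk.SpeechConfig.fromSubscription("<key>", "<region>");

// Existing knob, mapped to the URL by setCommonUrlParams below:
speechConfig.setProperty(sdk.PropertyId.Speech_SegmentationSilenceTimeoutMs, "800");

// Assumed counterparts for the two new query parameters; the exact
// PropertyId plumbing is not part of this hunk, so these string keys
// are hypothetical placeholders.
speechConfig.setProperty("Speech-SegmentationMaximumTimeMs", "20000"); // illustrative key
speechConfig.setProperty("Speech-SegmentationStrategy", "Semantic");   // illustrative key
```
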
@@ -8721,7 +8821,8 @@
  Object.defineProperty(exports, "__esModule", { value: true });
  exports.ConnectionFactoryBase = void 0;
  var Exports_js_1 = require_Exports7();
- var Exports_js_2 = require_Exports3();
+ var Exports_js_2 = require_Exports();
+ var Exports_js_3 = require_Exports3();
  var QueryParameterNames_js_1 = require_QueryParameterNames();
  var ConnectionFactoryBase = class {
  static getHostSuffix(region) {
@@ -8737,14 +8838,14 @@
  }
  setCommonUrlParams(config, queryParams, endpoint) {
  const propertyIdToParameterMap = /* @__PURE__ */ new Map([
- [Exports_js_2.PropertyId.Speech_SegmentationSilenceTimeoutMs, QueryParameterNames_js_1.QueryParameterNames.SegmentationSilenceTimeoutMs],
- [Exports_js_2.PropertyId.SpeechServiceConnection_EnableAudioLogging, QueryParameterNames_js_1.QueryParameterNames.EnableAudioLogging],
- [Exports_js_2.PropertyId.SpeechServiceConnection_EndSilenceTimeoutMs, QueryParameterNames_js_1.QueryParameterNames.EndSilenceTimeoutMs],
- [Exports_js_2.PropertyId.SpeechServiceConnection_InitialSilenceTimeoutMs, QueryParameterNames_js_1.QueryParameterNames.InitialSilenceTimeoutMs],
- [Exports_js_2.PropertyId.SpeechServiceResponse_PostProcessingOption, QueryParameterNames_js_1.QueryParameterNames.Postprocessing],
- [Exports_js_2.PropertyId.SpeechServiceResponse_ProfanityOption, QueryParameterNames_js_1.QueryParameterNames.Profanity],
- [Exports_js_2.PropertyId.SpeechServiceResponse_RequestWordLevelTimestamps, QueryParameterNames_js_1.QueryParameterNames.EnableWordLevelTimestamps],
- [Exports_js_2.PropertyId.SpeechServiceResponse_StablePartialResultThreshold, QueryParameterNames_js_1.QueryParameterNames.StableIntermediateThreshold]
+ [Exports_js_3.PropertyId.Speech_SegmentationSilenceTimeoutMs, QueryParameterNames_js_1.QueryParameterNames.SegmentationSilenceTimeoutMs],
+ [Exports_js_3.PropertyId.SpeechServiceConnection_EnableAudioLogging, QueryParameterNames_js_1.QueryParameterNames.EnableAudioLogging],
+ [Exports_js_3.PropertyId.SpeechServiceConnection_EndSilenceTimeoutMs, QueryParameterNames_js_1.QueryParameterNames.EndSilenceTimeoutMs],
+ [Exports_js_3.PropertyId.SpeechServiceConnection_InitialSilenceTimeoutMs, QueryParameterNames_js_1.QueryParameterNames.InitialSilenceTimeoutMs],
+ [Exports_js_3.PropertyId.SpeechServiceResponse_PostProcessingOption, QueryParameterNames_js_1.QueryParameterNames.Postprocessing],
+ [Exports_js_3.PropertyId.SpeechServiceResponse_ProfanityOption, QueryParameterNames_js_1.QueryParameterNames.Profanity],
+ [Exports_js_3.PropertyId.SpeechServiceResponse_RequestWordLevelTimestamps, QueryParameterNames_js_1.QueryParameterNames.EnableWordLevelTimestamps],
+ [Exports_js_3.PropertyId.SpeechServiceResponse_StablePartialResultThreshold, QueryParameterNames_js_1.QueryParameterNames.StableIntermediateThreshold]
  ]);
  propertyIdToParameterMap.forEach((parameterName, propertyId) => {
  this.setUrlParameter(propertyId, parameterName, config, queryParams, endpoint);
@@ -8760,6 +8861,26 @@
  queryParams[parameterName] = value.toLocaleLowerCase();
  }
  }
+ static async getRedirectUrlFromEndpoint(endpoint) {
+ const redirectUrl = new URL(endpoint);
+ redirectUrl.protocol = "https:";
+ redirectUrl.port = "443";
+ const params = redirectUrl.searchParams;
+ params.append("GenerateRedirectResponse", "true");
+ const redirectedUrlString = redirectUrl.toString();
+ Exports_js_2.Events.instance.onEvent(new Exports_js_2.ConnectionRedirectEvent("", redirectedUrlString, void 0, "ConnectionFactoryBase: redirectUrl request"));
+ const redirectResponse = await fetch(redirectedUrlString);
+ if (redirectResponse.status !== 200) {
+ return endpoint;
+ }
+ const redirectUrlString = await redirectResponse.text();
+ Exports_js_2.Events.instance.onEvent(new Exports_js_2.ConnectionRedirectEvent("", redirectUrlString, endpoint, "ConnectionFactoryBase: redirectUrlString"));
+ try {
+ return new URL(redirectUrlString.trim()).toString();
+ } catch (error) {
+ return endpoint;
+ }
+ }
  };
  exports.ConnectionFactoryBase = ConnectionFactoryBase;
  }
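
Example — the redirect probe in isolation. getRedirectUrlFromEndpoint rewrites the endpoint to https on port 443, appends GenerateRedirectResponse=true, and only adopts the response body when the probe returns HTTP 200 and the body parses as a URL; every failure path falls back to the original endpoint. The same control flow restated as a standalone sketch (it mirrors the bundled logic above rather than extending it):

```ts
async function resolveRedirect(endpoint: string): Promise<string> {
  const probe = new URL(endpoint);
  probe.protocol = "https:"; // probe over HTTPS even for wss:// endpoints
  probe.port = "443";
  probe.searchParams.append("GenerateRedirectResponse", "true");

  const response = await fetch(probe.toString());
  if (response.status !== 200) {
    return endpoint; // service offered no redirect; keep the original
  }

  const body = await response.text();
  try {
    return new URL(body.trim()).toString(); // body must itself be a valid URL
  } catch {
    return endpoint; // malformed body; keep the original
  }
}
```
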
@@ -8811,7 +8932,7 @@
  }
  this.setCommonUrlParams(config, queryParams, endpoint);
  const enableCompression = config.parameters.getProperty("SPEECH-EnableWebsocketCompression", "false") === "true";
- return new Exports_js_1.WebsocketConnection(endpoint, queryParams, headers, new Exports_js_4.WebsocketMessageFormatter(), Exports_js_1.ProxyInfo.fromRecognizerConfig(config), enableCompression, connectionId);
+ return Promise.resolve(new Exports_js_1.WebsocketConnection(endpoint, queryParams, headers, new Exports_js_4.WebsocketMessageFormatter(), Exports_js_1.ProxyInfo.fromRecognizerConfig(config), enableCompression, connectionId));
  }
  };
  exports.DialogConnectionFactory = DialogConnectionFactory;
@@ -8828,6 +8949,7 @@
  exports.DialogServiceConnector = void 0;
  var DialogConnectorFactory_js_1 = require_DialogConnectorFactory();
  var Exports_js_1 = require_Exports7();
+ var PhraseDetectionContext_js_1 = require_PhraseDetectionContext();
  var Exports_js_2 = require_Exports();
  var Contracts_js_1 = require_Contracts();
  var Exports_js_3 = require_Exports3();
@@ -8860,7 +8982,7 @@
  * @public
  */
  connect(cb, err) {
- Exports_js_2.marshalPromiseToCallbacks(this.privReco.connect(), cb, err);
+ (0, Exports_js_2.marshalPromiseToCallbacks)(this.privReco.connect(), cb, err);
  }
  /**
  * Closes the connection the service.
@@ -8869,7 +8991,7 @@
  * If disconnect() is called during a recognition, recognition will fail and cancel with an error.
  */
  disconnect(cb, err) {
- Exports_js_2.marshalPromiseToCallbacks(this.privReco.disconnect(), cb, err);
+ (0, Exports_js_2.marshalPromiseToCallbacks)(this.privReco.disconnect(), cb, err);
  }
  /**
  * Gets the authorization token used to communicate with the service.
@@ -8933,7 +9055,7 @@
  await this.implRecognizerStop();
  this.isTurnComplete = false;
  const ret = new Exports_js_2.Deferred();
- await this.privReco.recognize(Exports_js_1.RecognitionMode.Conversation, ret.resolve, ret.reject);
+ await this.privReco.recognize(PhraseDetectionContext_js_1.RecognitionMode.Conversation, ret.resolve, ret.reject);
  const e = await ret.promise;
  await this.implRecognizerStop();
  return e;
@@ -8943,13 +9065,13 @@
  this.dispose(true).catch(() => {
  });
  });
- Exports_js_2.marshalPromiseToCallbacks(retPromise.finally(() => {
+ (0, Exports_js_2.marshalPromiseToCallbacks)(retPromise.finally(() => {
  this.isTurnComplete = true;
  }), cb, err);
  }
  }
  sendActivityAsync(activity, cb, errCb) {
- Exports_js_2.marshalPromiseToCallbacks(this.privReco.sendMessage(activity), cb, errCb);
+ (0, Exports_js_2.marshalPromiseToCallbacks)(this.privReco.sendMessage(activity), cb, errCb);
  }
  /**
  * closes all external resources held by an instance of this class.
@@ -8959,7 +9081,7 @@
  */
  close(cb, err) {
  Contracts_js_1.Contracts.throwIfDisposed(this.privIsDisposed);
- Exports_js_2.marshalPromiseToCallbacks(this.dispose(true), cb, err);
+ (0, Exports_js_2.marshalPromiseToCallbacks)(this.dispose(true), cb, err);
  }
  async dispose(disposing) {
  if (this.privIsDisposed) {
@@ -9975,6 +10097,7 @@
  static fromOpenRange() {
  const config = new _AutoDetectSourceLanguageConfig();
  config.properties.setProperty(Exports_js_2.PropertyId.SpeechServiceConnection_AutoDetectSourceLanguages, Exports_js_1.AutoDetectSourceLanguagesOpenRangeOptionName);
+ config.properties.setProperty(Exports_js_2.PropertyId.SpeechServiceConnection_RecoLanguage, "en-US");
  return config;
  }
  /**
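
Example — open-range language detection. fromOpenRange() now also pins SpeechServiceConnection_RecoLanguage to "en-US" as the fallback recognition language; the calling pattern itself is unchanged:

```ts
import * as sdk from "microsoft-cognitiveservices-speech-sdk";

const speechConfig = sdk.SpeechConfig.fromSubscription("<key>", "<region>");
const autoDetect = sdk.AutoDetectSourceLanguageConfig.fromOpenRange();
// "en-US" is now set under the hood as the fallback recognition language.
const recognizer = sdk.SpeechRecognizer.FromConfig(
  speechConfig,
  autoDetect,
  sdk.AudioConfig.fromDefaultMicrophoneInput()
);
```
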
@@ -10242,7 +10365,7 @@
  let err;
  if (typeof arg2 === "string") {
  conversationImpl = new ConversationImpl(speechConfig, arg2);
- Exports_js_2.marshalPromiseToCallbacks((async () => {
+ (0, Exports_js_2.marshalPromiseToCallbacks)((async () => {
  })(), arg3, arg4);
  } else {
  conversationImpl = new ConversationImpl(speechConfig);
@@ -10593,7 +10716,7 @@
  */
  addParticipantAsync(participant, cb, err) {
  Contracts_js_1.Contracts.throwIfNullOrUndefined(participant, "Participant");
- Exports_js_2.marshalPromiseToCallbacks(this.addParticipantImplAsync(participant), cb, err);
+ (0, Exports_js_2.marshalPromiseToCallbacks)(this.addParticipantImplAsync(participant), cb, err);
  }
  /**
  * Join a conversation as a participant.
@@ -10628,7 +10751,7 @@
  * @param err
  */
  deleteConversationAsync(cb, err) {
- Exports_js_2.marshalPromiseToCallbacks(this.deleteConversationImplAsync(), cb, err);
+ (0, Exports_js_2.marshalPromiseToCallbacks)(this.deleteConversationImplAsync(), cb, err);
  }
  async deleteConversationImplAsync() {
  Contracts_js_1.Contracts.throwIfNullOrUndefined(this.privProperties, this.privErrors.permissionDeniedConnect);
@@ -10642,7 +10765,7 @@
  * @param err
  */
  endConversationAsync(cb, err) {
- Exports_js_2.marshalPromiseToCallbacks(this.endConversationImplAsync(), cb, err);
+ (0, Exports_js_2.marshalPromiseToCallbacks)(this.endConversationImplAsync(), cb, err);
  }
  endConversationImplAsync() {
  return this.close(true);
@@ -10739,7 +10862,7 @@
  try {
  Contracts_js_1.Contracts.throwIfDisposed(this.privIsDisposed);
  if (!!this.privTranscriberRecognizer && userId.hasOwnProperty("id")) {
- Exports_js_2.marshalPromiseToCallbacks(this.removeParticipantImplAsync(userId), cb, err);
+ (0, Exports_js_2.marshalPromiseToCallbacks)(this.removeParticipantImplAsync(userId), cb, err);
  } else {
  Contracts_js_1.Contracts.throwIfDisposed(this.privConversationRecognizer.isDisposed());
  Contracts_js_1.Contracts.throwIfNullOrUndefined(this.privRoom, this.privErrors.permissionDeniedSend);
@@ -11422,7 +11545,7 @@
  headers[authInfo.headerName] = authInfo.token;
  }
  const enableCompression = config.parameters.getProperty("SPEECH-EnableWebsocketCompression", "").toUpperCase() === "TRUE";
- return new Exports_js_1.WebsocketConnection(endpointUrl, queryParams, headers, new Exports_js_3.WebsocketMessageFormatter(), Exports_js_1.ProxyInfo.fromRecognizerConfig(config), enableCompression, connectionId);
+ return Promise.resolve(new Exports_js_1.WebsocketConnection(endpointUrl, queryParams, headers, new Exports_js_3.WebsocketMessageFormatter(), Exports_js_1.ProxyInfo.fromRecognizerConfig(config), enableCompression, connectionId));
  }
  };
  exports.ConversationTranslatorConnectionFactory = ConversationTranslatorConnectionFactory;
@@ -11644,7 +11767,7 @@
  * @param err
  */
  leaveConversationAsync(cb, err) {
- Exports_js_2.marshalPromiseToCallbacks((async () => {
+ (0, Exports_js_2.marshalPromiseToCallbacks)((async () => {
  await this.cancelSpeech();
  await this.privConversation.endConversationImplAsync();
  await this.privConversation.deleteConversationImplAsync();
@@ -11672,7 +11795,7 @@
  * @param err
  */
  startTranscribingAsync(cb, err) {
- Exports_js_2.marshalPromiseToCallbacks((async () => {
+ (0, Exports_js_2.marshalPromiseToCallbacks)((async () => {
  try {
  Contracts_js_1.Contracts.throwIfNullOrUndefined(this.privConversation, this.privErrors.permissionDeniedSend);
  Contracts_js_1.Contracts.throwIfNullOrUndefined(this.privConversation.room.token, this.privErrors.permissionDeniedConnect);
@@ -11698,7 +11821,7 @@
  * @param err
  */
  stopTranscribingAsync(cb, err) {
- Exports_js_2.marshalPromiseToCallbacks((async () => {
+ (0, Exports_js_2.marshalPromiseToCallbacks)((async () => {
  try {
  if (!this.privIsSpeaking) {
  await this.cancelSpeech();
@@ -11717,7 +11840,7 @@
  return this.privIsDisposed;
  }
  dispose(reason, success, err) {
- Exports_js_2.marshalPromiseToCallbacks((async () => {
+ (0, Exports_js_2.marshalPromiseToCallbacks)((async () => {
  if (this.isDisposed && !this.privIsSpeaking) {
  return;
  }
@@ -11784,6 +11907,7 @@
  Object.defineProperty(exports, "__esModule", { value: true });
  exports.ConversationTranscriber = void 0;
  var Exports_js_1 = require_Exports7();
+ var PhraseDetectionContext_js_1 = require_PhraseDetectionContext();
  var Exports_js_2 = require_Exports();
  var Contracts_js_1 = require_Contracts();
  var Exports_js_3 = require_Exports3();
@@ -11798,7 +11922,7 @@
  const speechConfigImpl = speechConfig;
  Contracts_js_1.Contracts.throwIfNull(speechConfigImpl, "speechConfig");
  Contracts_js_1.Contracts.throwIfNullOrWhitespace(speechConfigImpl.properties.getProperty(Exports_js_3.PropertyId.SpeechServiceConnection_RecoLanguage), Exports_js_3.PropertyId[Exports_js_3.PropertyId.SpeechServiceConnection_RecoLanguage]);
- super(audioConfig, speechConfigImpl.properties, new Exports_js_1.ConversationTranscriberConnectionFactory());
+ super(audioConfig, speechConfigImpl.properties, new Exports_js_1.ConversationTranscriberConnectionFactory(), speechConfig.tokenCredential);
  this.privProperties.setProperty(Exports_js_3.PropertyId.SpeechServiceConnection_RecognitionEndpointVersion, "2");
  this.privDisposedRecognizer = false;
  }
@@ -11893,7 +12017,7 @@
  * @param err - Callback invoked in case of an error.
  */
  startTranscribingAsync(cb, err) {
- Exports_js_2.marshalPromiseToCallbacks(this.startContinuousRecognitionAsyncImpl(Exports_js_1.RecognitionMode.Conversation), cb, err);
+ (0, Exports_js_2.marshalPromiseToCallbacks)(this.startContinuousRecognitionAsyncImpl(PhraseDetectionContext_js_1.RecognitionMode.Conversation), cb, err);
  }
  /**
  * Stops conversation transcription.
@@ -11904,7 +12028,7 @@
  * @param err - Callback invoked in case of an error.
  */
  stopTranscribingAsync(cb, err) {
- Exports_js_2.marshalPromiseToCallbacks(this.stopContinuousRecognitionAsyncImpl(), cb, err);
+ (0, Exports_js_2.marshalPromiseToCallbacks)(this.stopContinuousRecognitionAsyncImpl(), cb, err);
  }
  /**
  * closes all external resources held by an instance of this class.
@@ -11914,7 +12038,7 @@
  */
  close(cb, errorCb) {
  Contracts_js_1.Contracts.throwIfDisposed(this.privDisposedRecognizer);
- Exports_js_2.marshalPromiseToCallbacks(this.dispose(true), cb, errorCb);
+ (0, Exports_js_2.marshalPromiseToCallbacks)(this.dispose(true), cb, errorCb);
  }
  /**
  * Disposes any resources held by the object.
@@ -12056,7 +12180,7 @@
  Contracts_js_1.Contracts.throwIfNullOrUndefined(speechConfig.subscriptionKey, Exports_js_1.ConversationConnectionConfig.restErrors.invalidArgs.replace("{arg}", "SpeechServiceConnection_Key"));
  }
  const meetingImpl = new MeetingImpl(speechConfig, meetingId);
- Exports_js_2.marshalPromiseToCallbacks((async () => {
+ (0, Exports_js_2.marshalPromiseToCallbacks)((async () => {
  })(), arg3, arg4);
  return meetingImpl;
  }
@@ -12369,7 +12493,7 @@
  */
  addParticipantAsync(participant, cb, err) {
  Contracts_js_1.Contracts.throwIfNullOrUndefined(participant, "Participant");
- Exports_js_2.marshalPromiseToCallbacks(this.addParticipantImplAsync(participant), cb, err);
+ (0, Exports_js_2.marshalPromiseToCallbacks)(this.addParticipantImplAsync(participant), cb, err);
  }
  /**
  * Join a meeting as a participant.
@@ -12404,7 +12528,7 @@
  * @param err
  */
  deleteMeetingAsync(cb, err) {
- Exports_js_2.marshalPromiseToCallbacks(this.deleteMeetingImplAsync(), cb, err);
+ (0, Exports_js_2.marshalPromiseToCallbacks)(this.deleteMeetingImplAsync(), cb, err);
  }
  async deleteMeetingImplAsync() {
  Contracts_js_1.Contracts.throwIfNullOrUndefined(this.privProperties, this.privErrors.permissionDeniedConnect);
@@ -12418,7 +12542,7 @@
  * @param err
  */
  endMeetingAsync(cb, err) {
- Exports_js_2.marshalPromiseToCallbacks(this.endMeetingImplAsync(), cb, err);
+ (0, Exports_js_2.marshalPromiseToCallbacks)(this.endMeetingImplAsync(), cb, err);
  }
  endMeetingImplAsync() {
  return this.close(true);
@@ -12515,7 +12639,7 @@
  try {
  Contracts_js_1.Contracts.throwIfDisposed(this.privIsDisposed);
  if (!!this.privTranscriberRecognizer && userId.hasOwnProperty("id")) {
- Exports_js_2.marshalPromiseToCallbacks(this.removeParticipantImplAsync(userId), cb, err);
+ (0, Exports_js_2.marshalPromiseToCallbacks)(this.removeParticipantImplAsync(userId), cb, err);
  } else {
  Contracts_js_1.Contracts.throwIfDisposed(this.privConversationRecognizer.isDisposed());
  Contracts_js_1.Contracts.throwIfNullOrUndefined(this.privRoom, this.privErrors.permissionDeniedSend);
@@ -13005,7 +13129,7 @@
  this.privRecognizer = new Exports_js_1.TranscriberRecognizer(meeting.config, this.privAudioConfig);
  Contracts_js_1.Contracts.throwIfNullOrUndefined(this.privRecognizer, "Recognizer");
  this.privRecognizer.connectMeetingCallbacks(this);
- Exports_js_2.marshalPromiseToCallbacks(meetingImpl.connectTranscriberRecognizer(this.privRecognizer), cb, err);
+ (0, Exports_js_2.marshalPromiseToCallbacks)(meetingImpl.connectTranscriberRecognizer(this.privRecognizer), cb, err);
  }
  /**
  * Starts meeting transcription, until stopTranscribingAsync() is called.
@@ -13036,7 +13160,7 @@
  */
  leaveMeetingAsync(cb, err) {
  this.privRecognizer.disconnectCallbacks();
- Exports_js_2.marshalPromiseToCallbacks((async () => {
+ (0, Exports_js_2.marshalPromiseToCallbacks)((async () => {
  return;
  })(), cb, err);
  }
@@ -13048,7 +13172,7 @@
  */
  close(cb, errorCb) {
  Contracts_js_1.Contracts.throwIfDisposed(this.privDisposedRecognizer);
- Exports_js_2.marshalPromiseToCallbacks(this.dispose(true), cb, errorCb);
+ (0, Exports_js_2.marshalPromiseToCallbacks)(this.dispose(true), cb, errorCb);
  }
  /**
  * Disposes any resources held by the object.
@@ -13122,6 +13246,7 @@
  "../../node_modules/microsoft-cognitiveservices-speech-sdk/distrib/lib/src/sdk/Transcription/Exports.js"(exports) {
  "use strict";
  Object.defineProperty(exports, "__esModule", { value: true });
+ exports.ConversationTranscriptionResult = exports.MeetingTranscriber = exports.MeetingTranscriptionCanceledEventArgs = exports.MeetingImpl = exports.Meeting = exports.ParticipantChangedReason = exports.User = exports.Participant = exports.ConversationTranscriber = exports.ConversationTranslator = exports.ConversationTranslationResult = exports.ConversationTranslationEventArgs = exports.ConversationTranslationCanceledEventArgs = exports.ConversationParticipantsChangedEventArgs = exports.ConversationExpirationEventArgs = exports.ConversationCommon = exports.ConversationImpl = exports.Conversation = void 0;
  var Conversation_js_1 = require_Conversation();
  Object.defineProperty(exports, "Conversation", { enumerable: true, get: function() {
  return Conversation_js_1.Conversation;
@@ -13217,6 +13342,7 @@
  this.privDisposed = false;
  this.privSynthesizing = false;
  this.synthesisRequestQueue = new Exports_js_2.Queue();
+ this.tokenCredential = speechConfig.tokenCredential;
  }
  /**
  * Gets the authorization token used to communicate with the service.
@@ -13300,7 +13426,7 @@
  ["en-PH"]: "en-PH-JamesNeural",
  ["en-SG"]: "en-SG-LunaNeural",
  ["en-TZ"]: "en-TZ-ElimuNeural",
- ["en-US"]: "en-US-JennyNeural",
+ ["en-US"]: "en-US-AvaMultilingualNeural",
  ["en-ZA"]: "en-ZA-LeahNeural",
  ["es-AR"]: "es-AR-ElenaNeural",
  ["es-BO"]: "es-BO-MarceloNeural",
@@ -13448,7 +13574,21 @@
  }
  const synthesizerConfig = this.createSynthesizerConfig(new Exports_js_1.SpeechServiceConfig(new Exports_js_1.Context(new Exports_js_1.OS(osPlatform, osName, osVersion))));
  const subscriptionKey = this.privProperties.getProperty(Exports_js_3.PropertyId.SpeechServiceConnection_Key, void 0);
- const authentication = subscriptionKey && subscriptionKey !== "" ? new Exports_js_1.CognitiveSubscriptionKeyAuthentication(subscriptionKey) : new Exports_js_1.CognitiveTokenAuthentication(() => {
+ const authentication = subscriptionKey && subscriptionKey !== "" ? new Exports_js_1.CognitiveSubscriptionKeyAuthentication(subscriptionKey) : this.tokenCredential ? new Exports_js_1.CognitiveTokenAuthentication(async () => {
+ try {
+ const tokenResponse = await this.tokenCredential.getToken("https://cognitiveservices.azure.com/.default");
+ return tokenResponse?.token ?? "";
+ } catch (err) {
+ throw err;
+ }
+ }, async () => {
+ try {
+ const tokenResponse = await this.tokenCredential.getToken("https://cognitiveservices.azure.com/.default");
+ return tokenResponse?.token ?? "";
+ } catch (err) {
+ throw err;
+ }
+ }) : new Exports_js_1.CognitiveTokenAuthentication(() => {
  const authorizationToken = this.privProperties.getProperty(Exports_js_3.PropertyId.SpeechServiceAuthorization_Token, void 0);
  return Promise.resolve(authorizationToken);
  }, () => {
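
Example — Entra ID token authentication. When no subscription key is present but a tokenCredential was supplied, the synthesizer now drives CognitiveTokenAuthentication by minting tokens against the https://cognitiveservices.azure.com/.default scope. A sketch of the calling side, assuming a SpeechConfig factory overload that accepts an @azure/identity TokenCredential (only the consuming side, speechConfig.tokenCredential, is visible in this diff):

```ts
import { DefaultAzureCredential } from "@azure/identity";
import * as sdk from "microsoft-cognitiveservices-speech-sdk";

const credential = new DefaultAzureCredential();

// Assumption: an overload of SpeechConfig.fromEndpoint taking a TokenCredential.
const speechConfig = sdk.SpeechConfig.fromEndpoint(
  new URL("wss://<resource>.cognitiveservices.azure.com"),
  credential
);

// Per the hunk above, the bundle then calls
//   credential.getToken("https://cognitiveservices.azure.com/.default")
// and uses tokenResponse?.token ?? "" as the authorization token.
```
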
@@ -13571,7 +13711,7 @@
  */
  close(cb, err) {
  Contracts_js_1.Contracts.throwIfDisposed(this.privDisposed);
- Exports_js_2.marshalPromiseToCallbacks(this.dispose(true), cb, err);
+ (0, Exports_js_2.marshalPromiseToCallbacks)(this.dispose(true), cb, err);
  }
  /**
  * @Internal
@@ -13599,7 +13739,7 @@
  speakImpl(text, IsSsml, cb, err, dataStream) {
  try {
  Contracts_js_1.Contracts.throwIfDisposed(this.privDisposed);
- const requestId = Exports_js_2.createNoDashGuid();
+ const requestId = (0, Exports_js_2.createNoDashGuid)();
  let audioDestination;
  if (dataStream instanceof Exports_js_3.PushAudioOutputStreamCallback) {
  audioDestination = new AudioOutputStream_js_1.PushAudioOutputStreamImpl(dataStream);
@@ -13645,7 +13785,7 @@
  }
  }
  async getVoices(locale) {
- const requestId = Exports_js_2.createNoDashGuid();
+ const requestId = (0, Exports_js_2.createNoDashGuid)();
  const response = await this.privRestAdapter.getVoicesList(requestId);
  if (response.ok && Array.isArray(response.json)) {
  let json = response.json;
@@ -14062,16 +14202,22 @@
  })(SynthesisVoiceGender = exports.SynthesisVoiceGender || (exports.SynthesisVoiceGender = {}));
  var SynthesisVoiceType;
  (function(SynthesisVoiceType2) {
+ SynthesisVoiceType2[SynthesisVoiceType2["Unknown"] = 0] = "Unknown";
  SynthesisVoiceType2[SynthesisVoiceType2["OnlineNeural"] = 1] = "OnlineNeural";
  SynthesisVoiceType2[SynthesisVoiceType2["OnlineStandard"] = 2] = "OnlineStandard";
  SynthesisVoiceType2[SynthesisVoiceType2["OfflineNeural"] = 3] = "OfflineNeural";
  SynthesisVoiceType2[SynthesisVoiceType2["OfflineStandard"] = 4] = "OfflineStandard";
+ SynthesisVoiceType2[SynthesisVoiceType2["OnlineNeuralHD"] = 5] = "OnlineNeuralHD";
  })(SynthesisVoiceType = exports.SynthesisVoiceType || (exports.SynthesisVoiceType = {}));
  var GENDER_LOOKUP = {
  [SynthesisVoiceGender[SynthesisVoiceGender.Neutral]]: SynthesisVoiceGender.Neutral,
  [SynthesisVoiceGender[SynthesisVoiceGender.Male]]: SynthesisVoiceGender.Male,
  [SynthesisVoiceGender[SynthesisVoiceGender.Female]]: SynthesisVoiceGender.Female
  };
+ var VOICE_TYPE_LOOKUP = {
+ Neural: SynthesisVoiceType.OnlineNeural,
+ NeuralHD: SynthesisVoiceType.OnlineNeuralHD
+ };
  var VoiceInfo = class {
  constructor(json) {
  this.privStyleList = [];
@@ -14082,7 +14228,7 @@
  this.privLocaleName = json.LocaleName;
  this.privDisplayName = json.DisplayName;
  this.privLocalName = json.LocalName;
- this.privVoiceType = json.VoiceType.endsWith("Standard") ? SynthesisVoiceType.OnlineStandard : SynthesisVoiceType.OnlineNeural;
+ this.privVoiceType = VOICE_TYPE_LOOKUP[json.VoiceType] || SynthesisVoiceType.Unknown;
  this.privGender = GENDER_LOOKUP[json.Gender] || SynthesisVoiceGender.Unknown;
  if (!!json.StyleList && Array.isArray(json.StyleList)) {
  for (const style of json.StyleList) {
@@ -14101,6 +14247,9 @@
  if (Array.isArray(json.RolePlayList)) {
  this.privRolePlayList = [...json.RolePlayList];
  }
+ if (json.VoiceTag) {
+ this.privVoiceTag = json.VoiceTag;
+ }
  }
  }
  get name() {
@@ -14148,6 +14297,9 @@
  get rolePlayList() {
  return this.privRolePlayList;
  }
+ get voiceTag() {
+ return this.privVoiceTag;
+ }
  };
  exports.VoiceInfo = VoiceInfo;
  }
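
Example — consuming the new voice metadata. VoiceType strings now resolve through an explicit lookup ("Neural" → OnlineNeural, "NeuralHD" → OnlineNeuralHD, anything else → Unknown) instead of the old endsWith("Standard") test, and VoiceTag is exposed through the new voiceTag getter. A consumer sketch using the standard getVoicesAsync API:

```ts
import * as sdk from "microsoft-cognitiveservices-speech-sdk";

async function listHdVoices(): Promise<void> {
  const speechConfig = sdk.SpeechConfig.fromSubscription("<key>", "<region>");
  const synthesizer = new sdk.SpeechSynthesizer(speechConfig);

  const result = await synthesizer.getVoicesAsync();
  for (const voice of result.voices) {
    // Pick out the new HD tier and read the newly exposed tag metadata.
    if (voice.voiceType === sdk.SynthesisVoiceType.OnlineNeuralHD) {
      console.log(voice.name, voice.voiceTag);
    }
  }
  synthesizer.close();
}
```
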
@@ -14170,7 +14322,9 @@
  [AudioStreamFormat_js_1.AudioFormatTag.OGG_OPUS]: "audio/ogg",
  [AudioStreamFormat_js_1.AudioFormatTag.WEBM_OPUS]: "audio/webm; codecs=opus",
  [AudioStreamFormat_js_1.AudioFormatTag.ALaw]: "audio/x-wav",
- [AudioStreamFormat_js_1.AudioFormatTag.FLAC]: "audio/flac"
+ [AudioStreamFormat_js_1.AudioFormatTag.FLAC]: "audio/flac",
+ [AudioStreamFormat_js_1.AudioFormatTag.AMR_WB]: "audio/amr-wb",
+ [AudioStreamFormat_js_1.AudioFormatTag.G722]: "audio/G722"
  };
  var SpeakerAudioDestination = class {
  constructor(audioDestinationId) {
@@ -14178,7 +14332,7 @@
  this.privAppendingToBuffer = false;
  this.privMediaSourceOpened = false;
  this.privBytesReceived = 0;
- this.privId = audioDestinationId ? audioDestinationId : Exports_js_1.createNoDashGuid();
+ this.privId = audioDestinationId ? audioDestinationId : (0, Exports_js_1.createNoDashGuid)();
  this.privIsPaused = false;
  this.privIsClosed = false;
  }
@@ -14802,6 +14956,7 @@
  */
  constructor(character, style, videoFormat) {
  this.privCustomized = false;
+ this.privUseBuiltInVoice = false;
  Contracts_js_1.Contracts.throwIfNullOrWhitespace(character, "character");
  this.character = character;
  this.style = style;
@@ -14823,17 +14978,60 @@
  this.privCustomized = value;
  }
  /**
- * Sets the background color.
+ * Indicates whether to use built-in voice for custom avatar.
+ */
+ get useBuiltInVoice() {
+ return this.privUseBuiltInVoice;
+ }
+ /**
+ * Sets whether to use built-in voice for custom avatar.
+ */
+ set useBuiltInVoice(value) {
+ this.privUseBuiltInVoice = value;
+ }
+ /**
+ * Gets the background color.
  */
  get backgroundColor() {
  return this.privBackgroundColor;
  }
  /**
- * Gets the background color.
+ * Sets the background color.
  */
  set backgroundColor(value) {
  this.privBackgroundColor = value;
  }
+ /**
+ * Gets the background image.
+ */
+ get backgroundImage() {
+ return this.privBackgroundImage;
+ }
+ /**
+ * Sets the background image.
+ * @param {URL} value - The background image.
+ */
+ set backgroundImage(value) {
+ this.privBackgroundImage = value;
+ }
+ /**
+ * Gets the remote ICE servers.
+ * @remarks This method is designed to be used internally in the SDK.
+ * @returns {RTCIceServer[]} The remote ICE servers.
+ */
+ get remoteIceServers() {
+ return this.privRemoteIceServers;
+ }
+ /**
+ * Sets the remote ICE servers.
+ * @remarks Normally, the ICE servers are gathered from the PeerConnection,
+ * set this property to override the ICE servers. E.g., the ICE servers are
+ * different in client and server side.
+ * @param {RTCIceServer[]} value - The remote ICE servers.
+ */
+ set remoteIceServers(value) {
+ this.privRemoteIceServers = value;
+ }
  };
  exports.AvatarConfig = AvatarConfig;
  }
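
Example — the extended avatar configuration. AvatarConfig gains useBuiltInVoice (default false), a backgroundImage URL, and a remoteIceServers override; the previously swapped backgroundColor getter/setter JSDoc is also corrected. A configuration sketch using the constructor shape shown above; the character, style, and server values are placeholders:

```ts
import * as sdk from "microsoft-cognitiveservices-speech-sdk";

const videoFormat = new sdk.AvatarVideoFormat();
const avatarConfig = new sdk.AvatarConfig("lisa", "casual-sitting", videoFormat);

avatarConfig.useBuiltInVoice = true; // new: use the built-in voice of a custom avatar
avatarConfig.backgroundColor = "#FFFFFFFF";
avatarConfig.backgroundImage = new URL("https://example.com/background.png"); // new
// New escape hatch for when client and server resolve different ICE servers:
avatarConfig.remoteIceServers = [
  { urls: ["turn:relay.example.com:3478"], username: "user", credential: "pass" },
];
```
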
@@ -14905,9 +15103,9 @@
  var QueryParameterNames_js_1 = require_QueryParameterNames();
  var SpeechSynthesisConnectionFactory = class {
  constructor() {
- this.synthesisUri = "/cognitiveservices/websocket/v1";
+ this.synthesisUri = "/tts/cognitiveservices/websocket/v1";
  }
- create(config, authInfo, connectionId) {
+ async create(config, authInfo, connectionId) {
  let endpoint = config.parameters.getProperty(Exports_js_2.PropertyId.SpeechServiceConnection_Endpoint, void 0);
  const region = config.parameters.getProperty(Exports_js_2.PropertyId.SpeechServiceConnection_Region, void 0);
  const hostSuffix = ConnectionFactoryBase_js_1.ConnectionFactoryBase.getHostSuffix(region);
@@ -14930,6 +15128,14 @@
  queryParams[QueryParameterNames_js_1.QueryParameterNames.EnableAvatar] = "true";
  }
  }
+ if (!!endpoint) {
+ const endpointUrl = new URL(endpoint);
+ const pathName = endpointUrl.pathname;
+ if (pathName === "" || pathName === "/") {
+ endpointUrl.pathname = this.synthesisUri;
+ endpoint = await ConnectionFactoryBase_js_1.ConnectionFactoryBase.getRedirectUrlFromEndpoint(endpointUrl.toString());
+ }
+ }
  if (!endpoint) {
  endpoint = host + this.synthesisUri;
  }
@@ -15103,7 +15309,7 @@
  return config;
  }
  async speak(text, isSSML2) {
- const requestId = Exports_js_2.createNoDashGuid();
+ const requestId = (0, Exports_js_2.createNoDashGuid)();
  const deferredResult = new Exports_js_2.Deferred();
  this.synthesisRequestQueue.enqueue(new Synthesizer_js_1.SynthesisRequest(requestId, text, isSSML2, (e) => {
  deferredResult.resolve(e);
@@ -15247,6 +15453,9 @@
  "../../node_modules/microsoft-cognitiveservices-speech-sdk/distrib/lib/src/sdk/Exports.js"(exports) {
  "use strict";
  Object.defineProperty(exports, "__esModule", { value: true });
+ exports.ConnectionEventArgs = exports.CancellationErrorCode = exports.CancellationDetails = exports.CancellationDetailsBase = exports.IntentRecognitionCanceledEventArgs = exports.TranslationRecognitionCanceledEventArgs = exports.NoMatchDetails = exports.NoMatchReason = exports.Translations = exports.TranslationRecognizer = exports.VoiceProfileType = exports.IntentRecognizer = exports.SpeechRecognizer = exports.Recognizer = exports.PropertyId = exports.PropertyCollection = exports.SpeechTranslationConfigImpl = exports.SpeechTranslationConfig = exports.SpeechConfigImpl = exports.SpeechConfig = exports.ResultReason = exports.TranslationSynthesisResult = exports.TranslationRecognitionResult = exports.TranslationSynthesisEventArgs = exports.TranslationRecognitionEventArgs = exports.SpeechRecognitionCanceledEventArgs = exports.MeetingTranscriptionEventArgs = exports.ConversationTranscriptionEventArgs = exports.SpeechRecognitionEventArgs = exports.LanguageUnderstandingModel = exports.IntentRecognitionResult = exports.SpeechRecognitionResult = exports.RecognitionResult = exports.IntentRecognitionEventArgs = exports.OutputFormat = exports.RecognitionEventArgs = exports.SessionEventArgs = exports.KeywordRecognitionModel = exports.PushAudioOutputStreamCallback = exports.PullAudioInputStreamCallback = exports.CancellationReason = exports.PushAudioOutputStream = exports.PullAudioOutputStream = exports.AudioOutputStream = exports.PushAudioInputStream = exports.PullAudioInputStream = exports.AudioInputStream = exports.AudioFormatTag = exports.AudioStreamFormat = exports.AudioConfig = void 0;
+ exports.SpeechSynthesisEventArgs = exports.SpeechSynthesisResult = exports.SynthesisResult = exports.SpeechSynthesizer = exports.SpeechSynthesisOutputFormat = exports.Synthesizer = exports.User = exports.ParticipantChangedReason = exports.Participant = exports.MeetingTranscriber = exports.Meeting = exports.ConversationTranscriptionResult = exports.ConversationTranscriber = exports.ConversationTranslator = exports.ConversationTranslationResult = exports.ConversationTranslationEventArgs = exports.ConversationTranslationCanceledEventArgs = exports.ConversationParticipantsChangedEventArgs = exports.ConversationExpirationEventArgs = exports.Conversation = exports.SpeakerRecognitionCancellationDetails = exports.SpeakerRecognitionResultType = exports.SpeakerRecognitionResult = exports.SourceLanguageConfig = exports.AutoDetectSourceLanguageResult = exports.AutoDetectSourceLanguageConfig = exports.SpeakerVerificationModel = exports.SpeakerIdentificationModel = exports.SpeakerRecognizer = exports.VoiceProfileClient = exports.VoiceProfilePhraseResult = exports.VoiceProfileCancellationDetails = exports.VoiceProfileResult = exports.VoiceProfileEnrollmentCancellationDetails = exports.VoiceProfileEnrollmentResult = exports.VoiceProfile = exports.ConnectionMessage = exports.ConnectionMessageEventArgs = exports.BaseAudioPlayer = exports.ProfanityOption = exports.ServicePropertyChannel = exports.TurnStatusReceivedEventArgs = exports.ActivityReceivedEventArgs = exports.DialogServiceConnector = exports.CustomCommandsConfig = exports.BotFrameworkConfig = exports.DialogServiceConfig = exports.PhraseListGrammar = exports.Connection = exports.ServiceEventArgs = void 0;
+ exports.LogLevel = exports.Diagnostics = exports.AvatarWebRTCConnectionResult = exports.Coordinate = exports.AvatarVideoFormat = exports.AvatarSynthesizer = exports.AvatarEventArgs = exports.AvatarConfig = exports.LanguageIdMode = exports.PronunciationAssessmentResult = exports.PronunciationAssessmentConfig = exports.PronunciationAssessmentGranularity = exports.PronunciationAssessmentGradingSystem = exports.MeetingTranscriptionCanceledEventArgs = exports.ConversationTranscriptionCanceledEventArgs = exports.SpeakerAudioDestination = exports.VoiceInfo = exports.SynthesisVoiceType = exports.SynthesisVoiceGender = exports.SynthesisVoicesResult = exports.SpeechSynthesisBoundaryType = exports.SpeechSynthesisVisemeEventArgs = exports.SpeechSynthesisBookmarkEventArgs = exports.SpeechSynthesisWordBoundaryEventArgs = void 0;
  var AudioConfig_js_1 = require_AudioConfig();
  Object.defineProperty(exports, "AudioConfig", { enumerable: true, get: function() {
  return AudioConfig_js_1.AudioConfig;
@@ -15642,6 +15851,12 @@
  return SynthesisVoicesResult_js_1.SynthesisVoicesResult;
  } });
  var VoiceInfo_js_1 = require_VoiceInfo();
+ Object.defineProperty(exports, "SynthesisVoiceGender", { enumerable: true, get: function() {
+ return VoiceInfo_js_1.SynthesisVoiceGender;
+ } });
+ Object.defineProperty(exports, "SynthesisVoiceType", { enumerable: true, get: function() {
+ return VoiceInfo_js_1.SynthesisVoiceType;
+ } });
  Object.defineProperty(exports, "VoiceInfo", { enumerable: true, get: function() {
  return VoiceInfo_js_1.VoiceInfo;
  } });
@@ -15792,8 +16007,7 @@
  throw new Error(`Unknown type: ${typeof arg}`);
  }
  });
- if (!method)
- method = "GET";
+ if (!method) method = "GET";
  if (statusCodes.size === 0) {
  statusCodes.add(200);
  }
@@ -15822,8 +16036,7 @@
  this.arrayBuffer = res.arrayBuffer.bind(res);
  let buffer;
  const get = () => {
- if (!buffer)
- buffer = this.arrayBuffer();
+ if (!buffer) buffer = this.arrayBuffer();
  return buffer;
  };
  Object.defineProperty(this, "responseBody", { get });
@@ -15836,8 +16049,7 @@
  var mkrequest = (statusCodes, method, encoding, headers, baseurl) => async (_url, body, _headers = {}) => {
  _url = baseurl + (_url || "");
  let parsed = new URL(_url);
- if (!headers)
- headers = {};
+ if (!headers) headers = {};
  if (parsed.username) {
  headers.Authorization = "Basic " + btoa(parsed.username + ":" + parsed.password);
  parsed = new URL(parsed.protocol + "//" + parsed.host + parsed.pathname + parsed.search);
@@ -15860,14 +16072,10 @@
  if (!statusCodes.has(resp.status)) {
  throw new StatusError(resp);
  }
- if (encoding === "json")
- return resp.json();
- else if (encoding === "buffer")
- return resp.arrayBuffer();
- else if (encoding === "string")
- return resp.text();
- else
- return resp;
+ if (encoding === "json") return resp.json();
+ else if (encoding === "buffer") return resp.arrayBuffer();
+ else if (encoding === "string") return resp.text();
+ else return resp;
  };
  module.exports = core(mkrequest);
  }
@@ -15937,7 +16145,7 @@
  };
  };
  const send = (postData) => {
- const sendRequest = bent_1.default(uri, requestCommand, this.privHeaders, 200, 201, 202, 204, 400, 401, 402, 403, 404);
+ const sendRequest = (0, bent_1.default)(uri, requestCommand, this.privHeaders, 200, 201, 202, 204, 400, 401, 402, 403, 404);
  const params = this.queryParams(queryParams) === "" ? "" : `?${this.queryParams(queryParams)}`;
  sendRequest(params, postData).then(async (data) => {
  if (method === RestRequestType.Delete || data.statusCode === 204) {
@@ -16033,20 +16241,16 @@
  "../../node_modules/microsoft-cognitiveservices-speech-sdk/distrib/lib/src/common.browser/Exports.js"(exports) {
  "use strict";
  var __createBinding = exports && exports.__createBinding || (Object.create ? function(o, m, k, k2) {
- if (k2 === void 0)
- k2 = k;
+ if (k2 === void 0) k2 = k;
  Object.defineProperty(o, k2, { enumerable: true, get: function() {
  return m[k];
  } });
  } : function(o, m, k, k2) {
- if (k2 === void 0)
- k2 = k;
+ if (k2 === void 0) k2 = k;
  o[k2] = m[k];
  });
  var __exportStar = exports && exports.__exportStar || function(m, exports2) {
- for (var p in m)
- if (p !== "default" && !exports2.hasOwnProperty(p))
- __createBinding(exports2, m, p);
+ for (var p in m) if (p !== "default" && !Object.prototype.hasOwnProperty.call(exports2, p)) __createBinding(exports2, m, p);
  };
  Object.defineProperty(exports, "__esModule", { value: true });
  __exportStar(require_ConsoleLoggingListener(), exports);
@@ -16095,7 +16299,7 @@
  headers[HeaderNames_js_1.HeaderNames.ConnectionId] = connectionId;
  config.parameters.setProperty(Exports_js_2.PropertyId.SpeechServiceConnection_Url, endpoint);
  const enableCompression = config.parameters.getProperty("SPEECH-EnableWebsocketCompression", "false") === "true";
- return new Exports_js_1.WebsocketConnection(endpoint, queryParams, headers, new Exports_js_3.WebsocketMessageFormatter(), Exports_js_1.ProxyInfo.fromRecognizerConfig(config), enableCompression, connectionId);
+ return Promise.resolve(new Exports_js_1.WebsocketConnection(endpoint, queryParams, headers, new Exports_js_3.WebsocketMessageFormatter(), Exports_js_1.ProxyInfo.fromRecognizerConfig(config), enableCompression, connectionId));
  }
  getSpeechRegionFromIntentRegion(intentRegion) {
  switch (intentRegion) {
@@ -16190,7 +16394,7 @@
  headers[HeaderNames_js_1.HeaderNames.SpIDAuthKey] = config.parameters.getProperty(Exports_js_2.PropertyId.SpeechServiceConnection_Key);
  config.parameters.setProperty(Exports_js_2.PropertyId.SpeechServiceConnection_Url, endpoint);
  const enableCompression = config.parameters.getProperty("SPEECH-EnableWebsocketCompression", "false") === "true";
- return new Exports_js_1.WebsocketConnection(endpoint, queryParams, headers, new Exports_js_3.WebsocketMessageFormatter(), Exports_js_1.ProxyInfo.fromRecognizerConfig(config), enableCompression, connectionId);
+ return Promise.resolve(new Exports_js_1.WebsocketConnection(endpoint, queryParams, headers, new Exports_js_3.WebsocketMessageFormatter(), Exports_js_1.ProxyInfo.fromRecognizerConfig(config), enableCompression, connectionId));
  }
  scenarioToPath(mode) {
  switch (mode) {
@@ -16432,6 +16636,143 @@
  }
  });

+ // ../../node_modules/microsoft-cognitiveservices-speech-sdk/distrib/lib/src/common.speech/ServiceMessages/PhraseDetection/Segmentation.js
+ var require_Segmentation = __commonJS({
+ "../../node_modules/microsoft-cognitiveservices-speech-sdk/distrib/lib/src/common.speech/ServiceMessages/PhraseDetection/Segmentation.js"(exports) {
+ "use strict";
+ Object.defineProperty(exports, "__esModule", { value: true });
+ exports.SegmentationMode = void 0;
+ var SegmentationMode;
+ (function(SegmentationMode2) {
+ SegmentationMode2["Normal"] = "Normal";
+ SegmentationMode2["Disabled"] = "Disabled";
+ SegmentationMode2["Custom"] = "Custom";
+ SegmentationMode2["Semantic"] = "Semantic";
+ })(SegmentationMode = exports.SegmentationMode || (exports.SegmentationMode = {}));
+ }
+ });
+
+ // ../../node_modules/microsoft-cognitiveservices-speech-sdk/distrib/lib/src/common.speech/ServiceMessages/Translation/OnSuccess.js
+ var require_OnSuccess = __commonJS({
+ "../../node_modules/microsoft-cognitiveservices-speech-sdk/distrib/lib/src/common.speech/ServiceMessages/Translation/OnSuccess.js"(exports) {
+ "use strict";
+ Object.defineProperty(exports, "__esModule", { value: true });
+ exports.NextAction = void 0;
+ var NextAction;
+ (function(NextAction2) {
+ NextAction2["None"] = "None";
+ NextAction2["Synthesize"] = "Synthesize";
+ })(NextAction = exports.NextAction || (exports.NextAction = {}));
+ }
+ });
+
+ // ../../node_modules/microsoft-cognitiveservices-speech-sdk/distrib/lib/src/common.speech/ServiceMessages/Translation/InterimResults.js
+ var require_InterimResults = __commonJS({
+ "../../node_modules/microsoft-cognitiveservices-speech-sdk/distrib/lib/src/common.speech/ServiceMessages/Translation/InterimResults.js"(exports) {
+ "use strict";
+ Object.defineProperty(exports, "__esModule", { value: true });
+ exports.Mode = void 0;
+ var Mode;
+ (function(Mode2) {
+ Mode2["None"] = "None";
+ Mode2["Always"] = "Always";
+ })(Mode = exports.Mode || (exports.Mode = {}));
+ }
+ });
+
+ // ../../node_modules/microsoft-cognitiveservices-speech-sdk/distrib/lib/src/common.speech/ServiceMessages/LanguageId/LanguageIdContext.js
+ var require_LanguageIdContext = __commonJS({
+ "../../node_modules/microsoft-cognitiveservices-speech-sdk/distrib/lib/src/common.speech/ServiceMessages/LanguageId/LanguageIdContext.js"(exports) {
+ "use strict";
+ Object.defineProperty(exports, "__esModule", { value: true });
+ exports.LanguageIdDetectionPriority = exports.LanguageIdDetectionMode = void 0;
+ var LanguageIdDetectionMode;
+ (function(LanguageIdDetectionMode2) {
+ LanguageIdDetectionMode2["DetectAtAudioStart"] = "DetectAtAudioStart";
+ LanguageIdDetectionMode2["DetectContinuous"] = "DetectContinuous";
+ LanguageIdDetectionMode2["DetectSegments"] = "DetectSegments";
+ })(LanguageIdDetectionMode = exports.LanguageIdDetectionMode || (exports.LanguageIdDetectionMode = {}));
+ var LanguageIdDetectionPriority;
+ (function(LanguageIdDetectionPriority2) {
+ LanguageIdDetectionPriority2["Auto"] = "Auto";
+ LanguageIdDetectionPriority2["PrioritizeLatency"] = "PrioritizeLatency";
+ LanguageIdDetectionPriority2["PrioritizeAccuracy"] = "PrioritizeAccuracy";
+ })(LanguageIdDetectionPriority = exports.LanguageIdDetectionPriority || (exports.LanguageIdDetectionPriority = {}));
+ }
+ });
+
+ // ../../node_modules/microsoft-cognitiveservices-speech-sdk/distrib/lib/src/common.speech/ServiceMessages/LanguageId/OnSuccess.js
+ var require_OnSuccess2 = __commonJS({
+ "../../node_modules/microsoft-cognitiveservices-speech-sdk/distrib/lib/src/common.speech/ServiceMessages/LanguageId/OnSuccess.js"(exports) {
+ "use strict";
+ Object.defineProperty(exports, "__esModule", { value: true });
+ exports.NextAction = void 0;
+ var NextAction;
+ (function(NextAction2) {
+ NextAction2["Recognize"] = "Recognize";
+ NextAction2["None"] = "None";
+ })(NextAction = exports.NextAction || (exports.NextAction = {}));
+ }
+ });
+
+ // ../../node_modules/microsoft-cognitiveservices-speech-sdk/distrib/lib/src/common.speech/ServiceMessages/LanguageId/OnUnknown.js
+ var require_OnUnknown = __commonJS({
+ "../../node_modules/microsoft-cognitiveservices-speech-sdk/distrib/lib/src/common.speech/ServiceMessages/LanguageId/OnUnknown.js"(exports) {
+ "use strict";
+ Object.defineProperty(exports, "__esModule", { value: true });
+ exports.OnUnknownAction = void 0;
+ var OnUnknownAction;
+ (function(OnUnknownAction2) {
+ OnUnknownAction2["RecognizeWithDefaultLanguage"] = "RecognizeWithDefaultLanguage";
+ OnUnknownAction2["None"] = "None";
+ })(OnUnknownAction = exports.OnUnknownAction || (exports.OnUnknownAction = {}));
16729
+ }
16730
+ });
16731
+
16732
+ // ../../node_modules/microsoft-cognitiveservices-speech-sdk/distrib/lib/src/common.speech/ServiceMessages/PhraseOutput/InterimResults.js
16733
+ var require_InterimResults2 = __commonJS({
16734
+ "../../node_modules/microsoft-cognitiveservices-speech-sdk/distrib/lib/src/common.speech/ServiceMessages/PhraseOutput/InterimResults.js"(exports) {
16735
+ "use strict";
16736
+ Object.defineProperty(exports, "__esModule", { value: true });
16737
+ exports.ResultType = void 0;
16738
+ var ResultType;
16739
+ (function(ResultType2) {
16740
+ ResultType2["Auto"] = "Auto";
16741
+ ResultType2["StableFragment"] = "StableFragment";
16742
+ ResultType2["Hypothesis"] = "Hypothesis";
16743
+ ResultType2["None"] = "None";
16744
+ })(ResultType = exports.ResultType || (exports.ResultType = {}));
16745
+ }
16746
+ });
16747
+
16748
+ // ../../node_modules/microsoft-cognitiveservices-speech-sdk/distrib/lib/src/common.speech/ServiceMessages/PhraseOutput/PhraseResults.js
16749
+ var require_PhraseResults = __commonJS({
16750
+ "../../node_modules/microsoft-cognitiveservices-speech-sdk/distrib/lib/src/common.speech/ServiceMessages/PhraseOutput/PhraseResults.js"(exports) {
16751
+ "use strict";
16752
+ Object.defineProperty(exports, "__esModule", { value: true });
16753
+ exports.PhraseResultOutputType = void 0;
16754
+ var PhraseResultOutputType;
16755
+ (function(PhraseResultOutputType2) {
16756
+ PhraseResultOutputType2["Always"] = "Always";
16757
+ PhraseResultOutputType2["None"] = "None";
16758
+ })(PhraseResultOutputType = exports.PhraseResultOutputType || (exports.PhraseResultOutputType = {}));
16759
+ }
16760
+ });
16761
+
16762
+ // ../../node_modules/microsoft-cognitiveservices-speech-sdk/distrib/lib/src/common.speech/ServiceMessages/PhraseDetection/OnSuccess.js
16763
+ var require_OnSuccess3 = __commonJS({
16764
+ "../../node_modules/microsoft-cognitiveservices-speech-sdk/distrib/lib/src/common.speech/ServiceMessages/PhraseDetection/OnSuccess.js"(exports) {
16765
+ "use strict";
16766
+ Object.defineProperty(exports, "__esModule", { value: true });
16767
+ exports.NextAction = void 0;
16768
+ var NextAction;
16769
+ (function(NextAction2) {
16770
+ NextAction2["None"] = "None";
16771
+ NextAction2["Translate"] = "Translate";
16772
+ })(NextAction = exports.NextAction || (exports.NextAction = {}));
16773
+ }
16774
+ });
16775
+
16435
16776
  // ../../node_modules/microsoft-cognitiveservices-speech-sdk/distrib/lib/src/common.speech/ServiceRecognizerBase.js
16436
16777
  var require_ServiceRecognizerBase = __commonJS({
16437
16778
  "../../node_modules/microsoft-cognitiveservices-speech-sdk/distrib/lib/src/common.speech/ServiceRecognizerBase.js"(exports) {
@@ -16443,6 +16784,16 @@
16443
16784
  var Exports_js_3 = require_Exports3();
16444
16785
  var Exports_js_4 = require_Exports7();
16445
16786
  var SpeechConnectionMessage_Internal_js_1 = require_SpeechConnectionMessage_Internal();
16787
+ var Segmentation_js_1 = require_Segmentation();
16788
+ var PhraseDetectionContext_js_1 = require_PhraseDetectionContext();
16789
+ var OnSuccess_js_1 = require_OnSuccess();
16790
+ var InterimResults_js_1 = require_InterimResults();
16791
+ var LanguageIdContext_js_1 = require_LanguageIdContext();
16792
+ var OnSuccess_js_2 = require_OnSuccess2();
16793
+ var OnUnknown_js_1 = require_OnUnknown();
16794
+ var InterimResults_js_2 = require_InterimResults2();
16795
+ var PhraseResults_js_1 = require_PhraseResults();
16796
+ var OnSuccess_js_3 = require_OnSuccess3();
16446
16797
  var ServiceRecognizerBase = class _ServiceRecognizerBase {
16447
16798
  constructor(authentication, connectionFactory, audioSource, recognizerConfig, recognizer) {
16448
16799
  this.privConnectionConfigurationPromise = void 0;
@@ -16494,6 +16845,9 @@
16494
16845
  if (typeof window !== "undefined") {
16495
16846
  this.privSetTimeout = window.setTimeout.bind(window);
16496
16847
  }
16848
+ if (typeof globalThis !== "undefined") {
16849
+ this.privSetTimeout = globalThis.setTimeout.bind(globalThis);
16850
+ }
16497
16851
  }
16498
16852
  this.connectionEvents.attach((connectionEvent) => {
16499
16853
  if (connectionEvent.name === "ConnectionClosedEvent") {
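
Note on the hunk above: `globalThis` exists in every modern runtime, so the new branch — which runs after the `window` check — effectively wins everywhere. In a browser `globalThis === window`, making the binding equivalent; the practical gain is presumably window-less hosts such as workers and Node. A sketch of the resulting selection order (the standalone helper is hypothetical):

```js
// Last assignment wins, mirroring the constructor's if/if ordering.
function pickSetTimeout() {
  let timer = setTimeout; // module-scope fallback
  if (typeof window !== "undefined") {
    timer = window.setTimeout.bind(window);
  }
  if (typeof globalThis !== "undefined") {
    // In a browser globalThis === window, so this rebinding is a no-op;
    // in workers/Node it supplies the binding the window branch never set.
    timer = globalThis.setTimeout.bind(globalThis);
  }
  return timer;
}

pickSetTimeout()(() => console.log("tick"), 0);
```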
@@ -16504,95 +16858,134 @@
16504
16858
  }
16505
16859
  });
16506
16860
  if (this.privEnableSpeakerId) {
16507
- this.privDiarizationSessionId = Exports_js_2.createNoDashGuid();
16861
+ this.privDiarizationSessionId = (0, Exports_js_2.createNoDashGuid)();
16508
16862
  }
16509
- this.setLanguageIdJson();
16510
- this.setOutputDetailLevelJson();
16511
16863
  }
16512
16864
  setTranslationJson() {
16513
16865
  const targetLanguages = this.privRecognizerConfig.parameters.getProperty(Exports_js_3.PropertyId.SpeechServiceConnection_TranslationToLanguages, void 0);
16514
16866
  if (targetLanguages !== void 0) {
16515
16867
  const languages = targetLanguages.split(",");
16516
16868
  const translationVoice = this.privRecognizerConfig.parameters.getProperty(Exports_js_3.PropertyId.SpeechServiceConnection_TranslationVoice, void 0);
16517
- const action = translationVoice !== void 0 ? "Synthesize" : "None";
16518
- this.privSpeechContext.setSection("translation", {
16869
+ const categoryId = this.privRecognizerConfig.parameters.getProperty(Exports_js_3.PropertyId.SpeechServiceConnection_TranslationCategoryId, void 0);
16870
+ const action = translationVoice !== void 0 ? OnSuccess_js_1.NextAction.Synthesize : OnSuccess_js_1.NextAction.None;
16871
+ this.privSpeechContext.getContext().translation = {
16872
+ onPassthrough: { action },
16519
16873
  onSuccess: { action },
16520
- output: { interimResults: { mode: "Always" } },
16874
+ output: {
16875
+ includePassThroughResults: true,
16876
+ interimResults: { mode: InterimResults_js_1.Mode.Always }
16877
+ },
16521
16878
  targetLanguages: languages
16522
- });
16879
+ };
16880
+ if (categoryId !== void 0) {
16881
+ this.privSpeechContext.getContext().translation.category = categoryId;
16882
+ }
16523
16883
  if (translationVoice !== void 0) {
16524
16884
  const languageToVoiceMap = {};
16525
16885
  for (const lang of languages) {
16526
16886
  languageToVoiceMap[lang] = translationVoice;
16527
16887
  }
16528
- this.privSpeechContext.setSection("synthesis", {
16888
+ this.privSpeechContext.getContext().synthesis = {
16529
16889
  defaultVoices: languageToVoiceMap
16530
- });
16890
+ };
16531
16891
  }
16892
+ const phraseDetection = this.privSpeechContext.getContext().phraseDetection || {};
16893
+ phraseDetection.onSuccess = { action: OnSuccess_js_3.NextAction.Translate };
16894
+ phraseDetection.onInterim = { action: OnSuccess_js_3.NextAction.Translate };
16895
+ this.privSpeechContext.getContext().phraseDetection = phraseDetection;
16532
16896
  }
16533
16897
  }
16534
16898
  setSpeechSegmentationTimeoutJson() {
16535
- const speechSegmentationTimeout = this.privRecognizerConfig.parameters.getProperty(Exports_js_3.PropertyId.Speech_SegmentationSilenceTimeoutMs, void 0);
16536
- if (speechSegmentationTimeout !== void 0) {
16537
- const mode = this.recognitionMode === Exports_js_4.RecognitionMode.Conversation ? "CONVERSATION" : this.recognitionMode === Exports_js_4.RecognitionMode.Dictation ? "DICTATION" : "INTERACTIVE";
16538
- const segmentationSilenceTimeoutMs = parseInt(speechSegmentationTimeout, 10);
16539
- const phraseDetection = this.privSpeechContext.getSection("phraseDetection");
16540
- phraseDetection.mode = mode;
16541
- phraseDetection[mode] = {
16542
- segmentation: {
16543
- mode: "Custom",
16544
- segmentationSilenceTimeoutMs
16545
- }
16546
- };
16547
- this.privSpeechContext.setSection("phraseDetection", phraseDetection);
16899
+ const speechSegmentationSilenceTimeoutMs = this.privRecognizerConfig.parameters.getProperty(Exports_js_3.PropertyId.Speech_SegmentationSilenceTimeoutMs, void 0);
16900
+ const speechSegmentationMaximumTimeMs = this.privRecognizerConfig.parameters.getProperty(Exports_js_3.PropertyId.Speech_SegmentationMaximumTimeMs, void 0);
16901
+ const speechSegmentationStrategy = this.privRecognizerConfig.parameters.getProperty(Exports_js_3.PropertyId.Speech_SegmentationStrategy, void 0);
16902
+ const segmentation = {
16903
+ mode: Segmentation_js_1.SegmentationMode.Normal
16904
+ };
16905
+ let configuredSegment = false;
16906
+ if (speechSegmentationStrategy !== void 0) {
16907
+ configuredSegment = true;
16908
+ let segMode = Segmentation_js_1.SegmentationMode.Normal;
16909
+ switch (speechSegmentationStrategy.toLowerCase()) {
16910
+ case "default":
16911
+ break;
16912
+ case "time":
16913
+ segMode = Segmentation_js_1.SegmentationMode.Custom;
16914
+ break;
16915
+ case "semantic":
16916
+ segMode = Segmentation_js_1.SegmentationMode.Semantic;
16917
+ break;
16918
+ }
16919
+ segmentation.mode = segMode;
16920
+ }
16921
+ if (speechSegmentationSilenceTimeoutMs !== void 0) {
16922
+ configuredSegment = true;
16923
+ const segmentationSilenceTimeoutMs = parseInt(speechSegmentationSilenceTimeoutMs, 10);
16924
+ segmentation.mode = Segmentation_js_1.SegmentationMode.Custom;
16925
+ segmentation.segmentationSilenceTimeoutMs = segmentationSilenceTimeoutMs;
16926
+ }
16927
+ if (speechSegmentationMaximumTimeMs !== void 0) {
16928
+ configuredSegment = true;
16929
+ const segmentationMaximumTimeMs = parseInt(speechSegmentationMaximumTimeMs, 10);
16930
+ segmentation.mode = Segmentation_js_1.SegmentationMode.Custom;
16931
+ segmentation.segmentationForcedTimeoutMs = segmentationMaximumTimeMs;
16932
+ }
16933
+ if (configuredSegment) {
16934
+ const phraseDetection = this.privSpeechContext.getContext().phraseDetection || {};
16935
+ phraseDetection.mode = this.recognitionMode;
16936
+ switch (this.recognitionMode) {
16937
+ case PhraseDetectionContext_js_1.RecognitionMode.Conversation:
16938
+ phraseDetection.conversation = phraseDetection.conversation ?? { segmentation: {} };
16939
+ phraseDetection.conversation.segmentation = segmentation;
16940
+ break;
16941
+ case PhraseDetectionContext_js_1.RecognitionMode.Interactive:
16942
+ phraseDetection.interactive = phraseDetection.interactive ?? { segmentation: {} };
16943
+ phraseDetection.interactive.segmentation = segmentation;
16944
+ break;
16945
+ case PhraseDetectionContext_js_1.RecognitionMode.Dictation:
16946
+ phraseDetection.dictation = phraseDetection.dictation ?? {};
16947
+ phraseDetection.dictation.segmentation = segmentation;
16948
+ break;
16949
+ }
16950
+ this.privSpeechContext.getContext().phraseDetection = phraseDetection;
16548
16951
  }
16549
16952
  }
16550
16953
  setLanguageIdJson() {
16551
- const phraseDetection = this.privSpeechContext.getSection("phraseDetection");
16954
+ const phraseDetection = this.privSpeechContext.getContext().phraseDetection || {};
16552
16955
  if (this.privRecognizerConfig.autoDetectSourceLanguages !== void 0) {
16553
16956
  const sourceLanguages = this.privRecognizerConfig.autoDetectSourceLanguages.split(",");
16957
+ if (sourceLanguages.length === 1 && sourceLanguages[0] === Exports_js_4.AutoDetectSourceLanguagesOpenRangeOptionName) {
16958
+ sourceLanguages[0] = "UND";
16959
+ }
16554
16960
  let speechContextLidMode;
16555
16961
  if (this.privRecognizerConfig.languageIdMode === "Continuous") {
16556
- speechContextLidMode = "DetectContinuous";
16962
+ speechContextLidMode = LanguageIdContext_js_1.LanguageIdDetectionMode.DetectContinuous;
16557
16963
  } else {
16558
- speechContextLidMode = "DetectAtAudioStart";
16964
+ speechContextLidMode = LanguageIdContext_js_1.LanguageIdDetectionMode.DetectAtAudioStart;
16559
16965
  }
16560
- this.privSpeechContext.setSection("languageId", {
16561
- Priority: "PrioritizeLatency",
16966
+ this.privSpeechContext.getContext().languageId = {
16562
16967
  languages: sourceLanguages,
16563
16968
  mode: speechContextLidMode,
16564
- onSuccess: { action: "Recognize" },
16565
- onUnknown: { action: "None" }
16566
- });
16567
- this.privSpeechContext.setSection("phraseOutput", {
16969
+ onSuccess: { action: OnSuccess_js_2.NextAction.Recognize },
16970
+ onUnknown: { action: OnUnknown_js_1.OnUnknownAction.None },
16971
+ priority: LanguageIdContext_js_1.LanguageIdDetectionPriority.PrioritizeLatency
16972
+ };
16973
+ this.privSpeechContext.getContext().phraseOutput = {
16568
16974
  interimResults: {
16569
- resultType: "Auto"
16975
+ resultType: InterimResults_js_2.ResultType.Auto
16570
16976
  },
16571
16977
  phraseResults: {
16572
- resultType: "Always"
16978
+ resultType: PhraseResults_js_1.PhraseResultOutputType.Always
16573
16979
  }
16574
- });
16980
+ };
16575
16981
  const customModels = this.privRecognizerConfig.sourceLanguageModels;
16576
16982
  if (customModels !== void 0) {
16577
16983
  phraseDetection.customModels = customModels;
16578
- phraseDetection.onInterim = { action: "None" };
16579
- phraseDetection.onSuccess = { action: "None" };
16984
+ phraseDetection.onInterim = { action: OnSuccess_js_3.NextAction.None };
16985
+ phraseDetection.onSuccess = { action: OnSuccess_js_3.NextAction.None };
16580
16986
  }
16581
16987
  }
16582
- const targetLanguages = this.privRecognizerConfig.parameters.getProperty(Exports_js_3.PropertyId.SpeechServiceConnection_TranslationToLanguages, void 0);
16583
- if (targetLanguages !== void 0) {
16584
- phraseDetection.onInterim = { action: "Translate" };
16585
- phraseDetection.onSuccess = { action: "Translate" };
16586
- this.privSpeechContext.setSection("phraseOutput", {
16587
- interimResults: {
16588
- resultType: "None"
16589
- },
16590
- phraseResults: {
16591
- resultType: "None"
16592
- }
16593
- });
16594
- }
16595
- this.privSpeechContext.setSection("phraseDetection", phraseDetection);
16988
+ this.privSpeechContext.getContext().phraseDetection = phraseDetection;
16596
16989
  }
16597
16990
  setOutputDetailLevelJson() {
16598
16991
  if (this.privEnableSpeakerId) {
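
The rewritten `setSpeechSegmentationTimeoutJson` above now reads three properties — `Speech_SegmentationStrategy`, `Speech_SegmentationSilenceTimeoutMs`, and `Speech_SegmentationMaximumTimeMs` — and either timeout forces `Custom` mode, even if the strategy asked for `Semantic`. A standalone sketch of the mapping (the helper is hypothetical; the semantics follow the hunk):

```js
const SegmentationMode = { Normal: "Normal", Custom: "Custom", Semantic: "Semantic" };

function buildSegmentation({ strategy, silenceTimeoutMs, maximumTimeMs }) {
  const segmentation = { mode: SegmentationMode.Normal };
  let configured = false;
  if (strategy !== undefined) {
    configured = true;
    const s = strategy.toLowerCase();
    if (s === "time") segmentation.mode = SegmentationMode.Custom;
    else if (s === "semantic") segmentation.mode = SegmentationMode.Semantic;
    // "default" (and anything unrecognized) keeps Normal.
  }
  if (silenceTimeoutMs !== undefined) {
    configured = true;
    segmentation.mode = SegmentationMode.Custom; // timeouts always force Custom
    segmentation.segmentationSilenceTimeoutMs = parseInt(silenceTimeoutMs, 10);
  }
  if (maximumTimeMs !== undefined) {
    configured = true;
    segmentation.mode = SegmentationMode.Custom;
    segmentation.segmentationForcedTimeoutMs = parseInt(maximumTimeMs, 10);
  }
  return configured ? segmentation : undefined; // undefined: leave phraseDetection untouched
}

console.log(buildSegmentation({ strategy: "semantic" }));
// { mode: "Semantic" }
console.log(buildSegmentation({ strategy: "semantic", silenceTimeoutMs: "500" }));
// { mode: "Custom", segmentationSilenceTimeoutMs: 500 } — the timeout overrides the strategy
```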
@@ -16636,9 +17029,9 @@
16636
17029
  }
16637
17030
  async dispose(reason) {
16638
17031
  this.privIsDisposed = true;
16639
- if (this.privConnectionConfigurationPromise !== void 0) {
17032
+ if (this.privConnectionPromise !== void 0) {
16640
17033
  try {
16641
- const connection = await this.privConnectionConfigurationPromise;
17034
+ const connection = await this.privConnectionPromise;
16642
17035
  await connection.dispose(reason);
16643
17036
  } catch (error) {
16644
17037
  return;
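
The dispose path above now awaits `privConnectionPromise` instead of `privConnectionConfigurationPromise` — presumably so a connection that was opened but never fully configured is still torn down. Shape of the change, runnable with a stub:

```js
// Field and method names as in the hunk; the wrapper function is hypothetical.
async function dispose(self, reason) {
  self.privIsDisposed = true;
  if (self.privConnectionPromise !== undefined) {
    try {
      const connection = await self.privConnectionPromise; // raw connection, not the configured one
      await connection.dispose(reason);
    } catch (error) {
      return; // a rejected connection promise means there is nothing to tear down
    }
  }
}

dispose(
  { privConnectionPromise: Promise.resolve({ dispose: async (r) => console.log("closed:", r) }) },
  "client dispose"
);
```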
@@ -16661,8 +17054,18 @@
16661
17054
  }
16662
17055
  this.privConnectionConfigurationPromise = void 0;
16663
17056
  this.privRecognizerConfig.recognitionMode = recoMode;
16664
- this.setSpeechSegmentationTimeoutJson();
17057
+ if (this.privRecognizerConfig.recognitionEndpointVersion === "2") {
17058
+ const phraseDetection = this.privSpeechContext.getContext().phraseDetection || {};
17059
+ phraseDetection.mode = recoMode;
17060
+ this.privSpeechContext.getContext().phraseDetection = phraseDetection;
17061
+ }
17062
+ this.setLanguageIdJson();
16665
17063
  this.setTranslationJson();
17064
+ if (this.privRecognizerConfig.autoDetectSourceLanguages !== void 0 && this.privRecognizerConfig.parameters.getProperty(Exports_js_3.PropertyId.SpeechServiceConnection_TranslationToLanguages, void 0) !== void 0) {
17065
+ this.setupTranslationWithLanguageId();
17066
+ }
17067
+ this.setSpeechSegmentationTimeoutJson();
17068
+ this.setOutputDetailLevelJson();
16666
17069
  this.privSuccessCallback = successCallback;
16667
17070
  this.privErrorCallback = errorCallBack;
16668
17071
  this.privRequestSession.startNewRecognition();
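
The hunk above moves the speech-context construction out of the constructor (see the two `set...Json` calls deleted near the top of this section) into the per-recognition path, and fixes the order: phrase-detection mode first (v2 endpoints only), then language ID, translation, the combined LID+translation overrides, segmentation, and output detail. A runnable outline with stubbed methods — the stubs are hypothetical, the method names and order are from the hunk:

```js
const reco = {
  setLanguageIdJson: () => console.log("1. languageId + phraseOutput"),
  setTranslationJson: () => console.log("2. translation + synthesis"),
  setupTranslationWithLanguageId: () => console.log("3. combined LID/translation overrides"),
  setSpeechSegmentationTimeoutJson: () => console.log("4. segmentation"),
  setOutputDetailLevelJson: () => console.log("5. output detail"),
};

// Order matters: step 3 rewrites the phraseOutput section step 1 just produced,
// and step 4 merges into the phraseDetection section earlier steps may have created.
reco.setLanguageIdJson();
reco.setTranslationJson();
reco.setupTranslationWithLanguageId(); // only when both LID and target languages are configured
reco.setSpeechSegmentationTimeoutJson();
reco.setOutputDetailLevelJson();
```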
@@ -16810,7 +17213,7 @@
16810
17213
  this.privRequestSession.onServiceTurnStartResponse();
16811
17214
  break;
16812
17215
  case "speech.startdetected":
16813
- const speechStartDetected = Exports_js_4.SpeechDetected.fromJSON(connectionMessage.textBody);
17216
+ const speechStartDetected = Exports_js_4.SpeechDetected.fromJSON(connectionMessage.textBody, this.privRequestSession.currentTurnAudioOffset);
16814
17217
  const speechStartEventArgs = new Exports_js_3.RecognitionEventArgs(speechStartDetected.Offset, this.privRequestSession.sessionId);
16815
17218
  if (!!this.privRecognizer.speechStartDetected) {
16816
17219
  this.privRecognizer.speechStartDetected(this.privRecognizer, speechStartEventArgs);
@@ -16823,7 +17226,7 @@
16823
17226
  } else {
16824
17227
  json = "{ Offset: 0 }";
16825
17228
  }
16826
- const speechStopDetected = Exports_js_4.SpeechDetected.fromJSON(json);
17229
+ const speechStopDetected = Exports_js_4.SpeechDetected.fromJSON(json, this.privRequestSession.currentTurnAudioOffset);
16827
17230
  const speechStopEventArgs = new Exports_js_3.RecognitionEventArgs(speechStopDetected.Offset + this.privRequestSession.currentTurnAudioOffset, this.privRequestSession.sessionId);
16828
17231
  if (!!this.privRecognizer.speechEndDetected) {
16829
17232
  this.privRecognizer.speechEndDetected(this.privRecognizer, speechStopEventArgs);
@@ -16862,7 +17265,7 @@
16862
17265
  }
16863
17266
  updateSpeakerDiarizationAudioOffset() {
16864
17267
  const bytesSent = this.privRequestSession.recognitionBytesSent;
16865
- const audioOffsetMs = bytesSent / this.privAverageBytesPerMs;
17268
+ const audioOffsetMs = this.privAverageBytesPerMs !== 0 ? bytesSent / this.privAverageBytesPerMs : 0;
16866
17269
  this.privSpeechContext.setSpeakerDiarizationAudioOffsetMs(audioOffsetMs);
16867
17270
  }
16868
17271
  sendSpeechContext(connection, generateNewRequestId) {
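
The guard above fixes a division by zero: while `privAverageBytesPerMs` is still `0` (e.g. before the audio format is known), the old expression produced `Infinity` — or `NaN` for `0 / 0` — as the diarization offset. Equivalent standalone form:

```js
function audioOffsetMs(bytesSent, averageBytesPerMs) {
  return averageBytesPerMs !== 0 ? bytesSent / averageBytesPerMs : 0;
}

console.log(audioOffsetMs(32000, 32)); // 1000
console.log(audioOffsetMs(32000, 0));  // 0 instead of Infinity
```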
@@ -16878,6 +17281,34 @@
16878
17281
  }
16879
17282
  return;
16880
17283
  }
17284
+ setupTranslationWithLanguageId() {
17285
+ const targetLanguages = this.privRecognizerConfig.parameters.getProperty(Exports_js_3.PropertyId.SpeechServiceConnection_TranslationToLanguages, void 0);
17286
+ const hasLanguageId = this.privRecognizerConfig.autoDetectSourceLanguages !== void 0;
17287
+ if (targetLanguages !== void 0 && hasLanguageId) {
17288
+ this.privSpeechContext.getContext().phraseOutput = {
17289
+ interimResults: {
17290
+ resultType: InterimResults_js_2.ResultType.None
17291
+ },
17292
+ phraseResults: {
17293
+ resultType: PhraseResults_js_1.PhraseResultOutputType.None
17294
+ }
17295
+ };
17296
+ const translationContext = this.privSpeechContext.getContext().translation;
17297
+ if (translationContext) {
17298
+ const customModels = this.privRecognizerConfig.sourceLanguageModels;
17299
+ if (customModels !== void 0 && customModels.length > 0) {
17300
+ const phraseDetection = this.privSpeechContext.getContext().phraseDetection || {};
17301
+ phraseDetection.customModels = customModels;
17302
+ this.privSpeechContext.getContext().phraseDetection = phraseDetection;
17303
+ }
17304
+ const translationVoice = this.privRecognizerConfig.parameters.getProperty(Exports_js_3.PropertyId.SpeechServiceConnection_TranslationVoice, void 0);
17305
+ if (translationVoice !== void 0) {
17306
+ translationContext.onSuccess = { action: OnSuccess_js_1.NextAction.Synthesize };
17307
+ translationContext.onPassthrough = { action: OnSuccess_js_1.NextAction.Synthesize };
17308
+ }
17309
+ }
17310
+ }
17311
+ }
16881
17312
  noOp() {
16882
17313
  return;
16883
17314
  }
@@ -17015,17 +17446,18 @@
17015
17446
  }
17016
17447
  async retryableConnect() {
17017
17448
  let isUnAuthorized = false;
17018
- this.privAuthFetchEventId = Exports_js_2.createNoDashGuid();
17449
+ this.privAuthFetchEventId = (0, Exports_js_2.createNoDashGuid)();
17019
17450
  const sessionId = this.privRequestSession.sessionId;
17020
- this.privConnectionId = sessionId !== void 0 ? sessionId : Exports_js_2.createNoDashGuid();
17451
+ this.privConnectionId = sessionId !== void 0 ? sessionId : (0, Exports_js_2.createNoDashGuid)();
17021
17452
  this.privRequestSession.onPreConnectionStart(this.privAuthFetchEventId, this.privConnectionId);
17022
17453
  let lastStatusCode = 0;
17023
17454
  let lastReason = "";
17024
17455
  while (this.privRequestSession.numConnectionAttempts <= this.privRecognizerConfig.maxRetryCount) {
17456
+ this.privRequestSession.onRetryConnection();
17025
17457
  const authPromise = isUnAuthorized ? this.privAuthentication.fetchOnExpiry(this.privAuthFetchEventId) : this.privAuthentication.fetch(this.privAuthFetchEventId);
17026
17458
  const auth = await authPromise;
17027
17459
  await this.privRequestSession.onAuthCompleted(false);
17028
- const connection = this.privConnectionFactory.create(this.privRecognizerConfig, auth, this.privConnectionId);
17460
+ const connection = await this.privConnectionFactory.create(this.privRecognizerConfig, auth, this.privConnectionId);
17029
17461
  this.privRequestSession.listenForServiceTelemetry(connection.events);
17030
17462
  connection.events.attach((event) => {
17031
17463
  this.connectionEvents.onEvent(event);
@@ -17039,7 +17471,6 @@
17039
17471
  }
17040
17472
  lastStatusCode = response.statusCode;
17041
17473
  lastReason = response.reason;
17042
- this.privRequestSession.onRetryConnection();
17043
17474
  }
17044
17475
  await this.privRequestSession.onConnectionEstablishCompleted(lastStatusCode, lastReason);
17045
17476
  return Promise.reject(`Unable to contact server. StatusCode: ${lastStatusCode}, ${this.privRecognizerConfig.parameters.getProperty(Exports_js_3.PropertyId.SpeechServiceConnection_Endpoint)} Reason: ${lastReason}`);
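
Two changes in the retry loop above: the factory's `create` is now awaited (matching the async factories elsewhere in this diff), and `onRetryConnection` moved from the failure branch to the top of the loop, so every attempt — including the first — is counted before it is made rather than after it fails. A reduced sketch of the accounting (names hypothetical):

```js
async function retryableConnect(tryOnce, maxRetryCount) {
  let attempts = 0;
  let lastError;
  while (attempts <= maxRetryCount) {
    attempts++; // previously incremented only after a failed attempt
    try {
      return await tryOnce(); // factory create + connect, both awaited
    } catch (e) {
      lastError = e;
    }
  }
  throw lastError;
}

// Succeeds on the second attempt:
let calls = 0;
retryableConnect(async () => {
  if (++calls < 2) throw new Error("transient");
  return "connected";
}, 3).then(console.log); // "connected"
```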
@@ -17101,12 +17532,19 @@
17101
17532
  this.handleSpeechHypothesisMessage = (textBody) => this.handleSpeechHypothesis(textBody);
17102
17533
  }
17103
17534
  processTypeSpecificMessages(connectionMessage) {
17535
+ void connectionMessage;
17104
17536
  return;
17105
17537
  }
17106
17538
  handleRecognizedCallback(result, offset, sessionId) {
17539
+ void result;
17540
+ void offset;
17541
+ void sessionId;
17107
17542
  return;
17108
17543
  }
17109
17544
  handleRecognizingCallback(result, duration, sessionId) {
17545
+ void result;
17546
+ void duration;
17547
+ void sessionId;
17110
17548
  return;
17111
17549
  }
17112
17550
  async processSpeechMessages(connectionMessage) {
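
The `void connectionMessage;` style statements added above are a lint idiom: they mark the parameters of intentionally empty base-class methods as deliberately unused (satisfying `no-unused-vars`-type rules) without changing behavior. For example (class name hypothetical):

```js
class HandlerBase {
  processTypeSpecificMessages(connectionMessage) {
    void connectionMessage; // evaluates the expression, discards the result
    return;
  }
}
new HandlerBase().processTypeSpecificMessages({ path: "speech.hypothesis" });
```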
@@ -17131,42 +17569,41 @@
17131
17569
  return processed;
17132
17570
  }
17133
17571
  cancelRecognition(sessionId, requestId, cancellationReason, errorCode, error) {
17572
+ void sessionId;
17573
+ void requestId;
17574
+ void cancellationReason;
17575
+ void errorCode;
17576
+ void error;
17134
17577
  }
17135
17578
  async handleSpeechPhrase(textBody) {
17136
- const simple = Exports_js_2.SimpleSpeechPhrase.fromJSON(textBody);
17579
+ const simple = Exports_js_2.SimpleSpeechPhrase.fromJSON(textBody, this.privRequestSession.currentTurnAudioOffset);
17137
17580
  const resultReason = Exports_js_2.EnumTranslation.implTranslateRecognitionResult(simple.RecognitionStatus);
17138
17581
  let result;
17139
17582
  const resultProps = new Exports_js_1.PropertyCollection();
17140
17583
  resultProps.setProperty(Exports_js_1.PropertyId.SpeechServiceResponse_JsonResult, textBody);
17141
- const simpleOffset = simple.Offset + this.privRequestSession.currentTurnAudioOffset;
17142
- let offset = simpleOffset;
17143
- this.privRequestSession.onPhraseRecognized(this.privRequestSession.currentTurnAudioOffset + simple.Offset + simple.Duration);
17584
+ this.privRequestSession.onPhraseRecognized(simple.Offset + simple.Duration);
17144
17585
  if (Exports_js_1.ResultReason.Canceled === resultReason) {
17145
17586
  const cancelReason = Exports_js_2.EnumTranslation.implTranslateCancelResult(simple.RecognitionStatus);
17146
17587
  const cancellationErrorCode = Exports_js_2.EnumTranslation.implTranslateCancelErrorCode(simple.RecognitionStatus);
17147
17588
  await this.cancelRecognitionLocal(cancelReason, cancellationErrorCode, Exports_js_2.EnumTranslation.implTranslateErrorDetails(cancellationErrorCode));
17148
17589
  } else {
17149
- if (!(this.privRequestSession.isSpeechEnded && resultReason === Exports_js_1.ResultReason.NoMatch && simple.RecognitionStatus !== Exports_js_2.RecognitionStatus.InitialSilenceTimeout)) {
17590
+ if (simple.RecognitionStatus !== Exports_js_2.RecognitionStatus.EndOfDictation) {
17150
17591
  if (this.privRecognizerConfig.parameters.getProperty(Exports_js_2.OutputFormatPropertyName) === Exports_js_1.OutputFormat[Exports_js_1.OutputFormat.Simple]) {
17151
- result = new Exports_js_1.SpeechRecognitionResult(this.privRequestSession.requestId, resultReason, simple.DisplayText, simple.Duration, simpleOffset, simple.Language, simple.LanguageDetectionConfidence, simple.SpeakerId, void 0, textBody, resultProps);
17592
+ result = new Exports_js_1.SpeechRecognitionResult(this.privRequestSession.requestId, resultReason, simple.DisplayText, simple.Duration, simple.Offset, simple.Language, simple.LanguageDetectionConfidence, simple.SpeakerId, void 0, simple.asJson(), resultProps);
17152
17593
  } else {
17153
- const detailed = Exports_js_2.DetailedSpeechPhrase.fromJSON(textBody);
17154
- const totalOffset = detailed.Offset + this.privRequestSession.currentTurnAudioOffset;
17155
- const offsetCorrectedJson = detailed.getJsonWithCorrectedOffsets(totalOffset);
17156
- result = new Exports_js_1.SpeechRecognitionResult(this.privRequestSession.requestId, resultReason, detailed.Text, detailed.Duration, totalOffset, detailed.Language, detailed.LanguageDetectionConfidence, detailed.SpeakerId, void 0, offsetCorrectedJson, resultProps);
17157
- offset = result.offset;
17594
+ const detailed = Exports_js_2.DetailedSpeechPhrase.fromJSON(textBody, this.privRequestSession.currentTurnAudioOffset);
17595
+ result = new Exports_js_1.SpeechRecognitionResult(this.privRequestSession.requestId, resultReason, detailed.Text, detailed.Duration, detailed.Offset, detailed.Language, detailed.LanguageDetectionConfidence, detailed.SpeakerId, void 0, detailed.asJson(), resultProps);
17158
17596
  }
17159
- this.handleRecognizedCallback(result, offset, this.privRequestSession.sessionId);
17597
+ this.handleRecognizedCallback(result, result.offset, this.privRequestSession.sessionId);
17160
17598
  }
17161
17599
  }
17162
17600
  }
17163
17601
  handleSpeechHypothesis(textBody) {
17164
- const hypothesis = Exports_js_2.SpeechHypothesis.fromJSON(textBody);
17165
- const offset = hypothesis.Offset + this.privRequestSession.currentTurnAudioOffset;
17602
+ const hypothesis = Exports_js_2.SpeechHypothesis.fromJSON(textBody, this.privRequestSession.currentTurnAudioOffset);
17166
17603
  const resultProps = new Exports_js_1.PropertyCollection();
17167
17604
  resultProps.setProperty(Exports_js_1.PropertyId.SpeechServiceResponse_JsonResult, textBody);
17168
- const result = new Exports_js_1.SpeechRecognitionResult(this.privRequestSession.requestId, Exports_js_1.ResultReason.RecognizingSpeech, hypothesis.Text, hypothesis.Duration, offset, hypothesis.Language, hypothesis.LanguageDetectionConfidence, hypothesis.SpeakerId, void 0, textBody, resultProps);
17169
- this.privRequestSession.onHypothesis(offset);
17605
+ const result = new Exports_js_1.SpeechRecognitionResult(this.privRequestSession.requestId, Exports_js_1.ResultReason.RecognizingSpeech, hypothesis.Text, hypothesis.Duration, hypothesis.Offset, hypothesis.Language, hypothesis.LanguageDetectionConfidence, hypothesis.SpeakerId, void 0, hypothesis.asJson(), resultProps);
17606
+ this.privRequestSession.onHypothesis(hypothesis.Offset);
17170
17607
  this.handleRecognizingCallback(result, hypothesis.Duration, this.privRequestSession.sessionId);
17171
17608
  }
17172
17609
  };
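
The pattern running through the rest of this diff is visible here: service messages now take a `baseOffset` (the current turn's audio offset) in `fromJSON` and rebase their `Offset` once at parse time, and results are built from `asJson()` instead of the raw `textBody`, so the stored `SpeechServiceResponse_JsonResult` agrees with the offsets the events report. A standalone sketch — the class is hypothetical; the shape follows `SpeechHypothesis` further down:

```js
class ParsedServiceMessage {
  constructor(json, baseOffset) {
    this.payload = JSON.parse(json);
    this.payload.Offset += baseOffset; // rebase once, at the parse boundary
  }
  static fromJSON(json, baseOffset) {
    return new ParsedServiceMessage(json, baseOffset);
  }
  get Offset() { return this.payload.Offset; }
  asJson() {
    // Re-serialize the corrected object instead of echoing the wire text,
    // so downstream consumers never add the turn offset a second time.
    return JSON.stringify(this.payload);
  }
}

const msg = ParsedServiceMessage.fromJSON('{"Offset":500,"Duration":1200}', 10000);
console.log(msg.Offset);   // 10500 — already includes the turn offset
console.log(msg.asJson()); // {"Offset":10500,"Duration":1200}
```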
@@ -17179,15 +17616,10 @@
17179
17616
  "../../node_modules/microsoft-cognitiveservices-speech-sdk/distrib/lib/src/common.speech/RecognizerConfig.js"(exports) {
17180
17617
  "use strict";
17181
17618
  Object.defineProperty(exports, "__esModule", { value: true });
17182
- exports.RecognizerConfig = exports.SpeechResultFormat = exports.RecognitionMode = void 0;
17619
+ exports.RecognizerConfig = exports.SpeechResultFormat = void 0;
17183
17620
  var Exports_js_1 = require_Exports3();
17184
17621
  var Exports_js_2 = require_Exports7();
17185
- var RecognitionMode;
17186
- (function(RecognitionMode2) {
17187
- RecognitionMode2[RecognitionMode2["Interactive"] = 0] = "Interactive";
17188
- RecognitionMode2[RecognitionMode2["Conversation"] = 1] = "Conversation";
17189
- RecognitionMode2[RecognitionMode2["Dictation"] = 2] = "Dictation";
17190
- })(RecognitionMode = exports.RecognitionMode || (exports.RecognitionMode = {}));
17622
+ var PhraseDetectionContext_js_1 = require_PhraseDetectionContext();
17191
17623
  var SpeechResultFormat;
17192
17624
  (function(SpeechResultFormat2) {
17193
17625
  SpeechResultFormat2[SpeechResultFormat2["Simple"] = 0] = "Simple";
@@ -17209,8 +17641,8 @@
17209
17641
  }
17210
17642
  set recognitionMode(value) {
17211
17643
  this.privRecognitionMode = value;
17212
- this.privRecognitionActivityTimeout = value === RecognitionMode.Interactive ? 8e3 : 25e3;
17213
- this.privSpeechServiceConfig.Recognition = RecognitionMode[value];
17644
+ this.privRecognitionActivityTimeout = value === PhraseDetectionContext_js_1.RecognitionMode.Interactive ? 8e3 : 25e3;
17645
+ this.privSpeechServiceConfig.Recognition = PhraseDetectionContext_js_1.RecognitionMode[value];
17214
17646
  }
17215
17647
  get SpeechServiceConfig() {
17216
17648
  return this.privSpeechServiceConfig;
@@ -17219,7 +17651,7 @@
17219
17651
  return this.privRecognitionActivityTimeout;
17220
17652
  }
17221
17653
  get isContinuousRecognition() {
17222
- return this.privRecognitionMode !== RecognitionMode.Interactive;
17654
+ return this.privRecognitionMode !== PhraseDetectionContext_js_1.RecognitionMode.Interactive;
17223
17655
  }
17224
17656
  get languageIdMode() {
17225
17657
  return this.privLanguageIdMode;
@@ -17228,7 +17660,10 @@
17228
17660
  return this.parameters.getProperty(Exports_js_1.PropertyId.SpeechServiceConnection_AutoDetectSourceLanguages, void 0);
17229
17661
  }
17230
17662
  get recognitionEndpointVersion() {
17231
- return this.parameters.getProperty(Exports_js_1.PropertyId.SpeechServiceConnection_RecognitionEndpointVersion, void 0);
17663
+ return this.parameters.getProperty(Exports_js_1.PropertyId.SpeechServiceConnection_RecognitionEndpointVersion, "2");
17664
+ }
17665
+ set recognitionEndpointVersion(version) {
17666
+ this.parameters.setProperty(Exports_js_1.PropertyId.SpeechServiceConnection_RecognitionEndpointVersion, version);
17232
17667
  }
17233
17668
  get sourceLanguageModels() {
17234
17669
  const models = [];
@@ -17404,15 +17839,16 @@
17404
17839
  var Exports_js_4 = require_Exports7();
17405
17840
  var HeaderNames_js_1 = require_HeaderNames();
17406
17841
  var QueryParameterNames_js_1 = require_QueryParameterNames();
17842
+ var PhraseDetectionContext_js_1 = require_PhraseDetectionContext();
17407
17843
  var SpeechConnectionFactory = class extends ConnectionFactoryBase_js_1.ConnectionFactoryBase {
17408
17844
  constructor() {
17409
17845
  super(...arguments);
17410
17846
  this.interactiveRelativeUri = "/speech/recognition/interactive/cognitiveservices/v1";
17411
17847
  this.conversationRelativeUri = "/speech/recognition/conversation/cognitiveservices/v1";
17412
17848
  this.dictationRelativeUri = "/speech/recognition/dictation/cognitiveservices/v1";
17413
- this.universalUri = "/speech/universal/v";
17849
+ this.universalUri = "/stt/speech/universal/v";
17414
17850
  }
17415
- create(config, authInfo, connectionId) {
17851
+ async create(config, authInfo, connectionId) {
17416
17852
  let endpoint = config.parameters.getProperty(Exports_js_3.PropertyId.SpeechServiceConnection_Endpoint, void 0);
17417
17853
  const region = config.parameters.getProperty(Exports_js_3.PropertyId.SpeechServiceConnection_Region, void 0);
17418
17854
  const hostSuffix = ConnectionFactoryBase_js_1.ConnectionFactoryBase.getHostSuffix(region);
@@ -17436,9 +17872,17 @@
17436
17872
  queryParams[QueryParameterNames_js_1.QueryParameterNames.EnableLanguageId] = "true";
17437
17873
  }
17438
17874
  this.setCommonUrlParams(config, queryParams, endpoint);
17875
+ if (!!endpoint) {
17876
+ const endpointUrl = new URL(endpoint);
17877
+ const pathName = endpointUrl.pathname;
17878
+ if (pathName === "" || pathName === "/") {
17879
+ endpointUrl.pathname = this.universalUri + config.recognitionEndpointVersion;
17880
+ endpoint = await ConnectionFactoryBase_js_1.ConnectionFactoryBase.getRedirectUrlFromEndpoint(endpointUrl.toString());
17881
+ }
17882
+ }
17439
17883
  if (!endpoint) {
17440
17884
  switch (config.recognitionMode) {
17441
- case Exports_js_4.RecognitionMode.Conversation:
17885
+ case PhraseDetectionContext_js_1.RecognitionMode.Conversation:
17442
17886
  if (config.parameters.getProperty(Exports_js_2.ForceDictationPropertyName, "false") === "true") {
17443
17887
  endpoint = host + this.dictationRelativeUri;
17444
17888
  } else {
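
The added block above probes custom endpoints: a URL with an empty path gets the universal recognition path appended (note the new `/stt` prefix on `universalUri`) and is then resolved through `ConnectionFactoryBase.getRedirectUrlFromEndpoint`. A sketch of the URL handling with the redirect helper stubbed:

```js
async function resolveEndpoint(endpoint, universalUri, version, getRedirectUrl) {
  const url = new URL(endpoint);
  if (url.pathname === "" || url.pathname === "/") {
    // Bare hosts get the universal path before the redirect probe.
    url.pathname = universalUri + version;
    return getRedirectUrl(url.toString());
  }
  return endpoint; // explicit paths are left untouched
}

resolveEndpoint(
  "wss://example.invalid",
  "/stt/speech/universal/v",
  "2",
  async (u) => u // stub; the real helper may follow a service redirect
).then(console.log); // wss://example.invalid/stt/speech/universal/v2
```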
@@ -17449,7 +17893,7 @@
17449
17893
  }
17450
17894
  }
17451
17895
  break;
17452
- case Exports_js_4.RecognitionMode.Dictation:
17896
+ case PhraseDetectionContext_js_1.RecognitionMode.Dictation:
17453
17897
  endpoint = host + this.dictationRelativeUri;
17454
17898
  break;
17455
17899
  default:
@@ -17466,6 +17910,7 @@
17466
17910
  headers[authInfo.headerName] = authInfo.token;
17467
17911
  }
17468
17912
  headers[HeaderNames_js_1.HeaderNames.ConnectionId] = connectionId;
17913
+ headers.connectionId = connectionId;
17469
17914
  const enableCompression = config.parameters.getProperty("SPEECH-EnableWebsocketCompression", "false") === "true";
17470
17915
  const webSocketConnection = new Exports_js_1.WebsocketConnection(endpoint, queryParams, headers, new Exports_js_4.WebsocketMessageFormatter(), Exports_js_1.ProxyInfo.fromRecognizerConfig(config), enableCompression, connectionId);
17471
17916
  const uri = webSocketConnection.uri;
@@ -17493,9 +17938,9 @@
17493
17938
  var ConversationTranscriberConnectionFactory = class extends ConnectionFactoryBase_js_1.ConnectionFactoryBase {
17494
17939
  constructor() {
17495
17940
  super(...arguments);
17496
- this.universalUri = "/speech/universal/v2";
17941
+ this.universalUri = "/stt/speech/universal/v2";
17497
17942
  }
17498
- create(config, authInfo, connectionId) {
17943
+ async create(config, authInfo, connectionId) {
17499
17944
  let endpoint = config.parameters.getProperty(Exports_js_2.PropertyId.SpeechServiceConnection_Endpoint, void 0);
17500
17945
  const region = config.parameters.getProperty(Exports_js_2.PropertyId.SpeechServiceConnection_Region, void 0);
17501
17946
  const hostSuffix = ConnectionFactoryBase_js_1.ConnectionFactoryBase.getHostSuffix(region);
@@ -17516,6 +17961,14 @@
17516
17961
  queryParams[QueryParameterNames_js_1.QueryParameterNames.EnableLanguageId] = "true";
17517
17962
  }
17518
17963
  this.setV2UrlParams(config, queryParams, endpoint);
17964
+ if (!!endpoint) {
17965
+ const endpointUrl = new URL(endpoint);
17966
+ const pathName = endpointUrl.pathname;
17967
+ if (pathName === "" || pathName === "/") {
17968
+ endpointUrl.pathname = this.universalUri;
17969
+ endpoint = await ConnectionFactoryBase_js_1.ConnectionFactoryBase.getRedirectUrlFromEndpoint(endpointUrl.toString());
17970
+ }
17971
+ }
17519
17972
  if (!endpoint) {
17520
17973
  endpoint = `${host}${this.universalUri}`;
17521
17974
  }
@@ -17588,7 +18041,7 @@
17588
18041
  headers[HeaderNames_js_1.HeaderNames.ConnectionId] = connectionId;
17589
18042
  config.parameters.setProperty(Exports_js_2.PropertyId.SpeechServiceConnection_Url, endpoint);
17590
18043
  const enableCompression = config.parameters.getProperty("SPEECH-EnableWebsocketCompression", "false") === "true";
17591
- return new Exports_js_1.WebsocketConnection(endpoint, queryParams, headers, new Exports_js_3.WebsocketMessageFormatter(), Exports_js_1.ProxyInfo.fromRecognizerConfig(config), enableCompression, connectionId);
18044
+ return Promise.resolve(new Exports_js_1.WebsocketConnection(endpoint, queryParams, headers, new Exports_js_3.WebsocketMessageFormatter(), Exports_js_1.ProxyInfo.fromRecognizerConfig(config), enableCompression, connectionId));
17592
18045
  }
17593
18046
  setQueryParams(queryParams, config, endpointUrl) {
17594
18047
  const endpointId = config.parameters.getProperty(Exports_js_2.PropertyId.SpeechServiceConnection_EndpointId, void 0);
@@ -17624,14 +18077,25 @@
17624
18077
  var Exports_js_3 = require_Exports7();
17625
18078
  var HeaderNames_js_1 = require_HeaderNames();
17626
18079
  var QueryParameterNames_js_1 = require_QueryParameterNames();
18080
+ var PhraseDetectionContext_js_1 = require_PhraseDetectionContext();
17627
18081
  var TranslationConnectionFactory = class extends ConnectionFactoryBase_js_1.ConnectionFactoryBase {
17628
- create(config, authInfo, connectionId) {
17629
- const endpoint = this.getEndpointUrl(config);
18082
+ constructor() {
18083
+ super(...arguments);
18084
+ this.universalUri = "/stt/speech/universal/v2";
18085
+ this.translationV1Uri = "/speech/translation/cognitiveservices/v1";
18086
+ }
18087
+ async create(config, authInfo, connectionId) {
18088
+ let endpoint = this.getEndpointUrl(config);
17630
18089
  const queryParams = {};
17631
- if (config.autoDetectSourceLanguages !== void 0) {
17632
- queryParams[QueryParameterNames_js_1.QueryParameterNames.EnableLanguageId] = "true";
17633
- }
17634
18090
  this.setQueryParams(queryParams, config, endpoint);
18091
+ if (!!endpoint) {
18092
+ const endpointUrl = new URL(endpoint);
18093
+ const pathName = endpointUrl.pathname;
18094
+ if (pathName === "" || pathName === "/") {
18095
+ endpointUrl.pathname = this.universalUri;
18096
+ endpoint = await ConnectionFactoryBase_js_1.ConnectionFactoryBase.getRedirectUrlFromEndpoint(endpointUrl.toString());
18097
+ }
18098
+ }
17635
18099
  const headers = {};
17636
18100
  if (authInfo.token !== void 0 && authInfo.token !== "") {
17637
18101
  headers[authInfo.headerName] = authInfo.token;
@@ -17639,20 +18103,26 @@
17639
18103
  headers[HeaderNames_js_1.HeaderNames.ConnectionId] = connectionId;
17640
18104
  config.parameters.setProperty(Exports_js_2.PropertyId.SpeechServiceConnection_Url, endpoint);
17641
18105
  const enableCompression = config.parameters.getProperty("SPEECH-EnableWebsocketCompression", "false") === "true";
17642
- return new Exports_js_1.WebsocketConnection(endpoint, queryParams, headers, new Exports_js_3.WebsocketMessageFormatter(), Exports_js_1.ProxyInfo.fromRecognizerConfig(config), enableCompression, connectionId);
18106
+ const webSocketConnection = new Exports_js_1.WebsocketConnection(endpoint, queryParams, headers, new Exports_js_3.WebsocketMessageFormatter(), Exports_js_1.ProxyInfo.fromRecognizerConfig(config), enableCompression, connectionId);
18107
+ return webSocketConnection;
17643
18108
  }
17644
18109
  getEndpointUrl(config, returnRegionPlaceholder) {
17645
18110
  const region = config.parameters.getProperty(Exports_js_2.PropertyId.SpeechServiceConnection_Region);
17646
18111
  const hostSuffix = ConnectionFactoryBase_js_1.ConnectionFactoryBase.getHostSuffix(region);
17647
18112
  let endpointUrl = config.parameters.getProperty(Exports_js_2.PropertyId.SpeechServiceConnection_Endpoint, void 0);
17648
- if (!endpointUrl) {
17649
- if (config.autoDetectSourceLanguages !== void 0) {
17650
- const host = config.parameters.getProperty(Exports_js_2.PropertyId.SpeechServiceConnection_Host, "wss://{region}.stt.speech" + hostSuffix);
17651
- endpointUrl = host + "/speech/universal/v2";
17652
- } else {
17653
- const host = config.parameters.getProperty(Exports_js_2.PropertyId.SpeechServiceConnection_Host, "wss://{region}.s2s.speech" + hostSuffix);
17654
- endpointUrl = host + "/speech/translation/cognitiveservices/v1";
18113
+ if (endpointUrl) {
18114
+ if (returnRegionPlaceholder === true) {
18115
+ return endpointUrl;
17655
18116
  }
18117
+ return StringUtils_js_1.StringUtils.formatString(endpointUrl, { region });
18118
+ }
18119
+ const forceV1Endpoint = config.parameters.getProperty("SPEECH-ForceV1Endpoint", "false") === "true";
18120
+ if (forceV1Endpoint) {
18121
+ const host = config.parameters.getProperty(Exports_js_2.PropertyId.SpeechServiceConnection_Host, "wss://{region}.s2s.speech" + hostSuffix);
18122
+ endpointUrl = host + this.translationV1Uri;
18123
+ } else {
18124
+ const host = config.parameters.getProperty(Exports_js_2.PropertyId.SpeechServiceConnection_Host, "wss://{region}.stt.speech" + hostSuffix);
18125
+ endpointUrl = host + this.universalUri;
17656
18126
  }
17657
18127
  if (returnRegionPlaceholder === true) {
17658
18128
  return endpointUrl;
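
The rewritten `getEndpointUrl` above flips the default: translation now targets the v2 universal STT endpoint, and the legacy `s2s` v1 endpoint is used only when the `SPEECH-ForceV1Endpoint` property is `"true"`; an explicit endpoint is returned with `{region}` substituted unless the placeholder form was requested. A sketch of the selection, assuming the default `.microsoft.com` host suffix:

```js
function translationEndpoint({ explicitEndpoint, region, forceV1 }) {
  if (explicitEndpoint) {
    // Stand-in for StringUtils.formatString(endpointUrl, { region }).
    return explicitEndpoint.replace("{region}", region);
  }
  return forceV1
    ? `wss://${region}.s2s.speech.microsoft.com/speech/translation/cognitiveservices/v1`
    : `wss://${region}.stt.speech.microsoft.com/stt/speech/universal/v2`;
}

console.log(translationEndpoint({ region: "westus2", forceV1: false }));
// wss://westus2.stt.speech.microsoft.com/stt/speech/universal/v2
```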
@@ -17662,13 +18132,13 @@
17662
18132
  setQueryParams(queryParams, config, endpointUrl) {
17663
18133
  queryParams.from = config.parameters.getProperty(Exports_js_2.PropertyId.SpeechServiceConnection_RecoLanguage);
17664
18134
  queryParams.to = config.parameters.getProperty(Exports_js_2.PropertyId.SpeechServiceConnection_TranslationToLanguages);
17665
- queryParams.scenario = config.recognitionMode === Exports_js_3.RecognitionMode.Interactive ? "interactive" : config.recognitionMode === Exports_js_3.RecognitionMode.Conversation ? "conversation" : "";
18135
+ queryParams.scenario = config.recognitionMode === PhraseDetectionContext_js_1.RecognitionMode.Interactive ? "interactive" : config.recognitionMode === PhraseDetectionContext_js_1.RecognitionMode.Conversation ? "conversation" : "";
17666
18136
  this.setCommonUrlParams(config, queryParams, endpointUrl);
17667
18137
  this.setUrlParameter(Exports_js_2.PropertyId.SpeechServiceResponse_TranslationRequestStablePartialResult, QueryParameterNames_js_1.QueryParameterNames.StableTranslation, config, queryParams, endpointUrl);
17668
18138
  const translationVoice = config.parameters.getProperty(Exports_js_2.PropertyId.SpeechServiceConnection_TranslationVoice, void 0);
17669
18139
  if (translationVoice !== void 0) {
17670
18140
  queryParams.voice = translationVoice;
17671
- queryParams.features = "texttospeech";
18141
+ queryParams.features = "requireVoice";
17672
18142
  }
17673
18143
  }
17674
18144
  };
@@ -17837,19 +18307,20 @@
17837
18307
  var Contracts_js_1 = require_Contracts();
17838
18308
  var TranslationStatus_js_1 = require_TranslationStatus();
17839
18309
  var TranslationHypothesis = class _TranslationHypothesis {
17840
- constructor(hypothesis) {
18310
+ constructor(hypothesis, baseOffset) {
17841
18311
  this.privTranslationHypothesis = hypothesis;
17842
- this.privTranslationHypothesis.Translation.TranslationStatus = TranslationStatus_js_1.TranslationStatus[this.privTranslationHypothesis.Translation.TranslationStatus];
18312
+ this.privTranslationHypothesis.Offset += baseOffset;
18313
+ this.privTranslationHypothesis.Translation.TranslationStatus = this.mapTranslationStatus(this.privTranslationHypothesis.Translation.TranslationStatus);
17843
18314
  }
17844
- static fromJSON(json) {
17845
- return new _TranslationHypothesis(JSON.parse(json));
18315
+ static fromJSON(json, baseOffset) {
18316
+ return new _TranslationHypothesis(JSON.parse(json), baseOffset);
17846
18317
  }
17847
- static fromTranslationResponse(translationHypothesis) {
18318
+ static fromTranslationResponse(translationHypothesis, baseOffset) {
17848
18319
  Contracts_js_1.Contracts.throwIfNullOrUndefined(translationHypothesis, "translationHypothesis");
17849
18320
  const hypothesis = translationHypothesis.SpeechHypothesis;
17850
18321
  translationHypothesis.SpeechHypothesis = void 0;
17851
18322
  hypothesis.Translation = translationHypothesis;
17852
- return new _TranslationHypothesis(hypothesis);
18323
+ return new _TranslationHypothesis(hypothesis, baseOffset);
17853
18324
  }
17854
18325
  get Duration() {
17855
18326
  return this.privTranslationHypothesis.Duration;
@@ -17866,6 +18337,20 @@
17866
18337
  get Language() {
17867
18338
  return this.privTranslationHypothesis.PrimaryLanguage?.Language;
17868
18339
  }
18340
+ asJson() {
18341
+ const jsonObj = { ...this.privTranslationHypothesis };
18342
+ return jsonObj.Translation !== void 0 ? JSON.stringify({
18343
+ ...jsonObj,
18344
+ TranslationStatus: TranslationStatus_js_1.TranslationStatus[jsonObj.Translation.TranslationStatus]
18345
+ }) : JSON.stringify(jsonObj);
18346
+ }
18347
+ mapTranslationStatus(status) {
18348
+ if (typeof status === "string") {
18349
+ return TranslationStatus_js_1.TranslationStatus[status];
18350
+ } else if (typeof status === "number") {
18351
+ return status;
18352
+ }
18353
+ }
17869
18354
  };
17870
18355
  exports.TranslationHypothesis = TranslationHypothesis;
17871
18356
  }
@@ -17881,23 +18366,24 @@
17881
18366
  var Exports_js_1 = require_Exports7();
17882
18367
  var TranslationStatus_js_1 = require_TranslationStatus();
17883
18368
  var TranslationPhrase = class _TranslationPhrase {
17884
- constructor(phrase) {
18369
+ constructor(phrase, baseOffset) {
17885
18370
  this.privTranslationPhrase = phrase;
17886
- this.privTranslationPhrase.RecognitionStatus = Exports_js_1.RecognitionStatus[this.privTranslationPhrase.RecognitionStatus];
18371
+ this.privTranslationPhrase.Offset += baseOffset;
18372
+ this.privTranslationPhrase.RecognitionStatus = this.mapRecognitionStatus(this.privTranslationPhrase.RecognitionStatus);
17887
18373
  if (this.privTranslationPhrase.Translation !== void 0) {
17888
- this.privTranslationPhrase.Translation.TranslationStatus = TranslationStatus_js_1.TranslationStatus[this.privTranslationPhrase.Translation.TranslationStatus];
18374
+ this.privTranslationPhrase.Translation.TranslationStatus = this.mapTranslationStatus(this.privTranslationPhrase.Translation.TranslationStatus);
17889
18375
  }
17890
18376
  }
17891
- static fromJSON(json) {
17892
- return new _TranslationPhrase(JSON.parse(json));
18377
+ static fromJSON(json, baseOffset) {
18378
+ return new _TranslationPhrase(JSON.parse(json), baseOffset);
17893
18379
  }
17894
- static fromTranslationResponse(translationResponse) {
18380
+ static fromTranslationResponse(translationResponse, baseOffset) {
17895
18381
  Contracts_js_1.Contracts.throwIfNullOrUndefined(translationResponse, "translationResponse");
17896
18382
  const phrase = translationResponse.SpeechPhrase;
17897
18383
  translationResponse.SpeechPhrase = void 0;
17898
18384
  phrase.Translation = translationResponse;
17899
18385
  phrase.Text = phrase.DisplayText;
17900
- return new _TranslationPhrase(phrase);
18386
+ return new _TranslationPhrase(phrase, baseOffset);
17901
18387
  }
17902
18388
  get RecognitionStatus() {
17903
18389
  return this.privTranslationPhrase.RecognitionStatus;
@@ -17920,6 +18406,34 @@
17920
18406
  get Translation() {
17921
18407
  return this.privTranslationPhrase.Translation;
17922
18408
  }
18409
+ asJson() {
18410
+ const jsonObj = { ...this.privTranslationPhrase };
18411
+ const serializedObj = {
18412
+ ...jsonObj,
18413
+ RecognitionStatus: Exports_js_1.RecognitionStatus[jsonObj.RecognitionStatus]
18414
+ };
18415
+ if (jsonObj.Translation) {
18416
+ serializedObj.Translation = {
18417
+ ...jsonObj.Translation,
18418
+ TranslationStatus: TranslationStatus_js_1.TranslationStatus[jsonObj.Translation.TranslationStatus]
18419
+ };
18420
+ }
18421
+ return JSON.stringify(serializedObj);
18422
+ }
18423
+ mapRecognitionStatus(status) {
18424
+ if (typeof status === "string") {
18425
+ return Exports_js_1.RecognitionStatus[status];
18426
+ } else if (typeof status === "number") {
18427
+ return status;
18428
+ }
18429
+ }
18430
+ mapTranslationStatus(status) {
18431
+ if (typeof status === "string") {
18432
+ return TranslationStatus_js_1.TranslationStatus[status];
18433
+ } else if (typeof status === "number") {
18434
+ return status;
18435
+ }
18436
+ }
17923
18437
  };
17924
18438
  exports.TranslationPhrase = TranslationPhrase;
17925
18439
  }
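
The `mapRecognitionStatus`/`mapTranslationStatus` helpers above make status normalization idempotent: the old one-way lookup (`Enum[status]`) turned an already-numeric value back into a string — or into `undefined` — if the field had been mapped before; the new form accepts either the wire string or a number. Sketch, where the enum literal mimics a compiled numeric enum with its reverse mapping:

```js
const TranslationStatus = { Success: 0, Error: 1, 0: "Success", 1: "Error" };

function mapTranslationStatus(status) {
  if (typeof status === "string") return TranslationStatus[status]; // wire form -> number
  if (typeof status === "number") return status;                    // already mapped: leave alone
}

console.log(mapTranslationStatus("Success"));                       // 0
console.log(mapTranslationStatus(mapTranslationStatus("Success"))); // still 0, not undefined
```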
@@ -17951,7 +18465,8 @@
17951
18465
  return true;
17952
18466
  }
17953
18467
  const handleTranslationPhrase = async (translatedPhrase) => {
17954
- this.privRequestSession.onPhraseRecognized(this.privRequestSession.currentTurnAudioOffset + translatedPhrase.Offset + translatedPhrase.Duration);
18468
+ resultProps.setProperty(Exports_js_2.PropertyId.SpeechServiceResponse_JsonResult, translatedPhrase.asJson());
18469
+ this.privRequestSession.onPhraseRecognized(translatedPhrase.Offset + translatedPhrase.Duration);
17955
18470
  if (translatedPhrase.RecognitionStatus === Exports_js_3.RecognitionStatus.Success) {
17956
18471
  const result = this.fireEventForResult(translatedPhrase, resultProps);
17957
18472
  if (!!this.privTranslationRecognizer.recognized) {
@@ -17973,13 +18488,13 @@
17973
18488
  }
17974
18489
  } else {
17975
18490
  const reason = Exports_js_3.EnumTranslation.implTranslateRecognitionResult(translatedPhrase.RecognitionStatus);
17976
- const result = new Exports_js_2.TranslationRecognitionResult(void 0, this.privRequestSession.requestId, reason, translatedPhrase.Text, translatedPhrase.Duration, this.privRequestSession.currentTurnAudioOffset + translatedPhrase.Offset, translatedPhrase.Language, translatedPhrase.Confidence, void 0, connectionMessage.textBody, resultProps);
18491
+ const result = new Exports_js_2.TranslationRecognitionResult(void 0, this.privRequestSession.requestId, reason, translatedPhrase.Text, translatedPhrase.Duration, translatedPhrase.Offset, translatedPhrase.Language, translatedPhrase.Confidence, void 0, translatedPhrase.asJson(), resultProps);
17977
18492
  if (reason === Exports_js_2.ResultReason.Canceled) {
17978
18493
  const cancelReason = Exports_js_3.EnumTranslation.implTranslateCancelResult(translatedPhrase.RecognitionStatus);
17979
18494
  const cancellationErrorCode = Exports_js_3.EnumTranslation.implTranslateCancelErrorCode(translatedPhrase.RecognitionStatus);
17980
18495
  await this.cancelRecognitionLocal(cancelReason, cancellationErrorCode, Exports_js_3.EnumTranslation.implTranslateErrorDetails(cancellationErrorCode));
17981
18496
  } else {
17982
- if (!(this.privRequestSession.isSpeechEnded && reason === Exports_js_2.ResultReason.NoMatch && translatedPhrase.RecognitionStatus !== Exports_js_3.RecognitionStatus.InitialSilenceTimeout)) {
18497
+ if (translatedPhrase.RecognitionStatus !== Exports_js_3.RecognitionStatus.EndOfDictation) {
17983
18498
  const ev = new Exports_js_2.TranslationRecognitionEventArgs(result, result.offset, this.privRequestSession.sessionId);
17984
18499
  if (!!this.privTranslationRecognizer.recognized) {
17985
18500
  try {
@@ -17987,25 +18502,26 @@
17987
18502
  } catch (error) {
17988
18503
  }
17989
18504
  }
17990
- }
17991
- if (!!this.privSuccessCallback) {
17992
- try {
17993
- this.privSuccessCallback(result);
17994
- } catch (e) {
17995
- if (!!this.privErrorCallback) {
17996
- this.privErrorCallback(e);
18505
+ if (!!this.privSuccessCallback) {
18506
+ try {
18507
+ this.privSuccessCallback(result);
18508
+ } catch (e) {
18509
+ if (!!this.privErrorCallback) {
18510
+ this.privErrorCallback(e);
18511
+ }
17997
18512
  }
18513
+ this.privSuccessCallback = void 0;
18514
+ this.privErrorCallback = void 0;
17998
18515
  }
17999
- this.privSuccessCallback = void 0;
18000
- this.privErrorCallback = void 0;
18001
18516
  }
18002
18517
  }
18003
18518
  processed = true;
18004
18519
  }
18005
18520
  };
18006
- const handleTranslationHypothesis = (hypothesis, resultProperties) => {
18007
- const result = this.fireEventForResult(hypothesis, resultProperties);
18008
- this.privRequestSession.onHypothesis(this.privRequestSession.currentTurnAudioOffset + result.offset);
18521
+ const handleTranslationHypothesis = (hypothesis) => {
18522
+ resultProps.setProperty(Exports_js_2.PropertyId.SpeechServiceResponse_JsonResult, hypothesis.asJson());
18523
+ const result = this.fireEventForResult(hypothesis, resultProps);
18524
+ this.privRequestSession.onHypothesis(result.offset);
18009
18525
  if (!!this.privTranslationRecognizer.recognizing) {
18010
18526
  try {
18011
18527
  this.privTranslationRecognizer.recognizing(this.privTranslationRecognizer, result);
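
Across the last two hunks, final translation results are suppressed only for the explicit `EndOfDictation` status, replacing the old speech-ended/NoMatch heuristic, and the one-shot success/error callbacks now sit inside that guard, so they no longer fire (and clear themselves) for a suppressed marker phrase. Reduced sketch of the new gating (status names are real; the surrounding shape is hypothetical):

```js
function deliverFinalResult(status, result, handlers) {
  if (status === "EndOfDictation") return; // end-of-stream marker, not a user-visible result
  if (handlers.recognized) handlers.recognized(result);
  if (handlers.success) {
    handlers.success(result); // one-shot, consumed only by results actually delivered
    handlers.success = undefined;
    handlers.error = undefined;
  }
}

deliverFinalResult("Success", { text: "hello" }, { recognized: (r) => console.log(r.text) });
deliverFinalResult("EndOfDictation", {}, { recognized: () => console.log("never runs") });
```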
@@ -18019,23 +18535,24 @@
18019
18535
  }
18020
18536
  switch (connectionMessage.path.toLowerCase()) {
18021
18537
  case "translation.hypothesis":
18022
- handleTranslationHypothesis(Exports_js_3.TranslationHypothesis.fromJSON(connectionMessage.textBody), resultProps);
18538
+ handleTranslationHypothesis(Exports_js_3.TranslationHypothesis.fromJSON(connectionMessage.textBody, this.privRequestSession.currentTurnAudioOffset));
18023
18539
  break;
18024
18540
  case "translation.response":
18025
18541
  const phrase = JSON.parse(connectionMessage.textBody);
18026
18542
  if (!!phrase.SpeechPhrase) {
18027
- await handleTranslationPhrase(Exports_js_3.TranslationPhrase.fromTranslationResponse(phrase));
18543
+ await handleTranslationPhrase(Exports_js_3.TranslationPhrase.fromTranslationResponse(phrase, this.privRequestSession.currentTurnAudioOffset));
18028
18544
  } else {
18029
18545
  const hypothesis = JSON.parse(connectionMessage.textBody);
18030
18546
  if (!!hypothesis.SpeechHypothesis) {
18031
- handleTranslationHypothesis(Exports_js_3.TranslationHypothesis.fromTranslationResponse(hypothesis), resultProps);
18547
+ handleTranslationHypothesis(Exports_js_3.TranslationHypothesis.fromTranslationResponse(hypothesis, this.privRequestSession.currentTurnAudioOffset));
18032
18548
  }
18033
18549
  }
18034
18550
  break;
18035
18551
  case "translation.phrase":
18036
- await handleTranslationPhrase(Exports_js_3.TranslationPhrase.fromJSON(connectionMessage.textBody));
18552
+ await handleTranslationPhrase(Exports_js_3.TranslationPhrase.fromJSON(connectionMessage.textBody, this.privRequestSession.currentTurnAudioOffset));
18037
18553
  break;
18038
18554
  case "translation.synthesis":
18555
+ case "audio":
18039
18556
  this.sendSynthesisAudio(connectionMessage.binaryBody, this.privRequestSession.sessionId);
18040
18557
  processed = true;
18041
18558
  break;
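
Editor's note: the recurring change in the hunks above and below is that the service-message parsers now take the current turn's audio offset as a second argument, so offsets are normalized once at parse time instead of by every caller. A minimal sketch of that calling convention, assuming a wire payload with an Offset field in 100-nanosecond ticks (the class and field names here are illustrative, not the SDK's):

// Illustrative sketch of parse-time offset correction (not the SDK's class).
class PhraseSketch {
  constructor(json, baseOffset) {
    this.payload = JSON.parse(json);
    // Fold the turn's base audio offset in once, at deserialization time,
    // so every later reader of .Offset sees a session-absolute value.
    this.payload.Offset += baseOffset;
  }
  static fromJSON(json, baseOffset) {
    return new PhraseSketch(json, baseOffset);
  }
  get Offset() {
    return this.payload.Offset;
  }
  asJson() {
    // Re-serialize the corrected payload for JSON consumers.
    return JSON.stringify(this.payload);
  }
}

// Usage mirroring the handlers above (values illustrative):
const hyp = PhraseSketch.fromJSON('{"Text":"hello","Offset":500000}', 10000000);
console.log(hyp.Offset); // 10500000
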
@@ -18112,9 +18629,9 @@
  }
  }
  }
- handleRecognizingCallback(result, duration, sessionId) {
+ handleRecognizingCallback(result, offset, sessionId) {
  try {
- const ev = new Exports_js_2.TranslationRecognitionEventArgs(Exports_js_2.TranslationRecognitionResult.fromSpeechRecognitionResult(result), duration, sessionId);
+ const ev = new Exports_js_2.TranslationRecognitionEventArgs(Exports_js_2.TranslationRecognitionResult.fromSpeechRecognitionResult(result), offset, sessionId);
  this.privTranslationRecognizer.recognizing(this.privTranslationRecognizer, ev);
  } catch (error) {
  }
@@ -18147,9 +18664,8 @@
  resultReason = Exports_js_2.ResultReason.TranslatingSpeech;
  }
  const language = serviceResult.Language;
- const offset = serviceResult.Offset + this.privRequestSession.currentTurnAudioOffset;
- const result = new Exports_js_2.TranslationRecognitionResult(translations, this.privRequestSession.requestId, resultReason, serviceResult.Text, serviceResult.Duration, offset, language, confidence, serviceResult.Translation.FailureReason, JSON.stringify(serviceResult), properties);
- const ev = new Exports_js_2.TranslationRecognitionEventArgs(result, offset, this.privRequestSession.sessionId);
+ const result = new Exports_js_2.TranslationRecognitionResult(translations, this.privRequestSession.requestId, resultReason, serviceResult.Text, serviceResult.Duration, serviceResult.Offset, language, confidence, serviceResult.Translation.FailureReason, serviceResult.asJson(), properties);
+ const ev = new Exports_js_2.TranslationRecognitionEventArgs(result, serviceResult.Offset, this.privRequestSession.sessionId);
  return ev;
  }
  sendSynthesisAudio(audio, sessionId) {
@@ -18175,11 +18691,12 @@
  Object.defineProperty(exports, "__esModule", { value: true });
  exports.SpeechDetected = void 0;
  var SpeechDetected = class _SpeechDetected {
- constructor(json) {
+ constructor(json, baseOffset) {
  this.privSpeechStartDetected = JSON.parse(json);
+ this.privSpeechStartDetected.Offset += baseOffset;
  }
- static fromJSON(json) {
- return new _SpeechDetected(json);
+ static fromJSON(json, baseOffset) {
+ return new _SpeechDetected(json, baseOffset);
  }
  get Offset() {
  return this.privSpeechStartDetected.Offset;
@@ -18196,11 +18713,18 @@
  Object.defineProperty(exports, "__esModule", { value: true });
  exports.SpeechHypothesis = void 0;
  var SpeechHypothesis = class _SpeechHypothesis {
- constructor(json) {
+ constructor(json, baseOffset) {
  this.privSpeechHypothesis = JSON.parse(json);
+ this.updateOffset(baseOffset);
  }
- static fromJSON(json) {
- return new _SpeechHypothesis(json);
+ static fromJSON(json, baseOffset) {
+ return new _SpeechHypothesis(json, baseOffset);
+ }
+ updateOffset(baseOffset) {
+ this.privSpeechHypothesis.Offset += baseOffset;
+ }
+ asJson() {
+ return JSON.stringify(this.privSpeechHypothesis);
  }
  get Text() {
  return this.privSpeechHypothesis.Text;
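
Editor's note: a side effect of the new updateOffset()/asJson() pair in this SpeechHypothesis hunk is that the SpeechServiceResponse_JsonResult property and the result's JSON body are now populated from the re-serialized, offset-corrected payload rather than from the raw connectionMessage.textBody. A hedged sketch of the difference (payload values illustrative):

// Sketch: raw wire JSON vs. the corrected JSON now stored on results.
const textBody = '{"Text":"hi","Offset":1000,"Duration":2000}'; // illustrative payload
const baseOffset = 5000;

const corrected = JSON.parse(textBody);
corrected.Offset += baseOffset;

console.log(textBody);                  // offsets relative to the current turn
console.log(JSON.stringify(corrected)); // offsets relative to the whole session
// Consumers parsing the JsonResult property no longer need to re-apply
// currentTurnAudioOffset themselves.
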
@@ -18232,11 +18756,12 @@
  Object.defineProperty(exports, "__esModule", { value: true });
  exports.SpeechKeyword = void 0;
  var SpeechKeyword = class _SpeechKeyword {
- constructor(json) {
+ constructor(json, baseOffset) {
  this.privSpeechKeyword = JSON.parse(json);
+ this.privSpeechKeyword.Offset += baseOffset;
  }
- static fromJSON(json) {
- return new _SpeechKeyword(json);
+ static fromJSON(json, baseOffset) {
+ return new _SpeechKeyword(json, baseOffset);
  }
  get Status() {
  return this.privSpeechKeyword.Status;
@@ -18250,6 +18775,9 @@
  get Duration() {
  return this.privSpeechKeyword.Duration;
  }
+ asJson() {
+ return JSON.stringify(this.privSpeechKeyword);
+ }
  };
  exports.SpeechKeyword = SpeechKeyword;
  }
@@ -18271,29 +18799,28 @@
  async processTypeSpecificMessages(connectionMessage) {
  let result;
  const resultProps = new Exports_js_1.PropertyCollection();
- resultProps.setProperty(Exports_js_1.PropertyId.SpeechServiceResponse_JsonResult, connectionMessage.textBody);
  let processed = false;
  switch (connectionMessage.path.toLowerCase()) {
  case "speech.hypothesis":
  case "speech.fragment":
- const hypothesis = Exports_js_2.SpeechHypothesis.fromJSON(connectionMessage.textBody);
- const offset = hypothesis.Offset + this.privRequestSession.currentTurnAudioOffset;
+ const hypothesis = Exports_js_2.SpeechHypothesis.fromJSON(connectionMessage.textBody, this.privRequestSession.currentTurnAudioOffset);
+ resultProps.setProperty(Exports_js_1.PropertyId.SpeechServiceResponse_JsonResult, hypothesis.asJson());
  result = new Exports_js_1.SpeechRecognitionResult(
  this.privRequestSession.requestId,
  Exports_js_1.ResultReason.RecognizingSpeech,
  hypothesis.Text,
  hypothesis.Duration,
- offset,
+ hypothesis.Offset,
  hypothesis.Language,
  hypothesis.LanguageDetectionConfidence,
  void 0,
  // Speaker Id
  void 0,
- connectionMessage.textBody,
+ hypothesis.asJson(),
  resultProps
  );
- this.privRequestSession.onHypothesis(offset);
- const ev = new Exports_js_1.SpeechRecognitionEventArgs(result, hypothesis.Duration, this.privRequestSession.sessionId);
+ this.privRequestSession.onHypothesis(hypothesis.Offset);
+ const ev = new Exports_js_1.SpeechRecognitionEventArgs(result, hypothesis.Offset, this.privRequestSession.sessionId);
  if (!!this.privSpeechRecognizer.recognizing) {
  try {
  this.privSpeechRecognizer.recognizing(this.privSpeechRecognizer, ev);
@@ -18303,55 +18830,56 @@
  processed = true;
  break;
  case "speech.phrase":
- const simple = Exports_js_2.SimpleSpeechPhrase.fromJSON(connectionMessage.textBody);
+ const simple = Exports_js_2.SimpleSpeechPhrase.fromJSON(connectionMessage.textBody, this.privRequestSession.currentTurnAudioOffset);
+ resultProps.setProperty(Exports_js_1.PropertyId.SpeechServiceResponse_JsonResult, simple.asJson());
  const resultReason = Exports_js_2.EnumTranslation.implTranslateRecognitionResult(simple.RecognitionStatus, this.privExpectContentAssessmentResponse);
- this.privRequestSession.onPhraseRecognized(this.privRequestSession.currentTurnAudioOffset + simple.Offset + simple.Duration);
+ this.privRequestSession.onPhraseRecognized(simple.Offset + simple.Duration);
  if (Exports_js_1.ResultReason.Canceled === resultReason) {
  const cancelReason = Exports_js_2.EnumTranslation.implTranslateCancelResult(simple.RecognitionStatus);
  const cancellationErrorCode = Exports_js_2.EnumTranslation.implTranslateCancelErrorCode(simple.RecognitionStatus);
  await this.cancelRecognitionLocal(cancelReason, cancellationErrorCode, Exports_js_2.EnumTranslation.implTranslateErrorDetails(cancellationErrorCode));
  } else {
- if (!(this.privRequestSession.isSpeechEnded && resultReason === Exports_js_1.ResultReason.NoMatch && simple.RecognitionStatus !== Exports_js_2.RecognitionStatus.InitialSilenceTimeout)) {
- if (this.privRecognizerConfig.parameters.getProperty(Exports_js_2.OutputFormatPropertyName) === Exports_js_1.OutputFormat[Exports_js_1.OutputFormat.Simple]) {
- result = new Exports_js_1.SpeechRecognitionResult(
- this.privRequestSession.requestId,
- resultReason,
- simple.DisplayText,
- simple.Duration,
- simple.Offset + this.privRequestSession.currentTurnAudioOffset,
- simple.Language,
- simple.LanguageDetectionConfidence,
- void 0,
- // Speaker Id
- void 0,
- connectionMessage.textBody,
- resultProps
- );
- } else {
- const detailed = Exports_js_2.DetailedSpeechPhrase.fromJSON(connectionMessage.textBody);
- const totalOffset = detailed.Offset + this.privRequestSession.currentTurnAudioOffset;
- const offsetCorrectedJson = detailed.getJsonWithCorrectedOffsets(totalOffset);
- result = new Exports_js_1.SpeechRecognitionResult(
- this.privRequestSession.requestId,
- resultReason,
- detailed.RecognitionStatus === Exports_js_2.RecognitionStatus.Success ? detailed.NBest[0].Display : void 0,
- detailed.Duration,
- totalOffset,
- detailed.Language,
- detailed.LanguageDetectionConfidence,
- void 0,
- // Speaker Id
- void 0,
- offsetCorrectedJson,
- resultProps
- );
- }
- const event = new Exports_js_1.SpeechRecognitionEventArgs(result, result.offset, this.privRequestSession.sessionId);
- if (!!this.privSpeechRecognizer.recognized) {
- try {
- this.privSpeechRecognizer.recognized(this.privSpeechRecognizer, event);
- } catch (error) {
- }
+ if (simple.RecognitionStatus === Exports_js_2.RecognitionStatus.EndOfDictation) {
+ break;
+ }
+ if (this.privRecognizerConfig.parameters.getProperty(Exports_js_2.OutputFormatPropertyName) === Exports_js_1.OutputFormat[Exports_js_1.OutputFormat.Simple]) {
+ result = new Exports_js_1.SpeechRecognitionResult(
+ this.privRequestSession.requestId,
+ resultReason,
+ simple.DisplayText,
+ simple.Duration,
+ simple.Offset,
+ simple.Language,
+ simple.LanguageDetectionConfidence,
+ void 0,
+ // Speaker Id
+ void 0,
+ simple.asJson(),
+ resultProps
+ );
+ } else {
+ const detailed = Exports_js_2.DetailedSpeechPhrase.fromJSON(connectionMessage.textBody, this.privRequestSession.currentTurnAudioOffset);
+ resultProps.setProperty(Exports_js_1.PropertyId.SpeechServiceResponse_JsonResult, detailed.asJson());
+ result = new Exports_js_1.SpeechRecognitionResult(
+ this.privRequestSession.requestId,
+ resultReason,
+ detailed.RecognitionStatus === Exports_js_2.RecognitionStatus.Success ? detailed.NBest[0].Display : "",
+ detailed.Duration,
+ detailed.Offset,
+ detailed.Language,
+ detailed.LanguageDetectionConfidence,
+ void 0,
+ // Speaker Id
+ void 0,
+ detailed.asJson(),
+ resultProps
+ );
+ }
+ const event = new Exports_js_1.SpeechRecognitionEventArgs(result, result.offset, this.privRequestSession.sessionId);
+ if (!!this.privSpeechRecognizer.recognized) {
+ try {
+ this.privSpeechRecognizer.recognized(this.privSpeechRecognizer, event);
+ } catch (error) {
  }
  }
  if (!!this.privSuccessCallback) {
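
Editor's note: the speech.phrase handler above replaces the old isSpeechEnded/NoMatch guard with an explicit early exit for end-of-dictation markers. The apparent intent, as a hedged sketch (the numeric enum values are illustrative, not the SDK's):

const RecognitionStatus = { Success: 0, NoMatch: 1, EndOfDictation: 5 }; // illustrative numbering

function shouldSurfacePhrase(phrase) {
  // EndOfDictation looks like service bookkeeping at the end of the audio
  // stream; it carries no transcript, so no recognized event or callback fires.
  return phrase.RecognitionStatus !== RecognitionStatus.EndOfDictation;
}

console.log(shouldSurfacePhrase({ RecognitionStatus: RecognitionStatus.Success }));        // true
console.log(shouldSurfacePhrase({ RecognitionStatus: RecognitionStatus.EndOfDictation })); // false
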
@@ -18417,6 +18945,25 @@
  }
  });

+ // ../../node_modules/microsoft-cognitiveservices-speech-sdk/distrib/lib/src/common.speech/ServiceMessages/PhraseDetection/SpeakerDiarization.js
+ var require_SpeakerDiarization = __commonJS({
+ "../../node_modules/microsoft-cognitiveservices-speech-sdk/distrib/lib/src/common.speech/ServiceMessages/PhraseDetection/SpeakerDiarization.js"(exports) {
+ "use strict";
+ Object.defineProperty(exports, "__esModule", { value: true });
+ exports.IdentityProvider = exports.SpeakerDiarizationMode = void 0;
+ var SpeakerDiarizationMode;
+ (function(SpeakerDiarizationMode2) {
+ SpeakerDiarizationMode2["None"] = "None";
+ SpeakerDiarizationMode2["Identity"] = "Identity";
+ SpeakerDiarizationMode2["Anonymous"] = "Anonymous";
+ })(SpeakerDiarizationMode = exports.SpeakerDiarizationMode || (exports.SpeakerDiarizationMode = {}));
+ var IdentityProvider;
+ (function(IdentityProvider2) {
+ IdentityProvider2["CallCenter"] = "CallCenter";
+ })(IdentityProvider = exports.IdentityProvider || (exports.IdentityProvider = {}));
+ }
+ });
+
  // ../../node_modules/microsoft-cognitiveservices-speech-sdk/distrib/lib/src/common.speech/ConversationTranscriptionServiceRecognizer.js
  var require_ConversationTranscriptionServiceRecognizer = __commonJS({
  "../../node_modules/microsoft-cognitiveservices-speech-sdk/distrib/lib/src/common.speech/ConversationTranscriptionServiceRecognizer.js"(exports) {
@@ -18425,6 +18972,8 @@
  exports.ConversationTranscriptionServiceRecognizer = void 0;
  var Exports_js_1 = require_Exports3();
  var Exports_js_2 = require_Exports7();
+ var SpeakerDiarization_js_1 = require_SpeakerDiarization();
+ var PhraseDetectionContext_js_1 = require_PhraseDetectionContext();
  var ConversationTranscriptionServiceRecognizer = class extends Exports_js_2.ServiceRecognizerBase {
  constructor(authentication, connectionFactory, audioSource, recognizerConfig, conversationTranscriber) {
  super(authentication, connectionFactory, audioSource, recognizerConfig, conversationTranscriber);
@@ -18433,14 +18982,15 @@
  }
  setSpeakerDiarizationJson() {
  if (this.privEnableSpeakerId) {
- const phraseDetection = this.privSpeechContext.getSection("phraseDetection");
- phraseDetection.mode = "Conversation";
+ const phraseDetection = this.privSpeechContext.getContext().phraseDetection || {};
+ phraseDetection.mode = PhraseDetectionContext_js_1.RecognitionMode.Conversation;
  const speakerDiarization = {};
- speakerDiarization.mode = "Anonymous";
+ speakerDiarization.mode = SpeakerDiarization_js_1.SpeakerDiarizationMode.Anonymous;
  speakerDiarization.audioSessionId = this.privDiarizationSessionId;
  speakerDiarization.audioOffsetMs = 0;
+ speakerDiarization.diarizeIntermediates = this.privRecognizerConfig.parameters.getProperty(Exports_js_1.PropertyId.SpeechServiceResponse_DiarizeIntermediateResults, "false") === "true";
  phraseDetection.speakerDiarization = speakerDiarization;
- this.privSpeechContext.setSection("phraseDetection", phraseDetection);
+ this.privSpeechContext.getContext().phraseDetection = phraseDetection;
  }
  }
  async processTypeSpecificMessages(connectionMessage) {
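
Editor's note: with the new SpeakerDiarization module, setSpeakerDiarizationJson() builds the phraseDetection section from enums instead of string literals. Roughly the JSON it produces, as a sketch (the session id is illustrative, and diarizeIntermediates reflects the SpeechServiceResponse_DiarizeIntermediateResults property):

const context = {};
context.phraseDetection = {
  mode: "Conversation", // RecognitionMode.Conversation
  speakerDiarization: {
    mode: "Anonymous", // SpeakerDiarizationMode.Anonymous
    audioSessionId: "0FEA2F5D21C64A2F9AD55B1342C72B3D", // illustrative session id
    audioOffsetMs: 0,
    diarizeIntermediates: false
  }
};
console.log(JSON.stringify(context));
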
@@ -18451,23 +19001,9 @@
  switch (connectionMessage.path.toLowerCase()) {
  case "speech.hypothesis":
  case "speech.fragment":
- const hypothesis = Exports_js_2.SpeechHypothesis.fromJSON(connectionMessage.textBody);
- const offset = hypothesis.Offset + this.privRequestSession.currentTurnAudioOffset;
- result = new Exports_js_1.ConversationTranscriptionResult(
- this.privRequestSession.requestId,
- Exports_js_1.ResultReason.RecognizingSpeech,
- hypothesis.Text,
- hypothesis.Duration,
- offset,
- hypothesis.Language,
- hypothesis.LanguageDetectionConfidence,
- void 0,
- // Speaker Id
- void 0,
- connectionMessage.textBody,
- resultProps
- );
- this.privRequestSession.onHypothesis(offset);
+ const hypothesis = Exports_js_2.SpeechHypothesis.fromJSON(connectionMessage.textBody, this.privRequestSession.currentTurnAudioOffset);
+ result = new Exports_js_1.ConversationTranscriptionResult(this.privRequestSession.requestId, Exports_js_1.ResultReason.RecognizingSpeech, hypothesis.Text, hypothesis.Duration, hypothesis.Offset, hypothesis.Language, hypothesis.LanguageDetectionConfidence, hypothesis.SpeakerId, void 0, hypothesis.asJson(), resultProps);
+ this.privRequestSession.onHypothesis(hypothesis.Offset);
  const ev = new Exports_js_1.ConversationTranscriptionEventArgs(result, hypothesis.Duration, this.privRequestSession.sessionId);
  if (!!this.privConversationTranscriber.transcribing) {
  try {
@@ -18478,9 +19014,9 @@
  processed = true;
  break;
  case "speech.phrase":
- const simple = Exports_js_2.SimpleSpeechPhrase.fromJSON(connectionMessage.textBody);
+ const simple = Exports_js_2.SimpleSpeechPhrase.fromJSON(connectionMessage.textBody, this.privRequestSession.currentTurnAudioOffset);
  const resultReason = Exports_js_2.EnumTranslation.implTranslateRecognitionResult(simple.RecognitionStatus);
- this.privRequestSession.onPhraseRecognized(this.privRequestSession.currentTurnAudioOffset + simple.Offset + simple.Duration);
+ this.privRequestSession.onPhraseRecognized(simple.Offset + simple.Duration);
  if (Exports_js_1.ResultReason.Canceled === resultReason) {
  const cancelReason = Exports_js_2.EnumTranslation.implTranslateCancelResult(simple.RecognitionStatus);
  const cancellationErrorCode = Exports_js_2.EnumTranslation.implTranslateCancelErrorCode(simple.RecognitionStatus);
@@ -18488,12 +19024,10 @@
  } else {
  if (!(this.privRequestSession.isSpeechEnded && resultReason === Exports_js_1.ResultReason.NoMatch && simple.RecognitionStatus !== Exports_js_2.RecognitionStatus.InitialSilenceTimeout)) {
  if (this.privRecognizerConfig.parameters.getProperty(Exports_js_2.OutputFormatPropertyName) === Exports_js_1.OutputFormat[Exports_js_1.OutputFormat.Simple]) {
- result = new Exports_js_1.ConversationTranscriptionResult(this.privRequestSession.requestId, resultReason, simple.DisplayText, simple.Duration, simple.Offset + this.privRequestSession.currentTurnAudioOffset, simple.Language, simple.LanguageDetectionConfidence, simple.SpeakerId, void 0, connectionMessage.textBody, resultProps);
+ result = new Exports_js_1.ConversationTranscriptionResult(this.privRequestSession.requestId, resultReason, simple.DisplayText, simple.Duration, simple.Offset, simple.Language, simple.LanguageDetectionConfidence, simple.SpeakerId, void 0, simple.asJson(), resultProps);
  } else {
- const detailed = Exports_js_2.DetailedSpeechPhrase.fromJSON(connectionMessage.textBody);
- const totalOffset = detailed.Offset + this.privRequestSession.currentTurnAudioOffset;
- const offsetCorrectedJson = detailed.getJsonWithCorrectedOffsets(totalOffset);
- result = new Exports_js_1.ConversationTranscriptionResult(this.privRequestSession.requestId, resultReason, detailed.RecognitionStatus === Exports_js_2.RecognitionStatus.Success ? detailed.NBest[0].Display : void 0, detailed.Duration, totalOffset, detailed.Language, detailed.LanguageDetectionConfidence, simple.SpeakerId, void 0, offsetCorrectedJson, resultProps);
+ const detailed = Exports_js_2.DetailedSpeechPhrase.fromJSON(connectionMessage.textBody, this.privRequestSession.currentTurnAudioOffset);
+ result = new Exports_js_1.ConversationTranscriptionResult(this.privRequestSession.requestId, resultReason, detailed.RecognitionStatus === Exports_js_2.RecognitionStatus.Success ? detailed.NBest[0].Display : void 0, detailed.Duration, detailed.Offset, detailed.Language, detailed.LanguageDetectionConfidence, simple.SpeakerId, void 0, detailed.asJson(), resultProps);
  }
  const event = new Exports_js_1.ConversationTranscriptionEventArgs(result, result.offset, this.privRequestSession.sessionId);
  if (!!this.privConversationTranscriber.transcribed) {
@@ -18683,39 +19217,37 @@
  exports.DetailedSpeechPhrase = void 0;
  var Exports_js_1 = require_Exports7();
  var DetailedSpeechPhrase = class _DetailedSpeechPhrase {
- constructor(json) {
+ constructor(json, baseOffset) {
  this.privDetailedSpeechPhrase = JSON.parse(json);
- this.privDetailedSpeechPhrase.RecognitionStatus = Exports_js_1.RecognitionStatus[this.privDetailedSpeechPhrase.RecognitionStatus];
+ this.privDetailedSpeechPhrase.RecognitionStatus = this.mapRecognitionStatus(this.privDetailedSpeechPhrase.RecognitionStatus);
+ this.updateOffsets(baseOffset);
  }
- static fromJSON(json) {
- return new _DetailedSpeechPhrase(json);
+ static fromJSON(json, baseOffset) {
+ return new _DetailedSpeechPhrase(json, baseOffset);
  }
- getJsonWithCorrectedOffsets(baseOffset) {
+ updateOffsets(baseOffset) {
+ this.privDetailedSpeechPhrase.Offset += baseOffset;
  if (!!this.privDetailedSpeechPhrase.NBest) {
- let firstWordOffset;
  for (const phrase of this.privDetailedSpeechPhrase.NBest) {
- if (!!phrase.Words && !!phrase.Words[0]) {
- firstWordOffset = phrase.Words[0].Offset;
- break;
- }
- }
- if (!!firstWordOffset && firstWordOffset < baseOffset) {
- const offset = baseOffset - firstWordOffset;
- for (const details of this.privDetailedSpeechPhrase.NBest) {
- if (!!details.Words) {
- for (const word of details.Words) {
- word.Offset += offset;
- }
+ if (!!phrase.Words) {
+ for (const word of phrase.Words) {
+ word.Offset += baseOffset;
  }
- if (!!details.DisplayWords) {
- for (const word of details.DisplayWords) {
- word.Offset += offset;
- }
+ }
+ if (!!phrase.DisplayWords) {
+ for (const word of phrase.DisplayWords) {
+ word.Offset += baseOffset;
  }
  }
  }
  }
- return JSON.stringify(this.privDetailedSpeechPhrase);
+ }
+ asJson() {
+ const jsonObj = { ...this.privDetailedSpeechPhrase };
+ return JSON.stringify({
+ ...jsonObj,
+ RecognitionStatus: Exports_js_1.RecognitionStatus[jsonObj.RecognitionStatus]
+ });
  }
  get RecognitionStatus() {
  return this.privDetailedSpeechPhrase.RecognitionStatus;
@@ -18744,6 +19276,13 @@
  get SpeakerId() {
  return this.privDetailedSpeechPhrase.SpeakerId;
  }
+ mapRecognitionStatus(status) {
+ if (typeof status === "string") {
+ return Exports_js_1.RecognitionStatus[status];
+ } else if (typeof status === "number") {
+ return status;
+ }
+ }
  };
  exports.DetailedSpeechPhrase = DetailedSpeechPhrase;
  }
@@ -18757,12 +19296,23 @@
  exports.SimpleSpeechPhrase = void 0;
  var Exports_js_1 = require_Exports7();
  var SimpleSpeechPhrase = class _SimpleSpeechPhrase {
- constructor(json) {
+ constructor(json, baseOffset = 0) {
  this.privSimpleSpeechPhrase = JSON.parse(json);
- this.privSimpleSpeechPhrase.RecognitionStatus = Exports_js_1.RecognitionStatus[this.privSimpleSpeechPhrase.RecognitionStatus];
+ this.privSimpleSpeechPhrase.RecognitionStatus = this.mapRecognitionStatus(this.privSimpleSpeechPhrase.RecognitionStatus);
+ this.updateOffset(baseOffset);
  }
- static fromJSON(json) {
- return new _SimpleSpeechPhrase(json);
+ static fromJSON(json, baseOffset) {
+ return new _SimpleSpeechPhrase(json, baseOffset);
+ }
+ updateOffset(baseOffset) {
+ this.privSimpleSpeechPhrase.Offset += baseOffset;
+ }
+ asJson() {
+ const jsonObj = { ...this.privSimpleSpeechPhrase };
+ return JSON.stringify({
+ ...jsonObj,
+ RecognitionStatus: Exports_js_1.RecognitionStatus[jsonObj.RecognitionStatus]
+ });
  }
  get RecognitionStatus() {
  return this.privSimpleSpeechPhrase.RecognitionStatus;
@@ -18785,6 +19335,13 @@
  get SpeakerId() {
  return this.privSimpleSpeechPhrase.SpeakerId;
  }
+ mapRecognitionStatus(status) {
+ if (typeof status === "string") {
+ return Exports_js_1.RecognitionStatus[status];
+ } else if (typeof status === "number") {
+ return status;
+ }
+ }
  };
  exports.SimpleSpeechPhrase = SimpleSpeechPhrase;
  }
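
Editor's note: both phrase classes now route RecognitionStatus through mapRecognitionStatus(), which tolerates the service sending the status either as an enum name or already as a number, while asJson() converts the numeric value back to its name for serialization. A self-contained sketch (the numeric values and reverse lookup imitate a TypeScript numeric enum; they are illustrative, not the SDK's):

const RecognitionStatus = { Success: 0, NoMatch: 1, EndOfDictation: 2 }; // illustrative
RecognitionStatus[0] = "Success"; // reverse lookup, as TS numeric enums provide
RecognitionStatus[1] = "NoMatch";
RecognitionStatus[2] = "EndOfDictation";

function mapRecognitionStatus(status) {
  if (typeof status === "string") {
    return RecognitionStatus[status]; // "Success" -> 0
  } else if (typeof status === "number") {
    return status; // already numeric, pass through
  }
}

console.log(mapRecognitionStatus("NoMatch")); // 1
console.log(mapRecognitionStatus(2));         // 2
console.log(RecognitionStatus[mapRecognitionStatus("Success")]); // "Success" again, as asJson() emits
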
@@ -18826,6 +19383,7 @@
  super(authentication, connectionFactory, audioSource, recognizerConfig, recognizer);
  this.privIntentRecognizer = recognizer;
  this.privIntentDataSent = false;
+ recognizerConfig.recognitionEndpointVersion = "1";
  }
  setIntents(addedIntents, umbrellaIntent) {
  this.privAddedLmIntents = addedIntents;
@@ -18842,10 +19400,10 @@
  }
  switch (connectionMessage.path.toLowerCase()) {
  case "speech.hypothesis":
- const speechHypothesis = Exports_js_3.SpeechHypothesis.fromJSON(connectionMessage.textBody);
- result = new Exports_js_2.IntentRecognitionResult(void 0, this.privRequestSession.requestId, Exports_js_2.ResultReason.RecognizingIntent, speechHypothesis.Text, speechHypothesis.Duration, speechHypothesis.Offset + this.privRequestSession.currentTurnAudioOffset, speechHypothesis.Language, speechHypothesis.LanguageDetectionConfidence, void 0, connectionMessage.textBody, resultProps);
+ const speechHypothesis = Exports_js_3.SpeechHypothesis.fromJSON(connectionMessage.textBody, this.privRequestSession.currentTurnAudioOffset);
+ result = new Exports_js_2.IntentRecognitionResult(void 0, this.privRequestSession.requestId, Exports_js_2.ResultReason.RecognizingIntent, speechHypothesis.Text, speechHypothesis.Duration, speechHypothesis.Offset, speechHypothesis.Language, speechHypothesis.LanguageDetectionConfidence, void 0, speechHypothesis.asJson(), resultProps);
  this.privRequestSession.onHypothesis(result.offset);
- ev = new Exports_js_2.IntentRecognitionEventArgs(result, speechHypothesis.Offset + this.privRequestSession.currentTurnAudioOffset, this.privRequestSession.sessionId);
+ ev = new Exports_js_2.IntentRecognitionEventArgs(result, speechHypothesis.Offset, this.privRequestSession.sessionId);
  if (!!this.privIntentRecognizer.recognizing) {
  try {
  this.privIntentRecognizer.recognizing(this.privIntentRecognizer, ev);
@@ -18855,8 +19413,8 @@
  processed = true;
  break;
  case "speech.phrase":
- const simple = Exports_js_3.SimpleSpeechPhrase.fromJSON(connectionMessage.textBody);
- result = new Exports_js_2.IntentRecognitionResult(void 0, this.privRequestSession.requestId, Exports_js_3.EnumTranslation.implTranslateRecognitionResult(simple.RecognitionStatus), simple.DisplayText, simple.Duration, simple.Offset + this.privRequestSession.currentTurnAudioOffset, simple.Language, simple.LanguageDetectionConfidence, void 0, connectionMessage.textBody, resultProps);
+ const simple = Exports_js_3.SimpleSpeechPhrase.fromJSON(connectionMessage.textBody, this.privRequestSession.currentTurnAudioOffset);
+ result = new Exports_js_2.IntentRecognitionResult(void 0, this.privRequestSession.requestId, Exports_js_3.EnumTranslation.implTranslateRecognitionResult(simple.RecognitionStatus), simple.DisplayText, simple.Duration, simple.Offset, simple.Language, simple.LanguageDetectionConfidence, void 0, simple.asJson(), resultProps);
  ev = new Exports_js_2.IntentRecognitionEventArgs(result, result.offset, this.privRequestSession.sessionId);
  const sendEvent = () => {
  if (!!this.privIntentRecognizer.recognized) {
@@ -19235,8 +19793,8 @@
  this.privInTurn = false;
  this.privConnectionAttempts = 0;
  this.privAudioSourceId = audioSourceId;
- this.privRequestId = Exports_js_1.createNoDashGuid();
- this.privAudioNodeId = Exports_js_1.createNoDashGuid();
+ this.privRequestId = (0, Exports_js_1.createNoDashGuid)();
+ this.privAudioNodeId = (0, Exports_js_1.createNoDashGuid)();
  this.privTurnDeferral = new Exports_js_1.Deferred();
  this.privTurnDeferral.resolve();
  }
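
Editor's note: the mechanical Exports_js_1.createNoDashGuid() → (0, Exports_js_1.createNoDashGuid)() rewrites scattered through this diff are bundler output rather than a change in the SDK source: the comma expression detaches the function from its namespace object before the call, so the callee does not receive the module object as `this`. Demonstration:

"use strict";
const ns = {
  who() {
    return this === ns ? "called as a method" : "called unbound";
  }
};
console.log(ns.who());      // "called as a method" (this === ns)
console.log((0, ns.who)()); // "called unbound" (this === undefined in strict mode)
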
@@ -19336,7 +19894,7 @@
  }
  }
  onSpeechContext() {
- this.privRequestId = Exports_js_1.createNoDashGuid();
+ this.privRequestId = (0, Exports_js_1.createNoDashGuid)();
  }
  onServiceTurnStartResponse() {
  if (!!this.privTurnDeferral && !!this.privInTurn) {
@@ -19421,32 +19979,67 @@
  }
  });

+ // ../../node_modules/microsoft-cognitiveservices-speech-sdk/distrib/lib/src/common.speech/ServiceMessages/PhraseOutput/PhraseOutput.js
+ var require_PhraseOutput = __commonJS({
+ "../../node_modules/microsoft-cognitiveservices-speech-sdk/distrib/lib/src/common.speech/ServiceMessages/PhraseOutput/PhraseOutput.js"(exports) {
+ "use strict";
+ Object.defineProperty(exports, "__esModule", { value: true });
+ exports.TentativePhraseResultsOption = exports.OutputFormat = exports.PhraseExtension = exports.PhraseOption = void 0;
+ var PhraseOption;
+ (function(PhraseOption2) {
+ PhraseOption2["WordTimings"] = "WordTimings";
+ PhraseOption2["SNR"] = "SNR";
+ PhraseOption2["Pronunciation"] = "Pronunciation";
+ PhraseOption2["WordPronunciation"] = "WordPronunciation";
+ PhraseOption2["WordConfidence"] = "WordConfidence";
+ PhraseOption2["Words"] = "Words";
+ PhraseOption2["Sentiment"] = "Sentiment";
+ PhraseOption2["PronunciationAssessment"] = "PronunciationAssessment";
+ PhraseOption2["ContentAssessment"] = "ContentAssessment";
+ PhraseOption2["PhraseAMScore"] = "PhraseAMScore";
+ PhraseOption2["PhraseLMScore"] = "PhraseLMScore";
+ PhraseOption2["WordAMScore"] = "WordAMScore";
+ PhraseOption2["WordLMScore"] = "WordLMScore";
+ PhraseOption2["RuleTree"] = "RuleTree";
+ PhraseOption2["NBestTimings"] = "NBestTimings";
+ PhraseOption2["DecoderDiagnostics"] = "DecoderDiagnostics";
+ PhraseOption2["DisplayWordTimings"] = "DisplayWordTimings";
+ PhraseOption2["DisplayWords"] = "DisplayWords";
+ })(PhraseOption = exports.PhraseOption || (exports.PhraseOption = {}));
+ var PhraseExtension;
+ (function(PhraseExtension2) {
+ PhraseExtension2["Graph"] = "Graph";
+ PhraseExtension2["Corrections"] = "Corrections";
+ PhraseExtension2["Sentiment"] = "Sentiment";
+ })(PhraseExtension = exports.PhraseExtension || (exports.PhraseExtension = {}));
+ var OutputFormat3;
+ (function(OutputFormat4) {
+ OutputFormat4["Simple"] = "Simple";
+ OutputFormat4["Detailed"] = "Detailed";
+ })(OutputFormat3 = exports.OutputFormat || (exports.OutputFormat = {}));
+ var TentativePhraseResultsOption;
+ (function(TentativePhraseResultsOption2) {
+ TentativePhraseResultsOption2["None"] = "None";
+ TentativePhraseResultsOption2["Always"] = "Always";
+ })(TentativePhraseResultsOption = exports.TentativePhraseResultsOption || (exports.TentativePhraseResultsOption = {}));
+ }
+ });
+
  // ../../node_modules/microsoft-cognitiveservices-speech-sdk/distrib/lib/src/common.speech/SpeechContext.js
  var require_SpeechContext = __commonJS({
  "../../node_modules/microsoft-cognitiveservices-speech-sdk/distrib/lib/src/common.speech/SpeechContext.js"(exports) {
  "use strict";
  Object.defineProperty(exports, "__esModule", { value: true });
  exports.SpeechContext = void 0;
+ var PhraseDetectionContext_js_1 = require_PhraseDetectionContext();
+ var PhraseOutput_js_1 = require_PhraseOutput();
  var SpeechContext = class {
  constructor(dynamicGrammar) {
  this.privContext = {};
  this.privDynamicGrammar = dynamicGrammar;
  }
- /**
- * Gets a section of the speech.context object.
- * @param sectionName Name of the section to get.
- * @return string or Context JSON serializable object that represents the value.
- */
- getSection(sectionName) {
- return this.privContext[sectionName] || {};
- }
- /**
- * Adds a section to the speech.context object.
- * @param sectionName Name of the section to add.
- * @param value JSON serializable object that represents the value.
- */
- setSection(sectionName, value) {
- this.privContext[sectionName] = value;
+ getContext() {
+ return this.privContext;
  }
  /**
  * @Internal
@@ -19466,20 +20059,20 @@
  pronunciationAssessment: {}
  };
  }
- this.privContext.phraseDetection.enrichment.pronunciationAssessment = JSON.parse(params);
+ this.privContext.phraseDetection.enrichment.pronunciationAssessment = JSON.parse(params) || {};
  if (isSpeakerDiarizationEnabled) {
- this.privContext.phraseDetection.mode = "Conversation";
+ this.privContext.phraseDetection.mode = PhraseDetectionContext_js_1.RecognitionMode.Conversation;
  }
  this.setWordLevelTimings();
- this.privContext.phraseOutput.detailed.options.push("PronunciationAssessment");
- if (this.privContext.phraseOutput.detailed.options.indexOf("SNR") === -1) {
- this.privContext.phraseOutput.detailed.options.push("SNR");
+ this.privContext.phraseOutput.detailed.options.push(PhraseOutput_js_1.PhraseOption.PronunciationAssessment);
+ if (this.privContext.phraseOutput.detailed.options.indexOf(PhraseOutput_js_1.PhraseOption.SNR) === -1) {
+ this.privContext.phraseOutput.detailed.options.push(PhraseOutput_js_1.PhraseOption.SNR);
  }
  if (!!contentAssessmentTopic) {
  this.privContext.phraseDetection.enrichment.contentAssessment = {
  topic: contentAssessmentTopic
  };
- this.privContext.phraseOutput.detailed.options.push("ContentAssessment");
+ this.privContext.phraseOutput.detailed.options.push(PhraseOutput_js_1.PhraseOption.ContentAssessment);
  }
  }
  setDetailedOutputFormat() {
@@ -19487,8 +20080,7 @@
  this.privContext.phraseOutput = {
  detailed: {
  options: []
- },
- format: {}
+ }
  };
  }
  if (this.privContext.phraseOutput.detailed === void 0) {
@@ -19496,15 +20088,14 @@
  options: []
  };
  }
- this.privContext.phraseOutput.format = "Detailed";
+ this.privContext.phraseOutput.format = PhraseOutput_js_1.OutputFormat.Detailed;
  }
  setWordLevelTimings() {
  if (this.privContext.phraseOutput === void 0) {
  this.privContext.phraseOutput = {
  detailed: {
  options: []
- },
- format: {}
+ }
  };
  }
  if (this.privContext.phraseOutput.detailed === void 0) {
@@ -19512,9 +20103,9 @@
  options: []
  };
  }
- this.privContext.phraseOutput.format = "Detailed";
- if (this.privContext.phraseOutput.detailed.options.indexOf("WordTimings") === -1) {
- this.privContext.phraseOutput.detailed.options.push("WordTimings");
+ this.privContext.phraseOutput.format = PhraseOutput_js_1.OutputFormat.Detailed;
+ if (this.privContext.phraseOutput.detailed.options.indexOf(PhraseOutput_js_1.PhraseOption.WordTimings) === -1) {
+ this.privContext.phraseOutput.detailed.options.push(PhraseOutput_js_1.PhraseOption.WordTimings);
  }
  }
  setSpeakerDiarizationAudioOffsetMs(audioOffsetMs) {
@@ -19522,7 +20113,7 @@
  }
  toJSON() {
  const dgi = this.privDynamicGrammar.generateGrammarObject();
- this.setSection("dgi", dgi);
+ this.privContext.dgi = dgi;
  const ret = JSON.stringify(this.privContext);
  return ret;
  }
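
Editor's note: SpeechContext drops the stringly-typed getSection()/setSection() accessors in favor of handing out the context object itself. Call sites migrate roughly like this (SpeechContextSketch stands in for the SDK class; the enum string is illustrative):

class SpeechContextSketch {
  constructor() {
    this.privContext = {};
  }
  getContext() {
    return this.privContext;
  }
}

const speechContext = new SpeechContextSketch();
// before: const pd = speechContext.getSection("phraseDetection");
//         pd.mode = "Conversation";
//         speechContext.setSection("phraseDetection", pd);
const pd = speechContext.getContext().phraseDetection || {};
pd.mode = "Conversation"; // RecognitionMode.Conversation in the real code
speechContext.getContext().phraseDetection = pd;
console.log(JSON.stringify(speechContext.getContext()));
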
@@ -19531,13 +20122,42 @@
  }
  });

+ // ../../node_modules/microsoft-cognitiveservices-speech-sdk/distrib/lib/src/common.speech/ServiceMessages/Dgi/Group.js
+ var require_Group = __commonJS({
+ "../../node_modules/microsoft-cognitiveservices-speech-sdk/distrib/lib/src/common.speech/ServiceMessages/Dgi/Group.js"(exports) {
+ "use strict";
+ Object.defineProperty(exports, "__esModule", { value: true });
+ exports.SubstringMatchType = exports.GroupType = void 0;
+ var GroupType;
+ (function(GroupType2) {
+ GroupType2["IntentText"] = "IntentText";
+ GroupType2["IntentEntity"] = "IntentEntity";
+ GroupType2["Generic"] = "Generic";
+ GroupType2["People"] = "People";
+ GroupType2["Place"] = "Place";
+ GroupType2["DynamicEntity"] = "DynamicEntity";
+ })(GroupType = exports.GroupType || (exports.GroupType = {}));
+ var SubstringMatchType;
+ (function(SubstringMatchType2) {
+ SubstringMatchType2["None"] = "None";
+ SubstringMatchType2["LeftRooted"] = "LeftRooted";
+ SubstringMatchType2["PartialName"] = "PartialName";
+ SubstringMatchType2["MiddleOfSentence"] = "MiddleOfSentence";
+ })(SubstringMatchType = exports.SubstringMatchType || (exports.SubstringMatchType = {}));
+ }
+ });
+
  // ../../node_modules/microsoft-cognitiveservices-speech-sdk/distrib/lib/src/common.speech/DynamicGrammarBuilder.js
  var require_DynamicGrammarBuilder = __commonJS({
  "../../node_modules/microsoft-cognitiveservices-speech-sdk/distrib/lib/src/common.speech/DynamicGrammarBuilder.js"(exports) {
  "use strict";
  Object.defineProperty(exports, "__esModule", { value: true });
  exports.DynamicGrammarBuilder = void 0;
+ var Group_js_1 = require_Group();
  var DynamicGrammarBuilder = class {
+ constructor() {
+ this.privWeight = 1;
+ }
  // Adds one more reference phrases to the dynamic grammar to send.
  // All added phrases are generic phrases.
  addPhrase(phrase) {
@@ -19569,6 +20189,10 @@
  clearGrammars() {
  this.privGrammars = void 0;
  }
+ // Sets the weight for the dynamic grammar.
+ setWeight(weight) {
+ this.privWeight = weight;
+ }
  // Generates an object that represents the dynamic grammar used by the Speech Service.
  // This is done by building an object with the correct layout based on the phrases and reference grammars added to this instance
  // of a DynamicGrammarBuilder
@@ -19577,15 +20201,16 @@
  return void 0;
  }
  const retObj = {};
- retObj.ReferenceGrammars = this.privGrammars;
+ retObj.referenceGrammars = this.privGrammars;
  if (void 0 !== this.privPhrases && 0 !== this.privPhrases.length) {
  const retPhrases = [];
  this.privPhrases.forEach((value) => {
  retPhrases.push({
- Text: value
+ text: value
  });
  });
- retObj.Groups = [{ Type: "Generic", Items: retPhrases }];
+ retObj.groups = [{ type: Group_js_1.GroupType.Generic, items: retPhrases }];
+ retObj.bias = this.privWeight;
  }
  return retObj;
  }
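
Editor's note: generateGrammarObject() now emits camelCased keys plus a bias taken from the new privWeight field (settable via setWeight()). The shape change, sketched with illustrative grammar and phrase values:

// before this version:
const oldShape = {
  ReferenceGrammars: ["luis/my-app-id"], // illustrative grammar id
  Groups: [{ Type: "Generic", Items: [{ Text: "Contoso" }] }]
};
// after this version:
const newShape = {
  referenceGrammars: ["luis/my-app-id"],
  groups: [{ type: "Generic", items: [{ text: "Contoso" }] }],
  bias: 1 // privWeight default; DynamicGrammarBuilder.setWeight(weight) overrides it
};
console.log(JSON.stringify(newShape));
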
@@ -19594,14 +20219,6 @@
  }
  });

- // ../../node_modules/microsoft-cognitiveservices-speech-sdk/distrib/lib/src/common.speech/DynamicGrammarInterfaces.js
- var require_DynamicGrammarInterfaces = __commonJS({
- "../../node_modules/microsoft-cognitiveservices-speech-sdk/distrib/lib/src/common.speech/DynamicGrammarInterfaces.js"(exports) {
- "use strict";
- Object.defineProperty(exports, "__esModule", { value: true });
- }
- });
-
  // ../../node_modules/microsoft-cognitiveservices-speech-sdk/distrib/lib/src/common.speech/ServiceMessages/ActivityResponsePayload.js
  var require_ActivityResponsePayload = __commonJS({
  "../../node_modules/microsoft-cognitiveservices-speech-sdk/distrib/lib/src/common.speech/ServiceMessages/ActivityResponsePayload.js"(exports) {
@@ -19728,6 +20345,38 @@
  }
  });

+ // ../../node_modules/microsoft-cognitiveservices-speech-sdk/distrib/lib/src/common.speech/ServiceMessages/InvocationSource.js
+ var require_InvocationSource = __commonJS({
+ "../../node_modules/microsoft-cognitiveservices-speech-sdk/distrib/lib/src/common.speech/ServiceMessages/InvocationSource.js"(exports) {
+ "use strict";
+ Object.defineProperty(exports, "__esModule", { value: true });
+ exports.InvocationSource = void 0;
+ var InvocationSource;
+ (function(InvocationSource2) {
+ InvocationSource2["None"] = "None";
+ InvocationSource2["VoiceActivationWithKeyword"] = "VoiceActivationWithKeyword";
+ })(InvocationSource = exports.InvocationSource || (exports.InvocationSource = {}));
+ }
+ });
+
+ // ../../node_modules/microsoft-cognitiveservices-speech-sdk/distrib/lib/src/common.speech/ServiceMessages/KeywordDetection/KeywordDetection.js
+ var require_KeywordDetection = __commonJS({
+ "../../node_modules/microsoft-cognitiveservices-speech-sdk/distrib/lib/src/common.speech/ServiceMessages/KeywordDetection/KeywordDetection.js"(exports) {
+ "use strict";
+ Object.defineProperty(exports, "__esModule", { value: true });
+ exports.OnRejectAction = exports.KeywordDetectionType = void 0;
+ var KeywordDetectionType;
+ (function(KeywordDetectionType2) {
+ KeywordDetectionType2["StartTrigger"] = "StartTrigger";
+ })(KeywordDetectionType = exports.KeywordDetectionType || (exports.KeywordDetectionType = {}));
+ var OnRejectAction;
+ (function(OnRejectAction2) {
+ OnRejectAction2["EndOfTurn"] = "EndOfTurn";
+ OnRejectAction2["Continue"] = "Continue";
+ })(OnRejectAction = exports.OnRejectAction || (exports.OnRejectAction = {}));
+ }
+ });
+
  // ../../node_modules/microsoft-cognitiveservices-speech-sdk/distrib/lib/src/common.speech/DialogServiceAdapter.js
  var require_DialogServiceAdapter = __commonJS({
  "../../node_modules/microsoft-cognitiveservices-speech-sdk/distrib/lib/src/common.speech/DialogServiceAdapter.js"(exports) {
@@ -19742,6 +20391,8 @@
  var DialogServiceTurnStateManager_js_1 = require_DialogServiceTurnStateManager();
  var Exports_js_4 = require_Exports7();
  var ActivityResponsePayload_js_1 = require_ActivityResponsePayload();
+ var InvocationSource_js_1 = require_InvocationSource();
+ var KeywordDetection_js_1 = require_KeywordDetection();
  var SpeechConnectionMessage_Internal_js_1 = require_SpeechConnectionMessage_Internal();
  var DialogServiceAdapter = class extends Exports_js_4.ServiceRecognizerBase {
  constructor(authentication, connectionFactory, audioSource, recognizerConfig, dialogServiceConnector) {
@@ -19764,8 +20415,8 @@
  });
  }
  async sendMessage(message) {
- const interactionGuid = Exports_js_2.createGuid();
- const requestId = Exports_js_2.createNoDashGuid();
+ const interactionGuid = (0, Exports_js_2.createGuid)();
+ const requestId = (0, Exports_js_2.createNoDashGuid)();
  const agentMessage = {
  context: {
  interactionId: interactionGuid
@@ -19793,8 +20444,8 @@
  let processed;
  switch (connectionMessage.path.toLowerCase()) {
  case "speech.phrase":
- const speechPhrase = Exports_js_4.SimpleSpeechPhrase.fromJSON(connectionMessage.textBody);
- this.privRequestSession.onPhraseRecognized(this.privRequestSession.currentTurnAudioOffset + speechPhrase.Offset + speechPhrase.Duration);
+ const speechPhrase = Exports_js_4.SimpleSpeechPhrase.fromJSON(connectionMessage.textBody, this.privRequestSession.currentTurnAudioOffset);
+ this.privRequestSession.onPhraseRecognized(speechPhrase.Offset + speechPhrase.Duration);
  if (speechPhrase.RecognitionStatus !== Exports_js_4.RecognitionStatus.TooManyRequests && speechPhrase.RecognitionStatus !== Exports_js_4.RecognitionStatus.Error) {
  const args = this.fireEventForResult(speechPhrase, resultProps);
  this.privLastResult = args.result;
@@ -19808,11 +20459,10 @@
  processed = true;
  break;
  case "speech.hypothesis":
- const hypothesis = Exports_js_4.SpeechHypothesis.fromJSON(connectionMessage.textBody);
- const offset = hypothesis.Offset + this.privRequestSession.currentTurnAudioOffset;
- result = new Exports_js_3.SpeechRecognitionResult(this.privRequestSession.requestId, Exports_js_3.ResultReason.RecognizingSpeech, hypothesis.Text, hypothesis.Duration, offset, hypothesis.Language, hypothesis.LanguageDetectionConfidence, void 0, void 0, connectionMessage.textBody, resultProps);
- this.privRequestSession.onHypothesis(offset);
- const ev = new Exports_js_3.SpeechRecognitionEventArgs(result, hypothesis.Duration, this.privRequestSession.sessionId);
+ const hypothesis = Exports_js_4.SpeechHypothesis.fromJSON(connectionMessage.textBody, this.privRequestSession.currentTurnAudioOffset);
+ result = new Exports_js_3.SpeechRecognitionResult(this.privRequestSession.requestId, Exports_js_3.ResultReason.RecognizingSpeech, hypothesis.Text, hypothesis.Duration, hypothesis.Offset, hypothesis.Language, hypothesis.LanguageDetectionConfidence, void 0, void 0, hypothesis.asJson(), resultProps);
+ this.privRequestSession.onHypothesis(hypothesis.Offset);
+ const ev = new Exports_js_3.SpeechRecognitionEventArgs(result, hypothesis.Offset, this.privRequestSession.sessionId);
  if (!!this.privDialogServiceConnector.recognizing) {
  try {
  this.privDialogServiceConnector.recognizing(this.privDialogServiceConnector, ev);
@@ -19822,8 +20472,8 @@
  processed = true;
  break;
  case "speech.keyword":
- const keyword = Exports_js_4.SpeechKeyword.fromJSON(connectionMessage.textBody);
- result = new Exports_js_3.SpeechRecognitionResult(this.privRequestSession.requestId, keyword.Status === "Accepted" ? Exports_js_3.ResultReason.RecognizedKeyword : Exports_js_3.ResultReason.NoMatch, keyword.Text, keyword.Duration, keyword.Offset, void 0, void 0, void 0, void 0, connectionMessage.textBody, resultProps);
+ const keyword = Exports_js_4.SpeechKeyword.fromJSON(connectionMessage.textBody, this.privRequestSession.currentTurnAudioOffset);
+ result = new Exports_js_3.SpeechRecognitionResult(this.privRequestSession.requestId, keyword.Status === "Accepted" ? Exports_js_3.ResultReason.RecognizedKeyword : Exports_js_3.ResultReason.NoMatch, keyword.Text, keyword.Duration, keyword.Offset, void 0, void 0, void 0, void 0, keyword.asJson(), resultProps);
  if (keyword.Status !== "Accepted") {
  this.privLastResult = result;
  }
@@ -19974,7 +20624,7 @@
  }
  break;
  case "speech.startdetected":
- const speechStartDetected = Exports_js_4.SpeechDetected.fromJSON(connectionMessage.textBody);
+ const speechStartDetected = Exports_js_4.SpeechDetected.fromJSON(connectionMessage.textBody, this.privRequestSession.currentTurnAudioOffset);
  const speechStartEventArgs = new Exports_js_3.RecognitionEventArgs(speechStartDetected.Offset, this.privRequestSession.sessionId);
  if (!!this.privRecognizer.speechStartDetected) {
  this.privRecognizer.speechStartDetected(this.privRecognizer, speechStartEventArgs);
@@ -19987,9 +20637,9 @@
  } else {
  json = "{ Offset: 0 }";
  }
- const speechStopDetected = Exports_js_4.SpeechDetected.fromJSON(json);
- this.privRequestSession.onServiceRecognized(speechStopDetected.Offset + this.privRequestSession.currentTurnAudioOffset);
- const speechStopEventArgs = new Exports_js_3.RecognitionEventArgs(speechStopDetected.Offset + this.privRequestSession.currentTurnAudioOffset, this.privRequestSession.sessionId);
+ const speechStopDetected = Exports_js_4.SpeechDetected.fromJSON(json, this.privRequestSession.currentTurnAudioOffset);
+ this.privRequestSession.onServiceRecognized(speechStopDetected.Offset);
+ const speechStopEventArgs = new Exports_js_3.RecognitionEventArgs(speechStopDetected.Offset, this.privRequestSession.sessionId);
  if (!!this.privRecognizer.speechEndDetected) {
  this.privRecognizer.speechEndDetected(this.privRecognizer, speechStopEventArgs);
  }
@@ -20087,7 +20737,7 @@
  return;
  }
  sendAgentContext(connection) {
- const guid = Exports_js_2.createGuid();
+ const guid = (0, Exports_js_2.createGuid)();
  const speechActivityTemplate = this.privDialogServiceConnector.properties.getProperty(Exports_js_3.PropertyId.Conversation_Speech_Activity_Template);
  const agentContext = {
  channelData: "",
@@ -20102,9 +20752,8 @@
  }
  fireEventForResult(serviceResult, properties) {
  const resultReason = Exports_js_4.EnumTranslation.implTranslateRecognitionResult(serviceResult.RecognitionStatus);
- const offset = serviceResult.Offset + this.privRequestSession.currentTurnAudioOffset;
- const result = new Exports_js_3.SpeechRecognitionResult(this.privRequestSession.requestId, resultReason, serviceResult.DisplayText, serviceResult.Duration, offset, serviceResult.Language, serviceResult.LanguageDetectionConfidence, void 0, void 0, JSON.stringify(serviceResult), properties);
- const ev = new Exports_js_3.SpeechRecognitionEventArgs(result, offset, this.privRequestSession.sessionId);
+ const result = new Exports_js_3.SpeechRecognitionResult(this.privRequestSession.requestId, resultReason, serviceResult.DisplayText, serviceResult.Duration, serviceResult.Offset, serviceResult.Language, serviceResult.LanguageDetectionConfidence, void 0, void 0, serviceResult.asJson(), properties);
+ const ev = new Exports_js_3.SpeechRecognitionEventArgs(result, serviceResult.Offset, this.privRequestSession.sessionId);
  return ev;
  }
  handleResponseMessage(responseMessage) {
@@ -20157,22 +20806,23 @@
  const keywordDurations = keywordDurationPropertyValue === void 0 ? [] : keywordDurationPropertyValue.split(";");
  const keywordDefinitionArray = [];
  for (let i = 0; i < keywords.length; i++) {
- const definition = {};
- definition.text = keywords[i];
+ const definition = {
+ text: keywords[i]
+ };
  if (i < keywordOffsets.length) {
- definition.offset = Number(keywordOffsets[i]);
+ definition.startOffset = Number(keywordOffsets[i]);
  }
  if (i < keywordDurations.length) {
  definition.duration = Number(keywordDurations[i]);
  }
  keywordDefinitionArray.push(definition);
  }
- this.speechContext.setSection("invocationSource", "VoiceActivationWithKeyword");
- this.speechContext.setSection("keywordDetection", [{
+ this.speechContext.getContext().invocationSource = InvocationSource_js_1.InvocationSource.VoiceActivationWithKeyword;
+ this.speechContext.getContext().keywordDetection = [{
  clientDetectedKeywords: keywordDefinitionArray,
- onReject: { action: "EndOfTurn" },
- type: "startTrigger"
- }]);
+ onReject: { action: KeywordDetection_js_1.OnRejectAction.EndOfTurn },
+ type: KeywordDetection_js_1.KeywordDetectionType.StartTrigger
+ }];
  }
  };
  exports.DialogServiceAdapter = DialogServiceAdapter;
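
Editor's note: the dialog adapter's keyword section now uses the InvocationSource/KeywordDetection enums, renames each keyword's offset to startOffset, and changes the type literal from "startTrigger" to the enum's "StartTrigger". The speech.context fragment it writes, sketched with an illustrative keyword:

const context = {};
context.invocationSource = "VoiceActivationWithKeyword"; // InvocationSource.VoiceActivationWithKeyword
context.keywordDetection = [{
  clientDetectedKeywords: [{
    text: "computer",  // illustrative keyword
    startOffset: 0,    // was "offset" before this version
    duration: 9600000  // illustrative duration
  }],
  onReject: { action: "EndOfTurn" }, // OnRejectAction.EndOfTurn
  type: "StartTrigger"               // KeywordDetectionType.StartTrigger (was the literal "startTrigger")
}];
console.log(JSON.stringify(context));
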
@@ -20486,7 +21136,7 @@
  var ConversationConnectionFactory = class extends ConnectionFactoryBase_js_1.ConnectionFactoryBase {
  create(config, authInfo, connectionId) {
  const endpointHost = config.parameters.getProperty(Exports_js_3.PropertyId.ConversationTranslator_Host, ConversationConnectionConfig_js_1.ConversationConnectionConfig.host);
- const correlationId = config.parameters.getProperty(Exports_js_3.PropertyId.ConversationTranslator_CorrelationId, Exports_js_2.createGuid());
+ const correlationId = config.parameters.getProperty(Exports_js_3.PropertyId.ConversationTranslator_CorrelationId, (0, Exports_js_2.createGuid)());
  const endpoint = `wss://${endpointHost}${ConversationConnectionConfig_js_1.ConversationConnectionConfig.webSocketPath}`;
  const token = config.parameters.getProperty(Exports_js_3.PropertyId.ConversationTranslator_Token, void 0);
  Contracts_js_1.Contracts.throwIfNullOrUndefined(token, "token");
@@ -20495,7 +21145,7 @@
  queryParams[ConversationConnectionConfig_js_1.ConversationConnectionConfig.configParams.token] = token;
  queryParams[ConversationConnectionConfig_js_1.ConversationConnectionConfig.configParams.correlationId] = correlationId;
  const enableCompression = config.parameters.getProperty("SPEECH-EnableWebsocketCompression", "false") === "true";
- return new Exports_js_1.WebsocketConnection(endpoint, queryParams, {}, new ConversationWebsocketMessageFormatter_js_1.ConversationWebsocketMessageFormatter(), Exports_js_1.ProxyInfo.fromRecognizerConfig(config), enableCompression, connectionId);
+ return Promise.resolve(new Exports_js_1.WebsocketConnection(endpoint, queryParams, {}, new ConversationWebsocketMessageFormatter_js_1.ConversationWebsocketMessageFormatter(), Exports_js_1.ProxyInfo.fromRecognizerConfig(config), enableCompression, connectionId));
  }
  };
  exports.ConversationConnectionFactory = ConversationConnectionFactory;
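
Editor's note: ConversationConnectionFactory.create() now resolves to the connection instead of returning it synchronously, which suggests the connection-factory contract has gone async in this version. Callers must await the result; a sketch with a hypothetical stub factory:

async function connect(factory, config, authInfo, connectionId) {
  // before: const connection = factory.create(config, authInfo, connectionId);
  const connection = await factory.create(config, authInfo, connectionId);
  return connection;
}

// Stub factory mirroring the new Promise-returning contract:
const factory = {
  create: (config, authInfo, connectionId) => Promise.resolve({ id: connectionId })
};
connect(factory, {}, {}, "conn-1").then((c) => console.log(c.id)); // "conn-1"
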
@@ -20514,7 +21164,7 @@
20514
21164
  this.privIsDisposed = false;
20515
21165
  this.privDetachables = new Array();
20516
21166
  this.privSessionId = sessionId;
20517
- this.privRequestId = Exports_js_1.createNoDashGuid();
21167
+ this.privRequestId = (0, Exports_js_1.createNoDashGuid)();
20518
21168
  this.privRequestCompletionDeferral = new Exports_js_1.Deferred();
20519
21169
  }
20520
21170
  get sessionId() {
@@ -20545,7 +21195,7 @@
20545
21195
  if (!continuousRecognition) {
20546
21196
  this.onComplete();
20547
21197
  } else {
20548
- this.privRequestId = Exports_js_1.createNoDashGuid();
21198
+ this.privRequestId = (0, Exports_js_1.createNoDashGuid)();
20549
21199
  }
20550
21200
  }
20551
21201
  async dispose() {
@@ -20983,6 +21633,7 @@
20983
21633
  "../../node_modules/microsoft-cognitiveservices-speech-sdk/distrib/lib/src/common.speech/Transcription/ServiceMessages/Exports.js"(exports) {
20984
21634
  "use strict";
20985
21635
  Object.defineProperty(exports, "__esModule", { value: true });
21636
+ exports.TextResponsePayload = exports.SpeechResponsePayload = exports.ParticipantPayloadResponse = exports.ParticipantsListPayloadResponse = exports.CommandResponsePayload = void 0;
20986
21637
  var CommandResponsePayload_js_1 = require_CommandResponsePayload();
20987
21638
  Object.defineProperty(exports, "CommandResponsePayload", { enumerable: true, get: function() {
20988
21639
  return CommandResponsePayload_js_1.CommandResponsePayload;
@@ -21030,7 +21681,7 @@
21030
21681
  this.postConnectImplOverride = (connection) => this.conversationConnectImpl(connection);
21031
21682
  this.configConnectionOverride = () => this.configConnection();
21032
21683
  this.disconnectOverride = () => this.privDisconnect();
21033
- this.privConversationRequestSession = new ConversationRequestSession_js_1.ConversationRequestSession(Exports_js_1.createNoDashGuid());
21684
+ this.privConversationRequestSession = new ConversationRequestSession_js_1.ConversationRequestSession((0, Exports_js_1.createNoDashGuid)());
21034
21685
  this.privConversationConnectionFactory = connectionFactory;
21035
21686
  this.privConversationIsDisposed = false;
21036
21687
  }
@@ -21111,6 +21762,11 @@
21111
21762
  case "command":
21112
21763
  const commandPayload = Exports_js_4.CommandResponsePayload.fromJSON(message.textBody);
21113
21764
  switch (commandPayload.command.toLowerCase()) {
21765
+ /**
21766
+ * 'ParticipantList' is the first message sent to the user after the websocket connection has opened.
21767
+ * The consuming client must wait for this message to arrive
21768
+ * before starting to send their own data.
21769
+ */
21114
21770
  case "participantlist":
21115
21771
  const participantsPayload = Exports_js_4.ParticipantsListPayloadResponse.fromJSON(message.textBody);
21116
21772
  const participantsResult = participantsPayload.participants.map((p) => {
@@ -21129,46 +21785,79 @@
21129
21785
  this.privConversationServiceConnector.participantsListReceived(this.privConversationServiceConnector, new ConversationTranslatorEventArgs_js_1.ParticipantsListEventArgs(participantsPayload.roomid, participantsPayload.token, participantsPayload.translateTo, participantsPayload.profanityFilter, participantsPayload.roomProfanityFilter, participantsPayload.roomLocked, participantsPayload.muteAll, participantsResult, sessionId));
21130
21786
  }
21131
21787
  break;
21788
+ /**
21789
+ * 'SetTranslateToLanguages' represents the list of languages being used in the Conversation by all users(?).
21790
+ * This is sent at the start of the Conversation.
21791
+ */
21132
21792
  case "settranslatetolanguages":
21133
21793
  if (!!this.privConversationServiceConnector.participantUpdateCommandReceived) {
21134
21794
  this.privConversationServiceConnector.participantUpdateCommandReceived(this.privConversationServiceConnector, new ConversationTranslatorEventArgs_js_1.ParticipantAttributeEventArgs(commandPayload.participantId, ConversationTranslatorInterfaces_js_1.ConversationTranslatorCommandTypes.setTranslateToLanguages, commandPayload.value, sessionId));
21135
21795
  }
21136
21796
  break;
21797
+ /**
21798
+ * 'SetProfanityFiltering' lets the client set the level of profanity filtering.
21799
+ * If sent by a participant, the setting will affect only their own profanity level.
21800
+ * If sent by the host, the setting will affect all participants, including the host.
21801
+ * Note: the profanity filter values differ from the Speech Service's (?): 'marked', 'raw', 'removed', 'tagged'.
21802
+ */
21137
21803
  case "setprofanityfiltering":
21138
21804
  if (!!this.privConversationServiceConnector.participantUpdateCommandReceived) {
21139
21805
  this.privConversationServiceConnector.participantUpdateCommandReceived(this.privConversationServiceConnector, new ConversationTranslatorEventArgs_js_1.ParticipantAttributeEventArgs(commandPayload.participantId, ConversationTranslatorInterfaces_js_1.ConversationTranslatorCommandTypes.setProfanityFiltering, commandPayload.value, sessionId));
21140
21806
  }
21141
21807
  break;
21808
+ /**
21809
+ * 'SetMute' is sent if the participant has been muted by the host.
21810
+ * Check the 'participantId' to determine if the current user has been muted.
21811
+ */
21142
21812
  case "setmute":
21143
21813
  if (!!this.privConversationServiceConnector.participantUpdateCommandReceived) {
21144
21814
  this.privConversationServiceConnector.participantUpdateCommandReceived(this.privConversationServiceConnector, new ConversationTranslatorEventArgs_js_1.ParticipantAttributeEventArgs(commandPayload.participantId, ConversationTranslatorInterfaces_js_1.ConversationTranslatorCommandTypes.setMute, commandPayload.value, sessionId));
21145
21815
  }
21146
21816
  break;
21817
+ /**
21818
+ * 'SetMuteAll' is sent if the Conversation has been muted by the host.
21819
+ */
21147
21820
  case "setmuteall":
21148
21821
  if (!!this.privConversationServiceConnector.muteAllCommandReceived) {
21149
21822
  this.privConversationServiceConnector.muteAllCommandReceived(this.privConversationServiceConnector, new ConversationTranslatorEventArgs_js_1.MuteAllEventArgs(commandPayload.value, sessionId));
21150
21823
  }
21151
21824
  break;
21825
+ /**
21826
+ * 'RoomExpirationWarning' is sent towards the end of the Conversation session to give a timeout warning.
21827
+ */
21152
21828
  case "roomexpirationwarning":
21153
21829
  if (!!this.privConversationServiceConnector.conversationExpiration) {
21154
21830
  this.privConversationServiceConnector.conversationExpiration(this.privConversationServiceConnector, new Exports_js_2.ConversationExpirationEventArgs(commandPayload.value, this.privConversationRequestSession.sessionId));
21155
21831
  }
21156
21832
  break;
21833
+ /**
21834
+ * 'SetUseTts' is sent as a confirmation if the user requests TTS to be turned on or off.
21835
+ */
21157
21836
  case "setusetts":
21158
21837
  if (!!this.privConversationServiceConnector.participantUpdateCommandReceived) {
21159
21838
  this.privConversationServiceConnector.participantUpdateCommandReceived(this.privConversationServiceConnector, new ConversationTranslatorEventArgs_js_1.ParticipantAttributeEventArgs(commandPayload.participantId, ConversationTranslatorInterfaces_js_1.ConversationTranslatorCommandTypes.setUseTTS, commandPayload.value, sessionId));
21160
21839
  }
21161
21840
  break;
21841
+ /**
21842
+ * 'SetLockState' is set if the host has locked or unlocked the Conversation.
21843
+ */
21162
21844
  case "setlockstate":
21163
21845
  if (!!this.privConversationServiceConnector.lockRoomCommandReceived) {
21164
21846
  this.privConversationServiceConnector.lockRoomCommandReceived(this.privConversationServiceConnector, new ConversationTranslatorEventArgs_js_1.LockRoomEventArgs(commandPayload.value, sessionId));
21165
21847
  }
21166
21848
  break;
21849
+ /**
21850
+ * 'ChangeNickname' is received if a user changes their display name.
21851
+ * Any cached participants list should be updated to reflect the new display name.
21852
+ */
21167
21853
  case "changenickname":
21168
21854
  if (!!this.privConversationServiceConnector.participantUpdateCommandReceived) {
21169
21855
  this.privConversationServiceConnector.participantUpdateCommandReceived(this.privConversationServiceConnector, new ConversationTranslatorEventArgs_js_1.ParticipantAttributeEventArgs(commandPayload.participantId, ConversationTranslatorInterfaces_js_1.ConversationTranslatorCommandTypes.changeNickname, commandPayload.value, sessionId));
21170
21856
  }
21171
21857
  break;
21858
+ /**
21859
+ * 'JoinSession' is sent when a user joins the Conversation.
21860
+ */
21172
21861
  case "joinsession":
21173
21862
  const joinParticipantPayload = Exports_js_4.ParticipantPayloadResponse.fromJSON(message.textBody);
21174
21863
  const joiningParticipant = {
@@ -21184,6 +21873,9 @@
21184
21873
  this.privConversationServiceConnector.participantJoinCommandReceived(this.privConversationServiceConnector, new ConversationTranslatorEventArgs_js_1.ParticipantEventArgs(joiningParticipant, sessionId));
21185
21874
  }
21186
21875
  break;
21876
+ /**
21877
+ * 'LeaveSession' is sent when a user leaves the Conversation.
21878
+ */
21187
21879
  case "leavesession":
21188
21880
  const leavingParticipant = {
21189
21881
  id: commandPayload.participantId
@@ -21192,6 +21884,10 @@
21192
21884
  this.privConversationServiceConnector.participantLeaveCommandReceived(this.privConversationServiceConnector, new ConversationTranslatorEventArgs_js_1.ParticipantEventArgs(leavingParticipant, sessionId));
21193
21885
  }
21194
21886
  break;
21887
+ /**
21888
+ * 'DisconnectSession' is sent when a user is disconnected from the session (e.g. network problem).
21889
+ * Check the 'ParticipantId' to check whether the message is for the current user.
21890
+ */
21195
21891
  case "disconnectsession":
21196
21892
  const disconnectParticipant = {
21197
21893
  id: commandPayload.participantId
@@ -21208,11 +21904,20 @@
21208
21904
  this.authentication = token;
21209
21905
  this.privConversationServiceConnector.onToken(token);
21210
21906
  break;
21907
+ /**
21908
+ * Message not recognized.
21909
+ */
21211
21910
  default:
21212
21911
  break;
21213
21912
  }
21214
21913
  break;
21914
+ /**
21915
+ * 'partial' (or 'hypothesis') represents an unfinalized speech message.
21916
+ */
21215
21917
  case "partial":
21918
+ /**
21919
+ * 'final' (or 'phrase') represents a finalized speech message.
21920
+ */
21216
21921
  case "final":
21217
21922
  const speechPayload = Exports_js_4.SpeechResponsePayload.fromJSON(message.textBody);
21218
21923
  const conversationResultReason = conversationMessageType === "final" ? Exports_js_2.ResultReason.TranslatedParticipantSpeech : Exports_js_2.ResultReason.TranslatingParticipantSpeech;
@@ -21236,6 +21941,9 @@
21236
21941
  }
21237
21942
  }
21238
21943
  break;
21944
+ /**
21945
+ * "translated_message" is a text message or instant message (IM).
21946
+ */
21239
21947
  case "translated_message":
21240
21948
  const textPayload = Exports_js_4.TextResponsePayload.fromJSON(message.textBody);
21241
21949
  const textResult = new Exports_js_2.ConversationTranslationResult(textPayload.participantId, this.getTranslations(textPayload.translations), textPayload.language, void 0, void 0, textPayload.originalText, void 0, void 0, void 0, message.textBody, void 0);
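The comments added above spell out the conversation translator's websocket protocol: a 'command' envelope carrying one of the room-control commands, 'partial'/'final' speech results, and 'translated_message' for text/instant messages. A reduced client-side dispatcher over those documented types might look like the sketch below; the payload shapes are assumptions trimmed to the fields the comments mention:

  // Hypothetical, reduced message shapes; real payloads carry more fields.
  type ConversationMessage =
    | { type: "command"; command: string; participantId: string; value: unknown }
    | { type: "partial" | "final"; participantId: string; text: string }
    | { type: "translated_message"; participantId: string; originalText: string };

  function handleMessage(message: ConversationMessage): void {
    switch (message.type) {
      case "command":
        switch (message.command.toLowerCase()) {
          case "participantlist":
            // First message after the socket opens; safe to start sending now.
            break;
          case "setmute":
            // Compare participantId with the local user's id to learn whether
            // the current user was muted.
            break;
          case "roomexpirationwarning":
            // value carries the remaining time before the room expires.
            break;
          default:
            break; // Unrecognized commands are ignored, as in the adapter above.
        }
        break;
      case "partial":
        // Unfinalized hypothesis; later messages will supersede it.
        break;
      case "final":
        // Finalized speech result for the utterance.
        break;
      case "translated_message":
        // Text/instant message rather than speech.
        break;
    }
  }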
@@ -21563,6 +22271,7 @@
21563
22271
  var Contracts_js_1 = require_Contracts();
21564
22272
  var Exports_js_2 = require_Exports3();
21565
22273
  var Exports_js_3 = require_Exports7();
22274
+ var PhraseDetectionContext_js_1 = require_PhraseDetectionContext();
21566
22275
  var TranscriberRecognizer = class extends Exports_js_2.Recognizer {
21567
22276
  /**
21568
22277
  * TranscriberRecognizer constructor.
@@ -21616,10 +22325,10 @@
21616
22325
  return this.isMeetingRecognizer;
21617
22326
  }
21618
22327
  startContinuousRecognitionAsync(cb, err) {
21619
- Exports_js_1.marshalPromiseToCallbacks(this.startContinuousRecognitionAsyncImpl(Exports_js_3.RecognitionMode.Conversation), cb, err);
22328
+ (0, Exports_js_1.marshalPromiseToCallbacks)(this.startContinuousRecognitionAsyncImpl(PhraseDetectionContext_js_1.RecognitionMode.Conversation), cb, err);
21620
22329
  }
21621
22330
  stopContinuousRecognitionAsync(cb, err) {
21622
- Exports_js_1.marshalPromiseToCallbacks(this.stopContinuousRecognitionAsyncImpl(), cb, err);
22331
+ (0, Exports_js_1.marshalPromiseToCallbacks)(this.stopContinuousRecognitionAsyncImpl(), cb, err);
21623
22332
  }
21624
22333
  async close() {
21625
22334
  if (!this.privDisposedRecognizer) {
@@ -21720,6 +22429,7 @@
21720
22429
  "../../node_modules/microsoft-cognitiveservices-speech-sdk/distrib/lib/src/common.speech/Transcription/Exports.js"(exports) {
21721
22430
  "use strict";
21722
22431
  Object.defineProperty(exports, "__esModule", { value: true });
22432
+ exports.InternalParticipants = exports.ConversationTranslatorMessageTypes = exports.ConversationTranslatorCommandTypes = exports.ParticipantsListEventArgs = exports.ParticipantEventArgs = exports.ParticipantAttributeEventArgs = exports.MuteAllEventArgs = exports.LockRoomEventArgs = exports.ConversationReceivedTranslationEventArgs = exports.TranscriberRecognizer = exports.ConversationRecognizerFactory = exports.ConversationConnectionConfig = exports.ConversationManager = void 0;
21723
22433
  var ConversationManager_js_1 = require_ConversationManager();
21724
22434
  Object.defineProperty(exports, "ConversationManager", { enumerable: true, get: function() {
21725
22435
  return ConversationManager_js_1.ConversationManager;
@@ -21874,7 +22584,7 @@
21874
22584
  this.privNextSearchTextIndex = 0;
21875
22585
  this.privSentenceOffset = 0;
21876
22586
  this.privNextSearchSentenceIndex = 0;
21877
- this.privRequestId = Exports_js_1.createNoDashGuid();
22587
+ this.privRequestId = (0, Exports_js_1.createNoDashGuid)();
21878
22588
  this.privTurnDeferral = new Exports_js_1.Deferred();
21879
22589
  this.privTurnDeferral.resolve();
21880
22590
  }
@@ -22166,6 +22876,9 @@
22166
22876
  }
22167
22877
  });
22168
22878
  }
22879
+ get synthesizerConfig() {
22880
+ return this.privSynthesizerConfig;
22881
+ }
22169
22882
  get synthesisContext() {
22170
22883
  return this.privSynthesisContext;
22171
22884
  }
@@ -22390,13 +23103,13 @@
22390
23103
  return this.connectImpl();
22391
23104
  });
22392
23105
  }
22393
- this.privAuthFetchEventId = Exports_js_1.createNoDashGuid();
22394
- this.privConnectionId = Exports_js_1.createNoDashGuid();
23106
+ this.privAuthFetchEventId = (0, Exports_js_1.createNoDashGuid)();
23107
+ this.privConnectionId = (0, Exports_js_1.createNoDashGuid)();
22395
23108
  this.privSynthesisTurn.onPreConnectionStart(this.privAuthFetchEventId);
22396
23109
  const authPromise = isUnAuthorized ? this.privAuthentication.fetchOnExpiry(this.privAuthFetchEventId) : this.privAuthentication.fetch(this.privAuthFetchEventId);
22397
23110
  this.privConnectionPromise = authPromise.then(async (result) => {
22398
23111
  this.privSynthesisTurn.onAuthCompleted(false);
22399
- const connection = this.privConnectionFactory.create(this.privSynthesizerConfig, result, this.privConnectionId);
23112
+ const connection = await this.privConnectionFactory.create(this.privSynthesizerConfig, result, this.privConnectionId);
22400
23113
  connection.events.attach((event) => {
22401
23114
  this.connectionEvents.onEvent(event);
22402
23115
  });
@@ -22527,16 +23240,20 @@
22527
23240
  name: "WebRTC",
22528
23241
  webrtcConfig: {
22529
23242
  clientDescription: btoa(this.privSynthesizerConfig.parameters.getProperty(Exports_js_1.PropertyId.TalkingAvatarService_WebRTC_SDP)),
22530
- iceServers: this.privAvatarSynthesizer.iceServers
23243
+ iceServers: this.privAvatarConfig.remoteIceServers ?? this.privAvatarSynthesizer.iceServers
22531
23244
  }
22532
23245
  },
22533
23246
  talkingAvatar: {
22534
23247
  background: {
22535
- color: this.privAvatarConfig.backgroundColor
23248
+ color: this.privAvatarConfig.backgroundColor,
23249
+ image: {
23250
+ url: this.privAvatarConfig.backgroundImage?.toString()
23251
+ }
22536
23252
  },
22537
23253
  character: this.privAvatarConfig.character,
22538
23254
  customized: this.privAvatarConfig.customized,
22539
- style: this.privAvatarConfig.style
23255
+ style: this.privAvatarConfig.style,
23256
+ useBuiltInVoice: this.privAvatarConfig.useBuiltInVoice
22540
23257
  }
22541
23258
  };
22542
23259
  }
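Three avatar options appear in this request payload: caller-supplied `remoteIceServers` now take precedence over the servers the synthesizer obtained from the service, the background can specify an `image.url` next to its color, and `useBuiltInVoice` is forwarded. A sketch of the payload section being built, with an assumed config shape limited to the fields visible here:

  // Assumed config shape; only fields visible in this diff are modeled.
  interface AvatarConfigLike {
    character: string;
    style: string;
    customized: boolean;
    useBuiltInVoice?: boolean;
    backgroundColor?: string;
    backgroundImage?: URL;
    remoteIceServers?: RTCIceServer[]; // DOM lib type
  }

  function buildTalkingAvatarSection(config: AvatarConfigLike) {
    return {
      background: {
        color: config.backgroundColor,
        // The URL instance is serialized with toString(), as above.
        image: { url: config.backgroundImage?.toString() },
      },
      character: config.character,
      customized: config.customized,
      style: config.style,
      useBuiltInVoice: config.useBuiltInVoice,
    };
  }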
@@ -22711,6 +23428,21 @@
22711
23428
  get SpeechServiceConfig() {
22712
23429
  return this.privSpeechServiceConfig;
22713
23430
  }
23431
+ setContextFromJson(contextJson) {
23432
+ const context = JSON.parse(contextJson);
23433
+ if (context.system) {
23434
+ this.privSpeechServiceConfig.Context.system = context.system;
23435
+ }
23436
+ if (context.os) {
23437
+ this.privSpeechServiceConfig.Context.os = context.os;
23438
+ }
23439
+ if (context.audio) {
23440
+ this.privSpeechServiceConfig.Context.audio = context.audio;
23441
+ }
23442
+ if (context.synthesis) {
23443
+ this.privSpeechServiceConfig.Context.synthesis = context.synthesis;
23444
+ }
23445
+ }
22714
23446
  };
22715
23447
  exports.SynthesizerConfig = SynthesizerConfig;
22716
23448
  }
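The new `setContextFromJson` parses a context document and copies only its `system`, `os`, `audio`, and `synthesis` sections onto the existing speech-service context, leaving everything else untouched. A usage sketch; the declared instance and section contents are illustrative:

  // Illustrative stand-in for a real SynthesizerConfig instance.
  declare const synthesizerConfig: {
    setContextFromJson(contextJson: string): void;
  };

  synthesizerConfig.setContextFromJson(
    JSON.stringify({
      system: { name: "SpeechSDK", version: "1.45.0", build: "JavaScript" },
      os: { platform: "Browser" },
      // Top-level keys other than system/os/audio/synthesis are ignored.
    })
  );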
@@ -22943,6 +23675,7 @@
22943
23675
  resultProps.setProperty(Exports_js_3.PropertyId.SpeechServiceResponse_JsonResult, connectionMessage.textBody);
22944
23676
  }
22945
23677
  switch (connectionMessage.path.toLowerCase()) {
23678
+ // Profile management response for create, fetch, delete, reset
22946
23679
  case "speaker.profiles":
22947
23680
  const response = JSON.parse(connectionMessage.textBody);
22948
23681
  switch (response.operation.toLowerCase()) {
@@ -22962,11 +23695,13 @@
22962
23695
  }
22963
23696
  processed = true;
22964
23697
  break;
23698
+ // Activation and authorization phrase response
22965
23699
  case "speaker.phrases":
22966
23700
  const phraseResponse = JSON.parse(connectionMessage.textBody);
22967
23701
  this.handlePhrasesResponse(phraseResponse, connectionMessage.requestId);
22968
23702
  processed = true;
22969
23703
  break;
23704
+ // Enrollment response
22970
23705
  case "speaker.profile.enrollment":
22971
23706
  const enrollmentResponse = JSON.parse(connectionMessage.textBody);
22972
23707
  const result = new Exports_js_3.VoiceProfileEnrollmentResult(this.enrollmentReasonFrom(!!enrollmentResponse.enrollment ? enrollmentResponse.enrollment.enrollmentStatus : enrollmentResponse.status.statusCode), !!enrollmentResponse.enrollment ? JSON.stringify(enrollmentResponse.enrollment) : void 0, enrollmentResponse.status.reason);
@@ -23264,7 +23999,7 @@
23264
23999
  exports.Context = Context;
23265
24000
  var System = class {
23266
24001
  constructor() {
23267
- const SPEECHSDK_CLIENTSDK_VERSION = "1.36.0";
24002
+ const SPEECHSDK_CLIENTSDK_VERSION = "1.45.0";
23268
24003
  this.name = "SpeechSDK";
23269
24004
  this.version = SPEECHSDK_CLIENTSDK_VERSION;
23270
24005
  this.build = "JavaScript";
@@ -23319,23 +24054,19 @@
23319
24054
  "../../node_modules/microsoft-cognitiveservices-speech-sdk/distrib/lib/src/common.speech/Exports.js"(exports) {
23320
24055
  "use strict";
23321
24056
  var __createBinding = exports && exports.__createBinding || (Object.create ? function(o, m, k, k2) {
23322
- if (k2 === void 0)
23323
- k2 = k;
24057
+ if (k2 === void 0) k2 = k;
23324
24058
  Object.defineProperty(o, k2, { enumerable: true, get: function() {
23325
24059
  return m[k];
23326
24060
  } });
23327
24061
  } : function(o, m, k, k2) {
23328
- if (k2 === void 0)
23329
- k2 = k;
24062
+ if (k2 === void 0) k2 = k;
23330
24063
  o[k2] = m[k];
23331
24064
  });
23332
24065
  var __exportStar = exports && exports.__exportStar || function(m, exports2) {
23333
- for (var p in m)
23334
- if (p !== "default" && !exports2.hasOwnProperty(p))
23335
- __createBinding(exports2, m, p);
24066
+ for (var p in m) if (p !== "default" && !Object.prototype.hasOwnProperty.call(exports2, p)) __createBinding(exports2, m, p);
23336
24067
  };
23337
24068
  Object.defineProperty(exports, "__esModule", { value: true });
23338
- exports.AutoDetectSourceLanguagesOpenRangeOptionName = exports.ForceDictationPropertyName = exports.ServicePropertiesPropertyName = exports.CancellationErrorCodePropertyName = exports.OutputFormatPropertyName = void 0;
24069
+ exports.AutoDetectSourceLanguagesOpenRangeOptionName = exports.ForceDictationPropertyName = exports.ServicePropertiesPropertyName = exports.CancellationErrorCodePropertyName = exports.OutputFormatPropertyName = exports.SpeechSynthesisAdapter = exports.AvatarSynthesisAdapter = void 0;
23339
24070
  __exportStar(require_CognitiveSubscriptionKeyAuthentication(), exports);
23340
24071
  __exportStar(require_CognitiveTokenAuthentication(), exports);
23341
24072
  __exportStar(require_IAuthentication(), exports);
@@ -23375,7 +24106,6 @@
23375
24106
  __exportStar(require_RequestSession(), exports);
23376
24107
  __exportStar(require_SpeechContext(), exports);
23377
24108
  __exportStar(require_DynamicGrammarBuilder(), exports);
23378
- __exportStar(require_DynamicGrammarInterfaces(), exports);
23379
24109
  __exportStar(require_DialogServiceAdapter(), exports);
23380
24110
  __exportStar(require_AgentConfig(), exports);
23381
24111
  __exportStar(require_Exports6(), exports);
@@ -23401,7 +24131,7 @@
23401
24131
  exports.CancellationErrorCodePropertyName = "CancellationErrorCode";
23402
24132
  exports.ServicePropertiesPropertyName = "ServiceProperties";
23403
24133
  exports.ForceDictationPropertyName = "ForceDictation";
23404
- exports.AutoDetectSourceLanguagesOpenRangeOptionName = "OpenRange";
24134
+ exports.AutoDetectSourceLanguagesOpenRangeOptionName = "UND";
23405
24135
  }
23406
24136
  });
23407
24137
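The sentinel for open-range language auto-detection changes from "OpenRange" to "UND", the BCP-47 tag for an undetermined language. Code that goes through the public SDK surface never touches this constant; a hedged sketch of that path, with placeholder subscription values:

  import {
    AudioConfig,
    AutoDetectSourceLanguageConfig,
    SpeechConfig,
    SpeechRecognizer,
  } from "microsoft-cognitiveservices-speech-sdk";

  // fromOpenRange() applies the open-range option internally, so the "UND"
  // sentinel never appears in application code.
  const speechConfig = SpeechConfig.fromSubscription("<key>", "<region>");
  const recognizer = SpeechRecognizer.FromConfig(
    speechConfig,
    AutoDetectSourceLanguageConfig.fromOpenRange(),
    AudioConfig.fromDefaultMicrophoneInput()
  );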
 
@@ -23410,20 +24140,16 @@
23410
24140
  "../../node_modules/microsoft-cognitiveservices-speech-sdk/distrib/lib/microsoft.cognitiveservices.speech.sdk.js"(exports) {
23411
24141
  "use strict";
23412
24142
  var __createBinding = exports && exports.__createBinding || (Object.create ? function(o, m, k, k2) {
23413
- if (k2 === void 0)
23414
- k2 = k;
24143
+ if (k2 === void 0) k2 = k;
23415
24144
  Object.defineProperty(o, k2, { enumerable: true, get: function() {
23416
24145
  return m[k];
23417
24146
  } });
23418
24147
  } : function(o, m, k, k2) {
23419
- if (k2 === void 0)
23420
- k2 = k;
24148
+ if (k2 === void 0) k2 = k;
23421
24149
  o[k2] = m[k];
23422
24150
  });
23423
24151
  var __exportStar = exports && exports.__exportStar || function(m, exports2) {
23424
- for (var p in m)
23425
- if (p !== "default" && !exports2.hasOwnProperty(p))
23426
- __createBinding(exports2, m, p);
24152
+ for (var p in m) if (p !== "default" && !Object.prototype.hasOwnProperty.call(exports2, p)) __createBinding(exports2, m, p);
23427
24153
  };
23428
24154
  Object.defineProperty(exports, "__esModule", { value: true });
23429
24155
  var Exports_js_1 = require_Exports7();
@@ -23435,12 +24161,9 @@
23435
24161
  // src/SpeechServices/SpeechToText/createSpeechRecognitionPonyfill.ts
23436
24162
  var import_microsoft_cognitiveservices_speech_sdk2 = __toESM(require_microsoft_cognitiveservices_speech_sdk());
23437
24163
 
23438
- // node_modules/valibot/dist/index.js
23439
- var EMOJI_REGEX = (
23440
- // eslint-disable-next-line redos-detector/no-unsafe-regex, regexp/no-dupe-disjunctions -- false positives
23441
- new RegExp("^(?:[\\u{1F1E6}-\\u{1F1FF}]{2}|\\u{1F3F4}[\\u{E0061}-\\u{E007A}]{2}[\\u{E0030}-\\u{E0039}\\u{E0061}-\\u{E007A}]{1,3}\\u{E007F}|(?:\\p{Emoji}\\uFE0F\\u20E3?|\\p{Emoji_Modifier_Base}\\p{Emoji_Modifier}?|\\p{Emoji_Presentation})(?:\\u200D(?:\\p{Emoji}\\uFE0F\\u20E3?|\\p{Emoji_Modifier_Base}\\p{Emoji_Modifier}?|\\p{Emoji_Presentation}))*)+$", "u")
23442
- );
24164
+ // ../../node_modules/valibot/dist/index.js
23443
24165
  var store;
24166
+ // @__NO_SIDE_EFFECTS__
23444
24167
  function getGlobalConfig(config2) {
23445
24168
  return {
23446
24169
  lang: config2?.lang ?? store?.lang,
@@ -23450,17 +24173,21 @@
23450
24173
  };
23451
24174
  }
23452
24175
  var store2;
24176
+ // @__NO_SIDE_EFFECTS__
23453
24177
  function getGlobalMessage(lang) {
23454
24178
  return store2?.get(lang);
23455
24179
  }
23456
24180
  var store3;
24181
+ // @__NO_SIDE_EFFECTS__
23457
24182
  function getSchemaMessage(lang) {
23458
24183
  return store3?.get(lang);
23459
24184
  }
23460
24185
  var store4;
24186
+ // @__NO_SIDE_EFFECTS__
23461
24187
  function getSpecificMessage(reference, lang) {
23462
24188
  return store4?.get(reference)?.get(lang);
23463
24189
  }
24190
+ // @__NO_SIDE_EFFECTS__
23464
24191
  function _stringify(input) {
23465
24192
  const type = typeof input;
23466
24193
  if (type === "string") {
@@ -23477,7 +24204,7 @@
23477
24204
  function _addIssue(context, label, dataset, config2, other) {
23478
24205
  const input = other && "input" in other ? other.input : dataset.value;
23479
24206
  const expected = other?.expected ?? context.expects ?? null;
23480
- const received = other?.received ?? _stringify(input);
24207
+ const received = other?.received ?? /* @__PURE__ */ _stringify(input);
23481
24208
  const issue = {
23482
24209
  kind: context.kind,
23483
24210
  type: context.type,
@@ -23493,12 +24220,12 @@
23493
24220
  abortPipeEarly: config2.abortPipeEarly
23494
24221
  };
23495
24222
  const isSchema = context.kind === "schema";
23496
- const message = other?.message ?? context.message ?? getSpecificMessage(context.reference, issue.lang) ?? (isSchema ? getSchemaMessage(issue.lang) : null) ?? config2.message ?? getGlobalMessage(issue.lang);
23497
- if (message) {
23498
- issue.message = typeof message === "function" ? (
24223
+ const message2 = other?.message ?? context.message ?? /* @__PURE__ */ getSpecificMessage(context.reference, issue.lang) ?? (isSchema ? /* @__PURE__ */ getSchemaMessage(issue.lang) : null) ?? config2.message ?? /* @__PURE__ */ getGlobalMessage(issue.lang);
24224
+ if (message2 !== void 0) {
24225
+ issue.message = typeof message2 === "function" ? (
23499
24226
  // @ts-expect-error
23500
- message(issue)
23501
- ) : message;
24227
+ message2(issue)
24228
+ ) : message2;
23502
24229
  }
23503
24230
  if (isSchema) {
23504
24231
  dataset.typed = false;
@@ -23509,18 +24236,25 @@
23509
24236
  dataset.issues = [issue];
23510
24237
  }
23511
24238
  }
23512
- function _joinExpects(values, separator) {
23513
- const list = [...new Set(values)];
24239
+ // @__NO_SIDE_EFFECTS__
24240
+ function _getStandardProps(context) {
24241
+ return {
24242
+ version: 1,
24243
+ vendor: "valibot",
24244
+ validate(value2) {
24245
+ return context["~run"]({ value: value2 }, /* @__PURE__ */ getGlobalConfig());
24246
+ }
24247
+ };
24248
+ }
24249
+ // @__NO_SIDE_EFFECTS__
24250
+ function _joinExpects(values2, separator) {
24251
+ const list = [...new Set(values2)];
23514
24252
  if (list.length > 1) {
23515
24253
  return `(${list.join(` ${separator} `)})`;
23516
24254
  }
23517
24255
  return list[0] ?? "never";
23518
24256
  }
23519
24257
  var ValiError = class extends Error {
23520
- /**
23521
- * The error issues.
23522
- */
23523
- issues;
23524
24258
  /**
23525
24259
  * Creates a Valibot error with useful information.
23526
24260
  *
@@ -23532,55 +24266,63 @@
23532
24266
  this.issues = issues;
23533
24267
  }
23534
24268
  };
23535
- function maxValue(requirement, message) {
24269
+ var EMOJI_REGEX = (
24270
+ // eslint-disable-next-line redos-detector/no-unsafe-regex, regexp/no-dupe-disjunctions -- false positives
24271
+ new RegExp("^(?:[\\u{1F1E6}-\\u{1F1FF}]{2}|\\u{1F3F4}[\\u{E0061}-\\u{E007A}]{2}[\\u{E0030}-\\u{E0039}\\u{E0061}-\\u{E007A}]{1,3}\\u{E007F}|(?:\\p{Emoji}\\uFE0F\\u20E3?|\\p{Emoji_Modifier_Base}\\p{Emoji_Modifier}?|\\p{Emoji_Presentation})(?:\\u200D(?:\\p{Emoji}\\uFE0F\\u20E3?|\\p{Emoji_Modifier_Base}\\p{Emoji_Modifier}?|\\p{Emoji_Presentation}))*)+$", "u")
24272
+ );
24273
+ // @__NO_SIDE_EFFECTS__
24274
+ function maxValue(requirement, message2) {
23536
24275
  return {
23537
24276
  kind: "validation",
23538
24277
  type: "max_value",
23539
24278
  reference: maxValue,
23540
24279
  async: false,
23541
- expects: `<=${requirement instanceof Date ? requirement.toJSON() : _stringify(requirement)}`,
24280
+ expects: `<=${requirement instanceof Date ? requirement.toJSON() : /* @__PURE__ */ _stringify(requirement)}`,
23542
24281
  requirement,
23543
- message,
23544
- _run(dataset, config2) {
23545
- if (dataset.typed && dataset.value > this.requirement) {
24282
+ message: message2,
24283
+ "~run"(dataset, config2) {
24284
+ if (dataset.typed && !(dataset.value <= this.requirement)) {
23546
24285
  _addIssue(this, "value", dataset, config2, {
23547
- received: dataset.value instanceof Date ? dataset.value.toJSON() : _stringify(dataset.value)
24286
+ received: dataset.value instanceof Date ? dataset.value.toJSON() : /* @__PURE__ */ _stringify(dataset.value)
23548
24287
  });
23549
24288
  }
23550
24289
  return dataset;
23551
24290
  }
23552
24291
  };
23553
24292
  }
23554
- function minValue(requirement, message) {
24293
+ // @__NO_SIDE_EFFECTS__
24294
+ function minValue(requirement, message2) {
23555
24295
  return {
23556
24296
  kind: "validation",
23557
24297
  type: "min_value",
23558
24298
  reference: minValue,
23559
24299
  async: false,
23560
- expects: `>=${requirement instanceof Date ? requirement.toJSON() : _stringify(requirement)}`,
24300
+ expects: `>=${requirement instanceof Date ? requirement.toJSON() : /* @__PURE__ */ _stringify(requirement)}`,
23561
24301
  requirement,
23562
- message,
23563
- _run(dataset, config2) {
23564
- if (dataset.typed && dataset.value < this.requirement) {
24302
+ message: message2,
24303
+ "~run"(dataset, config2) {
24304
+ if (dataset.typed && !(dataset.value >= this.requirement)) {
23565
24305
  _addIssue(this, "value", dataset, config2, {
23566
- received: dataset.value instanceof Date ? dataset.value.toJSON() : _stringify(dataset.value)
24306
+ received: dataset.value instanceof Date ? dataset.value.toJSON() : /* @__PURE__ */ _stringify(dataset.value)
23567
24307
  });
23568
24308
  }
23569
24309
  return dataset;
23570
24310
  }
23571
24311
  };
23572
24312
  }
24313
+ // @__NO_SIDE_EFFECTS__
23573
24314
  function readonly() {
23574
24315
  return {
23575
24316
  kind: "transformation",
23576
24317
  type: "readonly",
23577
24318
  reference: readonly,
23578
24319
  async: false,
23579
- _run(dataset) {
24320
+ "~run"(dataset) {
23580
24321
  return dataset;
23581
24322
  }
23582
24323
  };
23583
24324
  }
24325
+ // @__NO_SIDE_EFFECTS__
23584
24326
  function transform(operation) {
23585
24327
  return {
23586
24328
  kind: "transformation",
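Note the flipped comparisons in `maxValue`/`minValue` above: `dataset.value > this.requirement` becomes `!(dataset.value <= this.requirement)`. Both read the same for ordinary numbers, but incomparable values such as `NaN` answer false to every ordered comparison, so the negated form now rejects them instead of letting them pass:

  const requirement = 10;
  const value = Number.NaN;

  console.log(value > requirement);     // false — old maxValue check let NaN through
  console.log(!(value <= requirement)); // true  — new check flags NaN as an issue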
@@ -23588,12 +24330,23 @@
23588
24330
  reference: transform,
23589
24331
  async: false,
23590
24332
  operation,
23591
- _run(dataset) {
24333
+ "~run"(dataset) {
23592
24334
  dataset.value = this.operation(dataset.value);
23593
24335
  return dataset;
23594
24336
  }
23595
24337
  };
23596
24338
  }
24339
+ // @__NO_SIDE_EFFECTS__
24340
+ function getFallback(schema, dataset, config2) {
24341
+ return typeof schema.fallback === "function" ? (
24342
+ // @ts-expect-error
24343
+ schema.fallback(dataset, config2)
24344
+ ) : (
24345
+ // @ts-expect-error
24346
+ schema.fallback
24347
+ );
24348
+ }
24349
+ // @__NO_SIDE_EFFECTS__
23597
24350
  function getDefault(schema, dataset, config2) {
23598
24351
  return typeof schema.default === "function" ? (
23599
24352
  // @ts-expect-error
@@ -23603,7 +24356,8 @@
23603
24356
  schema.default
23604
24357
  );
23605
24358
  }
23606
- function array(item, message) {
24359
+ // @__NO_SIDE_EFFECTS__
24360
+ function array(item, message2) {
23607
24361
  return {
23608
24362
  kind: "schema",
23609
24363
  type: "array",
@@ -23611,15 +24365,18 @@
23611
24365
  expects: "Array",
23612
24366
  async: false,
23613
24367
  item,
23614
- message,
23615
- _run(dataset, config2) {
24368
+ message: message2,
24369
+ get "~standard"() {
24370
+ return /* @__PURE__ */ _getStandardProps(this);
24371
+ },
24372
+ "~run"(dataset, config2) {
23616
24373
  const input = dataset.value;
23617
24374
  if (Array.isArray(input)) {
23618
24375
  dataset.typed = true;
23619
24376
  dataset.value = [];
23620
24377
  for (let key = 0; key < input.length; key++) {
23621
24378
  const value2 = input[key];
23622
- const itemDataset = this.item._run({ typed: false, value: value2 }, config2);
24379
+ const itemDataset = this.item["~run"]({ value: value2 }, config2);
23623
24380
  if (itemDataset.issues) {
23624
24381
  const pathItem = {
23625
24382
  type: "array",
@@ -23656,15 +24413,19 @@
23656
24413
  }
23657
24414
  };
23658
24415
  }
23659
- function boolean(message) {
24416
+ // @__NO_SIDE_EFFECTS__
24417
+ function boolean(message2) {
23660
24418
  return {
23661
24419
  kind: "schema",
23662
24420
  type: "boolean",
23663
24421
  reference: boolean,
23664
24422
  expects: "boolean",
23665
24423
  async: false,
23666
- message,
23667
- _run(dataset, config2) {
24424
+ message: message2,
24425
+ get "~standard"() {
24426
+ return /* @__PURE__ */ _getStandardProps(this);
24427
+ },
24428
+ "~run"(dataset, config2) {
23668
24429
  if (typeof dataset.value === "boolean") {
23669
24430
  dataset.typed = true;
23670
24431
  } else {
@@ -23674,18 +24435,27 @@
23674
24435
  }
23675
24436
  };
23676
24437
  }
23677
- function enum_(enum__, message) {
23678
- const options = Object.entries(enum__).filter(([key]) => isNaN(+key)).map(([, value2]) => value2);
24438
+ // @__NO_SIDE_EFFECTS__
24439
+ function enum_(enum__, message2) {
24440
+ const options = [];
24441
+ for (const key in enum__) {
24442
+ if (`${+key}` !== key || typeof enum__[key] !== "string" || !Object.is(enum__[enum__[key]], +key)) {
24443
+ options.push(enum__[key]);
24444
+ }
24445
+ }
23679
24446
  return {
23680
24447
  kind: "schema",
23681
24448
  type: "enum",
23682
24449
  reference: enum_,
23683
- expects: _joinExpects(options.map(_stringify), "|"),
24450
+ expects: /* @__PURE__ */ _joinExpects(options.map(_stringify), "|"),
23684
24451
  async: false,
23685
24452
  enum: enum__,
23686
24453
  options,
23687
- message,
23688
- _run(dataset, config2) {
24454
+ message: message2,
24455
+ get "~standard"() {
24456
+ return /* @__PURE__ */ _getStandardProps(this);
24457
+ },
24458
+ "~run"(dataset, config2) {
23689
24459
  if (this.options.includes(dataset.value)) {
23690
24460
  dataset.typed = true;
23691
24461
  } else {
@@ -23695,15 +24465,19 @@
23695
24465
  }
23696
24466
  };
23697
24467
  }
23698
- function function_(message) {
24468
+ // @__NO_SIDE_EFFECTS__
24469
+ function function_(message2) {
23699
24470
  return {
23700
24471
  kind: "schema",
23701
24472
  type: "function",
23702
24473
  reference: function_,
23703
24474
  expects: "Function",
23704
24475
  async: false,
23705
- message,
23706
- _run(dataset, config2) {
24476
+ message: message2,
24477
+ get "~standard"() {
24478
+ return /* @__PURE__ */ _getStandardProps(this);
24479
+ },
24480
+ "~run"(dataset, config2) {
23707
24481
  if (typeof dataset.value === "function") {
23708
24482
  dataset.typed = true;
23709
24483
  } else {
@@ -23713,6 +24487,7 @@
23713
24487
  }
23714
24488
  };
23715
24489
  }
24490
+ // @__NO_SIDE_EFFECTS__
23716
24491
  function _merge(value1, value2) {
23717
24492
  if (typeof value1 === typeof value2) {
23718
24493
  if (value1 === value2 || value1 instanceof Date && value2 instanceof Date && +value1 === +value2) {
@@ -23721,7 +24496,7 @@
23721
24496
  if (value1 && value2 && value1.constructor === Object && value2.constructor === Object) {
23722
24497
  for (const key in value2) {
23723
24498
  if (key in value1) {
23724
- const dataset = _merge(value1[key], value2[key]);
24499
+ const dataset = /* @__PURE__ */ _merge(value1[key], value2[key]);
23725
24500
  if (dataset.issue) {
23726
24501
  return dataset;
23727
24502
  }
@@ -23735,7 +24510,7 @@
23735
24510
  if (Array.isArray(value1) && Array.isArray(value2)) {
23736
24511
  if (value1.length === value2.length) {
23737
24512
  for (let index = 0; index < value1.length; index++) {
23738
- const dataset = _merge(value1[index], value2[index]);
24513
+ const dataset = /* @__PURE__ */ _merge(value1[index], value2[index]);
23739
24514
  if (dataset.issue) {
23740
24515
  return dataset;
23741
24516
  }
@@ -23747,28 +24522,29 @@
23747
24522
  }
23748
24523
  return { issue: true };
23749
24524
  }
23750
- function intersect(options, message) {
24525
+ // @__NO_SIDE_EFFECTS__
24526
+ function intersect(options, message2) {
23751
24527
  return {
23752
24528
  kind: "schema",
23753
24529
  type: "intersect",
23754
24530
  reference: intersect,
23755
- expects: _joinExpects(
24531
+ expects: /* @__PURE__ */ _joinExpects(
23756
24532
  options.map((option) => option.expects),
23757
24533
  "&"
23758
24534
  ),
23759
24535
  async: false,
23760
24536
  options,
23761
- message,
23762
- _run(dataset, config2) {
24537
+ message: message2,
24538
+ get "~standard"() {
24539
+ return /* @__PURE__ */ _getStandardProps(this);
24540
+ },
24541
+ "~run"(dataset, config2) {
23763
24542
  if (this.options.length) {
23764
24543
  const input = dataset.value;
23765
24544
  let outputs;
23766
24545
  dataset.typed = true;
23767
24546
  for (const schema of this.options) {
23768
- const optionDataset = schema._run(
23769
- { typed: false, value: input },
23770
- config2
23771
- );
24547
+ const optionDataset = schema["~run"]({ value: input }, config2);
23772
24548
  if (optionDataset.issues) {
23773
24549
  if (dataset.issues) {
23774
24550
  dataset.issues.push(...optionDataset.issues);
@@ -23794,7 +24570,7 @@
23794
24570
  if (dataset.typed) {
23795
24571
  dataset.value = outputs[0];
23796
24572
  for (let index = 1; index < outputs.length; index++) {
23797
- const mergeDataset = _merge(dataset.value, outputs[index]);
24573
+ const mergeDataset = /* @__PURE__ */ _merge(dataset.value, outputs[index]);
23798
24574
  if (mergeDataset.issue) {
23799
24575
  _addIssue(this, "type", dataset, config2, {
23800
24576
  received: "unknown"
@@ -23811,15 +24587,19 @@
23811
24587
  }
23812
24588
  };
23813
24589
  }
23814
- function number(message) {
24590
+ // @__NO_SIDE_EFFECTS__
24591
+ function number(message2) {
23815
24592
  return {
23816
24593
  kind: "schema",
23817
24594
  type: "number",
23818
24595
  reference: number,
23819
24596
  expects: "number",
23820
24597
  async: false,
23821
- message,
23822
- _run(dataset, config2) {
24598
+ message: message2,
24599
+ get "~standard"() {
24600
+ return /* @__PURE__ */ _getStandardProps(this);
24601
+ },
24602
+ "~run"(dataset, config2) {
23823
24603
  if (typeof dataset.value === "number" && !isNaN(dataset.value)) {
23824
24604
  dataset.typed = true;
23825
24605
  } else {
@@ -23829,56 +24609,82 @@
23829
24609
  }
23830
24610
  };
23831
24611
  }
23832
- function object(entries, message) {
24612
+ // @__NO_SIDE_EFFECTS__
24613
+ function object(entries2, message2) {
23833
24614
  return {
23834
24615
  kind: "schema",
23835
24616
  type: "object",
23836
24617
  reference: object,
23837
24618
  expects: "Object",
23838
24619
  async: false,
23839
- entries,
23840
- message,
23841
- _run(dataset, config2) {
24620
+ entries: entries2,
24621
+ message: message2,
24622
+ get "~standard"() {
24623
+ return /* @__PURE__ */ _getStandardProps(this);
24624
+ },
24625
+ "~run"(dataset, config2) {
23842
24626
  const input = dataset.value;
23843
24627
  if (input && typeof input === "object") {
23844
24628
  dataset.typed = true;
23845
24629
  dataset.value = {};
23846
24630
  for (const key in this.entries) {
23847
- const value2 = input[key];
23848
- const valueDataset = this.entries[key]._run(
23849
- { typed: false, value: value2 },
23850
- config2
23851
- );
23852
- if (valueDataset.issues) {
23853
- const pathItem = {
23854
- type: "object",
23855
- origin: "value",
23856
- input,
23857
- key,
23858
- value: value2
23859
- };
23860
- for (const issue of valueDataset.issues) {
23861
- if (issue.path) {
23862
- issue.path.unshift(pathItem);
23863
- } else {
23864
- issue.path = [pathItem];
24631
+ const valueSchema = this.entries[key];
24632
+ if (key in input || (valueSchema.type === "exact_optional" || valueSchema.type === "optional" || valueSchema.type === "nullish") && // @ts-expect-error
24633
+ valueSchema.default !== void 0) {
24634
+ const value2 = key in input ? (
24635
+ // @ts-expect-error
24636
+ input[key]
24637
+ ) : /* @__PURE__ */ getDefault(valueSchema);
24638
+ const valueDataset = valueSchema["~run"]({ value: value2 }, config2);
24639
+ if (valueDataset.issues) {
24640
+ const pathItem = {
24641
+ type: "object",
24642
+ origin: "value",
24643
+ input,
24644
+ key,
24645
+ value: value2
24646
+ };
24647
+ for (const issue of valueDataset.issues) {
24648
+ if (issue.path) {
24649
+ issue.path.unshift(pathItem);
24650
+ } else {
24651
+ issue.path = [pathItem];
24652
+ }
24653
+ dataset.issues?.push(issue);
24654
+ }
24655
+ if (!dataset.issues) {
24656
+ dataset.issues = valueDataset.issues;
24657
+ }
24658
+ if (config2.abortEarly) {
24659
+ dataset.typed = false;
24660
+ break;
23865
24661
  }
23866
- dataset.issues?.push(issue);
23867
24662
  }
23868
- if (!dataset.issues) {
23869
- dataset.issues = valueDataset.issues;
24663
+ if (!valueDataset.typed) {
24664
+ dataset.typed = false;
23870
24665
  }
24666
+ dataset.value[key] = valueDataset.value;
24667
+ } else if (valueSchema.fallback !== void 0) {
24668
+ dataset.value[key] = /* @__PURE__ */ getFallback(valueSchema);
24669
+ } else if (valueSchema.type !== "exact_optional" && valueSchema.type !== "optional" && valueSchema.type !== "nullish") {
24670
+ _addIssue(this, "key", dataset, config2, {
24671
+ input: void 0,
24672
+ expected: `"${key}"`,
24673
+ path: [
24674
+ {
24675
+ type: "object",
24676
+ origin: "key",
24677
+ input,
24678
+ key,
24679
+ // @ts-expect-error
24680
+ value: input[key]
24681
+ }
24682
+ ]
24683
+ });
23871
24684
  if (config2.abortEarly) {
23872
- dataset.typed = false;
23873
24685
  break;
23874
24686
  }
23875
24687
  }
23876
- if (!valueDataset.typed) {
23877
- dataset.typed = false;
23878
- }
23879
- if (valueDataset.value !== void 0 || key in input) {
23880
- dataset.value[key] = valueDataset.value;
23881
- }
23882
24688
  }
23883
24689
  } else {
23884
24690
  _addIssue(this, "type", dataset, config2);
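The rewritten `object` schema above changes behavior in three ways: a key that is absent and not optional now raises a dedicated missing-key issue (instead of handing `undefined` to the wrapped schema), absent `optional`/`nullish` entries with defaults are filled via `getDefault`, and entries with a `fallback` are filled via the new `getFallback`. A sketch against valibot's public API, which these bundled internals implement:

  import * as v from "valibot";

  const schema = v.object({
    name: v.string(),
    lang: v.optional(v.string(), "en-US"), // default applied when absent
  });

  const missing = v.safeParse(schema, {});
  console.log(missing.success); // false — "name" reported as a missing key

  const parsed = v.parse(schema, { name: "x" });
  console.log(parsed);          // { name: "x", lang: "en-US" }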
@@ -23887,45 +24693,46 @@
23887
24693
  }
23888
24694
  };
23889
24695
  }
23890
- function optional(wrapped, ...args) {
23891
- const schema = {
24696
+ // @__NO_SIDE_EFFECTS__
24697
+ function optional(wrapped, default_) {
24698
+ return {
23892
24699
  kind: "schema",
23893
24700
  type: "optional",
23894
24701
  reference: optional,
23895
24702
  expects: `(${wrapped.expects} | undefined)`,
23896
24703
  async: false,
23897
24704
  wrapped,
23898
- _run(dataset, config2) {
24705
+ default: default_,
24706
+ get "~standard"() {
24707
+ return /* @__PURE__ */ _getStandardProps(this);
24708
+ },
24709
+ "~run"(dataset, config2) {
23899
24710
  if (dataset.value === void 0) {
23900
- if ("default" in this) {
23901
- dataset.value = getDefault(
23902
- this,
23903
- dataset,
23904
- config2
23905
- );
24711
+ if (this.default !== void 0) {
24712
+ dataset.value = /* @__PURE__ */ getDefault(this, dataset, config2);
23906
24713
  }
23907
24714
  if (dataset.value === void 0) {
23908
24715
  dataset.typed = true;
23909
24716
  return dataset;
23910
24717
  }
23911
24718
  }
23912
- return this.wrapped._run(dataset, config2);
24719
+ return this.wrapped["~run"](dataset, config2);
23913
24720
  }
23914
24721
  };
23915
- if (0 in args) {
23916
- schema.default = args[0];
23917
- }
23918
- return schema;
23919
24722
  }
23920
- function string(message) {
24723
+ // @__NO_SIDE_EFFECTS__
24724
+ function string(message2) {
23921
24725
  return {
23922
24726
  kind: "schema",
23923
24727
  type: "string",
23924
24728
  reference: string,
23925
24729
  expects: "string",
23926
24730
  async: false,
23927
- message,
23928
- _run(dataset, config2) {
24731
+ message: message2,
24732
+ get "~standard"() {
24733
+ return /* @__PURE__ */ _getStandardProps(this);
24734
+ },
24735
+ "~run"(dataset, config2) {
23929
24736
  if (typeof dataset.value === "string") {
23930
24737
  dataset.typed = true;
23931
24738
  } else {
@@ -23935,15 +24742,19 @@
23935
24742
  }
23936
24743
  };
23937
24744
  }
23938
- function undefined_(message) {
24745
+ // @__NO_SIDE_EFFECTS__
24746
+ function undefined_(message2) {
23939
24747
  return {
23940
24748
  kind: "schema",
23941
24749
  type: "undefined",
23942
24750
  reference: undefined_,
23943
24751
  expects: "undefined",
23944
24752
  async: false,
23945
- message,
23946
- _run(dataset, config2) {
24753
+ message: message2,
24754
+ get "~standard"() {
24755
+ return /* @__PURE__ */ _getStandardProps(this);
24756
+ },
24757
+ "~run"(dataset, config2) {
23947
24758
  if (dataset.value === void 0) {
23948
24759
  dataset.typed = true;
23949
24760
  } else {
@@ -23953,6 +24764,7 @@
23953
24764
  }
23954
24765
  };
23955
24766
  }
24767
+ // @__NO_SIDE_EFFECTS__
23956
24768
  function _subIssues(datasets) {
23957
24769
  let issues;
23958
24770
  if (datasets) {
@@ -23966,27 +24778,28 @@
23966
24778
  }
23967
24779
  return issues;
23968
24780
  }
23969
- function union(options, message) {
24781
+ // @__NO_SIDE_EFFECTS__
24782
+ function union(options, message2) {
23970
24783
  return {
23971
24784
  kind: "schema",
23972
24785
  type: "union",
23973
24786
  reference: union,
23974
- expects: _joinExpects(
24787
+ expects: /* @__PURE__ */ _joinExpects(
23975
24788
  options.map((option) => option.expects),
23976
24789
  "|"
23977
24790
  ),
23978
24791
  async: false,
23979
24792
  options,
23980
- message,
23981
- _run(dataset, config2) {
24793
+ message: message2,
24794
+ get "~standard"() {
24795
+ return /* @__PURE__ */ _getStandardProps(this);
24796
+ },
24797
+ "~run"(dataset, config2) {
23982
24798
  let validDataset;
23983
24799
  let typedDatasets;
23984
24800
  let untypedDatasets;
23985
24801
  for (const schema of this.options) {
23986
- const optionDataset = schema._run(
23987
- { typed: false, value: dataset.value },
23988
- config2
23989
- );
24802
+ const optionDataset = schema["~run"]({ value: dataset.value }, config2);
23990
24803
  if (optionDataset.typed) {
23991
24804
  if (optionDataset.issues) {
23992
24805
  if (typedDatasets) {
@@ -24014,14 +24827,14 @@
24014
24827
  return typedDatasets[0];
24015
24828
  }
24016
24829
  _addIssue(this, "type", dataset, config2, {
24017
- issues: _subIssues(typedDatasets)
24830
+ issues: /* @__PURE__ */ _subIssues(typedDatasets)
24018
24831
  });
24019
24832
  dataset.typed = true;
24020
24833
  } else if (untypedDatasets?.length === 1) {
24021
24834
  return untypedDatasets[0];
24022
24835
  } else {
24023
24836
  _addIssue(this, "type", dataset, config2, {
24024
- issues: _subIssues(untypedDatasets)
24837
+ issues: /* @__PURE__ */ _subIssues(untypedDatasets)
24025
24838
  });
24026
24839
  }
24027
24840
  return dataset;
@@ -24029,20 +24842,21 @@
24029
24842
  };
24030
24843
  }
24031
24844
  function parse(schema, input, config2) {
24032
- const dataset = schema._run(
24033
- { typed: false, value: input },
24034
- getGlobalConfig(config2)
24035
- );
24845
+ const dataset = schema["~run"]({ value: input }, /* @__PURE__ */ getGlobalConfig(config2));
24036
24846
  if (dataset.issues) {
24037
24847
  throw new ValiError(dataset.issues);
24038
24848
  }
24039
24849
  return dataset.value;
24040
24850
  }
24851
+ // @__NO_SIDE_EFFECTS__
24041
24852
  function pipe(...pipe2) {
24042
24853
  return {
24043
24854
  ...pipe2[0],
24044
24855
  pipe: pipe2,
24045
- _run(dataset, config2) {
24856
+ get "~standard"() {
24857
+ return /* @__PURE__ */ _getStandardProps(this);
24858
+ },
24859
+ "~run"(dataset, config2) {
24046
24860
  for (const item of pipe2) {
24047
24861
  if (item.kind !== "metadata") {
24048
24862
  if (dataset.issues && (item.kind === "schema" || item.kind === "transformation")) {
@@ -24050,7 +24864,7 @@
24050
24864
  break;
24051
24865
  }
24052
24866
  if (!dataset.issues || !config2.abortEarly && !config2.abortPipeEarly) {
24053
- dataset = item._run(dataset, config2);
24867
+ dataset = item["~run"](dataset, config2);
24054
24868
  }
24055
24869
  }
24056
24870
  }
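The `"~standard"` getters added throughout this section implement the Standard Schema interface (`version: 1`, `vendor: "valibot"`; see `_getStandardProps` above), and the internal `_run` entry point is renamed to `"~run"` to match. Any Standard Schema consumer can now validate without valibot-specific imports; a sketch:

  import * as v from "valibot";

  const schema = v.string();
  const std = schema["~standard"]; // { version: 1, vendor: "valibot", validate }

  const result = std.validate("hello");
  // validate may return a Promise for async schemas; valibot's synchronous
  // schemas return the result object directly.
  if (!(result instanceof Promise) && !result.issues) {
    console.log(result.value); // "hello"
  }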
@@ -26405,7 +27219,7 @@
26405
27219
  }
26406
27220
  var meta = document.createElement("meta");
26407
27221
  meta.setAttribute("name", "web-speech-cognitive-services");
26408
- meta.setAttribute("content", `version=${"8.1.3-main.6ed2e3d"}`);
27222
+ meta.setAttribute("content", `version=${"8.1.3-main.b33949a"}`);
26409
27223
  document.head.appendChild(meta);
26410
27224
 
26411
27225
  // src/index.umd.js