@stream-io/video-client 1.32.0 → 1.33.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (70) hide show
  1. package/CHANGELOG.md +16 -0
  2. package/dist/index.browser.es.js +344 -91
  3. package/dist/index.browser.es.js.map +1 -1
  4. package/dist/index.cjs.js +345 -92
  5. package/dist/index.cjs.js.map +1 -1
  6. package/dist/index.es.js +344 -91
  7. package/dist/index.es.js.map +1 -1
  8. package/dist/src/Call.d.ts +3 -2
  9. package/dist/src/devices/AudioDeviceManager.d.ts +25 -0
  10. package/dist/src/devices/AudioDeviceManagerState.d.ts +24 -0
  11. package/dist/src/devices/CameraManager.d.ts +2 -2
  12. package/dist/src/devices/CameraManagerState.d.ts +3 -4
  13. package/dist/src/devices/{InputMediaDeviceManager.d.ts → DeviceManager.d.ts} +6 -6
  14. package/dist/src/devices/{InputMediaDeviceManagerState.d.ts → DeviceManagerState.d.ts} +4 -4
  15. package/dist/src/devices/MicrophoneManager.d.ts +5 -3
  16. package/dist/src/devices/MicrophoneManagerState.d.ts +6 -10
  17. package/dist/src/devices/ScreenShareManager.d.ts +4 -2
  18. package/dist/src/devices/ScreenShareState.d.ts +6 -2
  19. package/dist/src/devices/SpeakerState.d.ts +4 -4
  20. package/dist/src/devices/index.d.ts +2 -2
  21. package/dist/src/gen/coordinator/index.d.ts +169 -2
  22. package/dist/src/gen/video/sfu/models/models.d.ts +43 -0
  23. package/dist/src/rtc/BasePeerConnection.d.ts +2 -12
  24. package/dist/src/rtc/Publisher.d.ts +9 -6
  25. package/dist/src/rtc/Subscriber.d.ts +2 -1
  26. package/dist/src/rtc/TransceiverCache.d.ts +10 -11
  27. package/dist/src/rtc/index.d.ts +1 -1
  28. package/dist/src/rtc/{videoLayers.d.ts → layers.d.ts} +7 -1
  29. package/dist/src/rtc/types.d.ts +31 -0
  30. package/dist/src/sorting/participants.d.ts +5 -2
  31. package/package.json +3 -2
  32. package/src/Call.ts +13 -6
  33. package/src/__tests__/Call.publishing.test.ts +14 -3
  34. package/src/__tests__/StreamVideoClient.api.test.ts +1 -1
  35. package/src/devices/AudioDeviceManager.ts +61 -0
  36. package/src/devices/AudioDeviceManagerState.ts +44 -0
  37. package/src/devices/CameraManager.ts +4 -4
  38. package/src/devices/CameraManagerState.ts +9 -8
  39. package/src/devices/{InputMediaDeviceManager.ts → DeviceManager.ts} +11 -8
  40. package/src/devices/{InputMediaDeviceManagerState.ts → DeviceManagerState.ts} +7 -4
  41. package/src/devices/MicrophoneManager.ts +26 -6
  42. package/src/devices/MicrophoneManagerState.ts +18 -19
  43. package/src/devices/ScreenShareManager.ts +23 -4
  44. package/src/devices/ScreenShareState.ts +11 -3
  45. package/src/devices/SpeakerState.ts +6 -14
  46. package/src/devices/__tests__/CameraManager.test.ts +1 -0
  47. package/src/devices/__tests__/{InputMediaDeviceManager.test.ts → DeviceManager.test.ts} +4 -4
  48. package/src/devices/__tests__/{InputMediaDeviceManagerFilters.test.ts → DeviceManagerFilters.test.ts} +4 -4
  49. package/src/devices/__tests__/{InputMediaDeviceManagerState.test.ts → DeviceManagerState.test.ts} +2 -2
  50. package/src/devices/__tests__/MicrophoneManager.test.ts +41 -1
  51. package/src/devices/__tests__/NoiseCancellationStub.ts +3 -1
  52. package/src/devices/__tests__/ScreenShareManager.test.ts +5 -1
  53. package/src/devices/index.ts +2 -2
  54. package/src/events/__tests__/internal.test.ts +25 -11
  55. package/src/gen/coordinator/index.ts +169 -2
  56. package/src/gen/video/sfu/models/models.ts +65 -0
  57. package/src/rtc/BasePeerConnection.ts +1 -16
  58. package/src/rtc/Publisher.ts +74 -31
  59. package/src/rtc/Subscriber.ts +2 -4
  60. package/src/rtc/TransceiverCache.ts +23 -27
  61. package/src/rtc/__tests__/Publisher.test.ts +61 -29
  62. package/src/rtc/__tests__/{videoLayers.test.ts → layers.test.ts} +76 -1
  63. package/src/rtc/index.ts +2 -1
  64. package/src/rtc/{videoLayers.ts → layers.ts} +28 -7
  65. package/src/rtc/types.ts +44 -0
  66. package/src/sorting/__tests__/sorting.test.ts +106 -0
  67. package/src/sorting/participants.ts +30 -14
  68. package/src/sorting/presets.ts +16 -4
  69. package/src/store/CallState.ts +36 -10
  70. package/src/store/__tests__/CallState.test.ts +20 -2
package/dist/index.es.js CHANGED
@@ -114,6 +114,7 @@ const OwnCapability = {
114
114
  REMOVE_CALL_MEMBER: 'remove-call-member',
115
115
  SCREENSHARE: 'screenshare',
116
116
  SEND_AUDIO: 'send-audio',
117
+ SEND_CLOSED_CAPTIONS_CALL: 'send-closed-captions-call',
117
118
  SEND_VIDEO: 'send-video',
118
119
  START_BROADCAST_CALL: 'start-broadcast-call',
119
120
  START_CLOSED_CAPTIONS_CALL: 'start-closed-captions-call',
@@ -959,6 +960,24 @@ var ParticipantSource;
959
960
  */
960
961
  ParticipantSource[ParticipantSource["SRT"] = 5] = "SRT";
961
962
  })(ParticipantSource || (ParticipantSource = {}));
963
+ /**
964
+ * @generated from protobuf enum stream.video.sfu.models.AudioBitrateProfile
965
+ */
966
+ var AudioBitrateProfile;
967
+ (function (AudioBitrateProfile) {
968
+ /**
969
+ * @generated from protobuf enum value: AUDIO_BITRATE_PROFILE_VOICE_STANDARD_UNSPECIFIED = 0;
970
+ */
971
+ AudioBitrateProfile[AudioBitrateProfile["VOICE_STANDARD_UNSPECIFIED"] = 0] = "VOICE_STANDARD_UNSPECIFIED";
972
+ /**
973
+ * @generated from protobuf enum value: AUDIO_BITRATE_PROFILE_VOICE_HIGH_QUALITY = 1;
974
+ */
975
+ AudioBitrateProfile[AudioBitrateProfile["VOICE_HIGH_QUALITY"] = 1] = "VOICE_HIGH_QUALITY";
976
+ /**
977
+ * @generated from protobuf enum value: AUDIO_BITRATE_PROFILE_MUSIC_HIGH_QUALITY = 2;
978
+ */
979
+ AudioBitrateProfile[AudioBitrateProfile["MUSIC_HIGH_QUALITY"] = 2] = "MUSIC_HIGH_QUALITY";
980
+ })(AudioBitrateProfile || (AudioBitrateProfile = {}));
962
981
  /**
963
982
  * @generated from protobuf enum stream.video.sfu.models.ErrorCode
964
983
  */
@@ -1572,6 +1591,13 @@ class PublishOption$Type extends MessageType {
1572
1591
  kind: 'scalar',
1573
1592
  T: 8 /*ScalarType.BOOL*/,
1574
1593
  },
1594
+ {
1595
+ no: 10,
1596
+ name: 'audio_bitrate_profiles',
1597
+ kind: 'message',
1598
+ repeat: 2 /*RepeatType.UNPACKED*/,
1599
+ T: () => AudioBitrate,
1600
+ },
1575
1601
  ]);
1576
1602
  }
1577
1603
  }
@@ -1635,6 +1661,28 @@ let ICETrickle$Type$1 = class ICETrickle$Type extends MessageType {
1635
1661
  */
1636
1662
  const ICETrickle$1 = new ICETrickle$Type$1();
1637
1663
  // @generated message type with reflection information, may provide speed optimized methods
1664
+ class AudioBitrate$Type extends MessageType {
1665
+ constructor() {
1666
+ super('stream.video.sfu.models.AudioBitrate', [
1667
+ {
1668
+ no: 1,
1669
+ name: 'profile',
1670
+ kind: 'enum',
1671
+ T: () => [
1672
+ 'stream.video.sfu.models.AudioBitrateProfile',
1673
+ AudioBitrateProfile,
1674
+ 'AUDIO_BITRATE_PROFILE_',
1675
+ ],
1676
+ },
1677
+ { no: 2, name: 'bitrate', kind: 'scalar', T: 5 /*ScalarType.INT32*/ },
1678
+ ]);
1679
+ }
1680
+ }
1681
+ /**
1682
+ * @generated MessageType for protobuf message stream.video.sfu.models.AudioBitrate
1683
+ */
1684
+ const AudioBitrate = new AudioBitrate$Type();
1685
+ // @generated message type with reflection information, may provide speed optimized methods
1638
1686
  class TrackInfo$Type extends MessageType {
1639
1687
  constructor() {
1640
1688
  super('stream.video.sfu.models.TrackInfo', [
@@ -1985,6 +2033,8 @@ var models = /*#__PURE__*/Object.freeze({
1985
2033
  get AndroidThermalState () { return AndroidThermalState; },
1986
2034
  AppleState: AppleState,
1987
2035
  get AppleThermalState () { return AppleThermalState; },
2036
+ AudioBitrate: AudioBitrate,
2037
+ get AudioBitrateProfile () { return AudioBitrateProfile; },
1988
2038
  Browser: Browser,
1989
2039
  Call: Call$1,
1990
2040
  get CallEndedReason () { return CallEndedReason; },
@@ -4531,9 +4581,11 @@ const speaking = (a, b) => {
4531
4581
  * @param b the second participant.
4532
4582
  */
4533
4583
  const screenSharing = (a, b) => {
4534
- if (hasScreenShare(a) && !hasScreenShare(b))
4584
+ const hasA = hasScreenShare(a);
4585
+ const hasB = hasScreenShare(b);
4586
+ if (hasA && !hasB)
4535
4587
  return -1;
4536
- if (!hasScreenShare(a) && hasScreenShare(b))
4588
+ if (!hasA && hasB)
4537
4589
  return 1;
4538
4590
  return 0;
4539
4591
  };
@@ -4544,9 +4596,11 @@ const screenSharing = (a, b) => {
4544
4596
  * @param b the second participant.
4545
4597
  */
4546
4598
  const publishingVideo = (a, b) => {
4547
- if (hasVideo(a) && !hasVideo(b))
4599
+ const hasA = hasVideo(a);
4600
+ const hasB = hasVideo(b);
4601
+ if (hasA && !hasB)
4548
4602
  return -1;
4549
- if (!hasVideo(a) && hasVideo(b))
4603
+ if (!hasA && hasB)
4550
4604
  return 1;
4551
4605
  return 0;
4552
4606
  };
@@ -4557,9 +4611,11 @@ const publishingVideo = (a, b) => {
4557
4611
  * @param b the second participant.
4558
4612
  */
4559
4613
  const publishingAudio = (a, b) => {
4560
- if (hasAudio(a) && !hasAudio(b))
4614
+ const hasA = hasAudio(a);
4615
+ const hasB = hasAudio(b);
4616
+ if (hasA && !hasB)
4561
4617
  return -1;
4562
- if (!hasAudio(a) && hasAudio(b))
4618
+ if (!hasA && hasB)
4563
4619
  return 1;
4564
4620
  return 0;
4565
4621
  };
@@ -4590,14 +4646,22 @@ const pinned = (a, b) => {
4590
4646
  * A comparator creator which will set up a comparator which prioritizes
4591
4647
  * participants who are from a specific source (e.g., WebRTC, RTMP, WHIP...).
4592
4648
  *
4593
- * @param source the source to prioritize.
4649
+ * The priority of a source is determined by the order of the sources passed in.
4650
+ * e.g. [SRT, RTMP, WHIP] will prioritize SRT sources first, then RTMP, then WHIP.
4651
+ *
4652
+ * @param sources the sources to prioritize.
4594
4653
  */
4595
- const withParticipantSource = (source) => (a, b) => {
4596
- if (a.source === source && b.source !== source)
4597
- return -1;
4598
- if (a.source !== source && b.source === source)
4599
- return 1;
4600
- return 0;
4654
+ const withParticipantSource = (...sources) => {
4655
+ const priority = (i) => (i === -1 ? Number.MAX_SAFE_INTEGER : i);
4656
+ return (a, b) => {
4657
+ const priorityA = priority(sources.indexOf(a.source));
4658
+ const priorityB = priority(sources.indexOf(b.source));
4659
+ if (priorityA < priorityB)
4660
+ return -1;
4661
+ if (priorityA > priorityB)
4662
+ return 1;
4663
+ return 0;
4664
+ };
4601
4665
  };
4602
4666
  /**
4603
4667
  * A comparator creator which will set up a comparator which prioritizes
@@ -4621,9 +4685,11 @@ const reactionType = (type) => {
4621
4685
  * @param roles the roles to prioritize.
4622
4686
  */
4623
4687
  const role = (...roles) => (a, b) => {
4624
- if (hasAnyRole(a, roles) && !hasAnyRole(b, roles))
4688
+ const hasA = hasAnyRole(a, roles);
4689
+ const hasB = hasAnyRole(b, roles);
4690
+ if (hasA && !hasB)
4625
4691
  return -1;
4626
- if (!hasAnyRole(a, roles) && hasAnyRole(b, roles))
4692
+ if (!hasA && hasB)
4627
4693
  return 1;
4628
4694
  return 0;
4629
4695
  };
@@ -4656,23 +4722,27 @@ const ifInvisibleOrUnknownBy = conditional((a, b) => a.viewportVisibilityState?.
4656
4722
  a.viewportVisibilityState?.videoTrack === VisibilityState.UNKNOWN ||
4657
4723
  b.viewportVisibilityState?.videoTrack === VisibilityState.INVISIBLE ||
4658
4724
  b.viewportVisibilityState?.videoTrack === VisibilityState.UNKNOWN);
4725
+ /**
4726
+ * A comparator that prioritizes participants with video ingress sources.
4727
+ */
4728
+ const withVideoIngressSource = withParticipantSource(ParticipantSource.RTMP, ParticipantSource.SRT, ParticipantSource.WHIP, ParticipantSource.RTSP);
4659
4729
  /**
4660
4730
  * The default sorting preset.
4661
4731
  */
4662
- const defaultSortPreset = combineComparators(pinned, screenSharing, ifInvisibleBy(combineComparators(dominantSpeaker, speaking, reactionType('raised-hand'), publishingVideo, publishingAudio)));
4732
+ const defaultSortPreset = combineComparators(screenSharing, pinned, ifInvisibleBy(combineComparators(dominantSpeaker, speaking, reactionType('raised-hand'), publishingVideo, publishingAudio)));
4663
4733
  /**
4664
4734
  * The sorting preset for speaker layout.
4665
4735
  */
4666
- const speakerLayoutSortPreset = combineComparators(pinned, screenSharing, dominantSpeaker, ifInvisibleBy(combineComparators(speaking, reactionType('raised-hand'), publishingVideo, publishingAudio)));
4736
+ const speakerLayoutSortPreset = combineComparators(screenSharing, pinned, dominantSpeaker, ifInvisibleBy(combineComparators(speaking, reactionType('raised-hand'), withVideoIngressSource, publishingVideo, publishingAudio)));
4667
4737
  /**
4668
4738
  * The sorting preset for layouts that don't render all participants but
4669
4739
  * instead, render them in pages.
4670
4740
  */
4671
- const paginatedLayoutSortPreset = combineComparators(pinned, ifInvisibleOrUnknownBy(combineComparators(dominantSpeaker, speaking, reactionType('raised-hand'), publishingVideo, publishingAudio)));
4741
+ const paginatedLayoutSortPreset = combineComparators(pinned, ifInvisibleOrUnknownBy(combineComparators(dominantSpeaker, speaking, reactionType('raised-hand'), withVideoIngressSource, publishingVideo, publishingAudio)));
4672
4742
  /**
4673
4743
  * The sorting preset for livestreams and audio rooms.
4674
4744
  */
4675
- const livestreamOrAudioRoomSortPreset = combineComparators(ifInvisibleBy(combineComparators(dominantSpeaker, speaking, reactionType('raised-hand'), withParticipantSource(ParticipantSource.RTMP), publishingVideo, publishingAudio)), role('admin', 'host', 'speaker'));
4745
+ const livestreamOrAudioRoomSortPreset = combineComparators(ifInvisibleBy(combineComparators(dominantSpeaker, speaking, reactionType('raised-hand'), withVideoIngressSource, publishingVideo, publishingAudio)), role('admin', 'host', 'speaker'));
4676
4746
 
4677
4747
  /**
4678
4748
  * Returns the default egress object - when no egress data is available.
@@ -5010,14 +5080,32 @@ class CallState {
5010
5080
  * @param pins the latest pins from the server.
5011
5081
  */
5012
5082
  this.setServerSidePins = (pins) => {
5013
- const pinsLookup = pins.reduce((lookup, pin) => {
5014
- lookup[pin.sessionId] = Date.now();
5083
+ const now = Date.now();
5084
+ const unknownSymbol = Symbol('unknown');
5085
+ // generate a lookup table of pinnedAt timestamps by userId and sessionId
5086
+ // if there are multiple pins for the same userId, then we set the pinnedAt
5087
+ // to `unknown` (for that userId lookup) so that we don't apply any pin for that participant
5088
+ // this is to avoid conflicts during reconstruction of the pin state after reconnections
5089
+ // as sessionIds can change
5090
+ const pinnedAtByIdentifier = pins.reduce((lookup, pin, index) => {
5091
+ var _a;
5092
+ const pinnedAt = now + (pins.length - index);
5093
+ if (lookup[pin.userId]) {
5094
+ lookup[pin.userId] = unknownSymbol;
5095
+ }
5096
+ else {
5097
+ lookup[pin.userId] = pinnedAt;
5098
+ }
5099
+ lookup[_a = pin.sessionId] ?? (lookup[_a] = pinnedAt);
5015
5100
  return lookup;
5016
5101
  }, {});
5017
5102
  return this.setParticipants((participants) => participants.map((participant) => {
5018
- const serverSidePinnedAt = pinsLookup[participant.sessionId];
5103
+ // first check by sessionId as that is 100% correct, then by attempt reconstruction by userId
5104
+ const serverSidePinnedAt = pinnedAtByIdentifier[participant.sessionId] ??
5105
+ pinnedAtByIdentifier[participant.userId];
5019
5106
  // the participant is newly pinned
5020
- if (serverSidePinnedAt) {
5107
+ if (typeof serverSidePinnedAt === 'number' &&
5108
+ typeof participant.pin?.pinnedAt !== 'number') {
5021
5109
  return {
5022
5110
  ...participant,
5023
5111
  pin: {
@@ -5028,7 +5116,8 @@ class CallState {
5028
5116
  }
5029
5117
  // the participant is no longer pinned server side
5030
5118
  // we need to reset the pin
5031
- if (participant.pin && !participant.pin.isLocalPin) {
5119
+ if (typeof serverSidePinnedAt !== 'number' &&
5120
+ participant.pin?.isLocalPin === false) {
5032
5121
  return {
5033
5122
  ...participant,
5034
5123
  pin: undefined,
@@ -5765,7 +5854,7 @@ const getSdkVersion = (sdk) => {
5765
5854
  return sdk ? `${sdk.major}.${sdk.minor}.${sdk.patch}` : '0.0.0-development';
5766
5855
  };
5767
5856
 
5768
- const version = "1.32.0";
5857
+ const version = "1.33.1";
5769
5858
  const [major, minor, patch] = version.split('.');
5770
5859
  let sdkInfo = {
5771
5860
  type: SdkType.PLAIN_JAVASCRIPT,
@@ -6963,15 +7052,24 @@ class TransceiverCache {
6963
7052
  /**
6964
7053
  * Adds a transceiver to the cache.
6965
7054
  */
6966
- this.add = (publishOption, transceiver) => {
6967
- this.cache.push({ publishOption, transceiver });
6968
- this.transceiverOrder.push(transceiver);
7055
+ this.add = (bundle) => {
7056
+ this.cache.push(bundle);
7057
+ this.transceiverOrder.push(bundle.transceiver);
6969
7058
  };
6970
7059
  /**
6971
7060
  * Gets the transceiver for the given publish option.
6972
7061
  */
6973
7062
  this.get = (publishOption) => {
6974
- return this.findTransceiver(publishOption)?.transceiver;
7063
+ return this.cache.find((bundle) => bundle.publishOption.id === publishOption.id &&
7064
+ bundle.publishOption.trackType === publishOption.trackType);
7065
+ };
7066
+ /**
7067
+ * Updates the cached bundle with the given patch.
7068
+ */
7069
+ this.update = (publishOption, patch) => {
7070
+ const bundle = this.get(publishOption);
7071
+ if (bundle)
7072
+ Object.assign(bundle, patch);
6975
7073
  };
6976
7074
  /**
6977
7075
  * Checks if the cache has the given publish option.
@@ -7017,10 +7115,6 @@ class TransceiverCache {
7017
7115
  this.layers.push({ publishOption, layers });
7018
7116
  }
7019
7117
  };
7020
- this.findTransceiver = (publishOption) => {
7021
- return this.cache.find((item) => item.publishOption.id === publishOption.id &&
7022
- item.publishOption.trackType === publishOption.trackType);
7023
- };
7024
7118
  this.findLayer = (publishOption) => {
7025
7119
  return this.layers.find((item) => item.publishOption.id === publishOption.id &&
7026
7120
  item.publishOption.trackType === publishOption.trackType);
@@ -7078,10 +7172,20 @@ const toTrackType = (trackType) => {
7078
7172
  };
7079
7173
  const isAudioTrackType = (trackType) => trackType === TrackType.AUDIO || trackType === TrackType.SCREEN_SHARE_AUDIO;
7080
7174
 
7081
- const defaultBitratePerRid = {
7082
- q: 300000,
7083
- h: 750000,
7084
- f: 1250000,
7175
+ /**
7176
+ * Prepares the audio layer for the given track.
7177
+ * Based on the provided audio bitrate profile, we apply the appropriate bitrate.
7178
+ */
7179
+ const computeAudioLayers = (publishOption, options) => {
7180
+ const { audioBitrateProfile } = options;
7181
+ const profileConfig = publishOption.audioBitrateProfiles?.find((config) => config.profile === audioBitrateProfile);
7182
+ const maxBitrate = profileConfig?.bitrate ||
7183
+ {
7184
+ [AudioBitrateProfile.VOICE_STANDARD_UNSPECIFIED]: 64000,
7185
+ [AudioBitrateProfile.VOICE_HIGH_QUALITY]: 128000,
7186
+ [AudioBitrateProfile.MUSIC_HIGH_QUALITY]: 128000,
7187
+ }[audioBitrateProfile || AudioBitrateProfile.VOICE_STANDARD_UNSPECIFIED];
7188
+ return [{ maxBitrate }];
7085
7189
  };
7086
7190
  /**
7087
7191
  * In SVC, we need to send only one video encoding (layer).
@@ -7094,7 +7198,7 @@ const toSvcEncodings = (layers) => {
7094
7198
  if (!layers)
7095
7199
  return;
7096
7200
  // we take the highest quality layer, and we assign it to `q` encoder.
7097
- const withRid = (rid) => (l) => l.rid === rid;
7201
+ const withRid = (rid) => (layer) => layer.rid === rid;
7098
7202
  const highestLayer = layers.find(withRid('f')) ||
7099
7203
  layers.find(withRid('h')) ||
7100
7204
  layers.find(withRid('q'));
@@ -7148,7 +7252,8 @@ const computeVideoLayers = (videoTrack, publishOption) => {
7148
7252
  rid,
7149
7253
  width: Math.round(width / downscaleFactor),
7150
7254
  height: Math.round(height / downscaleFactor),
7151
- maxBitrate: Math.round(maxBitrate / bitrateFactor) || defaultBitratePerRid[rid],
7255
+ maxBitrate: Math.round(maxBitrate / bitrateFactor) ||
7256
+ { q: 300000, h: 750000, f: 1250000 }[rid],
7152
7257
  maxFramerate: fps,
7153
7258
  };
7154
7259
  if (svcCodec) {
@@ -7314,8 +7419,9 @@ class Publisher extends BasePeerConnection {
7314
7419
  *
7315
7420
  * @param track the track to publish.
7316
7421
  * @param trackType the track type to publish.
7422
+ * @param options the publish options to use.
7317
7423
  */
7318
- this.publish = async (track, trackType) => {
7424
+ this.publish = async (track, trackType, options = {}) => {
7319
7425
  if (!this.publishOptions.some((o) => o.trackType === trackType)) {
7320
7426
  throw new Error(`No publish options found for ${TrackType[trackType]}`);
7321
7427
  }
@@ -7325,13 +7431,13 @@ class Publisher extends BasePeerConnection {
7325
7431
  // create a clone of the track as otherwise the same trackId will
7326
7432
  // appear in the SDP in multiple transceivers
7327
7433
  const trackToPublish = this.cloneTrack(track);
7328
- const transceiver = this.transceiverCache.get(publishOption);
7434
+ const { transceiver } = this.transceiverCache.get(publishOption) || {};
7329
7435
  if (!transceiver) {
7330
- await this.addTransceiver(trackToPublish, publishOption);
7436
+ await this.addTransceiver(trackToPublish, publishOption, options);
7331
7437
  }
7332
7438
  else {
7333
7439
  const previousTrack = transceiver.sender.track;
7334
- await this.updateTransceiver(transceiver, trackToPublish, trackType);
7440
+ await this.updateTransceiver(transceiver, trackToPublish, trackType, options);
7335
7441
  if (!isReactNative()) {
7336
7442
  this.stopTrack(previousTrack);
7337
7443
  }
@@ -7341,11 +7447,13 @@ class Publisher extends BasePeerConnection {
7341
7447
  /**
7342
7448
  * Adds a new transceiver carrying the given track to the peer connection.
7343
7449
  */
7344
- this.addTransceiver = async (track, publishOption) => {
7345
- const videoEncodings = computeVideoLayers(track, publishOption);
7450
+ this.addTransceiver = async (track, publishOption, options) => {
7451
+ const encodings = isAudioTrackType(publishOption.trackType)
7452
+ ? computeAudioLayers(publishOption, options)
7453
+ : computeVideoLayers(track, publishOption);
7346
7454
  const sendEncodings = isSvcCodec(publishOption.codec?.name)
7347
- ? toSvcEncodings(videoEncodings)
7348
- : videoEncodings;
7455
+ ? toSvcEncodings(encodings)
7456
+ : encodings;
7349
7457
  const transceiver = this.pc.addTransceiver(track, {
7350
7458
  direction: 'sendonly',
7351
7459
  sendEncodings,
@@ -7355,20 +7463,49 @@ class Publisher extends BasePeerConnection {
7355
7463
  await transceiver.sender.setParameters(params);
7356
7464
  const trackType = publishOption.trackType;
7357
7465
  this.logger('debug', `Added ${TrackType[trackType]} transceiver`);
7358
- this.transceiverCache.add(publishOption, transceiver);
7466
+ this.transceiverCache.add({ publishOption, transceiver, options });
7359
7467
  this.trackIdToTrackType.set(track.id, trackType);
7360
7468
  await this.negotiate();
7361
7469
  };
7362
7470
  /**
7363
7471
  * Updates the transceiver with the given track and track type.
7364
7472
  */
7365
- this.updateTransceiver = async (transceiver, track, trackType) => {
7473
+ this.updateTransceiver = async (transceiver, track, trackType, options = {}) => {
7366
7474
  const sender = transceiver.sender;
7367
7475
  if (sender.track)
7368
7476
  this.trackIdToTrackType.delete(sender.track.id);
7369
7477
  await sender.replaceTrack(track);
7370
7478
  if (track)
7371
7479
  this.trackIdToTrackType.set(track.id, trackType);
7480
+ if (isAudioTrackType(trackType)) {
7481
+ await this.updateAudioPublishOptions(trackType, options);
7482
+ }
7483
+ };
7484
+ /**
7485
+ * Updates the publish options for the given track type.
7486
+ */
7487
+ this.updateAudioPublishOptions = async (trackType, options) => {
7488
+ for (const publishOption of this.publishOptions) {
7489
+ if (publishOption.trackType !== trackType)
7490
+ continue;
7491
+ const bundle = this.transceiverCache.get(publishOption);
7492
+ if (!bundle)
7493
+ continue;
7494
+ const { transceiver, options: current } = bundle;
7495
+ if (current.audioBitrateProfile !== options.audioBitrateProfile) {
7496
+ const encodings = computeAudioLayers(publishOption, options);
7497
+ if (encodings && encodings.length > 0) {
7498
+ const params = transceiver.sender.getParameters();
7499
+ const [currentEncoding] = params.encodings;
7500
+ const [targetEncoding] = encodings;
7501
+ if (currentEncoding.maxBitrate !== targetEncoding.maxBitrate) {
7502
+ currentEncoding.maxBitrate = targetEncoding.maxBitrate;
7503
+ }
7504
+ await transceiver.sender.setParameters(params);
7505
+ }
7506
+ }
7507
+ this.transceiverCache.update(publishOption, { options });
7508
+ }
7372
7509
  };
7373
7510
  /**
7374
7511
  * Synchronizes the current Publisher state with the provided publish options.
@@ -7383,12 +7520,12 @@ class Publisher extends BasePeerConnection {
7383
7520
  continue;
7384
7521
  const item = this.transceiverCache.find((i) => !!i.transceiver.sender.track &&
7385
7522
  i.publishOption.trackType === trackType);
7386
- if (!item || !item.transceiver)
7523
+ if (!item)
7387
7524
  continue;
7388
7525
  // take the track from the existing transceiver for the same track type,
7389
7526
  // clone it and publish it with the new publish options
7390
7527
  const track = this.cloneTrack(item.transceiver.sender.track);
7391
- await this.addTransceiver(track, publishOption);
7528
+ await this.addTransceiver(track, publishOption, item.options);
7392
7529
  }
7393
7530
  // stop publishing with options not required anymore -> [vp9]
7394
7531
  for (const item of this.transceiverCache.items()) {
@@ -7568,11 +7705,9 @@ class Publisher extends BasePeerConnection {
7568
7705
  this.getAnnouncedTracks = (sdp) => {
7569
7706
  const trackInfos = [];
7570
7707
  for (const bundle of this.transceiverCache.items()) {
7571
- const { transceiver, publishOption } = bundle;
7572
- const track = transceiver.sender.track;
7573
- if (!track)
7708
+ if (!bundle.transceiver.sender.track)
7574
7709
  continue;
7575
- trackInfos.push(this.toTrackInfo(transceiver, publishOption, sdp));
7710
+ trackInfos.push(this.toTrackInfo(bundle, sdp));
7576
7711
  }
7577
7712
  return trackInfos;
7578
7713
  };
@@ -7585,17 +7720,18 @@ class Publisher extends BasePeerConnection {
7585
7720
  const sdp = this.pc.localDescription?.sdp;
7586
7721
  const trackInfos = [];
7587
7722
  for (const publishOption of this.publishOptions) {
7588
- const transceiver = this.transceiverCache.get(publishOption);
7589
- if (!transceiver || !transceiver.sender.track)
7723
+ const bundle = this.transceiverCache.get(publishOption);
7724
+ if (!bundle || !bundle.transceiver.sender.track)
7590
7725
  continue;
7591
- trackInfos.push(this.toTrackInfo(transceiver, publishOption, sdp));
7726
+ trackInfos.push(this.toTrackInfo(bundle, sdp));
7592
7727
  }
7593
7728
  return trackInfos;
7594
7729
  };
7595
7730
  /**
7596
7731
  * Converts the given transceiver to a `TrackInfo` object.
7597
7732
  */
7598
- this.toTrackInfo = (transceiver, publishOption, sdp) => {
7733
+ this.toTrackInfo = (bundle, sdp) => {
7734
+ const { transceiver, publishOption } = bundle;
7599
7735
  const track = transceiver.sender.track;
7600
7736
  const isTrackLive = track.readyState === 'live';
7601
7737
  const layers = isTrackLive
@@ -7603,15 +7739,16 @@ class Publisher extends BasePeerConnection {
7603
7739
  : this.transceiverCache.getLayers(publishOption);
7604
7740
  this.transceiverCache.setLayers(publishOption, layers);
7605
7741
  const isAudioTrack = isAudioTrackType(publishOption.trackType);
7606
- const isStereo = isAudioTrack && track.getSettings().channelCount === 2;
7607
7742
  const transceiverIndex = this.transceiverCache.indexOf(transceiver);
7608
7743
  const audioSettings = this.state.settings?.audio;
7744
+ const stereo = publishOption.trackType === TrackType.SCREEN_SHARE_AUDIO ||
7745
+ (isAudioTrack && !!audioSettings?.hifi_audio_enabled);
7609
7746
  return {
7610
7747
  trackId: track.id,
7611
7748
  layers: toVideoLayers(layers),
7612
7749
  trackType: publishOption.trackType,
7613
7750
  mid: extractMid(transceiver, transceiverIndex, sdp),
7614
- stereo: isStereo,
7751
+ stereo,
7615
7752
  dtx: isAudioTrack && !!audioSettings?.opus_dtx_enabled,
7616
7753
  red: isAudioTrack && !!audioSettings?.redundant_coding_enabled,
7617
7754
  muted: !isTrackLive,
@@ -9895,7 +10032,7 @@ function resolveDeviceId(deviceId, kind) {
9895
10032
  */
9896
10033
  const isMobile = () => /Mobi/i.test(navigator.userAgent);
9897
10034
 
9898
- class InputMediaDeviceManager {
10035
+ class DeviceManager {
9899
10036
  constructor(call, state, trackType) {
9900
10037
  /**
9901
10038
  * if true, stops the media stream when call is left
@@ -10102,8 +10239,8 @@ class InputMediaDeviceManager {
10102
10239
  }
10103
10240
  });
10104
10241
  }
10105
- publishStream(stream) {
10106
- return this.call.publish(stream, this.trackType);
10242
+ publishStream(stream, options) {
10243
+ return this.call.publish(stream, this.trackType, options);
10107
10244
  }
10108
10245
  stopPublishStream() {
10109
10246
  return this.call.stopPublish(this.trackType);
@@ -10339,16 +10476,15 @@ class InputMediaDeviceManager {
10339
10476
  }
10340
10477
  }
10341
10478
 
10342
- class InputMediaDeviceManagerState {
10479
+ class DeviceManagerState {
10343
10480
  /**
10344
- * Constructs new InputMediaDeviceManagerState instance.
10481
+ * Constructs a new InputMediaDeviceManagerState instance.
10345
10482
  *
10346
10483
  * @param disableMode the disable mode to use.
10347
10484
  * @param permission the BrowserPermission to use for querying.
10348
10485
  * `undefined` means no permission is required.
10349
10486
  */
10350
- constructor(disableMode = 'stop-tracks', permission) {
10351
- this.disableMode = disableMode;
10487
+ constructor(disableMode, permission) {
10352
10488
  this.statusSubject = new BehaviorSubject(undefined);
10353
10489
  this.optimisticStatusSubject = new BehaviorSubject(undefined);
10354
10490
  this.mediaStreamSubject = new BehaviorSubject(undefined);
@@ -10379,6 +10515,7 @@ class InputMediaDeviceManagerState {
10379
10515
  * The default constraints for the device.
10380
10516
  */
10381
10517
  this.defaultConstraints$ = this.defaultConstraintsSubject.asObservable();
10518
+ this.disableMode = disableMode;
10382
10519
  this.hasBrowserPermission$ = permission
10383
10520
  ? permission.asObservable().pipe(shareReplay(1))
10384
10521
  : of(true);
@@ -10465,10 +10602,15 @@ class InputMediaDeviceManagerState {
10465
10602
  }
10466
10603
  }
10467
10604
 
10468
- class CameraManagerState extends InputMediaDeviceManagerState {
10605
+ class CameraManagerState extends DeviceManagerState {
10469
10606
  constructor() {
10470
10607
  super('stop-tracks', getVideoBrowserPermission());
10471
10608
  this.directionSubject = new BehaviorSubject(undefined);
10609
+ /**
10610
+ * Observable that emits the preferred camera direction
10611
+ * front - means the camera facing the user
10612
+ * back - means the camera facing the environment
10613
+ */
10472
10614
  this.direction$ = this.directionSubject
10473
10615
  .asObservable()
10474
10616
  .pipe(distinctUntilChanged());
@@ -10508,7 +10650,7 @@ class CameraManagerState extends InputMediaDeviceManagerState {
10508
10650
  }
10509
10651
  }
10510
10652
 
10511
- class CameraManager extends InputMediaDeviceManager {
10653
+ class CameraManager extends DeviceManager {
10512
10654
  /**
10513
10655
  * Constructs a new CameraManager.
10514
10656
  *
@@ -10654,18 +10796,87 @@ class CameraManager extends InputMediaDeviceManager {
10654
10796
  }
10655
10797
  }
10656
10798
 
10657
- class MicrophoneManagerState extends InputMediaDeviceManagerState {
10799
/**
 * Base class for High Fidelity enabled Device Managers.
 */
class AudioDeviceManager extends DeviceManager {
    /**
     * Sets the audio bitrate profile and stereo mode.
     *
     * @param profile the audio bitrate profile to apply.
     * @throws {Error} when High Fidelity audio is not enabled in the call settings.
     */
    async setAudioBitrateProfile(profile) {
        // Guard the whole settings path: before call settings are loaded,
        // `settings` (or its `audio` section) may be absent. The original
        // chain (`settings?.audio.hifi_audio_enabled`) stopped short-circuiting
        // after `settings`, so a partial settings object threw a TypeError
        // instead of the intended Error below.
        if (!this.call.state.settings?.audio?.hifi_audio_enabled) {
            throw new Error('High Fidelity audio is not enabled for this call');
        }
        this.doSetAudioBitrateProfile(profile);
        this.state.setAudioBitrateProfile(profile);
        // Re-acquire the media stream only when the device is currently live,
        // so the new constraints take effect immediately.
        if (this.enabled) {
            await this.applySettingsToStream();
        }
    }
    /**
     * Overrides the default `publishStream` method to inject the audio bitrate profile.
     *
     * @param stream the media stream to publish.
     * @param options additional publish options; explicit options win over
     *                the injected `audioBitrateProfile`.
     */
    publishStream(stream, options) {
        return super.publishStream(stream, {
            audioBitrateProfile: this.state.audioBitrateProfile,
            ...options,
        });
    }
}
10826
/**
 * Prepares a new MediaTrackConstraints set based on the provided arguments.
 *
 * For the high-quality music profile, browser audio processing
 * (echo cancellation, noise suppression, auto gain control) is turned off
 * and stereo capture is requested; all other profiles use mono with
 * processing enabled.
 */
const createAudioConstraints = (profile) => {
    const isStereo = profile === AudioBitrateProfile.MUSIC_HIGH_QUALITY;
    const processingEnabled = !isStereo;
    return {
        echoCancellation: processingEnabled,
        noiseSuppression: processingEnabled,
        autoGainControl: processingEnabled,
        channelCount: { ideal: isStereo ? 2 : 1 },
    };
};
10838
+
10839
/**
 * Base state class for High Fidelity enabled device managers.
 */
class AudioDeviceManagerState extends DeviceManagerState {
    /**
     * Constructs a new AudioDeviceManagerState instance.
     *
     * @param disableMode forwarded to the base device manager state.
     * @param permission forwarded to the base device manager state.
     * @param profile the initial audio bitrate profile.
     */
    constructor(disableMode, permission, profile) {
        super(disableMode, permission);
        const profileSubject = new BehaviorSubject(profile);
        this.audioBitrateProfileSubject = profileSubject;
        // Observable view over the subject; de-duplicates repeated emissions.
        this.audioBitrateProfile$ = profileSubject
            .asObservable()
            .pipe(distinctUntilChanged());
    }
    /**
     * Returns the current audio bitrate profile.
     */
    get audioBitrateProfile() {
        return getCurrentValue(this.audioBitrateProfile$);
    }
    /**
     * Sets the audio bitrate profile and stereo mode.
     */
    setAudioBitrateProfile(profile) {
        setCurrentValue(this.audioBitrateProfileSubject, profile);
    }
}
10866
+
10867
+ class MicrophoneManagerState extends AudioDeviceManagerState {
10658
10868
  constructor(disableMode) {
10659
- super(disableMode, getAudioBrowserPermission());
10869
+ super(disableMode, getAudioBrowserPermission(), AudioBitrateProfile.VOICE_STANDARD_UNSPECIFIED);
10660
10870
  this.speakingWhileMutedSubject = new BehaviorSubject(false);
10871
+ /**
10872
+ * An Observable that emits `true` if the user's microphone is muted, but they're speaking.
10873
+ */
10661
10874
  this.speakingWhileMuted$ = this.speakingWhileMutedSubject
10662
10875
  .asObservable()
10663
10876
  .pipe(distinctUntilChanged());
10664
10877
  }
10665
10878
  /**
10666
- * `true` if the user's microphone is muted but they'are speaking.
10667
- *
10668
- * This feature is not available in the React Native SDK.
10879
+ * `true` if the user's microphone is muted but they're speaking.
10669
10880
  */
10670
10881
  get speakingWhileMuted() {
10671
10882
  return getCurrentValue(this.speakingWhileMuted$);
@@ -10898,7 +11109,7 @@ class RNSpeechDetector {
10898
11109
  }
10899
11110
  }
10900
11111
 
10901
- class MicrophoneManager extends InputMediaDeviceManager {
11112
+ class MicrophoneManager extends AudioDeviceManager {
10902
11113
  constructor(call, disableMode = 'stop-tracks') {
10903
11114
  super(call, new MicrophoneManagerState(disableMode), TrackType.AUDIO);
10904
11115
  this.speakingWhileMutedNotificationEnabled = true;
@@ -11094,6 +11305,21 @@ class MicrophoneManager extends InputMediaDeviceManager {
11094
11305
  getStream(constraints) {
11095
11306
  return getAudioStream(constraints, this.call.tracer);
11096
11307
  }
11308
+ doSetAudioBitrateProfile(profile) {
11309
+ this.setDefaultConstraints({
11310
+ ...this.state.defaultConstraints,
11311
+ ...createAudioConstraints(profile),
11312
+ });
11313
+ if (this.noiseCancellation) {
11314
+ const disableAudioProcessing = profile === AudioBitrateProfile.MUSIC_HIGH_QUALITY;
11315
+ if (disableAudioProcessing) {
11316
+ this.noiseCancellation.disable(); // disable for high quality music mode
11317
+ }
11318
+ else {
11319
+ this.noiseCancellation.enable(); // restore it for other modes if available
11320
+ }
11321
+ }
11322
+ }
11097
11323
  async startSpeakingWhileMutedDetection(deviceId) {
11098
11324
  await withoutConcurrency(this.soundDetectorConcurrencyTag, async () => {
11099
11325
  await this.stopSpeakingWhileMutedDetection();
@@ -11130,9 +11356,12 @@ class MicrophoneManager extends InputMediaDeviceManager {
11130
11356
  }
11131
11357
  }
11132
11358
 
11133
- class ScreenShareState extends InputMediaDeviceManagerState {
11359
+ class ScreenShareState extends AudioDeviceManagerState {
11360
+ /**
11361
+ * Constructs a new ScreenShareState instance.
11362
+ */
11134
11363
  constructor() {
11135
- super(...arguments);
11364
+ super('stop-tracks', undefined, AudioBitrateProfile.MUSIC_HIGH_QUALITY);
11136
11365
  this.audioEnabledSubject = new BehaviorSubject(true);
11137
11366
  this.settingsSubject = new BehaviorSubject(undefined);
11138
11367
  /**
@@ -11181,7 +11410,7 @@ class ScreenShareState extends InputMediaDeviceManagerState {
11181
11410
  }
11182
11411
  }
11183
11412
 
11184
- class ScreenShareManager extends InputMediaDeviceManager {
11413
+ class ScreenShareManager extends AudioDeviceManager {
11185
11414
  constructor(call) {
11186
11415
  super(call, new ScreenShareState(), TrackType.SCREEN_SHARE);
11187
11416
  }
@@ -11191,6 +11420,7 @@ class ScreenShareManager extends InputMediaDeviceManager {
11191
11420
  const maybeTargetResolution = settings?.screensharing.target_resolution;
11192
11421
  if (maybeTargetResolution) {
11193
11422
  this.setDefaultConstraints({
11423
+ ...this.state.defaultConstraints,
11194
11424
  video: {
11195
11425
  width: maybeTargetResolution.width,
11196
11426
  height: maybeTargetResolution.height,
@@ -11247,6 +11477,19 @@ class ScreenShareManager extends InputMediaDeviceManager {
11247
11477
  }
11248
11478
  return stream;
11249
11479
  }
11480
+ doSetAudioBitrateProfile(profile) {
11481
+ const { defaultConstraints } = this.state;
11482
+ const baseAudioConstraints = typeof defaultConstraints?.audio !== 'boolean'
11483
+ ? defaultConstraints?.audio
11484
+ : null;
11485
+ this.setDefaultConstraints({
11486
+ ...defaultConstraints,
11487
+ audio: {
11488
+ ...baseAudioConstraints,
11489
+ ...createAudioConstraints(profile),
11490
+ },
11491
+ });
11492
+ }
11250
11493
  async stopPublishStream() {
11251
11494
  return this.call.stopPublish(TrackType.SCREEN_SHARE, TrackType.SCREEN_SHARE_AUDIO);
11252
11495
  }
@@ -11260,19 +11503,27 @@ class ScreenShareManager extends InputMediaDeviceManager {
11260
11503
 
11261
11504
  class SpeakerState {
11262
11505
  constructor(tracer) {
11506
+ this.tracer = tracer;
11263
11507
  this.selectedDeviceSubject = new BehaviorSubject('');
11264
11508
  this.volumeSubject = new BehaviorSubject(1);
11265
11509
  /**
11266
11510
  * [Tells if the browser supports audio output change on 'audio' elements](https://developer.mozilla.org/en-US/docs/Web/API/HTMLMediaElement/setSinkId).
11267
11511
  */
11268
11512
  this.isDeviceSelectionSupported = checkIfAudioOutputChangeSupported();
11269
- this.tracer = tracer;
11513
+ /**
11514
+ * An Observable that emits the currently selected device
11515
+ *
11516
+ * Note: this feature is not supported in React Native
11517
+ */
11270
11518
  this.selectedDevice$ = this.selectedDeviceSubject
11271
11519
  .asObservable()
11272
11520
  .pipe(distinctUntilChanged());
11273
- this.volume$ = this.volumeSubject
11274
- .asObservable()
11275
- .pipe(distinctUntilChanged());
11521
+ /**
11522
+ * An Observable that emits the currently selected volume
11523
+ *
11524
+ * Note: this feature is not supported in React Native
11525
+ */
11526
+ this.volume$ = this.volumeSubject.asObservable().pipe(distinctUntilChanged());
11276
11527
  }
11277
11528
  /**
11278
11529
  * The currently selected device
@@ -12597,10 +12848,11 @@ class Call {
12597
12848
  *
12598
12849
  * @param mediaStream the media stream to publish.
12599
12850
  * @param trackType the type of the track to announce.
12851
+ * @param options the publish options.
12600
12852
  */
12601
- this.publish = async (mediaStream, trackType) => {
12853
+ this.publish = async (mediaStream, trackType, options) => {
12602
12854
  if (!this.sfuClient)
12603
- throw new Error(`Call not joined yet.`);
12855
+ throw new Error(`Call is not joined yet`);
12604
12856
  // joining is in progress, and we should wait until the client is ready
12605
12857
  await this.sfuClient.joinTask;
12606
12858
  if (!this.permissionsContext.canPublish(trackType)) {
@@ -12618,14 +12870,15 @@ class Call {
12618
12870
  throw new Error(`Can't publish ended tracks.`);
12619
12871
  }
12620
12872
  pushToIfMissing(this.trackPublishOrder, trackType);
12621
- await this.publisher.publish(track, trackType);
12873
+ await this.publisher.publish(track, trackType, options);
12622
12874
  const trackTypes = [trackType];
12623
12875
  if (trackType === TrackType.SCREEN_SHARE) {
12624
12876
  const [audioTrack] = mediaStream.getAudioTracks();
12625
12877
  if (audioTrack) {
12626
- pushToIfMissing(this.trackPublishOrder, TrackType.SCREEN_SHARE_AUDIO);
12627
- await this.publisher.publish(audioTrack, TrackType.SCREEN_SHARE_AUDIO);
12628
- trackTypes.push(TrackType.SCREEN_SHARE_AUDIO);
12878
+ const screenShareAudio = TrackType.SCREEN_SHARE_AUDIO;
12879
+ pushToIfMissing(this.trackPublishOrder, screenShareAudio);
12880
+ await this.publisher.publish(audioTrack, screenShareAudio, options);
12881
+ trackTypes.push(screenShareAudio);
12629
12882
  }
12630
12883
  }
12631
12884
  if (track.kind === 'video') {
@@ -14544,7 +14797,7 @@ class StreamClient {
14544
14797
  this.getUserAgent = () => {
14545
14798
  if (!this.cachedUserAgent) {
14546
14799
  const { clientAppIdentifier = {} } = this.options;
14547
- const { sdkName = 'js', sdkVersion = "1.32.0", ...extras } = clientAppIdentifier;
14800
+ const { sdkName = 'js', sdkVersion = "1.33.1", ...extras } = clientAppIdentifier;
14548
14801
  this.cachedUserAgent = [
14549
14802
  `stream-video-${sdkName}-v${sdkVersion}`,
14550
14803
  ...Object.entries(extras).map(([key, value]) => `${key}=${value}`),
@@ -15141,5 +15394,5 @@ class StreamVideoClient {
15141
15394
  }
15142
15395
  StreamVideoClient._instances = new Map();
15143
15396
 
15144
- export { AudioSettingsRequestDefaultDeviceEnum, AudioSettingsResponseDefaultDeviceEnum, browsers as Browsers, Call, CallState, CallType, CallTypes, CallingState, CameraManager, CameraManagerState, CreateDeviceRequestPushProviderEnum, DebounceType, DynascaleManager, ErrorFromResponse, FrameRecordingSettingsRequestModeEnum, FrameRecordingSettingsRequestQualityEnum, FrameRecordingSettingsResponseModeEnum, IngressAudioEncodingOptionsRequestChannelsEnum, IngressVideoLayerRequestCodecEnum, InputMediaDeviceManager, InputMediaDeviceManagerState, LayoutSettingsRequestNameEnum, MicrophoneManager, MicrophoneManagerState, NoiseCancellationSettingsModeEnum, OwnCapability, RNSpeechDetector, RTMPBroadcastRequestQualityEnum, RTMPSettingsRequestQualityEnum, RecordSettingsRequestModeEnum, RecordSettingsRequestQualityEnum, rxUtils as RxUtils, ScreenShareManager, ScreenShareState, events as SfuEvents, models as SfuModels, SpeakerManager, SpeakerState, StartClosedCaptionsRequestLanguageEnum, StartTranscriptionRequestLanguageEnum, StreamSfuClient, StreamVideoClient, StreamVideoReadOnlyStateStore, StreamVideoWriteableStateStore, TranscriptionSettingsRequestClosedCaptionModeEnum, TranscriptionSettingsRequestLanguageEnum, TranscriptionSettingsRequestModeEnum, TranscriptionSettingsResponseClosedCaptionModeEnum, TranscriptionSettingsResponseLanguageEnum, TranscriptionSettingsResponseModeEnum, VideoSettingsRequestCameraFacingEnum, VideoSettingsResponseCameraFacingEnum, ViewportTracker, VisibilityState, checkIfAudioOutputChangeSupported, combineComparators, conditional, createSoundDetector, defaultSortPreset, descending, deviceIds$, disposeOfMediaStream, dominantSpeaker, getAudioBrowserPermission, getAudioDevices, getAudioOutputDevices, getAudioStream, getClientDetails, getDeviceState, getLogLevel, getLogger, getScreenShareStream, getSdkInfo, getVideoBrowserPermission, getVideoDevices, getVideoStream, getWebRTCInfo, hasAudio, hasPausedTrack, hasScreenShare, hasScreenShareAudio, hasVideo, 
isPinned, livestreamOrAudioRoomSortPreset, logLevels, logToConsole, name, noopComparator, paginatedLayoutSortPreset, pinned, publishingAudio, publishingVideo, reactionType, resolveDeviceId, role, screenSharing, setDeviceInfo, setLogLevel, setLogger, setOSInfo, setPowerState, setSdkInfo, setThermalState, setWebRTCInfo, speakerLayoutSortPreset, speaking, withParticipantSource };
15397
+ export { AudioSettingsRequestDefaultDeviceEnum, AudioSettingsResponseDefaultDeviceEnum, browsers as Browsers, Call, CallState, CallType, CallTypes, CallingState, CameraManager, CameraManagerState, CreateDeviceRequestPushProviderEnum, DebounceType, DeviceManager, DeviceManagerState, DynascaleManager, ErrorFromResponse, FrameRecordingSettingsRequestModeEnum, FrameRecordingSettingsRequestQualityEnum, FrameRecordingSettingsResponseModeEnum, IngressAudioEncodingOptionsRequestChannelsEnum, IngressVideoLayerRequestCodecEnum, LayoutSettingsRequestNameEnum, MicrophoneManager, MicrophoneManagerState, NoiseCancellationSettingsModeEnum, OwnCapability, RNSpeechDetector, RTMPBroadcastRequestQualityEnum, RTMPSettingsRequestQualityEnum, RecordSettingsRequestModeEnum, RecordSettingsRequestQualityEnum, rxUtils as RxUtils, ScreenShareManager, ScreenShareState, events as SfuEvents, models as SfuModels, SpeakerManager, SpeakerState, StartClosedCaptionsRequestLanguageEnum, StartTranscriptionRequestLanguageEnum, StreamSfuClient, StreamVideoClient, StreamVideoReadOnlyStateStore, StreamVideoWriteableStateStore, TranscriptionSettingsRequestClosedCaptionModeEnum, TranscriptionSettingsRequestLanguageEnum, TranscriptionSettingsRequestModeEnum, TranscriptionSettingsResponseClosedCaptionModeEnum, TranscriptionSettingsResponseLanguageEnum, TranscriptionSettingsResponseModeEnum, VideoSettingsRequestCameraFacingEnum, VideoSettingsResponseCameraFacingEnum, ViewportTracker, VisibilityState, checkIfAudioOutputChangeSupported, combineComparators, conditional, createSoundDetector, defaultSortPreset, descending, deviceIds$, disposeOfMediaStream, dominantSpeaker, getAudioBrowserPermission, getAudioDevices, getAudioOutputDevices, getAudioStream, getClientDetails, getDeviceState, getLogLevel, getLogger, getScreenShareStream, getSdkInfo, getVideoBrowserPermission, getVideoDevices, getVideoStream, getWebRTCInfo, hasAudio, hasPausedTrack, hasScreenShare, hasScreenShareAudio, hasVideo, isPinned, 
livestreamOrAudioRoomSortPreset, logLevels, logToConsole, name, noopComparator, paginatedLayoutSortPreset, pinned, publishingAudio, publishingVideo, reactionType, resolveDeviceId, role, screenSharing, setDeviceInfo, setLogLevel, setLogger, setOSInfo, setPowerState, setSdkInfo, setThermalState, setWebRTCInfo, speakerLayoutSortPreset, speaking, withParticipantSource };
15145
15398
  //# sourceMappingURL=index.es.js.map