@stream-io/video-client 1.31.0 → 1.33.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (73)
  1. package/CHANGELOG.md +21 -0
  2. package/dist/index.browser.es.js +350 -83
  3. package/dist/index.browser.es.js.map +1 -1
  4. package/dist/index.cjs.js +351 -84
  5. package/dist/index.cjs.js.map +1 -1
  6. package/dist/index.es.js +350 -83
  7. package/dist/index.es.js.map +1 -1
  8. package/dist/src/Call.d.ts +3 -2
  9. package/dist/src/StreamVideoClient.d.ts +2 -0
  10. package/dist/src/coordinator/connection/types.d.ts +4 -0
  11. package/dist/src/devices/AudioDeviceManager.d.ts +25 -0
  12. package/dist/src/devices/AudioDeviceManagerState.d.ts +24 -0
  13. package/dist/src/devices/CameraManager.d.ts +2 -2
  14. package/dist/src/devices/CameraManagerState.d.ts +3 -4
  15. package/dist/src/devices/{InputMediaDeviceManager.d.ts → DeviceManager.d.ts} +6 -6
  16. package/dist/src/devices/{InputMediaDeviceManagerState.d.ts → DeviceManagerState.d.ts} +4 -4
  17. package/dist/src/devices/MicrophoneManager.d.ts +5 -3
  18. package/dist/src/devices/MicrophoneManagerState.d.ts +6 -10
  19. package/dist/src/devices/ScreenShareManager.d.ts +4 -2
  20. package/dist/src/devices/ScreenShareState.d.ts +6 -2
  21. package/dist/src/devices/SpeakerState.d.ts +4 -4
  22. package/dist/src/devices/index.d.ts +2 -2
  23. package/dist/src/gen/coordinator/index.d.ts +169 -2
  24. package/dist/src/gen/video/sfu/event/events.d.ts +8 -0
  25. package/dist/src/gen/video/sfu/models/models.d.ts +43 -0
  26. package/dist/src/rtc/BasePeerConnection.d.ts +2 -12
  27. package/dist/src/rtc/Publisher.d.ts +9 -6
  28. package/dist/src/rtc/Subscriber.d.ts +2 -1
  29. package/dist/src/rtc/TransceiverCache.d.ts +10 -11
  30. package/dist/src/rtc/index.d.ts +1 -1
  31. package/dist/src/rtc/{videoLayers.d.ts → layers.d.ts} +7 -1
  32. package/dist/src/rtc/types.d.ts +31 -0
  33. package/package.json +3 -2
  34. package/src/Call.ts +19 -12
  35. package/src/StreamVideoClient.ts +42 -3
  36. package/src/__tests__/Call.publishing.test.ts +14 -3
  37. package/src/__tests__/StreamVideoClient.api.test.ts +1 -1
  38. package/src/coordinator/connection/types.ts +5 -0
  39. package/src/devices/AudioDeviceManager.ts +61 -0
  40. package/src/devices/AudioDeviceManagerState.ts +44 -0
  41. package/src/devices/CameraManager.ts +4 -4
  42. package/src/devices/CameraManagerState.ts +9 -8
  43. package/src/devices/{InputMediaDeviceManager.ts → DeviceManager.ts} +11 -8
  44. package/src/devices/{InputMediaDeviceManagerState.ts → DeviceManagerState.ts} +7 -4
  45. package/src/devices/MicrophoneManager.ts +26 -6
  46. package/src/devices/MicrophoneManagerState.ts +18 -19
  47. package/src/devices/ScreenShareManager.ts +23 -4
  48. package/src/devices/ScreenShareState.ts +11 -3
  49. package/src/devices/SpeakerState.ts +6 -14
  50. package/src/devices/__tests__/CameraManager.test.ts +1 -0
  51. package/src/devices/__tests__/{InputMediaDeviceManager.test.ts → DeviceManager.test.ts} +4 -4
  52. package/src/devices/__tests__/{InputMediaDeviceManagerFilters.test.ts → DeviceManagerFilters.test.ts} +4 -4
  53. package/src/devices/__tests__/{InputMediaDeviceManagerState.test.ts → DeviceManagerState.test.ts} +2 -2
  54. package/src/devices/__tests__/MicrophoneManager.test.ts +41 -1
  55. package/src/devices/__tests__/NoiseCancellationStub.ts +3 -1
  56. package/src/devices/__tests__/ScreenShareManager.test.ts +5 -1
  57. package/src/devices/index.ts +2 -2
  58. package/src/events/__tests__/internal.test.ts +25 -11
  59. package/src/gen/coordinator/index.ts +169 -2
  60. package/src/gen/video/sfu/event/events.ts +14 -0
  61. package/src/gen/video/sfu/models/models.ts +65 -0
  62. package/src/rtc/BasePeerConnection.ts +1 -16
  63. package/src/rtc/Publisher.ts +74 -31
  64. package/src/rtc/Subscriber.ts +2 -4
  65. package/src/rtc/TransceiverCache.ts +23 -27
  66. package/src/rtc/__tests__/Publisher.test.ts +61 -29
  67. package/src/rtc/__tests__/{videoLayers.test.ts → layers.test.ts} +76 -1
  68. package/src/rtc/index.ts +2 -1
  69. package/src/rtc/{videoLayers.ts → layers.ts} +28 -7
  70. package/src/rtc/types.ts +44 -0
  71. package/src/sorting/presets.ts +2 -2
  72. package/src/store/CallState.ts +36 -10
  73. package/src/store/__tests__/CallState.test.ts +20 -2
@@ -113,6 +113,7 @@ const OwnCapability = {
113
113
  REMOVE_CALL_MEMBER: 'remove-call-member',
114
114
  SCREENSHARE: 'screenshare',
115
115
  SEND_AUDIO: 'send-audio',
116
+ SEND_CLOSED_CAPTIONS_CALL: 'send-closed-captions-call',
116
117
  SEND_VIDEO: 'send-video',
117
118
  START_BROADCAST_CALL: 'start-broadcast-call',
118
119
  START_CLOSED_CAPTIONS_CALL: 'start-closed-captions-call',
@@ -958,6 +959,24 @@ var ParticipantSource;
958
959
  */
959
960
  ParticipantSource[ParticipantSource["SRT"] = 5] = "SRT";
960
961
  })(ParticipantSource || (ParticipantSource = {}));
962
+ /**
963
+ * @generated from protobuf enum stream.video.sfu.models.AudioBitrateProfile
964
+ */
965
+ var AudioBitrateProfile;
966
+ (function (AudioBitrateProfile) {
967
+ /**
968
+ * @generated from protobuf enum value: AUDIO_BITRATE_PROFILE_VOICE_STANDARD_UNSPECIFIED = 0;
969
+ */
970
+ AudioBitrateProfile[AudioBitrateProfile["VOICE_STANDARD_UNSPECIFIED"] = 0] = "VOICE_STANDARD_UNSPECIFIED";
971
+ /**
972
+ * @generated from protobuf enum value: AUDIO_BITRATE_PROFILE_VOICE_HIGH_QUALITY = 1;
973
+ */
974
+ AudioBitrateProfile[AudioBitrateProfile["VOICE_HIGH_QUALITY"] = 1] = "VOICE_HIGH_QUALITY";
975
+ /**
976
+ * @generated from protobuf enum value: AUDIO_BITRATE_PROFILE_MUSIC_HIGH_QUALITY = 2;
977
+ */
978
+ AudioBitrateProfile[AudioBitrateProfile["MUSIC_HIGH_QUALITY"] = 2] = "MUSIC_HIGH_QUALITY";
979
+ })(AudioBitrateProfile || (AudioBitrateProfile = {}));
961
980
  /**
962
981
  * @generated from protobuf enum stream.video.sfu.models.ErrorCode
963
982
  */
@@ -1571,6 +1590,13 @@ class PublishOption$Type extends MessageType {
1571
1590
  kind: 'scalar',
1572
1591
  T: 8 /*ScalarType.BOOL*/,
1573
1592
  },
1593
+ {
1594
+ no: 10,
1595
+ name: 'audio_bitrate_profiles',
1596
+ kind: 'message',
1597
+ repeat: 2 /*RepeatType.UNPACKED*/,
1598
+ T: () => AudioBitrate,
1599
+ },
1574
1600
  ]);
1575
1601
  }
1576
1602
  }
@@ -1634,6 +1660,28 @@ let ICETrickle$Type$1 = class ICETrickle$Type extends MessageType {
1634
1660
  */
1635
1661
  const ICETrickle$1 = new ICETrickle$Type$1();
1636
1662
  // @generated message type with reflection information, may provide speed optimized methods
1663
+ class AudioBitrate$Type extends MessageType {
1664
+ constructor() {
1665
+ super('stream.video.sfu.models.AudioBitrate', [
1666
+ {
1667
+ no: 1,
1668
+ name: 'profile',
1669
+ kind: 'enum',
1670
+ T: () => [
1671
+ 'stream.video.sfu.models.AudioBitrateProfile',
1672
+ AudioBitrateProfile,
1673
+ 'AUDIO_BITRATE_PROFILE_',
1674
+ ],
1675
+ },
1676
+ { no: 2, name: 'bitrate', kind: 'scalar', T: 5 /*ScalarType.INT32*/ },
1677
+ ]);
1678
+ }
1679
+ }
1680
+ /**
1681
+ * @generated MessageType for protobuf message stream.video.sfu.models.AudioBitrate
1682
+ */
1683
+ const AudioBitrate = new AudioBitrate$Type();
1684
+ // @generated message type with reflection information, may provide speed optimized methods
1637
1685
  class TrackInfo$Type extends MessageType {
1638
1686
  constructor() {
1639
1687
  super('stream.video.sfu.models.TrackInfo', [
@@ -1984,6 +2032,8 @@ var models = /*#__PURE__*/Object.freeze({
1984
2032
  get AndroidThermalState () { return AndroidThermalState; },
1985
2033
  AppleState: AppleState,
1986
2034
  get AppleThermalState () { return AppleThermalState; },
2035
+ AudioBitrate: AudioBitrate,
2036
+ get AudioBitrateProfile () { return AudioBitrateProfile; },
1987
2037
  Browser: Browser,
1988
2038
  Call: Call$1,
1989
2039
  get CallEndedReason () { return CallEndedReason; },
@@ -2949,6 +2999,12 @@ class JoinRequest$Type extends MessageType {
2949
2999
  super('stream.video.sfu.event.JoinRequest', [
2950
3000
  { no: 1, name: 'token', kind: 'scalar', T: 9 /*ScalarType.STRING*/ },
2951
3001
  { no: 2, name: 'session_id', kind: 'scalar', T: 9 /*ScalarType.STRING*/ },
3002
+ {
3003
+ no: 13,
3004
+ name: 'unified_session_id',
3005
+ kind: 'scalar',
3006
+ T: 9 /*ScalarType.STRING*/,
3007
+ },
2952
3008
  {
2953
3009
  no: 3,
2954
3010
  name: 'subscriber_sdp',
@@ -4652,11 +4708,11 @@ const ifInvisibleOrUnknownBy = conditional((a, b) => a.viewportVisibilityState?.
4652
4708
  /**
4653
4709
  * The default sorting preset.
4654
4710
  */
4655
- const defaultSortPreset = combineComparators(pinned, screenSharing, ifInvisibleBy(combineComparators(dominantSpeaker, speaking, reactionType('raised-hand'), publishingVideo, publishingAudio)));
4711
+ const defaultSortPreset = combineComparators(screenSharing, pinned, ifInvisibleBy(combineComparators(dominantSpeaker, speaking, reactionType('raised-hand'), publishingVideo, publishingAudio)));
4656
4712
  /**
4657
4713
  * The sorting preset for speaker layout.
4658
4714
  */
4659
- const speakerLayoutSortPreset = combineComparators(pinned, screenSharing, dominantSpeaker, ifInvisibleBy(combineComparators(speaking, reactionType('raised-hand'), publishingVideo, publishingAudio)));
4715
+ const speakerLayoutSortPreset = combineComparators(screenSharing, pinned, dominantSpeaker, ifInvisibleBy(combineComparators(speaking, reactionType('raised-hand'), publishingVideo, publishingAudio)));
4660
4716
  /**
4661
4717
  * The sorting preset for layouts that don't render all participants but
4662
4718
  * instead, render them in pages.
@@ -5003,14 +5059,32 @@ class CallState {
5003
5059
  * @param pins the latest pins from the server.
5004
5060
  */
5005
5061
  this.setServerSidePins = (pins) => {
5006
- const pinsLookup = pins.reduce((lookup, pin) => {
5007
- lookup[pin.sessionId] = Date.now();
5062
+ const now = Date.now();
5063
+ const unknownSymbol = Symbol('unknown');
5064
+ // generate a lookup table of pinnedAt timestamps by userId and sessionId
5065
+ // if there are multiple pins for the same userId, then we set the pinnedAt
5066
+ // to `unknown` (for that userId lookup) so that we don't apply any pin for that participant
5067
+ // this is to avoid conflicts during reconstruction of the pin state after reconnections
5068
+ // as sessionIds can change
5069
+ const pinnedAtByIdentifier = pins.reduce((lookup, pin, index) => {
5070
+ var _a;
5071
+ const pinnedAt = now + (pins.length - index);
5072
+ if (lookup[pin.userId]) {
5073
+ lookup[pin.userId] = unknownSymbol;
5074
+ }
5075
+ else {
5076
+ lookup[pin.userId] = pinnedAt;
5077
+ }
5078
+ lookup[_a = pin.sessionId] ?? (lookup[_a] = pinnedAt);
5008
5079
  return lookup;
5009
5080
  }, {});
5010
5081
  return this.setParticipants((participants) => participants.map((participant) => {
5011
- const serverSidePinnedAt = pinsLookup[participant.sessionId];
5082
+ // first check by sessionId as that is 100% correct, then by attempt reconstruction by userId
5083
+ const serverSidePinnedAt = pinnedAtByIdentifier[participant.sessionId] ??
5084
+ pinnedAtByIdentifier[participant.userId];
5012
5085
  // the participant is newly pinned
5013
- if (serverSidePinnedAt) {
5086
+ if (typeof serverSidePinnedAt === 'number' &&
5087
+ typeof participant.pin?.pinnedAt !== 'number') {
5014
5088
  return {
5015
5089
  ...participant,
5016
5090
  pin: {
@@ -5021,7 +5095,8 @@ class CallState {
5021
5095
  }
5022
5096
  // the participant is no longer pinned server side
5023
5097
  // we need to reset the pin
5024
- if (participant.pin && !participant.pin.isLocalPin) {
5098
+ if (typeof serverSidePinnedAt !== 'number' &&
5099
+ participant.pin?.isLocalPin === false) {
5025
5100
  return {
5026
5101
  ...participant,
5027
5102
  pin: undefined,
@@ -5758,7 +5833,7 @@ const getSdkVersion = (sdk) => {
5758
5833
  return sdk ? `${sdk.major}.${sdk.minor}.${sdk.patch}` : '0.0.0-development';
5759
5834
  };
5760
5835
 
5761
- const version = "1.31.0";
5836
+ const version = "1.33.0";
5762
5837
  const [major, minor, patch] = version.split('.');
5763
5838
  let sdkInfo = {
5764
5839
  type: SdkType.PLAIN_JAVASCRIPT,
@@ -6956,15 +7031,24 @@ class TransceiverCache {
6956
7031
  /**
6957
7032
  * Adds a transceiver to the cache.
6958
7033
  */
6959
- this.add = (publishOption, transceiver) => {
6960
- this.cache.push({ publishOption, transceiver });
6961
- this.transceiverOrder.push(transceiver);
7034
+ this.add = (bundle) => {
7035
+ this.cache.push(bundle);
7036
+ this.transceiverOrder.push(bundle.transceiver);
6962
7037
  };
6963
7038
  /**
6964
7039
  * Gets the transceiver for the given publish option.
6965
7040
  */
6966
7041
  this.get = (publishOption) => {
6967
- return this.findTransceiver(publishOption)?.transceiver;
7042
+ return this.cache.find((bundle) => bundle.publishOption.id === publishOption.id &&
7043
+ bundle.publishOption.trackType === publishOption.trackType);
7044
+ };
7045
+ /**
7046
+ * Updates the cached bundle with the given patch.
7047
+ */
7048
+ this.update = (publishOption, patch) => {
7049
+ const bundle = this.get(publishOption);
7050
+ if (bundle)
7051
+ Object.assign(bundle, patch);
6968
7052
  };
6969
7053
  /**
6970
7054
  * Checks if the cache has the given publish option.
@@ -7010,10 +7094,6 @@ class TransceiverCache {
7010
7094
  this.layers.push({ publishOption, layers });
7011
7095
  }
7012
7096
  };
7013
- this.findTransceiver = (publishOption) => {
7014
- return this.cache.find((item) => item.publishOption.id === publishOption.id &&
7015
- item.publishOption.trackType === publishOption.trackType);
7016
- };
7017
7097
  this.findLayer = (publishOption) => {
7018
7098
  return this.layers.find((item) => item.publishOption.id === publishOption.id &&
7019
7099
  item.publishOption.trackType === publishOption.trackType);
@@ -7071,10 +7151,20 @@ const toTrackType = (trackType) => {
7071
7151
  };
7072
7152
  const isAudioTrackType = (trackType) => trackType === TrackType.AUDIO || trackType === TrackType.SCREEN_SHARE_AUDIO;
7073
7153
 
7074
- const defaultBitratePerRid = {
7075
- q: 300000,
7076
- h: 750000,
7077
- f: 1250000,
7154
+ /**
7155
+ * Prepares the audio layer for the given track.
7156
+ * Based on the provided audio bitrate profile, we apply the appropriate bitrate.
7157
+ */
7158
+ const computeAudioLayers = (publishOption, options) => {
7159
+ const { audioBitrateProfile } = options;
7160
+ const profileConfig = publishOption.audioBitrateProfiles?.find((config) => config.profile === audioBitrateProfile);
7161
+ const maxBitrate = profileConfig?.bitrate ||
7162
+ {
7163
+ [AudioBitrateProfile.VOICE_STANDARD_UNSPECIFIED]: 64000,
7164
+ [AudioBitrateProfile.VOICE_HIGH_QUALITY]: 128000,
7165
+ [AudioBitrateProfile.MUSIC_HIGH_QUALITY]: 128000,
7166
+ }[audioBitrateProfile || AudioBitrateProfile.VOICE_STANDARD_UNSPECIFIED];
7167
+ return [{ maxBitrate }];
7078
7168
  };
7079
7169
  /**
7080
7170
  * In SVC, we need to send only one video encoding (layer).
@@ -7087,7 +7177,7 @@ const toSvcEncodings = (layers) => {
7087
7177
  if (!layers)
7088
7178
  return;
7089
7179
  // we take the highest quality layer, and we assign it to `q` encoder.
7090
- const withRid = (rid) => (l) => l.rid === rid;
7180
+ const withRid = (rid) => (layer) => layer.rid === rid;
7091
7181
  const highestLayer = layers.find(withRid('f')) ||
7092
7182
  layers.find(withRid('h')) ||
7093
7183
  layers.find(withRid('q'));
@@ -7141,7 +7231,8 @@ const computeVideoLayers = (videoTrack, publishOption) => {
7141
7231
  rid,
7142
7232
  width: Math.round(width / downscaleFactor),
7143
7233
  height: Math.round(height / downscaleFactor),
7144
- maxBitrate: Math.round(maxBitrate / bitrateFactor) || defaultBitratePerRid[rid],
7234
+ maxBitrate: Math.round(maxBitrate / bitrateFactor) ||
7235
+ { q: 300000, h: 750000, f: 1250000 }[rid],
7145
7236
  maxFramerate: fps,
7146
7237
  };
7147
7238
  if (svcCodec) {
@@ -7307,8 +7398,9 @@ class Publisher extends BasePeerConnection {
7307
7398
  *
7308
7399
  * @param track the track to publish.
7309
7400
  * @param trackType the track type to publish.
7401
+ * @param options the publish options to use.
7310
7402
  */
7311
- this.publish = async (track, trackType) => {
7403
+ this.publish = async (track, trackType, options = {}) => {
7312
7404
  if (!this.publishOptions.some((o) => o.trackType === trackType)) {
7313
7405
  throw new Error(`No publish options found for ${TrackType[trackType]}`);
7314
7406
  }
@@ -7318,13 +7410,13 @@ class Publisher extends BasePeerConnection {
7318
7410
  // create a clone of the track as otherwise the same trackId will
7319
7411
  // appear in the SDP in multiple transceivers
7320
7412
  const trackToPublish = this.cloneTrack(track);
7321
- const transceiver = this.transceiverCache.get(publishOption);
7413
+ const { transceiver } = this.transceiverCache.get(publishOption) || {};
7322
7414
  if (!transceiver) {
7323
- await this.addTransceiver(trackToPublish, publishOption);
7415
+ await this.addTransceiver(trackToPublish, publishOption, options);
7324
7416
  }
7325
7417
  else {
7326
7418
  const previousTrack = transceiver.sender.track;
7327
- await this.updateTransceiver(transceiver, trackToPublish, trackType);
7419
+ await this.updateTransceiver(transceiver, trackToPublish, trackType, options);
7328
7420
  if (!isReactNative()) {
7329
7421
  this.stopTrack(previousTrack);
7330
7422
  }
@@ -7334,11 +7426,13 @@ class Publisher extends BasePeerConnection {
7334
7426
  /**
7335
7427
  * Adds a new transceiver carrying the given track to the peer connection.
7336
7428
  */
7337
- this.addTransceiver = async (track, publishOption) => {
7338
- const videoEncodings = computeVideoLayers(track, publishOption);
7429
+ this.addTransceiver = async (track, publishOption, options) => {
7430
+ const encodings = isAudioTrackType(publishOption.trackType)
7431
+ ? computeAudioLayers(publishOption, options)
7432
+ : computeVideoLayers(track, publishOption);
7339
7433
  const sendEncodings = isSvcCodec(publishOption.codec?.name)
7340
- ? toSvcEncodings(videoEncodings)
7341
- : videoEncodings;
7434
+ ? toSvcEncodings(encodings)
7435
+ : encodings;
7342
7436
  const transceiver = this.pc.addTransceiver(track, {
7343
7437
  direction: 'sendonly',
7344
7438
  sendEncodings,
@@ -7348,20 +7442,49 @@ class Publisher extends BasePeerConnection {
7348
7442
  await transceiver.sender.setParameters(params);
7349
7443
  const trackType = publishOption.trackType;
7350
7444
  this.logger('debug', `Added ${TrackType[trackType]} transceiver`);
7351
- this.transceiverCache.add(publishOption, transceiver);
7445
+ this.transceiverCache.add({ publishOption, transceiver, options });
7352
7446
  this.trackIdToTrackType.set(track.id, trackType);
7353
7447
  await this.negotiate();
7354
7448
  };
7355
7449
  /**
7356
7450
  * Updates the transceiver with the given track and track type.
7357
7451
  */
7358
- this.updateTransceiver = async (transceiver, track, trackType) => {
7452
+ this.updateTransceiver = async (transceiver, track, trackType, options = {}) => {
7359
7453
  const sender = transceiver.sender;
7360
7454
  if (sender.track)
7361
7455
  this.trackIdToTrackType.delete(sender.track.id);
7362
7456
  await sender.replaceTrack(track);
7363
7457
  if (track)
7364
7458
  this.trackIdToTrackType.set(track.id, trackType);
7459
+ if (isAudioTrackType(trackType)) {
7460
+ await this.updateAudioPublishOptions(trackType, options);
7461
+ }
7462
+ };
7463
+ /**
7464
+ * Updates the publish options for the given track type.
7465
+ */
7466
+ this.updateAudioPublishOptions = async (trackType, options) => {
7467
+ for (const publishOption of this.publishOptions) {
7468
+ if (publishOption.trackType !== trackType)
7469
+ continue;
7470
+ const bundle = this.transceiverCache.get(publishOption);
7471
+ if (!bundle)
7472
+ continue;
7473
+ const { transceiver, options: current } = bundle;
7474
+ if (current.audioBitrateProfile !== options.audioBitrateProfile) {
7475
+ const encodings = computeAudioLayers(publishOption, options);
7476
+ if (encodings && encodings.length > 0) {
7477
+ const params = transceiver.sender.getParameters();
7478
+ const [currentEncoding] = params.encodings;
7479
+ const [targetEncoding] = encodings;
7480
+ if (currentEncoding.maxBitrate !== targetEncoding.maxBitrate) {
7481
+ currentEncoding.maxBitrate = targetEncoding.maxBitrate;
7482
+ }
7483
+ await transceiver.sender.setParameters(params);
7484
+ }
7485
+ }
7486
+ this.transceiverCache.update(publishOption, { options });
7487
+ }
7365
7488
  };
7366
7489
  /**
7367
7490
  * Synchronizes the current Publisher state with the provided publish options.
@@ -7376,12 +7499,12 @@ class Publisher extends BasePeerConnection {
7376
7499
  continue;
7377
7500
  const item = this.transceiverCache.find((i) => !!i.transceiver.sender.track &&
7378
7501
  i.publishOption.trackType === trackType);
7379
- if (!item || !item.transceiver)
7502
+ if (!item)
7380
7503
  continue;
7381
7504
  // take the track from the existing transceiver for the same track type,
7382
7505
  // clone it and publish it with the new publish options
7383
7506
  const track = this.cloneTrack(item.transceiver.sender.track);
7384
- await this.addTransceiver(track, publishOption);
7507
+ await this.addTransceiver(track, publishOption, item.options);
7385
7508
  }
7386
7509
  // stop publishing with options not required anymore -> [vp9]
7387
7510
  for (const item of this.transceiverCache.items()) {
@@ -7561,11 +7684,9 @@ class Publisher extends BasePeerConnection {
7561
7684
  this.getAnnouncedTracks = (sdp) => {
7562
7685
  const trackInfos = [];
7563
7686
  for (const bundle of this.transceiverCache.items()) {
7564
- const { transceiver, publishOption } = bundle;
7565
- const track = transceiver.sender.track;
7566
- if (!track)
7687
+ if (!bundle.transceiver.sender.track)
7567
7688
  continue;
7568
- trackInfos.push(this.toTrackInfo(transceiver, publishOption, sdp));
7689
+ trackInfos.push(this.toTrackInfo(bundle, sdp));
7569
7690
  }
7570
7691
  return trackInfos;
7571
7692
  };
@@ -7578,17 +7699,18 @@ class Publisher extends BasePeerConnection {
7578
7699
  const sdp = this.pc.localDescription?.sdp;
7579
7700
  const trackInfos = [];
7580
7701
  for (const publishOption of this.publishOptions) {
7581
- const transceiver = this.transceiverCache.get(publishOption);
7582
- if (!transceiver || !transceiver.sender.track)
7702
+ const bundle = this.transceiverCache.get(publishOption);
7703
+ if (!bundle || !bundle.transceiver.sender.track)
7583
7704
  continue;
7584
- trackInfos.push(this.toTrackInfo(transceiver, publishOption, sdp));
7705
+ trackInfos.push(this.toTrackInfo(bundle, sdp));
7585
7706
  }
7586
7707
  return trackInfos;
7587
7708
  };
7588
7709
  /**
7589
7710
  * Converts the given transceiver to a `TrackInfo` object.
7590
7711
  */
7591
- this.toTrackInfo = (transceiver, publishOption, sdp) => {
7712
+ this.toTrackInfo = (bundle, sdp) => {
7713
+ const { transceiver, publishOption } = bundle;
7592
7714
  const track = transceiver.sender.track;
7593
7715
  const isTrackLive = track.readyState === 'live';
7594
7716
  const layers = isTrackLive
@@ -7596,15 +7718,16 @@ class Publisher extends BasePeerConnection {
7596
7718
  : this.transceiverCache.getLayers(publishOption);
7597
7719
  this.transceiverCache.setLayers(publishOption, layers);
7598
7720
  const isAudioTrack = isAudioTrackType(publishOption.trackType);
7599
- const isStereo = isAudioTrack && track.getSettings().channelCount === 2;
7600
7721
  const transceiverIndex = this.transceiverCache.indexOf(transceiver);
7601
7722
  const audioSettings = this.state.settings?.audio;
7723
+ const stereo = publishOption.trackType === TrackType.SCREEN_SHARE_AUDIO ||
7724
+ (isAudioTrack && !!audioSettings?.hifi_audio_enabled);
7602
7725
  return {
7603
7726
  trackId: track.id,
7604
7727
  layers: toVideoLayers(layers),
7605
7728
  trackType: publishOption.trackType,
7606
7729
  mid: extractMid(transceiver, transceiverIndex, sdp),
7607
- stereo: isStereo,
7730
+ stereo,
7608
7731
  dtx: isAudioTrack && !!audioSettings?.opus_dtx_enabled,
7609
7732
  red: isAudioTrack && !!audioSettings?.redundant_coding_enabled,
7610
7733
  muted: !isTrackLive,
@@ -9888,7 +10011,7 @@ function resolveDeviceId(deviceId, kind) {
9888
10011
  */
9889
10012
  const isMobile = () => /Mobi/i.test(navigator.userAgent);
9890
10013
 
9891
- class InputMediaDeviceManager {
10014
+ class DeviceManager {
9892
10015
  constructor(call, state, trackType) {
9893
10016
  /**
9894
10017
  * if true, stops the media stream when call is left
@@ -10095,8 +10218,8 @@ class InputMediaDeviceManager {
10095
10218
  }
10096
10219
  });
10097
10220
  }
10098
- publishStream(stream) {
10099
- return this.call.publish(stream, this.trackType);
10221
+ publishStream(stream, options) {
10222
+ return this.call.publish(stream, this.trackType, options);
10100
10223
  }
10101
10224
  stopPublishStream() {
10102
10225
  return this.call.stopPublish(this.trackType);
@@ -10332,16 +10455,15 @@ class InputMediaDeviceManager {
10332
10455
  }
10333
10456
  }
10334
10457
 
10335
- class InputMediaDeviceManagerState {
10458
+ class DeviceManagerState {
10336
10459
  /**
10337
- * Constructs new InputMediaDeviceManagerState instance.
10460
+ * Constructs a new InputMediaDeviceManagerState instance.
10338
10461
  *
10339
10462
  * @param disableMode the disable mode to use.
10340
10463
  * @param permission the BrowserPermission to use for querying.
10341
10464
  * `undefined` means no permission is required.
10342
10465
  */
10343
- constructor(disableMode = 'stop-tracks', permission) {
10344
- this.disableMode = disableMode;
10466
+ constructor(disableMode, permission) {
10345
10467
  this.statusSubject = new BehaviorSubject(undefined);
10346
10468
  this.optimisticStatusSubject = new BehaviorSubject(undefined);
10347
10469
  this.mediaStreamSubject = new BehaviorSubject(undefined);
@@ -10372,6 +10494,7 @@ class InputMediaDeviceManagerState {
10372
10494
  * The default constraints for the device.
10373
10495
  */
10374
10496
  this.defaultConstraints$ = this.defaultConstraintsSubject.asObservable();
10497
+ this.disableMode = disableMode;
10375
10498
  this.hasBrowserPermission$ = permission
10376
10499
  ? permission.asObservable().pipe(shareReplay(1))
10377
10500
  : of(true);
@@ -10458,10 +10581,15 @@ class InputMediaDeviceManagerState {
10458
10581
  }
10459
10582
  }
10460
10583
 
10461
- class CameraManagerState extends InputMediaDeviceManagerState {
10584
+ class CameraManagerState extends DeviceManagerState {
10462
10585
  constructor() {
10463
10586
  super('stop-tracks', getVideoBrowserPermission());
10464
10587
  this.directionSubject = new BehaviorSubject(undefined);
10588
+ /**
10589
+ * Observable that emits the preferred camera direction
10590
+ * front - means the camera facing the user
10591
+ * back - means the camera facing the environment
10592
+ */
10465
10593
  this.direction$ = this.directionSubject
10466
10594
  .asObservable()
10467
10595
  .pipe(distinctUntilChanged());
@@ -10501,7 +10629,7 @@ class CameraManagerState extends InputMediaDeviceManagerState {
10501
10629
  }
10502
10630
  }
10503
10631
 
10504
- class CameraManager extends InputMediaDeviceManager {
10632
+ class CameraManager extends DeviceManager {
10505
10633
  /**
10506
10634
  * Constructs a new CameraManager.
10507
10635
  *
@@ -10647,18 +10775,87 @@ class CameraManager extends InputMediaDeviceManager {
10647
10775
  }
10648
10776
  }
10649
10777
 
10650
- class MicrophoneManagerState extends InputMediaDeviceManagerState {
10778
+ /**
10779
+ * Base class for High Fidelity enabled Device Managers.
10780
+ */
10781
+ class AudioDeviceManager extends DeviceManager {
10782
+ /**
10783
+ * Sets the audio bitrate profile and stereo mode.
10784
+ */
10785
+ async setAudioBitrateProfile(profile) {
10786
+ if (!this.call.state.settings?.audio.hifi_audio_enabled) {
10787
+ throw new Error('High Fidelity audio is not enabled for this call');
10788
+ }
10789
+ this.doSetAudioBitrateProfile(profile);
10790
+ this.state.setAudioBitrateProfile(profile);
10791
+ if (this.enabled) {
10792
+ await this.applySettingsToStream();
10793
+ }
10794
+ }
10795
+ /**
10796
+ * Overrides the default `publishStream` method to inject the audio bitrate profile.
10797
+ */
10798
+ publishStream(stream, options) {
10799
+ return super.publishStream(stream, {
10800
+ audioBitrateProfile: this.state.audioBitrateProfile,
10801
+ ...options,
10802
+ });
10803
+ }
10804
+ }
10805
+ /**
10806
+ * Prepares a new MediaTrackConstraints set based on the provided arguments.
10807
+ */
10808
+ const createAudioConstraints = (profile) => {
10809
+ const stereo = profile === AudioBitrateProfile.MUSIC_HIGH_QUALITY;
10810
+ return {
10811
+ echoCancellation: !stereo,
10812
+ noiseSuppression: !stereo,
10813
+ autoGainControl: !stereo,
10814
+ channelCount: { ideal: stereo ? 2 : 1 },
10815
+ };
10816
+ };
10817
+
10818
+ /**
10819
+ * Base state class for High Fidelity enabled device managers.
10820
+ */
10821
+ class AudioDeviceManagerState extends DeviceManagerState {
10822
+ /**
10823
+ * Constructs a new AudioDeviceManagerState instance.
10824
+ */
10825
+ constructor(disableMode, permission, profile) {
10826
+ super(disableMode, permission);
10827
+ this.audioBitrateProfileSubject = new BehaviorSubject(profile);
10828
+ this.audioBitrateProfile$ = this.audioBitrateProfileSubject
10829
+ .asObservable()
10830
+ .pipe(distinctUntilChanged());
10831
+ }
10832
+ /**
10833
+ * Returns the current audio bitrate profile.
10834
+ */
10835
+ get audioBitrateProfile() {
10836
+ return getCurrentValue(this.audioBitrateProfile$);
10837
+ }
10838
+ /**
10839
+ * Sets the audio bitrate profile and stereo mode.
10840
+ */
10841
+ setAudioBitrateProfile(profile) {
10842
+ setCurrentValue(this.audioBitrateProfileSubject, profile);
10843
+ }
10844
+ }
10845
+
10846
+ class MicrophoneManagerState extends AudioDeviceManagerState {
10651
10847
  constructor(disableMode) {
10652
- super(disableMode, getAudioBrowserPermission());
10848
+ super(disableMode, getAudioBrowserPermission(), AudioBitrateProfile.VOICE_STANDARD_UNSPECIFIED);
10653
10849
  this.speakingWhileMutedSubject = new BehaviorSubject(false);
10850
+ /**
10851
+ * An Observable that emits `true` if the user's microphone is muted, but they're speaking.
10852
+ */
10654
10853
  this.speakingWhileMuted$ = this.speakingWhileMutedSubject
10655
10854
  .asObservable()
10656
10855
  .pipe(distinctUntilChanged());
10657
10856
  }
10658
10857
  /**
10659
- * `true` if the user's microphone is muted but they'are speaking.
10660
- *
10661
- * This feature is not available in the React Native SDK.
10858
+ * `true` if the user's microphone is muted but they're speaking.
10662
10859
  */
10663
10860
  get speakingWhileMuted() {
10664
10861
  return getCurrentValue(this.speakingWhileMuted$);
@@ -10891,7 +11088,7 @@ class RNSpeechDetector {
10891
11088
  }
10892
11089
  }
10893
11090
 
10894
- class MicrophoneManager extends InputMediaDeviceManager {
11091
+ class MicrophoneManager extends AudioDeviceManager {
10895
11092
  constructor(call, disableMode = 'stop-tracks') {
10896
11093
  super(call, new MicrophoneManagerState(disableMode), TrackType.AUDIO);
10897
11094
  this.speakingWhileMutedNotificationEnabled = true;
@@ -11087,6 +11284,21 @@ class MicrophoneManager extends InputMediaDeviceManager {
11087
11284
  getStream(constraints) {
11088
11285
  return getAudioStream(constraints, this.call.tracer);
11089
11286
  }
11287
+ doSetAudioBitrateProfile(profile) {
11288
+ this.setDefaultConstraints({
11289
+ ...this.state.defaultConstraints,
11290
+ ...createAudioConstraints(profile),
11291
+ });
11292
+ if (this.noiseCancellation) {
11293
+ const disableAudioProcessing = profile === AudioBitrateProfile.MUSIC_HIGH_QUALITY;
11294
+ if (disableAudioProcessing) {
11295
+ this.noiseCancellation.disable(); // disable for high quality music mode
11296
+ }
11297
+ else {
11298
+ this.noiseCancellation.enable(); // restore it for other modes if available
11299
+ }
11300
+ }
11301
+ }
11090
11302
  async startSpeakingWhileMutedDetection(deviceId) {
11091
11303
  await withoutConcurrency(this.soundDetectorConcurrencyTag, async () => {
11092
11304
  await this.stopSpeakingWhileMutedDetection();
@@ -11123,9 +11335,12 @@ class MicrophoneManager extends InputMediaDeviceManager {
11123
11335
  }
11124
11336
  }
11125
11337
 
11126
- class ScreenShareState extends InputMediaDeviceManagerState {
11338
+ class ScreenShareState extends AudioDeviceManagerState {
11339
+ /**
11340
+ * Constructs a new ScreenShareState instance.
11341
+ */
11127
11342
  constructor() {
11128
- super(...arguments);
11343
+ super('stop-tracks', undefined, AudioBitrateProfile.MUSIC_HIGH_QUALITY);
11129
11344
  this.audioEnabledSubject = new BehaviorSubject(true);
11130
11345
  this.settingsSubject = new BehaviorSubject(undefined);
11131
11346
  /**
@@ -11174,7 +11389,7 @@ class ScreenShareState extends InputMediaDeviceManagerState {
11174
11389
  }
11175
11390
  }
11176
11391
 
11177
- class ScreenShareManager extends InputMediaDeviceManager {
11392
+ class ScreenShareManager extends AudioDeviceManager {
11178
11393
  constructor(call) {
11179
11394
  super(call, new ScreenShareState(), TrackType.SCREEN_SHARE);
11180
11395
  }
@@ -11184,6 +11399,7 @@ class ScreenShareManager extends InputMediaDeviceManager {
11184
11399
  const maybeTargetResolution = settings?.screensharing.target_resolution;
11185
11400
  if (maybeTargetResolution) {
11186
11401
  this.setDefaultConstraints({
11402
+ ...this.state.defaultConstraints,
11187
11403
  video: {
11188
11404
  width: maybeTargetResolution.width,
11189
11405
  height: maybeTargetResolution.height,
@@ -11240,6 +11456,19 @@ class ScreenShareManager extends InputMediaDeviceManager {
11240
11456
  }
11241
11457
  return stream;
11242
11458
  }
11459
+ doSetAudioBitrateProfile(profile) {
11460
+ const { defaultConstraints } = this.state;
11461
+ const baseAudioConstraints = typeof defaultConstraints?.audio !== 'boolean'
11462
+ ? defaultConstraints?.audio
11463
+ : null;
11464
+ this.setDefaultConstraints({
11465
+ ...defaultConstraints,
11466
+ audio: {
11467
+ ...baseAudioConstraints,
11468
+ ...createAudioConstraints(profile),
11469
+ },
11470
+ });
11471
+ }
11243
11472
  async stopPublishStream() {
11244
11473
  return this.call.stopPublish(TrackType.SCREEN_SHARE, TrackType.SCREEN_SHARE_AUDIO);
11245
11474
  }
@@ -11253,19 +11482,27 @@ class ScreenShareManager extends InputMediaDeviceManager {
11253
11482
 
11254
11483
  class SpeakerState {
11255
11484
  constructor(tracer) {
11485
+ this.tracer = tracer;
11256
11486
  this.selectedDeviceSubject = new BehaviorSubject('');
11257
11487
  this.volumeSubject = new BehaviorSubject(1);
11258
11488
  /**
11259
11489
  * [Tells if the browser supports audio output change on 'audio' elements](https://developer.mozilla.org/en-US/docs/Web/API/HTMLMediaElement/setSinkId).
11260
11490
  */
11261
11491
  this.isDeviceSelectionSupported = checkIfAudioOutputChangeSupported();
11262
- this.tracer = tracer;
11492
+ /**
11493
+ * An Observable that emits the currently selected device
11494
+ *
11495
+ * Note: this feature is not supported in React Native
11496
+ */
11263
11497
  this.selectedDevice$ = this.selectedDeviceSubject
11264
11498
  .asObservable()
11265
11499
  .pipe(distinctUntilChanged());
11266
- this.volume$ = this.volumeSubject
11267
- .asObservable()
11268
- .pipe(distinctUntilChanged());
11500
+ /**
11501
+ * An Observable that emits the currently selected volume
11502
+ *
11503
+ * Note: this feature is not supported in React Native
11504
+ */
11505
+ this.volume$ = this.volumeSubject.asObservable().pipe(distinctUntilChanged());
11269
11506
  }
11270
11507
  /**
11271
11508
  * The currently selected device
@@ -11869,7 +12106,6 @@ class Call {
11869
12106
  if ([CallingState.JOINED, CallingState.JOINING].includes(callingState)) {
11870
12107
  throw new Error(`Illegal State: call.join() shall be called only once`);
11871
12108
  }
11872
- this.state.setCallingState(CallingState.JOINING);
11873
12109
  // we will count the number of join failures per SFU.
11874
12110
  // once the number of failures reaches 2, we will piggyback on the `migrating_from`
11875
12111
  // field to force the coordinator to provide us another SFU
@@ -11898,8 +12134,6 @@ class Call {
11898
12134
  joinData.migrating_from = sfuId;
11899
12135
  }
11900
12136
  if (attempt === maxJoinRetries - 1) {
11901
- // restore the previous call state if the join-flow fails
11902
- this.state.setCallingState(callingState);
11903
12137
  throw err;
11904
12138
  }
11905
12139
  }
@@ -11959,6 +12193,7 @@ class Call {
11959
12193
  })
11960
12194
  : previousSfuClient;
11961
12195
  this.sfuClient = sfuClient;
12196
+ this.unifiedSessionId ?? (this.unifiedSessionId = sfuClient.sessionId);
11962
12197
  this.dynascaleManager.setSfuClient(sfuClient);
11963
12198
  const clientDetails = await getClientDetails();
11964
12199
  // we don't need to send JoinRequest if we are re-using an existing healthy SFU client
@@ -11982,6 +12217,7 @@ class Call {
11982
12217
  : [];
11983
12218
  try {
11984
12219
  const { callState, fastReconnectDeadlineSeconds, publishOptions } = await sfuClient.join({
12220
+ unifiedSessionId: this.unifiedSessionId,
11985
12221
  subscriberSdp,
11986
12222
  publisherSdp,
11987
12223
  clientDetails,
@@ -12027,6 +12263,7 @@ class Call {
12027
12263
  statsOptions,
12028
12264
  publishOptions: this.currentPublishOptions || [],
12029
12265
  closePreviousInstances: !performingMigration,
12266
+ unifiedSessionId: this.unifiedSessionId,
12030
12267
  });
12031
12268
  }
12032
12269
  // make sure we only track connection timing if we are not calling this method as part of a reconnection flow
@@ -12153,7 +12390,7 @@ class Call {
12153
12390
  * @internal
12154
12391
  */
12155
12392
  this.initPublisherAndSubscriber = (opts) => {
12156
- const { sfuClient, connectionConfig, clientDetails, statsOptions, publishOptions, closePreviousInstances, } = opts;
12393
+ const { sfuClient, connectionConfig, clientDetails, statsOptions, publishOptions, closePreviousInstances, unifiedSessionId, } = opts;
12157
12394
  const { enable_rtc_stats: enableTracing } = statsOptions;
12158
12395
  if (closePreviousInstances && this.subscriber) {
12159
12396
  this.subscriber.dispose();
@@ -12208,7 +12445,6 @@ class Call {
12208
12445
  this.tracer.setEnabled(enableTracing);
12209
12446
  this.sfuStatsReporter?.stop();
12210
12447
  if (statsOptions?.reporting_interval_ms > 0) {
12211
- this.unifiedSessionId ?? (this.unifiedSessionId = sfuClient.sessionId);
12212
12448
  this.sfuStatsReporter = new SfuStatsReporter(sfuClient, {
12213
12449
  clientDetails,
12214
12450
  options: statsOptions,
@@ -12218,7 +12454,7 @@ class Call {
12218
12454
  camera: this.camera,
12219
12455
  state: this.state,
12220
12456
  tracer: this.tracer,
12221
- unifiedSessionId: this.unifiedSessionId,
12457
+ unifiedSessionId,
12222
12458
  });
12223
12459
  this.sfuStatsReporter.start();
12224
12460
  }
@@ -12591,10 +12827,11 @@ class Call {
12591
12827
  *
12592
12828
  * @param mediaStream the media stream to publish.
12593
12829
  * @param trackType the type of the track to announce.
12830
+ * @param options the publish options.
12594
12831
  */
12595
- this.publish = async (mediaStream, trackType) => {
12832
+ this.publish = async (mediaStream, trackType, options) => {
12596
12833
  if (!this.sfuClient)
12597
- throw new Error(`Call not joined yet.`);
12834
+ throw new Error(`Call is not joined yet`);
12598
12835
  // joining is in progress, and we should wait until the client is ready
12599
12836
  await this.sfuClient.joinTask;
12600
12837
  if (!this.permissionsContext.canPublish(trackType)) {
@@ -12612,14 +12849,15 @@ class Call {
12612
12849
  throw new Error(`Can't publish ended tracks.`);
12613
12850
  }
12614
12851
  pushToIfMissing(this.trackPublishOrder, trackType);
12615
- await this.publisher.publish(track, trackType);
12852
+ await this.publisher.publish(track, trackType, options);
12616
12853
  const trackTypes = [trackType];
12617
12854
  if (trackType === TrackType.SCREEN_SHARE) {
12618
12855
  const [audioTrack] = mediaStream.getAudioTracks();
12619
12856
  if (audioTrack) {
12620
- pushToIfMissing(this.trackPublishOrder, TrackType.SCREEN_SHARE_AUDIO);
12621
- await this.publisher.publish(audioTrack, TrackType.SCREEN_SHARE_AUDIO);
12622
- trackTypes.push(TrackType.SCREEN_SHARE_AUDIO);
12857
+ const screenShareAudio = TrackType.SCREEN_SHARE_AUDIO;
12858
+ pushToIfMissing(this.trackPublishOrder, screenShareAudio);
12859
+ await this.publisher.publish(audioTrack, screenShareAudio, options);
12860
+ trackTypes.push(screenShareAudio);
12623
12861
  }
12624
12862
  }
12625
12863
  if (track.kind === 'video') {
@@ -14540,7 +14778,7 @@ class StreamClient {
14540
14778
  this.getUserAgent = () => {
14541
14779
  if (!this.cachedUserAgent) {
14542
14780
  const { clientAppIdentifier = {} } = this.options;
14543
- const { sdkName = 'js', sdkVersion = "1.31.0", ...extras } = clientAppIdentifier;
14781
+ const { sdkName = 'js', sdkVersion = "1.33.0", ...extras } = clientAppIdentifier;
14544
14782
  this.cachedUserAgent = [
14545
14783
  `stream-video-${sdkName}-v${sdkVersion}`,
14546
14784
  ...Object.entries(extras).map(([key, value]) => `${key}=${value}`),
@@ -14729,6 +14967,7 @@ class StreamVideoClient {
14729
14967
  this.effectsRegistered = false;
14730
14968
  this.eventHandlersToUnregister = [];
14731
14969
  this.connectionConcurrencyTag = Symbol('connectionConcurrencyTag');
14970
+ this.rejectCallWhenBusy = false;
14732
14971
  this.registerClientInstance = (apiKey, user) => {
14733
14972
  const instanceKey = getInstanceKey(apiKey, user);
14734
14973
  if (StreamVideoClient._instances.has(instanceKey)) {
@@ -14774,7 +15013,16 @@ class StreamVideoClient {
14774
15013
  let call = this.writeableStateStore.findCall(e.call.type, e.call.id);
14775
15014
  if (call) {
14776
15015
  if (ringing) {
14777
- await call.updateFromRingingEvent(e);
15016
+ if (this.shouldRejectCall(call.cid)) {
15017
+ this.logger('info', `Leaving call with busy reject reason ${call.cid} because user is busy`);
15018
+ // remove the instance from the state store
15019
+ await call.leave();
15020
+ // explicitly reject the call with busy reason as calling state was not ringing before and leave would not call it therefore
15021
+ await call.reject('busy');
15022
+ }
15023
+ else {
15024
+ await call.updateFromRingingEvent(e);
15025
+ }
14778
15026
  }
14779
15027
  else {
14780
15028
  call.state.updateFromCallResponse(e.call);
@@ -14789,11 +15037,19 @@ class StreamVideoClient {
14789
15037
  clientStore: this.writeableStateStore,
14790
15038
  ringing,
14791
15039
  });
14792
- call.state.updateFromCallResponse(e.call);
14793
15040
  if (ringing) {
14794
- await call.get();
15041
+ if (this.shouldRejectCall(call.cid)) {
15042
+ this.logger('info', `Rejecting call ${call.cid} because user is busy`);
15043
+ // call is not in the state store yet, so just reject api is enough
15044
+ await call.reject('busy');
15045
+ }
15046
+ else {
15047
+ await call.updateFromRingingEvent(e);
15048
+ await call.get();
15049
+ }
14795
15050
  }
14796
15051
  else {
15052
+ call.state.updateFromCallResponse(e.call);
14797
15053
  this.writeableStateStore.registerCall(call);
14798
15054
  this.logger('info', `New call created and registered: ${call.cid}`);
14799
15055
  }
@@ -15060,6 +15316,16 @@ class StreamVideoClient {
15060
15316
  this.connectAnonymousUser = async (user, tokenOrProvider) => {
15061
15317
  return withoutConcurrency(this.connectionConcurrencyTag, () => this.streamClient.connectAnonymousUser(user, tokenOrProvider));
15062
15318
  };
15319
+ this.shouldRejectCall = (currentCallId) => {
15320
+ if (!this.rejectCallWhenBusy)
15321
+ return false;
15322
+ const hasOngoingRingingCall = this.state.calls.some((c) => c.cid !== currentCallId &&
15323
+ c.ringing &&
15324
+ c.state.callingState !== CallingState.IDLE &&
15325
+ c.state.callingState !== CallingState.LEFT &&
15326
+ c.state.callingState !== CallingState.RECONNECTING_FAILED);
15327
+ return hasOngoingRingingCall;
15328
+ };
15063
15329
  const apiKey = typeof apiKeyOrArgs === 'string' ? apiKeyOrArgs : apiKeyOrArgs.apiKey;
15064
15330
  const clientOptions = typeof apiKeyOrArgs === 'string' ? opts : apiKeyOrArgs.options;
15065
15331
  if (clientOptions?.enableTimerWorker)
@@ -15067,6 +15333,7 @@ class StreamVideoClient {
15067
15333
  const rootLogger = clientOptions?.logger || logToConsole;
15068
15334
  setLogger(rootLogger, clientOptions?.logLevel || 'warn');
15069
15335
  this.logger = getLogger(['client']);
15336
+ this.rejectCallWhenBusy = clientOptions?.rejectCallWhenBusy ?? false;
15070
15337
  this.streamClient = createCoordinatorClient(apiKey, clientOptions);
15071
15338
  this.writeableStateStore = new StreamVideoWriteableStateStore();
15072
15339
  this.readOnlyStateStore = new StreamVideoReadOnlyStateStore(this.writeableStateStore);
@@ -15108,5 +15375,5 @@ class StreamVideoClient {
15108
15375
  }
15109
15376
  StreamVideoClient._instances = new Map();
15110
15377
 
15111
- export { AudioSettingsRequestDefaultDeviceEnum, AudioSettingsResponseDefaultDeviceEnum, browsers as Browsers, Call, CallState, CallType, CallTypes, CallingState, CameraManager, CameraManagerState, CreateDeviceRequestPushProviderEnum, DebounceType, DynascaleManager, ErrorFromResponse, FrameRecordingSettingsRequestModeEnum, FrameRecordingSettingsRequestQualityEnum, FrameRecordingSettingsResponseModeEnum, IngressAudioEncodingOptionsRequestChannelsEnum, IngressVideoLayerRequestCodecEnum, InputMediaDeviceManager, InputMediaDeviceManagerState, LayoutSettingsRequestNameEnum, MicrophoneManager, MicrophoneManagerState, NoiseCancellationSettingsModeEnum, OwnCapability, RNSpeechDetector, RTMPBroadcastRequestQualityEnum, RTMPSettingsRequestQualityEnum, RecordSettingsRequestModeEnum, RecordSettingsRequestQualityEnum, rxUtils as RxUtils, ScreenShareManager, ScreenShareState, events as SfuEvents, models as SfuModels, SpeakerManager, SpeakerState, StartClosedCaptionsRequestLanguageEnum, StartTranscriptionRequestLanguageEnum, StreamSfuClient, StreamVideoClient, StreamVideoReadOnlyStateStore, StreamVideoWriteableStateStore, TranscriptionSettingsRequestClosedCaptionModeEnum, TranscriptionSettingsRequestLanguageEnum, TranscriptionSettingsRequestModeEnum, TranscriptionSettingsResponseClosedCaptionModeEnum, TranscriptionSettingsResponseLanguageEnum, TranscriptionSettingsResponseModeEnum, VideoSettingsRequestCameraFacingEnum, VideoSettingsResponseCameraFacingEnum, ViewportTracker, VisibilityState, checkIfAudioOutputChangeSupported, combineComparators, conditional, createSoundDetector, defaultSortPreset, descending, deviceIds$, disposeOfMediaStream, dominantSpeaker, getAudioBrowserPermission, getAudioDevices, getAudioOutputDevices, getAudioStream, getClientDetails, getDeviceState, getLogLevel, getLogger, getScreenShareStream, getSdkInfo, getVideoBrowserPermission, getVideoDevices, getVideoStream, getWebRTCInfo, hasAudio, hasPausedTrack, hasScreenShare, hasScreenShareAudio, hasVideo, 
isPinned, livestreamOrAudioRoomSortPreset, logLevels, logToConsole, name, noopComparator, paginatedLayoutSortPreset, pinned, publishingAudio, publishingVideo, reactionType, resolveDeviceId, role, screenSharing, setDeviceInfo, setLogLevel, setLogger, setOSInfo, setPowerState, setSdkInfo, setThermalState, setWebRTCInfo, speakerLayoutSortPreset, speaking, withParticipantSource };
15378
+ export { AudioSettingsRequestDefaultDeviceEnum, AudioSettingsResponseDefaultDeviceEnum, browsers as Browsers, Call, CallState, CallType, CallTypes, CallingState, CameraManager, CameraManagerState, CreateDeviceRequestPushProviderEnum, DebounceType, DeviceManager, DeviceManagerState, DynascaleManager, ErrorFromResponse, FrameRecordingSettingsRequestModeEnum, FrameRecordingSettingsRequestQualityEnum, FrameRecordingSettingsResponseModeEnum, IngressAudioEncodingOptionsRequestChannelsEnum, IngressVideoLayerRequestCodecEnum, LayoutSettingsRequestNameEnum, MicrophoneManager, MicrophoneManagerState, NoiseCancellationSettingsModeEnum, OwnCapability, RNSpeechDetector, RTMPBroadcastRequestQualityEnum, RTMPSettingsRequestQualityEnum, RecordSettingsRequestModeEnum, RecordSettingsRequestQualityEnum, rxUtils as RxUtils, ScreenShareManager, ScreenShareState, events as SfuEvents, models as SfuModels, SpeakerManager, SpeakerState, StartClosedCaptionsRequestLanguageEnum, StartTranscriptionRequestLanguageEnum, StreamSfuClient, StreamVideoClient, StreamVideoReadOnlyStateStore, StreamVideoWriteableStateStore, TranscriptionSettingsRequestClosedCaptionModeEnum, TranscriptionSettingsRequestLanguageEnum, TranscriptionSettingsRequestModeEnum, TranscriptionSettingsResponseClosedCaptionModeEnum, TranscriptionSettingsResponseLanguageEnum, TranscriptionSettingsResponseModeEnum, VideoSettingsRequestCameraFacingEnum, VideoSettingsResponseCameraFacingEnum, ViewportTracker, VisibilityState, checkIfAudioOutputChangeSupported, combineComparators, conditional, createSoundDetector, defaultSortPreset, descending, deviceIds$, disposeOfMediaStream, dominantSpeaker, getAudioBrowserPermission, getAudioDevices, getAudioOutputDevices, getAudioStream, getClientDetails, getDeviceState, getLogLevel, getLogger, getScreenShareStream, getSdkInfo, getVideoBrowserPermission, getVideoDevices, getVideoStream, getWebRTCInfo, hasAudio, hasPausedTrack, hasScreenShare, hasScreenShareAudio, hasVideo, isPinned, 
livestreamOrAudioRoomSortPreset, logLevels, logToConsole, name, noopComparator, paginatedLayoutSortPreset, pinned, publishingAudio, publishingVideo, reactionType, resolveDeviceId, role, screenSharing, setDeviceInfo, setLogLevel, setLogger, setOSInfo, setPowerState, setSdkInfo, setThermalState, setWebRTCInfo, speakerLayoutSortPreset, speaking, withParticipantSource };
15112
15379
  //# sourceMappingURL=index.browser.es.js.map