@stream-io/video-client 1.41.3 → 1.42.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/CHANGELOG.md CHANGED
@@ -2,6 +2,23 @@
 
 This file was generated using [@jscutlery/semver](https://github.com/jscutlery/semver).
 
+ ## [1.42.1](https://github.com/GetStream/stream-video-js/compare/@stream-io/video-client-1.42.0...@stream-io/video-client-1.42.1) (2026-02-10)
+
+ ### Bug Fixes
+
+ - respect device permissions when detecting speech while muted ([#2115](https://github.com/GetStream/stream-video-js/issues/2115)) ([fe98768](https://github.com/GetStream/stream-video-js/commit/fe98768a9bf695fc5355905939884594c11ac2b9)), closes [#2110](https://github.com/GetStream/stream-video-js/issues/2110)
+
+ ## [1.42.0](https://github.com/GetStream/stream-video-js/compare/@stream-io/video-client-1.41.3...@stream-io/video-client-1.42.0) (2026-02-06)
+
+ ### Features
+
+ - Detectors for broken microphone setup ([#2090](https://github.com/GetStream/stream-video-js/issues/2090)) ([552b3f4](https://github.com/GetStream/stream-video-js/commit/552b3f4e3c54e0b6fa67221cd510f4ea1f6f8a61))
+
+ ### Bug Fixes
+
+ - **react:** apply defaultConstraints to speaking-while-muted detection stream ([#2103](https://github.com/GetStream/stream-video-js/issues/2103)) ([28b5538](https://github.com/GetStream/stream-video-js/commit/28b55380778723fc308d37396c8095a5a3ef7aa2))
+ - start speaking while muted detection in pristine state too ([#2110](https://github.com/GetStream/stream-video-js/issues/2110)) ([bc093bc](https://github.com/GetStream/stream-video-js/commit/bc093bc3ac2451541524b134a9044131a69964af))
+
 ## [1.41.3](https://github.com/GetStream/stream-video-js/compare/@stream-io/video-client-1.41.2...@stream-io/video-client-1.41.3) (2026-01-30)
 
 ### Bug Fixes
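The 1.42.0 entry above introduces detectors for a broken microphone setup. In the bundled code further down, the `MicrophoneManager` reports the result of that detection as a `mic.capture_report` event (with `call_cid`, `capturesAudio`, `deviceId`, and `label` fields) dispatched through the client. A minimal sketch of how an application might surface that report, assuming the client-level `on()` listener also receives this locally dispatched event type (the `watchMicCapture` helper name is illustrative):

```ts
import type { StreamVideoClient } from '@stream-io/video-client';

// Hedged sketch: subscribes to the `mic.capture_report` event whose payload
// shape is shown in the bundled-code hunks below. Whether a client-level
// listener receives this locally dispatched event may vary by SDK version.
function watchMicCapture(client: StreamVideoClient) {
  return client.on('mic.capture_report', (event: any) => {
    if (!event.capturesAudio) {
      console.warn(
        `Microphone "${event.label ?? event.deviceId}" does not seem to capture audio`,
      );
    }
  });
}
```

The remaining hunks in this diff come from the package's bundled JavaScript (the `index.browser.es.js` build, per the `sourceMappingURL` comment at the end of the diff).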
@@ -4789,7 +4789,7 @@ const hasVideo = (p) => p.publishedTracks.includes(TrackType.VIDEO);
 *
 * @param p the participant to check.
 */
- const hasAudio = (p) => p.publishedTracks.includes(TrackType.AUDIO);
+ const hasAudio$1 = (p) => p.publishedTracks.includes(TrackType.AUDIO);
 /**
 * Check if a participant is screen sharing.
 *
@@ -4890,8 +4890,8 @@ const publishingVideo = (a, b) => {
 * @param b the second participant.
 */
 const publishingAudio = (a, b) => {
- const hasA = hasAudio(a);
- const hasB = hasAudio(b);
+ const hasA = hasAudio$1(a);
+ const hasB = hasAudio$1(b);
 if (hasA && !hasB)
 return -1;
 if (!hasA && hasB)
@@ -6188,7 +6188,7 @@ const getSdkVersion = (sdk) => {
 return sdk ? `${sdk.major}.${sdk.minor}.${sdk.patch}` : '0.0.0-development';
 };
 
- const version = "1.41.3";
+ const version = "1.42.1";
 const [major, minor, patch] = version.split('.');
 let sdkInfo = {
 type: SdkType.PLAIN_JAVASCRIPT,
@@ -10379,7 +10379,7 @@ const getAudioStream = async (trackConstraints, tracer) => {
 videoLoggerSystem
 .getLogger('devices')
 .warn('Failed to get audio stream, will try again with relaxed constraints', { error, constraints, relaxedConstraints });
- return getAudioStream(relaxedConstraints);
+ return getAudioStream(relaxedConstraints, tracer);
 }
 videoLoggerSystem.getLogger('devices').error('Failed to get audio stream', {
 error,
@@ -11376,9 +11376,6 @@ class MicrophoneManagerState extends AudioDeviceManagerState {
 }
 }
 
- const DETECTION_FREQUENCY_IN_MS = 500;
- const AUDIO_LEVEL_THRESHOLD = 150;
- const FFT_SIZE = 128;
 /**
 * Creates a new sound detector.
 *
@@ -11388,7 +11385,7 @@ const FFT_SIZE = 128;
 * @returns a clean-up function which once invoked stops the sound detector.
 */
 const createSoundDetector = (audioStream, onSoundDetectedStateChanged, options = {}) => {
- const { detectionFrequencyInMs = DETECTION_FREQUENCY_IN_MS, audioLevelThreshold = AUDIO_LEVEL_THRESHOLD, fftSize = FFT_SIZE, destroyStreamOnStop = true, } = options;
+ const { detectionFrequencyInMs = 500, audioLevelThreshold = 150, fftSize = 128, destroyStreamOnStop = true, } = options;
 const audioContext = new AudioContext();
 const analyser = audioContext.createAnalyser();
 analyser.fftSize = fftSize;
@@ -11429,6 +11426,101 @@ const createSoundDetector = (audioStream, onSoundDetectedStateChanged, options =
 };
 };
 
+ /**
+ * Analyzes frequency data to determine if audio is being captured.
+ */
+ const hasAudio = (analyser, threshold) => {
+ const data = new Uint8Array(analyser.frequencyBinCount);
+ analyser.getByteFrequencyData(data);
+ return data.some((value) => value > threshold);
+ };
+ /** Helper for "no event" transitions */
+ const noEmit = (nextState) => ({
+ shouldEmit: false,
+ nextState,
+ });
+ /** Helper for event-emitting transitions */
+ const emit = (capturesAudio, nextState) => ({ shouldEmit: true, nextState, capturesAudio });
+ /**
+ * State transition function - computes next state and whether to emit an event.
+ */
+ const transitionState = (state, audioDetected, options) => {
+ if (audioDetected) {
+ return state.kind === 'IDLE' || state.kind === 'EMITTING'
+ ? emit(true, state)
+ : noEmit(state);
+ }
+ const { noAudioThresholdMs, emitIntervalMs } = options;
+ const now = Date.now();
+ switch (state.kind) {
+ case 'IDLE':
+ return noEmit({ kind: 'DETECTING', noAudioStartTime: now });
+ case 'DETECTING': {
+ const { noAudioStartTime } = state;
+ const elapsed = now - noAudioStartTime;
+ return elapsed >= noAudioThresholdMs
+ ? emit(false, { kind: 'EMITTING', noAudioStartTime, lastEmitTime: now })
+ : noEmit(state);
+ }
+ case 'EMITTING': {
+ const timeSinceLastEmit = now - state.lastEmitTime;
+ return timeSinceLastEmit >= emitIntervalMs
+ ? emit(false, { ...state, lastEmitTime: now })
+ : noEmit(state);
+ }
+ }
+ };
+ /**
+ * Creates and configures an audio analyzer for the given stream.
+ */
+ const createAudioAnalyzer = (audioStream, fftSize) => {
+ const audioContext = new AudioContext();
+ const analyser = audioContext.createAnalyser();
+ analyser.fftSize = fftSize;
+ const microphone = audioContext.createMediaStreamSource(audioStream);
+ microphone.connect(analyser);
+ return { audioContext, analyser };
+ };
+ /**
+ * Creates a new no-audio detector that monitors continuous absence of audio on an audio stream.
+ *
+ * @param audioStream the audio stream to observe.
+ * @param options custom options for the no-audio detector.
+ * @returns a cleanup function which once invoked stops the no-audio detector.
+ */
+ const createNoAudioDetector = (audioStream, options) => {
+ const { detectionFrequencyInMs = 350, audioLevelThreshold = 0, fftSize = 256, onCaptureStatusChange, } = options;
+ let state = { kind: 'IDLE' };
+ const { audioContext, analyser } = createAudioAnalyzer(audioStream, fftSize);
+ const detectionIntervalId = setInterval(() => {
+ const [audioTrack] = audioStream.getAudioTracks();
+ if (!audioTrack?.enabled || audioTrack.readyState === 'ended') {
+ state = { kind: 'IDLE' };
+ return;
+ }
+ const audioDetected = hasAudio(analyser, audioLevelThreshold);
+ const transition = transitionState(state, audioDetected, options);
+ state = transition.nextState;
+ if (!transition.shouldEmit)
+ return;
+ const { capturesAudio } = transition;
+ onCaptureStatusChange(capturesAudio);
+ if (capturesAudio) {
+ stop().catch((err) => {
+ const logger = videoLoggerSystem.getLogger('NoAudioDetector');
+ logger.error('Error stopping no-audio detector', err);
+ });
+ }
+ }, detectionFrequencyInMs);
+ async function stop() {
+ clearInterval(detectionIntervalId);
+ if (audioContext.state !== 'closed') {
+ await audioContext.close();
+ }
+ }
+ return stop;
+ };
+
 class RNSpeechDetector {
 constructor(externalAudioStream) {
 this.pc1 = new RTCPeerConnection({});
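The `createNoAudioDetector` helper added above drives a small state machine: silence moves `IDLE` to `DETECTING`; once silence has lasted `noAudioThresholdMs`, it moves to `EMITTING` and calls `onCaptureStatusChange(false)` every `emitIntervalMs`; whenever audio is detected in the `IDLE` or `EMITTING` state it calls `onCaptureStatusChange(true)` and stops itself. The helper is internal to this bundle (it is not in the export list at the bottom of the diff), so the following usage sketch is purely illustrative of how its options fit together:

```ts
// Illustrative only: createNoAudioDetector is an internal helper in this bundle,
// not a public export, so applications cannot import it directly.
const stream = await navigator.mediaDevices.getUserMedia({ audio: true });

const stopDetector = createNoAudioDetector(stream, {
  noAudioThresholdMs: 5000, // report after 5 s of continuous silence
  emitIntervalMs: 5000, // and repeat the "no audio" report every 5 s after that
  onCaptureStatusChange: (capturesAudio: boolean) => {
    console.log(capturesAudio ? 'audio detected again' : 'no audio captured');
  },
});

// later, when done observing (the detector also stops itself once audio returns):
await stopDetector();
```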
@@ -11440,16 +11532,10 @@ class RNSpeechDetector {
 */
 async start(onSoundDetectedStateChanged) {
 try {
- let audioStream;
- if (this.externalAudioStream != null) {
- audioStream = this.externalAudioStream;
- }
- else {
- audioStream = await navigator.mediaDevices.getUserMedia({
- audio: true,
- });
- this.audioStream = audioStream;
- }
+ const audioStream = this.externalAudioStream != null
+ ? this.externalAudioStream
+ : await navigator.mediaDevices.getUserMedia({ audio: true });
+ this.audioStream = audioStream;
 this.pc1.addEventListener('icecandidate', async (e) => {
 await this.pc2.addIceCandidate(e.candidate);
 });
@@ -11473,9 +11559,9 @@ class RNSpeechDetector {
 const answer = await this.pc2.createAnswer();
 await this.pc1.setRemoteDescription(answer);
 await this.pc2.setLocalDescription(answer);
- const unsub = this.onSpeakingDetectedStateChange(onSoundDetectedStateChanged);
+ const unsubscribe = this.onSpeakingDetectedStateChange(onSoundDetectedStateChanged);
 return () => {
- unsub();
+ unsubscribe();
 this.stop();
 };
 }
@@ -11515,7 +11601,7 @@ class RNSpeechDetector {
 const silenceTimeout = 5000; // Reset baseline after 5 seconds of silence
 const checkAudioLevel = async () => {
 try {
- const stats = (await this.pc1.getStats());
+ const stats = await this.pc1.getStats();
 const report = flatten(stats);
 // Audio levels are present inside stats of type `media-source` and of kind `audio`
 const audioMediaSourceStats = report.find((stat) => stat.type === 'media-source' &&
@@ -11569,8 +11655,7 @@ class RNSpeechDetector {
 logger.error('error checking audio level from stats', error);
 }
 };
- // Call checkAudioLevel periodically (every 100ms)
- const intervalId = setInterval(checkAudioLevel, 100);
+ const intervalId = setInterval(checkAudioLevel, 250);
 return () => {
 clearInterval(intervalId);
 clearTimeout(speechTimer);
@@ -11596,15 +11681,19 @@ class MicrophoneManager extends AudioDeviceManager {
 super(call, new MicrophoneManagerState(disableMode), TrackType.AUDIO);
 this.speakingWhileMutedNotificationEnabled = true;
 this.soundDetectorConcurrencyTag = Symbol('soundDetectorConcurrencyTag');
+ this.silenceThresholdMs = 5000;
 }
 setup() {
+ if (this.areSubscriptionsSetUp)
+ return;
 super.setup();
 this.subscriptions.push(createSafeAsyncSubscription(combineLatest([
 this.call.state.callingState$,
 this.call.state.ownCapabilities$,
 this.state.selectedDevice$,
 this.state.status$,
- ]), async ([callingState, ownCapabilities, deviceId, status]) => {
+ this.state.browserPermissionState$,
+ ]), async ([callingState, ownCapabilities, deviceId, status, permissionState,]) => {
 try {
 if (callingState === CallingState.LEFT) {
 await this.stopSpeakingWhileMutedDetection();
@@ -11614,7 +11703,8 @@ class MicrophoneManager extends AudioDeviceManager {
 if (!this.speakingWhileMutedNotificationEnabled)
 return;
 if (ownCapabilities.includes(OwnCapability.SEND_AUDIO)) {
- if (status === 'disabled') {
+ const hasPermission = await this.hasPermission(permissionState);
+ if (hasPermission && status !== 'enabled') {
 await this.startSpeakingWhileMutedDetection(deviceId);
 }
 else {
@@ -11663,6 +11753,40 @@ class MicrophoneManager extends AudioDeviceManager {
 });
 }
 }));
+ if (!isReactNative()) {
+ const unsubscribe = createSafeAsyncSubscription(combineLatest([this.state.status$, this.state.mediaStream$]), async ([status, mediaStream]) => {
+ if (this.noAudioDetectorCleanup) {
+ const cleanup = this.noAudioDetectorCleanup;
+ this.noAudioDetectorCleanup = undefined;
+ await cleanup().catch((err) => {
+ this.logger.warn('Failed to stop no-audio detector', err);
+ });
+ }
+ if (status !== 'enabled' || !mediaStream)
+ return;
+ if (this.silenceThresholdMs <= 0)
+ return;
+ const deviceId = this.state.selectedDevice;
+ const devices = getCurrentValue(this.listDevices());
+ const label = devices.find((d) => d.deviceId === deviceId)?.label;
+ this.noAudioDetectorCleanup = createNoAudioDetector(mediaStream, {
+ noAudioThresholdMs: this.silenceThresholdMs,
+ emitIntervalMs: this.silenceThresholdMs,
+ onCaptureStatusChange: (capturesAudio) => {
+ const event = {
+ type: 'mic.capture_report',
+ call_cid: this.call.cid,
+ capturesAudio,
+ deviceId,
+ label,
+ };
+ this.call.tracer.trace('mic.capture_report', event);
+ this.call.streamClient.dispatchEvent(event);
+ },
+ });
+ });
+ this.subscriptions.push(unsubscribe);
+ }
 }
 /**
 * Enables noise cancellation for the microphone.
@@ -11760,6 +11884,45 @@ class MicrophoneManager extends AudioDeviceManager {
 this.speakingWhileMutedNotificationEnabled = false;
 await this.stopSpeakingWhileMutedDetection();
 }
+ /**
+ * Sets the silence threshold in milliseconds for no-audio detection.
+ * When the microphone is enabled but produces no audio for this duration,
+ * a 'mic.capture_report' event will be emitted.
+ *
+ * @param thresholdMs the threshold in milliseconds (default: 5000).
+ * Set to 0 or a negative value to disable no-audio detection.
+ */
+ setSilenceThreshold(thresholdMs) {
+ this.silenceThresholdMs = thresholdMs;
+ }
+ /**
+ * Performs audio capture test on a specific microphone.
+ *
+ * This method is only available in browser environments (not React Native).
+ *
+ * @param deviceId The device ID to test.
+ * @param options Optional test configuration.
+ * @returns Promise that resolves with the test result (true or false).
+ */
+ async performTest(deviceId, options) {
+ if (isReactNative())
+ throw new Error('Not available in React Native');
+ const stream = await this.getStream({ deviceId: { exact: deviceId } });
+ const { testDurationMs = 3000 } = options || {};
+ const { promise, resolve } = promiseWithResolvers();
+ const cleanup = createNoAudioDetector(stream, {
+ noAudioThresholdMs: testDurationMs,
+ emitIntervalMs: testDurationMs,
+ onCaptureStatusChange: async (capturesAudio) => {
+ resolve(capturesAudio);
+ await cleanup().catch((err) => {
+ this.logger.warn('Failed to stop detector during test', err);
+ });
+ disposeOfMediaStream(stream);
+ },
+ });
+ return promise;
+ }
 /**
 * Applies the audio settings to the microphone.
 * @param settings the audio settings to apply.
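Together with the `mic.capture_report` event, this hunk adds two public `MicrophoneManager` methods: `setSilenceThreshold(thresholdMs)` tunes (or, with a non-positive value, disables) the background no-audio detection, and `performTest(deviceId, options)` actively probes a specific microphone and resolves with whether it captured any audio. A hedged sketch of calling them, assuming they are reached through `call.microphone` like the manager's other methods (the `checkMicrophone` helper is illustrative):

```ts
import type { Call } from '@stream-io/video-client';

// Illustrative helper: relaxes the background detection and probes one device.
async function checkMicrophone(call: Call, deviceId: string) {
  // report silence only after 10 s instead of the 5 s default; <= 0 disables it
  call.microphone.setSilenceThreshold(10_000);

  // actively test the device; resolves false if it stays silent for ~3 s
  const capturesAudio = await call.microphone.performTest(deviceId, {
    testDurationMs: 3000,
  });
  return capturesAudio;
}
```

Note that `performTest` throws in React Native, as the JSDoc above states.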
@@ -11808,13 +11971,12 @@ class MicrophoneManager extends AudioDeviceManager {
 }
 async startSpeakingWhileMutedDetection(deviceId) {
 await withoutConcurrency(this.soundDetectorConcurrencyTag, async () => {
- await this.stopSpeakingWhileMutedDetection();
 if (isReactNative()) {
 this.rnSpeechDetector = new RNSpeechDetector();
 const unsubscribe = await this.rnSpeechDetector.start((event) => {
 this.state.setSpeakingWhileMuted(event.isSoundDetected);
 });
- this.soundDetectorCleanup = () => {
+ this.soundDetectorCleanup = async () => {
 unsubscribe();
 this.rnSpeechDetector = undefined;
 };
@@ -11822,6 +11984,7 @@ class MicrophoneManager extends AudioDeviceManager {
 else {
 // Need to start a new stream that's not connected to publisher
 const stream = await this.getStream({
+ ...this.state.defaultConstraints,
 deviceId: { exact: deviceId },
 });
 this.soundDetectorCleanup = createSoundDetector(stream, (event) => {
@@ -11840,6 +12003,20 @@ class MicrophoneManager extends AudioDeviceManager {
 await soundDetectorCleanup();
 });
 }
+ async hasPermission(permissionState) {
+ if (!isReactNative())
+ return permissionState === 'granted';
+ const nativePermissions = globalThis.streamRNVideoSDK?.permissions;
+ if (!nativePermissions)
+ return true; // assume granted
+ try {
+ return await nativePermissions.check('microphone');
+ }
+ catch (err) {
+ this.logger.warn('Failed to check permission', err);
+ return false;
+ }
+ }
 }
 
 class ScreenShareState extends AudioDeviceManagerState {
@@ -11901,6 +12078,8 @@ class ScreenShareManager extends AudioDeviceManager {
 super(call, new ScreenShareState(), TrackType.SCREEN_SHARE);
 }
 setup() {
+ if (this.areSubscriptionsSetUp)
+ return;
 super.setup();
 this.subscriptions.push(createSubscription(this.call.state.settings$, (settings) => {
 const maybeTargetResolution = settings?.screensharing.target_resolution;
@@ -12247,6 +12426,9 @@ class Call {
 this.leaveCallHooks.add(registerEventHandlers(this, this.dispatcher));
 this.registerEffects();
 this.registerReconnectHandlers();
+ // Set up the device managers again. Although this is already done
+ // in the DeviceManager's constructor, they'll need to be re-set up
+ // in the cases where a call instance is recycled (join -> leave -> join).
 this.camera.setup();
 this.microphone.setup();
 this.screenShare.setup();
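The comment added above documents why `setup()` runs again here: a `Call` instance can be reused across join/leave cycles, and the new `areSubscriptionsSetUp` guards (see the `MicrophoneManager.setup()` and `ScreenShareManager.setup()` hunks) keep the repeated calls from duplicating subscriptions. A brief sketch of the lifecycle this protects, given an existing `StreamVideoClient` instance named `client` (the call type and id below are placeholders):

```ts
// Sketch of the recycled-call lifecycle the comment above refers to.
const call = client.call('default', 'my-call-id');
await call.join();
await call.leave();
await call.join(); // device managers are set up again without double subscriptions
```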
@@ -15346,7 +15528,7 @@ class StreamClient {
 this.getUserAgent = () => {
 if (!this.cachedUserAgent) {
 const { clientAppIdentifier = {} } = this.options;
- const { sdkName = 'js', sdkVersion = "1.41.3", ...extras } = clientAppIdentifier;
+ const { sdkName = 'js', sdkVersion = "1.42.1", ...extras } = clientAppIdentifier;
 this.cachedUserAgent = [
 `stream-video-${sdkName}-v${sdkVersion}`,
 ...Object.entries(extras).map(([key, value]) => `${key}=${value}`),
@@ -15982,5 +16164,5 @@ const humanize = (n) => {
  return String(n);
  };
 
- export { AudioSettingsRequestDefaultDeviceEnum, AudioSettingsResponseDefaultDeviceEnum, browsers as Browsers, Call, CallRecordingFailedEventRecordingTypeEnum, CallRecordingReadyEventRecordingTypeEnum, CallRecordingStartedEventRecordingTypeEnum, CallRecordingStoppedEventRecordingTypeEnum, CallState, CallType, CallTypes, CallingState, CameraManager, CameraManagerState, CreateDeviceRequestPushProviderEnum, DebounceType, DeviceManager, DeviceManagerState, DynascaleManager, ErrorFromResponse, FrameRecordingSettingsRequestModeEnum, FrameRecordingSettingsRequestQualityEnum, FrameRecordingSettingsResponseModeEnum, IndividualRecordingSettingsRequestModeEnum, IndividualRecordingSettingsResponseModeEnum, IngressAudioEncodingOptionsRequestChannelsEnum, IngressSourceRequestFpsEnum, IngressVideoLayerRequestCodecEnum, LayoutSettingsRequestNameEnum, MicrophoneManager, MicrophoneManagerState, NoiseCancellationSettingsModeEnum, OwnCapability, RNSpeechDetector, RTMPBroadcastRequestQualityEnum, RTMPSettingsRequestQualityEnum, RawRecordingSettingsRequestModeEnum, RawRecordingSettingsResponseModeEnum, RecordSettingsRequestModeEnum, RecordSettingsRequestQualityEnum, rxUtils as RxUtils, ScreenShareManager, ScreenShareState, events as SfuEvents, SfuJoinError, models as SfuModels, SpeakerManager, SpeakerState, StartClosedCaptionsRequestLanguageEnum, StartTranscriptionRequestLanguageEnum, StreamSfuClient, StreamVideoClient, StreamVideoReadOnlyStateStore, StreamVideoWriteableStateStore, TranscriptionSettingsRequestClosedCaptionModeEnum, TranscriptionSettingsRequestLanguageEnum, TranscriptionSettingsRequestModeEnum, TranscriptionSettingsResponseClosedCaptionModeEnum, TranscriptionSettingsResponseLanguageEnum, TranscriptionSettingsResponseModeEnum, VideoSettingsRequestCameraFacingEnum, VideoSettingsResponseCameraFacingEnum, ViewportTracker, VisibilityState, checkIfAudioOutputChangeSupported, combineComparators, conditional, createSoundDetector, defaultSortPreset, descending, deviceIds$, disposeOfMediaStream, dominantSpeaker, getAudioBrowserPermission, getAudioDevices, getAudioOutputDevices, getAudioStream, getClientDetails, getDeviceState, getScreenShareStream, getSdkInfo, getVideoBrowserPermission, getVideoDevices, getVideoStream, getWebRTCInfo, hasAudio, hasPausedTrack, hasScreenShare, hasScreenShareAudio, hasVideo, humanize, isPinned, livestreamOrAudioRoomSortPreset, logToConsole, name, noopComparator, paginatedLayoutSortPreset, pinned, publishingAudio, publishingVideo, reactionType, resolveDeviceId, role, screenSharing, setDeviceInfo, setOSInfo, setPowerState, setSdkInfo, setThermalState, setWebRTCInfo, speakerLayoutSortPreset, speaking, videoLoggerSystem, withParticipantSource };
+ export { AudioSettingsRequestDefaultDeviceEnum, AudioSettingsResponseDefaultDeviceEnum, browsers as Browsers, Call, CallRecordingFailedEventRecordingTypeEnum, CallRecordingReadyEventRecordingTypeEnum, CallRecordingStartedEventRecordingTypeEnum, CallRecordingStoppedEventRecordingTypeEnum, CallState, CallType, CallTypes, CallingState, CameraManager, CameraManagerState, CreateDeviceRequestPushProviderEnum, DebounceType, DeviceManager, DeviceManagerState, DynascaleManager, ErrorFromResponse, FrameRecordingSettingsRequestModeEnum, FrameRecordingSettingsRequestQualityEnum, FrameRecordingSettingsResponseModeEnum, IndividualRecordingSettingsRequestModeEnum, IndividualRecordingSettingsResponseModeEnum, IngressAudioEncodingOptionsRequestChannelsEnum, IngressSourceRequestFpsEnum, IngressVideoLayerRequestCodecEnum, LayoutSettingsRequestNameEnum, MicrophoneManager, MicrophoneManagerState, NoiseCancellationSettingsModeEnum, OwnCapability, RNSpeechDetector, RTMPBroadcastRequestQualityEnum, RTMPSettingsRequestQualityEnum, RawRecordingSettingsRequestModeEnum, RawRecordingSettingsResponseModeEnum, RecordSettingsRequestModeEnum, RecordSettingsRequestQualityEnum, rxUtils as RxUtils, ScreenShareManager, ScreenShareState, events as SfuEvents, SfuJoinError, models as SfuModels, SpeakerManager, SpeakerState, StartClosedCaptionsRequestLanguageEnum, StartTranscriptionRequestLanguageEnum, StreamSfuClient, StreamVideoClient, StreamVideoReadOnlyStateStore, StreamVideoWriteableStateStore, TranscriptionSettingsRequestClosedCaptionModeEnum, TranscriptionSettingsRequestLanguageEnum, TranscriptionSettingsRequestModeEnum, TranscriptionSettingsResponseClosedCaptionModeEnum, TranscriptionSettingsResponseLanguageEnum, TranscriptionSettingsResponseModeEnum, VideoSettingsRequestCameraFacingEnum, VideoSettingsResponseCameraFacingEnum, ViewportTracker, VisibilityState, checkIfAudioOutputChangeSupported, combineComparators, conditional, createSoundDetector, defaultSortPreset, descending, deviceIds$, disposeOfMediaStream, dominantSpeaker, getAudioBrowserPermission, getAudioDevices, getAudioOutputDevices, getAudioStream, getClientDetails, getDeviceState, getScreenShareStream, getSdkInfo, getVideoBrowserPermission, getVideoDevices, getVideoStream, getWebRTCInfo, hasAudio$1 as hasAudio, hasPausedTrack, hasScreenShare, hasScreenShareAudio, hasVideo, humanize, isPinned, livestreamOrAudioRoomSortPreset, logToConsole, name, noopComparator, paginatedLayoutSortPreset, pinned, publishingAudio, publishingVideo, reactionType, resolveDeviceId, role, screenSharing, setDeviceInfo, setOSInfo, setPowerState, setSdkInfo, setThermalState, setWebRTCInfo, speakerLayoutSortPreset, speaking, videoLoggerSystem, withParticipantSource };
  //# sourceMappingURL=index.browser.es.js.map