@stream-io/video-client 1.41.3 → 1.42.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/CHANGELOG.md CHANGED
@@ -2,6 +2,17 @@
 
  This file was generated using [@jscutlery/semver](https://github.com/jscutlery/semver).
 
+ ## [1.42.0](https://github.com/GetStream/stream-video-js/compare/@stream-io/video-client-1.41.3...@stream-io/video-client-1.42.0) (2026-02-06)
+
+ ### Features
+
+ - Detectors for broken microphone setup ([#2090](https://github.com/GetStream/stream-video-js/issues/2090)) ([552b3f4](https://github.com/GetStream/stream-video-js/commit/552b3f4e3c54e0b6fa67221cd510f4ea1f6f8a61))
+
+ ### Bug Fixes
+
+ - **react:** apply defaultConstraints to speaking-while-muted detection stream ([#2103](https://github.com/GetStream/stream-video-js/issues/2103)) ([28b5538](https://github.com/GetStream/stream-video-js/commit/28b55380778723fc308d37396c8095a5a3ef7aa2))
+ - start speaking while muted detection in pristine state too ([#2110](https://github.com/GetStream/stream-video-js/issues/2110)) ([bc093bc](https://github.com/GetStream/stream-video-js/commit/bc093bc3ac2451541524b134a9044131a69964af))
+
  ## [1.41.3](https://github.com/GetStream/stream-video-js/compare/@stream-io/video-client-1.41.2...@stream-io/video-client-1.41.3) (2026-01-30)
 
  ### Bug Fixes
@@ -4789,7 +4789,7 @@ const hasVideo = (p) => p.publishedTracks.includes(TrackType.VIDEO);
  *
  * @param p the participant to check.
  */
- const hasAudio = (p) => p.publishedTracks.includes(TrackType.AUDIO);
+ const hasAudio$1 = (p) => p.publishedTracks.includes(TrackType.AUDIO);
  /**
  * Check if a participant is screen sharing.
  *
@@ -4890,8 +4890,8 @@ const publishingVideo = (a, b) => {
  * @param b the second participant.
  */
  const publishingAudio = (a, b) => {
- const hasA = hasAudio(a);
- const hasB = hasAudio(b);
+ const hasA = hasAudio$1(a);
+ const hasB = hasAudio$1(b);
  if (hasA && !hasB)
  return -1;
  if (!hasA && hasB)
@@ -6188,7 +6188,7 @@ const getSdkVersion = (sdk) => {
  return sdk ? `${sdk.major}.${sdk.minor}.${sdk.patch}` : '0.0.0-development';
  };
 
- const version = "1.41.3";
+ const version = "1.42.0";
  const [major, minor, patch] = version.split('.');
  let sdkInfo = {
  type: SdkType.PLAIN_JAVASCRIPT,
@@ -10379,7 +10379,7 @@ const getAudioStream = async (trackConstraints, tracer) => {
  videoLoggerSystem
  .getLogger('devices')
  .warn('Failed to get audio stream, will try again with relaxed constraints', { error, constraints, relaxedConstraints });
- return getAudioStream(relaxedConstraints);
+ return getAudioStream(relaxedConstraints, tracer);
  }
  videoLoggerSystem.getLogger('devices').error('Failed to get audio stream', {
  error,
@@ -11376,9 +11376,6 @@ class MicrophoneManagerState extends AudioDeviceManagerState {
  }
  }
 
- const DETECTION_FREQUENCY_IN_MS = 500;
- const AUDIO_LEVEL_THRESHOLD = 150;
- const FFT_SIZE = 128;
  /**
  * Creates a new sound detector.
  *
@@ -11388,7 +11385,7 @@ const FFT_SIZE = 128;
  * @returns a clean-up function which once invoked stops the sound detector.
  */
  const createSoundDetector = (audioStream, onSoundDetectedStateChanged, options = {}) => {
- const { detectionFrequencyInMs = DETECTION_FREQUENCY_IN_MS, audioLevelThreshold = AUDIO_LEVEL_THRESHOLD, fftSize = FFT_SIZE, destroyStreamOnStop = true, } = options;
+ const { detectionFrequencyInMs = 500, audioLevelThreshold = 150, fftSize = 128, destroyStreamOnStop = true, } = options;
  const audioContext = new AudioContext();
  const analyser = audioContext.createAnalyser();
  analyser.fftSize = fftSize;
@@ -11429,6 +11426,101 @@ const createSoundDetector = (audioStream, onSoundDetectedStateChanged, options =
  };
  };
 
+ /**
+ * Analyzes frequency data to determine if audio is being captured.
+ */
+ const hasAudio = (analyser, threshold) => {
+ const data = new Uint8Array(analyser.frequencyBinCount);
+ analyser.getByteFrequencyData(data);
+ return data.some((value) => value > threshold);
+ };
+ /** Helper for "no event" transitions */
+ const noEmit = (nextState) => ({
+ shouldEmit: false,
+ nextState,
+ });
+ /** Helper for event-emitting transitions */
+ const emit = (capturesAudio, nextState) => ({ shouldEmit: true, nextState, capturesAudio });
+ /**
+ * State transition function - computes next state and whether to emit an event.
+ */
+ const transitionState = (state, audioDetected, options) => {
+ if (audioDetected) {
+ return state.kind === 'IDLE' || state.kind === 'EMITTING'
+ ? emit(true, state)
+ : noEmit(state);
+ }
+ const { noAudioThresholdMs, emitIntervalMs } = options;
+ const now = Date.now();
+ switch (state.kind) {
+ case 'IDLE':
+ return noEmit({ kind: 'DETECTING', noAudioStartTime: now });
+ case 'DETECTING': {
+ const { noAudioStartTime } = state;
+ const elapsed = now - noAudioStartTime;
+ return elapsed >= noAudioThresholdMs
+ ? emit(false, { kind: 'EMITTING', noAudioStartTime, lastEmitTime: now })
+ : noEmit(state);
+ }
+ case 'EMITTING': {
+ const timeSinceLastEmit = now - state.lastEmitTime;
+ return timeSinceLastEmit >= emitIntervalMs
+ ? emit(false, { ...state, lastEmitTime: now })
+ : noEmit(state);
+ }
+ }
+ };
+ /**
+ * Creates and configures an audio analyzer for the given stream.
+ */
+ const createAudioAnalyzer = (audioStream, fftSize) => {
+ const audioContext = new AudioContext();
+ const analyser = audioContext.createAnalyser();
+ analyser.fftSize = fftSize;
+ const microphone = audioContext.createMediaStreamSource(audioStream);
+ microphone.connect(analyser);
+ return { audioContext, analyser };
+ };
+ /**
+ * Creates a new no-audio detector that monitors continuous absence of audio on an audio stream.
+ *
+ * @param audioStream the audio stream to observe.
+ * @param options custom options for the no-audio detector.
+ * @returns a cleanup function which once invoked stops the no-audio detector.
+ */
+ const createNoAudioDetector = (audioStream, options) => {
+ const { detectionFrequencyInMs = 350, audioLevelThreshold = 0, fftSize = 256, onCaptureStatusChange, } = options;
+ let state = { kind: 'IDLE' };
+ const { audioContext, analyser } = createAudioAnalyzer(audioStream, fftSize);
+ const detectionIntervalId = setInterval(() => {
+ const [audioTrack] = audioStream.getAudioTracks();
+ if (!audioTrack?.enabled || audioTrack.readyState === 'ended') {
+ state = { kind: 'IDLE' };
+ return;
+ }
+ const audioDetected = hasAudio(analyser, audioLevelThreshold);
+ const transition = transitionState(state, audioDetected, options);
+ state = transition.nextState;
+ if (!transition.shouldEmit)
+ return;
+ const { capturesAudio } = transition;
+ onCaptureStatusChange(capturesAudio);
+ if (capturesAudio) {
+ stop().catch((err) => {
+ const logger = videoLoggerSystem.getLogger('NoAudioDetector');
+ logger.error('Error stopping no-audio detector', err);
+ });
+ }
+ }, detectionFrequencyInMs);
+ async function stop() {
+ clearInterval(detectionIntervalId);
+ if (audioContext.state !== 'closed') {
+ await audioContext.close();
+ }
+ }
+ return stop;
+ };
+
  class RNSpeechDetector {
  constructor(externalAudioStream) {
  this.pc1 = new RTCPeerConnection({});
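The block above introduces `createNoAudioDetector`, which samples the analyser every `detectionFrequencyInMs` and walks an IDLE → DETECTING → EMITTING state machine: the first `capturesAudio: false` report fires after `noAudioThresholdMs` of continuous silence, repeats every `emitIntervalMs` while the silence persists, and the detector stops itself as soon as audio is detected again. A minimal sketch of driving the helper directly, assuming access to it (it is module-internal in this build and not part of the public export list below):

```js
// Illustrative sketch only — uses the module-internal helper added above.
const stream = await navigator.mediaDevices.getUserMedia({ audio: true });
const stop = createNoAudioDetector(stream, {
  noAudioThresholdMs: 5000, // first report after 5s of continuous silence
  emitIntervalMs: 5000,     // then re-report every 5s while silence persists
  onCaptureStatusChange: (capturesAudio) => {
    console.log('microphone capturing audio:', capturesAudio);
  },
});
// ...tear down when done (the detector also stops itself once audio is detected):
await stop();
```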
@@ -11440,16 +11532,10 @@ class RNSpeechDetector {
  */
  async start(onSoundDetectedStateChanged) {
  try {
- let audioStream;
- if (this.externalAudioStream != null) {
- audioStream = this.externalAudioStream;
- }
- else {
- audioStream = await navigator.mediaDevices.getUserMedia({
- audio: true,
- });
- this.audioStream = audioStream;
- }
+ const audioStream = this.externalAudioStream != null
+ ? this.externalAudioStream
+ : await navigator.mediaDevices.getUserMedia({ audio: true });
+ this.audioStream = audioStream;
  this.pc1.addEventListener('icecandidate', async (e) => {
  await this.pc2.addIceCandidate(e.candidate);
  });
@@ -11473,9 +11559,9 @@ class RNSpeechDetector {
  const answer = await this.pc2.createAnswer();
  await this.pc1.setRemoteDescription(answer);
  await this.pc2.setLocalDescription(answer);
- const unsub = this.onSpeakingDetectedStateChange(onSoundDetectedStateChanged);
+ const unsubscribe = this.onSpeakingDetectedStateChange(onSoundDetectedStateChanged);
  return () => {
- unsub();
+ unsubscribe();
  this.stop();
  };
  }
@@ -11515,7 +11601,7 @@ class RNSpeechDetector {
  const silenceTimeout = 5000; // Reset baseline after 5 seconds of silence
  const checkAudioLevel = async () => {
  try {
- const stats = (await this.pc1.getStats());
+ const stats = await this.pc1.getStats();
  const report = flatten(stats);
  // Audio levels are present inside stats of type `media-source` and of kind `audio`
  const audioMediaSourceStats = report.find((stat) => stat.type === 'media-source' &&
@@ -11569,8 +11655,7 @@ class RNSpeechDetector {
  logger.error('error checking audio level from stats', error);
  }
  };
- // Call checkAudioLevel periodically (every 100ms)
- const intervalId = setInterval(checkAudioLevel, 100);
+ const intervalId = setInterval(checkAudioLevel, 250);
  return () => {
  clearInterval(intervalId);
  clearTimeout(speechTimer);
@@ -11596,8 +11681,11 @@ class MicrophoneManager extends AudioDeviceManager {
  super(call, new MicrophoneManagerState(disableMode), TrackType.AUDIO);
  this.speakingWhileMutedNotificationEnabled = true;
  this.soundDetectorConcurrencyTag = Symbol('soundDetectorConcurrencyTag');
+ this.silenceThresholdMs = 5000;
  }
  setup() {
+ if (this.areSubscriptionsSetUp)
+ return;
  super.setup();
  this.subscriptions.push(createSafeAsyncSubscription(combineLatest([
  this.call.state.callingState$,
@@ -11614,7 +11702,7 @@ class MicrophoneManager extends AudioDeviceManager {
  if (!this.speakingWhileMutedNotificationEnabled)
  return;
  if (ownCapabilities.includes(OwnCapability.SEND_AUDIO)) {
- if (status === 'disabled') {
+ if (status !== 'enabled') {
  await this.startSpeakingWhileMutedDetection(deviceId);
  }
  else {
@@ -11663,6 +11751,40 @@ class MicrophoneManager extends AudioDeviceManager {
  });
  }
  }));
+ if (!isReactNative()) {
+ const unsubscribe = createSafeAsyncSubscription(combineLatest([this.state.status$, this.state.mediaStream$]), async ([status, mediaStream]) => {
+ if (this.noAudioDetectorCleanup) {
+ const cleanup = this.noAudioDetectorCleanup;
+ this.noAudioDetectorCleanup = undefined;
+ await cleanup().catch((err) => {
+ this.logger.warn('Failed to stop no-audio detector', err);
+ });
+ }
+ if (status !== 'enabled' || !mediaStream)
+ return;
+ if (this.silenceThresholdMs <= 0)
+ return;
+ const deviceId = this.state.selectedDevice;
+ const devices = getCurrentValue(this.listDevices());
+ const label = devices.find((d) => d.deviceId === deviceId)?.label;
+ this.noAudioDetectorCleanup = createNoAudioDetector(mediaStream, {
+ noAudioThresholdMs: this.silenceThresholdMs,
+ emitIntervalMs: this.silenceThresholdMs,
+ onCaptureStatusChange: (capturesAudio) => {
+ const event = {
+ type: 'mic.capture_report',
+ call_cid: this.call.cid,
+ capturesAudio,
+ deviceId,
+ label,
+ };
+ this.call.tracer.trace('mic.capture_report', event);
+ this.call.streamClient.dispatchEvent(event);
+ },
+ });
+ });
+ this.subscriptions.push(unsubscribe);
+ }
  }
  /**
  * Enables noise cancellation for the microphone.
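With the subscription above, an enabled microphone that stays silent for `silenceThresholdMs` produces a `mic.capture_report` event that is both traced and dispatched on the coordinator client. A hedged sketch of how an application might observe it, assuming events dispatched via `streamClient.dispatchEvent` surface through the client's `on` listener the same way coordinator events do:

```js
// Hypothetical listener — the event shape is taken from the dispatch above.
const off = client.on('mic.capture_report', (event) => {
  if (!event.capturesAudio) {
    console.warn(
      `Microphone "${event.label ?? event.deviceId}" does not seem to capture audio`,
      event.call_cid,
    );
  }
});
// later: off();
```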
@@ -11760,6 +11882,45 @@ class MicrophoneManager extends AudioDeviceManager {
  this.speakingWhileMutedNotificationEnabled = false;
  await this.stopSpeakingWhileMutedDetection();
  }
+ /**
+ * Sets the silence threshold in milliseconds for no-audio detection.
+ * When the microphone is enabled but produces no audio for this duration,
+ * a 'mic.capture_report' event will be emitted.
+ *
+ * @param thresholdMs the threshold in milliseconds (default: 5000).
+ * Set to 0 or a negative value to disable no-audio detection.
+ */
+ setSilenceThreshold(thresholdMs) {
+ this.silenceThresholdMs = thresholdMs;
+ }
+ /**
+ * Performs audio capture test on a specific microphone.
+ *
+ * This method is only available in browser environments (not React Native).
+ *
+ * @param deviceId The device ID to test.
+ * @param options Optional test configuration.
+ * @returns Promise that resolves with the test result (true or false).
+ */
+ async performTest(deviceId, options) {
+ if (isReactNative())
+ throw new Error('Not available in React Native');
+ const stream = await this.getStream({ deviceId: { exact: deviceId } });
+ const { testDurationMs = 3000 } = options || {};
+ const { promise, resolve } = promiseWithResolvers();
+ const cleanup = createNoAudioDetector(stream, {
+ noAudioThresholdMs: testDurationMs,
+ emitIntervalMs: testDurationMs,
+ onCaptureStatusChange: async (capturesAudio) => {
+ resolve(capturesAudio);
+ await cleanup().catch((err) => {
+ this.logger.warn('Failed to stop detector during test', err);
+ });
+ disposeOfMediaStream(stream);
+ },
+ });
+ return promise;
+ }
  /**
  * Applies the audio settings to the microphone.
  * @param settings the audio settings to apply.
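The two methods added above give applications explicit control over the new detection: `setSilenceThreshold` tunes (or disables) in-call no-audio reporting, while `performTest` probes a specific device outside of publishing. A minimal usage sketch, assuming `call` is an existing `Call` instance and `deviceId` comes from the audio device list:

```js
const mic = call.microphone;

// Report no-audio only after 10s of silence; 0 or a negative value disables it.
mic.setSilenceThreshold(10000);

// Resolves true as soon as the device produces audio,
// or false after `testDurationMs` of continuous silence.
const capturesAudio = await mic.performTest(deviceId, { testDurationMs: 3000 });
if (!capturesAudio) {
  console.warn('Selected microphone appears to be broken or muted at the OS level');
}
```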
@@ -11808,13 +11969,12 @@ class MicrophoneManager extends AudioDeviceManager {
  }
  async startSpeakingWhileMutedDetection(deviceId) {
  await withoutConcurrency(this.soundDetectorConcurrencyTag, async () => {
- await this.stopSpeakingWhileMutedDetection();
  if (isReactNative()) {
  this.rnSpeechDetector = new RNSpeechDetector();
  const unsubscribe = await this.rnSpeechDetector.start((event) => {
  this.state.setSpeakingWhileMuted(event.isSoundDetected);
  });
- this.soundDetectorCleanup = () => {
+ this.soundDetectorCleanup = async () => {
  unsubscribe();
  this.rnSpeechDetector = undefined;
  };
@@ -11822,6 +11982,7 @@
  else {
  // Need to start a new stream that's not connected to publisher
  const stream = await this.getStream({
+ ...this.state.defaultConstraints,
  deviceId: { exact: deviceId },
  });
  this.soundDetectorCleanup = createSoundDetector(stream, (event) => {
@@ -11901,6 +12062,8 @@ class ScreenShareManager extends AudioDeviceManager {
  super(call, new ScreenShareState(), TrackType.SCREEN_SHARE);
  }
  setup() {
+ if (this.areSubscriptionsSetUp)
+ return;
  super.setup();
  this.subscriptions.push(createSubscription(this.call.state.settings$, (settings) => {
  const maybeTargetResolution = settings?.screensharing.target_resolution;
@@ -12247,6 +12410,9 @@ class Call {
  this.leaveCallHooks.add(registerEventHandlers(this, this.dispatcher));
  this.registerEffects();
  this.registerReconnectHandlers();
+ // Set up the device managers again. Although this is already done
+ // in the DeviceManager's constructor, they'll need to be re-set up
+ // in the cases where a call instance is recycled (join -> leave -> join).
  this.camera.setup();
  this.microphone.setup();
  this.screenShare.setup();
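The `areSubscriptionsSetUp` guards added to `MicrophoneManager.setup` and `ScreenShareManager.setup` make these repeated calls safe, which is what the recycling comment above relies on. A sketch of that scenario, assuming the usual `client.call(type, id)` flow:

```js
// Hypothetical flow — the same Call instance is joined, left, and joined again.
const call = client.call('default', 'example-call-id');
await call.join();
// ...
await call.leave();
// Re-joining the recycled instance re-runs setup(); the new guards keep the
// device-manager subscriptions from being registered twice.
await call.join();
```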
@@ -15346,7 +15512,7 @@ class StreamClient {
  this.getUserAgent = () => {
  if (!this.cachedUserAgent) {
  const { clientAppIdentifier = {} } = this.options;
- const { sdkName = 'js', sdkVersion = "1.41.3", ...extras } = clientAppIdentifier;
+ const { sdkName = 'js', sdkVersion = "1.42.0", ...extras } = clientAppIdentifier;
  this.cachedUserAgent = [
  `stream-video-${sdkName}-v${sdkVersion}`,
  ...Object.entries(extras).map(([key, value]) => `${key}=${value}`),
@@ -15982,5 +16148,5 @@ const humanize = (n) => {
  return String(n);
  };
 
- export { AudioSettingsRequestDefaultDeviceEnum, AudioSettingsResponseDefaultDeviceEnum, browsers as Browsers, Call, CallRecordingFailedEventRecordingTypeEnum, CallRecordingReadyEventRecordingTypeEnum, CallRecordingStartedEventRecordingTypeEnum, CallRecordingStoppedEventRecordingTypeEnum, CallState, CallType, CallTypes, CallingState, CameraManager, CameraManagerState, CreateDeviceRequestPushProviderEnum, DebounceType, DeviceManager, DeviceManagerState, DynascaleManager, ErrorFromResponse, FrameRecordingSettingsRequestModeEnum, FrameRecordingSettingsRequestQualityEnum, FrameRecordingSettingsResponseModeEnum, IndividualRecordingSettingsRequestModeEnum, IndividualRecordingSettingsResponseModeEnum, IngressAudioEncodingOptionsRequestChannelsEnum, IngressSourceRequestFpsEnum, IngressVideoLayerRequestCodecEnum, LayoutSettingsRequestNameEnum, MicrophoneManager, MicrophoneManagerState, NoiseCancellationSettingsModeEnum, OwnCapability, RNSpeechDetector, RTMPBroadcastRequestQualityEnum, RTMPSettingsRequestQualityEnum, RawRecordingSettingsRequestModeEnum, RawRecordingSettingsResponseModeEnum, RecordSettingsRequestModeEnum, RecordSettingsRequestQualityEnum, rxUtils as RxUtils, ScreenShareManager, ScreenShareState, events as SfuEvents, SfuJoinError, models as SfuModels, SpeakerManager, SpeakerState, StartClosedCaptionsRequestLanguageEnum, StartTranscriptionRequestLanguageEnum, StreamSfuClient, StreamVideoClient, StreamVideoReadOnlyStateStore, StreamVideoWriteableStateStore, TranscriptionSettingsRequestClosedCaptionModeEnum, TranscriptionSettingsRequestLanguageEnum, TranscriptionSettingsRequestModeEnum, TranscriptionSettingsResponseClosedCaptionModeEnum, TranscriptionSettingsResponseLanguageEnum, TranscriptionSettingsResponseModeEnum, VideoSettingsRequestCameraFacingEnum, VideoSettingsResponseCameraFacingEnum, ViewportTracker, VisibilityState, checkIfAudioOutputChangeSupported, combineComparators, conditional, createSoundDetector, defaultSortPreset, descending, deviceIds$, disposeOfMediaStream, dominantSpeaker, getAudioBrowserPermission, getAudioDevices, getAudioOutputDevices, getAudioStream, getClientDetails, getDeviceState, getScreenShareStream, getSdkInfo, getVideoBrowserPermission, getVideoDevices, getVideoStream, getWebRTCInfo, hasAudio, hasPausedTrack, hasScreenShare, hasScreenShareAudio, hasVideo, humanize, isPinned, livestreamOrAudioRoomSortPreset, logToConsole, name, noopComparator, paginatedLayoutSortPreset, pinned, publishingAudio, publishingVideo, reactionType, resolveDeviceId, role, screenSharing, setDeviceInfo, setOSInfo, setPowerState, setSdkInfo, setThermalState, setWebRTCInfo, speakerLayoutSortPreset, speaking, videoLoggerSystem, withParticipantSource };
+ export { AudioSettingsRequestDefaultDeviceEnum, AudioSettingsResponseDefaultDeviceEnum, browsers as Browsers, Call, CallRecordingFailedEventRecordingTypeEnum, CallRecordingReadyEventRecordingTypeEnum, CallRecordingStartedEventRecordingTypeEnum, CallRecordingStoppedEventRecordingTypeEnum, CallState, CallType, CallTypes, CallingState, CameraManager, CameraManagerState, CreateDeviceRequestPushProviderEnum, DebounceType, DeviceManager, DeviceManagerState, DynascaleManager, ErrorFromResponse, FrameRecordingSettingsRequestModeEnum, FrameRecordingSettingsRequestQualityEnum, FrameRecordingSettingsResponseModeEnum, IndividualRecordingSettingsRequestModeEnum, IndividualRecordingSettingsResponseModeEnum, IngressAudioEncodingOptionsRequestChannelsEnum, IngressSourceRequestFpsEnum, IngressVideoLayerRequestCodecEnum, LayoutSettingsRequestNameEnum, MicrophoneManager, MicrophoneManagerState, NoiseCancellationSettingsModeEnum, OwnCapability, RNSpeechDetector, RTMPBroadcastRequestQualityEnum, RTMPSettingsRequestQualityEnum, RawRecordingSettingsRequestModeEnum, RawRecordingSettingsResponseModeEnum, RecordSettingsRequestModeEnum, RecordSettingsRequestQualityEnum, rxUtils as RxUtils, ScreenShareManager, ScreenShareState, events as SfuEvents, SfuJoinError, models as SfuModels, SpeakerManager, SpeakerState, StartClosedCaptionsRequestLanguageEnum, StartTranscriptionRequestLanguageEnum, StreamSfuClient, StreamVideoClient, StreamVideoReadOnlyStateStore, StreamVideoWriteableStateStore, TranscriptionSettingsRequestClosedCaptionModeEnum, TranscriptionSettingsRequestLanguageEnum, TranscriptionSettingsRequestModeEnum, TranscriptionSettingsResponseClosedCaptionModeEnum, TranscriptionSettingsResponseLanguageEnum, TranscriptionSettingsResponseModeEnum, VideoSettingsRequestCameraFacingEnum, VideoSettingsResponseCameraFacingEnum, ViewportTracker, VisibilityState, checkIfAudioOutputChangeSupported, combineComparators, conditional, createSoundDetector, defaultSortPreset, descending, deviceIds$, disposeOfMediaStream, dominantSpeaker, getAudioBrowserPermission, getAudioDevices, getAudioOutputDevices, getAudioStream, getClientDetails, getDeviceState, getScreenShareStream, getSdkInfo, getVideoBrowserPermission, getVideoDevices, getVideoStream, getWebRTCInfo, hasAudio$1 as hasAudio, hasPausedTrack, hasScreenShare, hasScreenShareAudio, hasVideo, humanize, isPinned, livestreamOrAudioRoomSortPreset, logToConsole, name, noopComparator, paginatedLayoutSortPreset, pinned, publishingAudio, publishingVideo, reactionType, resolveDeviceId, role, screenSharing, setDeviceInfo, setOSInfo, setPowerState, setSdkInfo, setThermalState, setWebRTCInfo, speakerLayoutSortPreset, speaking, videoLoggerSystem, withParticipantSource };
  //# sourceMappingURL=index.browser.es.js.map