@stream-io/video-client 1.41.2 → 1.42.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/CHANGELOG.md +18 -0
- package/dist/index.browser.es.js +240 -48
- package/dist/index.browser.es.js.map +1 -1
- package/dist/index.cjs.js +240 -48
- package/dist/index.cjs.js.map +1 -1
- package/dist/index.es.js +240 -48
- package/dist/index.es.js.map +1 -1
- package/dist/src/coordinator/connection/types.d.ts +19 -3
- package/dist/src/devices/DeviceManager.d.ts +2 -2
- package/dist/src/devices/MicrophoneManager.d.ts +23 -0
- package/dist/src/helpers/no-audio-detector.d.ts +43 -0
- package/dist/src/stats/SfuStatsReporter.d.ts +2 -1
- package/dist/src/stats/types.d.ts +10 -0
- package/package.json +1 -1
- package/src/Call.ts +8 -9
- package/src/coordinator/connection/types.ts +20 -2
- package/src/devices/DeviceManager.ts +2 -2
- package/src/devices/MicrophoneManager.ts +96 -4
- package/src/devices/ScreenShareManager.ts +1 -0
- package/src/devices/__tests__/MicrophoneManager.test.ts +102 -1
- package/src/devices/__tests__/MicrophoneManagerRN.test.ts +7 -1
- package/src/devices/__tests__/mocks.ts +1 -0
- package/src/devices/__tests__/web-audio.mocks.ts +72 -0
- package/src/devices/devices.ts +1 -1
- package/src/helpers/RNSpeechDetector.ts +12 -23
- package/src/helpers/__tests__/no-audio-detector.test.ts +271 -0
- package/src/helpers/no-audio-detector.ts +179 -0
- package/src/helpers/sound-detector.ts +3 -7
- package/src/stats/CallStateStatsReporter.ts +22 -0
- package/src/stats/SfuStatsReporter.ts +22 -14
- package/src/stats/types.ts +11 -0
package/dist/index.cjs.js CHANGED

@@ -4809,7 +4809,7 @@ const hasVideo = (p) => p.publishedTracks.includes(TrackType.VIDEO);
  *
  * @param p the participant to check.
  */
-const hasAudio = (p) => p.publishedTracks.includes(TrackType.AUDIO);
+const hasAudio$1 = (p) => p.publishedTracks.includes(TrackType.AUDIO);
 /**
  * Check if a participant is screen sharing.
  *
@@ -4910,8 +4910,8 @@ const publishingVideo = (a, b) => {
  * @param b the second participant.
  */
 const publishingAudio = (a, b) => {
-    const hasA = hasAudio(a);
-    const hasB = hasAudio(b);
+    const hasA = hasAudio$1(a);
+    const hasB = hasAudio$1(b);
     if (hasA && !hasB)
         return -1;
     if (!hasA && hasB)
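The bundler suffixes the module-scoped helper as `hasAudio$1` only to avoid a name clash with the new `hasAudio` analyser helper introduced further down in this bundle; the public `hasAudio` export is unchanged (see the `exports` hunk at the end). A minimal sketch of the comparator's behavior, assuming `participants` is an array of `StreamVideoParticipant` objects:

    import { hasAudio } from '@stream-io/video-client';

    // Sort participants that publish audio ahead of those that do not,
    // mirroring the publishingAudio comparator above.
    participants.sort((a, b) => {
      const hasA = hasAudio(a);
      const hasB = hasAudio(b);
      if (hasA && !hasB) return -1;
      if (!hasA && hasB) return 1;
      return 0;
    });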
@@ -6208,7 +6208,7 @@ const getSdkVersion = (sdk) => {
     return sdk ? `${sdk.major}.${sdk.minor}.${sdk.patch}` : '0.0.0-development';
 };
 
-const version = "1.41.2";
+const version = "1.42.0";
 const [major, minor, patch] = version.split('.');
 let sdkInfo = {
     type: SdkType.PLAIN_JAVASCRIPT,
@@ -6531,6 +6531,16 @@ const createStatsReporter = ({ subscriber, publisher, state, datacenter, polling
         stop,
     };
 };
+/**
+ * Extracts camera statistics from a media source.
+ *
+ * @param mediaSource the media source stats to extract camera info from.
+ */
+const getCameraStats = (mediaSource) => ({
+    frameRate: mediaSource.framesPerSecond,
+    frameWidth: mediaSource.width,
+    frameHeight: mediaSource.height,
+});
 /**
  * Transforms raw RTC stats into a slimmer and uniform across browsers format.
  *
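For context, `media-source` entries of kind `video` in a WebRTC stats report carry `framesPerSecond`, `width`, and `height`, which is exactly what the new `getCameraStats` helper picks out. A hedged sketch of the same extraction against a raw peer connection (`pc` is an assumed `RTCPeerConnection`):

    const report = await pc.getStats();
    report.forEach((stat: any) => {
      if (stat.type === 'media-source' && stat.kind === 'video') {
        // The same three fields getCameraStats maps into the report payload.
        console.log({
          frameRate: stat.framesPerSecond,
          frameWidth: stat.width,
          frameHeight: stat.height,
        });
      }
    });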
@@ -6556,6 +6566,7 @@ const transform = (report, opts) => {
     }
     let trackType;
     let audioLevel;
+    let camera;
     let concealedSamples;
     let concealmentEvents;
     let packetsReceived;
@@ -6571,6 +6582,9 @@ const transform = (report, opts) => {
             typeof mediaSource.audioLevel === 'number') {
             audioLevel = mediaSource.audioLevel;
         }
+        if (trackKind === 'video') {
+            camera = getCameraStats(mediaSource);
+        }
     }
 }
 else if (kind === 'subscriber' && trackKind === 'audio') {
@@ -6604,6 +6618,7 @@ const transform = (report, opts) => {
         concealmentEvents,
         packetsReceived,
         packetsLost,
+        camera,
     };
 });
 return {
@@ -6623,6 +6638,7 @@ const getEmptyVideoStats = (stats) => {
     highestFrameWidth: 0,
     highestFrameHeight: 0,
     highestFramesPerSecond: 0,
+    camera: {},
     codec: '',
     codecPerTrackType: {},
     timestamp: Date.now(),
@@ -6667,6 +6683,9 @@ const aggregate = (stats) => {
             acc.highestFramesPerSecond = stream.framesPerSecond || 0;
             maxArea = streamArea;
         }
+        if (stream.trackType === TrackType.VIDEO) {
+            acc.camera = stream.camera;
+        }
         qualityLimitationReasons.add(stream.qualityLimitationReason || '');
         return acc;
     }, aggregatedStats);
@@ -6731,6 +6750,7 @@ const aggregateAudio = (stats) => {
 class SfuStatsReporter {
     constructor(sfuClient, { options, clientDetails, subscriber, publisher, microphone, camera, state, tracer, unifiedSessionId, }) {
         this.logger = videoLoggerSystem.getLogger('SfuStatsReporter');
+        this.reportCount = 0;
         this.inputDevices = new Map();
         this.observeDevice = (device, kind) => {
             const { browserPermissionState$ } = device.state;
@@ -6832,17 +6852,31 @@ class SfuStatsReporter {
                 throw err;
             }
         };
+        this.scheduleNextReport = () => {
+            const intervals = [1500, 3000, 3000, 5000];
+            if (this.reportCount < intervals.length) {
+                this.timeoutId = setTimeout(() => {
+                    this.flush();
+                    this.reportCount++;
+                    this.scheduleNextReport();
+                }, intervals[this.reportCount]);
+            }
+            else {
+                clearInterval(this.intervalId);
+                this.intervalId = setInterval(() => {
+                    this.flush();
+                }, this.options.reporting_interval_ms);
+            }
+        };
         this.start = () => {
             if (this.options.reporting_interval_ms <= 0)
                 return;
             this.observeDevice(this.microphone, 'mic');
             this.observeDevice(this.camera, 'camera');
+            this.reportCount = 0;
             clearInterval(this.intervalId);
-            this.intervalId = setInterval(() => {
-                this.run().catch((err) => {
-                    this.logger.warn('Failed to report stats', err);
-                });
-            }, this.options.reporting_interval_ms);
+            clearTimeout(this.timeoutId);
+            this.scheduleNextReport();
         };
         this.stop = () => {
             this.unsubscribeDevicePermissionsSubscription?.();
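The new `scheduleNextReport` replaces the old fixed `setInterval` with a warm-up ramp: the first four reports fire 1.5 s, 3 s, 3 s, and 5 s apart, after which reporting settles into the regular `reporting_interval_ms` cadence. A standalone sketch of the same pattern, assuming nothing beyond a `flush` callback:

    function startRampedReporting(flush: () => void, steadyIntervalMs: number) {
      const warmup = [1500, 3000, 3000, 5000];
      let i = 0;
      let timeoutId: ReturnType<typeof setTimeout> | undefined;
      let intervalId: ReturnType<typeof setInterval> | undefined;
      const scheduleNext = () => {
        if (i < warmup.length) {
          // setTimeout chain during warm-up, one report per step.
          timeoutId = setTimeout(() => {
            flush();
            i++;
            scheduleNext();
          }, warmup[i]);
        } else {
          // Hand off to a steady interval once warmed up.
          intervalId = setInterval(flush, steadyIntervalMs);
        }
      };
      scheduleNext();
      return () => {
        clearTimeout(timeoutId);
        clearInterval(intervalId);
      };
    }

This ramp also supersedes the removed `scheduleOne` calibration report (see the next hunk and the `Call` hunk further down).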
@@ -6854,20 +6888,13 @@ class SfuStatsReporter {
             this.intervalId = undefined;
             clearTimeout(this.timeoutId);
             this.timeoutId = undefined;
+            this.reportCount = 0;
         };
         this.flush = () => {
             this.run().catch((err) => {
                 this.logger.warn('Failed to flush report stats', err);
             });
         };
-        this.scheduleOne = (timeout) => {
-            clearTimeout(this.timeoutId);
-            this.timeoutId = setTimeout(() => {
-                this.run().catch((err) => {
-                    this.logger.warn('Failed to report stats', err);
-                });
-            }, timeout);
-        };
         this.sfuClient = sfuClient;
         this.options = options;
         this.subscriber = subscriber;
@@ -10372,7 +10399,7 @@ const getAudioStream = async (trackConstraints, tracer) => {
         videoLoggerSystem
             .getLogger('devices')
             .warn('Failed to get audio stream, will try again with relaxed constraints', { error, constraints, relaxedConstraints });
-        return getAudioStream(relaxedConstraints);
+        return getAudioStream(relaxedConstraints, tracer);
     }
     videoLoggerSystem.getLogger('devices').error('Failed to get audio stream', {
         error,
@@ -11369,9 +11396,6 @@ class MicrophoneManagerState extends AudioDeviceManagerState {
     }
 }
 
-const DETECTION_FREQUENCY_IN_MS = 500;
-const AUDIO_LEVEL_THRESHOLD = 150;
-const FFT_SIZE = 128;
 /**
  * Creates a new sound detector.
  *
@@ -11381,7 +11405,7 @@ const FFT_SIZE = 128;
  * @returns a clean-up function which once invoked stops the sound detector.
  */
 const createSoundDetector = (audioStream, onSoundDetectedStateChanged, options = {}) => {
-    const { detectionFrequencyInMs = DETECTION_FREQUENCY_IN_MS, audioLevelThreshold = AUDIO_LEVEL_THRESHOLD, fftSize = FFT_SIZE, destroyStreamOnStop = true, } = options;
+    const { detectionFrequencyInMs = 500, audioLevelThreshold = 150, fftSize = 128, destroyStreamOnStop = true, } = options;
     const audioContext = new AudioContext();
     const analyser = audioContext.createAnalyser();
     analyser.fftSize = fftSize;
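This change simply inlines the former `DETECTION_FREQUENCY_IN_MS`, `AUDIO_LEVEL_THRESHOLD`, and `FFT_SIZE` constants as destructuring defaults; behavior is unchanged. A usage sketch of the detector, assuming a microphone stream from `getUserMedia`:

    const stream = await navigator.mediaDevices.getUserMedia({ audio: true });
    const stopDetector = createSoundDetector(
      stream,
      (event) => console.log('sound detected:', event.isSoundDetected),
      // These are the defaults; pass overrides only when needed.
      { detectionFrequencyInMs: 500, audioLevelThreshold: 150, fftSize: 128 },
    );
    // Later, tear the detector (and, by default, the stream) down:
    await stopDetector();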
@@ -11422,6 +11446,101 @@ const createSoundDetector = (audioStream, onSoundDetectedStateChanged, options =
     };
 };
 
+/**
+ * Analyzes frequency data to determine if audio is being captured.
+ */
+const hasAudio = (analyser, threshold) => {
+    const data = new Uint8Array(analyser.frequencyBinCount);
+    analyser.getByteFrequencyData(data);
+    return data.some((value) => value > threshold);
+};
+/** Helper for "no event" transitions */
+const noEmit = (nextState) => ({
+    shouldEmit: false,
+    nextState,
+});
+/** Helper for event-emitting transitions */
+const emit = (capturesAudio, nextState) => ({ shouldEmit: true, nextState, capturesAudio });
+/**
+ * State transition function - computes next state and whether to emit an event.
+ */
+const transitionState = (state, audioDetected, options) => {
+    if (audioDetected) {
+        return state.kind === 'IDLE' || state.kind === 'EMITTING'
+            ? emit(true, state)
+            : noEmit(state);
+    }
+    const { noAudioThresholdMs, emitIntervalMs } = options;
+    const now = Date.now();
+    switch (state.kind) {
+        case 'IDLE':
+            return noEmit({ kind: 'DETECTING', noAudioStartTime: now });
+        case 'DETECTING': {
+            const { noAudioStartTime } = state;
+            const elapsed = now - noAudioStartTime;
+            return elapsed >= noAudioThresholdMs
+                ? emit(false, { kind: 'EMITTING', noAudioStartTime, lastEmitTime: now })
+                : noEmit(state);
+        }
+        case 'EMITTING': {
+            const timeSinceLastEmit = now - state.lastEmitTime;
+            return timeSinceLastEmit >= emitIntervalMs
+                ? emit(false, { ...state, lastEmitTime: now })
+                : noEmit(state);
+        }
+    }
+};
+/**
+ * Creates and configures an audio analyzer for the given stream.
+ */
+const createAudioAnalyzer = (audioStream, fftSize) => {
+    const audioContext = new AudioContext();
+    const analyser = audioContext.createAnalyser();
+    analyser.fftSize = fftSize;
+    const microphone = audioContext.createMediaStreamSource(audioStream);
+    microphone.connect(analyser);
+    return { audioContext, analyser };
+};
+/**
+ * Creates a new no-audio detector that monitors continuous absence of audio on an audio stream.
+ *
+ * @param audioStream the audio stream to observe.
+ * @param options custom options for the no-audio detector.
+ * @returns a cleanup function which once invoked stops the no-audio detector.
+ */
+const createNoAudioDetector = (audioStream, options) => {
+    const { detectionFrequencyInMs = 350, audioLevelThreshold = 0, fftSize = 256, onCaptureStatusChange, } = options;
+    let state = { kind: 'IDLE' };
+    const { audioContext, analyser } = createAudioAnalyzer(audioStream, fftSize);
+    const detectionIntervalId = setInterval(() => {
+        const [audioTrack] = audioStream.getAudioTracks();
+        if (!audioTrack?.enabled || audioTrack.readyState === 'ended') {
+            state = { kind: 'IDLE' };
+            return;
+        }
+        const audioDetected = hasAudio(analyser, audioLevelThreshold);
+        const transition = transitionState(state, audioDetected, options);
+        state = transition.nextState;
+        if (!transition.shouldEmit)
+            return;
+        const { capturesAudio } = transition;
+        onCaptureStatusChange(capturesAudio);
+        if (capturesAudio) {
+            stop().catch((err) => {
+                const logger = videoLoggerSystem.getLogger('NoAudioDetector');
+                logger.error('Error stopping no-audio detector', err);
+            });
+        }
+    }, detectionFrequencyInMs);
+    async function stop() {
+        clearInterval(detectionIntervalId);
+        if (audioContext.state !== 'closed') {
+            await audioContext.close();
+        }
+    }
+    return stop;
+};
+
 class RNSpeechDetector {
     constructor(externalAudioStream) {
         this.pc1 = new RTCPeerConnection({});
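Taken together, the new detector is a small three-state machine (IDLE, DETECTING, EMITTING): it reports `false` once silence has lasted `noAudioThresholdMs`, repeats that report every `emitIntervalMs` while the silence continues, and reports `true` when audio is observed, after which it shuts itself down. A hedged usage sketch, assuming a microphone stream from `getUserMedia`:

    const stream = await navigator.mediaDevices.getUserMedia({ audio: true });
    const stopDetector = createNoAudioDetector(stream, {
      noAudioThresholdMs: 5000,
      emitIntervalMs: 5000,
      onCaptureStatusChange: (capturesAudio) => {
        console.log(capturesAudio ? 'mic is live' : 'mic appears silent');
      },
    });
    // If the detector has not already stopped itself:
    await stopDetector();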
@@ -11433,16 +11552,10 @@ class RNSpeechDetector {
      */
     async start(onSoundDetectedStateChanged) {
         try {
-            let audioStream;
-            if (this.externalAudioStream) {
-                audioStream = this.externalAudioStream;
-            }
-            else {
-                audioStream = await navigator.mediaDevices.getUserMedia({
-                    audio: true,
-                });
-                this.audioStream = audioStream;
-            }
+            const audioStream = this.externalAudioStream != null
+                ? this.externalAudioStream
+                : await navigator.mediaDevices.getUserMedia({ audio: true });
+            this.audioStream = audioStream;
             this.pc1.addEventListener('icecandidate', async (e) => {
                 await this.pc2.addIceCandidate(e.candidate);
             });
@@ -11466,9 +11579,9 @@ class RNSpeechDetector {
             const answer = await this.pc2.createAnswer();
             await this.pc1.setRemoteDescription(answer);
             await this.pc2.setLocalDescription(answer);
-            const …
+            const unsubscribe = this.onSpeakingDetectedStateChange(onSoundDetectedStateChanged);
             return () => {
-                …
+                unsubscribe();
                 this.stop();
             };
         }
@@ -11508,7 +11621,7 @@ class RNSpeechDetector {
         const silenceTimeout = 5000; // Reset baseline after 5 seconds of silence
         const checkAudioLevel = async () => {
             try {
-                const stats = …
+                const stats = await this.pc1.getStats();
                 const report = flatten(stats);
                 // Audio levels are present inside stats of type `media-source` and of kind `audio`
                 const audioMediaSourceStats = report.find((stat) => stat.type === 'media-source' &&
@@ -11562,8 +11675,7 @@ class RNSpeechDetector {
                 logger.error('error checking audio level from stats', error);
             }
         };
-
-        const intervalId = setInterval(checkAudioLevel, 100);
+        const intervalId = setInterval(checkAudioLevel, 250);
         return () => {
             clearInterval(intervalId);
             clearTimeout(speechTimer);
@@ -11589,8 +11701,11 @@ class MicrophoneManager extends AudioDeviceManager {
         super(call, new MicrophoneManagerState(disableMode), TrackType.AUDIO);
         this.speakingWhileMutedNotificationEnabled = true;
         this.soundDetectorConcurrencyTag = Symbol('soundDetectorConcurrencyTag');
+        this.silenceThresholdMs = 5000;
     }
     setup() {
+        if (this.areSubscriptionsSetUp)
+            return;
         super.setup();
         this.subscriptions.push(createSafeAsyncSubscription(rxjs.combineLatest([
             this.call.state.callingState$,
@@ -11607,7 +11722,7 @@ class MicrophoneManager extends AudioDeviceManager {
             if (!this.speakingWhileMutedNotificationEnabled)
                 return;
             if (ownCapabilities.includes(OwnCapability.SEND_AUDIO)) {
-                if (status …
+                if (status !== 'enabled') {
                     await this.startSpeakingWhileMutedDetection(deviceId);
                 }
                 else {
@@ -11656,6 +11771,40 @@ class MicrophoneManager extends AudioDeviceManager {
                 });
             }
         }));
+        if (!isReactNative()) {
+            const unsubscribe = createSafeAsyncSubscription(rxjs.combineLatest([this.state.status$, this.state.mediaStream$]), async ([status, mediaStream]) => {
+                if (this.noAudioDetectorCleanup) {
+                    const cleanup = this.noAudioDetectorCleanup;
+                    this.noAudioDetectorCleanup = undefined;
+                    await cleanup().catch((err) => {
+                        this.logger.warn('Failed to stop no-audio detector', err);
+                    });
+                }
+                if (status !== 'enabled' || !mediaStream)
+                    return;
+                if (this.silenceThresholdMs <= 0)
+                    return;
+                const deviceId = this.state.selectedDevice;
+                const devices = getCurrentValue(this.listDevices());
+                const label = devices.find((d) => d.deviceId === deviceId)?.label;
+                this.noAudioDetectorCleanup = createNoAudioDetector(mediaStream, {
+                    noAudioThresholdMs: this.silenceThresholdMs,
+                    emitIntervalMs: this.silenceThresholdMs,
+                    onCaptureStatusChange: (capturesAudio) => {
+                        const event = {
+                            type: 'mic.capture_report',
+                            call_cid: this.call.cid,
+                            capturesAudio,
+                            deviceId,
+                            label,
+                        };
+                        this.call.tracer.trace('mic.capture_report', event);
+                        this.call.streamClient.dispatchEvent(event);
+                    },
+                });
+            });
+            this.subscriptions.push(unsubscribe);
+        }
     }
     /**
      * Enables noise cancellation for the microphone.
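The net effect: on browsers, whenever the microphone is enabled with a live stream, the manager watches for `silenceThresholdMs` of continuous silence and then both traces and dispatches a `mic.capture_report` event carrying `call_cid`, `capturesAudio`, `deviceId`, and `label`. A hedged sketch of consuming it, assuming the client's usual `on(eventName, handler)` listener API also applies to this custom event:

    client.on('mic.capture_report', (event: any) => {
      if (!event.capturesAudio) {
        console.warn(`Mic "${event.label ?? event.deviceId}" appears silent on ${event.call_cid}`);
      }
    });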
@@ -11753,6 +11902,45 @@ class MicrophoneManager extends AudioDeviceManager {
         this.speakingWhileMutedNotificationEnabled = false;
         await this.stopSpeakingWhileMutedDetection();
     }
+    /**
+     * Sets the silence threshold in milliseconds for no-audio detection.
+     * When the microphone is enabled but produces no audio for this duration,
+     * a 'mic.capture_report' event will be emitted.
+     *
+     * @param thresholdMs the threshold in milliseconds (default: 5000).
+     * Set to 0 or a negative value to disable no-audio detection.
+     */
+    setSilenceThreshold(thresholdMs) {
+        this.silenceThresholdMs = thresholdMs;
+    }
+    /**
+     * Performs audio capture test on a specific microphone.
+     *
+     * This method is only available in browser environments (not React Native).
+     *
+     * @param deviceId The device ID to test.
+     * @param options Optional test configuration.
+     * @returns Promise that resolves with the test result (true or false).
+     */
+    async performTest(deviceId, options) {
+        if (isReactNative())
+            throw new Error('Not available in React Native');
+        const stream = await this.getStream({ deviceId: { exact: deviceId } });
+        const { testDurationMs = 3000 } = options || {};
+        const { promise, resolve } = promiseWithResolvers();
+        const cleanup = createNoAudioDetector(stream, {
+            noAudioThresholdMs: testDurationMs,
+            emitIntervalMs: testDurationMs,
+            onCaptureStatusChange: async (capturesAudio) => {
+                resolve(capturesAudio);
+                await cleanup().catch((err) => {
+                    this.logger.warn('Failed to stop detector during test', err);
+                });
+                disposeOfMediaStream(stream);
+            },
+        });
+        return promise;
+    }
     /**
      * Applies the audio settings to the microphone.
      * @param settings the audio settings to apply.
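Usage sketch for the two new public `MicrophoneManager` methods (`performTest` is browser-only; `deviceId` is assumed to come from a device listing):

    // Raise the no-audio threshold to 10s, or pass 0 to disable detection:
    call.microphone.setSilenceThreshold(10000);

    // Probe a specific microphone for ~3s; resolves true if it captured audio.
    const capturesAudio = await call.microphone.performTest(deviceId, {
      testDurationMs: 3000,
    });
    console.log('device captures audio:', capturesAudio);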
@@ -11801,13 +11989,12 @@ class MicrophoneManager extends AudioDeviceManager {
     }
     async startSpeakingWhileMutedDetection(deviceId) {
         await withoutConcurrency(this.soundDetectorConcurrencyTag, async () => {
-            await this.stopSpeakingWhileMutedDetection();
             if (isReactNative()) {
                 this.rnSpeechDetector = new RNSpeechDetector();
                 const unsubscribe = await this.rnSpeechDetector.start((event) => {
                     this.state.setSpeakingWhileMuted(event.isSoundDetected);
                 });
-                this.soundDetectorCleanup = () => {
+                this.soundDetectorCleanup = async () => {
                     unsubscribe();
                     this.rnSpeechDetector = undefined;
                 };
@@ -11815,6 +12002,7 @@ class MicrophoneManager extends AudioDeviceManager {
             else {
                 // Need to start a new stream that's not connected to publisher
                 const stream = await this.getStream({
+                    ...this.state.defaultConstraints,
                     deviceId: { exact: deviceId },
                 });
                 this.soundDetectorCleanup = createSoundDetector(stream, (event) => {
@@ -11894,6 +12082,8 @@ class ScreenShareManager extends AudioDeviceManager {
         super(call, new ScreenShareState(), TrackType.SCREEN_SHARE);
     }
     setup() {
+        if (this.areSubscriptionsSetUp)
+            return;
         super.setup();
         this.subscriptions.push(createSubscription(this.call.state.settings$, (settings) => {
             const maybeTargetResolution = settings?.screensharing.target_resolution;
@@ -12240,6 +12430,9 @@ class Call {
         this.leaveCallHooks.add(registerEventHandlers(this, this.dispatcher));
         this.registerEffects();
         this.registerReconnectHandlers();
+        // Set up the device managers again. Although this is already done
+        // in the DeviceManager's constructor, they'll need to be re-set up
+        // in the cases where a call instance is recycled (join -> leave -> join).
         this.camera.setup();
         this.microphone.setup();
         this.screenShare.setup();
@@ -12467,6 +12660,8 @@ class Call {
         }
         this.statsReporter?.stop();
         this.statsReporter = undefined;
+        const leaveReason = message ?? reason ?? 'user is leaving the call';
+        this.tracer.trace('call.leaveReason', leaveReason);
         this.sfuStatsReporter?.flush();
         this.sfuStatsReporter?.stop();
         this.sfuStatsReporter = undefined;
@@ -12474,7 +12669,7 @@ class Call {
         this.subscriber = undefined;
         this.publisher?.dispose();
         this.publisher = undefined;
-        await this.sfuClient?.leaveAndClose(…
+        await this.sfuClient?.leaveAndClose(leaveReason);
         this.sfuClient = undefined;
         this.dynascaleManager.setSfuClient(undefined);
         await this.dynascaleManager.dispose();
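Leaving a call now traces `call.leaveReason` and forwards the same string to `leaveAndClose`, preferring the caller-supplied `message`, then `reason`, then the default text. A hedged sketch, assuming `leave()` still accepts the `reason` and `message` options referenced above:

    await call.leave({ reason: 'cancel', message: 'caller hung up' });
    // traces 'call.leaveReason' with 'caller hung up' and passes it to the SFU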
@@ -12616,6 +12811,7 @@ class Call {
          * Unless you are implementing a custom "ringing" flow, you should not use this method.
          */
         this.accept = async () => {
+            this.tracer.trace('call.accept', '');
             return this.streamClient.post(`${this.streamClientBasePath}/accept`);
         };
         /**
@@ -12628,6 +12824,7 @@ class Call {
          * @param reason the reason for rejecting the call.
          */
         this.reject = async (reason = 'decline') => {
+            this.tracer.trace('call.reject', reason);
             return this.streamClient.post(`${this.streamClientBasePath}/reject`, { reason: reason });
         };
         /**
@@ -13391,11 +13588,6 @@ class Call {
                     trackTypes.push(screenShareAudio);
                 }
             }
-            if (track.kind === 'video') {
-                // schedules calibration report - the SFU will use the performance stats
-                // to adjust the quality thresholds as early as possible
-                this.sfuStatsReporter?.scheduleOne(3000);
-            }
             await this.updateLocalStreamState(mediaStream, ...trackTypes);
         };
         /**
@@ -15338,7 +15530,7 @@ class StreamClient {
         this.getUserAgent = () => {
             if (!this.cachedUserAgent) {
                 const { clientAppIdentifier = {} } = this.options;
-                const { sdkName = 'js', sdkVersion = "1.41.2", ...extras } = clientAppIdentifier;
+                const { sdkName = 'js', sdkVersion = "1.42.0", ...extras } = clientAppIdentifier;
                 this.cachedUserAgent = [
                     `stream-video-${sdkName}-v${sdkVersion}`,
                     ...Object.entries(extras).map(([key, value]) => `${key}=${value}`),
@@ -16064,7 +16256,7 @@ exports.getVideoBrowserPermission = getVideoBrowserPermission;
 exports.getVideoDevices = getVideoDevices;
 exports.getVideoStream = getVideoStream;
 exports.getWebRTCInfo = getWebRTCInfo;
-exports.hasAudio = hasAudio;
+exports.hasAudio = hasAudio$1;
 exports.hasPausedTrack = hasPausedTrack;
 exports.hasScreenShare = hasScreenShare;
 exports.hasScreenShareAudio = hasScreenShareAudio;