@stream-io/video-client 1.41.2 → 1.42.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/CHANGELOG.md +18 -0
- package/dist/index.browser.es.js +240 -48
- package/dist/index.browser.es.js.map +1 -1
- package/dist/index.cjs.js +240 -48
- package/dist/index.cjs.js.map +1 -1
- package/dist/index.es.js +240 -48
- package/dist/index.es.js.map +1 -1
- package/dist/src/coordinator/connection/types.d.ts +19 -3
- package/dist/src/devices/DeviceManager.d.ts +2 -2
- package/dist/src/devices/MicrophoneManager.d.ts +23 -0
- package/dist/src/helpers/no-audio-detector.d.ts +43 -0
- package/dist/src/stats/SfuStatsReporter.d.ts +2 -1
- package/dist/src/stats/types.d.ts +10 -0
- package/package.json +1 -1
- package/src/Call.ts +8 -9
- package/src/coordinator/connection/types.ts +20 -2
- package/src/devices/DeviceManager.ts +2 -2
- package/src/devices/MicrophoneManager.ts +96 -4
- package/src/devices/ScreenShareManager.ts +1 -0
- package/src/devices/__tests__/MicrophoneManager.test.ts +102 -1
- package/src/devices/__tests__/MicrophoneManagerRN.test.ts +7 -1
- package/src/devices/__tests__/mocks.ts +1 -0
- package/src/devices/__tests__/web-audio.mocks.ts +72 -0
- package/src/devices/devices.ts +1 -1
- package/src/helpers/RNSpeechDetector.ts +12 -23
- package/src/helpers/__tests__/no-audio-detector.test.ts +271 -0
- package/src/helpers/no-audio-detector.ts +179 -0
- package/src/helpers/sound-detector.ts +3 -7
- package/src/stats/CallStateStatsReporter.ts +22 -0
- package/src/stats/SfuStatsReporter.ts +22 -14
- package/src/stats/types.ts +11 -0
package/CHANGELOG.md
CHANGED
@@ -2,6 +2,24 @@
 
 This file was generated using [@jscutlery/semver](https://github.com/jscutlery/semver).
 
+## [1.42.0](https://github.com/GetStream/stream-video-js/compare/@stream-io/video-client-1.41.3...@stream-io/video-client-1.42.0) (2026-02-06)
+
+### Features
+
+- Detectors for broken microphone setup ([#2090](https://github.com/GetStream/stream-video-js/issues/2090)) ([552b3f4](https://github.com/GetStream/stream-video-js/commit/552b3f4e3c54e0b6fa67221cd510f4ea1f6f8a61))
+
+### Bug Fixes
+
+- **react:** apply defaultConstraints to speaking-while-muted detection stream ([#2103](https://github.com/GetStream/stream-video-js/issues/2103)) ([28b5538](https://github.com/GetStream/stream-video-js/commit/28b55380778723fc308d37396c8095a5a3ef7aa2))
+- start speaking while muted detection in pristine state too ([#2110](https://github.com/GetStream/stream-video-js/issues/2110)) ([bc093bc](https://github.com/GetStream/stream-video-js/commit/bc093bc3ac2451541524b134a9044131a69964af))
+
+## [1.41.3](https://github.com/GetStream/stream-video-js/compare/@stream-io/video-client-1.41.2...@stream-io/video-client-1.41.3) (2026-01-30)
+
+### Bug Fixes
+
+- **react:** improve logic for calculating the lower / upper threshold for video filter degradation ([#2094](https://github.com/GetStream/stream-video-js/issues/2094)) ([5cd2d5c](https://github.com/GetStream/stream-video-js/commit/5cd2d5cb34fc7bbdfaf9529eb9f8d33a40346cab))
+- **stats:** adjust send stats frequency and include "leave reason" ([#2104](https://github.com/GetStream/stream-video-js/issues/2104)) ([0182832](https://github.com/GetStream/stream-video-js/commit/018283299bebe5d5078d4006ec86b6cd56884e77))
+
 ## [1.41.2](https://github.com/GetStream/stream-video-js/compare/@stream-io/video-client-1.41.1...@stream-io/video-client-1.41.2) (2026-01-28)
 
 - deduplicate RN compatibility assertions ([#2101](https://github.com/GetStream/stream-video-js/issues/2101)) ([5b9e6bc](https://github.com/GetStream/stream-video-js/commit/5b9e6bc227c55b067eea6345315bca015c8a7ee4))
package/dist/index.browser.es.js
CHANGED
@@ -4789,7 +4789,7 @@ const hasVideo = (p) => p.publishedTracks.includes(TrackType.VIDEO);
  *
  * @param p the participant to check.
  */
-const hasAudio = (p) => p.publishedTracks.includes(TrackType.AUDIO);
+const hasAudio$1 = (p) => p.publishedTracks.includes(TrackType.AUDIO);
 /**
  * Check if a participant is screen sharing.
  *
@@ -4890,8 +4890,8 @@ const publishingVideo = (a, b) => {
  * @param b the second participant.
  */
 const publishingAudio = (a, b) => {
-    const hasA = hasAudio(a);
-    const hasB = hasAudio(b);
+    const hasA = hasAudio$1(a);
+    const hasB = hasAudio$1(b);
     if (hasA && !hasB)
         return -1;
     if (!hasA && hasB)
@@ -6188,7 +6188,7 @@ const getSdkVersion = (sdk) => {
     return sdk ? `${sdk.major}.${sdk.minor}.${sdk.patch}` : '0.0.0-development';
 };
 
-const version = "1.41.2";
+const version = "1.42.0";
 const [major, minor, patch] = version.split('.');
 let sdkInfo = {
     type: SdkType.PLAIN_JAVASCRIPT,
@@ -6511,6 +6511,16 @@ const createStatsReporter = ({ subscriber, publisher, state, datacenter, polling
         stop,
     };
 };
+/**
+ * Extracts camera statistics from a media source.
+ *
+ * @param mediaSource the media source stats to extract camera info from.
+ */
+const getCameraStats = (mediaSource) => ({
+    frameRate: mediaSource.framesPerSecond,
+    frameWidth: mediaSource.width,
+    frameHeight: mediaSource.height,
+});
 /**
  * Transforms raw RTC stats into a slimmer and uniform across browsers format.
  *
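The publisher's video stats now carry a `camera` snapshot sourced from the `media-source` RTC stats entry. An illustrative typing of the new field, derived from `getCameraStats` above (the interface name here is mine; the release's `stats/types.d.ts` carries the actual typing):

```ts
// Values come from the RTCVideoSourceStats ('media-source') report entry,
// i.e. what the camera delivers before encoding and simulcast scaling.
interface CameraStats {
  frameRate?: number; // RTCVideoSourceStats.framesPerSecond
  frameWidth?: number; // RTCVideoSourceStats.width
  frameHeight?: number; // RTCVideoSourceStats.height
}
```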
@@ -6536,6 +6546,7 @@ const transform = (report, opts) => {
         }
         let trackType;
         let audioLevel;
+        let camera;
         let concealedSamples;
         let concealmentEvents;
         let packetsReceived;
@@ -6551,6 +6562,9 @@
                 typeof mediaSource.audioLevel === 'number') {
                 audioLevel = mediaSource.audioLevel;
             }
+            if (trackKind === 'video') {
+                camera = getCameraStats(mediaSource);
+            }
         }
     }
     else if (kind === 'subscriber' && trackKind === 'audio') {
@@ -6584,6 +6598,7 @@
             concealmentEvents,
             packetsReceived,
             packetsLost,
+            camera,
         };
     });
     return {
@@ -6603,6 +6618,7 @@ const getEmptyVideoStats = (stats) => {
     highestFrameWidth: 0,
     highestFrameHeight: 0,
     highestFramesPerSecond: 0,
+    camera: {},
     codec: '',
     codecPerTrackType: {},
     timestamp: Date.now(),
@@ -6647,6 +6663,9 @@ const aggregate = (stats) => {
             acc.highestFramesPerSecond = stream.framesPerSecond || 0;
             maxArea = streamArea;
         }
+        if (stream.trackType === TrackType.VIDEO) {
+            acc.camera = stream.camera;
+        }
         qualityLimitationReasons.add(stream.qualityLimitationReason || '');
         return acc;
     }, aggregatedStats);
@@ -6711,6 +6730,7 @@ const aggregateAudio = (stats) => {
 class SfuStatsReporter {
     constructor(sfuClient, { options, clientDetails, subscriber, publisher, microphone, camera, state, tracer, unifiedSessionId, }) {
         this.logger = videoLoggerSystem.getLogger('SfuStatsReporter');
+        this.reportCount = 0;
         this.inputDevices = new Map();
         this.observeDevice = (device, kind) => {
             const { browserPermissionState$ } = device.state;
@@ -6812,17 +6832,31 @@
                 throw err;
             }
         };
+        this.scheduleNextReport = () => {
+            const intervals = [1500, 3000, 3000, 5000];
+            if (this.reportCount < intervals.length) {
+                this.timeoutId = setTimeout(() => {
+                    this.flush();
+                    this.reportCount++;
+                    this.scheduleNextReport();
+                }, intervals[this.reportCount]);
+            }
+            else {
+                clearInterval(this.intervalId);
+                this.intervalId = setInterval(() => {
+                    this.flush();
+                }, this.options.reporting_interval_ms);
+            }
+        };
         this.start = () => {
             if (this.options.reporting_interval_ms <= 0)
                 return;
             this.observeDevice(this.microphone, 'mic');
             this.observeDevice(this.camera, 'camera');
+            this.reportCount = 0;
             clearInterval(this.intervalId);
-            this.intervalId = setInterval(() => {
-                this.run().catch((err) => {
-                    this.logger.warn('Failed to report stats', err);
-                });
-            }, this.options.reporting_interval_ms);
+            clearTimeout(this.timeoutId);
+            this.scheduleNextReport();
         };
         this.stop = () => {
             this.unsubscribeDevicePermissionsSubscription?.();
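`scheduleNextReport` replaces the removed `scheduleOne` calibration hook (see the hunks below) and front-loads the first few stats flushes before settling into the SFU-configured cadence. A quick timeline sketch of the resulting behavior:

```ts
// Warm-up delays are chained back-to-back; with reporting_interval_ms = 10_000
// this produces flushes at the following offsets from start():
const warmup = [1500, 3000, 3000, 5000];
let t = 0;
const warmupFlushes = warmup.map((delay) => (t += delay));
console.log(warmupFlushes); // [1500, 4500, 7500, 12500]
// ...after which a regular setInterval flush fires every 10s.
```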
@@ -6834,20 +6868,13 @@
             this.intervalId = undefined;
             clearTimeout(this.timeoutId);
             this.timeoutId = undefined;
+            this.reportCount = 0;
         };
         this.flush = () => {
             this.run().catch((err) => {
                 this.logger.warn('Failed to flush report stats', err);
             });
         };
-        this.scheduleOne = (timeout) => {
-            clearTimeout(this.timeoutId);
-            this.timeoutId = setTimeout(() => {
-                this.run().catch((err) => {
-                    this.logger.warn('Failed to report stats', err);
-                });
-            }, timeout);
-        };
         this.sfuClient = sfuClient;
         this.options = options;
         this.subscriber = subscriber;
@@ -10352,7 +10379,7 @@ const getAudioStream = async (trackConstraints, tracer) => {
         videoLoggerSystem
             .getLogger('devices')
             .warn('Failed to get audio stream, will try again with relaxed constraints', { error, constraints, relaxedConstraints });
-        return getAudioStream(relaxedConstraints);
+        return getAudioStream(relaxedConstraints, tracer);
     }
     videoLoggerSystem.getLogger('devices').error('Failed to get audio stream', {
         error,
@@ -11349,9 +11376,6 @@ class MicrophoneManagerState extends AudioDeviceManagerState {
     }
 }
 
-const DETECTION_FREQUENCY_IN_MS = 500;
-const AUDIO_LEVEL_THRESHOLD = 150;
-const FFT_SIZE = 128;
 /**
  * Creates a new sound detector.
  *
@@ -11361,7 +11385,7 @@ const FFT_SIZE = 128;
  * @returns a clean-up function which once invoked stops the sound detector.
  */
 const createSoundDetector = (audioStream, onSoundDetectedStateChanged, options = {}) => {
-    const { detectionFrequencyInMs = DETECTION_FREQUENCY_IN_MS, audioLevelThreshold = AUDIO_LEVEL_THRESHOLD, fftSize = FFT_SIZE, destroyStreamOnStop = true, } = options;
+    const { detectionFrequencyInMs = 500, audioLevelThreshold = 150, fftSize = 128, destroyStreamOnStop = true, } = options;
     const audioContext = new AudioContext();
     const analyser = audioContext.createAnalyser();
     analyser.fftSize = fftSize;
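With the module-level constants gone, `createSoundDetector` now carries its defaults inline. A minimal usage sketch of the exported function, assuming the caller already holds microphone permission (the explicit options simply restate the defaults):

```ts
import { createSoundDetector } from '@stream-io/video-client';

const stream = await navigator.mediaDevices.getUserMedia({ audio: true });
const stopDetector = createSoundDetector(
  stream,
  (event) => console.log('sound detected:', event.isSoundDetected),
  { detectionFrequencyInMs: 500, audioLevelThreshold: 150, fftSize: 128 },
);
// Later: stop polling; with destroyStreamOnStop (the default), the stream's
// tracks are stopped as well.
await stopDetector();
```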
@@ -11402,6 +11426,101 @@ const createSoundDetector = (audioStream, onSoundDetectedStateChanged, options =
     };
 };
 
+/**
+ * Analyzes frequency data to determine if audio is being captured.
+ */
+const hasAudio = (analyser, threshold) => {
+    const data = new Uint8Array(analyser.frequencyBinCount);
+    analyser.getByteFrequencyData(data);
+    return data.some((value) => value > threshold);
+};
+/** Helper for "no event" transitions */
+const noEmit = (nextState) => ({
+    shouldEmit: false,
+    nextState,
+});
+/** Helper for event-emitting transitions */
+const emit = (capturesAudio, nextState) => ({ shouldEmit: true, nextState, capturesAudio });
+/**
+ * State transition function - computes next state and whether to emit an event.
+ */
+const transitionState = (state, audioDetected, options) => {
+    if (audioDetected) {
+        return state.kind === 'IDLE' || state.kind === 'EMITTING'
+            ? emit(true, state)
+            : noEmit(state);
+    }
+    const { noAudioThresholdMs, emitIntervalMs } = options;
+    const now = Date.now();
+    switch (state.kind) {
+        case 'IDLE':
+            return noEmit({ kind: 'DETECTING', noAudioStartTime: now });
+        case 'DETECTING': {
+            const { noAudioStartTime } = state;
+            const elapsed = now - noAudioStartTime;
+            return elapsed >= noAudioThresholdMs
+                ? emit(false, { kind: 'EMITTING', noAudioStartTime, lastEmitTime: now })
+                : noEmit(state);
+        }
+        case 'EMITTING': {
+            const timeSinceLastEmit = now - state.lastEmitTime;
+            return timeSinceLastEmit >= emitIntervalMs
+                ? emit(false, { ...state, lastEmitTime: now })
+                : noEmit(state);
+        }
+    }
+};
+/**
+ * Creates and configures an audio analyzer for the given stream.
+ */
+const createAudioAnalyzer = (audioStream, fftSize) => {
+    const audioContext = new AudioContext();
+    const analyser = audioContext.createAnalyser();
+    analyser.fftSize = fftSize;
+    const microphone = audioContext.createMediaStreamSource(audioStream);
+    microphone.connect(analyser);
+    return { audioContext, analyser };
+};
+/**
+ * Creates a new no-audio detector that monitors continuous absence of audio on an audio stream.
+ *
+ * @param audioStream the audio stream to observe.
+ * @param options custom options for the no-audio detector.
+ * @returns a cleanup function which once invoked stops the no-audio detector.
+ */
+const createNoAudioDetector = (audioStream, options) => {
+    const { detectionFrequencyInMs = 350, audioLevelThreshold = 0, fftSize = 256, onCaptureStatusChange, } = options;
+    let state = { kind: 'IDLE' };
+    const { audioContext, analyser } = createAudioAnalyzer(audioStream, fftSize);
+    const detectionIntervalId = setInterval(() => {
+        const [audioTrack] = audioStream.getAudioTracks();
+        if (!audioTrack?.enabled || audioTrack.readyState === 'ended') {
+            state = { kind: 'IDLE' };
+            return;
+        }
+        const audioDetected = hasAudio(analyser, audioLevelThreshold);
+        const transition = transitionState(state, audioDetected, options);
+        state = transition.nextState;
+        if (!transition.shouldEmit)
+            return;
+        const { capturesAudio } = transition;
+        onCaptureStatusChange(capturesAudio);
+        if (capturesAudio) {
+            stop().catch((err) => {
+                const logger = videoLoggerSystem.getLogger('NoAudioDetector');
+                logger.error('Error stopping no-audio detector', err);
+            });
+        }
+    }, detectionFrequencyInMs);
+    async function stop() {
+        clearInterval(detectionIntervalId);
+        if (audioContext.state !== 'closed') {
+            await audioContext.close();
+        }
+    }
+    return stop;
+};
+
 class RNSpeechDetector {
     constructor(externalAudioStream) {
         this.pc1 = new RTCPeerConnection({});
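`createNoAudioDetector` is the core of the new broken-microphone detection: it cycles IDLE → DETECTING → EMITTING, reports `false` after `noAudioThresholdMs` of continuous silence, repeats every `emitIntervalMs` while the silence persists, and reports `true` (then stops itself) as soon as sound returns. A standalone sketch of how it would be driven; note that in this build the helper is internal (it does not appear in the export list below), so the `declare` merely mirrors the shape inferred from the code above:

```ts
declare const createNoAudioDetector: (
  audioStream: MediaStream,
  options: {
    noAudioThresholdMs: number;
    emitIntervalMs: number;
    onCaptureStatusChange: (capturesAudio: boolean) => void;
    detectionFrequencyInMs?: number; // default 350
    audioLevelThreshold?: number; // default 0
    fftSize?: number; // default 256
  },
) => () => Promise<void>;

const stream = await navigator.mediaDevices.getUserMedia({ audio: true });
const stop = createNoAudioDetector(stream, {
  noAudioThresholdMs: 5000, // report after 5s of continuous silence
  emitIntervalMs: 5000, // re-report every 5s while the silence persists
  onCaptureStatusChange: (capturesAudio) => {
    if (!capturesAudio) console.warn('Microphone appears to capture no audio');
  },
});
// The detector stops itself after reporting `true`; call stop() to tear it
// down manually before that.
```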
@@ -11413,16 +11532,10 @@
      */
     async start(onSoundDetectedStateChanged) {
         try {
-            let audioStream;
-            if (this.externalAudioStream) {
-                audioStream = this.externalAudioStream;
-            }
-            else {
-                audioStream = await navigator.mediaDevices.getUserMedia({
-                    audio: true,
-                });
-                this.audioStream = audioStream;
-            }
+            const audioStream = this.externalAudioStream != null
+                ? this.externalAudioStream
+                : await navigator.mediaDevices.getUserMedia({ audio: true });
+            this.audioStream = audioStream;
             this.pc1.addEventListener('icecandidate', async (e) => {
                 await this.pc2.addIceCandidate(e.candidate);
             });
@@ -11446,9 +11559,9 @@
             const answer = await this.pc2.createAnswer();
             await this.pc1.setRemoteDescription(answer);
             await this.pc2.setLocalDescription(answer);
-            const cleanup = this.onSpeakingDetectedStateChange(onSoundDetectedStateChanged);
+            const unsubscribe = this.onSpeakingDetectedStateChange(onSoundDetectedStateChanged);
             return () => {
-                cleanup();
+                unsubscribe();
                 this.stop();
             };
         }
@@ -11488,7 +11601,7 @@
         const silenceTimeout = 5000; // Reset baseline after 5 seconds of silence
         const checkAudioLevel = async () => {
             try {
-                const stats =
+                const stats = await this.pc1.getStats();
                 const report = flatten(stats);
                 // Audio levels are present inside stats of type `media-source` and of kind `audio`
                 const audioMediaSourceStats = report.find((stat) => stat.type === 'media-source' &&
@@ -11542,8 +11655,7 @@
                 logger.error('error checking audio level from stats', error);
             }
         };
-
-        const intervalId = setInterval(checkAudioLevel, 100);
+        const intervalId = setInterval(checkAudioLevel, 250);
         return () => {
             clearInterval(intervalId);
             clearTimeout(speechTimer);
@@ -11569,8 +11681,11 @@ class MicrophoneManager extends AudioDeviceManager {
         super(call, new MicrophoneManagerState(disableMode), TrackType.AUDIO);
         this.speakingWhileMutedNotificationEnabled = true;
         this.soundDetectorConcurrencyTag = Symbol('soundDetectorConcurrencyTag');
+        this.silenceThresholdMs = 5000;
     }
     setup() {
+        if (this.areSubscriptionsSetUp)
+            return;
         super.setup();
         this.subscriptions.push(createSafeAsyncSubscription(combineLatest([
             this.call.state.callingState$,
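The new `areSubscriptionsSetUp` guard makes `setup()` idempotent, which matters when a `Call` instance is recycled (the constructor-side comment in the `Call` hunk further below spells this out). A sketch of the scenario, assuming an already-configured `StreamVideoClient` named `client`:

```ts
const call = client.call('default', 'my-call-id');
await call.join();
await call.leave();
// Re-joining the same instance re-runs device-manager setup(); the guard
// turns the redundant invocation into a no-op instead of registering
// duplicate subscriptions.
await call.join();
```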
@@ -11587,7 +11702,7 @@
             if (!this.speakingWhileMutedNotificationEnabled)
                 return;
             if (ownCapabilities.includes(OwnCapability.SEND_AUDIO)) {
-                if (status === 'disabled') {
+                if (status !== 'enabled') {
                     await this.startSpeakingWhileMutedDetection(deviceId);
                 }
                 else {
@@ -11636,6 +11751,40 @@
                 });
             }
         }));
+        if (!isReactNative()) {
+            const unsubscribe = createSafeAsyncSubscription(combineLatest([this.state.status$, this.state.mediaStream$]), async ([status, mediaStream]) => {
+                if (this.noAudioDetectorCleanup) {
+                    const cleanup = this.noAudioDetectorCleanup;
+                    this.noAudioDetectorCleanup = undefined;
+                    await cleanup().catch((err) => {
+                        this.logger.warn('Failed to stop no-audio detector', err);
+                    });
+                }
+                if (status !== 'enabled' || !mediaStream)
+                    return;
+                if (this.silenceThresholdMs <= 0)
+                    return;
+                const deviceId = this.state.selectedDevice;
+                const devices = getCurrentValue(this.listDevices());
+                const label = devices.find((d) => d.deviceId === deviceId)?.label;
+                this.noAudioDetectorCleanup = createNoAudioDetector(mediaStream, {
+                    noAudioThresholdMs: this.silenceThresholdMs,
+                    emitIntervalMs: this.silenceThresholdMs,
+                    onCaptureStatusChange: (capturesAudio) => {
+                        const event = {
+                            type: 'mic.capture_report',
+                            call_cid: this.call.cid,
+                            capturesAudio,
+                            deviceId,
+                            label,
+                        };
+                        this.call.tracer.trace('mic.capture_report', event);
+                        this.call.streamClient.dispatchEvent(event);
+                    },
+                });
+            });
+            this.subscriptions.push(unsubscribe);
+        }
     }
     /**
      * Enables noise cancellation for the microphone.
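When the detector concludes that an enabled microphone delivers no audio, the manager traces and dispatches a custom `mic.capture_report` event through the coordinator client. The payload shape, read off the code above (the interface name is illustrative; the release's `coordinator/connection/types.d.ts` carries the actual typing):

```ts
interface MicCaptureReportEvent {
  type: 'mic.capture_report';
  call_cid: string; // e.g. 'default:my-call-id'
  capturesAudio: boolean; // false while silent, true once audio recovers
  deviceId: string | undefined; // currently selected microphone, if any
  label: string | undefined; // human-readable device label, when available
}
```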
@@ -11733,6 +11882,45 @@
         this.speakingWhileMutedNotificationEnabled = false;
         await this.stopSpeakingWhileMutedDetection();
     }
+    /**
+     * Sets the silence threshold in milliseconds for no-audio detection.
+     * When the microphone is enabled but produces no audio for this duration,
+     * a 'mic.capture_report' event will be emitted.
+     *
+     * @param thresholdMs the threshold in milliseconds (default: 5000).
+     * Set to 0 or a negative value to disable no-audio detection.
+     */
+    setSilenceThreshold(thresholdMs) {
+        this.silenceThresholdMs = thresholdMs;
+    }
+    /**
+     * Performs audio capture test on a specific microphone.
+     *
+     * This method is only available in browser environments (not React Native).
+     *
+     * @param deviceId The device ID to test.
+     * @param options Optional test configuration.
+     * @returns Promise that resolves with the test result (true or false).
+     */
+    async performTest(deviceId, options) {
+        if (isReactNative())
+            throw new Error('Not available in React Native');
+        const stream = await this.getStream({ deviceId: { exact: deviceId } });
+        const { testDurationMs = 3000 } = options || {};
+        const { promise, resolve } = promiseWithResolvers();
+        const cleanup = createNoAudioDetector(stream, {
+            noAudioThresholdMs: testDurationMs,
+            emitIntervalMs: testDurationMs,
+            onCaptureStatusChange: async (capturesAudio) => {
+                resolve(capturesAudio);
+                await cleanup().catch((err) => {
+                    this.logger.warn('Failed to stop detector during test', err);
+                });
+                disposeOfMediaStream(stream);
+            },
+        });
+        return promise;
+    }
     /**
      * Applies the audio settings to the microphone.
      * @param settings the audio settings to apply.
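`performTest` reuses the no-audio detector as a one-shot probe: it resolves `true` as soon as any audio is captured, or `false` once `testDurationMs` elapses in silence, and disposes of the probe stream either way. A usage sketch for a device-picker UI, assuming a joined `call`:

```ts
// Probe every available microphone for up to ~3s each.
const devices = await navigator.mediaDevices.enumerateDevices();
for (const device of devices.filter((d) => d.kind === 'audioinput')) {
  const capturesAudio = await call.microphone.performTest(device.deviceId, {
    testDurationMs: 3000,
  });
  console.log(`${device.label}: ${capturesAudio ? 'OK' : 'no audio captured'}`);
}
```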
@@ -11781,13 +11969,12 @@
     }
     async startSpeakingWhileMutedDetection(deviceId) {
         await withoutConcurrency(this.soundDetectorConcurrencyTag, async () => {
-            await this.stopSpeakingWhileMutedDetection();
             if (isReactNative()) {
                 this.rnSpeechDetector = new RNSpeechDetector();
                 const unsubscribe = await this.rnSpeechDetector.start((event) => {
                     this.state.setSpeakingWhileMuted(event.isSoundDetected);
                 });
-                this.soundDetectorCleanup = () => {
+                this.soundDetectorCleanup = async () => {
                     unsubscribe();
                     this.rnSpeechDetector = undefined;
                 };
@@ -11795,6 +11982,7 @@
             else {
                 // Need to start a new stream that's not connected to publisher
                 const stream = await this.getStream({
+                    ...this.state.defaultConstraints,
                     deviceId: { exact: deviceId },
                 });
                 this.soundDetectorCleanup = createSoundDetector(stream, (event) => {
@@ -11874,6 +12062,8 @@ class ScreenShareManager extends AudioDeviceManager {
         super(call, new ScreenShareState(), TrackType.SCREEN_SHARE);
     }
     setup() {
+        if (this.areSubscriptionsSetUp)
+            return;
         super.setup();
         this.subscriptions.push(createSubscription(this.call.state.settings$, (settings) => {
             const maybeTargetResolution = settings?.screensharing.target_resolution;
@@ -12220,6 +12410,9 @@ class Call {
         this.leaveCallHooks.add(registerEventHandlers(this, this.dispatcher));
         this.registerEffects();
        this.registerReconnectHandlers();
+        // Set up the device managers again. Although this is already done
+        // in the DeviceManager's constructor, they'll need to be re-set up
+        // in the cases where a call instance is recycled (join -> leave -> join).
         this.camera.setup();
         this.microphone.setup();
         this.screenShare.setup();
@@ -12447,6 +12640,8 @@
         }
         this.statsReporter?.stop();
         this.statsReporter = undefined;
+        const leaveReason = message ?? reason ?? 'user is leaving the call';
+        this.tracer.trace('call.leaveReason', leaveReason);
         this.sfuStatsReporter?.flush();
         this.sfuStatsReporter?.stop();
         this.sfuStatsReporter = undefined;
@@ -12454,7 +12649,7 @@
         this.subscriber = undefined;
         this.publisher?.dispose();
         this.publisher = undefined;
-        await this.sfuClient?.leaveAndClose(message ?? reason ?? 'user is leaving the call');
+        await this.sfuClient?.leaveAndClose(leaveReason);
         this.sfuClient = undefined;
         this.dynascaleManager.setSfuClient(undefined);
         await this.dynascaleManager.dispose();
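Together with the changelog's "leave reason" fix, `leave()` now traces the effective reason and hands the same string to the SFU via `leaveAndClose`. A sketch, assuming `leave()` accepts the `reason`/`message` options visible in scope above; per the code, the effective SFU reason is `message ?? reason ?? 'user is leaving the call'`:

```ts
await call.leave({ reason: 'cancel', message: 'user navigated away' });
// -> the tracer records 'call.leaveReason' = 'user navigated away'
// -> the SFU receives the same string via leaveAndClose()
```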
@@ -12596,6 +12791,7 @@
          * Unless you are implementing a custom "ringing" flow, you should not use this method.
          */
         this.accept = async () => {
+            this.tracer.trace('call.accept', '');
             return this.streamClient.post(`${this.streamClientBasePath}/accept`);
         };
         /**
@@ -12608,6 +12804,7 @@
          * @param reason the reason for rejecting the call.
          */
         this.reject = async (reason = 'decline') => {
+            this.tracer.trace('call.reject', reason);
             return this.streamClient.post(`${this.streamClientBasePath}/reject`, { reason: reason });
         };
         /**
@@ -13371,11 +13568,6 @@
                 trackTypes.push(screenShareAudio);
             }
         }
-        if (track.kind === 'video') {
-            // schedules calibration report - the SFU will use the performance stats
-            // to adjust the quality thresholds as early as possible
-            this.sfuStatsReporter?.scheduleOne(3000);
-        }
         await this.updateLocalStreamState(mediaStream, ...trackTypes);
     };
     /**
@@ -15320,7 +15512,7 @@ class StreamClient {
         this.getUserAgent = () => {
             if (!this.cachedUserAgent) {
                 const { clientAppIdentifier = {} } = this.options;
-                const { sdkName = 'js', sdkVersion = "1.41.2", ...extras } = clientAppIdentifier;
+                const { sdkName = 'js', sdkVersion = "1.42.0", ...extras } = clientAppIdentifier;
                 this.cachedUserAgent = [
                     `stream-video-${sdkName}-v${sdkVersion}`,
                     ...Object.entries(extras).map(([key, value]) => `${key}=${value}`),
@@ -15956,5 +16148,5 @@ const humanize = (n) => {
     return String(n);
 };
 
-export { AudioSettingsRequestDefaultDeviceEnum, AudioSettingsResponseDefaultDeviceEnum, browsers as Browsers, Call, CallRecordingFailedEventRecordingTypeEnum, CallRecordingReadyEventRecordingTypeEnum, CallRecordingStartedEventRecordingTypeEnum, CallRecordingStoppedEventRecordingTypeEnum, CallState, CallType, CallTypes, CallingState, CameraManager, CameraManagerState, CreateDeviceRequestPushProviderEnum, DebounceType, DeviceManager, DeviceManagerState, DynascaleManager, ErrorFromResponse, FrameRecordingSettingsRequestModeEnum, FrameRecordingSettingsRequestQualityEnum, FrameRecordingSettingsResponseModeEnum, IndividualRecordingSettingsRequestModeEnum, IndividualRecordingSettingsResponseModeEnum, IngressAudioEncodingOptionsRequestChannelsEnum, IngressSourceRequestFpsEnum, IngressVideoLayerRequestCodecEnum, LayoutSettingsRequestNameEnum, MicrophoneManager, MicrophoneManagerState, NoiseCancellationSettingsModeEnum, OwnCapability, RNSpeechDetector, RTMPBroadcastRequestQualityEnum, RTMPSettingsRequestQualityEnum, RawRecordingSettingsRequestModeEnum, RawRecordingSettingsResponseModeEnum, RecordSettingsRequestModeEnum, RecordSettingsRequestQualityEnum, rxUtils as RxUtils, ScreenShareManager, ScreenShareState, events as SfuEvents, SfuJoinError, models as SfuModels, SpeakerManager, SpeakerState, StartClosedCaptionsRequestLanguageEnum, StartTranscriptionRequestLanguageEnum, StreamSfuClient, StreamVideoClient, StreamVideoReadOnlyStateStore, StreamVideoWriteableStateStore, TranscriptionSettingsRequestClosedCaptionModeEnum, TranscriptionSettingsRequestLanguageEnum, TranscriptionSettingsRequestModeEnum, TranscriptionSettingsResponseClosedCaptionModeEnum, TranscriptionSettingsResponseLanguageEnum, TranscriptionSettingsResponseModeEnum, VideoSettingsRequestCameraFacingEnum, VideoSettingsResponseCameraFacingEnum, ViewportTracker, VisibilityState, checkIfAudioOutputChangeSupported, combineComparators, conditional, createSoundDetector, defaultSortPreset, descending, deviceIds$, disposeOfMediaStream, dominantSpeaker, getAudioBrowserPermission, getAudioDevices, getAudioOutputDevices, getAudioStream, getClientDetails, getDeviceState, getScreenShareStream, getSdkInfo, getVideoBrowserPermission, getVideoDevices, getVideoStream, getWebRTCInfo, hasAudio, hasPausedTrack, hasScreenShare, hasScreenShareAudio, hasVideo, humanize, isPinned, livestreamOrAudioRoomSortPreset, logToConsole, name, noopComparator, paginatedLayoutSortPreset, pinned, publishingAudio, publishingVideo, reactionType, resolveDeviceId, role, screenSharing, setDeviceInfo, setOSInfo, setPowerState, setSdkInfo, setThermalState, setWebRTCInfo, speakerLayoutSortPreset, speaking, videoLoggerSystem, withParticipantSource };
+export { AudioSettingsRequestDefaultDeviceEnum, AudioSettingsResponseDefaultDeviceEnum, browsers as Browsers, Call, CallRecordingFailedEventRecordingTypeEnum, CallRecordingReadyEventRecordingTypeEnum, CallRecordingStartedEventRecordingTypeEnum, CallRecordingStoppedEventRecordingTypeEnum, CallState, CallType, CallTypes, CallingState, CameraManager, CameraManagerState, CreateDeviceRequestPushProviderEnum, DebounceType, DeviceManager, DeviceManagerState, DynascaleManager, ErrorFromResponse, FrameRecordingSettingsRequestModeEnum, FrameRecordingSettingsRequestQualityEnum, FrameRecordingSettingsResponseModeEnum, IndividualRecordingSettingsRequestModeEnum, IndividualRecordingSettingsResponseModeEnum, IngressAudioEncodingOptionsRequestChannelsEnum, IngressSourceRequestFpsEnum, IngressVideoLayerRequestCodecEnum, LayoutSettingsRequestNameEnum, MicrophoneManager, MicrophoneManagerState, NoiseCancellationSettingsModeEnum, OwnCapability, RNSpeechDetector, RTMPBroadcastRequestQualityEnum, RTMPSettingsRequestQualityEnum, RawRecordingSettingsRequestModeEnum, RawRecordingSettingsResponseModeEnum, RecordSettingsRequestModeEnum, RecordSettingsRequestQualityEnum, rxUtils as RxUtils, ScreenShareManager, ScreenShareState, events as SfuEvents, SfuJoinError, models as SfuModels, SpeakerManager, SpeakerState, StartClosedCaptionsRequestLanguageEnum, StartTranscriptionRequestLanguageEnum, StreamSfuClient, StreamVideoClient, StreamVideoReadOnlyStateStore, StreamVideoWriteableStateStore, TranscriptionSettingsRequestClosedCaptionModeEnum, TranscriptionSettingsRequestLanguageEnum, TranscriptionSettingsRequestModeEnum, TranscriptionSettingsResponseClosedCaptionModeEnum, TranscriptionSettingsResponseLanguageEnum, TranscriptionSettingsResponseModeEnum, VideoSettingsRequestCameraFacingEnum, VideoSettingsResponseCameraFacingEnum, ViewportTracker, VisibilityState, checkIfAudioOutputChangeSupported, combineComparators, conditional, createSoundDetector, defaultSortPreset, descending, deviceIds$, disposeOfMediaStream, dominantSpeaker, getAudioBrowserPermission, getAudioDevices, getAudioOutputDevices, getAudioStream, getClientDetails, getDeviceState, getScreenShareStream, getSdkInfo, getVideoBrowserPermission, getVideoDevices, getVideoStream, getWebRTCInfo, hasAudio$1 as hasAudio, hasPausedTrack, hasScreenShare, hasScreenShareAudio, hasVideo, humanize, isPinned, livestreamOrAudioRoomSortPreset, logToConsole, name, noopComparator, paginatedLayoutSortPreset, pinned, publishingAudio, publishingVideo, reactionType, resolveDeviceId, role, screenSharing, setDeviceInfo, setOSInfo, setPowerState, setSdkInfo, setThermalState, setWebRTCInfo, speakerLayoutSortPreset, speaking, videoLoggerSystem, withParticipantSource };
 //# sourceMappingURL=index.browser.es.js.map