@stream-io/video-client 1.46.1 → 1.48.0
This diff shows the published contents of these two package versions as released to a supported public registry. It is provided for informational purposes only and reflects the changes between the versions as they appear in that registry.
- package/CHANGELOG.md +16 -0
- package/dist/index.browser.es.js +31 -197
- package/dist/index.browser.es.js.map +1 -1
- package/dist/index.cjs.js +30 -197
- package/dist/index.cjs.js.map +1 -1
- package/dist/index.d.ts +0 -1
- package/dist/index.es.js +31 -197
- package/dist/index.es.js.map +1 -1
- package/dist/src/devices/MicrophoneManager.d.ts +0 -1
- package/dist/src/gen/coordinator/index.d.ts +6 -0
- package/dist/src/types.d.ts +11 -0
- package/index.ts +0 -1
- package/package.json +1 -1
- package/src/devices/CameraManager.ts +9 -2
- package/src/devices/DeviceManager.ts +13 -3
- package/src/devices/MicrophoneManager.ts +17 -6
- package/src/devices/SpeakerManager.ts +16 -4
- package/src/devices/__tests__/CameraManager.test.ts +32 -0
- package/src/devices/__tests__/DeviceManager.test.ts +71 -0
- package/src/devices/__tests__/MicrophoneManager.test.ts +23 -0
- package/src/devices/__tests__/MicrophoneManagerRN.test.ts +28 -29
- package/src/devices/__tests__/SpeakerManager.test.ts +28 -0
- package/src/gen/coordinator/index.ts +6 -0
- package/src/types.ts +9 -0
- package/dist/src/helpers/RNSpeechDetector.d.ts +0 -23
- package/src/helpers/RNSpeechDetector.ts +0 -224
- package/src/helpers/__tests__/RNSpeechDetector.test.ts +0 -52
package/dist/index.d.ts
CHANGED
@@ -19,6 +19,5 @@ export * from './src/helpers/DynascaleManager';
 export * from './src/helpers/ViewportTracker';
 export * from './src/helpers/sound-detector';
 export * from './src/helpers/participantUtils';
-export * from './src/helpers/RNSpeechDetector';
 export * as Browsers from './src/helpers/browsers';
 export * from './src/logger';
package/dist/index.es.js
CHANGED
@@ -6285,7 +6285,7 @@ const getSdkVersion = (sdk) => {
     return sdk ? `${sdk.major}.${sdk.minor}.${sdk.patch}` : '0.0.0-development';
 };
 
-const version = "1.46.1";
+const version = "1.48.0";
 const [major, minor, patch] = version.split('.');
 let sdkInfo = {
     type: SdkType.PLAIN_JAVASCRIPT,
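To make the version plumbing in this hunk concrete, here is a minimal, standalone sketch of the same pattern: a semver string split into its parts and formatted back by a `getSdkVersion`-style helper. Only the split/format logic mirrors the hunk; the rest of the setup is illustrative.

```ts
// Illustrative, standalone version of the plumbing above.
const version = '1.48.0';
const [major, minor, patch] = version.split('.');

// Mirrors the getSdkVersion helper from the hunk: format a version from
// its parts, with a development fallback when no SDK info is present.
const getSdkVersion = (sdk?: { major: string; minor: string; patch: string }) =>
  sdk ? `${sdk.major}.${sdk.minor}.${sdk.patch}` : '0.0.0-development';

console.log(getSdkVersion({ major, minor, patch })); // '1.48.0'
console.log(getSdkVersion()); // '0.0.0-development'
```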
@@ -10866,8 +10866,14 @@ class DeviceManager {
             this.handleDisconnectedOrReplacedDevices();
         }
         if (this.devicePersistence.enabled) {
-            this.subscriptions.push(createSubscription(combineLatest([
-
+            this.subscriptions.push(createSubscription(combineLatest([
+                this.state.selectedDevice$,
+                this.state.status$,
+                this.state.browserPermissionState$,
+            ]), ([selectedDevice, status, browserPermissionState]) => {
+                if (!status ||
+                    (this.isTrackStoppedDueToTrackEnd && status === 'disabled') ||
+                    browserPermissionState !== 'granted')
                     return;
                 this.persistPreference(selectedDevice, status);
             }));
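The hunk above widens the persistence trigger from two observables to three and bails out unless the browser permission is granted. A runnable RxJS sketch of that gating, with plain `BehaviorSubject`s standing in for the manager's state (the stand-ins are assumptions, not the package's actual types):

```ts
import { BehaviorSubject, combineLatest } from 'rxjs';

// Stand-ins for the manager's state observables (illustrative only).
const selectedDevice$ = new BehaviorSubject<string | undefined>(undefined);
const status$ = new BehaviorSubject<'enabled' | 'disabled' | undefined>(undefined);
const browserPermissionState$ = new BehaviorSubject<'granted' | 'denied' | 'prompt'>('prompt');
const isTrackStoppedDueToTrackEnd = false; // stand-in for the manager flag

const subscription = combineLatest([
  selectedDevice$,
  status$,
  browserPermissionState$,
]).subscribe(([selectedDevice, status, browserPermissionState]) => {
  // Persist only when there is a concrete status, the track was not
  // disabled by a track-ended event, and permission has been granted.
  if (
    !status ||
    (isTrackStoppedDueToTrackEnd && status === 'disabled') ||
    browserPermissionState !== 'granted'
  ) {
    return;
  }
  console.log('persisting preference:', selectedDevice, status);
});

status$.next('enabled'); // not persisted yet: permission is still 'prompt'
browserPermissionState$.next('granted'); // now the preference is persisted
subscription.unsubscribe();
```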
@@ -11632,7 +11638,10 @@ class CameraManager extends DeviceManager {
         const shouldApplyDefaults = this.state.status === undefined &&
             this.state.optimisticStatus === undefined;
         let persistedPreferencesApplied = false;
-
+        const permissionState = await firstValueFrom(this.state.browserPermissionState$);
+        if (shouldApplyDefaults &&
+            this.devicePersistence.enabled &&
+            permissionState === 'granted') {
             persistedPreferencesApplied =
                 await this.applyPersistedPreferences(enabledInCallType);
         }
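`firstValueFrom` is the key addition here: it resolves with the current value of the permission stream, so the defaults path can check permission once without holding a subscription. A hedged sketch of that one-shot check, with a local subject standing in for `this.state.browserPermissionState$`:

```ts
import { BehaviorSubject, firstValueFrom } from 'rxjs';

// Local subject standing in for `this.state.browserPermissionState$`.
const browserPermissionState$ = new BehaviorSubject<'granted' | 'denied' | 'prompt'>('granted');

async function maybeApplyPersistedPreferences(
  shouldApplyDefaults: boolean,
  devicePersistenceEnabled: boolean,
) {
  // firstValueFrom resolves immediately with the subject's current value,
  // so this is a one-shot permission check, not an ongoing subscription.
  const permissionState = await firstValueFrom(browserPermissionState$);
  if (shouldApplyDefaults && devicePersistenceEnabled && permissionState === 'granted') {
    console.log('applying persisted camera preferences');
  }
}

void maybeApplyPersistedPreferences(true, true);
```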
@@ -11924,192 +11933,6 @@ const createNoAudioDetector = (audioStream, options) => {
     return stop;
 };
 
-class RNSpeechDetector {
-    constructor(externalAudioStream) {
-        this.pc1 = new RTCPeerConnection({});
-        this.pc2 = new RTCPeerConnection({});
-        this.isStopped = false;
-        this.externalAudioStream = externalAudioStream;
-    }
-    /**
-     * Starts the speech detection.
-     */
-    async start(onSoundDetectedStateChanged) {
-        let detachListeners;
-        let unsubscribe;
-        try {
-            this.isStopped = false;
-            const audioStream = this.externalAudioStream != null
-                ? this.externalAudioStream
-                : await navigator.mediaDevices.getUserMedia({ audio: true });
-            this.audioStream = audioStream;
-            const onPc1IceCandidate = (e) => {
-                this.forwardIceCandidate(this.pc2, e.candidate);
-            };
-            const onPc2IceCandidate = (e) => {
-                this.forwardIceCandidate(this.pc1, e.candidate);
-            };
-            const onTrackPc2 = (e) => {
-                e.streams[0].getTracks().forEach((track) => {
-                    // In RN, the remote track is automatically added to the audio output device
-                    // so we need to mute it to avoid hearing the audio back
-                    // @ts-expect-error _setVolume is a private method in react-native-webrtc
-                    track._setVolume(0);
-                });
-            };
-            this.pc1.addEventListener('icecandidate', onPc1IceCandidate);
-            this.pc2.addEventListener('icecandidate', onPc2IceCandidate);
-            this.pc2.addEventListener('track', onTrackPc2);
-            detachListeners = () => {
-                this.pc1.removeEventListener('icecandidate', onPc1IceCandidate);
-                this.pc2.removeEventListener('icecandidate', onPc2IceCandidate);
-                this.pc2.removeEventListener('track', onTrackPc2);
-            };
-            audioStream
-                .getTracks()
-                .forEach((track) => this.pc1.addTrack(track, audioStream));
-            const offer = await this.pc1.createOffer({});
-            await this.pc2.setRemoteDescription(offer);
-            await this.pc1.setLocalDescription(offer);
-            const answer = await this.pc2.createAnswer();
-            await this.pc1.setRemoteDescription(answer);
-            await this.pc2.setLocalDescription(answer);
-            unsubscribe = this.onSpeakingDetectedStateChange(onSoundDetectedStateChanged);
-            return () => {
-                detachListeners?.();
-                unsubscribe?.();
-                this.stop();
-            };
-        }
-        catch (error) {
-            detachListeners?.();
-            unsubscribe?.();
-            this.stop();
-            const logger = videoLoggerSystem.getLogger('RNSpeechDetector');
-            logger.error('error handling permissions: ', error);
-            return () => { };
-        }
-    }
-    /**
-     * Stops the speech detection and releases all allocated resources.
-     */
-    stop() {
-        if (this.isStopped)
-            return;
-        this.isStopped = true;
-        this.pc1.close();
-        this.pc2.close();
-        if (this.externalAudioStream != null) {
-            this.externalAudioStream = undefined;
-        }
-        else {
-            this.cleanupAudioStream();
-        }
-    }
-    /**
-     * Public method that detects the audio levels and returns the status.
-     */
-    onSpeakingDetectedStateChange(onSoundDetectedStateChanged) {
-        const initialBaselineNoiseLevel = 0.13;
-        let baselineNoiseLevel = initialBaselineNoiseLevel;
-        let speechDetected = false;
-        let speechTimer;
-        let silenceTimer;
-        const audioLevelHistory = []; // Store recent audio levels for smoother detection
-        const historyLength = 10;
-        const silenceThreshold = 1.1;
-        const resetThreshold = 0.9;
-        const speechTimeout = 500; // Speech is set to true after 500ms of audio detection
-        const silenceTimeout = 5000; // Reset baseline after 5 seconds of silence
-        const checkAudioLevel = async () => {
-            try {
-                const stats = await this.pc1.getStats();
-                const report = flatten(stats);
-                // Audio levels are present inside stats of type `media-source` and of kind `audio`
-                const audioMediaSourceStats = report.find((stat) => stat.type === 'media-source' &&
-                    stat.kind === 'audio');
-                if (audioMediaSourceStats) {
-                    const { audioLevel } = audioMediaSourceStats;
-                    if (audioLevel) {
-                        // Update audio level history (with max historyLength sized array)
-                        audioLevelHistory.push(audioLevel);
-                        if (audioLevelHistory.length > historyLength) {
-                            audioLevelHistory.shift();
-                        }
-                        // Calculate average audio level
-                        const avgAudioLevel = audioLevelHistory.reduce((a, b) => a + b, 0) /
-                            audioLevelHistory.length;
-                        // Update baseline (if necessary) based on silence detection
-                        if (avgAudioLevel < baselineNoiseLevel * silenceThreshold) {
-                            if (!silenceTimer) {
-                                silenceTimer = setTimeout(() => {
-                                    baselineNoiseLevel = Math.min(avgAudioLevel * resetThreshold, initialBaselineNoiseLevel);
-                                }, silenceTimeout);
-                            }
-                        }
-                        else {
-                            clearTimeout(silenceTimer);
-                            silenceTimer = undefined;
-                        }
-                        // Speech detection with hysteresis
-                        if (avgAudioLevel > baselineNoiseLevel * 1.5) {
-                            if (!speechDetected) {
-                                speechDetected = true;
-                                onSoundDetectedStateChanged({
-                                    isSoundDetected: true,
-                                    audioLevel,
-                                });
-                            }
-                            clearTimeout(speechTimer);
-                            speechTimer = setTimeout(() => {
-                                speechDetected = false;
-                                onSoundDetectedStateChanged({
-                                    isSoundDetected: false,
-                                    audioLevel: 0,
-                                });
-                            }, speechTimeout);
-                        }
-                    }
-                }
-            }
-            catch (error) {
-                const logger = videoLoggerSystem.getLogger('RNSpeechDetector');
-                logger.error('error checking audio level from stats', error);
-            }
-        };
-        const intervalId = setInterval(checkAudioLevel, 250);
-        return () => {
-            clearInterval(intervalId);
-            clearTimeout(speechTimer);
-            clearTimeout(silenceTimer);
-        };
-    }
-    cleanupAudioStream() {
-        if (!this.audioStream) {
-            return;
-        }
-        this.audioStream.getTracks().forEach((track) => track.stop());
-        if (
-        // @ts-expect-error release() is present in react-native-webrtc
-        typeof this.audioStream.release === 'function') {
-            // @ts-expect-error called to dispose the stream in RN
-            this.audioStream.release();
-        }
-    }
-    forwardIceCandidate(destination, candidate) {
-        if (this.isStopped ||
-            !candidate ||
-            destination.signalingState === 'closed') {
-            return;
-        }
-        destination.addIceCandidate(candidate).catch(() => {
-            // silently ignore the error
-            const logger = videoLoggerSystem.getLogger('RNSpeechDetector');
-            logger.info('cannot add ice candidate - ignoring');
-        });
-    }
-}
-
 class MicrophoneManager extends AudioDeviceManager {
     constructor(call, devicePersistence, disableMode = 'stop-tracks') {
         super(call, new MicrophoneManagerState(disableMode, call.tracer), TrackType.AUDIO, devicePersistence);
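For context on what was deleted: the class detected speech by looping a local audio track through a pair of peer connections and polling `getStats()` for the `media-source` audio level. A minimal sketch of just that stats-reading step (the function name is illustrative):

```ts
// Poll a peer connection's stats and read the local audio input level from
// the `media-source` entry of kind `audio` — the same signal the removed
// detector smoothed and thresholded.
async function readLocalAudioLevel(pc: RTCPeerConnection): Promise<number | undefined> {
  const stats = await pc.getStats();
  let audioLevel: number | undefined;
  stats.forEach((stat) => {
    if (stat.type === 'media-source' && stat.kind === 'audio') {
      audioLevel = stat.audioLevel; // 0..1, present while a track is live
    }
  });
  return audioLevel;
}
```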
@@ -12374,7 +12197,10 @@ class MicrophoneManager extends AudioDeviceManager {
         const shouldApplyDefaults = this.state.status === undefined &&
             this.state.optimisticStatus === undefined;
         let persistedPreferencesApplied = false;
-
+        const permissionState = await firstValueFrom(this.state.browserPermissionState$);
+        if (shouldApplyDefaults &&
+            this.devicePersistence.enabled &&
+            permissionState === 'granted') {
             persistedPreferencesApplied = await this.applyPersistedPreferences(true);
         }
         const canPublish = this.call.permissionsContext.canPublish(this.trackType);
@@ -12419,13 +12245,16 @@ class MicrophoneManager extends AudioDeviceManager {
             return;
         await this.teardownSpeakingWhileMutedDetection();
         if (isReactNative()) {
-
-
+            const speechActivity = globalThis.streamRNVideoSDK?.nativeEvents?.speechActivity;
+            if (!speechActivity) {
+                this.logger.warn('Native speech activity not available, make sure the "@stream-io/react-native-webrtc" peer dependency version is satisfied');
+                return;
+            }
+            const unsubscribe = speechActivity.subscribe((event) => {
                 this.state.setSpeakingWhileMuted(event.isSoundDetected);
             });
             this.soundDetectorCleanup = async () => {
                 unsubscribe();
-                this.rnSpeechDetector = undefined;
             };
         }
         else {
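The React Native path now relies on a speech-activity event emitted by the native `@stream-io/react-native-webrtc` module instead of the in-JS detector. A sketch of the subscribe/cleanup shape implied by the hunk; the `SpeechActivity` typings here are assumptions for illustration, not the library's published interfaces:

```ts
// Shapes implied by the hunk above (assumed, not published typings).
type SpeechActivityEvent = { isSoundDetected: boolean };
type SpeechActivity = {
  subscribe(listener: (event: SpeechActivityEvent) => void): () => void;
};

function setupSpeakingWhileMutedDetection(
  speechActivity: SpeechActivity | undefined,
  setSpeakingWhileMuted: (speaking: boolean) => void,
  warn: (message: string) => void,
): (() => void) | undefined {
  if (!speechActivity) {
    // Surfaced when the installed @stream-io/react-native-webrtc version
    // does not expose the native event.
    warn('Native speech activity not available');
    return undefined;
  }
  const unsubscribe = speechActivity.subscribe((event) => {
    setSpeakingWhileMuted(event.isSoundDetected);
  });
  return () => unsubscribe(); // mirrors soundDetectorCleanup
}
```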
@@ -12758,7 +12587,12 @@ class SpeakerManager {
             }));
         }
         if (!isReactNative() && this.devicePersistence.enabled) {
-            this.subscriptions.push(createSubscription(
+            this.subscriptions.push(createSubscription(combineLatest([
+                this.state.selectedDevice$,
+                getAudioBrowserPermission(this.call.tracer).asStateObservable(),
+            ]), ([selectedDevice, browserPermissionState]) => {
+                if (!selectedDevice || browserPermissionState !== 'granted')
+                    return;
                 this.persistSpeakerDevicePreference(selectedDevice);
             }));
         }
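Speaker output has no browser permission of its own, so the hunk reuses the microphone permission state via `getAudioBrowserPermission(...).asStateObservable()`. A sketch of the gating, with a plain subject standing in for that observable (the stand-in is an assumption):

```ts
import { BehaviorSubject, combineLatest } from 'rxjs';

const selectedDevice$ = new BehaviorSubject<string | undefined>(undefined);
// Stand-in for getAudioBrowserPermission(tracer).asStateObservable().
const audioPermissionState$ = new BehaviorSubject<'granted' | 'denied' | 'prompt'>('prompt');

const subscription = combineLatest([selectedDevice$, audioPermissionState$]).subscribe(
  ([selectedDevice, browserPermissionState]) => {
    // Device ids are only meaningful once permission is granted, so skip
    // persistence for an unset device or a non-granted permission.
    if (!selectedDevice || browserPermissionState !== 'granted') return;
    console.log('persisting speaker device preference:', selectedDevice);
  },
);

audioPermissionState$.next('granted');
selectedDevice$.next('speaker-1'); // persisted
subscription.unsubscribe();
```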
@@ -16133,7 +15967,7 @@ class StreamClient {
         this.getUserAgent = () => {
             if (!this.cachedUserAgent) {
                 const { clientAppIdentifier = {} } = this.options;
-                const { sdkName = 'js', sdkVersion = "1.46.1", ...extras } = clientAppIdentifier;
+                const { sdkName = 'js', sdkVersion = "1.48.0", ...extras } = clientAppIdentifier;
                 this.cachedUserAgent = [
                     `stream-video-${sdkName}-v${sdkVersion}`,
                     ...Object.entries(extras).map(([key, value]) => `${key}=${value}`),
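The user-agent string is assembled from the SDK name and version plus any extra identifier entries. A self-contained sketch of that assembly; the extra key and the join separator are assumptions, since neither is visible in the hunk:

```ts
// The extra key and the join separator are assumptions; neither is
// visible in the hunk above.
const clientAppIdentifier: Record<string, string> = { app: 'my-app' };

const { sdkName = 'js', sdkVersion = '1.48.0', ...extras } = clientAppIdentifier;
const userAgent = [
  `stream-video-${sdkName}-v${sdkVersion}`,
  ...Object.entries(extras).map(([key, value]) => `${key}=${value}`),
].join(',');

console.log(userAgent); // stream-video-js-v1.48.0,app=my-app
```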
@@ -16769,5 +16603,5 @@ const humanize = (n) => {
     return String(n);
 };
 
-export { AudioSettingsRequestDefaultDeviceEnum, AudioSettingsResponseDefaultDeviceEnum, browsers as Browsers, Call, CallRecordingFailedEventRecordingTypeEnum, CallRecordingReadyEventRecordingTypeEnum, CallRecordingStartedEventRecordingTypeEnum, CallRecordingStoppedEventRecordingTypeEnum, CallState, CallType, CallTypes, CallingState, CameraManager, CameraManagerState, CreateDeviceRequestPushProviderEnum, DebounceType, DeviceManager, DeviceManagerState, DynascaleManager, ErrorFromResponse, FrameRecordingSettingsRequestModeEnum, FrameRecordingSettingsRequestQualityEnum, FrameRecordingSettingsResponseModeEnum, IndividualRecordingSettingsRequestModeEnum, IndividualRecordingSettingsResponseModeEnum, IngressAudioEncodingOptionsRequestChannelsEnum, IngressSourceRequestFpsEnum, IngressVideoLayerRequestCodecEnum, LayoutSettingsRequestNameEnum, MicrophoneManager, MicrophoneManagerState, NoiseCancellationSettingsModeEnum, OwnCapability,
+export { AudioSettingsRequestDefaultDeviceEnum, AudioSettingsResponseDefaultDeviceEnum, browsers as Browsers, Call, CallRecordingFailedEventRecordingTypeEnum, CallRecordingReadyEventRecordingTypeEnum, CallRecordingStartedEventRecordingTypeEnum, CallRecordingStoppedEventRecordingTypeEnum, CallState, CallType, CallTypes, CallingState, CameraManager, CameraManagerState, CreateDeviceRequestPushProviderEnum, DebounceType, DeviceManager, DeviceManagerState, DynascaleManager, ErrorFromResponse, FrameRecordingSettingsRequestModeEnum, FrameRecordingSettingsRequestQualityEnum, FrameRecordingSettingsResponseModeEnum, IndividualRecordingSettingsRequestModeEnum, IndividualRecordingSettingsResponseModeEnum, IngressAudioEncodingOptionsRequestChannelsEnum, IngressSourceRequestFpsEnum, IngressVideoLayerRequestCodecEnum, LayoutSettingsRequestNameEnum, MicrophoneManager, MicrophoneManagerState, NoiseCancellationSettingsModeEnum, OwnCapability, RTMPBroadcastRequestQualityEnum, RTMPSettingsRequestQualityEnum, RawRecordingSettingsRequestModeEnum, RawRecordingSettingsResponseModeEnum, RecordSettingsRequestModeEnum, RecordSettingsRequestQualityEnum, rxUtils as RxUtils, ScreenShareManager, ScreenShareState, events as SfuEvents, SfuJoinError, models as SfuModels, SpeakerManager, SpeakerState, StartClosedCaptionsRequestLanguageEnum, StartTranscriptionRequestLanguageEnum, StreamSfuClient, StreamVideoClient, StreamVideoReadOnlyStateStore, StreamVideoWriteableStateStore, TranscriptionSettingsRequestClosedCaptionModeEnum, TranscriptionSettingsRequestLanguageEnum, TranscriptionSettingsRequestModeEnum, TranscriptionSettingsResponseClosedCaptionModeEnum, TranscriptionSettingsResponseLanguageEnum, TranscriptionSettingsResponseModeEnum, VideoSettingsRequestCameraFacingEnum, VideoSettingsResponseCameraFacingEnum, ViewportTracker, VisibilityState, checkIfAudioOutputChangeSupported, combineComparators, conditional, createSoundDetector, defaultSortPreset, descending, deviceIds$, disposeOfMediaStream, dominantSpeaker, getAudioBrowserPermission, getAudioDevices, getAudioOutputDevices, getAudioStream, getClientDetails, getDeviceState, getScreenShareStream, getSdkInfo, getVideoBrowserPermission, getVideoDevices, getVideoStream, getWebRTCInfo, hasAudio$1 as hasAudio, hasPausedTrack, hasScreenShare, hasScreenShareAudio, hasVideo, humanize, isPinned, livestreamOrAudioRoomSortPreset, logToConsole, name, noopComparator, paginatedLayoutSortPreset, pinned, publishingAudio, publishingVideo, reactionType, resolveDeviceId, role, screenSharing, setDeviceInfo, setOSInfo, setPowerState, setSdkInfo, setThermalState, setWebRTCInfo, speakerLayoutSortPreset, speaking, videoLoggerSystem, withParticipantSource };
 //# sourceMappingURL=index.es.js.map