@stream-io/video-client 0.4.10 → 0.5.1
This diff shows the changes between publicly released versions of the package as they appear in their public registry, and is provided for informational purposes only.
- package/CHANGELOG.md +18 -0
- package/README.md +4 -5
- package/dist/index.browser.es.js +133 -24
- package/dist/index.browser.es.js.map +1 -1
- package/dist/index.cjs.js +133 -24
- package/dist/index.cjs.js.map +1 -1
- package/dist/index.es.js +133 -24
- package/dist/index.es.js.map +1 -1
- package/dist/src/devices/MicrophoneManager.d.ts +1 -0
- package/dist/src/helpers/RNSpeechDetector.d.ts +18 -0
- package/dist/src/stats/types.d.ts +1 -0
- package/package.json +1 -1
- package/src/devices/MicrophoneManager.ts +24 -10
- package/src/devices/__tests__/MicrophoneManagerRN.test.ts +126 -0
- package/src/helpers/RNSpeechDetector.ts +112 -0
- package/src/logger.ts +6 -0
- package/src/stats/types.ts +1 -0
package/dist/index.es.js
CHANGED
@@ -5832,6 +5832,15 @@ const createSignalClient = (options) => {
     return new SignalServerClient(transport);
 };
 
+/**
+ * Checks whether we are using React Native
+ */
+const isReactNative = () => {
+    if (typeof navigator === 'undefined')
+        return false;
+    return navigator.product?.toLowerCase() === 'reactnative';
+};
+
 // log levels, sorted by verbosity
 const logLevels = Object.freeze({
     trace: 0,
@@ -5849,6 +5858,11 @@ const logToConsole = (logLevel, message, ...args) => {
             logMethod = console.error;
             break;
         case 'warn':
+            if (isReactNative()) {
+                message = `WARN: ${message}`;
+                logMethod = console.info;
+                break;
+            }
             logMethod = console.warn;
             break;
         case 'info':
@@ -6035,15 +6049,6 @@ function getIceCandidate(candidate) {
     }
 }
 
-/**
- * Checks whether we are using React Native
- */
-const isReactNative = () => {
-    if (typeof navigator === 'undefined')
-        return false;
-    return navigator.product?.toLowerCase() === 'reactnative';
-};
-
 let sdkInfo;
 let osInfo;
 let deviceInfo;
@@ -9316,7 +9321,7 @@ const createStatsReporter = ({ subscriber, publisher, state, pollingIntervalInMs
 const transform = (report, opts) => {
     const { trackKind, kind } = opts;
     const direction = kind === 'subscriber' ? 'inbound-rtp' : 'outbound-rtp';
-    const stats = flatten(report);
+    const stats = flatten$1(report);
     const streams = stats
         .filter((stat) => stat.type === direction &&
         stat.kind === trackKind)
@@ -9411,7 +9416,7 @@ const aggregate = (stats) => {
  *
  * @param report the report to flatten.
  */
-const flatten = (report) => {
+const flatten$1 = (report) => {
     const stats = [];
     report.forEach((s) => {
         stats.push(s);
@@ -10753,7 +10758,7 @@ class MicrophoneManagerState extends InputMediaDeviceManagerState {
 }
 
 const DETECTION_FREQUENCY_IN_MS = 500;
-const AUDIO_LEVEL_THRESHOLD = 150;
+const AUDIO_LEVEL_THRESHOLD$1 = 150;
 const FFT_SIZE = 128;
 /**
  * Creates a new sound detector.
@@ -10764,7 +10769,7 @@ const FFT_SIZE = 128;
  * @returns a clean-up function which once invoked stops the sound detector.
  */
 const createSoundDetector = (audioStream, onSoundDetectedStateChanged, options = {}) => {
-    const { detectionFrequencyInMs = DETECTION_FREQUENCY_IN_MS, audioLevelThreshold = AUDIO_LEVEL_THRESHOLD, fftSize = FFT_SIZE, destroyStreamOnStop = true, } = options;
+    const { detectionFrequencyInMs = DETECTION_FREQUENCY_IN_MS, audioLevelThreshold = AUDIO_LEVEL_THRESHOLD$1, fftSize = FFT_SIZE, destroyStreamOnStop = true, } = options;
     const audioContext = new AudioContext();
     const analyser = audioContext.createAnalyser();
     analyser.fftSize = fftSize;
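For orientation, the options object destructured in the hunk above covers the polling interval, the audio-level threshold, the FFT size, and whether the stream is destroyed on stop, and the JSDoc states that the return value is a clean-up function. A minimal sketch of wiring it up; the getUserMedia call and the option values are illustrative, not part of the package:

// Illustrative wiring of createSoundDetector; values are examples only.
async function watchMicrophoneActivity() {
    const stream = await navigator.mediaDevices.getUserMedia({ audio: true });
    const stopDetector = createSoundDetector(stream, (event) => {
        // event.isSoundDetected is what MicrophoneManager consumes later in this diff
        console.log('sound detected:', event.isSoundDetected);
    }, {
        detectionFrequencyInMs: 500,
        audioLevelThreshold: 150,
        fftSize: 128,
        destroyStreamOnStop: true, // also releases the stream on clean-up
    });
    return stopDetector; // invoking it stops the detector, per the JSDoc above
}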
@@ -10803,6 +10808,99 @@ const createSoundDetector = (audioStream, onSoundDetectedStateChanged, options =
     };
 };
 
+/**
+ * Flatten the stats report into an array of stats objects.
+ *
+ * @param report the report to flatten.
+ */
+const flatten = (report) => {
+    const stats = [];
+    report.forEach((s) => {
+        stats.push(s);
+    });
+    return stats;
+};
+const AUDIO_LEVEL_THRESHOLD = 0.2;
+class RNSpeechDetector {
+    constructor() {
+        this.pc1 = new RTCPeerConnection({});
+        this.pc2 = new RTCPeerConnection({});
+    }
+    /**
+     * Starts the speech detection.
+     */
+    async start() {
+        try {
+            const audioStream = await navigator.mediaDevices.getUserMedia({
+                audio: true,
+            });
+            this.pc1.addEventListener('icecandidate', async (e) => {
+                await this.pc2.addIceCandidate(e.candidate);
+            });
+            this.pc2.addEventListener('icecandidate', async (e) => {
+                await this.pc1.addIceCandidate(e.candidate);
+            });
+            audioStream
+                .getTracks()
+                .forEach((track) => this.pc1.addTrack(track, audioStream));
+            const offer = await this.pc1.createOffer({});
+            await this.pc2.setRemoteDescription(offer);
+            await this.pc1.setLocalDescription(offer);
+            const answer = await this.pc2.createAnswer();
+            await this.pc1.setRemoteDescription(answer);
+            await this.pc2.setLocalDescription(answer);
+            const audioTracks = audioStream.getAudioTracks();
+            // We need to mute the audio track for this temporary stream, or else you will hear yourself twice while in the call.
+            audioTracks.forEach((track) => (track.enabled = false));
+        }
+        catch (error) {
+            console.error('Error connecting and negotiating between PeerConnections:', error);
+        }
+    }
+    /**
+     * Stops the speech detection and releases all allocated resources.
+     */
+    stop() {
+        this.pc1.close();
+        this.pc2.close();
+        if (this.intervalId) {
+            clearInterval(this.intervalId);
+        }
+    }
+    /**
+     * Public method that detects the audio levels and returns the status.
+     */
+    onSpeakingDetectedStateChange(onSoundDetectedStateChanged) {
+        this.intervalId = setInterval(async () => {
+            const stats = (await this.pc1.getStats());
+            const report = flatten(stats);
+            // Audio levels are present inside stats of type `media-source` and of kind `audio`
+            const audioMediaSourceStats = report.find((stat) => stat.type === 'media-source' &&
+                stat.kind === 'audio');
+            if (audioMediaSourceStats) {
+                const { audioLevel } = audioMediaSourceStats;
+                if (audioLevel) {
+                    if (audioLevel >= AUDIO_LEVEL_THRESHOLD) {
+                        onSoundDetectedStateChanged({
+                            isSoundDetected: true,
+                            audioLevel,
+                        });
+                    }
+                    else {
+                        onSoundDetectedStateChanged({
+                            isSoundDetected: false,
+                            audioLevel: 0,
+                        });
+                    }
+                }
+            }
+        }, 1000);
+        return () => {
+            clearInterval(this.intervalId);
+        };
+    }
+}
+
 class MicrophoneManager extends InputMediaDeviceManager {
     constructor(call) {
         super(call, new MicrophoneManagerState(), TrackType.AUDIO);
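The RNSpeechDetector added above loops a muted microphone track through a local RTCPeerConnection pair and polls getStats() for the media-source audio level. A minimal sketch of driving it directly, mirroring the MicrophoneManager wiring in the next hunk; the logging callback is illustrative only:

// Sketch only; mirrors the MicrophoneManager usage shown in the following hunk.
async function watchSpeakingWhileMuted() {
    const detector = new RNSpeechDetector();
    await detector.start();
    const unsubscribe = detector.onSpeakingDetectedStateChange((event) => {
        // event is { isSoundDetected, audioLevel }, sampled every second from pc1.getStats()
        console.log('speaking while muted:', event.isSoundDetected, event.audioLevel);
    });
    // clean-up: stop polling, then close both peer connections
    return () => {
        unsubscribe();
        detector.stop();
    };
}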
@@ -10844,20 +10942,31 @@ class MicrophoneManager extends InputMediaDeviceManager {
         return this.call.stopPublish(TrackType.AUDIO, stopTracks);
     }
     async startSpeakingWhileMutedDetection(deviceId) {
+        await this.stopSpeakingWhileMutedDetection();
         if (isReactNative()) {
-            return;
+            this.rnSpeechDetector = new RNSpeechDetector();
+            await this.rnSpeechDetector.start();
+            const unsubscribe = this.rnSpeechDetector?.onSpeakingDetectedStateChange((event) => {
+                this.state.setSpeakingWhileMuted(event.isSoundDetected);
+            });
+            this.soundDetectorCleanup = () => {
+                unsubscribe();
+                this.rnSpeechDetector?.stop();
+                this.rnSpeechDetector = undefined;
+            };
+        }
+        else {
+            // Need to start a new stream that's not connected to publisher
+            const stream = await this.getStream({
+                deviceId,
+            });
+            this.soundDetectorCleanup = createSoundDetector(stream, (event) => {
+                this.state.setSpeakingWhileMuted(event.isSoundDetected);
+            });
         }
-        await this.stopSpeakingWhileMutedDetection();
-        // Need to start a new stream that's not connected to publisher
-        const stream = await this.getStream({
-            deviceId,
-        });
-        this.soundDetectorCleanup = createSoundDetector(stream, (event) => {
-            this.state.setSpeakingWhileMuted(event.isSoundDetected);
-        });
     }
     async stopSpeakingWhileMutedDetection() {
-        if (
+        if (!this.soundDetectorCleanup) {
             return;
         }
         this.state.setSpeakingWhileMuted(false);
@@ -14036,7 +14145,7 @@ class StreamClient {
             });
         };
         this.getUserAgent = () => {
-            const version = "0.4.10";
+            const version = "0.5.1" ;
             return (this.userAgent ||
                 `stream-video-javascript-client-${this.node ? 'node' : 'browser'}-${version}`);
         };