@stream-io/video-client 0.5.0 → 0.5.1

package/CHANGELOG.md CHANGED
@@ -2,6 +2,13 @@
 
 This file was generated using [@jscutlery/semver](https://github.com/jscutlery/semver).
 
+### [0.5.1](https://github.com/GetStream/stream-video-js/compare/@stream-io/video-client-0.5.0...@stream-io/video-client-0.5.1) (2023-12-05)
+
+
+### Features
+
+* **client:** speaking while muted in React Native using temporary peer connection ([#1207](https://github.com/GetStream/stream-video-js/issues/1207)) ([9093006](https://github.com/GetStream/stream-video-js/commit/90930063503b6dfb83572dad8a31e45b16bf1685))
+
 ## [0.5.0](https://github.com/GetStream/stream-video-js/compare/@stream-io/video-client-0.4.10...@stream-io/video-client-0.5.0) (2023-11-29)
 
 
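
The mechanism behind #1207 appears in full in the bundled-code hunks further down in this diff. In short: the client negotiates two local RTCPeerConnections against each other so that `getStats()` on the sending side starts reporting `media-source` audio levels, while the microphone track itself is kept disabled and is never published to the call. A minimal sketch of the idea (simplified from the shipped `RNSpeechDetector` below, not the verbatim implementation):

```js
// Loopback pair: pc1 "sends" the disabled mic track to pc2 purely so
// that pc1.getStats() begins reporting `media-source` audio levels.
const pc1 = new RTCPeerConnection({});
const pc2 = new RTCPeerConnection({});
// ...offer/answer negotiation between pc1 and pc2, then add the mic
// track to pc1 and set track.enabled = false (see the full hunk below).

// Speech shows up as `audioLevel` on the `media-source` stats entry,
// even though the track is inaudible and unpublished.
const stats = await pc1.getStats();
stats.forEach((s) => {
  if (s.type === 'media-source' && s.kind === 'audio') {
    console.log('audioLevel:', s.audioLevel);
  }
});
```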
package/README.md CHANGED
@@ -1,4 +1,4 @@
-# Official Plain-JS SDK and Low-Level Client for [Stream Video](https://getstream.io/video/docs/)
+# Official JavaScript SDK and Low-Level Client for [Stream Video](https://getstream.io/video/docs/)
 
 <img src="../../.readme-assets/Github-Graphic-JS.jpg" alt="Stream Video for JavaScript Header image" style="box-shadow: 0 3px 10px rgb(0 0 0 / 0.2); border-radius: 1rem" />
 
@@ -7,7 +7,6 @@ Low-level Video SDK client for browser and Node.js integrations.
 ## **Quick Links**
 
 - [Register](https://getstream.io/chat/trial/) to get an API key for Stream Video
-TODO: add links to docs and tutorials
 
 ## What is Stream?
 
@@ -15,9 +14,9 @@ Stream allows developers to rapidly deploy scalable feeds, chat messaging and vi
 
 With Stream's video components, you can use their SDK to build in-app video calling, audio rooms, audio calls, or live streaming. The best place to get started is with their tutorials:
 
-- [Video & Audio Calling Tutorial](https://getstream.io/video/docs/javascript/tutorials/video-calling/)
-- Audio Rooms Tutorial
-- [Livestreaming Tutorial](https://getstream.io/video/docs/javascript/tutorials/livestream/)
+- [Video and Audio Calling Tutorial](https://getstream.io/video/sdk/javascript/tutorial/video-calling/)
+- [Audio Rooms Tutorial](https://getstream.io/video/sdk/javascript/tutorial/audio-room/)
+- [Livestream Tutorial](https://getstream.io/video/sdk/javascript/tutorial/livestreaming/)
 
 Stream provides UI components and state handling that make it easy to build video calling for your app. All calls run on Stream's network of edge servers around the world, ensuring optimal latency and reliability.
 
@@ -9318,7 +9318,7 @@ const createStatsReporter = ({ subscriber, publisher, state, pollingIntervalInMs
 const transform = (report, opts) => {
     const { trackKind, kind } = opts;
     const direction = kind === 'subscriber' ? 'inbound-rtp' : 'outbound-rtp';
-    const stats = flatten(report);
+    const stats = flatten$1(report);
     const streams = stats
         .filter((stat) => stat.type === direction &&
         stat.kind === trackKind)
@@ -9413,7 +9413,7 @@ const aggregate = (stats) => {
  *
  * @param report the report to flatten.
  */
-const flatten = (report) => {
+const flatten$1 = (report) => {
    const stats = [];
    report.forEach((s) => {
        stats.push(s);
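
The `flatten` → `flatten$1` rename in the two hunks above is not a source-level change: the new speech-detector module added below ships its own `flatten` helper, and when the bundler (presumably Rollup, whose output conventionally uses the `$1` suffix for deconflicted names) merges both modules into one flat file, it renames the stats-reporter copy to avoid the collision. A hypothetical two-module illustration (module names invented for clarity):

```js
// stats.js (hypothetical) — the stats reporter's helper
export const flatten = (report) => {
  const stats = [];
  report.forEach((s) => stats.push(s));
  return stats;
};

// rn-speech-detector.js (hypothetical) defines its own `flatten`;
// after bundling, one of the two identifiers becomes `flatten$1`
// so both can coexist in the same scope.
```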
@@ -10755,7 +10755,7 @@ class MicrophoneManagerState extends InputMediaDeviceManagerState {
 }
 
 const DETECTION_FREQUENCY_IN_MS = 500;
-const AUDIO_LEVEL_THRESHOLD = 150;
+const AUDIO_LEVEL_THRESHOLD$1 = 150;
 const FFT_SIZE = 128;
 /**
  * Creates a new sound detector.
@@ -10766,7 +10766,7 @@ const FFT_SIZE = 128;
  * @returns a clean-up function which once invoked stops the sound detector.
  */
 const createSoundDetector = (audioStream, onSoundDetectedStateChanged, options = {}) => {
-    const { detectionFrequencyInMs = DETECTION_FREQUENCY_IN_MS, audioLevelThreshold = AUDIO_LEVEL_THRESHOLD, fftSize = FFT_SIZE, destroyStreamOnStop = true, } = options;
+    const { detectionFrequencyInMs = DETECTION_FREQUENCY_IN_MS, audioLevelThreshold = AUDIO_LEVEL_THRESHOLD$1, fftSize = FFT_SIZE, destroyStreamOnStop = true, } = options;
     const audioContext = new AudioContext();
     const analyser = audioContext.createAnalyser();
     analyser.fftSize = fftSize;
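
Functionally, the browser-side detector is untouched: `createSoundDetector` still takes a `MediaStream`, a state-change callback, and an options bag, and returns a clean-up function (per its JSDoc above). A hedged usage sketch, with option values taken from the defaults shown in this hunk:

```js
// Sketch only: drive the Web Audio based detector with a fresh mic stream.
const stream = await navigator.mediaDevices.getUserMedia({ audio: true });
const stopDetector = createSoundDetector(
  stream,
  (event) => console.log('sound detected:', event.isSoundDetected),
  { detectionFrequencyInMs: 500, audioLevelThreshold: 150, fftSize: 128 },
);
// Later: stop detection; with destroyStreamOnStop (the default),
// the temporary stream is released as well.
stopDetector();
```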
@@ -10805,6 +10805,99 @@ const createSoundDetector = (audioStream, onSoundDetectedStateChanged, options =
     };
 };
 
+/**
+ * Flatten the stats report into an array of stats objects.
+ *
+ * @param report the report to flatten.
+ */
+const flatten = (report) => {
+    const stats = [];
+    report.forEach((s) => {
+        stats.push(s);
+    });
+    return stats;
+};
+const AUDIO_LEVEL_THRESHOLD = 0.2;
+class RNSpeechDetector {
+    constructor() {
+        this.pc1 = new RTCPeerConnection({});
+        this.pc2 = new RTCPeerConnection({});
+    }
+    /**
+     * Starts the speech detection.
+     */
+    async start() {
+        try {
+            const audioStream = await navigator.mediaDevices.getUserMedia({
+                audio: true,
+            });
+            this.pc1.addEventListener('icecandidate', async (e) => {
+                await this.pc2.addIceCandidate(e.candidate);
+            });
+            this.pc2.addEventListener('icecandidate', async (e) => {
+                await this.pc1.addIceCandidate(e.candidate);
+            });
+            audioStream
+                .getTracks()
+                .forEach((track) => this.pc1.addTrack(track, audioStream));
+            const offer = await this.pc1.createOffer({});
+            await this.pc2.setRemoteDescription(offer);
+            await this.pc1.setLocalDescription(offer);
+            const answer = await this.pc2.createAnswer();
+            await this.pc1.setRemoteDescription(answer);
+            await this.pc2.setLocalDescription(answer);
+            const audioTracks = audioStream.getAudioTracks();
+            // We need to mute the audio track for this temporary stream, or else you will hear yourself twice while in the call.
+            audioTracks.forEach((track) => (track.enabled = false));
+        }
+        catch (error) {
+            console.error('Error connecting and negotiating between PeerConnections:', error);
+        }
+    }
+    /**
+     * Stops the speech detection and releases all allocated resources.
+     */
+    stop() {
+        this.pc1.close();
+        this.pc2.close();
+        if (this.intervalId) {
+            clearInterval(this.intervalId);
+        }
+    }
+    /**
+     * Public method that detects the audio levels and returns the status.
+     */
+    onSpeakingDetectedStateChange(onSoundDetectedStateChanged) {
+        this.intervalId = setInterval(async () => {
+            const stats = (await this.pc1.getStats());
+            const report = flatten(stats);
+            // Audio levels are present inside stats of type `media-source` and of kind `audio`
+            const audioMediaSourceStats = report.find((stat) => stat.type === 'media-source' &&
+                stat.kind === 'audio');
+            if (audioMediaSourceStats) {
+                const { audioLevel } = audioMediaSourceStats;
+                if (audioLevel) {
+                    if (audioLevel >= AUDIO_LEVEL_THRESHOLD) {
+                        onSoundDetectedStateChanged({
+                            isSoundDetected: true,
+                            audioLevel,
+                        });
+                    }
+                    else {
+                        onSoundDetectedStateChanged({
+                            isSoundDetected: false,
+                            audioLevel: 0,
+                        });
+                    }
+                }
+            }
+        }, 1000);
+        return () => {
+            clearInterval(this.intervalId);
+        };
+    }
+}
+
 class MicrophoneManager extends InputMediaDeviceManager {
     constructor(call) {
         super(call, new MicrophoneManagerState(), TrackType.AUDIO);
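
Taken in isolation, the new class is driven exactly the way `MicrophoneManager` wires it up in the next hunk:

```js
// Usage mirroring the MicrophoneManager integration below.
const detector = new RNSpeechDetector();
await detector.start(); // negotiate pc1 <-> pc2, attach the muted mic track
const unsubscribe = detector.onSpeakingDetectedStateChange((event) => {
  console.log('speaking:', event.isSoundDetected, event.audioLevel);
});
// Teardown: stop the 1-second stats polling, then close both connections.
unsubscribe();
detector.stop();
```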
@@ -10846,20 +10939,31 @@ class MicrophoneManager extends InputMediaDeviceManager {
         return this.call.stopPublish(TrackType.AUDIO, stopTracks);
     }
     async startSpeakingWhileMutedDetection(deviceId) {
+        await this.stopSpeakingWhileMutedDetection();
         if (isReactNative()) {
-            return;
+            this.rnSpeechDetector = new RNSpeechDetector();
+            await this.rnSpeechDetector.start();
+            const unsubscribe = this.rnSpeechDetector?.onSpeakingDetectedStateChange((event) => {
+                this.state.setSpeakingWhileMuted(event.isSoundDetected);
+            });
+            this.soundDetectorCleanup = () => {
+                unsubscribe();
+                this.rnSpeechDetector?.stop();
+                this.rnSpeechDetector = undefined;
+            };
+        }
+        else {
+            // Need to start a new stream that's not connected to publisher
+            const stream = await this.getStream({
+                deviceId,
+            });
+            this.soundDetectorCleanup = createSoundDetector(stream, (event) => {
+                this.state.setSpeakingWhileMuted(event.isSoundDetected);
+            });
         }
-        await this.stopSpeakingWhileMutedDetection();
-        // Need to start a new stream that's not connected to publisher
-        const stream = await this.getStream({
-            deviceId,
-        });
-        this.soundDetectorCleanup = createSoundDetector(stream, (event) => {
-            this.state.setSpeakingWhileMuted(event.isSoundDetected);
-        });
     }
     async stopSpeakingWhileMutedDetection() {
-        if (isReactNative() || !this.soundDetectorCleanup) {
+        if (!this.soundDetectorCleanup) {
             return;
         }
         this.state.setSpeakingWhileMuted(false);
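
The net effect of this hunk: the early `return` for React Native is gone, detection now starts on both platforms, and both branches install a `soundDetectorCleanup`, which is why `stopSpeakingWhileMutedDetection` no longer needs the `isReactNative()` guard. Application code consumes the result identically on web and React Native; a hedged sketch, assuming the `speakingWhileMuted$` observable exposed on the microphone state elsewhere in this SDK (not part of this diff):

```js
// Hedged sketch: `speakingWhileMuted$` comes from the SDK's reactive
// state API (not shown in this diff); `showToast` is a hypothetical UI helper.
call.microphone.state.speakingWhileMuted$.subscribe((isSpeaking) => {
  if (isSpeaking) showToast('You are muted. Unmute to be heard.');
});
```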
@@ -14037,7 +14141,7 @@ class StreamClient {
            });
        };
        this.getUserAgent = () => {
-            const version = "0.5.0" ;
+            const version = "0.5.1" ;
            return (this.userAgent ||
                `stream-video-javascript-client-${this.node ? 'node' : 'browser'}-${version}`);
        };
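
The final hunk is just the injected version constant, which feeds the default user agent string. On 0.5.1, with no custom `userAgent` configured, the template in this hunk yields:

```js
// Evaluating the template literal from the hunk above:
const node = false; // browser environment
const version = '0.5.1';
console.log(`stream-video-javascript-client-${node ? 'node' : 'browser'}-${version}`);
// -> stream-video-javascript-client-browser-0.5.1 (or ...-node-0.5.1 on Node.js)
```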