@stream-io/video-client 0.4.10 → 0.5.1

This diff reflects the content of publicly released versions of this package as they appear in their public registry, and is provided for informational purposes only.
package/CHANGELOG.md CHANGED
@@ -2,6 +2,24 @@
 
  This file was generated using [@jscutlery/semver](https://github.com/jscutlery/semver).
 
+ ### [0.5.1](https://github.com/GetStream/stream-video-js/compare/@stream-io/video-client-0.5.0...@stream-io/video-client-0.5.1) (2023-12-05)
+
+
+ ### Features
+
+ * **client:** speaking while muted in React Native using temporary peer connection ([#1207](https://github.com/GetStream/stream-video-js/issues/1207)) ([9093006](https://github.com/GetStream/stream-video-js/commit/90930063503b6dfb83572dad8a31e45b16bf1685))
+
+ ## [0.5.0](https://github.com/GetStream/stream-video-js/compare/@stream-io/video-client-0.4.10...@stream-io/video-client-0.5.0) (2023-11-29)
+
+
+ ### ⚠ BREAKING CHANGES
+
+ * **react-native:** move to webrtc 118 (#1197)
+
+ ### Features
+
+ * **react-native:** move to webrtc 118 ([#1197](https://github.com/GetStream/stream-video-js/issues/1197)) ([8cdbe11](https://github.com/GetStream/stream-video-js/commit/8cdbe11de069fcb6eae5643f5cef5c9612f6c805))
+
  ### [0.4.10](https://github.com/GetStream/stream-video-js/compare/@stream-io/video-client-0.4.9...@stream-io/video-client-0.4.10) (2023-11-27)
 
 
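The 0.5.1 feature entry refers to the `RNSpeechDetector` added further down in this diff: on React Native the SDK now samples the muted microphone through a temporary loopback peer connection instead of the Web Audio API. How an application observes the resulting flag is unchanged; the sketch below assumes the `speakingWhileMuted$` observable on the microphone manager state, which belongs to the public API but is not part of this diff.

```js
// Sketch only: `call` is an existing Call instance from @stream-io/video-client.
const subscription = call.microphone.state.speakingWhileMuted$.subscribe((speaking) => {
  if (speaking) console.log('You are speaking while muted');
});

// Stop observing when no longer needed.
subscription.unsubscribe();
```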
package/README.md CHANGED
@@ -1,4 +1,4 @@
- # Official Plain-JS SDK and Low-Level Client for [Stream Video](https://getstream.io/video/docs/)
+ # Official JavaScript SDK and Low-Level Client for [Stream Video](https://getstream.io/video/docs/)
 
  <img src="../../.readme-assets/Github-Graphic-JS.jpg" alt="Stream Video for JavaScript Header image" style="box-shadow: 0 3px 10px rgb(0 0 0 / 0.2); border-radius: 1rem" />
 
@@ -7,7 +7,6 @@ Low-level Video SDK client for browser and Node.js integrations.
  ## **Quick Links**
 
  - [Register](https://getstream.io/chat/trial/) to get an API key for Stream Video
- TODO: add links to docs and tutorials
 
  ## What is Stream?
 
@@ -15,9 +14,9 @@ Stream allows developers to rapidly deploy scalable feeds, chat messaging and vi
 
  With Stream's video components, you can use their SDK to build in-app video calling, audio rooms, audio calls, or live streaming. The best place to get started is with their tutorials:
 
- - [Video & Audio Calling Tutorial](https://getstream.io/video/docs/javascript/tutorials/video-calling/)
- - Audio Rooms Tutorial
- - [Livestreaming Tutorial](https://getstream.io/video/docs/javascript/tutorials/livestream/)
+ - [Video and Audio Calling Tutorial](https://getstream.io/video/sdk/javascript/tutorial/video-calling/)
+ - [Audio Rooms Tutorial](https://getstream.io/video/sdk/javascript/tutorial/audio-room/)
+ - [Livestream Tutorial](https://getstream.io/video/sdk/javascript/tutorial/livestreaming/)
 
  Stream provides UI components and state handling that make it easy to build video calling for your app. All calls run on Stream's network of edge servers around the world, ensuring optimal latency and reliability.
 
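For orientation, the low-level client the README points at can be bootstrapped in a few lines. This is only a sketch based on the documented `StreamVideoClient` API and is not part of this diff; the API key, token, user id, and call id are placeholders.

```js
import { StreamVideoClient } from '@stream-io/video-client';

// Placeholders: use your own API key, user id, and token.
const client = new StreamVideoClient('your-api-key');
await client.connectUser({ id: 'alice' }, 'user-token');

// Create or join a call on Stream's edge network.
const call = client.call('default', 'my-first-call');
await call.join({ create: true });
```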
@@ -5829,6 +5829,15 @@ const createSignalClient = (options) => {
      return new SignalServerClient(transport);
  };
 
+ /**
+  * Checks whether we are using React Native
+  */
+ const isReactNative = () => {
+     if (typeof navigator === 'undefined')
+         return false;
+     return navigator.product?.toLowerCase() === 'reactnative';
+ };
+
  // log levels, sorted by verbosity
  const logLevels = Object.freeze({
      trace: 0,
@@ -5846,6 +5855,11 @@ const logToConsole = (logLevel, message, ...args) => {
          logMethod = console.error;
          break;
      case 'warn':
+         if (isReactNative()) {
+             message = `WARN: ${message}`;
+             logMethod = console.info;
+             break;
+         }
          logMethod = console.warn;
          break;
      case 'info':
@@ -6032,15 +6046,6 @@ function getIceCandidate(candidate) {
      }
  }
 
- /**
-  * Checks whether we are using React Native
-  */
- const isReactNative = () => {
-     if (typeof navigator === 'undefined')
-         return false;
-     return navigator.product?.toLowerCase() === 'reactnative';
- };
-
  let sdkInfo;
  let osInfo;
  let deviceInfo;
@@ -9313,7 +9318,7 @@ const createStatsReporter = ({ subscriber, publisher, state, pollingIntervalInMs
  const transform = (report, opts) => {
      const { trackKind, kind } = opts;
      const direction = kind === 'subscriber' ? 'inbound-rtp' : 'outbound-rtp';
-     const stats = flatten(report);
+     const stats = flatten$1(report);
      const streams = stats
          .filter((stat) => stat.type === direction &&
              stat.kind === trackKind)
@@ -9408,7 +9413,7 @@ const aggregate = (stats) => {
   *
   * @param report the report to flatten.
   */
- const flatten = (report) => {
+ const flatten$1 = (report) => {
      const stats = [];
      report.forEach((s) => {
          stats.push(s);
@@ -10750,7 +10755,7 @@ class MicrophoneManagerState extends InputMediaDeviceManagerState {
  }
 
  const DETECTION_FREQUENCY_IN_MS = 500;
- const AUDIO_LEVEL_THRESHOLD = 150;
+ const AUDIO_LEVEL_THRESHOLD$1 = 150;
  const FFT_SIZE = 128;
  /**
   * Creates a new sound detector.
@@ -10761,7 +10766,7 @@ const FFT_SIZE = 128;
   * @returns a clean-up function which once invoked stops the sound detector.
   */
  const createSoundDetector = (audioStream, onSoundDetectedStateChanged, options = {}) => {
-     const { detectionFrequencyInMs = DETECTION_FREQUENCY_IN_MS, audioLevelThreshold = AUDIO_LEVEL_THRESHOLD, fftSize = FFT_SIZE, destroyStreamOnStop = true, } = options;
+     const { detectionFrequencyInMs = DETECTION_FREQUENCY_IN_MS, audioLevelThreshold = AUDIO_LEVEL_THRESHOLD$1, fftSize = FFT_SIZE, destroyStreamOnStop = true, } = options;
      const audioContext = new AudioContext();
      const analyser = audioContext.createAnalyser();
      analyser.fftSize = fftSize;
@@ -10800,6 +10805,99 @@ const createSoundDetector = (audioStream, onSoundDetectedStateChanged, options =
  };
  };
 
+ /**
+  * Flatten the stats report into an array of stats objects.
+  *
+  * @param report the report to flatten.
+  */
+ const flatten = (report) => {
+     const stats = [];
+     report.forEach((s) => {
+         stats.push(s);
+     });
+     return stats;
+ };
+ const AUDIO_LEVEL_THRESHOLD = 0.2;
+ class RNSpeechDetector {
+     constructor() {
+         this.pc1 = new RTCPeerConnection({});
+         this.pc2 = new RTCPeerConnection({});
+     }
+     /**
+      * Starts the speech detection.
+      */
+     async start() {
+         try {
+             const audioStream = await navigator.mediaDevices.getUserMedia({
+                 audio: true,
+             });
+             this.pc1.addEventListener('icecandidate', async (e) => {
+                 await this.pc2.addIceCandidate(e.candidate);
+             });
+             this.pc2.addEventListener('icecandidate', async (e) => {
+                 await this.pc1.addIceCandidate(e.candidate);
+             });
+             audioStream
+                 .getTracks()
+                 .forEach((track) => this.pc1.addTrack(track, audioStream));
+             const offer = await this.pc1.createOffer({});
+             await this.pc2.setRemoteDescription(offer);
+             await this.pc1.setLocalDescription(offer);
+             const answer = await this.pc2.createAnswer();
+             await this.pc1.setRemoteDescription(answer);
+             await this.pc2.setLocalDescription(answer);
+             const audioTracks = audioStream.getAudioTracks();
+             // We need to mute the audio track for this temporary stream, or else you will hear yourself twice while in the call.
+             audioTracks.forEach((track) => (track.enabled = false));
+         }
+         catch (error) {
+             console.error('Error connecting and negotiating between PeerConnections:', error);
+         }
+     }
+     /**
+      * Stops the speech detection and releases all allocated resources.
+      */
+     stop() {
+         this.pc1.close();
+         this.pc2.close();
+         if (this.intervalId) {
+             clearInterval(this.intervalId);
+         }
+     }
+     /**
+      * Public method that detects the audio levels and returns the status.
+      */
+     onSpeakingDetectedStateChange(onSoundDetectedStateChanged) {
+         this.intervalId = setInterval(async () => {
+             const stats = (await this.pc1.getStats());
+             const report = flatten(stats);
+             // Audio levels are present inside stats of type `media-source` and of kind `audio`
+             const audioMediaSourceStats = report.find((stat) => stat.type === 'media-source' &&
+                 stat.kind === 'audio');
+             if (audioMediaSourceStats) {
+                 const { audioLevel } = audioMediaSourceStats;
+                 if (audioLevel) {
+                     if (audioLevel >= AUDIO_LEVEL_THRESHOLD) {
+                         onSoundDetectedStateChanged({
+                             isSoundDetected: true,
+                             audioLevel,
+                         });
+                     }
+                     else {
+                         onSoundDetectedStateChanged({
+                             isSoundDetected: false,
+                             audioLevel: 0,
+                         });
+                     }
+                 }
+             }
+         }, 1000);
+         return () => {
+             clearInterval(this.intervalId);
+         };
+     }
+ }
+
  class MicrophoneManager extends InputMediaDeviceManager {
      constructor(call) {
          super(call, new MicrophoneManagerState(), TrackType.AUDIO);
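Driving the detector added above looks roughly like this; the sketch is derived from the diffed code itself (`RNSpeechDetector` is an internal helper of the bundle, not a public export).

```js
// Internal-usage sketch of the RNSpeechDetector shown above.
const detector = new RNSpeechDetector();
await detector.start(); // negotiates the muted loopback peer connection

const unsubscribe = detector.onSpeakingDetectedStateChange(({ isSoundDetected, audioLevel }) => {
  console.log(isSoundDetected ? `speaking, level ${audioLevel}` : 'silent');
});

// Tear down: stop the polling interval and close both peer connections.
unsubscribe();
detector.stop();
```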
@@ -10841,20 +10939,31 @@ class MicrophoneManager extends InputMediaDeviceManager {
          return this.call.stopPublish(TrackType.AUDIO, stopTracks);
      }
      async startSpeakingWhileMutedDetection(deviceId) {
+         await this.stopSpeakingWhileMutedDetection();
          if (isReactNative()) {
-             return;
+             this.rnSpeechDetector = new RNSpeechDetector();
+             await this.rnSpeechDetector.start();
+             const unsubscribe = this.rnSpeechDetector?.onSpeakingDetectedStateChange((event) => {
+                 this.state.setSpeakingWhileMuted(event.isSoundDetected);
+             });
+             this.soundDetectorCleanup = () => {
+                 unsubscribe();
+                 this.rnSpeechDetector?.stop();
+                 this.rnSpeechDetector = undefined;
+             };
+         }
+         else {
+             // Need to start a new stream that's not connected to publisher
+             const stream = await this.getStream({
+                 deviceId,
+             });
+             this.soundDetectorCleanup = createSoundDetector(stream, (event) => {
+                 this.state.setSpeakingWhileMuted(event.isSoundDetected);
+             });
          }
-         await this.stopSpeakingWhileMutedDetection();
-         // Need to start a new stream that's not connected to publisher
-         const stream = await this.getStream({
-             deviceId,
-         });
-         this.soundDetectorCleanup = createSoundDetector(stream, (event) => {
-             this.state.setSpeakingWhileMuted(event.isSoundDetected);
-         });
      }
      async stopSpeakingWhileMutedDetection() {
-         if (isReactNative() || !this.soundDetectorCleanup) {
+         if (!this.soundDetectorCleanup) {
              return;
          }
          this.state.setSpeakingWhileMuted(false);
@@ -14032,7 +14141,7 @@ class StreamClient {
          });
      };
      this.getUserAgent = () => {
-         const version = "0.4.10" ;
+         const version = "0.5.1" ;
          return (this.userAgent ||
              `stream-video-javascript-client-${this.node ? 'node' : 'browser'}-${version}`);
      };