@stream-io/video-client 1.40.3 → 1.41.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/CHANGELOG.md CHANGED
@@ -2,6 +2,24 @@
2
2
 
3
3
  This file was generated using [@jscutlery/semver](https://github.com/jscutlery/semver).
4
4
 
5
+ ## [1.41.1](https://github.com/GetStream/stream-video-js/compare/@stream-io/video-client-1.41.0...@stream-io/video-client-1.41.1) (2026-01-26)
6
+
7
+ ### Bug Fixes
8
+
9
+ - **safari:** Handle interrupted AudioContext and AudioSession states ([#2098](https://github.com/GetStream/stream-video-js/issues/2098)) ([975901f](https://github.com/GetStream/stream-video-js/commit/975901f399b46479928ec1e9f32da7e47bba9ad3))
10
+ - use multiple settings to determine default audio device RN-338 ([#2096](https://github.com/GetStream/stream-video-js/issues/2096)) ([19cf136](https://github.com/GetStream/stream-video-js/commit/19cf13651112b647903587a84a70a555fc68fc9c)), closes `…2BSettingsPriority.swift#L19` <!-- NOTE(review): original autolink was garbled by the changelog generator (pointed to a nonexistent repo/issue); de-linkified, original target uncertain -->
11
+
12
+ ## [1.41.0](https://github.com/GetStream/stream-video-js/compare/@stream-io/video-client-1.40.3...@stream-io/video-client-1.41.0) (2026-01-20)
13
+
14
+ ### Features
15
+
16
+ - **recording:** Support for Individual, Raw and Composite recording ([#2071](https://github.com/GetStream/stream-video-js/issues/2071)) ([e53269c](https://github.com/GetStream/stream-video-js/commit/e53269ce697121b70dbebaf4a6d2cf875440a2af))
17
+ - stereo audio output support RN-332 ([#2038](https://github.com/GetStream/stream-video-js/issues/2038)) ([2938037](https://github.com/GetStream/stream-video-js/commit/2938037d18e70ccf112a089eb3ec44cb034aed1d))
18
+
19
+ ### Bug Fixes
20
+
21
+ - add start bitrate even if there is no existing fmtp line ([#2088](https://github.com/GetStream/stream-video-js/issues/2088)) ([ae1f496](https://github.com/GetStream/stream-video-js/commit/ae1f4965a7ab0b00dbdea45090c6aed49eafabb7))
22
+
5
23
  ## [1.40.3](https://github.com/GetStream/stream-video-js/compare/@stream-io/video-client-1.40.2...@stream-io/video-client-1.40.3) (2026-01-16)
6
24
 
7
25
  ### Bug Fixes
@@ -26,6 +26,38 @@ const AudioSettingsResponseDefaultDeviceEnum = {
26
26
  SPEAKER: 'speaker',
27
27
  EARPIECE: 'earpiece',
28
28
  };
29
+ /**
30
+ * @export
31
+ */
32
+ const CallRecordingFailedEventRecordingTypeEnum = {
33
+ COMPOSITE: 'composite',
34
+ INDIVIDUAL: 'individual',
35
+ RAW: 'raw',
36
+ };
37
+ /**
38
+ * @export
39
+ */
40
+ const CallRecordingReadyEventRecordingTypeEnum = {
41
+ COMPOSITE: 'composite',
42
+ INDIVIDUAL: 'individual',
43
+ RAW: 'raw',
44
+ };
45
+ /**
46
+ * @export
47
+ */
48
+ const CallRecordingStartedEventRecordingTypeEnum = {
49
+ COMPOSITE: 'composite',
50
+ INDIVIDUAL: 'individual',
51
+ RAW: 'raw',
52
+ };
53
+ /**
54
+ * @export
55
+ */
56
+ const CallRecordingStoppedEventRecordingTypeEnum = {
57
+ COMPOSITE: 'composite',
58
+ INDIVIDUAL: 'individual',
59
+ RAW: 'raw',
60
+ };
29
61
  /**
30
62
  * @export
31
63
  */
@@ -61,6 +93,22 @@ const FrameRecordingSettingsResponseModeEnum = {
61
93
  DISABLED: 'disabled',
62
94
  AUTO_ON: 'auto-on',
63
95
  };
96
+ /**
97
+ * @export
98
+ */
99
+ const IndividualRecordingSettingsRequestModeEnum = {
100
+ AVAILABLE: 'available',
101
+ DISABLED: 'disabled',
102
+ AUTO_ON: 'auto-on',
103
+ };
104
+ /**
105
+ * @export
106
+ */
107
+ const IndividualRecordingSettingsResponseModeEnum = {
108
+ AVAILABLE: 'available',
109
+ DISABLED: 'disabled',
110
+ AUTO_ON: 'auto-on',
111
+ };
64
112
  /**
65
113
  * @export
66
114
  */
@@ -126,11 +174,15 @@ const OwnCapability = {
126
174
  START_BROADCAST_CALL: 'start-broadcast-call',
127
175
  START_CLOSED_CAPTIONS_CALL: 'start-closed-captions-call',
128
176
  START_FRAME_RECORD_CALL: 'start-frame-record-call',
177
+ START_INDIVIDUAL_RECORD_CALL: 'start-individual-record-call',
178
+ START_RAW_RECORD_CALL: 'start-raw-record-call',
129
179
  START_RECORD_CALL: 'start-record-call',
130
180
  START_TRANSCRIPTION_CALL: 'start-transcription-call',
131
181
  STOP_BROADCAST_CALL: 'stop-broadcast-call',
132
182
  STOP_CLOSED_CAPTIONS_CALL: 'stop-closed-captions-call',
133
183
  STOP_FRAME_RECORD_CALL: 'stop-frame-record-call',
184
+ STOP_INDIVIDUAL_RECORD_CALL: 'stop-individual-record-call',
185
+ STOP_RAW_RECORD_CALL: 'stop-raw-record-call',
134
186
  STOP_RECORD_CALL: 'stop-record-call',
135
187
  STOP_TRANSCRIPTION_CALL: 'stop-transcription-call',
136
188
  UPDATE_CALL: 'update-call',
@@ -168,6 +220,22 @@ const RTMPSettingsRequestQualityEnum = {
168
220
  PORTRAIT_1080X1920: 'portrait-1080x1920',
169
221
  PORTRAIT_1440X2560: 'portrait-1440x2560',
170
222
  };
223
+ /**
224
+ * @export
225
+ */
226
+ const RawRecordingSettingsRequestModeEnum = {
227
+ AVAILABLE: 'available',
228
+ DISABLED: 'disabled',
229
+ AUTO_ON: 'auto-on',
230
+ };
231
+ /**
232
+ * @export
233
+ */
234
+ const RawRecordingSettingsResponseModeEnum = {
235
+ AVAILABLE: 'available',
236
+ DISABLED: 'disabled',
237
+ AUTO_ON: 'auto-on',
238
+ };
171
239
  /**
172
240
  * @export
173
241
  */
@@ -3983,15 +4051,18 @@ const extractMid = (transceiver, transceiverInitIndex, sdp) => {
3983
4051
  return '';
3984
4052
  return String(transceiverInitIndex);
3985
4053
  };
3986
- /*
3987
- * Sets the start bitrate for the VP9 and H264 codecs in the SDP.
4054
+ /**
4055
+ * Sets the start bitrate for the VP9, H264, and AV1 codecs in the SDP.
3988
4056
  *
3989
4057
  * @param offerSdp the offer SDP to modify.
3990
- * @param startBitrate the start bitrate in kbps to set. Default is 1000 kbps.
4058
+ * @param maxBitrateKbps the maximum bitrate in kbps.
4059
+ * @param startBitrateFactor the factor (0-1) to multiply with maxBitrateKbps to get the start bitrate.
4060
+ * @param targetMid the media ID to target.
3991
4061
  */
3992
4062
  const setStartBitrate = (offerSdp, maxBitrateKbps, startBitrateFactor, targetMid) => {
3993
4063
  // start bitrate should be between 300kbps and max-bitrate-kbps
3994
- const startBitrate = Math.max(Math.min(maxBitrateKbps, startBitrateFactor * maxBitrateKbps), 300);
4064
+ // Clamp to max first, then ensure minimum of 300 (but never exceed max)
4065
+ const startBitrate = Math.min(maxBitrateKbps, Math.max(300, startBitrateFactor * maxBitrateKbps));
3995
4066
  const parsedSdp = parse(offerSdp);
3996
4067
  const targetCodecs = new Set(['av1', 'vp9', 'h264']);
3997
4068
  for (const media of parsedSdp.media) {
@@ -4002,14 +4073,27 @@ const setStartBitrate = (offerSdp, maxBitrateKbps, startBitrateFactor, targetMid
4002
4073
  for (const rtp of media.rtp) {
4003
4074
  if (!targetCodecs.has(rtp.codec.toLowerCase()))
4004
4075
  continue;
4005
- for (const fmtp of media.fmtp) {
4006
- if (fmtp.payload === rtp.payload) {
4007
- if (!fmtp.config.includes('x-google-start-bitrate')) {
4008
- fmtp.config += `;x-google-start-bitrate=${startBitrate}`;
4009
- }
4010
- break;
4076
+ // Find existing fmtp entry for this payload
4077
+ // Guard against media.fmtp being undefined when SDP has no a=fmtp lines
4078
+ const fmtpList = media.fmtp ?? (media.fmtp = []);
4079
+ const existingFmtp = fmtpList.find((fmtp) => fmtp.payload === rtp.payload);
4080
+ if (existingFmtp) {
4081
+ // Append to existing fmtp if not already present
4082
+ // Guard against undefined or empty config from malformed SDP
4083
+ const config = existingFmtp.config ?? '';
4084
+ if (!config.includes('x-google-start-bitrate')) {
4085
+ existingFmtp.config = config
4086
+ ? `${config};x-google-start-bitrate=${startBitrate}`
4087
+ : `x-google-start-bitrate=${startBitrate}`;
4011
4088
  }
4012
4089
  }
4090
+ else {
4091
+ // Create new fmtp entry if none exists
4092
+ fmtpList.push({
4093
+ payload: rtp.payload,
4094
+ config: `x-google-start-bitrate=${startBitrate}`,
4095
+ });
4096
+ }
4013
4097
  }
4014
4098
  }
4015
4099
  return write(parsedSdp);
@@ -4939,6 +5023,10 @@ const paginatedLayoutSortPreset = combineComparators(pinned, ifInvisibleOrUnknow
4939
5023
  */
4940
5024
  const livestreamOrAudioRoomSortPreset = combineComparators(ifInvisibleBy(combineComparators(dominantSpeaker, speaking, reactionType('raised-hand'), withVideoIngressSource, publishingVideo, publishingAudio)), role('admin', 'host', 'speaker'));
4941
5025
 
5026
+ const ensureExhausted = (x, message) => {
5027
+ videoLoggerSystem.getLogger('helpers').warn(message, x);
5028
+ };
5029
+
4942
5030
  /**
4943
5031
  * Returns the default egress object - when no egress data is available.
4944
5032
  */
@@ -4968,6 +5056,8 @@ class CallState {
4968
5056
  this.egressSubject = new BehaviorSubject(undefined);
4969
5057
  this.ingressSubject = new BehaviorSubject(undefined);
4970
5058
  this.recordingSubject = new BehaviorSubject(false);
5059
+ this.individualRecordingSubject = new BehaviorSubject(false);
5060
+ this.rawRecordingSubject = new BehaviorSubject(false);
4971
5061
  this.sessionSubject = new BehaviorSubject(undefined);
4972
5062
  this.settingsSubject = new BehaviorSubject(undefined);
4973
5063
  this.transcribingSubject = new BehaviorSubject(false);
@@ -5394,7 +5484,10 @@ class CallState {
5394
5484
  this.setCurrentValue(this.customSubject, call.custom);
5395
5485
  this.setCurrentValue(this.egressSubject, call.egress);
5396
5486
  this.setCurrentValue(this.ingressSubject, call.ingress);
5397
- this.setCurrentValue(this.recordingSubject, call.recording);
5487
+ const { individual_recording, composite_recording, raw_recording } = call.egress;
5488
+ this.setCurrentValue(this.recordingSubject, call.recording || composite_recording?.status === 'running');
5489
+ this.setCurrentValue(this.individualRecordingSubject, individual_recording?.status === 'running');
5490
+ this.setCurrentValue(this.rawRecordingSubject, raw_recording?.status === 'running');
5398
5491
  const s = this.setCurrentValue(this.sessionSubject, call.session);
5399
5492
  this.updateParticipantCountFromSession(s);
5400
5493
  this.setCurrentValue(this.settingsSubject, call.settings);
@@ -5470,6 +5563,21 @@ class CallState {
5470
5563
  },
5471
5564
  }));
5472
5565
  };
5566
+ this.updateFromRecordingEvent = (type, running) => {
5567
+ // handle the legacy format, where `type` is absent in the emitted events
5568
+ if (type === undefined || type === 'composite') {
5569
+ this.setCurrentValue(this.recordingSubject, running);
5570
+ }
5571
+ else if (type === 'individual') {
5572
+ this.setCurrentValue(this.individualRecordingSubject, running);
5573
+ }
5574
+ else if (type === 'raw') {
5575
+ this.setCurrentValue(this.rawRecordingSubject, running);
5576
+ }
5577
+ else {
5578
+ ensureExhausted(type, 'Unknown recording type');
5579
+ }
5580
+ };
5473
5581
  this.updateParticipantCountFromSession = (session) => {
5474
5582
  // when in JOINED state, we should use the participant count coming through
5475
5583
  // the SFU healthcheck event, as it's more accurate.
@@ -5704,6 +5812,8 @@ class CallState {
5704
5812
  }), distinctUntilChanged(isShallowEqual), shareReplay({ bufferSize: 1, refCount: true }));
5705
5813
  this.participantCount$ = duc(this.participantCountSubject);
5706
5814
  this.recording$ = duc(this.recordingSubject);
5815
+ this.individualRecording$ = duc(this.individualRecordingSubject);
5816
+ this.rawRecording$ = duc(this.rawRecordingSubject);
5707
5817
  this.transcribing$ = duc(this.transcribingSubject);
5708
5818
  this.captioning$ = duc(this.captioningSubject);
5709
5819
  this.eventHandlers = {
@@ -5770,9 +5880,15 @@ class CallState {
5770
5880
  },
5771
5881
  'call.permissions_updated': this.updateOwnCapabilities,
5772
5882
  'call.reaction_new': this.updateParticipantReaction,
5773
- 'call.recording_started': () => this.setCurrentValue(this.recordingSubject, true),
5774
- 'call.recording_stopped': () => this.setCurrentValue(this.recordingSubject, false),
5775
- 'call.recording_failed': () => this.setCurrentValue(this.recordingSubject, false),
5883
+ 'call.recording_started': (e) => {
5884
+ this.updateFromRecordingEvent(e.recording_type, true);
5885
+ },
5886
+ 'call.recording_stopped': (e) => {
5887
+ this.updateFromRecordingEvent(e.recording_type, false);
5888
+ },
5889
+ 'call.recording_failed': (e) => {
5890
+ this.updateFromRecordingEvent(e.recording_type, false);
5891
+ },
5776
5892
  'call.rejected': (e) => this.updateFromCallResponse(e.call),
5777
5893
  'call.ring': (e) => this.updateFromCallResponse(e.call),
5778
5894
  'call.missed': (e) => this.updateFromCallResponse(e.call),
@@ -5955,11 +6071,23 @@ class CallState {
5955
6071
  return this.getCurrentValue(this.ingress$);
5956
6072
  }
5957
6073
  /**
5958
- * Will provide the recording state of this call.
6074
+ * Will provide the composite recording state of this call.
5959
6075
  */
5960
6076
  get recording() {
5961
6077
  return this.getCurrentValue(this.recording$);
5962
6078
  }
6079
+ /**
6080
+ * Will provide the individual recording state of this call.
6081
+ */
6082
+ get individualRecording() {
6083
+ return this.getCurrentValue(this.individualRecording$);
6084
+ }
6085
+ /**
6086
+ * Will provide the raw recording state of this call.
6087
+ */
6088
+ get rawRecording() {
6089
+ return this.getCurrentValue(this.rawRecording$);
6090
+ }
5963
6091
  /**
5964
6092
  * Will provide the session data of this call.
5965
6093
  */
@@ -6060,7 +6188,7 @@ const getSdkVersion = (sdk) => {
6060
6188
  return sdk ? `${sdk.major}.${sdk.minor}.${sdk.patch}` : '0.0.0-development';
6061
6189
  };
6062
6190
 
6063
- const version = "1.40.3";
6191
+ const version = "1.41.1";
6064
6192
  const [major, minor, patch] = version.split('.');
6065
6193
  let sdkInfo = {
6066
6194
  type: SdkType.PLAIN_JAVASCRIPT,
@@ -7408,10 +7536,6 @@ class TransceiverCache {
7408
7536
  }
7409
7537
  }
7410
7538
 
7411
- const ensureExhausted = (x, message) => {
7412
- videoLoggerSystem.getLogger('helpers').warn(message, x);
7413
- };
7414
-
7415
7539
  const trackTypeToParticipantStreamKey = (trackType) => {
7416
7540
  switch (trackType) {
7417
7541
  case TrackType.SCREEN_SHARE:
@@ -7892,7 +8016,7 @@ class Publisher extends BasePeerConnection {
7892
8016
  let sdp = dangerouslyForceCodec
7893
8017
  ? removeCodecsExcept(baseSdp, dangerouslyForceCodec, fmtpLine)
7894
8018
  : baseSdp;
7895
- if (dangerouslySetStartBitrateFactor) {
8019
+ if (dangerouslySetStartBitrateFactor !== undefined) {
7896
8020
  this.transceiverCache.items().forEach((t) => {
7897
8021
  if (t.publishOption.trackType !== TrackType.VIDEO)
7898
8022
  return;
@@ -9248,12 +9372,13 @@ class DynascaleManager {
9248
9372
  /**
9249
9373
  * Creates a new DynascaleManager instance.
9250
9374
  */
9251
- constructor(callState, speaker) {
9375
+ constructor(callState, speaker, tracer) {
9252
9376
  /**
9253
9377
  * The viewport tracker instance.
9254
9378
  */
9255
9379
  this.viewportTracker = new ViewportTracker();
9256
9380
  this.logger = videoLoggerSystem.getLogger('DynascaleManager');
9381
+ this.useWebAudio = isSafari();
9257
9382
  this.pendingSubscriptionsUpdate = null;
9258
9383
  this.videoTrackSubscriptionOverridesSubject = new BehaviorSubject({});
9259
9384
  this.videoTrackSubscriptionOverrides$ = this.videoTrackSubscriptionOverridesSubject.asObservable();
@@ -9293,6 +9418,10 @@ class DynascaleManager {
9293
9418
  }
9294
9419
  };
9295
9420
  this.setVideoTrackSubscriptionOverrides = (override, sessionIds) => {
9421
+ this.tracer.trace('setVideoTrackSubscriptionOverrides', [
9422
+ override,
9423
+ sessionIds,
9424
+ ]);
9296
9425
  if (!sessionIds) {
9297
9426
  return setCurrentValue(this.videoTrackSubscriptionOverridesSubject, override ? { [globalOverrideKey]: override } : {});
9298
9427
  }
@@ -9374,6 +9503,18 @@ class DynascaleManager {
9374
9503
  this.setViewport = (element) => {
9375
9504
  return this.viewportTracker.setViewport(element);
9376
9505
  };
9506
+ /**
9507
+ * Sets whether to use WebAudio API for audio playback.
9508
+ * Must be set before joining the call.
9509
+ *
9510
+ * @internal
9511
+ *
9512
+ * @param useWebAudio whether to use WebAudio API.
9513
+ */
9514
+ this.setUseWebAudio = (useWebAudio) => {
9515
+ this.tracer.trace('setUseWebAudio', useWebAudio);
9516
+ this.useWebAudio = useWebAudio;
9517
+ };
9377
9518
  /**
9378
9519
  * Binds a DOM <video> element to the given session id.
9379
9520
  * This method will make sure that the video element will play
@@ -9589,6 +9730,7 @@ class DynascaleManager {
9589
9730
  // we will play audio directly through the audio element in other browsers
9590
9731
  audioElement.muted = false;
9591
9732
  audioElement.play().catch((e) => {
9733
+ this.tracer.trace('audioPlaybackError', e.message);
9592
9734
  this.logger.warn(`Failed to play audio stream`, e);
9593
9735
  });
9594
9736
  }
@@ -9623,32 +9765,57 @@ class DynascaleManager {
9623
9765
  };
9624
9766
  };
9625
9767
  this.getOrCreateAudioContext = () => {
9626
- if (this.audioContext || !isSafari())
9768
+ if (!this.useWebAudio)
9769
+ return;
9770
+ if (this.audioContext)
9627
9771
  return this.audioContext;
9628
9772
  const context = new AudioContext();
9773
+ this.tracer.trace('audioContext.create', context.state);
9629
9774
  if (context.state === 'suspended') {
9630
9775
  document.addEventListener('click', this.resumeAudioContext);
9631
9776
  }
9632
- // @ts-expect-error audioSession is available in Safari only
9777
+ context.addEventListener('statechange', () => {
9778
+ this.tracer.trace('audioContext.state', context.state);
9779
+ if (context.state === 'interrupted') {
9780
+ this.resumeAudioContext();
9781
+ }
9782
+ });
9633
9783
  const audioSession = navigator.audioSession;
9634
9784
  if (audioSession) {
9635
9785
  // https://github.com/w3c/audio-session/blob/main/explainer.md
9636
9786
  audioSession.type = 'play-and-record';
9787
+ let isSessionInterrupted = false;
9788
+ audioSession.addEventListener('statechange', () => {
9789
+ this.tracer.trace('audioSession.state', audioSession.state);
9790
+ if (audioSession.state === 'interrupted') {
9791
+ isSessionInterrupted = true;
9792
+ }
9793
+ else if (isSessionInterrupted) {
9794
+ this.resumeAudioContext();
9795
+ isSessionInterrupted = false;
9796
+ }
9797
+ });
9637
9798
  }
9638
9799
  return (this.audioContext = context);
9639
9800
  };
9640
9801
  this.resumeAudioContext = () => {
9641
- if (this.audioContext?.state === 'suspended') {
9642
- this.audioContext
9643
- .resume()
9644
- .catch((err) => this.logger.warn(`Can't resume audio context`, err))
9645
- .then(() => {
9802
+ if (!this.audioContext)
9803
+ return;
9804
+ const { state } = this.audioContext;
9805
+ if (state === 'suspended' || state === 'interrupted') {
9806
+ const tag = 'audioContext.resume';
9807
+ this.audioContext.resume().then(() => {
9808
+ this.tracer.trace(tag, this.audioContext?.state);
9646
9809
  document.removeEventListener('click', this.resumeAudioContext);
9810
+ }, (err) => {
9811
+ this.tracer.trace(`${tag}Error`, this.audioContext?.state);
9812
+ this.logger.warn(`Can't resume audio context`, err);
9647
9813
  });
9648
9814
  }
9649
9815
  };
9650
9816
  this.callState = callState;
9651
9817
  this.speaker = speaker;
9818
+ this.tracer = tracer;
9652
9819
  }
9653
9820
  setSfuClient(sfuClient) {
9654
9821
  this.sfuClient = sfuClient;
@@ -11868,6 +12035,37 @@ class SpeakerManager {
11868
12035
  this.state = new SpeakerState(call.tracer);
11869
12036
  this.setup();
11870
12037
  }
12038
+ apply(settings) {
12039
+ if (!isReactNative()) {
12040
+ return;
12041
+ }
12042
+ /// Determines if the speaker should be enabled based on a priority hierarchy of
12043
+ /// settings.
12044
+ ///
12045
+ /// The priority order is as follows:
12046
+ /// 1. If video camera is set to be on by default, speaker is enabled
12047
+ /// 2. If audio speaker is set to be on by default, speaker is enabled
12048
+ /// 3. If the default audio device is set to speaker, speaker is enabled
12049
+ ///
12050
+ /// This ensures that the speaker state aligns with the most important user
12051
+ /// preference or system requirement.
12052
+ const speakerOnWithSettingsPriority = settings.video.camera_default_on ||
12053
+ settings.audio.speaker_default_on ||
12054
+ settings.audio.default_device ===
12055
+ AudioSettingsRequestDefaultDeviceEnum.SPEAKER;
12056
+ const defaultDevice = speakerOnWithSettingsPriority
12057
+ ? AudioSettingsRequestDefaultDeviceEnum.SPEAKER
12058
+ : AudioSettingsRequestDefaultDeviceEnum.EARPIECE;
12059
+ if (this.defaultDevice !== defaultDevice) {
12060
+ this.call.logger.debug('SpeakerManager: setting default device', {
12061
+ defaultDevice,
12062
+ });
12063
+ this.defaultDevice = defaultDevice;
12064
+ globalThis.streamRNVideoSDK?.callManager.setup({
12065
+ defaultDevice,
12066
+ });
12067
+ }
12068
+ }
11871
12069
  setup() {
11872
12070
  if (this.areSubscriptionsSetUp) {
11873
12071
  return;
@@ -11930,8 +12128,6 @@ class SpeakerManager {
11930
12128
  /**
11931
12129
  * Set the volume of a participant.
11932
12130
  *
11933
- * Note: This method is not supported in React Native.
11934
- *
11935
12131
  * @param sessionId the participant's session id.
11936
12132
  * @param volume a number between 0 and 1. Set it to `undefined` to use the default volume.
11937
12133
  */
@@ -12274,6 +12470,7 @@ class Call {
12274
12470
  this.ringingSubject.next(false);
12275
12471
  this.cancelAutoDrop();
12276
12472
  this.clientStore.unregisterCall(this);
12473
+ globalThis.streamRNVideoSDK?.callManager.stop();
12277
12474
  this.camera.dispose();
12278
12475
  this.microphone.dispose();
12279
12476
  this.screenShare.dispose();
@@ -12606,6 +12803,7 @@ class Call {
12606
12803
  // re-apply them on later reconnections or server-side data fetches
12607
12804
  if (!this.deviceSettingsAppliedOnce && this.state.settings) {
12608
12805
  await this.applyDeviceConfig(this.state.settings, true);
12806
+ globalThis.streamRNVideoSDK?.callManager.start();
12609
12807
  this.deviceSettingsAppliedOnce = true;
12610
12808
  }
12611
12809
  // We shouldn't persist the `ring` and `notify` state after joining the call
@@ -13397,14 +13595,22 @@ class Call {
13397
13595
  /**
13398
13596
  * Starts recording the call
13399
13597
  */
13400
- this.startRecording = async (request) => {
13401
- return this.streamClient.post(`${this.streamClientBasePath}/start_recording`, request ? request : {});
13598
+ this.startRecording = async (dataOrType, type) => {
13599
+ type = typeof dataOrType === 'string' ? dataOrType : type;
13600
+ dataOrType = typeof dataOrType === 'string' ? undefined : dataOrType;
13601
+ const endpoint = !type
13602
+ ? `/start_recording`
13603
+ : `/recordings/${encodeURIComponent(type)}/start`;
13604
+ return this.streamClient.post(`${this.streamClientBasePath}${endpoint}`, dataOrType);
13402
13605
  };
13403
13606
  /**
13404
13607
  * Stops recording the call
13405
13608
  */
13406
- this.stopRecording = async () => {
13407
- return this.streamClient.post(`${this.streamClientBasePath}/stop_recording`, {});
13609
+ this.stopRecording = async (type) => {
13610
+ const endpoint = !type
13611
+ ? `/stop_recording`
13612
+ : `/recordings/${encodeURIComponent(type)}/stop`;
13613
+ return this.streamClient.post(`${this.streamClientBasePath}${endpoint}`);
13408
13614
  };
13409
13615
  /**
13410
13616
  * Starts the transcription of the call.
@@ -13801,6 +14007,7 @@ class Call {
13801
14007
  * @internal
13802
14008
  */
13803
14009
  this.applyDeviceConfig = async (settings, publish) => {
14010
+ this.speaker.apply(settings);
13804
14011
  await this.camera.apply(settings.video, publish).catch((err) => {
13805
14012
  this.logger.warn('Camera init failed', err);
13806
14013
  });
@@ -13970,7 +14177,7 @@ class Call {
13970
14177
  this.microphone = new MicrophoneManager(this);
13971
14178
  this.speaker = new SpeakerManager(this);
13972
14179
  this.screenShare = new ScreenShareManager(this);
13973
- this.dynascaleManager = new DynascaleManager(this.state, this.speaker);
14180
+ this.dynascaleManager = new DynascaleManager(this.state, this.speaker, this.tracer);
13974
14181
  }
13975
14182
  /**
13976
14183
  * A flag indicating whether the call is "ringing" type of call.
@@ -15114,7 +15321,7 @@ class StreamClient {
15114
15321
  this.getUserAgent = () => {
15115
15322
  if (!this.cachedUserAgent) {
15116
15323
  const { clientAppIdentifier = {} } = this.options;
15117
- const { sdkName = 'js', sdkVersion = "1.40.3", ...extras } = clientAppIdentifier;
15324
+ const { sdkName = 'js', sdkVersion = "1.41.1", ...extras } = clientAppIdentifier;
15118
15325
  this.cachedUserAgent = [
15119
15326
  `stream-video-${sdkName}-v${sdkVersion}`,
15120
15327
  ...Object.entries(extras).map(([key, value]) => `${key}=${value}`),
@@ -15750,5 +15957,5 @@ const humanize = (n) => {
15750
15957
  return String(n);
15751
15958
  };
15752
15959
 
15753
- export { AudioSettingsRequestDefaultDeviceEnum, AudioSettingsResponseDefaultDeviceEnum, browsers as Browsers, Call, CallState, CallType, CallTypes, CallingState, CameraManager, CameraManagerState, CreateDeviceRequestPushProviderEnum, DebounceType, DeviceManager, DeviceManagerState, DynascaleManager, ErrorFromResponse, FrameRecordingSettingsRequestModeEnum, FrameRecordingSettingsRequestQualityEnum, FrameRecordingSettingsResponseModeEnum, IngressAudioEncodingOptionsRequestChannelsEnum, IngressSourceRequestFpsEnum, IngressVideoLayerRequestCodecEnum, LayoutSettingsRequestNameEnum, MicrophoneManager, MicrophoneManagerState, NoiseCancellationSettingsModeEnum, OwnCapability, RNSpeechDetector, RTMPBroadcastRequestQualityEnum, RTMPSettingsRequestQualityEnum, RecordSettingsRequestModeEnum, RecordSettingsRequestQualityEnum, rxUtils as RxUtils, ScreenShareManager, ScreenShareState, events as SfuEvents, SfuJoinError, models as SfuModels, SpeakerManager, SpeakerState, StartClosedCaptionsRequestLanguageEnum, StartTranscriptionRequestLanguageEnum, StreamSfuClient, StreamVideoClient, StreamVideoReadOnlyStateStore, StreamVideoWriteableStateStore, TranscriptionSettingsRequestClosedCaptionModeEnum, TranscriptionSettingsRequestLanguageEnum, TranscriptionSettingsRequestModeEnum, TranscriptionSettingsResponseClosedCaptionModeEnum, TranscriptionSettingsResponseLanguageEnum, TranscriptionSettingsResponseModeEnum, VideoSettingsRequestCameraFacingEnum, VideoSettingsResponseCameraFacingEnum, ViewportTracker, VisibilityState, checkIfAudioOutputChangeSupported, combineComparators, conditional, createSoundDetector, defaultSortPreset, descending, deviceIds$, disposeOfMediaStream, dominantSpeaker, getAudioBrowserPermission, getAudioDevices, getAudioOutputDevices, getAudioStream, getClientDetails, getDeviceState, getScreenShareStream, getSdkInfo, getVideoBrowserPermission, getVideoDevices, getVideoStream, getWebRTCInfo, hasAudio, hasPausedTrack, hasScreenShare, hasScreenShareAudio, hasVideo, 
humanize, isPinned, livestreamOrAudioRoomSortPreset, logToConsole, name, noopComparator, paginatedLayoutSortPreset, pinned, publishingAudio, publishingVideo, reactionType, resolveDeviceId, role, screenSharing, setDeviceInfo, setOSInfo, setPowerState, setSdkInfo, setThermalState, setWebRTCInfo, speakerLayoutSortPreset, speaking, videoLoggerSystem, withParticipantSource };
15960
+ export { AudioSettingsRequestDefaultDeviceEnum, AudioSettingsResponseDefaultDeviceEnum, browsers as Browsers, Call, CallRecordingFailedEventRecordingTypeEnum, CallRecordingReadyEventRecordingTypeEnum, CallRecordingStartedEventRecordingTypeEnum, CallRecordingStoppedEventRecordingTypeEnum, CallState, CallType, CallTypes, CallingState, CameraManager, CameraManagerState, CreateDeviceRequestPushProviderEnum, DebounceType, DeviceManager, DeviceManagerState, DynascaleManager, ErrorFromResponse, FrameRecordingSettingsRequestModeEnum, FrameRecordingSettingsRequestQualityEnum, FrameRecordingSettingsResponseModeEnum, IndividualRecordingSettingsRequestModeEnum, IndividualRecordingSettingsResponseModeEnum, IngressAudioEncodingOptionsRequestChannelsEnum, IngressSourceRequestFpsEnum, IngressVideoLayerRequestCodecEnum, LayoutSettingsRequestNameEnum, MicrophoneManager, MicrophoneManagerState, NoiseCancellationSettingsModeEnum, OwnCapability, RNSpeechDetector, RTMPBroadcastRequestQualityEnum, RTMPSettingsRequestQualityEnum, RawRecordingSettingsRequestModeEnum, RawRecordingSettingsResponseModeEnum, RecordSettingsRequestModeEnum, RecordSettingsRequestQualityEnum, rxUtils as RxUtils, ScreenShareManager, ScreenShareState, events as SfuEvents, SfuJoinError, models as SfuModels, SpeakerManager, SpeakerState, StartClosedCaptionsRequestLanguageEnum, StartTranscriptionRequestLanguageEnum, StreamSfuClient, StreamVideoClient, StreamVideoReadOnlyStateStore, StreamVideoWriteableStateStore, TranscriptionSettingsRequestClosedCaptionModeEnum, TranscriptionSettingsRequestLanguageEnum, TranscriptionSettingsRequestModeEnum, TranscriptionSettingsResponseClosedCaptionModeEnum, TranscriptionSettingsResponseLanguageEnum, TranscriptionSettingsResponseModeEnum, VideoSettingsRequestCameraFacingEnum, VideoSettingsResponseCameraFacingEnum, ViewportTracker, VisibilityState, checkIfAudioOutputChangeSupported, combineComparators, conditional, createSoundDetector, defaultSortPreset, descending, deviceIds$, 
disposeOfMediaStream, dominantSpeaker, getAudioBrowserPermission, getAudioDevices, getAudioOutputDevices, getAudioStream, getClientDetails, getDeviceState, getScreenShareStream, getSdkInfo, getVideoBrowserPermission, getVideoDevices, getVideoStream, getWebRTCInfo, hasAudio, hasPausedTrack, hasScreenShare, hasScreenShareAudio, hasVideo, humanize, isPinned, livestreamOrAudioRoomSortPreset, logToConsole, name, noopComparator, paginatedLayoutSortPreset, pinned, publishingAudio, publishingVideo, reactionType, resolveDeviceId, role, screenSharing, setDeviceInfo, setOSInfo, setPowerState, setSdkInfo, setThermalState, setWebRTCInfo, speakerLayoutSortPreset, speaking, videoLoggerSystem, withParticipantSource };
15754
15961
  //# sourceMappingURL=index.browser.es.js.map