@newgameplusinc/odyssey-audio-video-sdk-dev 1.0.16 → 1.0.17

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/README.md CHANGED
@@ -13,7 +13,6 @@ It mirrors the production SDK used by Odyssey V2 and ships ready-to-drop into an
13
13
  - 🧭 **Accurate pose propagation** – `updatePosition()` streams listener pose to the SFU while `participant-position-updated` keeps the local store in sync.
14
14
  - 🎧 **Studio-grade spatial audio** – each remote participant gets a dedicated Web Audio graph: denoiser → high-pass → low-pass → HRTF `PannerNode` → adaptive gain → master compressor.
15
15
  - 🎥 **Camera-ready streams** – video tracks are exposed separately so UI layers can render muted `<video>` tags while audio stays inside Web Audio.
16
- - πŸŽ™οΈ **Clean microphone uplink (opt‑in)** – when `outboundTuning.enabled=true`, `enhanceOutgoingAudioTrack` runs mic input through denoiser + EQ + compressor before hitting the SFU.
17
16
  - πŸ” **EventEmitter contract** – subscribe to `room-joined`, `consumer-created`, `participant-position-updated`, etc., without touching Socket.IO directly.
18
17
 
19
18
  ## Quick Start
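
To make the bullets above concrete, here is a minimal usage sketch written against the method and event names that appear elsewhere in this diff (see `dist/index.d.ts` below). The constructor arguments, the `on()`-style subscription, and the vector shapes are assumptions, not details confirmed by the published typings shown here.

```ts
import { OdysseySpatialComms } from "@newgameplusinc/odyssey-audio-video-sdk-dev";

async function start(): Promise<void> {
  // Assumption: constructor options and event payload types are illustrative only.
  const sdk = new OdysseySpatialComms();

  sdk.on("room-joined", () => console.log("joined"));
  sdk.on("consumer-created", (consumer: unknown) => {
    // Hand video tracks to the UI; audio stays inside Web Audio.
  });
  sdk.on("participant-position-updated", (update: unknown) => {
    // Keep the local participant store in sync with remote poses.
  });

  await sdk.resumeAudio(); // AudioContext playback needs a user gesture first

  // Stream the listener pose to the SFU as the player moves (vector shapes assumed).
  sdk.updatePosition(
    { x: 1, y: 2, z: 0 },
    { forward: { x: 0, y: 1, z: 0 }, up: { x: 0, y: 0, z: 1 } }
  );
}
```
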
@@ -81,7 +80,6 @@ sdk.setListenerFromLSD(listenerPos, cameraPos, lookAtPos);
81
80
  - **Orientation math** – `setListenerFromLSD()` builds forward/right/up vectors from camera/LookAt to keep the listener aligned with head movement.
82
81
  - **Dynamic distance gain** – `updateSpatialAudio()` measures distance from listener → source and applies a smooth rolloff curve, so distant avatars fade to silence.
83
82
  - **Noise handling** – optional AudioWorklet denoiser plus high/low-pass filters trim rumble & hiss before HRTF processing.
84
- - **Dynamic gate (opt-in)** – enable via `noiseGate.enabled=true` to let the SDK automatically clamp remote tracks when they're idle.
85
83
 
86
84
  #### How Spatial Audio Is Built
87
85
  1. **Telemetry ingestion** – each LSD packet is passed through `setListenerFromLSD(listenerPos, cameraPos, lookAtPos)` so the Web Audio listener matches the player's real head/camera pose.
@@ -91,13 +89,6 @@ sdk.setListenerFromLSD(listenerPos, cameraPos, lookAtPos);
91
89
  4. **Distance-aware gain** – the manager stores the latest listener pose and computes the Euclidean distance to each remote participant on every update. A custom rolloff curve adjusts gain before the compressor, giving the "someone on my left / far away" perception without blowing out master levels.
92
90
  5. **Left/right rendering** – because the panner uses `panningModel = "HRTF"`, the browser renders the processed signal through head-related transfer functions before it reaches the user's audio hardware, producing natural interaural time and intensity differences.
93
91
 
94
- #### How Microphone Audio Is Tuned Before Sending (Opt-In)
95
- > Disabled by default. Enable via `new SpatialAudioManager({ outboundTuning: { enabled: true } })`.
96
- 1. **Hardware constraints first** – the SDK requests `noiseSuppression`, `echoCancellation`, and `autoGainControl` on the raw `MediaStreamTrack` (plus Chromium-specific `goog*` flags).
97
- 2. **Web Audio pre-flight** – `enhanceOutgoingAudioTrack(track)` clones the mic into a dedicated `AudioContext` and chains it through: `Denoiser → 50/60 Hz notches → Low-shelf rumble cut → High-pass (95 Hz) → Low-pass (7.2 kHz) → High-shelf tame → Presence boost → Dynamics compressor → Adaptive gate`.
98
- 3. **Adaptive gate** – a lightweight RMS monitor clamps the gate gain when only background hiss remains, but opens instantly when speech energy rises.
99
- 4. **Clean stream to SFU** – the processed track is what you pass to `produceTrack`, so every participant receives the filtered audio (and your local store uses the same track for mute toggles). Toggle the feature off to fall back to raw WebRTC audio instantly.
100
-
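
The per-participant graph described in the bullets and numbered steps above can be reproduced with plain Web Audio. A simplified sketch of that chain follows; the denoiser worklet is omitted and the filter values are illustrative rather than the SDK's exact tuning.

```ts
// Simplified per-participant chain: source → high-pass → low-pass → HRTF panner
// → analyser → per-participant gain → master gain (compressor/destination sit upstream).
function attachSpatialChain(
  ctx: AudioContext,
  track: MediaStreamTrack,
  masterGain: GainNode
) {
  const source = ctx.createMediaStreamSource(new MediaStream([track]));

  const highpass = ctx.createBiquadFilter();
  highpass.type = "highpass";
  highpass.frequency.value = 100; // illustrative rumble cut

  const lowpass = ctx.createBiquadFilter();
  lowpass.type = "lowpass";
  lowpass.frequency.value = 7500; // matches the "below 8 kHz" note in the source

  const panner = ctx.createPanner();
  panner.panningModel = "HRTF"; // head-related transfer functions for 3D cues

  const analyser = ctx.createAnalyser();
  const gain = ctx.createGain(); // driven by the distance rolloff curve

  source.connect(highpass);
  highpass.connect(lowpass);
  lowpass.connect(panner);
  panner.connect(analyser);
  analyser.connect(gain);
  gain.connect(masterGain);

  return { panner, gain, analyser };
}
```
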
101
92
  ## Video Flow (Capture ↔ Rendering)
102
93
 
103
94
  ```
@@ -12,17 +12,9 @@ type DenoiserOptions = {
12
12
  noiseFloor?: number;
13
13
  release?: number;
14
14
  };
15
- type NoiseGateOptions = {
16
- enabled?: boolean;
17
- };
18
- type OutboundTuningOptions = {
19
- enabled?: boolean;
20
- };
21
15
  type SpatialAudioOptions = {
22
16
  distance?: SpatialAudioDistanceConfig;
23
17
  denoiser?: DenoiserOptions;
24
- noiseGate?: NoiseGateOptions;
25
- outboundTuning?: OutboundTuningOptions;
26
18
  };
27
19
  export declare class SpatialAudioManager extends EventManager {
28
20
  private audioContext;
@@ -31,16 +23,12 @@ export declare class SpatialAudioManager extends EventManager {
31
23
  private monitoringIntervals;
32
24
  private compressor;
33
25
  private options;
26
+ private denoiseWorkletReady;
34
27
  private denoiseWorkletUrl?;
35
28
  private denoiserWasmBytes?;
36
- private denoiseContextPromises;
37
29
  private listenerPosition;
38
30
  private listenerInitialized;
39
- private stabilityState;
40
- private outgoingProcessors;
41
31
  private listenerDirection;
42
- private noiseGateEnabled;
43
- private outboundTuningEnabled;
44
32
  constructor(options?: SpatialAudioOptions);
45
33
  getAudioContext(): AudioContext;
46
34
  /**
@@ -59,9 +47,7 @@ export declare class SpatialAudioManager extends EventManager {
59
47
  * @param bypassSpatialization For testing - bypasses 3D positioning
60
48
  */
61
49
  setupSpatialAudioForParticipant(participantId: string, track: MediaStreamTrack, bypassSpatialization?: boolean): Promise<void>;
62
- enhanceOutgoingAudioTrack(track: MediaStreamTrack): Promise<MediaStreamTrack>;
63
50
  private startMonitoring;
64
- private handleTrackStability;
65
51
  /**
66
52
  * Update spatial audio position and orientation for a participant
67
53
  *
@@ -111,9 +97,6 @@ export declare class SpatialAudioManager extends EventManager {
111
97
  private calculateDistanceGain;
112
98
  private normalizePositionUnits;
113
99
  private isDenoiserEnabled;
114
- private applyHardwareNoiseConstraints;
115
- private startOutboundMonitor;
116
- private cleanupOutboundProcessor;
117
100
  private ensureDenoiseWorklet;
118
101
  private resolveOptions;
119
102
  }
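
After this change `SpatialAudioOptions` carries only the `distance` and `denoiser` blocks. A minimal construction sketch; the distance field names are inferred from `resolveOptions` further down in the diff, the values are purely illustrative, and the root export path is an assumption:

```ts
import { SpatialAudioManager } from "@newgameplusinc/odyssey-audio-video-sdk-dev";

const spatialAudio = new SpatialAudioManager({
  distance: {
    refDistance: 1,     // distance at which rolloff begins (illustrative)
    maxDistance: 40,    // beyond this the custom curve bottoms out
    rolloffFactor: 1.4, // steeper values fade distant participants faster
  },
  denoiser: {
    enabled: true,
    noiseFloor: 0.004,  // defaults appear later in resolveOptions()
    release: 0.18,
  },
});
// noiseGate / outboundTuning keys are gone in 1.0.17 and would now be type errors.
```
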
@@ -7,23 +7,19 @@ class SpatialAudioManager extends EventManager_1.EventManager {
7
7
  super();
8
8
  this.participantNodes = new Map();
9
9
  this.monitoringIntervals = new Map();
10
- this.denoiseContextPromises = new WeakMap();
10
+ this.denoiseWorkletReady = null;
11
11
  this.listenerPosition = { x: 0, y: 0, z: 0 };
12
12
  this.listenerInitialized = false;
13
- this.stabilityState = new Map();
14
- this.outgoingProcessors = new Map();
15
13
  this.listenerDirection = {
16
14
  forward: { x: 0, y: 1, z: 0 },
17
15
  up: { x: 0, y: 0, z: 1 },
18
16
  };
19
17
  this.options = this.resolveOptions(options);
20
- this.noiseGateEnabled = this.options.noiseGate?.enabled ?? false;
21
- this.outboundTuningEnabled = this.options.outboundTuning?.enabled ?? false;
22
18
  // Use high sample rate for best audio quality
23
19
  this.audioContext = new AudioContext({ sampleRate: 48000 });
24
20
  // Master gain
25
21
  this.masterGainNode = this.audioContext.createGain();
26
- this.masterGainNode.gain.value = 1.8; // Lower headroom to avoid hiss from boosted noise floor
22
+ this.masterGainNode.gain.value = 5.0;
27
23
  // Compressor for dynamic range control and preventing distortion
28
24
  this.compressor = this.audioContext.createDynamicsCompressor();
29
25
  this.compressor.threshold.value = -24; // dB
@@ -73,7 +69,6 @@ class SpatialAudioManager extends EventManager_1.EventManager {
73
69
  const panner = this.audioContext.createPanner();
74
70
  const analyser = this.audioContext.createAnalyser();
75
71
  const gain = this.audioContext.createGain();
76
- const noiseGate = this.audioContext.createGain();
77
72
  let denoiseNode;
78
73
  if (this.isDenoiserEnabled() && typeof this.audioContext.audioWorklet !== "undefined") {
79
74
  try {
@@ -107,8 +102,6 @@ class SpatialAudioManager extends EventManager_1.EventManager {
107
102
  lowpassFilter.type = "lowpass";
108
103
  lowpassFilter.frequency.value = 7500; // Below 8kHz to avoid flat/muffled sound
109
104
  lowpassFilter.Q.value = 1.0; // Quality factor
110
- // Adaptive noise gate defaults
111
- noiseGate.gain.value = 1.0;
112
105
  // Configure Panner for realistic 3D spatial audio
113
106
  const distanceConfig = this.getDistanceConfig();
114
107
  panner.panningModel = "HRTF"; // Head-Related Transfer Function for realistic 3D
@@ -128,20 +121,15 @@ class SpatialAudioManager extends EventManager_1.EventManager {
128
121
  }
129
122
  currentNode.connect(highpassFilter);
130
123
  highpassFilter.connect(lowpassFilter);
131
- let postFilterNode = lowpassFilter;
132
- if (this.noiseGateEnabled) {
133
- lowpassFilter.connect(noiseGate);
134
- postFilterNode = noiseGate;
135
- }
136
124
  if (bypassSpatialization) {
137
125
  console.log(`🔊 TESTING: Connecting audio directly to destination (bypassing spatial audio) for ${participantId}`);
138
- postFilterNode.connect(analyser);
126
+ lowpassFilter.connect(analyser);
139
127
  analyser.connect(this.masterGainNode);
140
128
  }
141
129
  else {
142
130
  // Standard spatialized path with full audio chain
143
- // Audio Chain: source -> filters -> (optional gate) -> panner -> analyser -> gain -> masterGain -> compressor -> destination
144
- postFilterNode.connect(panner);
131
+ // Audio Chain: source -> filters -> panner -> analyser -> gain -> masterGain -> compressor -> destination
132
+ lowpassFilter.connect(panner);
145
133
  panner.connect(analyser);
146
134
  analyser.connect(gain);
147
135
  gain.connect(this.masterGainNode);
@@ -151,21 +139,11 @@ class SpatialAudioManager extends EventManager_1.EventManager {
151
139
  panner,
152
140
  analyser,
153
141
  gain,
154
- noiseGate,
155
142
  highpassFilter,
156
143
  lowpassFilter,
157
144
  denoiseNode,
158
145
  stream,
159
146
  });
160
- this.stabilityState.set(participantId, {
161
- smoothedLevel: 0,
162
- targetGain: 1,
163
- networkMuted: false,
164
- });
165
- if (this.noiseGateEnabled && typeof track.onmute !== "undefined") {
166
- track.onmute = () => this.handleTrackStability(participantId, true);
167
- track.onunmute = () => this.handleTrackStability(participantId, false);
168
- }
169
147
  console.log(`🎧 Spatial audio setup complete for ${participantId}:`, {
170
148
  audioContextState: this.audioContext.state,
171
149
  sampleRate: this.audioContext.sampleRate,
@@ -182,146 +160,15 @@ class SpatialAudioManager extends EventManager_1.EventManager {
182
160
  rolloffFactor: panner.rolloffFactor,
183
161
  },
184
162
  });
185
- // Start monitoring audio levels if gate enabled
186
- if (this.noiseGateEnabled) {
187
- this.startMonitoring(participantId);
188
- }
189
- }
190
- async enhanceOutgoingAudioTrack(track) {
191
- if (track.kind !== "audio" || !this.outboundTuningEnabled) {
192
- return track;
193
- }
194
- const existingProcessor = Array.from(this.outgoingProcessors.values()).find((processor) => processor.originalTrack === track);
195
- if (existingProcessor) {
196
- return existingProcessor.processedTrack;
197
- }
198
- await this.applyHardwareNoiseConstraints(track);
199
- const context = new AudioContext({ sampleRate: 48000 });
200
- await context.resume();
201
- const sourceStream = new MediaStream([track]);
202
- const source = context.createMediaStreamSource(sourceStream);
203
- let current = source;
204
- let denoiseNode;
205
- if (this.isDenoiserEnabled() && typeof context.audioWorklet !== "undefined") {
206
- try {
207
- await this.ensureDenoiseWorklet(context);
208
- denoiseNode = new AudioWorkletNode(context, "odyssey-denoise", {
209
- numberOfInputs: 1,
210
- numberOfOutputs: 1,
211
- processorOptions: {
212
- enabled: true,
213
- threshold: this.options.denoiser?.threshold,
214
- noiseFloor: this.options.denoiser?.noiseFloor,
215
- release: this.options.denoiser?.release,
216
- wasmBytes: this.denoiserWasmBytes
217
- ? this.denoiserWasmBytes.slice(0)
218
- : null,
219
- },
220
- });
221
- current.connect(denoiseNode);
222
- current = denoiseNode;
223
- }
224
- catch (error) {
225
- console.warn("⚠️ Outgoing denoiser unavailable, continuing without it.", error);
226
- }
227
- }
228
- const notch60 = context.createBiquadFilter();
229
- notch60.type = "notch";
230
- notch60.frequency.value = 60;
231
- notch60.Q.value = 24;
232
- current.connect(notch60);
233
- current = notch60;
234
- const notch50 = context.createBiquadFilter();
235
- notch50.type = "notch";
236
- notch50.frequency.value = 50;
237
- notch50.Q.value = 24;
238
- current.connect(notch50);
239
- current = notch50;
240
- const lowShelf = context.createBiquadFilter();
241
- lowShelf.type = "lowshelf";
242
- lowShelf.frequency.value = 120;
243
- lowShelf.gain.value = -3;
244
- current.connect(lowShelf);
245
- current = lowShelf;
246
- const highpassFilter = context.createBiquadFilter();
247
- highpassFilter.type = "highpass";
248
- highpassFilter.frequency.value = 95;
249
- highpassFilter.Q.value = 0.8;
250
- current.connect(highpassFilter);
251
- current = highpassFilter;
252
- const lowpassFilter = context.createBiquadFilter();
253
- lowpassFilter.type = "lowpass";
254
- lowpassFilter.frequency.value = 7200;
255
- lowpassFilter.Q.value = 0.8;
256
- current.connect(lowpassFilter);
257
- current = lowpassFilter;
258
- const hissShelf = context.createBiquadFilter();
259
- hissShelf.type = "highshelf";
260
- hissShelf.frequency.value = 6400;
261
- hissShelf.gain.value = -4;
262
- current.connect(hissShelf);
263
- current = hissShelf;
264
- const presenceBoost = context.createBiquadFilter();
265
- presenceBoost.type = "peaking";
266
- presenceBoost.frequency.value = 2400;
267
- presenceBoost.Q.value = 1.1;
268
- presenceBoost.gain.value = 2.4;
269
- current.connect(presenceBoost);
270
- current = presenceBoost;
271
- const compressor = context.createDynamicsCompressor();
272
- compressor.threshold.value = -18;
273
- compressor.knee.value = 16;
274
- compressor.ratio.value = 3.2;
275
- compressor.attack.value = 0.002;
276
- compressor.release.value = 0.22;
277
- current.connect(compressor);
278
- current = compressor;
279
- const postCompressorTap = context.createGain();
280
- postCompressorTap.gain.value = 1.05;
281
- current.connect(postCompressorTap);
282
- current = postCompressorTap;
283
- const analyser = context.createAnalyser();
284
- analyser.fftSize = 512;
285
- current.connect(analyser);
286
- const gate = context.createGain();
287
- gate.gain.value = 1;
288
- current.connect(gate);
289
- const destination = context.createMediaStreamDestination();
290
- gate.connect(destination);
291
- const processedTrack = destination.stream.getAudioTracks()[0];
292
- processedTrack.contentHint = "speech";
293
- const processorId = processedTrack.id;
294
- const monitor = this.startOutboundMonitor(processorId, analyser, gate);
295
- const cleanup = () => this.cleanupOutboundProcessor(processorId);
296
- processedTrack.addEventListener("ended", cleanup);
297
- track.addEventListener("ended", cleanup);
298
- this.outgoingProcessors.set(processorId, {
299
- context,
300
- sourceStream,
301
- destinationStream: destination.stream,
302
- analyser,
303
- gate,
304
- monitor,
305
- originalTrack: track,
306
- processedTrack,
307
- cleanupListener: cleanup,
308
- });
309
- console.log("πŸŽ›οΈ [SDK] Outgoing audio tuned", {
310
- originalTrackId: track.id,
311
- processedTrackId: processedTrack.id,
312
- });
313
- return processedTrack;
163
+ // Start monitoring audio levels
164
+ this.startMonitoring(participantId);
314
165
  }
315
166
  startMonitoring(participantId) {
316
- if (!this.noiseGateEnabled) {
317
- return;
318
- }
319
167
  const nodes = this.participantNodes.get(participantId);
320
168
  if (!nodes)
321
169
  return;
322
- const { analyser, stream, noiseGate } = nodes;
170
+ const { analyser, stream } = nodes;
323
171
  const dataArray = new Uint8Array(analyser.frequencyBinCount);
324
- let lastTrackLog = 0;
325
172
  // Clear any existing interval for this participant
326
173
  if (this.monitoringIntervals.has(participantId)) {
327
174
  clearInterval(this.monitoringIntervals.get(participantId));
@@ -334,48 +181,16 @@ class SpatialAudioManager extends EventManager_1.EventManager {
334
181
  }
335
182
  const average = sum / dataArray.length;
336
183
  const audioLevel = (average / 128) * 255; // Scale to 0-255
337
- const normalizedLevel = audioLevel / 255;
338
- const stability = this.stabilityState.get(participantId);
339
- if (stability) {
340
- const smoothing = 0.2;
341
- stability.smoothedLevel =
342
- stability.smoothedLevel * (1 - smoothing) + normalizedLevel * smoothing;
343
- const gateOpenThreshold = 0.028; // tuned for speech presence
344
- const gateCloseThreshold = 0.012;
345
- const noiseFloorGain = 0.12;
346
- let targetGain = stability.targetGain;
347
- if (stability.networkMuted) {
348
- targetGain = 0;
349
- }
350
- else if (stability.smoothedLevel < gateCloseThreshold) {
351
- targetGain = 0;
352
- }
353
- else if (stability.smoothedLevel < gateOpenThreshold) {
354
- targetGain = noiseFloorGain;
355
- }
356
- else {
357
- targetGain = 1;
358
- }
359
- if (Math.abs(targetGain - stability.targetGain) > 0.05) {
360
- const ramp = targetGain > stability.targetGain ? 0.04 : 0.18;
361
- noiseGate.gain.setTargetAtTime(targetGain, this.audioContext.currentTime, ramp);
362
- stability.targetGain = targetGain;
363
- }
364
- if (Math.random() < 0.05) {
365
- console.log(`🎚️ [NoiseGate] ${participantId}`, {
366
- level: stability.smoothedLevel.toFixed(3),
367
- gain: stability.targetGain.toFixed(2),
368
- });
369
- }
370
- }
371
- if (audioLevel < 1.0 && Math.random() < 0.2) {
184
+ console.log(`📊 Audio level for ${participantId}: ${audioLevel.toFixed(2)} (0-255 scale)`);
185
+ if (audioLevel < 1.0) {
372
186
  console.warn(`⚠️ NO AUDIO DATA detected for ${participantId}! Track may be silent or not transmitting.`);
187
+ console.info(`💡 Check: 1) Is microphone unmuted? 2) Is correct mic selected? 3) Is mic working in system settings?`);
373
188
  }
374
- if (Date.now() - lastTrackLog > 2000) {
375
- lastTrackLog = Date.now();
189
+ // Check track status after 2 seconds
190
+ setTimeout(() => {
376
191
  const track = stream.getAudioTracks()[0];
377
192
  if (track) {
378
- console.log(`🔊 Audio track status for ${participantId}:`, {
193
+ console.log(`🔊 Audio track status after 2s for ${participantId}:`, {
379
194
  trackEnabled: track.enabled,
380
195
  trackMuted: track.muted,
381
196
  trackReadyState: track.readyState,
@@ -387,23 +202,10 @@ class SpatialAudioManager extends EventManager_1.EventManager {
387
202
  },
388
203
  });
389
204
  }
390
- }
391
- }, 250); // Adaptive monitoring ~4x per second
205
+ }, 2000);
206
+ }, 2000); // Log every 2 seconds
392
207
  this.monitoringIntervals.set(participantId, interval);
393
208
  }
394
- handleTrackStability(participantId, muted) {
395
- if (!this.noiseGateEnabled) {
396
- return;
397
- }
398
- const nodes = this.participantNodes.get(participantId);
399
- if (!nodes)
400
- return;
401
- const stability = this.stabilityState.get(participantId);
402
- if (stability) {
403
- stability.networkMuted = muted;
404
- }
405
- nodes.noiseGate.gain.setTargetAtTime(muted ? 0 : 1, this.audioContext.currentTime, muted ? 0.05 : 0.2);
406
- }
407
209
  /**
408
210
  * Update spatial audio position and orientation for a participant
409
211
  *
@@ -587,18 +389,11 @@ class SpatialAudioManager extends EventManager_1.EventManager {
587
389
  nodes.panner.disconnect();
588
390
  nodes.analyser.disconnect();
589
391
  nodes.gain.disconnect();
590
- nodes.noiseGate.disconnect();
591
392
  if (nodes.denoiseNode) {
592
393
  nodes.denoiseNode.disconnect();
593
394
  }
594
- const track = nodes.stream.getAudioTracks()[0];
595
- if (track) {
596
- track.onmute = null;
597
- track.onunmute = null;
598
- }
599
395
  nodes.stream.getTracks().forEach((track) => track.stop());
600
396
  this.participantNodes.delete(participantId);
601
- this.stabilityState.delete(participantId);
602
397
  console.log(`πŸ—‘οΈ Removed participant ${participantId} from spatial audio.`);
603
398
  }
604
399
  }
@@ -653,7 +448,7 @@ class SpatialAudioManager extends EventManager_1.EventManager {
653
448
  const normalized = (distance - config.refDistance) /
654
449
  Math.max(config.maxDistance - config.refDistance, 0.001);
655
450
  const shaped = Math.pow(Math.max(0, 1 - normalized), Math.max(1.2, config.rolloffFactor * 1.05));
656
- return Math.min(1, Math.max(0.001, shaped));
451
+ return Math.min(1, Math.max(0.01, shaped));
657
452
  }
658
453
  normalizePositionUnits(position) {
659
454
  const distanceConfig = this.getDistanceConfig();
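
The retuned gain floor above (0.001 → 0.01) is easiest to see in isolation. This sketch mirrors the `calculateDistanceGain` body shown in the hunk, with the distance config passed in explicitly and example values that are purely illustrative:

```ts
interface DistanceConfig {
  refDistance: number;
  maxDistance: number;
  rolloffFactor: number;
}

// Linear-normalised distance, shaped by a power curve, then clamped so that a
// far-away participant settles at 1% gain instead of vanishing entirely.
function distanceGain(distance: number, config: DistanceConfig): number {
  const normalized =
    (distance - config.refDistance) /
    Math.max(config.maxDistance - config.refDistance, 0.001);
  const shaped = Math.pow(
    Math.max(0, 1 - normalized),
    Math.max(1.2, config.rolloffFactor * 1.05)
  );
  return Math.min(1, Math.max(0.01, shaped));
}

// Example: roughly halfway between refDistance and maxDistance on an illustrative config.
console.log(distanceGain(20, { refDistance: 1, maxDistance: 40, rolloffFactor: 1.4 }));
```
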
@@ -681,79 +476,11 @@ class SpatialAudioManager extends EventManager_1.EventManager {
681
476
  isDenoiserEnabled() {
682
477
  return this.options.denoiser?.enabled !== false;
683
478
  }
684
- async applyHardwareNoiseConstraints(track) {
685
- try {
686
- await track.applyConstraints({
687
- echoCancellation: true,
688
- noiseSuppression: true,
689
- autoGainControl: true,
690
- advanced: [
691
- {
692
- echoCancellation: true,
693
- noiseSuppression: true,
694
- autoGainControl: true,
695
- googEchoCancellation: true,
696
- googNoiseSuppression: true,
697
- googAutoGainControl: true,
698
- googHighpassFilter: true,
699
- googTypingNoiseDetection: true,
700
- },
701
- ],
702
- });
703
- }
704
- catch (error) {
705
- console.warn("⚠️ Unable to apply hardware audio constraints", error);
706
- }
707
- track.contentHint = "speech";
708
- }
709
- startOutboundMonitor(processorId, analyser, gate) {
710
- const dataArray = new Uint8Array(analyser.fftSize);
711
- let smoothedLevel = 0;
712
- return setInterval(() => {
713
- analyser.getByteTimeDomainData(dataArray);
714
- let sum = 0;
715
- for (const value of dataArray) {
716
- sum += Math.abs(value - 128);
717
- }
718
- const level = (sum / dataArray.length) / 128;
719
- smoothedLevel = smoothedLevel * 0.7 + level * 0.3;
720
- let targetGain = 1;
721
- if (smoothedLevel < 0.02) {
722
- targetGain = 0;
723
- }
724
- else if (smoothedLevel < 0.05) {
725
- targetGain = 0.45;
726
- }
727
- else {
728
- targetGain = 1;
729
- }
730
- gate.gain.setTargetAtTime(targetGain, gate.context.currentTime, targetGain > gate.gain.value ? 0.02 : 0.08);
731
- if (Math.random() < 0.03) {
732
- console.log("🎚️ [SDK] Outgoing gate", {
733
- processorId,
734
- level: smoothedLevel.toFixed(3),
735
- gain: targetGain.toFixed(2),
736
- });
737
- }
738
- }, 200);
739
- }
740
- cleanupOutboundProcessor(processorId) {
741
- const processor = this.outgoingProcessors.get(processorId);
742
- if (!processor)
743
- return;
744
- clearInterval(processor.monitor);
745
- processor.processedTrack.removeEventListener("ended", processor.cleanupListener);
746
- processor.originalTrack.removeEventListener("ended", processor.cleanupListener);
747
- processor.destinationStream.getTracks().forEach((t) => t.stop());
748
- processor.sourceStream.getTracks().forEach((t) => t.stop());
749
- processor.context.close();
750
- this.outgoingProcessors.delete(processorId);
751
- }
752
- async ensureDenoiseWorklet(targetContext = this.audioContext) {
479
+ async ensureDenoiseWorklet() {
753
480
  if (!this.isDenoiserEnabled()) {
754
481
  return;
755
482
  }
756
- if (!("audioWorklet" in targetContext)) {
483
+ if (!("audioWorklet" in this.audioContext)) {
757
484
  console.warn("⚠️ AudioWorklet not supported in this browser. Disabling denoiser.");
758
485
  this.options.denoiser = {
759
486
  ...(this.options.denoiser || {}),
@@ -761,9 +488,8 @@ class SpatialAudioManager extends EventManager_1.EventManager {
761
488
  };
762
489
  return;
763
490
  }
764
- const existingPromise = this.denoiseContextPromises.get(targetContext);
765
- if (existingPromise) {
766
- return existingPromise;
491
+ if (this.denoiseWorkletReady) {
492
+ return this.denoiseWorkletReady;
767
493
  }
768
494
  const processorSource = `class OdysseyDenoiseProcessor extends AudioWorkletProcessor {
769
495
  constructor(options) {
@@ -820,13 +546,11 @@ class SpatialAudioManager extends EventManager_1.EventManager {
820
546
 
821
547
  registerProcessor('odyssey-denoise', OdysseyDenoiseProcessor);
822
548
  `;
823
- if (!this.denoiseWorkletUrl) {
824
- const blob = new Blob([processorSource], {
825
- type: "application/javascript",
826
- });
827
- this.denoiseWorkletUrl = URL.createObjectURL(blob);
828
- }
829
- const promise = targetContext.audioWorklet
549
+ const blob = new Blob([processorSource], {
550
+ type: "application/javascript",
551
+ });
552
+ this.denoiseWorkletUrl = URL.createObjectURL(blob);
553
+ this.denoiseWorkletReady = this.audioContext.audioWorklet
830
554
  .addModule(this.denoiseWorkletUrl)
831
555
  .catch((error) => {
832
556
  console.error("❌ Failed to register denoise worklet", error);
@@ -836,8 +560,7 @@ registerProcessor('odyssey-denoise', OdysseyDenoiseProcessor);
836
560
  };
837
561
  throw error;
838
562
  });
839
- this.denoiseContextPromises.set(targetContext, promise);
840
- return promise;
563
+ return this.denoiseWorkletReady;
841
564
  }
842
565
  resolveOptions(options) {
843
566
  const distanceDefaults = {
@@ -852,12 +575,6 @@ registerProcessor('odyssey-denoise', OdysseyDenoiseProcessor);
852
575
  noiseFloor: 0.004,
853
576
  release: 0.18,
854
577
  };
855
- const noiseGateDefaults = {
856
- enabled: true,
857
- };
858
- const outboundDefaults = {
859
- enabled: false,
860
- };
861
578
  return {
862
579
  distance: {
863
580
  refDistance: options?.distance?.refDistance ?? distanceDefaults.refDistance,
@@ -871,12 +588,6 @@ registerProcessor('odyssey-denoise', OdysseyDenoiseProcessor);
871
588
  noiseFloor: options?.denoiser?.noiseFloor ?? denoiserDefaults.noiseFloor,
872
589
  release: options?.denoiser?.release ?? denoiserDefaults.release,
873
590
  },
874
- noiseGate: {
875
- enabled: options?.noiseGate?.enabled ?? noiseGateDefaults.enabled,
876
- },
877
- outboundTuning: {
878
- enabled: options?.outboundTuning?.enabled ?? outboundDefaults.enabled,
879
- },
880
591
  };
881
592
  }
882
593
  }
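
The `ensureDenoiseWorklet` rewrite above replaces the per-context `WeakMap` of promises with a single memoised registration against the manager's own `AudioContext`. Reduced to its essentials (processor source and option handling elided, names kept as in the diff), the pattern looks like this:

```ts
class DenoiseWorkletLoader {
  private denoiseWorkletReady: Promise<void> | null = null;
  private denoiseWorkletUrl?: string;

  constructor(private readonly audioContext: AudioContext) {}

  // Register the worklet module once; concurrent callers share one promise and a
  // failed registration is surfaced to all of them.
  async ensureDenoiseWorklet(processorSource: string): Promise<void> {
    if (this.denoiseWorkletReady) {
      return this.denoiseWorkletReady;
    }
    const blob = new Blob([processorSource], { type: "application/javascript" });
    this.denoiseWorkletUrl = URL.createObjectURL(blob);
    this.denoiseWorkletReady = this.audioContext.audioWorklet
      .addModule(this.denoiseWorkletUrl)
      .catch((error) => {
        console.error("Failed to register denoise worklet", error);
        throw error;
      });
    return this.denoiseWorkletReady;
  }
}
```
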
package/dist/index.d.ts CHANGED
@@ -26,7 +26,6 @@ export declare class OdysseySpatialComms extends EventManager {
26
26
  }): Promise<Participant>;
27
27
  leaveRoom(): void;
28
28
  resumeAudio(): Promise<void>;
29
- enhanceOutgoingAudioTrack(track: MediaStreamTrack): Promise<MediaStreamTrack>;
30
29
  getAudioContextState(): AudioContextState;
31
30
  produceTrack(track: MediaStreamTrack): Promise<any>;
32
31
  updatePosition(position: Position, direction: Direction, spatialData?: {
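
With `enhanceOutgoingAudioTrack` removed from the public surface, the 1.0.17 uplink path is simply getUserMedia followed by `produceTrack`. A sketch of that flow; the constraint set is an assumption standing in for the tuning the SDK no longer applies:

```ts
import type { OdysseySpatialComms } from "@newgameplusinc/odyssey-audio-video-sdk-dev";

async function publishMicrophone(sdk: OdysseySpatialComms): Promise<void> {
  // Assumption: these are plain getUserMedia constraints, not SDK options; they
  // roughly stand in for the hardware flags the removed outbound path requested.
  const stream = await navigator.mediaDevices.getUserMedia({
    audio: { echoCancellation: true, noiseSuppression: true, autoGainControl: true },
  });
  const micTrack = stream.getAudioTracks()[0];
  micTrack.contentHint = "speech"; // optional; previously set inside enhanceOutgoingAudioTrack

  // In 1.0.17 the raw track goes straight to the SFU.
  await sdk.produceTrack(micTrack);
}
```
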
package/dist/index.js CHANGED
@@ -121,9 +121,6 @@ class OdysseySpatialComms extends EventManager_1.EventManager {
121
121
  async resumeAudio() {
122
122
  await this.spatialAudioManager.resumeAudioContext();
123
123
  }
124
- async enhanceOutgoingAudioTrack(track) {
125
- return this.spatialAudioManager.enhanceOutgoingAudioTrack(track);
126
- }
127
124
  getAudioContextState() {
128
125
  return this.spatialAudioManager.getAudioContextState();
129
126
  }
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "@newgameplusinc/odyssey-audio-video-sdk-dev",
3
- "version": "1.0.16",
3
+ "version": "1.0.17",
4
4
  "description": "Odyssey Spatial Audio & Video SDK using MediaSoup for real-time communication",
5
5
  "main": "dist/index.js",
6
6
  "types": "dist/index.d.ts",