@tensamin/audio 0.1.1 → 0.1.3

This diff shows the published contents of the two package versions as they appear in their public registry, and is provided for informational purposes only.
Files changed (57)
  1. package/README.md +50 -3
  2. package/dist/chunk-6P2RDBW5.mjs +47 -0
  3. package/dist/chunk-EXH2PNUE.mjs +212 -0
  4. package/{src/vad/vad-state.ts → dist/chunk-JJASCVEW.mjs} +21 -33
  5. package/dist/chunk-OZ7KMC4S.mjs +46 -0
  6. package/dist/chunk-R5JVHKWA.mjs +98 -0
  7. package/dist/chunk-WBQAMGXK.mjs +0 -0
  8. package/dist/chunk-XMTQPMQ6.mjs +91 -0
  9. package/dist/chunk-XO6B3D4A.mjs +67 -0
  10. package/dist/context/audio-context.d.mts +32 -0
  11. package/dist/context/audio-context.d.ts +32 -0
  12. package/dist/context/audio-context.js +75 -0
  13. package/dist/context/audio-context.mjs +16 -0
  14. package/dist/extensibility/plugins.d.mts +9 -0
  15. package/dist/extensibility/plugins.d.ts +9 -0
  16. package/dist/extensibility/plugins.js +238 -0
  17. package/dist/extensibility/plugins.mjs +14 -0
  18. package/dist/index.d.mts +10 -216
  19. package/dist/index.d.ts +10 -216
  20. package/dist/index.js +298 -80
  21. package/dist/index.mjs +29 -352
  22. package/dist/livekit/integration.d.mts +11 -0
  23. package/dist/livekit/integration.d.ts +11 -0
  24. package/dist/livekit/integration.js +585 -0
  25. package/dist/livekit/integration.mjs +12 -0
  26. package/dist/noise-suppression/rnnoise-node.d.mts +10 -0
  27. package/dist/noise-suppression/rnnoise-node.d.ts +10 -0
  28. package/dist/noise-suppression/rnnoise-node.js +101 -0
  29. package/dist/noise-suppression/rnnoise-node.mjs +6 -0
  30. package/dist/pipeline/audio-pipeline.d.mts +6 -0
  31. package/dist/pipeline/audio-pipeline.d.ts +6 -0
  32. package/dist/pipeline/audio-pipeline.js +499 -0
  33. package/dist/pipeline/audio-pipeline.mjs +11 -0
  34. package/dist/types.d.mts +155 -0
  35. package/dist/types.d.ts +155 -0
  36. package/dist/types.js +18 -0
  37. package/dist/types.mjs +1 -0
  38. package/dist/vad/vad-node.d.mts +9 -0
  39. package/dist/vad/vad-node.d.ts +9 -0
  40. package/dist/vad/vad-node.js +122 -0
  41. package/dist/vad/vad-node.mjs +6 -0
  42. package/dist/vad/vad-state.d.mts +15 -0
  43. package/dist/vad/vad-state.d.ts +15 -0
  44. package/dist/vad/vad-state.js +83 -0
  45. package/dist/vad/vad-state.mjs +6 -0
  46. package/package.json +8 -5
  47. package/.github/workflows/publish.yml +0 -29
  48. package/bun.lock +0 -258
  49. package/src/context/audio-context.ts +0 -69
  50. package/src/extensibility/plugins.ts +0 -45
  51. package/src/index.ts +0 -8
  52. package/src/livekit/integration.ts +0 -61
  53. package/src/noise-suppression/rnnoise-node.ts +0 -62
  54. package/src/pipeline/audio-pipeline.ts +0 -154
  55. package/src/types.ts +0 -167
  56. package/src/vad/vad-node.ts +0 -78
  57. package/tsconfig.json +0 -46
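
Most of the churn above is the switch from shipping raw src/ TypeScript to bundled dist/ output (CJS, ESM, and .d.ts/.d.mts per entry point), plus the removal of the publish workflow and lockfile. The largest new file, the LiveKit integration bundle, is expanded below. For orientation, here is a minimal usage sketch of the attachProcessingToTrack entry point it exports; the subpath import "@tensamin/audio/livekit/integration" is an assumption (the real export map sits in the unexpanded package.json change), livekit-client's LocalAudioTrack is the expected track type, and the asset URLs are illustrative placeholders. Option names mirror the config object consumed by createAudioPipeline in the bundle.

import { attachProcessingToTrack } from "@tensamin/audio/livekit/integration"; // assumed subpath; check the package "exports" map
import type { LocalAudioTrack } from "livekit-client";

async function enableProcessing(track: LocalAudioTrack) {
  const pipeline = await attachProcessingToTrack(track, {
    noiseSuppression: {
      enabled: true,
      // Assets from @sapphi-red/web-noise-suppressor, hosted by the app; URLs are illustrative.
      wasmUrl: "/audio/rnnoise.wasm",
      simdUrl: "/audio/rnnoise_simd.wasm",
      workletUrl: "/audio/rnnoise-worklet.js",
    },
    vad: { enabled: true, startThreshold: 0.5, stopThreshold: 0.4, hangoverMs: 300, preRollMs: 200 },
    livekit: { manageTrackMute: false },
  });

  // The returned pipeline exposes a mitt emitter; "vadChange" fires when the speaking
  // state flips or the probability moves noticeably (see createAudioPipeline below).
  pipeline.events.on("vadChange", (state) => {
    console.log("speaking:", state.isSpeaking, "p =", state.probability.toFixed(2));
  });

  return pipeline; // pipeline.dispose() later restores the original track while it is still live
}
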
package/dist/livekit/integration.js
@@ -0,0 +1,585 @@
+ "use strict";
+ var __create = Object.create;
+ var __defProp = Object.defineProperty;
+ var __getOwnPropDesc = Object.getOwnPropertyDescriptor;
+ var __getOwnPropNames = Object.getOwnPropertyNames;
+ var __getProtoOf = Object.getPrototypeOf;
+ var __hasOwnProp = Object.prototype.hasOwnProperty;
+ var __export = (target, all) => {
+   for (var name in all)
+     __defProp(target, name, { get: all[name], enumerable: true });
+ };
+ var __copyProps = (to, from, except, desc) => {
+   if (from && typeof from === "object" || typeof from === "function") {
+     for (let key of __getOwnPropNames(from))
+       if (!__hasOwnProp.call(to, key) && key !== except)
+         __defProp(to, key, { get: () => from[key], enumerable: !(desc = __getOwnPropDesc(from, key)) || desc.enumerable });
+   }
+   return to;
+ };
+ var __toESM = (mod, isNodeMode, target) => (target = mod != null ? __create(__getProtoOf(mod)) : {}, __copyProps(
+   // If the importer is in node compatibility mode or this is not an ESM
+   // file that has been converted to a CommonJS file using a Babel-
+   // compatible transform (i.e. "__esModule" has not been set), then set
+   // "default" to the CommonJS "module.exports" for node compatibility.
+   isNodeMode || !mod || !mod.__esModule ? __defProp(target, "default", { value: mod, enumerable: true }) : target,
+   mod
+ ));
+ var __toCommonJS = (mod) => __copyProps(__defProp({}, "__esModule", { value: true }), mod);
+
+ // src/livekit/integration.ts
+ var integration_exports = {};
+ __export(integration_exports, {
+   attachProcessingToTrack: () => attachProcessingToTrack
+ });
+ module.exports = __toCommonJS(integration_exports);
+
+ // src/pipeline/audio-pipeline.ts
+ var import_mitt = __toESM(require("mitt"));
+
+ // src/context/audio-context.ts
+ var sharedContext = null;
+ var activePipelines = 0;
+ function getAudioContext(options) {
+   if (typeof window === "undefined" || typeof AudioContext === "undefined") {
+     throw new Error(
+       "AudioContext is not supported in this environment (browser only)."
+     );
+   }
+   if (!sharedContext || sharedContext.state === "closed") {
+     sharedContext = new AudioContext(options);
+   }
+   return sharedContext;
+ }
+ function registerPipeline() {
+   activePipelines++;
+ }
+ function unregisterPipeline() {
+   activePipelines = Math.max(0, activePipelines - 1);
+ }
+
+ // src/noise-suppression/rnnoise-node.ts
+ var RNNoisePlugin = class {
+   name = "rnnoise-ns";
+   wasmBuffer = null;
+   async createNode(context, config) {
+     const { loadRnnoise, RnnoiseWorkletNode } = await import("@sapphi-red/web-noise-suppressor");
+     if (!config?.enabled) {
+       console.log("Noise suppression disabled, using passthrough node");
+       const pass = context.createGain();
+       return pass;
+     }
+     if (!config?.wasmUrl || !config?.simdUrl || !config?.workletUrl) {
+       const error = new Error(
+         `RNNoisePlugin requires 'wasmUrl', 'simdUrl', and 'workletUrl' to be configured. Please download the assets from @sapphi-red/web-noise-suppressor and provide the URLs in the config. Current config: wasmUrl=${config?.wasmUrl}, simdUrl=${config?.simdUrl}, workletUrl=${config?.workletUrl}
+ To disable noise suppression, set noiseSuppression.enabled to false.`
+       );
+       console.error(error.message);
+       throw error;
+     }
+     try {
+       if (!this.wasmBuffer) {
+         console.log("Loading RNNoise WASM binary...");
+         this.wasmBuffer = await loadRnnoise({
+           url: config.wasmUrl,
+           simdUrl: config.simdUrl
+         });
+         console.log("RNNoise WASM loaded successfully");
+       }
+     } catch (error) {
+       const err = new Error(
+         `Failed to load RNNoise WASM binary: ${error instanceof Error ? error.message : String(error)}`
+       );
+       console.error(err);
+       throw err;
+     }
+     const workletUrl = config.workletUrl;
+     try {
+       await context.audioWorklet.addModule(workletUrl);
+       console.log("RNNoise worklet loaded successfully");
+     } catch (e) {
+       const error = new Error(
+         `Failed to load RNNoise worklet from ${workletUrl}: ${e instanceof Error ? e.message : String(e)}. Ensure the workletUrl points to a valid RNNoise worklet script.`
+       );
+       console.error(error.message);
+       throw error;
+     }
+     try {
+       const node = new RnnoiseWorkletNode(context, {
+         wasmBinary: this.wasmBuffer,
+         maxChannels: 1
+         // Mono for now
+       });
+       console.log("RNNoise worklet node created successfully");
+       return node;
+     } catch (error) {
+       const err = new Error(
+         `Failed to create RNNoise worklet node: ${error instanceof Error ? error.message : String(error)}`
+       );
+       console.error(err);
+       throw err;
+     }
+   }
+ };
+
+ // src/vad/vad-node.ts
+ var energyVadWorkletCode = `
+ class EnergyVadProcessor extends AudioWorkletProcessor {
+   constructor() {
+     super();
+     this.smoothing = 0.95;
+     this.energy = 0;
+     this.noiseFloor = 0.001;
+   }
+
+   process(inputs, outputs, parameters) {
+     const input = inputs[0];
+     if (!input || !input.length) return true;
+     const channel = input[0];
+
+     // Calculate RMS
+     let sum = 0;
+     for (let i = 0; i < channel.length; i++) {
+       sum += channel[i] * channel[i];
+     }
+     const rms = Math.sqrt(sum / channel.length);
+
+     // Simple adaptive noise floor (very basic)
+     if (rms < this.noiseFloor) {
+       this.noiseFloor = this.noiseFloor * 0.99 + rms * 0.01;
+     } else {
+       this.noiseFloor = this.noiseFloor * 0.999 + rms * 0.001;
+     }
+
+     // Calculate "probability" based on SNR
+     // This is a heuristic mapping from energy to 0-1
+     const snr = rms / (this.noiseFloor + 1e-6);
+     const probability = Math.min(1, Math.max(0, (snr - 1.5) / 10)); // Arbitrary scaling
+
+     this.port.postMessage({ probability });
+
+     return true;
+   }
+ }
+ registerProcessor('energy-vad-processor', EnergyVadProcessor);
+ `;
+ var EnergyVADPlugin = class {
+   name = "energy-vad";
+   async createNode(context, config, onDecision) {
+     if (!config?.enabled) {
+       console.log("VAD disabled, using passthrough node");
+       const pass = context.createGain();
+       return pass;
+     }
+     const blob = new Blob([energyVadWorkletCode], {
+       type: "application/javascript"
+     });
+     const url = URL.createObjectURL(blob);
+     try {
+       await context.audioWorklet.addModule(url);
+       console.log("Energy VAD worklet loaded successfully");
+     } catch (e) {
+       const error = new Error(
+         `Failed to load Energy VAD worklet: ${e instanceof Error ? e.message : String(e)}`
+       );
+       console.error(error.message);
+       URL.revokeObjectURL(url);
+       throw error;
+     }
+     URL.revokeObjectURL(url);
+     let node;
+     try {
+       node = new AudioWorkletNode(context, "energy-vad-processor");
+       console.log("Energy VAD node created successfully");
+     } catch (e) {
+       const error = new Error(
+         `Failed to create Energy VAD node: ${e instanceof Error ? e.message : String(e)}`
+       );
+       console.error(error.message);
+       throw error;
+     }
+     node.port.onmessage = (event) => {
+       try {
+         const { probability } = event.data;
+         if (typeof probability === "number" && !isNaN(probability)) {
+           onDecision(probability);
+         } else {
+           console.warn("Invalid VAD probability received:", event.data);
+         }
+       } catch (error) {
+         console.error("Error in VAD message handler:", error);
+       }
+     };
+     node.port.onmessageerror = (event) => {
+       console.error("VAD port message error:", event);
+     };
+     return node;
+   }
+ };
+
+ // src/extensibility/plugins.ts
+ var nsPlugins = /* @__PURE__ */ new Map();
+ var vadPlugins = /* @__PURE__ */ new Map();
+ var defaultNs = new RNNoisePlugin();
+ nsPlugins.set(defaultNs.name, defaultNs);
+ var defaultVad = new EnergyVADPlugin();
+ vadPlugins.set(defaultVad.name, defaultVad);
+ function getNoiseSuppressionPlugin(name) {
+   if (!name) return defaultNs;
+   const plugin = nsPlugins.get(name);
+   if (!plugin) {
+     console.warn(
+       `Noise suppression plugin '${name}' not found, falling back to default.`
+     );
+     return defaultNs;
+   }
+   return plugin;
+ }
+ function getVADPlugin(name) {
+   if (!name) return defaultVad;
+   const plugin = vadPlugins.get(name);
+   if (!plugin) {
+     console.warn(`VAD plugin '${name}' not found, falling back to default.`);
+     return defaultVad;
+   }
+   return plugin;
+ }
+
+ // src/vad/vad-state.ts
+ var VADStateMachine = class {
+   config;
+   currentState = "silent";
+   lastSpeechTime = 0;
+   speechStartTime = 0;
+   frameDurationMs = 20;
+   // Assumed frame duration, updated by calls
+   constructor(config) {
+     this.config = {
+       enabled: config?.enabled ?? true,
+       pluginName: config?.pluginName ?? "energy-vad",
+       startThreshold: config?.startThreshold ?? 0.5,
+       stopThreshold: config?.stopThreshold ?? 0.4,
+       hangoverMs: config?.hangoverMs ?? 300,
+       preRollMs: config?.preRollMs ?? 200
+     };
+   }
+   updateConfig(config) {
+     this.config = { ...this.config, ...config };
+   }
+   processFrame(probability, timestamp) {
+     const { startThreshold, stopThreshold, hangoverMs } = this.config;
+     let newState = this.currentState;
+     if (this.currentState === "silent" || this.currentState === "speech_ending") {
+       if (probability >= startThreshold) {
+         newState = "speech_starting";
+         this.speechStartTime = timestamp;
+         this.lastSpeechTime = timestamp;
+       } else {
+         newState = "silent";
+       }
+     } else if (this.currentState === "speech_starting" || this.currentState === "speaking") {
+       if (probability >= stopThreshold) {
+         newState = "speaking";
+         this.lastSpeechTime = timestamp;
+       } else {
+         const timeSinceSpeech = timestamp - this.lastSpeechTime;
+         if (timeSinceSpeech < hangoverMs) {
+           newState = "speaking";
+         } else {
+           newState = "speech_ending";
+         }
+       }
+     }
+     if (newState === "speech_starting") newState = "speaking";
+     if (newState === "speech_ending") newState = "silent";
+     this.currentState = newState;
+     return {
+       isSpeaking: newState === "speaking",
+       probability,
+       state: newState
+     };
+   }
+ };
+
+ // src/pipeline/audio-pipeline.ts
+ async function createAudioPipeline(sourceTrack, config = {}) {
+   const context = getAudioContext();
+   registerPipeline();
+   const nsEnabled = config.noiseSuppression?.enabled !== false && Boolean(config.noiseSuppression?.wasmUrl && config.noiseSuppression?.simdUrl && config.noiseSuppression?.workletUrl);
+   const vadEnabled = config.vad?.enabled !== false;
+   const fullConfig = {
+     noiseSuppression: {
+       enabled: nsEnabled,
+       ...config.noiseSuppression
+     },
+     vad: {
+       enabled: vadEnabled,
+       ...config.vad
+     },
+     output: {
+       speechGain: 1,
+       silenceGain: vadEnabled ? 0 : 1,
+       // If no VAD, always output audio
+       gainRampTime: 0.02,
+       ...config.output
+     },
+     livekit: { manageTrackMute: false, ...config.livekit }
+   };
+   console.log("Audio pipeline config:", {
+     noiseSuppression: fullConfig.noiseSuppression?.enabled,
+     vad: fullConfig.vad?.enabled,
+     output: fullConfig.output
+   });
+   if (!sourceTrack || sourceTrack.kind !== "audio") {
+     throw new Error("createAudioPipeline requires a valid audio MediaStreamTrack");
+   }
+   if (sourceTrack.readyState === "ended") {
+     throw new Error("Cannot create pipeline from an ended MediaStreamTrack");
+   }
+   const sourceStream = new MediaStream([sourceTrack]);
+   const sourceNode = context.createMediaStreamSource(sourceStream);
+   let nsNode;
+   let vadNode;
+   const emitter = (0, import_mitt.default)();
+   try {
+     const nsPlugin = getNoiseSuppressionPlugin(
+       fullConfig.noiseSuppression?.pluginName
+     );
+     nsNode = await nsPlugin.createNode(
+       context,
+       fullConfig.noiseSuppression
+     );
+   } catch (error) {
+     const err = error instanceof Error ? error : new Error(String(error));
+     console.error("Failed to create noise suppression node:", err);
+     emitter.emit("error", err);
+     throw err;
+   }
+   const vadStateMachine = new VADStateMachine(fullConfig.vad);
+   try {
+     const vadPlugin = getVADPlugin(fullConfig.vad?.pluginName);
+     vadNode = await vadPlugin.createNode(
+       context,
+       fullConfig.vad,
+       (prob) => {
+         try {
+           const timestamp = context.currentTime * 1e3;
+           const newState = vadStateMachine.processFrame(prob, timestamp);
+           if (newState.state !== lastVadState.state || Math.abs(newState.probability - lastVadState.probability) > 0.1) {
+             emitter.emit("vadChange", newState);
+             lastVadState = newState;
+             updateGain(newState);
+           }
+         } catch (vadError) {
+           const err = vadError instanceof Error ? vadError : new Error(String(vadError));
+           console.error("Error in VAD callback:", err);
+           emitter.emit("error", err);
+         }
+       }
+     );
+   } catch (error) {
+     const err = error instanceof Error ? error : new Error(String(error));
+     console.error("Failed to create VAD node:", err);
+     emitter.emit("error", err);
+     throw err;
+   }
+   let lastVadState = {
+     isSpeaking: false,
+     probability: 0,
+     state: "silent"
+   };
+   const splitter = context.createGain();
+   sourceNode.connect(nsNode);
+   nsNode.connect(splitter);
+   splitter.connect(vadNode);
+   const delayNode = context.createDelay(1);
+   const preRollSeconds = (fullConfig.vad?.preRollMs ?? 200) / 1e3;
+   delayNode.delayTime.value = preRollSeconds;
+   const gainNode = context.createGain();
+   gainNode.gain.value = fullConfig.output?.silenceGain ?? 0;
+   const destination = context.createMediaStreamDestination();
+   try {
+     splitter.connect(delayNode);
+     delayNode.connect(gainNode);
+     gainNode.connect(destination);
+   } catch (error) {
+     const err = error instanceof Error ? error : new Error(String(error));
+     console.error("Failed to wire audio pipeline:", err);
+     emitter.emit("error", err);
+     throw err;
+   }
+   function updateGain(state) {
+     try {
+       const { speechGain, silenceGain, gainRampTime } = fullConfig.output;
+       const targetGain = state.isSpeaking ? speechGain ?? 1 : silenceGain ?? 0;
+       const now = context.currentTime;
+       gainNode.gain.setTargetAtTime(targetGain, now, gainRampTime ?? 0.02);
+     } catch (error) {
+       const err = error instanceof Error ? error : new Error(String(error));
+       console.error("Failed to update gain:", err);
+       emitter.emit("error", err);
+     }
+   }
+   const audioTracks = destination.stream.getAudioTracks();
+   console.log("Destination stream tracks:", {
+     count: audioTracks.length,
+     tracks: audioTracks.map((t) => ({
+       id: t.id,
+       label: t.label,
+       enabled: t.enabled,
+       readyState: t.readyState
+     }))
+   });
+   if (audioTracks.length === 0) {
+     const err = new Error(
+       "Failed to create processed audio track: destination stream has no audio tracks. This may indicate an issue with the audio graph connection."
+     );
+     console.error(err);
+     emitter.emit("error", err);
+     throw err;
+   }
+   const processedTrack = audioTracks[0];
+   if (!processedTrack || processedTrack.readyState === "ended") {
+     const err = new Error("Processed audio track is invalid or ended");
+     console.error(err);
+     emitter.emit("error", err);
+     throw err;
+   }
+   console.log("Audio pipeline created successfully:", {
+     sourceTrack: {
+       id: sourceTrack.id,
+       label: sourceTrack.label,
+       readyState: sourceTrack.readyState
+     },
+     processedTrack: {
+       id: processedTrack.id,
+       label: processedTrack.label,
+       readyState: processedTrack.readyState
+     },
+     config: {
+       noiseSuppression: fullConfig.noiseSuppression?.enabled,
+       vad: fullConfig.vad?.enabled
+     }
+   });
+   function dispose() {
+     try {
+       sourceNode.disconnect();
+       nsNode.disconnect();
+       splitter.disconnect();
+       vadNode.disconnect();
+       delayNode.disconnect();
+       gainNode.disconnect();
+       destination.stream.getTracks().forEach((t) => t.stop());
+       unregisterPipeline();
+     } catch (error) {
+       console.error("Error during pipeline disposal:", error);
+     }
+   }
+   return {
+     processedTrack,
+     events: emitter,
+     get state() {
+       return lastVadState;
+     },
+     setConfig: (newConfig) => {
+       try {
+         if (newConfig.vad) {
+           vadStateMachine.updateConfig(newConfig.vad);
+         }
+       } catch (error) {
+         const err = error instanceof Error ? error : new Error(String(error));
+         console.error("Failed to update config:", err);
+         emitter.emit("error", err);
+       }
+     },
+     dispose
+   };
+ }
+
+ // src/livekit/integration.ts
+ async function attachProcessingToTrack(track, config = {}) {
+   if (!track) {
+     throw new Error("attachProcessingToTrack requires a valid LocalAudioTrack");
+   }
+   const originalTrack = track.mediaStreamTrack;
+   if (!originalTrack) {
+     throw new Error("LocalAudioTrack has no underlying MediaStreamTrack");
+   }
+   if (originalTrack.readyState === "ended") {
+     throw new Error("Cannot attach processing to an ended MediaStreamTrack");
+   }
+   let pipeline;
+   try {
+     console.log("Creating audio processing pipeline...");
+     pipeline = await createAudioPipeline(originalTrack, config);
+     console.log("Audio processing pipeline created successfully");
+   } catch (error) {
+     const err = new Error(
+       `Failed to create audio pipeline: ${error instanceof Error ? error.message : String(error)}`
+     );
+     console.error(err);
+     throw err;
+   }
+   if (!pipeline.processedTrack) {
+     throw new Error("Pipeline did not return a processed track");
+   }
+   try {
+     console.log("Replacing LiveKit track with processed track...");
+     await track.replaceTrack(pipeline.processedTrack);
+     console.log("LiveKit track replaced successfully");
+   } catch (error) {
+     pipeline.dispose();
+     const err = new Error(
+       `Failed to replace LiveKit track: ${error instanceof Error ? error.message : String(error)}`
+     );
+     console.error(err);
+     throw err;
+   }
+   if (config.livekit?.manageTrackMute) {
+     let isVadMuted = false;
+     pipeline.events.on("vadChange", async (state) => {
+       try {
+         if (state.isSpeaking) {
+           if (isVadMuted) {
+             await track.unmute();
+             isVadMuted = false;
+           }
+         } else {
+           if (!track.isMuted) {
+             await track.mute();
+             isVadMuted = true;
+           }
+         }
+       } catch (error) {
+         console.error("Error handling VAD-based track muting:", error);
+       }
+     });
+   }
+   pipeline.events.on("error", (error) => {
+     console.error("Audio pipeline error:", error);
+   });
+   const originalDispose = pipeline.dispose;
+   pipeline.dispose = () => {
+     try {
+       if (originalTrack.readyState === "live") {
+         console.log("Restoring original track...");
+         track.replaceTrack(originalTrack).catch((error) => {
+           console.error("Failed to restore original track:", error);
+         });
+       }
+       originalDispose();
+     } catch (error) {
+       console.error("Error during pipeline disposal:", error);
+       try {
+         originalDispose();
+       } catch (disposeError) {
+         console.error("Error calling original dispose:", disposeError);
+       }
+     }
+   };
+   return pipeline;
+ }
+ // Annotate the CommonJS export names for ESM import in node:
+ 0 && (module.exports = {
+   attachProcessingToTrack
+ });
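
One behavior worth noting from the bundle above: the returned object's setConfig only forwards the vad section to the state machine's updateConfig, so VAD thresholds can be retuned at runtime while output gains and noise-suppression options stay fixed at creation. A small sketch, assuming a pipeline obtained from attachProcessingToTrack or createAudioPipeline; the values are illustrative.

// Tighten the gate for a noisier environment (illustrative values).
pipeline.setConfig({
  vad: {
    startThreshold: 0.7, // require a higher probability before opening
    stopThreshold: 0.55, // and fall back toward silence sooner
    hangoverMs: 200,
  },
});
// Per the bundle above, only newConfig.vad is applied; other sections are ignored.
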
package/dist/livekit/integration.mjs
@@ -0,0 +1,12 @@
+ import {
+   attachProcessingToTrack
+ } from "../chunk-XMTQPMQ6.mjs";
+ import "../chunk-EXH2PNUE.mjs";
+ import "../chunk-JJASCVEW.mjs";
+ import "../chunk-OZ7KMC4S.mjs";
+ import "../chunk-6P2RDBW5.mjs";
+ import "../chunk-XO6B3D4A.mjs";
+ import "../chunk-R5JVHKWA.mjs";
+ export {
+   attachProcessingToTrack
+ };
package/dist/noise-suppression/rnnoise-node.d.mts
@@ -0,0 +1,10 @@
+ import { NoiseSuppressionPlugin, AudioProcessingConfig } from '../types.mjs';
+ import 'mitt';
+
+ declare class RNNoisePlugin implements NoiseSuppressionPlugin {
+   name: string;
+   private wasmBuffer;
+   createNode(context: AudioContext, config: AudioProcessingConfig["noiseSuppression"]): Promise<AudioNode>;
+ }
+
+ export { RNNoisePlugin };
package/dist/noise-suppression/rnnoise-node.d.ts
@@ -0,0 +1,10 @@
+ import { NoiseSuppressionPlugin, AudioProcessingConfig } from '../types.js';
+ import 'mitt';
+
+ declare class RNNoisePlugin implements NoiseSuppressionPlugin {
+   name: string;
+   private wasmBuffer;
+   createNode(context: AudioContext, config: AudioProcessingConfig["noiseSuppression"]): Promise<AudioNode>;
+ }
+
+ export { RNNoisePlugin };
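
The two declaration hunks above only describe the built-in RNNoisePlugin, but together with the bundled plugins code they suggest the shape a custom suppressor would take: a name plus an async createNode returning an AudioNode. A hypothetical sketch follows; the NoiseSuppressionPlugin interface itself lives in the unexpanded types.d.ts, the root type re-export is assumed, and this diff does not show a public registration hook beyond the internal plugin maps.

import type { NoiseSuppressionPlugin, AudioProcessingConfig } from "@tensamin/audio"; // assumed type re-export

class PassthroughSuppressor implements NoiseSuppressionPlugin {
  name = "passthrough-ns";
  async createNode(
    context: AudioContext,
    _config: AudioProcessingConfig["noiseSuppression"]
  ): Promise<AudioNode> {
    // No actual suppression: a unity-gain node, mirroring RNNoisePlugin's disabled path.
    return context.createGain();
  }
}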