@tensamin/audio 0.1.1 → 0.1.2

This diff shows the contents of publicly available package versions as released to a supported registry. It is provided for informational purposes only and reflects the changes between those versions as they appear in the public registry.
Files changed (56)
  1. package/README.md +24 -2
  2. package/dist/chunk-FS635GMR.mjs +47 -0
  3. package/dist/chunk-HFSKQ33X.mjs +38 -0
  4. package/{src/vad/vad-state.ts → dist/chunk-JJASCVEW.mjs} +21 -33
  5. package/dist/chunk-OZ7KMC4S.mjs +46 -0
  6. package/dist/chunk-QU7E5HBA.mjs +106 -0
  7. package/dist/chunk-SDTOKWM2.mjs +39 -0
  8. package/{src/vad/vad-node.ts → dist/chunk-UMU2KIB6.mjs} +10 -20
  9. package/dist/chunk-WBQAMGXK.mjs +0 -0
  10. package/dist/context/audio-context.d.mts +32 -0
  11. package/dist/context/audio-context.d.ts +32 -0
  12. package/dist/context/audio-context.js +75 -0
  13. package/dist/context/audio-context.mjs +16 -0
  14. package/dist/extensibility/plugins.d.mts +9 -0
  15. package/dist/extensibility/plugins.d.ts +9 -0
  16. package/dist/extensibility/plugins.js +180 -0
  17. package/dist/extensibility/plugins.mjs +14 -0
  18. package/dist/index.d.mts +10 -216
  19. package/dist/index.d.ts +10 -216
  20. package/dist/index.js +11 -10
  21. package/dist/index.mjs +29 -352
  22. package/dist/livekit/integration.d.mts +11 -0
  23. package/dist/livekit/integration.d.ts +11 -0
  24. package/dist/livekit/integration.js +368 -0
  25. package/dist/livekit/integration.mjs +12 -0
  26. package/dist/noise-suppression/rnnoise-node.d.mts +10 -0
  27. package/dist/noise-suppression/rnnoise-node.d.ts +10 -0
  28. package/dist/noise-suppression/rnnoise-node.js +73 -0
  29. package/dist/noise-suppression/rnnoise-node.mjs +6 -0
  30. package/dist/pipeline/audio-pipeline.d.mts +6 -0
  31. package/dist/pipeline/audio-pipeline.d.ts +6 -0
  32. package/dist/pipeline/audio-pipeline.js +335 -0
  33. package/dist/pipeline/audio-pipeline.mjs +11 -0
  34. package/dist/types.d.mts +155 -0
  35. package/dist/types.d.ts +155 -0
  36. package/dist/types.js +18 -0
  37. package/dist/types.mjs +1 -0
  38. package/dist/vad/vad-node.d.mts +9 -0
  39. package/dist/vad/vad-node.d.ts +9 -0
  40. package/dist/vad/vad-node.js +92 -0
  41. package/dist/vad/vad-node.mjs +6 -0
  42. package/dist/vad/vad-state.d.mts +15 -0
  43. package/dist/vad/vad-state.d.ts +15 -0
  44. package/dist/vad/vad-state.js +83 -0
  45. package/dist/vad/vad-state.mjs +6 -0
  46. package/package.json +8 -5
  47. package/.github/workflows/publish.yml +0 -29
  48. package/bun.lock +0 -258
  49. package/src/context/audio-context.ts +0 -69
  50. package/src/extensibility/plugins.ts +0 -45
  51. package/src/index.ts +0 -8
  52. package/src/livekit/integration.ts +0 -61
  53. package/src/noise-suppression/rnnoise-node.ts +0 -62
  54. package/src/pipeline/audio-pipeline.ts +0 -154
  55. package/src/types.ts +0 -167
  56. package/tsconfig.json +0 -46
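A minimal sketch of consuming the package's shared AudioContext helpers, assuming the names visible in the dist/index.mjs diff below (getAudioContext, resumeAudioContext) are re-exported from the package root; the import path and wiring are illustrative, not an exact 0.1.2 reference:

```ts
// Illustrative only: assumes getAudioContext/resumeAudioContext are exported
// from the package root, as the dist/index.mjs diff below suggests.
import { getAudioContext, resumeAudioContext } from "@tensamin/audio";

// Browsers only allow audio after a user gesture, so resume the shared
// AudioContext from a click handler.
document.querySelector("#join")?.addEventListener("click", async () => {
  const ctx = getAudioContext();   // lazily creates (or reuses) the shared context
  await resumeAudioContext();      // no-op unless the context is suspended
  console.log("AudioContext state:", ctx.state);
});
```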
package/dist/index.mjs CHANGED
@@ -1,356 +1,33 @@
- // src/context/audio-context.ts
- var sharedContext = null;
- var activePipelines = 0;
- function getAudioContext(options) {
- if (typeof window === "undefined" || typeof AudioContext === "undefined") {
- throw new Error(
- "AudioContext is not supported in this environment (browser only)."
- );
- }
- if (!sharedContext || sharedContext.state === "closed") {
- sharedContext = new AudioContext(options);
- }
- return sharedContext;
- }
- function registerPipeline() {
- activePipelines++;
- }
- function unregisterPipeline() {
- activePipelines = Math.max(0, activePipelines - 1);
- }
- async function resumeAudioContext() {
- if (sharedContext && sharedContext.state === "suspended") {
- await sharedContext.resume();
- }
- }
- async function suspendAudioContext() {
- if (sharedContext && sharedContext.state === "running") {
- await sharedContext.suspend();
- }
- }
- async function closeAudioContext() {
- if (sharedContext && sharedContext.state !== "closed") {
- await sharedContext.close();
- }
- sharedContext = null;
- activePipelines = 0;
- }
-
- // src/pipeline/audio-pipeline.ts
- import mitt from "mitt";
-
- // src/noise-suppression/rnnoise-node.ts
+ import "./chunk-WBQAMGXK.mjs";
  import {
- RnnoiseWorkletNode,
- loadRnnoise
- } from "@sapphi-red/web-noise-suppressor";
- var DEFAULT_WASM_URL = "https://unpkg.com/@sapphi-red/web-noise-suppressor@0.3.5/dist/rnnoise.wasm";
- var DEFAULT_SIMD_WASM_URL = "https://unpkg.com/@sapphi-red/web-noise-suppressor@0.3.5/dist/rnnoise_simd.wasm";
- var DEFAULT_WORKLET_URL = "https://unpkg.com/@sapphi-red/web-noise-suppressor@0.3.5/dist/noise-suppressor-worklet.min.js";
- var RNNoisePlugin = class {
- name = "rnnoise-ns";
- wasmBuffer = null;
- async createNode(context, config) {
- if (!config?.enabled) {
- const pass = context.createGain();
- return pass;
- }
- if (!this.wasmBuffer) {
- this.wasmBuffer = await loadRnnoise({
- url: config.wasmUrl || DEFAULT_WASM_URL,
- simdUrl: DEFAULT_SIMD_WASM_URL
- // We should probably allow config for this too, but for now default is fine.
- });
- }
- const workletUrl = config.workletUrl || DEFAULT_WORKLET_URL;
- try {
- await context.audioWorklet.addModule(workletUrl);
- } catch (e) {
- console.warn("Failed to add RNNoise worklet module:", e);
- }
- const node = new RnnoiseWorkletNode(context, {
- wasmBinary: this.wasmBuffer,
- maxChannels: 1
- // Mono for now
- });
- return node;
- }
- };
-
- // src/vad/vad-node.ts
- var energyVadWorkletCode = `
- class EnergyVadProcessor extends AudioWorkletProcessor {
- constructor() {
- super();
- this.smoothing = 0.95;
- this.energy = 0;
- this.noiseFloor = 0.001;
- }
-
- process(inputs, outputs, parameters) {
- const input = inputs[0];
- if (!input || !input.length) return true;
- const channel = input[0];
-
- // Calculate RMS
- let sum = 0;
- for (let i = 0; i < channel.length; i++) {
- sum += channel[i] * channel[i];
- }
- const rms = Math.sqrt(sum / channel.length);
-
- // Simple adaptive noise floor (very basic)
- if (rms < this.noiseFloor) {
- this.noiseFloor = this.noiseFloor * 0.99 + rms * 0.01;
- } else {
- this.noiseFloor = this.noiseFloor * 0.999 + rms * 0.001;
- }
-
- // Calculate "probability" based on SNR
- // This is a heuristic mapping from energy to 0-1
- const snr = rms / (this.noiseFloor + 1e-6);
- const probability = Math.min(1, Math.max(0, (snr - 1.5) / 10)); // Arbitrary scaling
-
- this.port.postMessage({ probability });
-
- return true;
- }
- }
- registerProcessor('energy-vad-processor', EnergyVadProcessor);
- `;
- var EnergyVADPlugin = class {
- name = "energy-vad";
- async createNode(context, config, onDecision) {
- const blob = new Blob([energyVadWorkletCode], {
- type: "application/javascript"
- });
- const url = URL.createObjectURL(blob);
- try {
- await context.audioWorklet.addModule(url);
- } catch (e) {
- console.warn("Failed to add Energy VAD worklet:", e);
- throw e;
- } finally {
- URL.revokeObjectURL(url);
- }
- const node = new AudioWorkletNode(context, "energy-vad-processor");
- node.port.onmessage = (event) => {
- const { probability } = event.data;
- onDecision(probability);
- };
- return node;
- }
- };
-
- // src/extensibility/plugins.ts
- var nsPlugins = /* @__PURE__ */ new Map();
- var vadPlugins = /* @__PURE__ */ new Map();
- var defaultNs = new RNNoisePlugin();
- nsPlugins.set(defaultNs.name, defaultNs);
- var defaultVad = new EnergyVADPlugin();
- vadPlugins.set(defaultVad.name, defaultVad);
- function registerNoiseSuppressionPlugin(plugin) {
- nsPlugins.set(plugin.name, plugin);
- }
- function registerVADPlugin(plugin) {
- vadPlugins.set(plugin.name, plugin);
- }
- function getNoiseSuppressionPlugin(name) {
- if (!name) return defaultNs;
- const plugin = nsPlugins.get(name);
- if (!plugin) {
- console.warn(
- `Noise suppression plugin '${name}' not found, falling back to default.`
- );
- return defaultNs;
- }
- return plugin;
- }
- function getVADPlugin(name) {
- if (!name) return defaultVad;
- const plugin = vadPlugins.get(name);
- if (!plugin) {
- console.warn(`VAD plugin '${name}' not found, falling back to default.`);
- return defaultVad;
- }
- return plugin;
- }
-
- // src/vad/vad-state.ts
- var VADStateMachine = class {
- config;
- currentState = "silent";
- lastSpeechTime = 0;
- speechStartTime = 0;
- frameDurationMs = 20;
- // Assumed frame duration, updated by calls
- constructor(config) {
- this.config = {
- enabled: config?.enabled ?? true,
- pluginName: config?.pluginName ?? "energy-vad",
- startThreshold: config?.startThreshold ?? 0.5,
- stopThreshold: config?.stopThreshold ?? 0.4,
- hangoverMs: config?.hangoverMs ?? 300,
- preRollMs: config?.preRollMs ?? 200
- };
- }
- updateConfig(config) {
- this.config = { ...this.config, ...config };
- }
- processFrame(probability, timestamp) {
- const { startThreshold, stopThreshold, hangoverMs } = this.config;
- let newState = this.currentState;
- if (this.currentState === "silent" || this.currentState === "speech_ending") {
- if (probability >= startThreshold) {
- newState = "speech_starting";
- this.speechStartTime = timestamp;
- this.lastSpeechTime = timestamp;
- } else {
- newState = "silent";
- }
- } else if (this.currentState === "speech_starting" || this.currentState === "speaking") {
- if (probability >= stopThreshold) {
- newState = "speaking";
- this.lastSpeechTime = timestamp;
- } else {
- const timeSinceSpeech = timestamp - this.lastSpeechTime;
- if (timeSinceSpeech < hangoverMs) {
- newState = "speaking";
- } else {
- newState = "speech_ending";
- }
- }
- }
- if (newState === "speech_starting") newState = "speaking";
- if (newState === "speech_ending") newState = "silent";
- this.currentState = newState;
- return {
- isSpeaking: newState === "speaking",
- probability,
- state: newState
- };
- }
- };
-
- // src/pipeline/audio-pipeline.ts
- async function createAudioPipeline(sourceTrack, config = {}) {
- const context = getAudioContext();
- registerPipeline();
- const fullConfig = {
- noiseSuppression: { enabled: true, ...config.noiseSuppression },
- vad: { enabled: true, ...config.vad },
- output: {
- speechGain: 1,
- silenceGain: 0,
- gainRampTime: 0.02,
- ...config.output
- },
- livekit: { manageTrackMute: false, ...config.livekit }
- };
- const sourceStream = new MediaStream([sourceTrack]);
- const sourceNode = context.createMediaStreamSource(sourceStream);
- const nsPlugin = getNoiseSuppressionPlugin(
- fullConfig.noiseSuppression?.pluginName
- );
- const nsNode = await nsPlugin.createNode(
- context,
- fullConfig.noiseSuppression
- );
- const vadPlugin = getVADPlugin(fullConfig.vad?.pluginName);
- const vadStateMachine = new VADStateMachine(fullConfig.vad);
- const emitter = mitt();
- const vadNode = await vadPlugin.createNode(
- context,
- fullConfig.vad,
- (prob) => {
- const timestamp = context.currentTime * 1e3;
- const newState = vadStateMachine.processFrame(prob, timestamp);
- if (newState.state !== lastVadState.state || Math.abs(newState.probability - lastVadState.probability) > 0.1) {
- emitter.emit("vadChange", newState);
- lastVadState = newState;
- updateGain(newState);
- }
- }
- );
- let lastVadState = {
- isSpeaking: false,
- probability: 0,
- state: "silent"
- };
- const splitter = context.createGain();
- sourceNode.connect(nsNode);
- nsNode.connect(splitter);
- splitter.connect(vadNode);
- const delayNode = context.createDelay(1);
- const preRollSeconds = (fullConfig.vad?.preRollMs ?? 200) / 1e3;
- delayNode.delayTime.value = preRollSeconds;
- const gainNode = context.createGain();
- gainNode.gain.value = fullConfig.output?.silenceGain ?? 0;
- const destination = context.createMediaStreamDestination();
- splitter.connect(delayNode);
- delayNode.connect(gainNode);
- gainNode.connect(destination);
- function updateGain(state) {
- const { speechGain, silenceGain, gainRampTime } = fullConfig.output;
- const targetGain = state.isSpeaking ? speechGain ?? 1 : silenceGain ?? 0;
- const now = context.currentTime;
- gainNode.gain.setTargetAtTime(targetGain, now, gainRampTime ?? 0.02);
- }
- function dispose() {
- sourceNode.disconnect();
- nsNode.disconnect();
- splitter.disconnect();
- vadNode.disconnect();
- delayNode.disconnect();
- gainNode.disconnect();
- destination.stream.getTracks().forEach((t) => t.stop());
- unregisterPipeline();
- }
- return {
- processedTrack: destination.stream.getAudioTracks()[0],
- events: emitter,
- get state() {
- return lastVadState;
- },
- setConfig: (newConfig) => {
- if (newConfig.vad) {
- vadStateMachine.updateConfig(newConfig.vad);
- }
- },
- dispose
- };
- }
-
- // src/livekit/integration.ts
- async function attachProcessingToTrack(track, config = {}) {
- const originalTrack = track.mediaStreamTrack;
- const pipeline = await createAudioPipeline(originalTrack, config);
- await track.replaceTrack(pipeline.processedTrack);
- if (config.livekit?.manageTrackMute) {
- let isVadMuted = false;
- pipeline.events.on("vadChange", async (state) => {
- if (state.isSpeaking) {
- if (isVadMuted) {
- await track.unmute();
- isVadMuted = false;
- }
- } else {
- if (!track.isMuted) {
- await track.mute();
- isVadMuted = true;
- }
- }
- });
- }
- const originalDispose = pipeline.dispose;
- pipeline.dispose = () => {
- if (originalTrack.readyState === "live") {
- track.replaceTrack(originalTrack).catch(console.error);
- }
- originalDispose();
- };
- return pipeline;
- }
+ attachProcessingToTrack
+ } from "./chunk-HFSKQ33X.mjs";
+ import {
+ createAudioPipeline
+ } from "./chunk-QU7E5HBA.mjs";
+ import {
+ VADStateMachine
+ } from "./chunk-JJASCVEW.mjs";
+ import {
+ closeAudioContext,
+ getAudioContext,
+ registerPipeline,
+ resumeAudioContext,
+ suspendAudioContext,
+ unregisterPipeline
+ } from "./chunk-OZ7KMC4S.mjs";
+ import {
+ getNoiseSuppressionPlugin,
+ getVADPlugin,
+ registerNoiseSuppressionPlugin,
+ registerVADPlugin
+ } from "./chunk-FS635GMR.mjs";
+ import {
+ RNNoisePlugin
+ } from "./chunk-SDTOKWM2.mjs";
+ import {
+ EnergyVADPlugin
+ } from "./chunk-UMU2KIB6.mjs";
  export {
  EnergyVADPlugin,
  RNNoisePlugin,
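As a usage sketch of the pipeline API removed from dist/index.mjs above: 0.1.2 re-exports the same names from the chunk files, so the surface is assumed unchanged, and the option names below are taken from the 0.1.1 source shown in this hunk rather than from 0.1.2 documentation.

```ts
// Sketch based on the 0.1.1 createAudioPipeline source shown above; treat the
// option names and handle shape as assumptions for 0.1.2.
import { createAudioPipeline } from "@tensamin/audio";

async function processMicrophone() {
  const media = await navigator.mediaDevices.getUserMedia({ audio: true });
  const [micTrack] = media.getAudioTracks();

  const pipeline = await createAudioPipeline(micTrack, {
    noiseSuppression: { enabled: true },           // RNNoise plugin by default
    vad: { startThreshold: 0.5, hangoverMs: 300 }, // energy VAD defaults above
    output: { speechGain: 1, silenceGain: 0 },     // gate silence to zero gain
  });

  // "vadChange" fires when the state machine flips state or the probability
  // moves by more than 0.1 (per the source above).
  pipeline.events.on("vadChange", (state) => {
    console.log("speaking:", state.isSpeaking, "p =", state.probability);
  });

  // processedTrack is the gated MediaStreamTrack; publish or play it.
  return pipeline; // call pipeline.dispose() when finished
}
```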
package/dist/livekit/integration.d.mts ADDED
@@ -0,0 +1,11 @@
+ import { LocalAudioTrack } from 'livekit-client';
+ import { AudioProcessingConfig, AudioPipelineHandle } from '../types.mjs';
+ import 'mitt';
+
+ /**
+ * Attaches the audio processing pipeline to a LiveKit LocalAudioTrack.
+ * This replaces the underlying MediaStreamTrack with the processed one.
+ */
+ declare function attachProcessingToTrack(track: LocalAudioTrack, config?: AudioProcessingConfig): Promise<AudioPipelineHandle>;
+
+ export { attachProcessingToTrack };
package/dist/livekit/integration.d.ts ADDED
@@ -0,0 +1,11 @@
+ import { LocalAudioTrack } from 'livekit-client';
+ import { AudioProcessingConfig, AudioPipelineHandle } from '../types.js';
+ import 'mitt';
+
+ /**
+ * Attaches the audio processing pipeline to a LiveKit LocalAudioTrack.
+ * This replaces the underlying MediaStreamTrack with the processed one.
+ */
+ declare function attachProcessingToTrack(track: LocalAudioTrack, config?: AudioProcessingConfig): Promise<AudioPipelineHandle>;
+
+ export { attachProcessingToTrack };
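A sketch of wiring this into a LiveKit publish flow, assuming the attachProcessingToTrack signature declared above; createLocalAudioTrack and publishTrack are standard livekit-client APIs, not part of this package.

```ts
import { Room, createLocalAudioTrack } from "livekit-client";
import { attachProcessingToTrack } from "@tensamin/audio";

async function publishProcessedMic(room: Room) {
  const track = await createLocalAudioTrack();

  // Swaps the track's underlying MediaStreamTrack for the processed one,
  // as described in the JSDoc above. manageTrackMute (optional) lets the
  // pipeline mute/unmute the LiveKit track on VAD decisions.
  const pipeline = await attachProcessingToTrack(track, {
    livekit: { manageTrackMute: true },
  });

  await room.localParticipant.publishTrack(track);
  return pipeline; // pipeline.dispose() restores the original track if still live
}
```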