@tensamin/audio 0.1.0 → 0.1.2
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +24 -2
- package/dist/chunk-FS635GMR.mjs +47 -0
- package/dist/chunk-HFSKQ33X.mjs +38 -0
- package/{src/vad/vad-state.ts → dist/chunk-JJASCVEW.mjs} +21 -33
- package/dist/chunk-OZ7KMC4S.mjs +46 -0
- package/dist/chunk-QU7E5HBA.mjs +106 -0
- package/dist/chunk-SDTOKWM2.mjs +39 -0
- package/{src/vad/vad-node.ts → dist/chunk-UMU2KIB6.mjs} +10 -20
- package/dist/chunk-WBQAMGXK.mjs +0 -0
- package/dist/context/audio-context.d.mts +32 -0
- package/dist/context/audio-context.d.ts +32 -0
- package/dist/context/audio-context.js +75 -0
- package/dist/context/audio-context.mjs +16 -0
- package/dist/extensibility/plugins.d.mts +9 -0
- package/dist/extensibility/plugins.d.ts +9 -0
- package/dist/extensibility/plugins.js +180 -0
- package/dist/extensibility/plugins.mjs +14 -0
- package/dist/index.d.mts +10 -0
- package/dist/index.d.ts +10 -0
- package/dist/index.js +419 -0
- package/dist/index.mjs +47 -0
- package/dist/livekit/integration.d.mts +11 -0
- package/dist/livekit/integration.d.ts +11 -0
- package/dist/livekit/integration.js +368 -0
- package/dist/livekit/integration.mjs +12 -0
- package/dist/noise-suppression/rnnoise-node.d.mts +10 -0
- package/dist/noise-suppression/rnnoise-node.d.ts +10 -0
- package/dist/noise-suppression/rnnoise-node.js +73 -0
- package/dist/noise-suppression/rnnoise-node.mjs +6 -0
- package/dist/pipeline/audio-pipeline.d.mts +6 -0
- package/dist/pipeline/audio-pipeline.d.ts +6 -0
- package/dist/pipeline/audio-pipeline.js +335 -0
- package/dist/pipeline/audio-pipeline.mjs +11 -0
- package/dist/types.d.mts +155 -0
- package/dist/types.d.ts +155 -0
- package/dist/types.js +18 -0
- package/dist/types.mjs +1 -0
- package/dist/vad/vad-node.d.mts +9 -0
- package/dist/vad/vad-node.d.ts +9 -0
- package/dist/vad/vad-node.js +92 -0
- package/dist/vad/vad-node.mjs +6 -0
- package/dist/vad/vad-state.d.mts +15 -0
- package/dist/vad/vad-state.d.ts +15 -0
- package/dist/vad/vad-state.js +83 -0
- package/dist/vad/vad-state.mjs +6 -0
- package/package.json +11 -14
- package/.github/workflows/publish.yml +0 -23
- package/src/context/audio-context.ts +0 -69
- package/src/extensibility/plugins.ts +0 -45
- package/src/index.ts +0 -8
- package/src/livekit/integration.ts +0 -61
- package/src/noise-suppression/rnnoise-node.ts +0 -62
- package/src/pipeline/audio-pipeline.ts +0 -154
- package/src/types.ts +0 -167
- package/tsconfig.json +0 -29
package/dist/vad/vad-node.js
ADDED
@@ -0,0 +1,92 @@

```js
"use strict";
var __defProp = Object.defineProperty;
var __getOwnPropDesc = Object.getOwnPropertyDescriptor;
var __getOwnPropNames = Object.getOwnPropertyNames;
var __hasOwnProp = Object.prototype.hasOwnProperty;
var __export = (target, all) => {
  for (var name in all)
    __defProp(target, name, { get: all[name], enumerable: true });
};
var __copyProps = (to, from, except, desc) => {
  if (from && typeof from === "object" || typeof from === "function") {
    for (let key of __getOwnPropNames(from))
      if (!__hasOwnProp.call(to, key) && key !== except)
        __defProp(to, key, { get: () => from[key], enumerable: !(desc = __getOwnPropDesc(from, key)) || desc.enumerable });
  }
  return to;
};
var __toCommonJS = (mod) => __copyProps(__defProp({}, "__esModule", { value: true }), mod);

// src/vad/vad-node.ts
var vad_node_exports = {};
__export(vad_node_exports, {
  EnergyVADPlugin: () => EnergyVADPlugin
});
module.exports = __toCommonJS(vad_node_exports);
var energyVadWorkletCode = `
class EnergyVadProcessor extends AudioWorkletProcessor {
  constructor() {
    super();
    this.smoothing = 0.95;
    this.energy = 0;
    this.noiseFloor = 0.001;
  }

  process(inputs, outputs, parameters) {
    const input = inputs[0];
    if (!input || !input.length) return true;
    const channel = input[0];

    // Calculate RMS
    let sum = 0;
    for (let i = 0; i < channel.length; i++) {
      sum += channel[i] * channel[i];
    }
    const rms = Math.sqrt(sum / channel.length);

    // Simple adaptive noise floor (very basic)
    if (rms < this.noiseFloor) {
      this.noiseFloor = this.noiseFloor * 0.99 + rms * 0.01;
    } else {
      this.noiseFloor = this.noiseFloor * 0.999 + rms * 0.001;
    }

    // Calculate "probability" based on SNR
    // This is a heuristic mapping from energy to 0-1
    const snr = rms / (this.noiseFloor + 1e-6);
    const probability = Math.min(1, Math.max(0, (snr - 1.5) / 10)); // Arbitrary scaling

    this.port.postMessage({ probability });

    return true;
  }
}
registerProcessor('energy-vad-processor', EnergyVadProcessor);
`;
var EnergyVADPlugin = class {
  name = "energy-vad";
  async createNode(context, config, onDecision) {
    const blob = new Blob([energyVadWorkletCode], {
      type: "application/javascript"
    });
    const url = URL.createObjectURL(blob);
    try {
      await context.audioWorklet.addModule(url);
    } catch (e) {
      console.warn("Failed to add Energy VAD worklet:", e);
      throw e;
    } finally {
      URL.revokeObjectURL(url);
    }
    const node = new AudioWorkletNode(context, "energy-vad-processor");
    node.port.onmessage = (event) => {
      const { probability } = event.data;
      onDecision(probability);
    };
    return node;
  }
};
// Annotate the CommonJS export names for ESM import in node:
0 && (module.exports = {
  EnergyVADPlugin
});
```
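
For context, a minimal sketch of how this new CommonJS bundle's EnergyVADPlugin could be driven in a browser. The getUserMedia/AudioContext wiring and the undefined config argument are assumptions of this sketch, not part of the package:

```ts
import { EnergyVADPlugin } from "@tensamin/audio";

// Run the microphone through the energy VAD worklet and log the per-frame
// speech probability it posts back.
export async function monitorMicEnergy(): Promise<void> {
  const stream = await navigator.mediaDevices.getUserMedia({ audio: true });
  const context = new AudioContext();
  const source = context.createMediaStreamSource(stream);

  const plugin = new EnergyVADPlugin();
  // createNode(context, config, onDecision); this plugin's worklet ignores config.
  const vadNode = await plugin.createNode(context, undefined, (probability: number) => {
    console.log("speech probability", probability.toFixed(2));
  });

  // Keep the worklet in the rendering graph; its output is silent.
  source.connect(vadNode).connect(context.destination);
}
```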
package/dist/vad/vad-state.d.mts
ADDED
@@ -0,0 +1,15 @@

```ts
import { AudioProcessingConfig, VADState } from '../types.mjs';
import 'mitt';

declare class VADStateMachine {
    private config;
    private currentState;
    private lastSpeechTime;
    private speechStartTime;
    private frameDurationMs;
    constructor(config: AudioProcessingConfig["vad"]);
    updateConfig(config: Partial<AudioProcessingConfig["vad"]>): void;
    processFrame(probability: number, timestamp: number): VADState;
}

export { VADStateMachine };
```
package/dist/vad/vad-state.d.ts
ADDED
@@ -0,0 +1,15 @@

```ts
import { AudioProcessingConfig, VADState } from '../types.js';
import 'mitt';

declare class VADStateMachine {
    private config;
    private currentState;
    private lastSpeechTime;
    private speechStartTime;
    private frameDurationMs;
    constructor(config: AudioProcessingConfig["vad"]);
    updateConfig(config: Partial<AudioProcessingConfig["vad"]>): void;
    processFrame(probability: number, timestamp: number): VADState;
}

export { VADStateMachine };
```
package/dist/vad/vad-state.js
ADDED
@@ -0,0 +1,83 @@

```js
"use strict";
var __defProp = Object.defineProperty;
var __getOwnPropDesc = Object.getOwnPropertyDescriptor;
var __getOwnPropNames = Object.getOwnPropertyNames;
var __hasOwnProp = Object.prototype.hasOwnProperty;
var __export = (target, all) => {
  for (var name in all)
    __defProp(target, name, { get: all[name], enumerable: true });
};
var __copyProps = (to, from, except, desc) => {
  if (from && typeof from === "object" || typeof from === "function") {
    for (let key of __getOwnPropNames(from))
      if (!__hasOwnProp.call(to, key) && key !== except)
        __defProp(to, key, { get: () => from[key], enumerable: !(desc = __getOwnPropDesc(from, key)) || desc.enumerable });
  }
  return to;
};
var __toCommonJS = (mod) => __copyProps(__defProp({}, "__esModule", { value: true }), mod);

// src/vad/vad-state.ts
var vad_state_exports = {};
__export(vad_state_exports, {
  VADStateMachine: () => VADStateMachine
});
module.exports = __toCommonJS(vad_state_exports);
var VADStateMachine = class {
  config;
  currentState = "silent";
  lastSpeechTime = 0;
  speechStartTime = 0;
  frameDurationMs = 20;
  // Assumed frame duration, updated by calls
  constructor(config) {
    this.config = {
      enabled: config?.enabled ?? true,
      pluginName: config?.pluginName ?? "energy-vad",
      startThreshold: config?.startThreshold ?? 0.5,
      stopThreshold: config?.stopThreshold ?? 0.4,
      hangoverMs: config?.hangoverMs ?? 300,
      preRollMs: config?.preRollMs ?? 200
    };
  }
  updateConfig(config) {
    this.config = { ...this.config, ...config };
  }
  processFrame(probability, timestamp) {
    const { startThreshold, stopThreshold, hangoverMs } = this.config;
    let newState = this.currentState;
    if (this.currentState === "silent" || this.currentState === "speech_ending") {
      if (probability >= startThreshold) {
        newState = "speech_starting";
        this.speechStartTime = timestamp;
        this.lastSpeechTime = timestamp;
      } else {
        newState = "silent";
      }
    } else if (this.currentState === "speech_starting" || this.currentState === "speaking") {
      if (probability >= stopThreshold) {
        newState = "speaking";
        this.lastSpeechTime = timestamp;
      } else {
        const timeSinceSpeech = timestamp - this.lastSpeechTime;
        if (timeSinceSpeech < hangoverMs) {
          newState = "speaking";
        } else {
          newState = "speech_ending";
        }
      }
    }
    if (newState === "speech_starting") newState = "speaking";
    if (newState === "speech_ending") newState = "silent";
    this.currentState = newState;
    return {
      isSpeaking: newState === "speaking",
      probability,
      state: newState
    };
  }
};
// Annotate the CommonJS export names for ESM import in node:
0 && (module.exports = {
  VADStateMachine
});
```
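
The state machine above is driven purely by (probability, timestamp) pairs, so it can be exercised outside the audio graph. A small sketch, assuming the partial config object type-checks (the constructor fills in defaults for omitted fields):

```ts
import { VADStateMachine } from "@tensamin/audio";

// Feed a made-up probability trace through the state machine at 20 ms frames.
// The dip below stopThreshold stays "speaking" because it is shorter than hangoverMs.
const vad = new VADStateMachine({
  startThreshold: 0.5,
  stopThreshold: 0.4,
  hangoverMs: 300,
});

const trace = [0.1, 0.7, 0.8, 0.2, 0.3, 0.2, 0.9, 0.1];
trace.forEach((probability, frame) => {
  const state = vad.processFrame(probability, frame * 20); // timestamps in ms
  console.log(`${frame * 20} ms`, probability, state.state, state.isSpeaking);
});
```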
package/package.json
CHANGED

```diff
@@ -1,31 +1,25 @@
 {
   "name": "@tensamin/audio",
-  "version": "0.1.0",
+  "version": "0.1.2",
+  "main": "dist/index.js",
   "module": "dist/index.mjs",
   "types": "dist/index.d.ts",
-  "type": "module",
   "author": {
     "email": "aloisianer@proton.me",
     "name": "Alois"
   },
   "publishConfig": {
-    "registry": "https://registry.npmjs.org",
     "access": "public"
   },
   "repository": {
     "type": "git",
     "url": "https://github.com/Tensamin/Audio"
   },
-  "exports": {
-    ".": {
-      "import": "./dist/index.mjs",
-      "types": "./dist/index.d.ts"
-    }
-  },
+  "license": "MIT",
   "scripts": {
-    "build": "tsup src/
-    "
-    "
+    "build": "tsup src/ --format cjs,esm --dts --out-dir dist --clean",
+    "format": "bunx prettier --write .",
+    "lint": "tsc"
   },
   "dependencies": {
     "@sapphi-red/web-noise-suppressor": "^0.3.5",
@@ -35,10 +29,13 @@
     "livekit-client": "^2.0.0"
   },
   "devDependencies": {
+    "tsup": "^8.5.1",
     "@types/bun": "latest",
     "@types/web": "^0.0.298",
     "livekit-client": "^2.16.0",
-    "tsup": "^8.5.1",
     "typescript": "^5.9.3"
-  }
+  },
+  "files": [
+    "dist"
+  ]
 }
```
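
With the exports map and "type": "module" gone, resolution falls back to the "main"/"module"/"types" fields, and "files" limits the published tarball to dist. A quick consumer-side sketch; the named exports follow the removed src/index.ts re-exports:

```ts
// ESM consumers and bundlers resolve "module" → dist/index.mjs
import { createAudioPipeline, attachProcessingToTrack } from "@tensamin/audio";

// CJS consumers resolve the newly added "main" → dist/index.js, e.g.
//   const { VADStateMachine } = require("@tensamin/audio");
```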
package/.github/workflows/publish.yml
DELETED
@@ -1,23 +0,0 @@

```yaml
name: Publish

on:
  workflow_dispatch:

permissions:
  contents: write
  id-token: write

jobs:
  publish:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v4
      - uses: actions/setup-node@v4

      - name: Publish
        run: |
          npm ci
          npm run build
          npm pack
          echo "//registry.npmjs.org/:_authToken=${{ secrets.NPM_TOKEN }}" > ~/.npmrc
          npm publish --access public
```
package/src/context/audio-context.ts
DELETED
@@ -1,69 +0,0 @@

```ts
/**
 * Manages a shared AudioContext for the application.
 */

let sharedContext: AudioContext | null = null;
let activePipelines = 0;

/**
 * Gets the shared AudioContext, creating it if necessary.
 * @param options Optional AudioContextOptions
 */
export function getAudioContext(options?: AudioContextOptions): AudioContext {
  if (typeof window === "undefined" || typeof AudioContext === "undefined") {
    throw new Error(
      "AudioContext is not supported in this environment (browser only).",
    );
  }

  if (!sharedContext || sharedContext.state === "closed") {
    sharedContext = new AudioContext(options);
  }

  return sharedContext;
}

/**
 * Registers a pipeline usage. Keeps track of active users.
 */
export function registerPipeline(): void {
  activePipelines++;
}

/**
 * Unregisters a pipeline usage.
 * Optionally closes the context if no pipelines are active (not implemented by default to avoid churn).
 */
export function unregisterPipeline(): void {
  activePipelines = Math.max(0, activePipelines - 1);
}

/**
 * Resumes the shared AudioContext.
 * Should be called in response to a user gesture.
 */
export async function resumeAudioContext(): Promise<void> {
  if (sharedContext && sharedContext.state === "suspended") {
    await sharedContext.resume();
  }
}

/**
 * Suspends the shared AudioContext.
 */
export async function suspendAudioContext(): Promise<void> {
  if (sharedContext && sharedContext.state === "running") {
    await sharedContext.suspend();
  }
}

/**
 * Closes the shared AudioContext and releases resources.
 */
export async function closeAudioContext(): Promise<void> {
  if (sharedContext && sharedContext.state !== "closed") {
    await sharedContext.close();
  }
  sharedContext = null;
  activePipelines = 0;
}
```
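
These helpers still ship in dist/context/audio-context.*; only the TypeScript source leaves the tarball. A short usage sketch, where the element id and sample rate are illustrative:

```ts
import {
  getAudioContext,
  resumeAudioContext,
  closeAudioContext,
} from "@tensamin/audio";

// Browsers keep a fresh AudioContext suspended until a user gesture,
// so create and resume the shared context from a click handler.
document.querySelector("#join-call")?.addEventListener("click", async () => {
  getAudioContext({ sampleRate: 48000 }); // lazily creates the shared context
  await resumeAudioContext();
});

// When the app is done with audio:
// await closeAudioContext();
```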
@@ -1,45 +0,0 @@
|
|
|
1
|
-
import type { NoiseSuppressionPlugin, VADPlugin } from "../types.js";
|
|
2
|
-
import { RNNoisePlugin } from "../noise-suppression/rnnoise-node.js";
|
|
3
|
-
import { EnergyVADPlugin } from "../vad/vad-node.js";
|
|
4
|
-
|
|
5
|
-
const nsPlugins = new Map<string, NoiseSuppressionPlugin>();
|
|
6
|
-
const vadPlugins = new Map<string, VADPlugin>();
|
|
7
|
-
|
|
8
|
-
// Register defaults
|
|
9
|
-
const defaultNs = new RNNoisePlugin();
|
|
10
|
-
nsPlugins.set(defaultNs.name, defaultNs);
|
|
11
|
-
|
|
12
|
-
const defaultVad = new EnergyVADPlugin();
|
|
13
|
-
vadPlugins.set(defaultVad.name, defaultVad);
|
|
14
|
-
|
|
15
|
-
export function registerNoiseSuppressionPlugin(plugin: NoiseSuppressionPlugin) {
|
|
16
|
-
nsPlugins.set(plugin.name, plugin);
|
|
17
|
-
}
|
|
18
|
-
|
|
19
|
-
export function registerVADPlugin(plugin: VADPlugin) {
|
|
20
|
-
vadPlugins.set(plugin.name, plugin);
|
|
21
|
-
}
|
|
22
|
-
|
|
23
|
-
export function getNoiseSuppressionPlugin(
|
|
24
|
-
name?: string,
|
|
25
|
-
): NoiseSuppressionPlugin {
|
|
26
|
-
if (!name) return defaultNs;
|
|
27
|
-
const plugin = nsPlugins.get(name);
|
|
28
|
-
if (!plugin) {
|
|
29
|
-
console.warn(
|
|
30
|
-
`Noise suppression plugin '${name}' not found, falling back to default.`,
|
|
31
|
-
);
|
|
32
|
-
return defaultNs;
|
|
33
|
-
}
|
|
34
|
-
return plugin;
|
|
35
|
-
}
|
|
36
|
-
|
|
37
|
-
export function getVADPlugin(name?: string): VADPlugin {
|
|
38
|
-
if (!name) return defaultVad;
|
|
39
|
-
const plugin = vadPlugins.get(name);
|
|
40
|
-
if (!plugin) {
|
|
41
|
-
console.warn(`VAD plugin '${name}' not found, falling back to default.`);
|
|
42
|
-
return defaultVad;
|
|
43
|
-
}
|
|
44
|
-
return plugin;
|
|
45
|
-
}
|
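
A sketch of registering a custom VAD plugin against this registry. The plugin object's shape mirrors the built-in EnergyVADPlugin (a name plus an async createNode(context, config, onDecision)); the actual VADPlugin interface may require more than this, so treat it as hypothetical:

```ts
import { registerVADPlugin, getVADPlugin } from "@tensamin/audio";

// Hypothetical "always on" plugin for testing downstream mute logic.
registerVADPlugin({
  name: "always-on-vad",
  async createNode(context, _config, onDecision) {
    // Report full speech probability every 20 ms; a real plugin would
    // clear this timer when the pipeline is disposed.
    setInterval(() => onDecision(1), 20);
    return context.createGain(); // passthrough node
  },
});

const plugin = getVADPlugin("always-on-vad"); // unknown names fall back to "energy-vad"
```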
package/src/index.ts
DELETED
@@ -1,8 +0,0 @@

```ts
export * from "./types.js";
export * from "./context/audio-context.js";
export * from "./pipeline/audio-pipeline.js";
export * from "./livekit/integration.js";
export * from "./extensibility/plugins.js";
export * from "./noise-suppression/rnnoise-node.js";
export * from "./vad/vad-node.js";
export * from "./vad/vad-state.js";
```
package/src/livekit/integration.ts
DELETED
@@ -1,61 +0,0 @@

```ts
import type { LocalAudioTrack } from "livekit-client";
import { createAudioPipeline } from "../pipeline/audio-pipeline.js";
import type { AudioPipelineHandle, AudioProcessingConfig } from "../types.js";

/**
 * Attaches the audio processing pipeline to a LiveKit LocalAudioTrack.
 * This replaces the underlying MediaStreamTrack with the processed one.
 */
export async function attachProcessingToTrack(
  track: LocalAudioTrack,
  config: AudioProcessingConfig = {},
): Promise<AudioPipelineHandle> {
  // 1. Get the original track
  const originalTrack = track.mediaStreamTrack;

  // 2. Create pipeline
  const pipeline = await createAudioPipeline(originalTrack, config);

  // 3. Replace the track in LiveKit
  // Use replaceTrack which is the public API to swap the underlying MediaStreamTrack.
  await track.replaceTrack(pipeline.processedTrack);

  // 4. Handle intelligent muting if enabled
  if (config.livekit?.manageTrackMute) {
    let isVadMuted = false;

    pipeline.events.on("vadChange", async (state) => {
      if (state.isSpeaking) {
        if (isVadMuted) {
          // Only unmute if we were the ones who muted it
          // And check if the track is not globally muted by user?
          // This is tricky. If user muted manually, track.isMuted is true.
          // We should probably check a separate flag or assume VAD overrides only when "active".
          // For safety, we only unmute if we muted.
          await track.unmute();
          isVadMuted = false;
        }
      } else {
        // Silence
        if (!track.isMuted) {
          await track.mute();
          isVadMuted = true;
        }
      }
    });
  }

  // 5. Handle cleanup
  const originalDispose = pipeline.dispose;
  pipeline.dispose = () => {
    // Restore original track?
    // Or just stop.
    // If we dispose, we should probably try to restore the original track if it's still alive.
    if (originalTrack.readyState === "live") {
      track.replaceTrack(originalTrack).catch(console.error);
    }
    originalDispose();
  };

  return pipeline;
}
```
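
A sketch of how attachProcessingToTrack might be used with livekit-client, assuming the partial AudioProcessingConfig shown here type-checks; run it inside async call-setup code:

```ts
import { createLocalAudioTrack } from "livekit-client";
import { attachProcessingToTrack } from "@tensamin/audio";

// Capture a microphone track, swap in the processed MediaStreamTrack,
// and let VAD drive mute/unmute via manageTrackMute.
const track = await createLocalAudioTrack();
const pipeline = await attachProcessingToTrack(track, {
  noiseSuppression: { enabled: true },
  vad: { enabled: true, hangoverMs: 300 },
  livekit: { manageTrackMute: true },
});

// await room.localParticipant.publishTrack(track); // publish as usual
// pipeline.dispose();                              // swaps the original mic track back in
```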
package/src/noise-suppression/rnnoise-node.ts
DELETED
@@ -1,62 +0,0 @@

```ts
import {
  RnnoiseWorkletNode,
  loadRnnoise,
} from "@sapphi-red/web-noise-suppressor";
import type {
  AudioProcessingConfig,
  NoiseSuppressionPlugin,
} from "../types.js";

// Default URLs (can be overridden by config)
// These defaults assume the assets are served from the same origin or a known CDN.
// In a real package, we might want to bundle them or require the user to provide them.
const DEFAULT_WASM_URL =
  "https://unpkg.com/@sapphi-red/web-noise-suppressor@0.3.5/dist/rnnoise.wasm";
const DEFAULT_SIMD_WASM_URL =
  "https://unpkg.com/@sapphi-red/web-noise-suppressor@0.3.5/dist/rnnoise_simd.wasm";
const DEFAULT_WORKLET_URL =
  "https://unpkg.com/@sapphi-red/web-noise-suppressor@0.3.5/dist/noise-suppressor-worklet.min.js";

export class RNNoisePlugin implements NoiseSuppressionPlugin {
  name = "rnnoise-ns";
  private wasmBuffer: ArrayBuffer | null = null;

  async createNode(
    context: AudioContext,
    config: AudioProcessingConfig["noiseSuppression"],
  ): Promise<AudioNode> {
    if (!config?.enabled) {
      // Return a passthrough gain node if disabled but requested (though pipeline usually handles this)
      const pass = context.createGain();
      return pass;
    }

    // 1. Load WASM if not loaded
    // We use the library's loader which handles SIMD detection if we provide both URLs.
    // But wait, loadRnnoise returns ArrayBuffer.
    if (!this.wasmBuffer) {
      this.wasmBuffer = await loadRnnoise({
        url: config.wasmUrl || DEFAULT_WASM_URL,
        simdUrl: DEFAULT_SIMD_WASM_URL, // We should probably allow config for this too, but for now default is fine.
      });
    }

    // 2. Load Worklet
    const workletUrl = config.workletUrl || DEFAULT_WORKLET_URL;

    try {
      await context.audioWorklet.addModule(workletUrl);
    } catch (e) {
      console.warn("Failed to add RNNoise worklet module:", e);
      // Proceeding, assuming it might be already loaded.
    }

    // 3. Create Node
    const node = new RnnoiseWorkletNode(context, {
      wasmBinary: this.wasmBuffer,
      maxChannels: 1, // Mono for now
    });

    return node;
  }
}
```
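
Finally, a sketch of overriding the unpkg defaults above so the RNNoise assets are self-hosted. The /assets/ paths are placeholders, and createAudioPipeline's signature follows its use in the removed LiveKit integration:

```ts
import { createAudioPipeline } from "@tensamin/audio";
import type { AudioPipelineHandle } from "@tensamin/audio";

// Build a pipeline whose noise suppressor loads wasm and worklet assets
// from your own origin instead of unpkg.
export async function buildDenoisedPipeline(
  micTrack: MediaStreamTrack,
): Promise<AudioPipelineHandle> {
  return createAudioPipeline(micTrack, {
    noiseSuppression: {
      enabled: true,
      wasmUrl: "/assets/rnnoise.wasm",
      workletUrl: "/assets/noise-suppressor-worklet.min.js",
    },
  });
}
```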