@fonoster/autopilot 0.9.40 → 0.9.42
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/Autopilot.js +5 -3
- package/dist/envs.d.ts +1 -0
- package/dist/envs.js +6 -1
- package/dist/vad/SileroVad.d.ts +3 -3
- package/dist/vad/SileroVad.js +2 -2
- package/dist/vad/SileroVadModel.d.ts +8 -7
- package/dist/vad/SileroVadModel.js +16 -34
- package/dist/vad/createVad.d.ts +1 -1
- package/dist/vad/createVad.js +19 -33
- package/dist/vad/types.d.ts +12 -28
- package/dist/vadWorker.js +11 -4
- package/dist/vadv5/SileroVad.d.ts +18 -0
- package/dist/vadv5/SileroVad.js +40 -0
- package/dist/vadv5/SileroVadModel.d.ts +14 -0
- package/dist/vadv5/SileroVadModel.js +83 -0
- package/dist/vadv5/createVad.d.ts +3 -0
- package/dist/vadv5/createVad.js +126 -0
- package/dist/vadv5/index.d.ts +20 -0
- package/dist/vadv5/index.js +36 -0
- package/dist/vadv5/types.d.ts +54 -0
- package/dist/vadv5/types.js +2 -0
- package/package.json +7 -7
package/dist/Autopilot.js
CHANGED
@@ -36,11 +36,13 @@ class Autopilot {
         this.vadWorker = new worker_threads_1.Worker(vadWorkerPath, {
             workerData: conversationSettings.vad
         });
+        // Add initialization promise
         this.vadWorkerReady = new Promise((resolve, reject) => {
-
+            console.log("waiting for vad worker to be ready");
             this.vadWorker.once("message", (message) => {
+                console.log("vad worker is ready");
                 if (message === "VAD_READY") {
-
+                    console.log("vad worker is ready resolving promise");
                     resolve();
                 }
             });
@@ -55,7 +57,7 @@ class Autopilot {
         });
     }
     async start() {
-        // Wait for
+        // Wait for VAD worker to be ready before proceeding
        await this.vadWorkerReady;
        await this.setupVoiceStream();
        await this.setupSpeechGathering();
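The hunk above wraps worker startup in a vadWorkerReady promise that resolves once the worker thread posts the string "VAD_READY". A minimal sketch of that handshake on the parent side, assuming a hypothetical ./vadWorker.js that posts "VAD_READY" after it finishes loading its model (illustrative TypeScript, not the package's compiled source):

```ts
import { Worker } from "node:worker_threads";

// Assumed worker script path; the package resolves its own vadWorkerPath.
function startVadWorker(vadSettings: unknown): { worker: Worker; ready: Promise<void> } {
  const worker = new Worker("./vadWorker.js", { workerData: vadSettings });
  const ready = new Promise<void>((resolve, reject) => {
    worker.once("message", (message) => {
      if (message === "VAD_READY") resolve();
    });
    worker.once("error", reject); // surface startup failures instead of hanging
  });
  return { worker, ready };
}
```

Awaiting ready before touching the voice stream mirrors the `await this.vadWorkerReady` added at the top of Autopilot.start().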
package/dist/envs.d.ts
CHANGED
@@ -6,6 +6,7 @@ export declare const KNOWLEDGE_BASE_ENABLED: boolean;
 export declare const NODE_ENV: string;
 export declare const UNSTRUCTURED_API_KEY: string;
 export declare const UNSTRUCTURED_API_URL: string;
+export declare const SILERO_VAD_VERSION: string;
 export declare const CONVERSATION_PROVIDER: string;
 export declare const CONVERSATION_PROVIDER_FILE: string;
 export declare const APISERVER_ENDPOINT: string;
package/dist/envs.js
CHANGED
@@ -3,7 +3,7 @@ var __importDefault = (this && this.__importDefault) || function (mod) {
     return (mod && mod.__esModule) ? mod : { "default": mod };
 };
 Object.defineProperty(exports, "__esModule", { value: true });
-exports.SKIP_IDENTITY = exports.OPENAI_API_KEY = exports.INTEGRATIONS_FILE = exports.APISERVER_ENDPOINT = exports.CONVERSATION_PROVIDER_FILE = exports.CONVERSATION_PROVIDER = exports.UNSTRUCTURED_API_URL = exports.UNSTRUCTURED_API_KEY = exports.NODE_ENV = exports.KNOWLEDGE_BASE_ENABLED = exports.AWS_S3_SECRET_ACCESS_KEY = exports.AWS_S3_REGION = exports.AWS_S3_ENDPOINT = exports.AWS_S3_ACCESS_KEY_ID = void 0;
+exports.SKIP_IDENTITY = exports.OPENAI_API_KEY = exports.INTEGRATIONS_FILE = exports.APISERVER_ENDPOINT = exports.CONVERSATION_PROVIDER_FILE = exports.CONVERSATION_PROVIDER = exports.SILERO_VAD_VERSION = exports.UNSTRUCTURED_API_URL = exports.UNSTRUCTURED_API_KEY = exports.NODE_ENV = exports.KNOWLEDGE_BASE_ENABLED = exports.AWS_S3_SECRET_ACCESS_KEY = exports.AWS_S3_REGION = exports.AWS_S3_ENDPOINT = exports.AWS_S3_ACCESS_KEY_ID = void 0;
 /**
  * Copyright (C) 2025 by Fonoster Inc (https://fonoster.com)
  * http://github.com/fonoster/fonoster
@@ -39,6 +39,7 @@ exports.NODE_ENV = e.NODE_ENV || "production";
 exports.UNSTRUCTURED_API_KEY = e.AUTOPILOT_UNSTRUCTURED_API_KEY ?? "";
 exports.UNSTRUCTURED_API_URL = e.AUTOPILOT_UNSTRUCTURED_API_URL ??
     "https://api.unstructuredapp.io/general/v0/general";
+exports.SILERO_VAD_VERSION = e.AUTOPILOT_SILERO_VAD_VERSION ?? "v5";
 exports.CONVERSATION_PROVIDER = e.AUTOPILOT_CONVERSATION_PROVIDER
     ? e.AUTOPILOT_CONVERSATION_PROVIDER
     : types_1.ConversationProvider.FILE;
@@ -53,6 +54,10 @@ exports.INTEGRATIONS_FILE = e.AUTOPILOT_INTEGRATIONS_FILE
     : "/opt/fonoster/integrations.json";
 exports.OPENAI_API_KEY = e.AUTOPILOT_OPENAI_API_KEY;
 exports.SKIP_IDENTITY = e.AUTOPILOT_SKIP_IDENTITY === "true";
+if (exports.SILERO_VAD_VERSION !== "v4" && exports.SILERO_VAD_VERSION !== "v5") {
+    console.error("SILERO_VAD_VERSION must be set to 'v4' or 'v5'");
+    process.exit(1);
+}
 if (exports.CONVERSATION_PROVIDER.toLocaleLowerCase() !== types_1.ConversationProvider.API &&
     exports.CONVERSATION_PROVIDER.toLocaleLowerCase() !== types_1.ConversationProvider.FILE) {
     console.error("CONVERSATION_PROVIDER must be set to 'api' or 'file'");
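The new SILERO_VAD_VERSION export reads AUTOPILOT_SILERO_VAD_VERSION, defaults to "v5", and aborts the process on any other value. A standalone sketch of the same guard, assuming only the process environment as input (names mirror the compiled envs.js; this is not the package source):

```ts
// Defaults to the v5 model; set AUTOPILOT_SILERO_VAD_VERSION=v4 to keep the old one.
const SILERO_VAD_VERSION: string =
  process.env.AUTOPILOT_SILERO_VAD_VERSION ?? "v5";

if (SILERO_VAD_VERSION !== "v4" && SILERO_VAD_VERSION !== "v5") {
  // Fail fast on misconfiguration, matching the check added in envs.js
  console.error("SILERO_VAD_VERSION must be set to 'v4' or 'v5'");
  process.exit(1);
}

export { SILERO_VAD_VERSION };
```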
package/dist/vad/SileroVad.d.ts
CHANGED
@@ -1,14 +1,14 @@
 import { Vad } from "./types";
 declare class SileroVad implements Vad {
     private vad;
-    private
+    private params;
     constructor(params: {
-        pathToModel
+        pathToModel?: string;
         activationThreshold: number;
         deactivationThreshold: number;
         debounceFrames: number;
     });
-    pathToModel
+    pathToModel?: string;
     activationThreshold: number;
     deactivationThreshold: number;
     debounceFrames: number;
package/dist/vad/SileroVad.js
CHANGED
@@ -24,7 +24,7 @@ const createVad_1 = require("./createVad");
 const logger = (0, logger_1.getLogger)({ service: "autopilot", filePath: __filename });
 class SileroVad {
     constructor(params) {
-        logger.verbose("starting instance of silero vad
+        logger.verbose("starting instance of silero vad v4", { ...params });
         this.params = params;
     }
     async init() {
@@ -32,7 +32,7 @@ class SileroVad {
     }
     processChunk(data, callback) {
         if (!this.vad) {
-            throw new Error("VAD not initialized");
+            throw new Error("VAD not initialized)");
         }
         this.vad(data, callback);
     }
package/dist/vad/SileroVadModel.d.ts
CHANGED
@@ -1,14 +1,15 @@
 import { ONNXRuntimeAPI, SpeechProbabilities } from "./types";
 declare class SileroVadModel {
-    private
-    private
-
-
-
+    private ort;
+    private pathToModel;
+    _session: any;
+    _h: unknown;
+    _c: unknown;
+    _sr: unknown;
     constructor(ort: ONNXRuntimeAPI, pathToModel: string);
-    static
+    static new: (ort: ONNXRuntimeAPI, pathToModel: string) => Promise<SileroVadModel>;
     init(): Promise<void>;
-    resetState: () => void;
     process(audioFrame: Float32Array): Promise<SpeechProbabilities>;
+    resetState(): void;
 }
 export { SileroVadModel };
package/dist/vad/SileroVadModel.js
CHANGED
@@ -21,58 +21,40 @@ exports.SileroVadModel = void 0;
  * limitations under the License.
  */
 const fs_1 = require("fs");
-const SAMPLE_RATE = 16000;
-function getNewState(ortInstance) {
-    return new ortInstance.Tensor("float32", new Float32Array(2 * 1 * 128), // Use Float32Array for consistency
-    [2, 1, 128]);
-}
 class SileroVadModel {
     constructor(ort, pathToModel) {
         this.ort = ort;
         this.pathToModel = pathToModel;
-        this.resetState = () => {
-            this._state = getNewState(this.ort);
-        };
     }
     async init() {
         const modelArrayBuffer = (0, fs_1.readFileSync)(this.pathToModel).buffer;
-
-
-
-            enableCpuMemArena: false
-        };
-        this._session = await this.ort.InferenceSession.create(modelArrayBuffer, sessionOption);
-        // Validate model inputs/outputs
-        const requiredInputs = ["input", "state", "sr"];
-        for (const name of requiredInputs) {
-            if (!this._session.inputNames.includes(name)) {
-                throw new Error(`Model is missing expected input "${name}"`);
-            }
-        }
-        if (!this._session.outputNames.includes("output") ||
-            !this._session.outputNames.includes("stateN")) {
-            throw new Error("Model is missing expected outputs");
-        }
-        // Use BigInt for sample rate tensor
-        this._sr = new this.ort.Tensor("int64", [BigInt(SAMPLE_RATE)], []);
-        this._state = getNewState(this.ort);
+        this._session = await this.ort.InferenceSession.create(modelArrayBuffer);
+        this._sr = new this.ort.Tensor("int64", [16000n]);
+        this.resetState();
     }
     async process(audioFrame) {
-        const
+        const t = new this.ort.Tensor("float32", audioFrame, [
             1,
             audioFrame.length
         ]);
-        const
-            input:
-
+        const inputs = {
+            input: t,
+            h: this._h,
+            c: this._c,
             sr: this._sr
         };
-        const out = await this._session.run(
-        this.
+        const out = await this._session.run(inputs);
+        this._h = out.hn;
+        this._c = out.cn;
         const [isSpeech] = out.output.data;
         const notSpeech = 1 - isSpeech;
         return { notSpeech, isSpeech };
     }
+    resetState() {
+        const zeroes = Array(2 * 64).fill(0);
+        this._h = new this.ort.Tensor("float32", zeroes, [2, 1, 64]);
+        this._c = new this.ort.Tensor("float32", zeroes, [2, 1, 64]);
+    }
 }
 exports.SileroVadModel = SileroVadModel;
 _a = SileroVadModel;
package/dist/vad/createVad.d.ts
CHANGED
package/dist/vad/createVad.js
CHANGED
@@ -58,61 +58,47 @@ const ort = __importStar(require("onnxruntime-node"));
 const chunkToFloat32Array_1 = require("./chunkToFloat32Array");
 const SileroVadModel_1 = require("./SileroVadModel");
 const logger = (0, logger_1.getLogger)({ service: "autopilot", filePath: __filename });
-const FULL_FRAME_SIZE =
-const
+const FULL_FRAME_SIZE = 1600; // Equivalent to 100ms @ 16kHz
+const FRAME_SIZE = 480; // Use last 30ms from the full frame for VAD processing
 async function createVad(params) {
     const { pathToModel, activationThreshold, deactivationThreshold, debounceFrames } = params;
-    const effectivePath = pathToModel || (0, path_1.join)(__dirname, "..", "..", "
-    const
-
-            create: ort.InferenceSession.create.bind(ort.InferenceSession)
-        },
-        Tensor: ort.Tensor
-    };
-    const silero = await SileroVadModel_1.SileroVadModel.new(ortAdapter, effectivePath);
-    let sampleBuffer = [];
+    const effectivePath = pathToModel || (0, path_1.join)(__dirname, "..", "..", "silero_vad.onnx");
+    const silero = await SileroVadModel_1.SileroVadModel.new(ort, effectivePath);
+    let audioBuffer = [];
     let isSpeechActive = false;
     let framesSinceStateChange = 0;
-    // Reset internal state after a state change.
-    const resetState = () => {
-        isSpeechActive = false;
-        framesSinceStateChange = 0;
-        // Clear any pending audio samples to avoid using outdated values.
-        sampleBuffer = [];
-        silero.resetState();
-        logger.silly("State reset -- sampleBuffer cleared");
-    };
     return async function process(chunk, callback) {
-        // Convert the incoming chunk to normalized Float32 samples (using chunkToFloat32Array)
         const float32Array = (0, chunkToFloat32Array_1.chunkToFloat32Array)(chunk);
-
-        //
-        while (
-
-
-
-
+        audioBuffer.push(...float32Array);
+        // Process full frames from the buffer
+        while (audioBuffer.length >= FULL_FRAME_SIZE) {
+            // Extract one full frame worth of samples
+            const fullFrame = audioBuffer.slice(0, FULL_FRAME_SIZE);
+            audioBuffer = audioBuffer.slice(FULL_FRAME_SIZE);
+            // Use the last FRAME_SIZE samples from the full frame for VAD processing
+            const frame = fullFrame.slice(fullFrame.length - FRAME_SIZE);
             const result = await silero.process(new Float32Array(frame));
             const rawScore = result.isSpeech;
             logger.silly("Frame processing", {
                 rawScore,
                 isSpeechActive,
                 framesSinceStateChange,
-                pendingSamples:
+                pendingSamples: audioBuffer.length
             });
             framesSinceStateChange++;
             if (isSpeechActive) {
-                // If
+                // If currently in speech, check if the score has dropped below the deactivation threshold
                 if (rawScore < deactivationThreshold &&
                     framesSinceStateChange >= debounceFrames) {
+                    isSpeechActive = false;
                     callback("SPEECH_END");
-                    resetState();
+                    silero.resetState(); // Reset VAD state after speech ends
+                    framesSinceStateChange = 0;
                     logger.silly("Speech end detected", { rawScore });
-                    continue;
                 }
             }
             else {
-                // If currently
+                // If not currently in speech, check if the score exceeds the activation threshold
                 if (rawScore > activationThreshold &&
                     framesSinceStateChange >= debounceFrames) {
                     isSpeechActive = true;
package/dist/vad/types.d.ts
CHANGED
@@ -26,35 +26,19 @@ type VadParams = {
     deactivationThreshold: number;
     debounceFrames: number;
 };
-
+type SpeechProbabilities = {
     notSpeech: number;
     isSpeech: number;
-}
-
+};
+type ONNXRuntimeAPI = {
     InferenceSession: {
-        create
-
-
-
-
+        create(modelArrayBuffer: ArrayBuffer): Promise<unknown>;
+    };
+    Tensor: {
+        new (type: "int64", dims: [16000n]): unknown;
+        new (type: "float32", data: number[], dims: [2, 1, 64]): unknown;
+        new (type: "float32", data: Float32Array, dims: [1, number]): unknown;
+        new (type: "float32", data: Float32Array, dims: [1, number]): unknown;
     };
-
-}
-export interface ONNXSession {
-    run: (feeds: {
-        [key: string]: ONNXTensor;
-    }) => Promise<{
-        output: {
-            data: Float32Array;
-        };
-        stateN: ONNXTensor;
-    }>;
-    inputNames: string[];
-    outputNames: string[];
-}
-export interface ONNXTensor {
-    data: Float32Array | bigint[];
-    dims: number[];
-    type: string;
-}
-export { Vad, VadEvent, VadParams };
+};
+export { ONNXRuntimeAPI, SpeechProbabilities, Vad, VadParams, VadEvent };
package/dist/vadWorker.js
CHANGED
@@ -20,11 +20,18 @@ Object.defineProperty(exports, "__esModule", { value: true });
  */
 const path_1 = require("path");
 const worker_threads_1 = require("worker_threads");
+const envs_1 = require("./envs");
 const SileroVad_1 = require("./vad/SileroVad");
-const
-
-
-
+const SileroVad_2 = require("./vadv5/SileroVad");
+const vad = envs_1.SILERO_VAD_VERSION === "v4"
+    ? new SileroVad_1.SileroVad({
+        ...worker_threads_1.workerData,
+        pathToModel: (0, path_1.join)(__dirname, "..", "silero_vad.onnx")
+    })
+    : new SileroVad_2.SileroVad({
+        ...worker_threads_1.workerData,
+        pathToModel: (0, path_1.join)(__dirname, "..", "silero_vad_v5.onnx")
+    });
 vad.init().then(() => {
     // Send ready message to parent
     worker_threads_1.parentPort?.postMessage("VAD_READY");
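vadWorker.js now chooses the implementation once at startup: when SILERO_VAD_VERSION is "v4" it loads vad/SileroVad with silero_vad.onnx, otherwise it loads vadv5/SileroVad with silero_vad_v5.onnx, and in both cases it answers the parent with "VAD_READY" after init(). A hedged TypeScript rendering of that branch (ES-module imports are illustrative; the published file is plain CommonJS):

```ts
import { join } from "node:path";
import { parentPort, workerData } from "node:worker_threads";
// Illustrative imports; the compiled worker uses require() on the dist paths.
import { SileroVad as SileroVadV4 } from "./vad/SileroVad";
import { SileroVad as SileroVadV5 } from "./vadv5/SileroVad";
import { SILERO_VAD_VERSION } from "./envs";

const vad =
  SILERO_VAD_VERSION === "v4"
    ? new SileroVadV4({ ...workerData, pathToModel: join(__dirname, "..", "silero_vad.onnx") })
    : new SileroVadV5({ ...workerData, pathToModel: join(__dirname, "..", "silero_vad_v5.onnx") });

vad.init().then(() => {
  parentPort?.postMessage("VAD_READY"); // completes the handshake awaited in Autopilot
});
```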
package/dist/vadv5/SileroVad.d.ts
ADDED
@@ -0,0 +1,18 @@
+import { Vad } from "./types";
+declare class SileroVad implements Vad {
+    private vad;
+    private readonly params;
+    constructor(params: {
+        pathToModel: string;
+        activationThreshold: number;
+        deactivationThreshold: number;
+        debounceFrames: number;
+    });
+    pathToModel: string;
+    activationThreshold: number;
+    deactivationThreshold: number;
+    debounceFrames: number;
+    init(): Promise<void>;
+    processChunk(data: Uint8Array, callback: (event: "SPEECH_START" | "SPEECH_END") => void): void;
+}
+export { SileroVad };
package/dist/vadv5/SileroVad.js
ADDED
@@ -0,0 +1,40 @@
+"use strict";
+Object.defineProperty(exports, "__esModule", { value: true });
+exports.SileroVad = void 0;
+/**
+ * Copyright (C) 2025 by Fonoster Inc (https://fonoster.com)
+ * http://github.com/fonoster/fonoster
+ *
+ * This file is part of Fonoster
+ *
+ * Licensed under the MIT License (the "License");
+ * you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * https://opensource.org/licenses/MIT
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+const logger_1 = require("@fonoster/logger");
+const createVad_1 = require("./createVad");
+const logger = (0, logger_1.getLogger)({ service: "autopilot", filePath: __filename });
+class SileroVad {
+    constructor(params) {
+        logger.verbose("starting instance of silero vad v5", { ...params });
+        this.params = params;
+    }
+    async init() {
+        this.vad = await (0, createVad_1.createVad)(this.params);
+    }
+    processChunk(data, callback) {
+        if (!this.vad) {
+            throw new Error("VAD not initialized");
+        }
+        this.vad(data, callback);
+    }
+}
+exports.SileroVad = SileroVad;
package/dist/vadv5/SileroVadModel.d.ts
ADDED
@@ -0,0 +1,14 @@
+import { ONNXRuntimeAPI, SpeechProbabilities } from "./types";
+declare class SileroVadModel {
+    private readonly ort;
+    private readonly pathToModel;
+    private _session;
+    private _state;
+    private _sr;
+    constructor(ort: ONNXRuntimeAPI, pathToModel: string);
+    static readonly new: (ort: ONNXRuntimeAPI, pathToModel: string) => Promise<SileroVadModel>;
+    init(): Promise<void>;
+    resetState: () => void;
+    process(audioFrame: Float32Array): Promise<SpeechProbabilities>;
+}
+export { SileroVadModel };
package/dist/vadv5/SileroVadModel.js
ADDED
@@ -0,0 +1,83 @@
+"use strict";
+var _a;
+Object.defineProperty(exports, "__esModule", { value: true });
+exports.SileroVadModel = void 0;
+/**
+ * Copyright (C) 2025 by Fonoster Inc (https://fonoster.com)
+ * http://github.com/fonoster/fonoster
+ *
+ * This file is part of Fonoster
+ *
+ * Licensed under the MIT License (the "License");
+ * you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * https://opensource.org/licenses/MIT
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+const fs_1 = require("fs");
+const SAMPLE_RATE = 16000;
+function getNewState(ortInstance) {
+    return new ortInstance.Tensor("float32", new Float32Array(2 * 1 * 128), // Use Float32Array for consistency
+    [2, 1, 128]);
+}
+class SileroVadModel {
+    constructor(ort, pathToModel) {
+        this.ort = ort;
+        this.pathToModel = pathToModel;
+        this.resetState = () => {
+            this._state = getNewState(this.ort);
+        };
+    }
+    async init() {
+        const modelArrayBuffer = (0, fs_1.readFileSync)(this.pathToModel).buffer;
+        const sessionOption = {
+            interOpNumThreads: 1,
+            intraOpNumThreads: 1,
+            enableCpuMemArena: false
+        };
+        this._session = await this.ort.InferenceSession.create(modelArrayBuffer, sessionOption);
+        // Validate model inputs/outputs
+        const requiredInputs = ["input", "state", "sr"];
+        for (const name of requiredInputs) {
+            if (!this._session.inputNames.includes(name)) {
+                throw new Error(`Model is missing expected input "${name}"`);
+            }
+        }
+        if (!this._session.outputNames.includes("output") ||
+            !this._session.outputNames.includes("stateN")) {
+            throw new Error("Model is missing expected outputs");
+        }
+        // Use BigInt for sample rate tensor
+        this._sr = new this.ort.Tensor("int64", [BigInt(SAMPLE_RATE)], []);
+        this._state = getNewState(this.ort);
+    }
+    async process(audioFrame) {
+        const inputTensor = new this.ort.Tensor("float32", audioFrame, [
+            1,
+            audioFrame.length
+        ]);
+        const feeds = {
+            input: inputTensor,
+            state: this._state,
+            sr: this._sr
+        };
+        const out = await this._session.run(feeds);
+        this._state = out.stateN;
+        const [isSpeech] = out.output.data;
+        const notSpeech = 1 - isSpeech;
+        return { notSpeech, isSpeech };
+    }
+}
+exports.SileroVadModel = SileroVadModel;
+_a = SileroVadModel;
+SileroVadModel.new = async (ort, pathToModel) => {
+    const model = new _a(ort, pathToModel);
+    await model.init();
+    return model;
+};
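Unlike the v4 path, the v5 model keeps a single state tensor of shape [2, 1, 128], validates the session's input names (input, state, sr) and output names (output, stateN) up front, and writes the returned stateN back before the next frame. A rough sketch of one inference step against that interface (the model path is an assumption; the 512-sample frame matches what vadv5/createVad.js feeds it):

```ts
import { readFileSync } from "node:fs";
import * as ort from "onnxruntime-node";

const MODEL_PATH = "./silero_vad_v5.onnx"; // assumed location of the v5 model

async function main() {
  const session = await ort.InferenceSession.create(readFileSync(MODEL_PATH).buffer, {
    interOpNumThreads: 1,
    intraOpNumThreads: 1,
    enableCpuMemArena: false
  });

  // Single combined recurrent state of shape [2, 1, 128], zeroed between utterances.
  let state = new ort.Tensor("float32", new Float32Array(2 * 1 * 128), [2, 1, 128]);
  const sr = new ort.Tensor("int64", [BigInt(16000)], []);

  // One 32 ms frame at 16 kHz (512 samples); silence here for illustration.
  const frame = new Float32Array(512);
  const input = new ort.Tensor("float32", frame, [1, frame.length]);

  const out = await session.run({ input, state, sr });
  state = out.stateN as ort.Tensor; // feed back into the next run()
  const [isSpeech] = out.output.data as Float32Array;
  console.log({ isSpeech });
}

main().catch(console.error);
```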
package/dist/vadv5/createVad.js
ADDED
@@ -0,0 +1,126 @@
+"use strict";
+var __createBinding = (this && this.__createBinding) || (Object.create ? (function(o, m, k, k2) {
+    if (k2 === undefined) k2 = k;
+    var desc = Object.getOwnPropertyDescriptor(m, k);
+    if (!desc || ("get" in desc ? !m.__esModule : desc.writable || desc.configurable)) {
+        desc = { enumerable: true, get: function() { return m[k]; } };
+    }
+    Object.defineProperty(o, k2, desc);
+}) : (function(o, m, k, k2) {
+    if (k2 === undefined) k2 = k;
+    o[k2] = m[k];
+}));
+var __setModuleDefault = (this && this.__setModuleDefault) || (Object.create ? (function(o, v) {
+    Object.defineProperty(o, "default", { enumerable: true, value: v });
+}) : function(o, v) {
+    o["default"] = v;
+});
+var __importStar = (this && this.__importStar) || (function () {
+    var ownKeys = function(o) {
+        ownKeys = Object.getOwnPropertyNames || function (o) {
+            var ar = [];
+            for (var k in o) if (Object.prototype.hasOwnProperty.call(o, k)) ar[ar.length] = k;
+            return ar;
+        };
+        return ownKeys(o);
+    };
+    return function (mod) {
+        if (mod && mod.__esModule) return mod;
+        var result = {};
+        if (mod != null) for (var k = ownKeys(mod), i = 0; i < k.length; i++) if (k[i] !== "default") __createBinding(result, mod, k[i]);
+        __setModuleDefault(result, mod);
+        return result;
+    };
+})();
+Object.defineProperty(exports, "__esModule", { value: true });
+exports.createVad = createVad;
+/**
+ * Copyright (C) 2025 by Fonoster Inc (https://fonoster.com)
+ * http://github.com/fonoster/fonoster
+ *
+ * This file is part of Fonoster
+ *
+ * Licensed under the MIT License (the "License");
+ * you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * https://opensource.org/licenses/MIT
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+const path_1 = require("path");
+const logger_1 = require("@fonoster/logger");
+const ort = __importStar(require("onnxruntime-node"));
+const chunkToFloat32Array_1 = require("../vad/chunkToFloat32Array");
+const SileroVadModel_1 = require("./SileroVadModel");
+const logger = (0, logger_1.getLogger)({ service: "autopilot", filePath: __filename });
+const FULL_FRAME_SIZE = 1024; // 64ms @ 16kHz
+const BUFFER_SIZE = 512; // 32ms @ 16kHz
+async function createVad(params) {
+    const { pathToModel, activationThreshold, deactivationThreshold, debounceFrames } = params;
+    const effectivePath = pathToModel || (0, path_1.join)(__dirname, "..", "..", "silero_vad_v5.onnx");
+    const ortAdapter = {
+        InferenceSession: {
+            create: ort.InferenceSession.create.bind(ort.InferenceSession)
+        },
+        Tensor: ort.Tensor
+    };
+    const silero = await SileroVadModel_1.SileroVadModel.new(ortAdapter, effectivePath);
+    let sampleBuffer = [];
+    let isSpeechActive = false;
+    let framesSinceStateChange = 0;
+    // Reset internal state after a state change.
+    const resetState = () => {
+        isSpeechActive = false;
+        framesSinceStateChange = 0;
+        // Clear any pending audio samples to avoid using outdated values.
+        sampleBuffer = [];
+        silero.resetState();
+        logger.silly("State reset -- sampleBuffer cleared");
+    };
+    return async function process(chunk, callback) {
+        // Convert the incoming chunk to normalized Float32 samples (using chunkToFloat32Array)
+        const float32Array = (0, chunkToFloat32Array_1.chunkToFloat32Array)(chunk);
+        sampleBuffer.push(...float32Array);
+        // Wait until we've collected a full frame worth of samples.
+        while (sampleBuffer.length >= FULL_FRAME_SIZE) {
+            const fullFrame = sampleBuffer.slice(0, FULL_FRAME_SIZE);
+            sampleBuffer = sampleBuffer.slice(FULL_FRAME_SIZE);
+            // Use the last BUFFER_SIZE samples from the full frame.
+            const frame = fullFrame.slice(fullFrame.length - BUFFER_SIZE);
+            const result = await silero.process(new Float32Array(frame));
+            const rawScore = result.isSpeech;
+            logger.silly("Frame processing", {
+                rawScore,
+                isSpeechActive,
+                framesSinceStateChange,
+                pendingSamples: sampleBuffer.length
+            });
+            framesSinceStateChange++;
+            if (isSpeechActive) {
+                // If already in speech, check if the score has dropped below deactivationThreshold
+                if (rawScore < deactivationThreshold &&
+                    framesSinceStateChange >= debounceFrames) {
+                    callback("SPEECH_END");
+                    resetState();
+                    logger.silly("Speech end detected", { rawScore });
+                    continue;
+                }
+            }
+            else {
+                // If currently not speaking, check if the score is above activationThreshold
+                if (rawScore > activationThreshold &&
+                    framesSinceStateChange >= debounceFrames) {
+                    isSpeechActive = true;
+                    framesSinceStateChange = 0;
+                    callback("SPEECH_START");
+                    logger.silly("Speech start detected", { rawScore });
+                }
+            }
+        }
+    };
+}
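Apart from the frame math (1024 buffered samples is 64 ms at 16 kHz, and the trailing 512 samples are the 32 ms window handed to the v5 model), the surrounding logic is the same activation/deactivation hysteresis used in the v4 path: a score above activationThreshold starts speech, a score below deactivationThreshold ends it, and either transition is suppressed until debounceFrames frames have elapsed since the last change. A condensed sketch of just that state machine (the threshold and debounce values below are placeholders, not package defaults):

```ts
type VadEvent = "SPEECH_START" | "SPEECH_END";

// Placeholder tuning values; the real ones come from the conversation's VAD settings.
const activationThreshold = 0.7;
const deactivationThreshold = 0.3;
const debounceFrames = 3;

let isSpeechActive = false;
let framesSinceStateChange = 0;

// Call once per frame with the model's isSpeech score in [0, 1].
function onFrameScore(rawScore: number, callback: (event: VadEvent) => void): void {
  framesSinceStateChange++;
  if (isSpeechActive) {
    if (rawScore < deactivationThreshold && framesSinceStateChange >= debounceFrames) {
      isSpeechActive = false;
      framesSinceStateChange = 0;
      callback("SPEECH_END");
    }
  } else if (rawScore > activationThreshold && framesSinceStateChange >= debounceFrames) {
    isSpeechActive = true;
    framesSinceStateChange = 0;
    callback("SPEECH_START");
  }
}
```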
package/dist/vadv5/index.d.ts
ADDED
@@ -0,0 +1,20 @@
+/**
+ * Copyright (C) 2025 by Fonoster Inc (https://fonoster.com)
+ * http://github.com/fonoster/fonoster
+ *
+ * This file is part of Fonoster
+ *
+ * Licensed under the MIT License (the "License");
+ * you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * https://opensource.org/licenses/MIT
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+export * from "./SileroVad";
+export * from "./types";
package/dist/vadv5/index.js
ADDED
@@ -0,0 +1,36 @@
+"use strict";
+var __createBinding = (this && this.__createBinding) || (Object.create ? (function(o, m, k, k2) {
+    if (k2 === undefined) k2 = k;
+    var desc = Object.getOwnPropertyDescriptor(m, k);
+    if (!desc || ("get" in desc ? !m.__esModule : desc.writable || desc.configurable)) {
+        desc = { enumerable: true, get: function() { return m[k]; } };
+    }
+    Object.defineProperty(o, k2, desc);
+}) : (function(o, m, k, k2) {
+    if (k2 === undefined) k2 = k;
+    o[k2] = m[k];
+}));
+var __exportStar = (this && this.__exportStar) || function(m, exports) {
+    for (var p in m) if (p !== "default" && !Object.prototype.hasOwnProperty.call(exports, p)) __createBinding(exports, m, p);
+};
+Object.defineProperty(exports, "__esModule", { value: true });
+/**
+ * Copyright (C) 2025 by Fonoster Inc (https://fonoster.com)
+ * http://github.com/fonoster/fonoster
+ *
+ * This file is part of Fonoster
+ *
+ * Licensed under the MIT License (the "License");
+ * you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * https://opensource.org/licenses/MIT
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+__exportStar(require("./SileroVad"), exports);
+__exportStar(require("./types"), exports);
package/dist/vadv5/types.d.ts
ADDED
@@ -0,0 +1,54 @@
+/**
+ * Copyright (C) 2025 by Fonoster Inc (https://fonoster.com)
+ * http://github.com/fonoster/fonoster
+ *
+ * This file is part of Fonoster
+ *
+ * Licensed under the MIT License (the "License");
+ * you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * https://opensource.org/licenses/MIT
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+type VadEvent = "SPEECH_START" | "SPEECH_END";
+type Vad = {
+    processChunk: (chunk: Uint8Array, callback: (event: VadEvent) => void) => void;
+};
+export interface SpeechProbabilities {
+    notSpeech: number;
+    isSpeech: number;
+}
+export interface ONNXRuntimeAPI {
+    InferenceSession: {
+        create: (modelPath: ArrayBuffer | string, options?: {
+            interOpNumThreads: number;
+            intraOpNumThreads: number;
+            enableCpuMemArena: boolean;
+        }) => Promise<ONNXSession>;
+    };
+    Tensor: new (type: string, data: Float32Array | bigint[], dims: number[]) => ONNXTensor;
+}
+export interface ONNXSession {
+    run: (feeds: {
+        [key: string]: ONNXTensor;
+    }) => Promise<{
+        output: {
+            data: Float32Array;
+        };
+        stateN: ONNXTensor;
+    }>;
+    inputNames: string[];
+    outputNames: string[];
+}
+export interface ONNXTensor {
+    data: Float32Array | bigint[];
+    dims: number[];
+    type: string;
+}
+export { Vad, VadEvent };
package/package.json
CHANGED
@@ -1,6 +1,6 @@
 {
   "name": "@fonoster/autopilot",
-  "version": "0.9.
+  "version": "0.9.42",
   "description": "Voice AI for the Fonoster platform",
   "author": "Pedro Sanders <psanders@fonoster.com>",
   "homepage": "https://github.com/fonoster/fonoster#readme",
@@ -33,11 +33,11 @@
   },
   "dependencies": {
     "@aws-sdk/client-s3": "^3.712.0",
-    "@fonoster/common": "^0.9.
-    "@fonoster/logger": "^0.9.
-    "@fonoster/sdk": "^0.9.
-    "@fonoster/types": "^0.9.
-    "@fonoster/voice": "^0.9.
+    "@fonoster/common": "^0.9.42",
+    "@fonoster/logger": "^0.9.42",
+    "@fonoster/sdk": "^0.9.42",
+    "@fonoster/types": "^0.9.42",
+    "@fonoster/voice": "^0.9.42",
     "@langchain/community": "^0.3.32",
     "@langchain/core": "^0.3.40",
     "@langchain/groq": "^0.1.3",
@@ -56,5 +56,5 @@
     "xstate": "^5.17.3",
     "zod": "^3.23.8"
   },
-  "gitHead": "
+  "gitHead": "1ceb629d4fd0035bf56d24580e5c6c3d21568293"
 }