@fonoster/autopilot 0.9.42 → 0.9.43

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -2,8 +2,7 @@ import { AutopilotParams } from "./types";
  declare class Autopilot {
  private readonly params;
  private readonly actor;
- private readonly vadWorker;
- private vadWorkerReady;
+ private vad;
  constructor(params: AutopilotParams);
  start(): Promise<void>;
  stop(): void;
package/dist/Autopilot.js CHANGED
@@ -23,31 +23,15 @@ exports.Autopilot = void 0;
  * limitations under the License.
  */
  const path_1 = __importDefault(require("path"));
- const worker_threads_1 = require("worker_threads");
  const logger_1 = require("@fonoster/logger");
  const xstate_1 = require("xstate");
  const machine_1 = require("./machine/machine");
+ const vad_1 = require("./vad");
  const logger = (0, logger_1.getLogger)({ service: "autopilot", filePath: __filename });
  class Autopilot {
  constructor(params) {
  this.params = params;
  const { voice, languageModel, conversationSettings } = this.params;
- const vadWorkerPath = path_1.default.resolve(__dirname, "../dist", "./vadWorker");
- this.vadWorker = new worker_threads_1.Worker(vadWorkerPath, {
- workerData: conversationSettings.vad
- });
- // Add initialization promise
- this.vadWorkerReady = new Promise((resolve, reject) => {
- console.log("waiting for vad worker to be ready");
- this.vadWorker.once("message", (message) => {
- console.log("vad worker is ready");
- if (message === "VAD_READY") {
- console.log("vad worker is ready resolving promise");
- resolve();
- }
- });
- this.vadWorker.once("error", reject);
- });
  this.actor = (0, xstate_1.createActor)(machine_1.machine, {
  input: {
  conversationSettings,
@@ -57,22 +41,23 @@ class Autopilot {
  });
  }
  async start() {
- // Wait for VAD worker to be ready before proceeding
- await this.vadWorkerReady;
+ const vadParams = this.params.conversationSettings.vad;
+ const sileroVad = new vad_1.SileroVad({
+ pathToModel: vadParams.pathToModel ||
+ path_1.default.resolve(__dirname, "..", "silero_vad_v5.onnx"),
+ activationThreshold: vadParams.activationThreshold,
+ deactivationThreshold: vadParams.deactivationThreshold,
+ debounceFrames: vadParams.debounceFrames
+ });
+ await sileroVad.init();
+ this.vad = sileroVad;
  await this.setupVoiceStream();
  await this.setupSpeechGathering();
  this.actor.start();
+ logger.verbose("autopilot is ready");
  this.actor.subscribe((state) => {
  logger.verbose("actor's new state is", { state: state.value });
  });
- this.vadWorker.on("error", (err) => {
- logger.error("vad worker error", err);
- });
- this.vadWorker.on("exit", (code) => {
- if (code !== 0) {
- logger.error("vad worker stopped with exit code", { code });
- }
- });
  }
  stop() {
  logger.verbose("stopping autopilot");
@@ -82,19 +67,23 @@ class Autopilot {
  const { voice } = this.params;
  const stream = await voice.stream();
  stream.onData(this.handleVoicePayload.bind(this));
- this.vadWorker.on("message", (event) => {
- logger.verbose("received speech event from vad", { event });
- if (event === "SPEECH_START") {
- this.actor.send({ type: "SPEECH_START" });
- }
- else if (event === "SPEECH_END") {
- this.actor.send({ type: "SPEECH_END" });
- }
- });
  }
  handleVoicePayload(chunk) {
  try {
- this.vadWorker.postMessage(chunk);
+ if (!this.vad) {
+ logger.error("VAD not initialized");
+ return;
+ }
+ // Process the audio chunk with the VAD directly
+ this.vad.processChunk(chunk, (event) => {
+ logger.verbose("received speech event from vad", { event });
+ if (event === "SPEECH_START") {
+ this.actor.send({ type: "SPEECH_START" });
+ }
+ else if (event === "SPEECH_END") {
+ this.actor.send({ type: "SPEECH_END" });
+ }
+ });
  }
  catch (err) {
  logger.error("an error occurred while processing vad", err);
package/dist/envs.d.ts CHANGED
@@ -6,7 +6,6 @@ export declare const KNOWLEDGE_BASE_ENABLED: boolean;
  export declare const NODE_ENV: string;
  export declare const UNSTRUCTURED_API_KEY: string;
  export declare const UNSTRUCTURED_API_URL: string;
- export declare const SILERO_VAD_VERSION: string;
  export declare const CONVERSATION_PROVIDER: string;
  export declare const CONVERSATION_PROVIDER_FILE: string;
  export declare const APISERVER_ENDPOINT: string;
package/dist/envs.js CHANGED
@@ -3,7 +3,7 @@ var __importDefault = (this && this.__importDefault) || function (mod) {
  return (mod && mod.__esModule) ? mod : { "default": mod };
  };
  Object.defineProperty(exports, "__esModule", { value: true });
- exports.SKIP_IDENTITY = exports.OPENAI_API_KEY = exports.INTEGRATIONS_FILE = exports.APISERVER_ENDPOINT = exports.CONVERSATION_PROVIDER_FILE = exports.CONVERSATION_PROVIDER = exports.SILERO_VAD_VERSION = exports.UNSTRUCTURED_API_URL = exports.UNSTRUCTURED_API_KEY = exports.NODE_ENV = exports.KNOWLEDGE_BASE_ENABLED = exports.AWS_S3_SECRET_ACCESS_KEY = exports.AWS_S3_REGION = exports.AWS_S3_ENDPOINT = exports.AWS_S3_ACCESS_KEY_ID = void 0;
+ exports.SKIP_IDENTITY = exports.OPENAI_API_KEY = exports.INTEGRATIONS_FILE = exports.APISERVER_ENDPOINT = exports.CONVERSATION_PROVIDER_FILE = exports.CONVERSATION_PROVIDER = exports.UNSTRUCTURED_API_URL = exports.UNSTRUCTURED_API_KEY = exports.NODE_ENV = exports.KNOWLEDGE_BASE_ENABLED = exports.AWS_S3_SECRET_ACCESS_KEY = exports.AWS_S3_REGION = exports.AWS_S3_ENDPOINT = exports.AWS_S3_ACCESS_KEY_ID = void 0;
  /**
  * Copyright (C) 2025 by Fonoster Inc (https://fonoster.com)
  * http://github.com/fonoster/fonoster
@@ -39,7 +39,6 @@ exports.NODE_ENV = e.NODE_ENV || "production";
  exports.UNSTRUCTURED_API_KEY = e.AUTOPILOT_UNSTRUCTURED_API_KEY ?? "";
  exports.UNSTRUCTURED_API_URL = e.AUTOPILOT_UNSTRUCTURED_API_URL ??
  "https://api.unstructuredapp.io/general/v0/general";
- exports.SILERO_VAD_VERSION = e.AUTOPILOT_SILERO_VAD_VERSION ?? "v5";
  exports.CONVERSATION_PROVIDER = e.AUTOPILOT_CONVERSATION_PROVIDER
  ? e.AUTOPILOT_CONVERSATION_PROVIDER
  : types_1.ConversationProvider.FILE;
@@ -54,10 +53,6 @@ exports.INTEGRATIONS_FILE = e.AUTOPILOT_INTEGRATIONS_FILE
  : "/opt/fonoster/integrations.json";
  exports.OPENAI_API_KEY = e.AUTOPILOT_OPENAI_API_KEY;
  exports.SKIP_IDENTITY = e.AUTOPILOT_SKIP_IDENTITY === "true";
- if (exports.SILERO_VAD_VERSION !== "v4" && exports.SILERO_VAD_VERSION !== "v5") {
- console.error("SILERO_VAD_VERSION must be set to 'v4' or 'v5'");
- process.exit(1);
- }
  if (exports.CONVERSATION_PROVIDER.toLocaleLowerCase() !== types_1.ConversationProvider.API &&
  exports.CONVERSATION_PROVIDER.toLocaleLowerCase() !== types_1.ConversationProvider.FILE) {
  console.error("CONVERSATION_PROVIDER must be set to 'api' or 'file'");
@@ -1,14 +1,14 @@
  import { Vad } from "./types";
  declare class SileroVad implements Vad {
  private vad;
- private params;
+ private readonly params;
  constructor(params: {
- pathToModel?: string;
+ pathToModel: string;
  activationThreshold: number;
  deactivationThreshold: number;
  debounceFrames: number;
  });
- pathToModel?: string;
+ pathToModel: string;
  activationThreshold: number;
  deactivationThreshold: number;
  debounceFrames: number;
@@ -24,7 +24,7 @@ const createVad_1 = require("./createVad");
  const logger = (0, logger_1.getLogger)({ service: "autopilot", filePath: __filename });
  class SileroVad {
  constructor(params) {
- logger.verbose("starting instance of silero vad v4", { ...params });
+ logger.verbose("starting instance of silero vad v5", { ...params });
  this.params = params;
  }
  async init() {
@@ -32,7 +32,7 @@ class SileroVad {
  }
  processChunk(data, callback) {
  if (!this.vad) {
- throw new Error("VAD not initialized)");
+ throw new Error("VAD not initialized");
  }
  this.vad(data, callback);
  }
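
The SileroVad wrapper above is now the package's only VAD implementation (the log line previously said "silero vad v4"). Its contract: init() must resolve before processChunk() is called, otherwise it throws "VAD not initialized". A short, hedged usage sketch; the model path and thresholds below are placeholders, not package defaults:

```ts
import { SileroVad } from "./vad/SileroVad";

async function runVad(chunks: AsyncIterable<Uint8Array>) {
  const vad = new SileroVad({
    pathToModel: "./silero_vad_v5.onnx", // placeholder path to the bundled model
    activationThreshold: 0.8,            // placeholder thresholds
    deactivationThreshold: 0.5,
    debounceFrames: 3
  });
  await vad.init(); // calling processChunk before this throws "VAD not initialized"

  for await (const chunk of chunks) {
    vad.processChunk(chunk, (event) => {
      // event is "SPEECH_START" or "SPEECH_END"
      console.log("vad event:", event);
    });
  }
}
```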
@@ -1,15 +1,14 @@
  import { ONNXRuntimeAPI, SpeechProbabilities } from "./types";
  declare class SileroVadModel {
- private ort;
- private pathToModel;
- _session: any;
- _h: unknown;
- _c: unknown;
- _sr: unknown;
+ private readonly ort;
+ private readonly pathToModel;
+ private _session;
+ private _state;
+ private _sr;
  constructor(ort: ONNXRuntimeAPI, pathToModel: string);
- static new: (ort: ONNXRuntimeAPI, pathToModel: string) => Promise<SileroVadModel>;
+ static readonly new: (ort: ONNXRuntimeAPI, pathToModel: string) => Promise<SileroVadModel>;
  init(): Promise<void>;
+ resetState: () => void;
  process(audioFrame: Float32Array): Promise<SpeechProbabilities>;
- resetState(): void;
  }
  export { SileroVadModel };
@@ -21,40 +21,58 @@ exports.SileroVadModel = void 0;
  * limitations under the License.
  */
  const fs_1 = require("fs");
+ const SAMPLE_RATE = 16000;
+ function getNewState(ortInstance) {
+ return new ortInstance.Tensor("float32", new Float32Array(2 * 1 * 128), // Use Float32Array for consistency
+ [2, 1, 128]);
+ }
  class SileroVadModel {
  constructor(ort, pathToModel) {
  this.ort = ort;
  this.pathToModel = pathToModel;
+ this.resetState = () => {
+ this._state = getNewState(this.ort);
+ };
  }
  async init() {
  const modelArrayBuffer = (0, fs_1.readFileSync)(this.pathToModel).buffer;
- this._session = await this.ort.InferenceSession.create(modelArrayBuffer);
- this._sr = new this.ort.Tensor("int64", [16000n]);
- this.resetState();
+ const sessionOption = {
+ interOpNumThreads: 1,
+ intraOpNumThreads: 1,
+ enableCpuMemArena: false
+ };
+ this._session = await this.ort.InferenceSession.create(modelArrayBuffer, sessionOption);
+ // Validate model inputs/outputs
+ const requiredInputs = ["input", "state", "sr"];
+ for (const name of requiredInputs) {
+ if (!this._session.inputNames.includes(name)) {
+ throw new Error(`Model is missing expected input "${name}"`);
+ }
+ }
+ if (!this._session.outputNames.includes("output") ||
+ !this._session.outputNames.includes("stateN")) {
+ throw new Error("Model is missing expected outputs");
+ }
+ // Use BigInt for sample rate tensor
+ this._sr = new this.ort.Tensor("int64", [BigInt(SAMPLE_RATE)], []);
+ this._state = getNewState(this.ort);
  }
  async process(audioFrame) {
- const t = new this.ort.Tensor("float32", audioFrame, [
+ const inputTensor = new this.ort.Tensor("float32", audioFrame, [
  1,
  audioFrame.length
  ]);
- const inputs = {
- input: t,
- h: this._h,
- c: this._c,
+ const feeds = {
+ input: inputTensor,
+ state: this._state,
  sr: this._sr
  };
- const out = await this._session.run(inputs);
- this._h = out.hn;
- this._c = out.cn;
+ const out = await this._session.run(feeds);
+ this._state = out.stateN;
  const [isSpeech] = out.output.data;
  const notSpeech = 1 - isSpeech;
  return { notSpeech, isSpeech };
  }
- resetState() {
- const zeroes = Array(2 * 64).fill(0);
- this._h = new this.ort.Tensor("float32", zeroes, [2, 1, 64]);
- this._c = new this.ort.Tensor("float32", zeroes, [2, 1, 64]);
- }
  }
  exports.SileroVadModel = SileroVadModel;
  _a = SileroVadModel;
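
The SileroVadModel rewrite tracks the Silero v4-to-v5 interface change: v4 fed two recurrent tensors h and c shaped [2, 1, 64] and read hn/cn back, while v5 keeps a single state tensor shaped [2, 1, 128] and uses the inputs input/state/sr and the outputs output/stateN. A hedged sketch of one inference step directly against onnxruntime-node (model path assumed; in real use the session and state are created once and reused, which is what the class above does):

```ts
import * as ort from "onnxruntime-node";

// frame: 512 audio samples at 16 kHz (the window size createVad feeds the model)
async function speechProbability(frame: Float32Array): Promise<number> {
  const session = await ort.InferenceSession.create("./silero_vad_v5.onnx", {
    interOpNumThreads: 1,
    intraOpNumThreads: 1,
    enableCpuMemArena: false
  });

  // v5 keeps its recurrent memory in one [2, 1, 128] "state" tensor
  // (v4 used separate h/c tensors shaped [2, 1, 64]).
  const state = new ort.Tensor("float32", new Float32Array(2 * 1 * 128), [2, 1, 128]);
  const sr = new ort.Tensor("int64", [BigInt(16000)], []);
  const input = new ort.Tensor("float32", frame, [1, frame.length]);

  const out = await session.run({ input, state, sr });
  // out.stateN would be fed back as "state" on the next frame in real use.
  return (out.output.data as Float32Array)[0]; // speech probability in [0, 1]
}
```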
@@ -1,3 +1,3 @@
  import { VadParams } from "./types";
  declare function createVad(params: VadParams): Promise<(chunk: Uint8Array, callback: (event: "SPEECH_START" | "SPEECH_END") => void) => Promise<void>>;
- export { createVad };
+ export { createVad, VadParams };
@@ -58,47 +58,61 @@ const ort = __importStar(require("onnxruntime-node"));
  const chunkToFloat32Array_1 = require("./chunkToFloat32Array");
  const SileroVadModel_1 = require("./SileroVadModel");
  const logger = (0, logger_1.getLogger)({ service: "autopilot", filePath: __filename });
- const FULL_FRAME_SIZE = 1600; // Equivalent to 100ms @ 16kHz
- const FRAME_SIZE = 480; // Use last 30ms from the full frame for VAD processing
+ const FULL_FRAME_SIZE = 1024; // 64ms @ 16kHz
+ const BUFFER_SIZE = 512; // 32ms @ 16kHz
  async function createVad(params) {
  const { pathToModel, activationThreshold, deactivationThreshold, debounceFrames } = params;
- const effectivePath = pathToModel || (0, path_1.join)(__dirname, "..", "..", "silero_vad.onnx");
- const silero = await SileroVadModel_1.SileroVadModel.new(ort, effectivePath);
- let audioBuffer = [];
+ const effectivePath = pathToModel || (0, path_1.join)(__dirname, "..", "..", "silero_vad_v5.onnx");
+ const ortAdapter = {
+ InferenceSession: {
+ create: ort.InferenceSession.create.bind(ort.InferenceSession)
+ },
+ Tensor: ort.Tensor
+ };
+ const silero = await SileroVadModel_1.SileroVadModel.new(ortAdapter, effectivePath);
+ let sampleBuffer = [];
  let isSpeechActive = false;
  let framesSinceStateChange = 0;
+ // Reset internal state after a state change.
+ const resetState = () => {
+ isSpeechActive = false;
+ framesSinceStateChange = 0;
+ // Clear any pending audio samples to avoid using outdated values.
+ sampleBuffer = [];
+ silero.resetState();
+ logger.silly("State reset -- sampleBuffer cleared");
+ };
  return async function process(chunk, callback) {
+ // Convert the incoming chunk to normalized Float32 samples (using chunkToFloat32Array)
  const float32Array = (0, chunkToFloat32Array_1.chunkToFloat32Array)(chunk);
- audioBuffer.push(...float32Array);
- // Process full frames from the buffer
- while (audioBuffer.length >= FULL_FRAME_SIZE) {
- // Extract one full frame worth of samples
- const fullFrame = audioBuffer.slice(0, FULL_FRAME_SIZE);
- audioBuffer = audioBuffer.slice(FULL_FRAME_SIZE);
- // Use the last FRAME_SIZE samples from the full frame for VAD processing
- const frame = fullFrame.slice(fullFrame.length - FRAME_SIZE);
+ sampleBuffer.push(...float32Array);
+ // Wait until we've collected a full frame worth of samples.
+ while (sampleBuffer.length >= FULL_FRAME_SIZE) {
+ const fullFrame = sampleBuffer.slice(0, FULL_FRAME_SIZE);
+ sampleBuffer = sampleBuffer.slice(FULL_FRAME_SIZE);
+ // Use the last BUFFER_SIZE samples from the full frame.
+ const frame = fullFrame.slice(fullFrame.length - BUFFER_SIZE);
  const result = await silero.process(new Float32Array(frame));
  const rawScore = result.isSpeech;
  logger.silly("Frame processing", {
  rawScore,
  isSpeechActive,
  framesSinceStateChange,
- pendingSamples: audioBuffer.length
+ pendingSamples: sampleBuffer.length
  });
  framesSinceStateChange++;
  if (isSpeechActive) {
- // If currently in speech, check if the score has dropped below the deactivation threshold
+ // If already in speech, check if the score has dropped below deactivationThreshold
  if (rawScore < deactivationThreshold &&
  framesSinceStateChange >= debounceFrames) {
- isSpeechActive = false;
  callback("SPEECH_END");
- silero.resetState(); // Reset VAD state after speech ends
- framesSinceStateChange = 0;
+ resetState();
  logger.silly("Speech end detected", { rawScore });
+ continue;
  }
  }
  else {
- // If not currently in speech, check if the score exceeds the activation threshold
+ // If currently not speaking, check if the score is above activationThreshold
  if (rawScore > activationThreshold &&
  framesSinceStateChange >= debounceFrames) {
  isSpeechActive = true;
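
The frame sizes also changed with the v5 model: the buffer is now drained in 1024-sample full frames (1024 / 16000 s ≈ 64 ms) and the last 512 samples (32 ms) of each are scored, instead of the previous 1600/480 split, and the new resetState() clears the sample buffer and debounce counter after each SPEECH_END. The activation/deactivation thresholds plus debounceFrames form a small hysteresis; a hedged sketch of that decision step as a pure function (names are illustrative, the real logic lives in the closure above):

```ts
type DetectorState = { speaking: boolean; framesSinceChange: number };
type Options = {
  activationThreshold: number;
  deactivationThreshold: number;
  debounceFrames: number;
};

// One decision per scored frame: `score` is the model output in [0, 1].
function step(
  state: DetectorState,
  score: number,
  opts: Options
): { state: DetectorState; event?: "SPEECH_START" | "SPEECH_END" } {
  const framesSinceChange = state.framesSinceChange + 1;
  if (state.speaking) {
    // Leave the speaking state only when the score falls below the lower
    // threshold and the previous change is at least debounceFrames old.
    if (score < opts.deactivationThreshold && framesSinceChange >= opts.debounceFrames) {
      return { state: { speaking: false, framesSinceChange: 0 }, event: "SPEECH_END" };
    }
  } else if (score > opts.activationThreshold && framesSinceChange >= opts.debounceFrames) {
    return { state: { speaking: true, framesSinceChange: 0 }, event: "SPEECH_START" };
  }
  return { state: { speaking: state.speaking, framesSinceChange } };
}
```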
@@ -26,19 +26,35 @@ type VadParams = {
  deactivationThreshold: number;
  debounceFrames: number;
  };
- type SpeechProbabilities = {
+ export interface SpeechProbabilities {
  notSpeech: number;
  isSpeech: number;
- };
- type ONNXRuntimeAPI = {
+ }
+ export interface ONNXRuntimeAPI {
  InferenceSession: {
- create(modelArrayBuffer: ArrayBuffer): Promise<unknown>;
- };
- Tensor: {
- new (type: "int64", dims: [16000n]): unknown;
- new (type: "float32", data: number[], dims: [2, 1, 64]): unknown;
- new (type: "float32", data: Float32Array, dims: [1, number]): unknown;
- new (type: "float32", data: Float32Array, dims: [1, number]): unknown;
+ create: (modelPath: ArrayBuffer | string, options?: {
+ interOpNumThreads: number;
+ intraOpNumThreads: number;
+ enableCpuMemArena: boolean;
+ }) => Promise<ONNXSession>;
  };
- };
- export { ONNXRuntimeAPI, SpeechProbabilities, Vad, VadParams, VadEvent };
+ Tensor: new (type: string, data: Float32Array | bigint[], dims: number[]) => ONNXTensor;
+ }
+ export interface ONNXSession {
+ run: (feeds: {
+ [key: string]: ONNXTensor;
+ }) => Promise<{
+ output: {
+ data: Float32Array;
+ };
+ stateN: ONNXTensor;
+ }>;
+ inputNames: string[];
+ outputNames: string[];
+ }
+ export interface ONNXTensor {
+ data: Float32Array | bigint[];
+ dims: number[];
+ type: string;
+ }
+ export { Vad, VadEvent, VadParams };
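
ONNXRuntimeAPI, ONNXSession, and ONNXTensor are now narrow structural interfaces, which is what lets createVad() pass a thin ortAdapter around onnxruntime-node and also makes SileroVadModel testable without a real model. A hedged sketch of a stub that satisfies the new shapes (the import path and the fixed 0.9 score are assumptions for illustration):

```ts
import type { ONNXRuntimeAPI, ONNXSession, ONNXTensor } from "./vad/types";

// Minimal in-memory tensor matching the ONNXTensor shape.
class FakeTensor implements ONNXTensor {
  constructor(
    public type: string,
    public data: Float32Array | bigint[],
    public dims: number[]
  ) {}
}

// A fake runtime that always reports a 0.9 speech probability and echoes the
// recurrent state back, enough to drive SileroVadModel in a unit test.
const fakeOrt: ONNXRuntimeAPI = {
  InferenceSession: {
    create: async (): Promise<ONNXSession> => ({
      inputNames: ["input", "state", "sr"],
      outputNames: ["output", "stateN"],
      run: async (feeds) => ({
        output: { data: new Float32Array([0.9]) },
        stateN: feeds.state
      })
    })
  },
  Tensor: FakeTensor
};
```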
package/dist/vadWorker.js CHANGED
@@ -20,18 +20,11 @@ Object.defineProperty(exports, "__esModule", { value: true });
  */
  const path_1 = require("path");
  const worker_threads_1 = require("worker_threads");
- const envs_1 = require("./envs");
  const SileroVad_1 = require("./vad/SileroVad");
- const SileroVad_2 = require("./vadv5/SileroVad");
- const vad = envs_1.SILERO_VAD_VERSION === "v4"
- ? new SileroVad_1.SileroVad({
- ...worker_threads_1.workerData,
- pathToModel: (0, path_1.join)(__dirname, "..", "silero_vad.onnx")
- })
- : new SileroVad_2.SileroVad({
- ...worker_threads_1.workerData,
- pathToModel: (0, path_1.join)(__dirname, "..", "silero_vad_v5.onnx")
- });
+ const vad = new SileroVad_1.SileroVad({
+ ...worker_threads_1.workerData,
+ pathToModel: (0, path_1.join)(__dirname, "..", "silero_vad_v5.onnx")
+ });
  vad.init().then(() => {
  // Send ready message to parent
  worker_threads_1.parentPort?.postMessage("VAD_READY");
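
vadWorker.js is kept but simplified: it always loads the SileroVad from ./vad/SileroVad with the bundled silero_vad_v5.onnx (the SILERO_VAD_VERSION switch is gone) and still posts "VAD_READY" to its parent. If the worker is driven from a parent thread, the handshake looks roughly like the code Autopilot used before this release; a hedged sketch with illustrative paths and settings:

```ts
import path from "path";
import { Worker } from "worker_threads";

// Resolves once the worker has finished vad.init() and posted "VAD_READY".
function startVadWorker(vadSettings: {
  activationThreshold: number;
  deactivationThreshold: number;
  debounceFrames: number;
}): Promise<Worker> {
  // workerData becomes the SileroVad constructor params inside the worker.
  const worker = new Worker(path.resolve(__dirname, "vadWorker"), {
    workerData: vadSettings
  });
  return new Promise((resolve, reject) => {
    worker.once("message", (message) => {
      if (message === "VAD_READY") resolve(worker);
    });
    worker.once("error", reject);
  });
}
```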
package/package.json CHANGED
@@ -1,6 +1,6 @@
  {
  "name": "@fonoster/autopilot",
- "version": "0.9.42",
+ "version": "0.9.43",
  "description": "Voice AI for the Fonoster platform",
  "author": "Pedro Sanders <psanders@fonoster.com>",
  "homepage": "https://github.com/fonoster/fonoster#readme",
@@ -56,5 +56,5 @@
  "xstate": "^5.17.3",
  "zod": "^3.23.8"
  },
- "gitHead": "1ceb629d4fd0035bf56d24580e5c6c3d21568293"
+ "gitHead": "7fa2745908c3ced8a5401a48b26cf26c8fc09c2c"
  }
@@ -1,18 +0,0 @@
- import { Vad } from "./types";
- declare class SileroVad implements Vad {
- private vad;
- private readonly params;
- constructor(params: {
- pathToModel: string;
- activationThreshold: number;
- deactivationThreshold: number;
- debounceFrames: number;
- });
- pathToModel: string;
- activationThreshold: number;
- deactivationThreshold: number;
- debounceFrames: number;
- init(): Promise<void>;
- processChunk(data: Uint8Array, callback: (event: "SPEECH_START" | "SPEECH_END") => void): void;
- }
- export { SileroVad };
@@ -1,40 +0,0 @@
- "use strict";
- Object.defineProperty(exports, "__esModule", { value: true });
- exports.SileroVad = void 0;
- /**
- * Copyright (C) 2025 by Fonoster Inc (https://fonoster.com)
- * http://github.com/fonoster/fonoster
- *
- * This file is part of Fonoster
- *
- * Licensed under the MIT License (the "License");
- * you may not use this file except in compliance with
- * the License. You may obtain a copy of the License at
- *
- * https://opensource.org/licenses/MIT
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
- const logger_1 = require("@fonoster/logger");
- const createVad_1 = require("./createVad");
- const logger = (0, logger_1.getLogger)({ service: "autopilot", filePath: __filename });
- class SileroVad {
- constructor(params) {
- logger.verbose("starting instance of silero vad v5", { ...params });
- this.params = params;
- }
- async init() {
- this.vad = await (0, createVad_1.createVad)(this.params);
- }
- processChunk(data, callback) {
- if (!this.vad) {
- throw new Error("VAD not initialized");
- }
- this.vad(data, callback);
- }
- }
- exports.SileroVad = SileroVad;
@@ -1,14 +0,0 @@
- import { ONNXRuntimeAPI, SpeechProbabilities } from "./types";
- declare class SileroVadModel {
- private readonly ort;
- private readonly pathToModel;
- private _session;
- private _state;
- private _sr;
- constructor(ort: ONNXRuntimeAPI, pathToModel: string);
- static readonly new: (ort: ONNXRuntimeAPI, pathToModel: string) => Promise<SileroVadModel>;
- init(): Promise<void>;
- resetState: () => void;
- process(audioFrame: Float32Array): Promise<SpeechProbabilities>;
- }
- export { SileroVadModel };
@@ -1,83 +0,0 @@
- "use strict";
- var _a;
- Object.defineProperty(exports, "__esModule", { value: true });
- exports.SileroVadModel = void 0;
- /**
- * Copyright (C) 2025 by Fonoster Inc (https://fonoster.com)
- * http://github.com/fonoster/fonoster
- *
- * This file is part of Fonoster
- *
- * Licensed under the MIT License (the "License");
- * you may not use this file except in compliance with
- * the License. You may obtain a copy of the License at
- *
- * https://opensource.org/licenses/MIT
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
- const fs_1 = require("fs");
- const SAMPLE_RATE = 16000;
- function getNewState(ortInstance) {
- return new ortInstance.Tensor("float32", new Float32Array(2 * 1 * 128), // Use Float32Array for consistency
- [2, 1, 128]);
- }
- class SileroVadModel {
- constructor(ort, pathToModel) {
- this.ort = ort;
- this.pathToModel = pathToModel;
- this.resetState = () => {
- this._state = getNewState(this.ort);
- };
- }
- async init() {
- const modelArrayBuffer = (0, fs_1.readFileSync)(this.pathToModel).buffer;
- const sessionOption = {
- interOpNumThreads: 1,
- intraOpNumThreads: 1,
- enableCpuMemArena: false
- };
- this._session = await this.ort.InferenceSession.create(modelArrayBuffer, sessionOption);
- // Validate model inputs/outputs
- const requiredInputs = ["input", "state", "sr"];
- for (const name of requiredInputs) {
- if (!this._session.inputNames.includes(name)) {
- throw new Error(`Model is missing expected input "${name}"`);
- }
- }
- if (!this._session.outputNames.includes("output") ||
- !this._session.outputNames.includes("stateN")) {
- throw new Error("Model is missing expected outputs");
- }
- // Use BigInt for sample rate tensor
- this._sr = new this.ort.Tensor("int64", [BigInt(SAMPLE_RATE)], []);
- this._state = getNewState(this.ort);
- }
- async process(audioFrame) {
- const inputTensor = new this.ort.Tensor("float32", audioFrame, [
- 1,
- audioFrame.length
- ]);
- const feeds = {
- input: inputTensor,
- state: this._state,
- sr: this._sr
- };
- const out = await this._session.run(feeds);
- this._state = out.stateN;
- const [isSpeech] = out.output.data;
- const notSpeech = 1 - isSpeech;
- return { notSpeech, isSpeech };
- }
- }
- exports.SileroVadModel = SileroVadModel;
- _a = SileroVadModel;
- SileroVadModel.new = async (ort, pathToModel) => {
- const model = new _a(ort, pathToModel);
- await model.init();
- return model;
- };
@@ -1,3 +0,0 @@
- import { VadParams } from "../vad/types";
- declare function createVad(params: VadParams): Promise<(chunk: Uint8Array, callback: (event: "SPEECH_START" | "SPEECH_END") => void) => Promise<void>>;
- export { createVad, VadParams };
@@ -1,126 +0,0 @@
- "use strict";
- var __createBinding = (this && this.__createBinding) || (Object.create ? (function(o, m, k, k2) {
- if (k2 === undefined) k2 = k;
- var desc = Object.getOwnPropertyDescriptor(m, k);
- if (!desc || ("get" in desc ? !m.__esModule : desc.writable || desc.configurable)) {
- desc = { enumerable: true, get: function() { return m[k]; } };
- }
- Object.defineProperty(o, k2, desc);
- }) : (function(o, m, k, k2) {
- if (k2 === undefined) k2 = k;
- o[k2] = m[k];
- }));
- var __setModuleDefault = (this && this.__setModuleDefault) || (Object.create ? (function(o, v) {
- Object.defineProperty(o, "default", { enumerable: true, value: v });
- }) : function(o, v) {
- o["default"] = v;
- });
- var __importStar = (this && this.__importStar) || (function () {
- var ownKeys = function(o) {
- ownKeys = Object.getOwnPropertyNames || function (o) {
- var ar = [];
- for (var k in o) if (Object.prototype.hasOwnProperty.call(o, k)) ar[ar.length] = k;
- return ar;
- };
- return ownKeys(o);
- };
- return function (mod) {
- if (mod && mod.__esModule) return mod;
- var result = {};
- if (mod != null) for (var k = ownKeys(mod), i = 0; i < k.length; i++) if (k[i] !== "default") __createBinding(result, mod, k[i]);
- __setModuleDefault(result, mod);
- return result;
- };
- })();
- Object.defineProperty(exports, "__esModule", { value: true });
- exports.createVad = createVad;
- /**
- * Copyright (C) 2025 by Fonoster Inc (https://fonoster.com)
- * http://github.com/fonoster/fonoster
- *
- * This file is part of Fonoster
- *
- * Licensed under the MIT License (the "License");
- * you may not use this file except in compliance with
- * the License. You may obtain a copy of the License at
- *
- * https://opensource.org/licenses/MIT
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
- const path_1 = require("path");
- const logger_1 = require("@fonoster/logger");
- const ort = __importStar(require("onnxruntime-node"));
- const chunkToFloat32Array_1 = require("../vad/chunkToFloat32Array");
- const SileroVadModel_1 = require("./SileroVadModel");
- const logger = (0, logger_1.getLogger)({ service: "autopilot", filePath: __filename });
- const FULL_FRAME_SIZE = 1024; // 64ms @ 16kHz
- const BUFFER_SIZE = 512; // 32ms @ 16kHz
- async function createVad(params) {
- const { pathToModel, activationThreshold, deactivationThreshold, debounceFrames } = params;
- const effectivePath = pathToModel || (0, path_1.join)(__dirname, "..", "..", "silero_vad_v5.onnx");
- const ortAdapter = {
- InferenceSession: {
- create: ort.InferenceSession.create.bind(ort.InferenceSession)
- },
- Tensor: ort.Tensor
- };
- const silero = await SileroVadModel_1.SileroVadModel.new(ortAdapter, effectivePath);
- let sampleBuffer = [];
- let isSpeechActive = false;
- let framesSinceStateChange = 0;
- // Reset internal state after a state change.
- const resetState = () => {
- isSpeechActive = false;
- framesSinceStateChange = 0;
- // Clear any pending audio samples to avoid using outdated values.
- sampleBuffer = [];
- silero.resetState();
- logger.silly("State reset -- sampleBuffer cleared");
- };
- return async function process(chunk, callback) {
- // Convert the incoming chunk to normalized Float32 samples (using chunkToFloat32Array)
- const float32Array = (0, chunkToFloat32Array_1.chunkToFloat32Array)(chunk);
- sampleBuffer.push(...float32Array);
- // Wait until we've collected a full frame worth of samples.
- while (sampleBuffer.length >= FULL_FRAME_SIZE) {
- const fullFrame = sampleBuffer.slice(0, FULL_FRAME_SIZE);
- sampleBuffer = sampleBuffer.slice(FULL_FRAME_SIZE);
- // Use the last BUFFER_SIZE samples from the full frame.
- const frame = fullFrame.slice(fullFrame.length - BUFFER_SIZE);
- const result = await silero.process(new Float32Array(frame));
- const rawScore = result.isSpeech;
- logger.silly("Frame processing", {
- rawScore,
- isSpeechActive,
- framesSinceStateChange,
- pendingSamples: sampleBuffer.length
- });
- framesSinceStateChange++;
- if (isSpeechActive) {
- // If already in speech, check if the score has dropped below deactivationThreshold
- if (rawScore < deactivationThreshold &&
- framesSinceStateChange >= debounceFrames) {
- callback("SPEECH_END");
- resetState();
- logger.silly("Speech end detected", { rawScore });
- continue;
- }
- }
- else {
- // If currently not speaking, check if the score is above activationThreshold
- if (rawScore > activationThreshold &&
- framesSinceStateChange >= debounceFrames) {
- isSpeechActive = true;
- framesSinceStateChange = 0;
- callback("SPEECH_START");
- logger.silly("Speech start detected", { rawScore });
- }
- }
- }
- };
- }
@@ -1,20 +0,0 @@
- /**
- * Copyright (C) 2025 by Fonoster Inc (https://fonoster.com)
- * http://github.com/fonoster/fonoster
- *
- * This file is part of Fonoster
- *
- * Licensed under the MIT License (the "License");
- * you may not use this file except in compliance with
- * the License. You may obtain a copy of the License at
- *
- * https://opensource.org/licenses/MIT
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
- export * from "./SileroVad";
- export * from "./types";
@@ -1,36 +0,0 @@
- "use strict";
- var __createBinding = (this && this.__createBinding) || (Object.create ? (function(o, m, k, k2) {
- if (k2 === undefined) k2 = k;
- var desc = Object.getOwnPropertyDescriptor(m, k);
- if (!desc || ("get" in desc ? !m.__esModule : desc.writable || desc.configurable)) {
- desc = { enumerable: true, get: function() { return m[k]; } };
- }
- Object.defineProperty(o, k2, desc);
- }) : (function(o, m, k, k2) {
- if (k2 === undefined) k2 = k;
- o[k2] = m[k];
- }));
- var __exportStar = (this && this.__exportStar) || function(m, exports) {
- for (var p in m) if (p !== "default" && !Object.prototype.hasOwnProperty.call(exports, p)) __createBinding(exports, m, p);
- };
- Object.defineProperty(exports, "__esModule", { value: true });
- /**
- * Copyright (C) 2025 by Fonoster Inc (https://fonoster.com)
- * http://github.com/fonoster/fonoster
- *
- * This file is part of Fonoster
- *
- * Licensed under the MIT License (the "License");
- * you may not use this file except in compliance with
- * the License. You may obtain a copy of the License at
- *
- * https://opensource.org/licenses/MIT
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
- __exportStar(require("./SileroVad"), exports);
- __exportStar(require("./types"), exports);
@@ -1,54 +0,0 @@
- /**
- * Copyright (C) 2025 by Fonoster Inc (https://fonoster.com)
- * http://github.com/fonoster/fonoster
- *
- * This file is part of Fonoster
- *
- * Licensed under the MIT License (the "License");
- * you may not use this file except in compliance with
- * the License. You may obtain a copy of the License at
- *
- * https://opensource.org/licenses/MIT
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
- type VadEvent = "SPEECH_START" | "SPEECH_END";
- type Vad = {
- processChunk: (chunk: Uint8Array, callback: (event: VadEvent) => void) => void;
- };
- export interface SpeechProbabilities {
- notSpeech: number;
- isSpeech: number;
- }
- export interface ONNXRuntimeAPI {
- InferenceSession: {
- create: (modelPath: ArrayBuffer | string, options?: {
- interOpNumThreads: number;
- intraOpNumThreads: number;
- enableCpuMemArena: boolean;
- }) => Promise<ONNXSession>;
- };
- Tensor: new (type: string, data: Float32Array | bigint[], dims: number[]) => ONNXTensor;
- }
- export interface ONNXSession {
- run: (feeds: {
- [key: string]: ONNXTensor;
- }) => Promise<{
- output: {
- data: Float32Array;
- };
- stateN: ONNXTensor;
- }>;
- inputNames: string[];
- outputNames: string[];
- }
- export interface ONNXTensor {
- data: Float32Array | bigint[];
- dims: number[];
- type: string;
- }
- export { Vad, VadEvent };
@@ -1,2 +0,0 @@
- "use strict";
- Object.defineProperty(exports, "__esModule", { value: true });