@fonoster/autopilot 0.8.39 → 0.8.41
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +1 -1
- package/dist/Autopilot.js +3 -2
- package/dist/envs.d.ts +1 -1
- package/dist/envs.js +6 -2
- package/dist/machine/machine.d.ts +8 -30
- package/dist/machine/machine.js +112 -71
- package/dist/vad/SileroVad.d.ts +3 -3
- package/dist/vad/SileroVad.js +3 -3
- package/dist/vad/SileroVadModel.d.ts +8 -7
- package/dist/vad/SileroVadModel.js +12 -13
- package/dist/vad/chunkToFloat32Array.js +3 -1
- package/dist/vad/makeVad.d.ts +7 -0
- package/dist/vad/makeVad.js +104 -0
- package/dist/vad/types.d.ts +4 -7
- package/dist/vadWorker.js +7 -2
- package/dist/vadv5/SileroVad.d.ts +18 -0
- package/dist/vadv5/SileroVad.js +41 -0
- package/dist/vadv5/SileroVadModel.d.ts +14 -0
- package/dist/vadv5/SileroVadModel.js +66 -0
- package/dist/vadv5/chunkToFloat32Array.d.ts +2 -0
- package/dist/vadv5/chunkToFloat32Array.js +41 -0
- package/dist/vadv5/index.d.ts +2 -0
- package/dist/vadv5/index.js +37 -0
- package/dist/vadv5/types.d.ts +23 -0
- package/dist/vadv5/types.js +2 -0
- package/package.json +7 -7
- package/silero_vad.onnx +0 -0
- /package/dist/{vad → vadv5}/createVad.d.ts +0 -0
- /package/dist/{vad → vadv5}/createVad.js +0 -0
package/README.md
CHANGED
@@ -35,7 +35,7 @@ The configuration file has two major sections: `conversationSettings` and `languageModel`
 {
   "conversationSettings": {
     "firstMessage": "Hello, this is Olivia from Dr. Green's Family Medicine. How can I assist you today?",
-    "
+    "systemPrompt": "You are a Customer Service Representative. You are here to help the caller with their needs.",
     "systemErrorMessage": "I'm sorry, but I seem to be having trouble. Please try again later.",
     "initialDtmf": "6589",
     "transferOptions": {
package/dist/Autopilot.js
CHANGED
@@ -73,6 +73,9 @@ class Autopilot {
         if (event === "SPEECH_START") {
             this.actor.send({ type: "SPEECH_START" });
         }
+        else if (event === "SPEECH_END") {
+            this.actor.send({ type: "SPEECH_END" });
+        }
     });
 }
 handleVoicePayload(chunk) {
@@ -89,8 +92,6 @@ class Autopilot {
     stream.onData((speech) => {
         logger.verbose("received speech result", { speech });
         if (speech) {
-            // Testing using STT for both VAD and STT (experimental)
-            this.actor.send({ type: "SPEECH_END" });
             this.actor.send({ type: "SPEECH_RESULT", speech });
         }
     });
package/dist/envs.d.ts
CHANGED
@@ -7,7 +7,7 @@ export declare const KNOWLEDGE_BASE_ENABLED: boolean;
 export declare const NODE_ENV: string;
 export declare const UNSTRUCTURED_API_KEY: string | undefined;
 export declare const UNSTRUCTURED_API_URL: string;
-export declare const
+export declare const SILERO_VAD_VERSION: string;
 export declare const CONVERSATION_PROVIDER: string;
 export declare const CONVERSATION_PROVIDER_FILE: string;
 export declare const APISERVER_ENDPOINT: string;
package/dist/envs.js
CHANGED
@@ -3,7 +3,7 @@ var __importDefault = (this && this.__importDefault) || function (mod) {
     return (mod && mod.__esModule) ? mod : { "default": mod };
 };
 Object.defineProperty(exports, "__esModule", { value: true });
-exports.INTEGRATIONS_FILE = exports.APISERVER_ENDPOINT = exports.CONVERSATION_PROVIDER_FILE = exports.CONVERSATION_PROVIDER = exports.
+exports.INTEGRATIONS_FILE = exports.APISERVER_ENDPOINT = exports.CONVERSATION_PROVIDER_FILE = exports.CONVERSATION_PROVIDER = exports.SILERO_VAD_VERSION = exports.UNSTRUCTURED_API_URL = exports.UNSTRUCTURED_API_KEY = exports.NODE_ENV = exports.KNOWLEDGE_BASE_ENABLED = exports.AWS_S3_SECRET_ACCESS_KEY = exports.AWS_S3_REGION = exports.AWS_S3_ENDPOINT = exports.AWS_S3_ACCESS_KEY_ID = exports.SKIP_IDENTITY = void 0;
 /*
  * Copyright (C) 2025 by Fonoster Inc (https://fonoster.com)
  * http://github.com/fonoster/fonoster
@@ -39,7 +39,7 @@ exports.KNOWLEDGE_BASE_ENABLED = e.KNOWLEDGE_BASE_ENABLED === "true";
 exports.NODE_ENV = e.NODE_ENV || "production";
 exports.UNSTRUCTURED_API_KEY = e.UNSTRUCTURED_API_KEY;
 exports.UNSTRUCTURED_API_URL = e.UNSTRUCTURED_API_URL ?? "https://api.unstructuredapp.io/general/v0/general";
-exports.
+exports.SILERO_VAD_VERSION = e.SILERO_VAD_VERSION ?? "v4";
 exports.CONVERSATION_PROVIDER = e.CONVERSATION_PROVIDER
     ? e.CONVERSATION_PROVIDER
     : types_1.ConversationProvider.FILE;
@@ -52,6 +52,10 @@ exports.APISERVER_ENDPOINT = e.APISERVER_ENDPOINT
 exports.INTEGRATIONS_FILE = e.INTEGRATIONS_FILE
     ? e.INTEGRATIONS_FILE
     : `${process.cwd()}/config/integrations.json`;
+if (exports.SILERO_VAD_VERSION !== "v4" && exports.SILERO_VAD_VERSION !== "v5") {
+    console.error("SILERO_VAD_VERSION must be set to 'v4' or 'v5'");
+    process.exit(1);
+}
 if (exports.CONVERSATION_PROVIDER.toLocaleLowerCase() !== types_1.ConversationProvider.API &&
     exports.CONVERSATION_PROVIDER.toLocaleLowerCase() !== types_1.ConversationProvider.FILE) {
     console.error("CONVERSATION_PROVIDER must be set to 'api' or 'file'");
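Note: the new SILERO_VAD_VERSION switch defaults to "v4", and the process exits when the value is anything other than "v4" or "v5". A minimal standalone sketch of driving that switch (only the variable name, its accepted values, and the error message come from the diff above; the rest is illustrative):

    // sketch.js — assumes the autopilot reads process.env.SILERO_VAD_VERSION at startup
    process.env.SILERO_VAD_VERSION = "v5"; // accepted values per envs.js: "v4" (default) or "v5"

    const version = process.env.SILERO_VAD_VERSION ?? "v4";
    if (version !== "v4" && version !== "v5") {
        // mirrors the validation added in envs.js: log and exit with a non-zero code
        console.error("SILERO_VAD_VERSION must be set to 'v4' or 'v5'");
        process.exit(1);
    }
    console.log(`Silero VAD version selected: ${version}`);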
package/dist/machine/machine.d.ts
CHANGED
@@ -9,8 +9,6 @@ declare const machine: import("xstate").StateMachine<AutopilotContext, {
 } | {
     type: "SPEECH_RESULT";
     speech: string;
-} | {
-    type: "USER_REQUEST_PROCESSED";
 }, {
     [x: string]: import("xstate").ActorRefFromLogic<import("xstate").PromiseActorLogic<void, {
         context: AutopilotContext;
@@ -66,20 +64,11 @@ declare const machine: import("xstate").StateMachine<AutopilotContext, {
     type: "setSpeakingDone";
     params: import("xstate").NonReducibleUnknown;
 };
-
-
-
-    params: unknown;
-};
-hasSpeechResult: {
-    type: "hasSpeechResult";
-    params: unknown;
-};
-isNotSpeaking: {
-    type: "isNotSpeaking";
-    params: unknown;
+resetState: {
+    type: "resetState";
+    params: import("xstate").NonReducibleUnknown;
 };
-}>, "IDLE_TIMEOUT" | "MAX_SPEECH_WAIT_TIMEOUT", {}, string, {
+}>, never, "IDLE_TIMEOUT" | "MAX_SPEECH_WAIT_TIMEOUT" | "SESSION_TIMEOUT", {}, string, {
 conversationSettings: ConversationSettings;
 languageModel: LanguageModel;
 voice: Voice;
@@ -90,8 +79,6 @@ declare const machine: import("xstate").StateMachine<AutopilotContext, {
 } | {
     type: "SPEECH_RESULT";
     speech: string;
-} | {
-    type: "USER_REQUEST_PROCESSED";
 }, {
     src: "doProcessUserRequest";
     logic: import("xstate").PromiseActorLogic<void, {
@@ -143,20 +130,11 @@ declare const machine: import("xstate").StateMachine<AutopilotContext, {
     type: "setSpeakingDone";
     params: import("xstate").NonReducibleUnknown;
 };
-
-
-
-    params: unknown;
-};
-hasSpeechResult: {
-    type: "hasSpeechResult";
-    params: unknown;
-};
-isNotSpeaking: {
-    type: "isNotSpeaking";
-    params: unknown;
+resetState: {
+    type: "resetState";
+    params: import("xstate").NonReducibleUnknown;
 };
-}>, "IDLE_TIMEOUT" | "MAX_SPEECH_WAIT_TIMEOUT", string, {
+}>, never, "IDLE_TIMEOUT" | "MAX_SPEECH_WAIT_TIMEOUT" | "SESSION_TIMEOUT", string, {
 conversationSettings: ConversationSettings;
 languageModel: LanguageModel;
 voice: Voice;
package/dist/machine/machine.js
CHANGED
@@ -78,7 +78,10 @@ const machine = (0, xstate_1.setup)({
             speech: event.speech
         });
         const speech = event.speech;
-
+        if (!speech) {
+            return context;
+        }
+        context.speechBuffer = ((context.speechBuffer ?? "") +
             " " +
             speech).trimStart();
         return context;
@@ -103,24 +106,33 @@ const machine = (0, xstate_1.setup)({
            });
            context.isSpeaking = false;
            return context;
+        }),
+        resetState: (0, xstate_1.assign)(({ context }) => {
+            logger.verbose("called resetState action");
+            return {
+                ...context,
+                speechBuffer: "",
+                idleTimeoutCount: 0,
+                isSpeaking: false
+            };
        })
    },
    guards: {
        idleTimeoutCountExceedsMax: function ({ context }) {
            logger.verbose("called idleTimeoutCountExceedsMax guard", {
-                idleTimeoutCount: context.idleTimeoutCount,
+                idleTimeoutCount: context.idleTimeoutCount + 1,
                maxIdleTimeoutCount: context.maxIdleTimeoutCount
            });
-            return context.idleTimeoutCount
+            return context.idleTimeoutCount + 1 > context.maxIdleTimeoutCount;
        },
        hasSpeechResult: function ({ context }) {
-            return context.speechBuffer
+            return context.speechBuffer;
        },
-
-        logger.verbose("called
+        isSpeaking: function ({ context }) {
+            logger.verbose("called isSpeaking guard", {
                isSpeaking: context.isSpeaking
            });
-            return
+            return context.isSpeaking;
        }
    },
    delays: {
@@ -129,6 +141,10 @@ const machine = (0, xstate_1.setup)({
        },
        MAX_SPEECH_WAIT_TIMEOUT: ({ context }) => {
            return context.maxSpeechWaitTimeout;
+        },
+        SESSION_TIMEOUT: ({ context }) => {
+            const elapsed = Date.now() - context.sessionStartTime;
+            return Math.max(0, context.maxSessionDuration - elapsed);
        }
    },
    actors: {
@@ -175,6 +191,18 @@ const machine = (0, xstate_1.setup)({
            await context.voice.say(context.systemErrorMessage);
        }
    })
+    },
+    on: {
+        ERROR: {
+            target: "systemError",
+            actions: "logError"
+        }
+    },
+    after: {
+        SESSION_TIMEOUT: {
+            target: "hangup",
+            actions: ["goodbye"]
+        }
    }
}).createMachine({
    context: ({ input }) => ({
@@ -192,8 +220,9 @@ const machine = (0, xstate_1.setup)({
        maxIdleTimeoutCount: input.conversationSettings.idleOptions?.maxTimeoutCount || 3,
        idleTimeoutCount: 0,
        maxSpeechWaitTimeout: input.conversationSettings.maxSpeechWaitTimeout,
-
-
+        isSpeaking: false,
+        sessionStartTime: Date.now(),
+        maxSessionDuration: input.conversationSettings.maxSessionDuration
    }),
    id: "fnAI",
    initial: "greeting",
@@ -207,110 +236,103 @@ const machine = (0, xstate_1.setup)({
            }
        },
        idle: {
+            entry: { type: "cleanSpeech" },
            on: {
                SPEECH_START: {
                    target: "waitingForUserRequest",
                    description: "Event from VAD system."
-                },
-                SPEECH_RESULT: {
-                    target: "waitingForUserRequest",
-                    description: "Event from Speech to Text provider."
                }
            },
            after: {
                IDLE_TIMEOUT: [
                    {
                        target: "hangup",
-                        actions: {
-
-                        },
-                        guard: {
-                            type: "idleTimeoutCountExceedsMax"
-                        }
+                        actions: { type: "goodbye" },
+                        guard: (0, xstate_1.and)(["idleTimeoutCountExceedsMax", (0, xstate_1.not)("isSpeaking")])
                    },
                    {
-                        target: "
+                        target: "idleTransition",
+                        guard: (0, xstate_1.not)("isSpeaking"),
                        actions: [
-                            {
-
-                            },
-                            {
-                                type: "announceIdleTimeout"
-                            }
+                            { type: "increaseIdleTimeoutCount" },
+                            { type: "announceIdleTimeout" }
                        ]
                    }
                ]
            }
        },
+        idleTransition: {
+            always: {
+                target: "idle"
+            }
+        },
        waitingForUserRequest: {
            always: {
                target: "updatingSpeech"
            },
            entry: [
-                {
-
-                }
-                {
-                    type: "interruptPlayback"
-                },
-                {
-                    type: "resetIdleTimeoutCount"
-                },
-                {
-                    type: "setSpeaking"
-                }
+                { type: "interruptPlayback" },
+                { type: "resetIdleTimeoutCount" },
+                { type: "setSpeaking" }
            ]
        },
        hangup: {
            type: "final"
        },
-        transitioningToIdle: {
-            always: {
-                target: "idle"
-            }
-        },
        updatingSpeech: {
            on: {
-
+                SPEECH_END: [
                    {
                        target: "processingUserRequest",
-
-
-
-                        guard: {
-                            type: "isNotSpeaking"
-                        },
-                        description: "Speech result from the Speech to Text provider."
+                        guard: "hasSpeechResult",
+                        actions: [{ type: "setSpeakingDone" }],
+                        description: "Process immediately if we have speech"
                    },
                    {
-                        target: "
-
-
-
+                        target: "waitingForSpeechTimeout",
+                        guard: (0, xstate_1.not)("hasSpeechResult"),
+                        actions: [{ type: "setSpeakingDone" }],
+                        description: "Wait for more speech if no results yet"
                    }
                ],
-
+                SPEECH_RESULT: [
+                    {
+                        actions: { type: "appendSpeech" },
+                        guard: "isSpeaking",
+                        description: "Just append the speech result."
+                    },
                    {
                        target: "processingUserRequest",
-                        actions: {
-
-
-
-
-
-
+                        actions: { type: "appendSpeech" },
+                        guard: (0, xstate_1.not)("isSpeaking"),
+                        description: "Append the speech result and process it."
+                    }
+                ]
+            }
+        },
+        waitingForSpeechTimeout: {
+            after: {
+                MAX_SPEECH_WAIT_TIMEOUT: [
+                    {
+                        target: "processingUserRequest",
+                        guard: "hasSpeechResult"
                    },
                    {
-                        target: "
-                        actions: {
-                            type: "setSpeakingDone"
-                        }
+                        target: "idle"
                    }
                ]
            },
-
-
-                    target: "
+            on: {
+                SPEECH_START: {
+                    target: "waitingForUserRequest",
+                    description: "User started speaking again"
+                },
+                SPEECH_RESULT: {
+                    target: "processingUserRequest",
+                    actions: {
+                        type: "appendSpeech"
+                    },
+                    description: "Append final speech and process the request"
                }
            }
        },
@@ -318,7 +340,17 @@ const machine = (0, xstate_1.setup)({
            on: {
                SPEECH_START: {
                    target: "waitingForUserRequest",
-                    description: "Event from VAD or similar system."
+                    description: "Event from VAD or similar system.",
+                    actions: [{ type: "interruptPlayback" }, { type: "cleanSpeech" }]
+                },
+                SPEECH_RESULT: {
+                    target: "waitingForUserRequest",
+                    description: "User interrupted with new speech",
+                    actions: [
+                        { type: "interruptPlayback" },
+                        { type: "cleanSpeech" },
+                        { type: "appendSpeech" }
+                    ]
                }
            },
            invoke: {
@@ -328,6 +360,15 @@ const machine = (0, xstate_1.setup)({
                    target: "idle"
                }
            }
+        },
+        systemError: {
+            entry: "announceSystemError",
+            after: {
+                SYSTEM_ERROR_RECOVERY_TIMEOUT: {
+                    target: "idle",
+                    actions: "resetState"
+                }
+            }
        }
    }
});
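The machine now tracks sessionStartTime and maxSessionDuration in context and hangs up when the SESSION_TIMEOUT delay elapses. A minimal, self-contained sketch of that pattern (Node.js, XState v5), mirroring the compiled delay above; the state names and the 5000 ms budget here are illustrative only:

    const { setup, createActor } = require("xstate");

    const demo = setup({
        delays: {
            // Remaining session time, measured from when the session started
            SESSION_TIMEOUT: ({ context }) =>
                Math.max(0, context.maxSessionDuration - (Date.now() - context.sessionStartTime))
        }
    }).createMachine({
        context: { sessionStartTime: Date.now(), maxSessionDuration: 5000 },
        initial: "active",
        states: {
            active: {
                // When the budget is exhausted the machine reaches a final state,
                // just as the autopilot's root "after" block targets "hangup"
                after: { SESSION_TIMEOUT: { target: "hangup" } }
            },
            hangup: { type: "final" }
        }
    });

    createActor(demo).start();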
package/dist/vad/SileroVad.d.ts
CHANGED
@@ -1,14 +1,14 @@
 import { Vad } from "./types";
 declare class SileroVad implements Vad {
     private vad;
-    private
+    private params;
     constructor(params: {
-        pathToModel
+        pathToModel?: string;
         activationThreshold: number;
         deactivationThreshold: number;
         debounceFrames: number;
     });
-    pathToModel
+    pathToModel?: string;
     activationThreshold: number;
     deactivationThreshold: number;
     debounceFrames: number;
package/dist/vad/SileroVad.js
CHANGED
@@ -21,15 +21,15 @@ exports.SileroVad = void 0;
  * limitations under the License.
  */
 const logger_1 = require("@fonoster/logger");
-const
+const makeVad_1 = require("./makeVad");
 const logger = (0, logger_1.getLogger)({ service: "autopilot", filePath: __filename });
 class SileroVad {
     constructor(params) {
-        logger.verbose("starting instance of silero vad", { ...params });
+        logger.verbose("starting instance of silero vad v4", { ...params });
         this.params = params;
     }
     async init() {
-        this.vad = await (0,
+        this.vad = await (0, makeVad_1.makeVad)(this.params);
     }
     processChunk(data, callback) {
         if (!this.vad) {
package/dist/vad/SileroVadModel.d.ts
CHANGED
@@ -1,14 +1,15 @@
 import { ONNXRuntimeAPI, SpeechProbabilities } from "./types";
 declare class SileroVadModel {
-    private
-    private
-
-
-
+    private ort;
+    private pathToModel;
+    _session: any;
+    _h: unknown;
+    _c: unknown;
+    _sr: unknown;
     constructor(ort: ONNXRuntimeAPI, pathToModel: string);
-    static
+    static new: (ort: ONNXRuntimeAPI, pathToModel: string) => Promise<SileroVadModel>;
     init(): Promise<void>;
-    resetState: () => void;
     process(audioFrame: Float32Array): Promise<SpeechProbabilities>;
+    resetState(): void;
 }
 export { SileroVadModel };
package/dist/vad/SileroVadModel.js
CHANGED
@@ -21,24 +21,16 @@ exports.SileroVadModel = void 0;
  * limitations under the License.
  */
 const fs_1 = require("fs");
-function getNewState(ortInstance) {
-    const zeroes = Array(2 * 128).fill(0);
-    return new ortInstance.Tensor("float32", zeroes, [2, 1, 128]);
-}
 class SileroVadModel {
     constructor(ort, pathToModel) {
         this.ort = ort;
         this.pathToModel = pathToModel;
-        this.resetState = () => {
-            this._state = getNewState(this.ort);
-        };
     }
     async init() {
         const modelArrayBuffer = (0, fs_1.readFileSync)(this.pathToModel).buffer;
-
-        this._session = await this.ort.InferenceSession.create(modelArrayBuffer, sessionOption);
+        this._session = await this.ort.InferenceSession.create(modelArrayBuffer);
         this._sr = new this.ort.Tensor("int64", [16000n]);
-        this.
+        this.resetState();
     }
     async process(audioFrame) {
         const t = new this.ort.Tensor("float32", audioFrame, [
@@ -47,15 +39,22 @@ class SileroVadModel {
         ]);
         const inputs = {
             input: t,
-
+            h: this._h,
+            c: this._c,
             sr: this._sr
         };
         const out = await this._session.run(inputs);
-        this.
-
+        this._h = out.hn;
+        this._c = out.cn;
+        const [isSpeech] = out.output.data;
         const notSpeech = 1 - isSpeech;
         return { notSpeech, isSpeech };
     }
+    resetState() {
+        const zeroes = Array(2 * 64).fill(0);
+        this._h = new this.ort.Tensor("float32", zeroes, [2, 1, 64]);
+        this._c = new this.ort.Tensor("float32", zeroes, [2, 1, 64]);
+    }
 }
 exports.SileroVadModel = SileroVadModel;
 _a = SileroVadModel;
package/dist/vad/chunkToFloat32Array.js
CHANGED
@@ -24,12 +24,14 @@ exports.chunkToFloat32Array = chunkToFloat32Array;
 //
 // Q. Would it be the same if we just created a new Uint8Array from the chunk?
 function chunkToFloat32Array(chunk) {
-
+    // Check if byteOffset is not aligned
     const alignedByteOffset = chunk.byteOffset % Int16Array.BYTES_PER_ELEMENT === 0;
+    let int16Array;
     if (alignedByteOffset) {
         int16Array = new Int16Array(chunk.buffer, chunk.byteOffset, chunk.byteLength / Int16Array.BYTES_PER_ELEMENT);
     }
     else {
+        // Create a new aligned Uint8Array and then an Int16Array from it
         const alignedChunk = new Uint8Array(chunk);
         int16Array = new Int16Array(alignedChunk.buffer, alignedChunk.byteOffset, alignedChunk.byteLength / Int16Array.BYTES_PER_ELEMENT);
     }
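For context, this helper converts 16-bit little-endian PCM bytes into normalized Float32 samples. A standalone sketch of the same idea (a re-implementation for illustration, not the package export; the /32768 scaling matches the full implementation shown later in the vadv5/chunkToFloat32Array.js listing):

    // Standalone sketch of the PCM16 -> Float32 conversion performed by chunkToFloat32Array.
    function pcm16ToFloat32(chunk) {
        const aligned = chunk.byteOffset % Int16Array.BYTES_PER_ELEMENT === 0
            ? chunk
            : new Uint8Array(chunk); // copy to realign the underlying buffer
        const int16 = new Int16Array(aligned.buffer, aligned.byteOffset, aligned.byteLength / 2);
        const out = new Float32Array(int16.length);
        for (let i = 0; i < int16.length; i++) {
            out[i] = int16[i] / 32768.0; // normalize to roughly [-1, 1)
        }
        return out;
    }

    // Example: two 16-bit samples (0x0000, 0x4000) become [0, 0.5]
    console.log(pcm16ToFloat32(new Uint8Array([0x00, 0x00, 0x00, 0x40])));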
package/dist/vad/makeVad.d.ts
ADDED
@@ -0,0 +1,7 @@
+declare function makeVad(params: {
+    pathToModel?: string;
+    activationThreshold: number;
+    deactivationThreshold: number;
+    debounceFrames: number;
+}): Promise<(chunk: Uint8Array, callback: (event: "SPEECH_START" | "SPEECH_END") => void) => Promise<void>>;
+export { makeVad };
package/dist/vad/makeVad.js
ADDED
@@ -0,0 +1,104 @@
+"use strict";
+var __createBinding = (this && this.__createBinding) || (Object.create ? (function(o, m, k, k2) {
+    if (k2 === undefined) k2 = k;
+    var desc = Object.getOwnPropertyDescriptor(m, k);
+    if (!desc || ("get" in desc ? !m.__esModule : desc.writable || desc.configurable)) {
+      desc = { enumerable: true, get: function() { return m[k]; } };
+    }
+    Object.defineProperty(o, k2, desc);
+}) : (function(o, m, k, k2) {
+    if (k2 === undefined) k2 = k;
+    o[k2] = m[k];
+}));
+var __setModuleDefault = (this && this.__setModuleDefault) || (Object.create ? (function(o, v) {
+    Object.defineProperty(o, "default", { enumerable: true, value: v });
+}) : function(o, v) {
+    o["default"] = v;
+});
+var __importStar = (this && this.__importStar) || (function () {
+    var ownKeys = function(o) {
+        ownKeys = Object.getOwnPropertyNames || function (o) {
+            var ar = [];
+            for (var k in o) if (Object.prototype.hasOwnProperty.call(o, k)) ar[ar.length] = k;
+            return ar;
+        };
+        return ownKeys(o);
+    };
+    return function (mod) {
+        if (mod && mod.__esModule) return mod;
+        var result = {};
+        if (mod != null) for (var k = ownKeys(mod), i = 0; i < k.length; i++) if (k[i] !== "default") __createBinding(result, mod, k[i]);
+        __setModuleDefault(result, mod);
+        return result;
+    };
+})();
+Object.defineProperty(exports, "__esModule", { value: true });
+exports.makeVad = makeVad;
+/* eslint-disable no-loops/no-loops */
+/*
+ * Copyright (C) 2025 by Fonoster Inc (https://fonoster.com)
+ * http://github.com/fonoster/fonoster
+ *
+ * This file is part of Fonoster
+ *
+ * Licensed under the MIT License (the "License");
+ * you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * https://opensource.org/licenses/MIT
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+const path_1 = require("path");
+const logger_1 = require("@fonoster/logger");
+const ort = __importStar(require("onnxruntime-node"));
+const chunkToFloat32Array_1 = require("./chunkToFloat32Array");
+const SileroVadModel_1 = require("./SileroVadModel");
+const logger = (0, logger_1.getLogger)({ service: "autopilot", filePath: __filename });
+const BUFFER_SIZE = 16000;
+async function makeVad(params) {
+    const { pathToModel, activationThreshold, deactivationThreshold, debounceFrames } = params;
+    const effectivePath = pathToModel || (0, path_1.join)(__dirname, "..", "..", "silero_vad.onnx");
+    const silero = await SileroVadModel_1.SileroVadModel.new(ort, effectivePath);
+    let audioBuffer = [];
+    let isSpeechActive = false;
+    let consecutiveSpeechFrames = 0;
+    let consecutiveNonSpeechFrames = 0;
+    return async function process(chunk, callback) {
+        const float32Array = (0, chunkToFloat32Array_1.chunkToFloat32Array)(chunk);
+        audioBuffer.push(...float32Array);
+        const processBuffer = async (buffer) => {
+            if (buffer.length < BUFFER_SIZE)
+                return buffer;
+            const audioFrame = buffer.slice(0, BUFFER_SIZE);
+            const remainingBuffer = buffer.slice(BUFFER_SIZE);
+            const result = await silero.process(new Float32Array(audioFrame));
+            logger.silly("last vad result", { ...result });
+            if (result.isSpeech > activationThreshold) {
+                consecutiveNonSpeechFrames = 0; // Reset non-speech counter
+                consecutiveSpeechFrames++;
+                if (consecutiveSpeechFrames >= debounceFrames && !isSpeechActive) {
+                    isSpeechActive = true;
+                    callback("SPEECH_START");
+                }
+            }
+            else {
+                consecutiveSpeechFrames = 0; // Reset speech counter
+                consecutiveNonSpeechFrames++;
+                if (consecutiveNonSpeechFrames >= debounceFrames &&
+                    isSpeechActive &&
+                    result.isSpeech < deactivationThreshold) {
+                    isSpeechActive = false;
+                    callback("SPEECH_END");
+                    silero.resetState(); // Reset VAD state after speech ends
+                }
+            }
+            return processBuffer(remainingBuffer);
+        };
+        audioBuffer = await processBuffer(audioBuffer);
+    };
+}
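A hedged usage sketch for the new makeVad factory, following the signature declared in makeVad.d.ts above. The require path and the threshold values are assumptions for illustration, not package documentation:

    const { makeVad } = require("@fonoster/autopilot/dist/vad/makeVad");

    async function main() {
        const processChunk = await makeVad({
            activationThreshold: 0.9,   // probability above which frames count toward SPEECH_START
            deactivationThreshold: 0.3, // probability below which frames count toward SPEECH_END
            debounceFrames: 2           // consecutive frames required before an event fires
        });

        // Feed 16 kHz, 16-bit mono PCM chunks; events arrive via the callback.
        const silentChunk = new Uint8Array(32000); // one second of silence
        await processChunk(silentChunk, (event) => console.log("vad event:", event));
    }

    main();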
package/dist/vad/types.d.ts
CHANGED
@@ -8,15 +8,12 @@ type SpeechProbabilities = {
 };
 type ONNXRuntimeAPI = {
     InferenceSession: {
-        create(modelArrayBuffer: ArrayBuffer
-            interOpNumThreads: number;
-            intraOpNumThreads: number;
-        }): Promise<unknown>;
+        create(modelArrayBuffer: ArrayBuffer): Promise<unknown>;
     };
     Tensor: {
-        new (type: "int64",
-        new (type: "
-        new (type: "float32", data: Float32Array
+        new (type: "int64", dims: [16000n]): unknown;
+        new (type: "float32", data: number[], dims: [2, 1, 64]): unknown;
+        new (type: "float32", data: Float32Array, dims: [1, number]): unknown;
         new (type: "float32", data: Float32Array, dims: [1, number]): unknown;
     };
 };
package/dist/vadWorker.js
CHANGED
@@ -20,10 +20,15 @@ Object.defineProperty(exports, "__esModule", { value: true });
 */
 const worker_threads_1 = require("worker_threads");
 const SileroVad_1 = require("./vad/SileroVad");
+const SileroVad_2 = require("./vadv5/SileroVad");
 const envs_1 = require("./envs");
-const
+const path_1 = require("path");
+const vad = envs_1.SILERO_VAD_VERSION === "v4" ? new SileroVad_1.SileroVad({
     ...worker_threads_1.workerData,
-    pathToModel:
+    pathToModel: (0, path_1.join)(__dirname, "..", "silero_vad.onnx")
+}) : new SileroVad_2.SileroVad({
+    ...worker_threads_1.workerData,
+    pathToModel: (0, path_1.join)(__dirname, "..", "silero_vad_v5.onnx")
 });
 vad.init().then(() => {
     worker_threads_1.parentPort?.on("message", (chunk) => {
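A hedged sketch of how a parent thread might spawn this worker. The worker file path, the parameter values, and the message protocol shown here are assumptions based on the constructor parameters visible in the SileroVad declarations; only workerData spreading and the per-chunk postMessage flow come from the diff above:

    const { Worker } = require("worker_threads");
    const { join } = require("path");

    const worker = new Worker(join(__dirname, "dist", "vadWorker.js"), {
        workerData: {
            activationThreshold: 0.9,
            deactivationThreshold: 0.3,
            debounceFrames: 2
        }
    });

    // Assumption: the worker posts SPEECH_START / SPEECH_END events back to the parent,
    // and receives raw 16-bit PCM chunks via postMessage.
    worker.on("message", (event) => console.log("vad event:", event));
    worker.postMessage(new Uint8Array(32000));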
package/dist/vadv5/SileroVad.d.ts
ADDED
@@ -0,0 +1,18 @@
+import { Vad } from "./types";
+declare class SileroVad implements Vad {
+    private vad;
+    private readonly params;
+    constructor(params: {
+        pathToModel: string;
+        activationThreshold: number;
+        deactivationThreshold: number;
+        debounceFrames: number;
+    });
+    pathToModel: string;
+    activationThreshold: number;
+    deactivationThreshold: number;
+    debounceFrames: number;
+    init(): Promise<void>;
+    processChunk(data: Uint8Array, callback: (event: "SPEECH_START" | "SPEECH_END") => void): void;
+}
+export { SileroVad };
package/dist/vadv5/SileroVad.js
ADDED
@@ -0,0 +1,41 @@
+"use strict";
+Object.defineProperty(exports, "__esModule", { value: true });
+exports.SileroVad = void 0;
+/* eslint-disable no-loops/no-loops */
+/*
+ * Copyright (C) 2025 by Fonoster Inc (https://fonoster.com)
+ * http://github.com/fonoster/fonoster
+ *
+ * This file is part of Fonoster
+ *
+ * Licensed under the MIT License (the "License");
+ * you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * https://opensource.org/licenses/MIT
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+const logger_1 = require("@fonoster/logger");
+const createVad_1 = require("./createVad");
+const logger = (0, logger_1.getLogger)({ service: "autopilot", filePath: __filename });
+class SileroVad {
+    constructor(params) {
+        logger.verbose("starting instance of silero vad v5", { ...params });
+        this.params = params;
+    }
+    async init() {
+        this.vad = await (0, createVad_1.createVad)(this.params);
+    }
+    processChunk(data, callback) {
+        if (!this.vad) {
+            throw new Error("VAD not initialized)");
+        }
+        this.vad(data, callback);
+    }
+}
+exports.SileroVad = SileroVad;
package/dist/vadv5/SileroVadModel.d.ts
ADDED
@@ -0,0 +1,14 @@
+import { ONNXRuntimeAPI, SpeechProbabilities } from "./types";
+declare class SileroVadModel {
+    private readonly ort;
+    private readonly pathToModel;
+    private _session;
+    private _state;
+    private _sr;
+    constructor(ort: ONNXRuntimeAPI, pathToModel: string);
+    static readonly new: (ort: ONNXRuntimeAPI, pathToModel: string) => Promise<SileroVadModel>;
+    init(): Promise<void>;
+    resetState: () => void;
+    process(audioFrame: Float32Array): Promise<SpeechProbabilities>;
+}
+export { SileroVadModel };
package/dist/vadv5/SileroVadModel.js
ADDED
@@ -0,0 +1,66 @@
+"use strict";
+var _a;
+Object.defineProperty(exports, "__esModule", { value: true });
+exports.SileroVadModel = void 0;
+/*
+ * Copyright (C) 2025 by Fonoster Inc (https://fonoster.com)
+ * http://github.com/fonoster/fonoster
+ *
+ * This file is part of Fonoster
+ *
+ * Licensed under the MIT License (the "License");
+ * you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * https://opensource.org/licenses/MIT
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+const fs_1 = require("fs");
+function getNewState(ortInstance) {
+    const zeroes = Array(2 * 128).fill(0);
+    return new ortInstance.Tensor("float32", zeroes, [2, 1, 128]);
+}
+class SileroVadModel {
+    constructor(ort, pathToModel) {
+        this.ort = ort;
+        this.pathToModel = pathToModel;
+        this.resetState = () => {
+            this._state = getNewState(this.ort);
+        };
+    }
+    async init() {
+        const modelArrayBuffer = (0, fs_1.readFileSync)(this.pathToModel).buffer;
+        const sessionOption = { interOpNumThreads: 1, intraOpNumThreads: 1 };
+        this._session = await this.ort.InferenceSession.create(modelArrayBuffer, sessionOption);
+        this._sr = new this.ort.Tensor("int64", [16000n]);
+        this._state = getNewState(this.ort);
+    }
+    async process(audioFrame) {
+        const t = new this.ort.Tensor("float32", audioFrame, [
+            1,
+            audioFrame.length
+        ]);
+        const inputs = {
+            input: t,
+            state: this._state,
+            sr: this._sr
+        };
+        const out = await this._session.run(inputs);
+        this._state = out["stateN"];
+        const [isSpeech] = out["output"].data;
+        const notSpeech = 1 - isSpeech;
+        return { notSpeech, isSpeech };
+    }
+}
+exports.SileroVadModel = SileroVadModel;
+_a = SileroVadModel;
+SileroVadModel.new = async (ort, pathToModel) => {
+    const model = new _a(ort, pathToModel);
+    await model.init();
+    return model;
+};
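Note the difference from the v4 wrapper earlier in this diff: Silero v5 feeds a single recurrent state tensor of shape [2, 1, 128] (input name "state", returned as "stateN"), while the v4 path feeds separate "h" and "c" tensors of shape [2, 1, 64] (returned as "hn" and "cn"). A small sketch of those shapes using onnxruntime-node, which the VAD code above already requires (zero values are for illustration only):

    const ort = require("onnxruntime-node");

    // v4 (dist/vad/SileroVadModel.js): separate h and c tensors, each shaped [2, 1, 64]
    const h = new ort.Tensor("float32", new Float32Array(2 * 64), [2, 1, 64]);
    const c = new ort.Tensor("float32", new Float32Array(2 * 64), [2, 1, 64]);

    // v5 (dist/vadv5/SileroVadModel.js): one combined state tensor shaped [2, 1, 128],
    // fed as "state" and read back from the "stateN" output after each run
    const state = new ort.Tensor("float32", new Float32Array(2 * 128), [2, 1, 128]);

    console.log(h.dims, c.dims, state.dims);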
package/dist/vadv5/chunkToFloat32Array.js
ADDED
@@ -0,0 +1,41 @@
+"use strict";
+/*
+ * Copyright (C) 2025 by Fonoster Inc (https://fonoster.com)
+ * http://github.com/fonoster/fonoster
+ *
+ * This file is part of Fonoster
+ *
+ * Licensed under the MIT License (the "License");
+ * you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * https://opensource.org/licenses/MIT
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+Object.defineProperty(exports, "__esModule", { value: true });
+exports.chunkToFloat32Array = chunkToFloat32Array;
+// This version of the chunkToFloat32Array accounts for the case where
+// the byteOffset is misaligned.
+//
+// Q. Would it be the same if we just created a new Uint8Array from the chunk?
+function chunkToFloat32Array(chunk) {
+    let int16Array;
+    const alignedByteOffset = chunk.byteOffset % Int16Array.BYTES_PER_ELEMENT === 0;
+    if (alignedByteOffset) {
+        int16Array = new Int16Array(chunk.buffer, chunk.byteOffset, chunk.byteLength / Int16Array.BYTES_PER_ELEMENT);
+    }
+    else {
+        const alignedChunk = new Uint8Array(chunk);
+        int16Array = new Int16Array(alignedChunk.buffer, alignedChunk.byteOffset, alignedChunk.byteLength / Int16Array.BYTES_PER_ELEMENT);
+    }
+    const floatArray = new Float32Array(int16Array.length);
+    for (let i = 0; i < int16Array.length; i++) {
+        floatArray[i] = int16Array[i] / 32768.0;
+    }
+    return floatArray;
+}
package/dist/vadv5/index.js
ADDED
@@ -0,0 +1,37 @@
+"use strict";
+var __createBinding = (this && this.__createBinding) || (Object.create ? (function(o, m, k, k2) {
+    if (k2 === undefined) k2 = k;
+    var desc = Object.getOwnPropertyDescriptor(m, k);
+    if (!desc || ("get" in desc ? !m.__esModule : desc.writable || desc.configurable)) {
+      desc = { enumerable: true, get: function() { return m[k]; } };
+    }
+    Object.defineProperty(o, k2, desc);
+}) : (function(o, m, k, k2) {
+    if (k2 === undefined) k2 = k;
+    o[k2] = m[k];
+}));
+var __exportStar = (this && this.__exportStar) || function(m, exports) {
+    for (var p in m) if (p !== "default" && !Object.prototype.hasOwnProperty.call(exports, p)) __createBinding(exports, m, p);
+};
+Object.defineProperty(exports, "__esModule", { value: true });
+/* eslint-disable no-loops/no-loops */
+/*
+ * Copyright (C) 2025 by Fonoster Inc (https://fonoster.com)
+ * http://github.com/fonoster/fonoster
+ *
+ * This file is part of Fonoster
+ *
+ * Licensed under the MIT License (the "License");
+ * you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * https://opensource.org/licenses/MIT
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+__exportStar(require("./SileroVad"), exports);
+__exportStar(require("./types"), exports);
package/dist/vadv5/types.d.ts
ADDED
@@ -0,0 +1,23 @@
+type VadEvent = "SPEECH_START" | "SPEECH_END";
+type Vad = {
+    processChunk: (chunk: Uint8Array, callback: (event: VadEvent) => void) => void;
+};
+type SpeechProbabilities = {
+    notSpeech: number;
+    isSpeech: number;
+};
+type ONNXRuntimeAPI = {
+    InferenceSession: {
+        create(modelArrayBuffer: ArrayBuffer, sessionOption: {
+            interOpNumThreads: number;
+            intraOpNumThreads: number;
+        }): Promise<unknown>;
+    };
+    Tensor: {
+        new (type: "int64", data: BigInt[]): unknown;
+        new (type: "int64", data: BigInt[], dims: [1]): unknown;
+        new (type: "float32", data: Float32Array | number[], dims: [2, 1, 128]): unknown;
+        new (type: "float32", data: Float32Array, dims: [1, number]): unknown;
+    };
+};
+export { ONNXRuntimeAPI, SpeechProbabilities, Vad, VadEvent };
package/package.json
CHANGED
@@ -1,6 +1,6 @@
 {
   "name": "@fonoster/autopilot",
-  "version": "0.8.39",
+  "version": "0.8.41",
   "description": "Voice AI for the Fonoster platform",
   "author": "Pedro Sanders <psanders@fonoster.com>",
   "homepage": "https://github.com/fonoster/fonoster#readme",
@@ -33,11 +33,11 @@
   },
   "dependencies": {
     "@aws-sdk/client-s3": "^3.712.0",
-    "@fonoster/common": "^0.8.
-    "@fonoster/logger": "^0.8.
-    "@fonoster/sdk": "^0.8.
-    "@fonoster/types": "^0.8.
-    "@fonoster/voice": "^0.8.
+    "@fonoster/common": "^0.8.41",
+    "@fonoster/logger": "^0.8.41",
+    "@fonoster/sdk": "^0.8.41",
+    "@fonoster/types": "^0.8.41",
+    "@fonoster/voice": "^0.8.41",
     "@langchain/community": "^0.3.19",
     "@langchain/core": "^0.3.23",
     "@langchain/groq": "^0.1.2",
@@ -55,5 +55,5 @@
   "devDependencies": {
     "typescript": "^5.5.4"
   },
-  "gitHead": "
+  "gitHead": "9aeac381dc6e2864d81b3fcc8dfe6f067910516f"
 }
package/silero_vad.onnx
CHANGED
Binary file

/package/dist/{vad → vadv5}/createVad.d.ts
File without changes

/package/dist/{vad → vadv5}/createVad.js
File without changes