@livekit/agents 1.0.5 → 1.1.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/index.cjs +3 -0
- package/dist/index.cjs.map +1 -1
- package/dist/index.d.cts +2 -1
- package/dist/index.d.ts +2 -1
- package/dist/index.d.ts.map +1 -1
- package/dist/index.js +2 -0
- package/dist/index.js.map +1 -1
- package/dist/inference/api_protos.cjs +104 -0
- package/dist/inference/api_protos.cjs.map +1 -0
- package/dist/inference/api_protos.d.cts +222 -0
- package/dist/inference/api_protos.d.ts +222 -0
- package/dist/inference/api_protos.d.ts.map +1 -0
- package/dist/inference/api_protos.js +70 -0
- package/dist/inference/api_protos.js.map +1 -0
- package/dist/inference/index.cjs +56 -0
- package/dist/inference/index.cjs.map +1 -0
- package/dist/inference/index.d.cts +9 -0
- package/dist/inference/index.d.ts +9 -0
- package/dist/inference/index.d.ts.map +1 -0
- package/dist/inference/index.js +16 -0
- package/dist/inference/index.js.map +1 -0
- package/dist/inference/llm.cjs +315 -0
- package/dist/inference/llm.cjs.map +1 -0
- package/dist/inference/llm.d.cts +92 -0
- package/dist/inference/llm.d.ts +92 -0
- package/dist/inference/llm.d.ts.map +1 -0
- package/dist/inference/llm.js +286 -0
- package/dist/inference/llm.js.map +1 -0
- package/dist/inference/stt.cjs +305 -0
- package/dist/inference/stt.cjs.map +1 -0
- package/dist/inference/stt.d.cts +79 -0
- package/dist/inference/stt.d.ts +79 -0
- package/dist/inference/stt.d.ts.map +1 -0
- package/dist/inference/stt.js +284 -0
- package/dist/inference/stt.js.map +1 -0
- package/dist/inference/tts.cjs +317 -0
- package/dist/inference/tts.cjs.map +1 -0
- package/dist/inference/tts.d.cts +75 -0
- package/dist/inference/tts.d.ts +75 -0
- package/dist/inference/tts.d.ts.map +1 -0
- package/dist/inference/tts.js +299 -0
- package/dist/inference/tts.js.map +1 -0
- package/dist/inference/utils.cjs +76 -0
- package/dist/inference/utils.cjs.map +1 -0
- package/dist/inference/utils.d.cts +5 -0
- package/dist/inference/utils.d.ts +5 -0
- package/dist/inference/utils.d.ts.map +1 -0
- package/dist/inference/utils.js +51 -0
- package/dist/inference/utils.js.map +1 -0
- package/dist/tts/tts.cjs +1 -1
- package/dist/tts/tts.cjs.map +1 -1
- package/dist/tts/tts.js +1 -1
- package/dist/tts/tts.js.map +1 -1
- package/dist/utils.cjs +11 -0
- package/dist/utils.cjs.map +1 -1
- package/dist/utils.d.cts +1 -0
- package/dist/utils.d.ts +1 -0
- package/dist/utils.d.ts.map +1 -1
- package/dist/utils.js +10 -0
- package/dist/utils.js.map +1 -1
- package/dist/voice/agent.cjs +16 -3
- package/dist/voice/agent.cjs.map +1 -1
- package/dist/voice/agent.d.cts +4 -3
- package/dist/voice/agent.d.ts +4 -3
- package/dist/voice/agent.d.ts.map +1 -1
- package/dist/voice/agent.js +20 -3
- package/dist/voice/agent.js.map +1 -1
- package/dist/voice/agent_session.cjs +16 -3
- package/dist/voice/agent_session.cjs.map +1 -1
- package/dist/voice/agent_session.d.cts +4 -3
- package/dist/voice/agent_session.d.ts +4 -3
- package/dist/voice/agent_session.d.ts.map +1 -1
- package/dist/voice/agent_session.js +20 -3
- package/dist/voice/agent_session.js.map +1 -1
- package/dist/voice/room_io/_input.cjs +9 -0
- package/dist/voice/room_io/_input.cjs.map +1 -1
- package/dist/voice/room_io/_input.d.ts.map +1 -1
- package/dist/voice/room_io/_input.js +10 -0
- package/dist/voice/room_io/_input.js.map +1 -1
- package/dist/worker.cjs.map +1 -1
- package/dist/worker.d.ts.map +1 -1
- package/dist/worker.js +1 -1
- package/dist/worker.js.map +1 -1
- package/package.json +3 -2
- package/src/index.ts +2 -1
- package/src/inference/api_protos.ts +82 -0
- package/src/inference/index.ts +12 -0
- package/src/inference/llm.ts +485 -0
- package/src/inference/stt.ts +414 -0
- package/src/inference/tts.ts +421 -0
- package/src/inference/utils.ts +66 -0
- package/src/tts/tts.ts +1 -1
- package/src/utils.ts +11 -0
- package/src/voice/agent.ts +30 -6
- package/src/voice/agent_session.ts +29 -6
- package/src/voice/room_io/_input.ts +12 -1
- package/src/worker.ts +2 -7

package/dist/inference/stt.cjs
@@ -0,0 +1,305 @@
+"use strict";
+var __defProp = Object.defineProperty;
+var __getOwnPropDesc = Object.getOwnPropertyDescriptor;
+var __getOwnPropNames = Object.getOwnPropertyNames;
+var __hasOwnProp = Object.prototype.hasOwnProperty;
+var __export = (target, all) => {
+  for (var name in all)
+    __defProp(target, name, { get: all[name], enumerable: true });
+};
+var __copyProps = (to, from, except, desc) => {
+  if (from && typeof from === "object" || typeof from === "function") {
+    for (let key of __getOwnPropNames(from))
+      if (!__hasOwnProp.call(to, key) && key !== except)
+        __defProp(to, key, { get: () => from[key], enumerable: !(desc = __getOwnPropDesc(from, key)) || desc.enumerable });
+  }
+  return to;
+};
+var __toCommonJS = (mod) => __copyProps(__defProp({}, "__esModule", { value: true }), mod);
+var stt_exports = {};
+__export(stt_exports, {
+  STT: () => STT,
+  SpeechStream: () => SpeechStream
+});
+module.exports = __toCommonJS(stt_exports);
+var import_rtc_node = require("@livekit/rtc-node");
+var import_ws = require("ws");
+var import_exceptions = require("../_exceptions.cjs");
+var import_audio = require("../audio.cjs");
+var import_log = require("../log.cjs");
+var import_stt = require("../stt/index.cjs");
+var import_types = require("../types.cjs");
+var import_utils = require("../utils.cjs");
+var import_utils2 = require("./utils.cjs");
+const DEFAULT_ENCODING = "pcm_s16le";
+const DEFAULT_SAMPLE_RATE = 16e3;
+const DEFAULT_BASE_URL = "wss://agent-gateway.livekit.cloud/v1";
+const DEFAULT_CANCEL_TIMEOUT = 5e3;
+class STT extends import_stt.STT {
+  opts;
+  streams = /* @__PURE__ */ new Set();
+  constructor(opts) {
+    super({ streaming: true, interimResults: true });
+    const {
+      model,
+      language,
+      baseURL,
+      encoding = DEFAULT_ENCODING,
+      sampleRate = DEFAULT_SAMPLE_RATE,
+      apiKey,
+      apiSecret,
+      extraKwargs = {}
+    } = opts || {};
+    const lkBaseURL = baseURL || process.env.LIVEKIT_INFERENCE_URL || DEFAULT_BASE_URL;
+    const lkApiKey = apiKey || process.env.LIVEKIT_INFERENCE_API_KEY || process.env.LIVEKIT_API_KEY;
+    if (!lkApiKey) {
+      throw new Error("apiKey is required: pass apiKey or set LIVEKIT_API_KEY");
+    }
+    const lkApiSecret = apiSecret || process.env.LIVEKIT_INFERENCE_API_SECRET || process.env.LIVEKIT_API_SECRET;
+    if (!lkApiSecret) {
+      throw new Error("apiSecret is required: pass apiSecret or set LIVEKIT_API_SECRET");
+    }
+    this.opts = {
+      model,
+      language,
+      encoding,
+      sampleRate,
+      baseURL: lkBaseURL,
+      apiKey: lkApiKey,
+      apiSecret: lkApiSecret,
+      extraKwargs
+    };
+  }
+  get label() {
+    return "inference.STT";
+  }
+  async _recognize(_) {
+    throw new Error("LiveKit STT does not support batch recognition, use stream() instead");
+  }
+  updateOptions(opts) {
+    this.opts = { ...this.opts, ...opts };
+    for (const stream of this.streams) {
+      stream.updateOptions(opts);
+    }
+  }
+  stream(options) {
+    const { language, connOptions = import_types.DEFAULT_API_CONNECT_OPTIONS } = options || {};
+    const streamOpts = {
+      ...this.opts,
+      language: language ?? this.opts.language
+    };
+    const stream = new SpeechStream(this, streamOpts, connOptions);
+    this.streams.add(stream);
+    return stream;
+  }
+}
+class SpeechStream extends import_stt.SpeechStream {
+  opts;
+  requestId = (0, import_utils.shortuuid)("stt_request_");
+  speaking = false;
+  speechDuration = 0;
+  reconnectEvent = new import_utils.Event();
+  #logger = (0, import_log.log)();
+  constructor(sttImpl, opts, connOptions) {
+    super(sttImpl, opts.sampleRate, connOptions);
+    this.opts = opts;
+  }
+  get label() {
+    return "inference.SpeechStream";
+  }
+  updateOptions(opts) {
+    this.opts = { ...this.opts, ...opts };
+  }
+  async run() {
+    let ws = null;
+    let closingWs = false;
+    this.reconnectEvent.set();
+    const connect = async () => {
+      const params = {
+        settings: {
+          sample_rate: String(this.opts.sampleRate),
+          encoding: this.opts.encoding,
+          extra: this.opts.extraKwargs
+        }
+      };
+      if (this.opts.model) {
+        params.model = this.opts.model;
+      }
+      if (this.opts.language) {
+        params.settings.language = this.opts.language;
+      }
+      let baseURL = this.opts.baseURL;
+      if (baseURL.startsWith("http://") || baseURL.startsWith("https://")) {
+        baseURL = baseURL.replace("http", "ws");
+      }
+      const token = await (0, import_utils2.createAccessToken)(this.opts.apiKey, this.opts.apiSecret);
+      const url = `${baseURL}/stt`;
+      const headers = { Authorization: `Bearer ${token}` };
+      const socket = await (0, import_utils2.connectWs)(url, headers, 1e4);
+      const msg = { ...params, type: "session.create" };
+      socket.send(JSON.stringify(msg));
+      return socket;
+    };
+    const send = async (socket, signal) => {
+      const audioStream = new import_audio.AudioByteStream(
+        this.opts.sampleRate,
+        1,
+        Math.floor(this.opts.sampleRate / 20)
+        // 50ms
+      );
+      for await (const ev of this.input) {
+        if (signal.aborted) break;
+        let frames;
+        if (ev === SpeechStream.FLUSH_SENTINEL) {
+          frames = audioStream.flush();
+        } else {
+          const frame = ev;
+          frames = audioStream.write(new Int16Array(frame.data).buffer);
+        }
+        for (const frame of frames) {
+          this.speechDuration += frame.samplesPerChannel / frame.sampleRate;
+          const base64 = Buffer.from(frame.data.buffer).toString("base64");
+          const msg = { type: "input_audio", audio: base64 };
+          socket.send(JSON.stringify(msg));
+        }
+      }
+      closingWs = true;
+      socket.send(JSON.stringify({ type: "session.finalize" }));
+    };
+    const recv = async (socket, signal) => {
+      while (!this.closed && !signal.aborted) {
+        const dataPromise = new Promise((resolve, reject) => {
+          const messageHandler = (d) => {
+            resolve(d.toString());
+            removeListeners();
+          };
+          const errorHandler = (e) => {
+            reject(e);
+            removeListeners();
+          };
+          const closeHandler = (code) => {
+            if (closingWs) {
+              resolve("");
+            } else {
+              reject(
+                new import_exceptions.APIStatusError({
+                  message: "LiveKit STT connection closed unexpectedly",
+                  options: { statusCode: code }
+                })
+              );
+            }
+            removeListeners();
+          };
+          const removeListeners = () => {
+            socket.removeListener("message", messageHandler);
+            socket.removeListener("error", errorHandler);
+            socket.removeListener("close", closeHandler);
+          };
+          socket.once("message", messageHandler);
+          socket.once("error", errorHandler);
+          socket.once("close", closeHandler);
+        });
+        const data = await Promise.race([dataPromise, (0, import_utils.waitForAbort)(signal)]);
+        if (!data || signal.aborted) return;
+        const json = JSON.parse(data);
+        const type = json.type;
+        switch (type) {
+          case "session.created":
+          case "session.finalized":
+          case "session.closed":
+            break;
+          case "interim_transcript":
+            this.processTranscript(json, false);
+            break;
+          case "final_transcript":
+            this.processTranscript(json, true);
+            break;
+          case "error":
+            this.#logger.error("received error from LiveKit STT: %o", json);
+            throw new import_exceptions.APIError(`LiveKit STT returned error: ${JSON.stringify(json)}`);
+          default:
+            this.#logger.warn("received unexpected message from LiveKit STT: %o", json);
+            break;
+        }
+      }
+    };
+    while (true) {
+      try {
+        ws = await connect();
+        const sendTask = import_utils.Task.from(async ({ signal }) => {
+          await send(ws, signal);
+        });
+        const recvTask = import_utils.Task.from(async ({ signal }) => {
+          await recv(ws, signal);
+        });
+        const tasks = [sendTask, recvTask];
+        const waitReconnectTask = import_utils.Task.from(async ({ signal }) => {
+          await Promise.race([this.reconnectEvent.wait(), (0, import_utils.waitForAbort)(signal)]);
+        });
+        try {
+          await Promise.race([
+            Promise.all(tasks.map((task) => task.result)),
+            waitReconnectTask.result
+          ]);
+          if (!waitReconnectTask.done) break;
+          this.reconnectEvent.clear();
+        } finally {
+          await (0, import_utils.cancelAndWait)([sendTask, recvTask, waitReconnectTask], DEFAULT_CANCEL_TIMEOUT);
+        }
+      } finally {
+        try {
+          if (ws) ws.close();
+        } catch {
+        }
+      }
+    }
+  }
+  processTranscript(data, isFinal) {
+    const requestId = data.request_id ?? this.requestId;
+    const text = data.transcript ?? "";
+    const language = data.language ?? this.opts.language ?? "en";
+    if (!text && !isFinal) return;
+    if (!this.speaking) {
+      this.speaking = true;
+      this.queue.put({ type: import_stt.SpeechEventType.START_OF_SPEECH });
+    }
+    const speechData = {
+      language,
+      startTime: data.start ?? 0,
+      endTime: data.duration ?? 0,
+      confidence: data.confidence ?? 1,
+      text
+    };
+    if (isFinal) {
+      if (this.speechDuration > 0) {
+        this.queue.put({
+          type: import_stt.SpeechEventType.RECOGNITION_USAGE,
+          requestId,
+          recognitionUsage: { audioDuration: this.speechDuration }
+        });
+        this.speechDuration = 0;
+      }
+      this.queue.put({
+        type: import_stt.SpeechEventType.FINAL_TRANSCRIPT,
+        requestId,
+        alternatives: [speechData]
+      });
+      if (this.speaking) {
+        this.speaking = false;
+        this.queue.put({ type: import_stt.SpeechEventType.END_OF_SPEECH });
+      }
+    } else {
+      this.queue.put({
+        type: import_stt.SpeechEventType.INTERIM_TRANSCRIPT,
+        requestId,
+        alternatives: [speechData]
+      });
+    }
+  }
+}
+// Annotate the CommonJS export names for ESM import in node:
+0 && (module.exports = {
+  STT,
+  SpeechStream
+});
+//# sourceMappingURL=stt.cjs.map
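
The websocket exchange that `SpeechStream.run()` above implements is small: the client connects to `${baseURL}/stt` on the agent gateway with a bearer token minted by `createAccessToken`, sends one `session.create` message carrying the model and audio settings, then a stream of base64 `input_audio` chunks (roughly 50 ms of PCM each), and finally `session.finalize`; the gateway replies with `session.created`, `interim_transcript`, `final_transcript`, `error`, and `session.finalized`/`session.closed` events. The sketch below only restates those message shapes as a reading aid; the type names are ours and are not exported anywhere in this release.

```ts
// Hypothetical type names (not part of the package); field names and literal
// values mirror the JSON built and parsed in dist/inference/stt.cjs above.
export type SttClientMessage =
  | {
      type: 'session.create';
      model?: string; // e.g. 'deepgram/nova-3'; omitted when no model is set
      settings: {
        sample_rate: string; // sent as a string, e.g. '16000'
        encoding: 'pcm_s16le';
        language?: string;
        extra: Record<string, unknown>; // provider-specific extraKwargs, passed through
      };
    }
  | { type: 'input_audio'; audio: string } // base64-encoded ~50 ms PCM frame
  | { type: 'session.finalize' };

export type SttServerMessage =
  | { type: 'session.created' | 'session.finalized' | 'session.closed' }
  | {
      type: 'interim_transcript' | 'final_transcript';
      request_id?: string;
      transcript?: string;
      language?: string;
      start?: number;
      duration?: number;
      confidence?: number;
    }
  | { type: 'error'; [key: string]: unknown };
```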

package/dist/inference/stt.d.cts
@@ -0,0 +1,79 @@
+import { STT as BaseSTT, SpeechStream as BaseSpeechStream, type SpeechEvent } from '../stt/index.js';
+import { type APIConnectOptions } from '../types.js';
+import { type AudioBuffer } from '../utils.js';
+import { type AnyModels } from './utils.js';
+export type DeepgramModels = 'deepgram' | 'deepgram/nova-3' | 'deepgram/nova-3-general' | 'deepgram/nova-3-medical' | 'deepgram/nova-2' | 'deepgram/nova-2-general' | 'deepgram/nova-2-medical' | 'deepgram/nova-2-phonecall';
+export type CartesiaModels = 'cartesia' | 'cartesia/ink-whisper';
+export type AssemblyaiModels = 'assemblyai' | 'assemblyai/universal-streaming';
+export interface CartesiaOptions {
+    min_volume?: number;
+    max_silence_duration_secs?: number;
+}
+export interface DeepgramOptions {
+    filler_words?: boolean;
+    interim_results?: boolean;
+    endpointing?: number;
+    punctuate?: boolean;
+    smart_format?: boolean;
+    keywords?: Array<[string, number]>;
+    keyterms?: string[];
+    profanity_filter?: boolean;
+    numerals?: boolean;
+    mip_opt_out?: boolean;
+}
+export interface AssemblyaiOptions {
+    format_turns?: boolean;
+    end_of_turn_confidence_threshold?: number;
+    min_end_of_turn_silence_when_confident?: number;
+    max_turn_silence?: number;
+    keyterms_prompt?: string[];
+}
+export type STTModels = DeepgramModels | CartesiaModels | AssemblyaiModels | AnyModels;
+export type STTOptions<TModel extends STTModels> = TModel extends DeepgramModels ? DeepgramOptions : TModel extends CartesiaModels ? CartesiaOptions : TModel extends AssemblyaiModels ? AssemblyaiOptions : Record<string, unknown>;
+export type STTLanguages = 'en' | 'de' | 'es' | 'fr' | 'ja' | 'pt' | 'zh';
+export type STTEncoding = 'pcm_s16le';
+export interface InferenceSTTOptions<TModel extends STTModels> {
+    model: TModel;
+    language?: STTLanguages | string;
+    encoding: STTEncoding;
+    sampleRate: number;
+    baseURL: string;
+    apiKey: string;
+    apiSecret: string;
+    extraKwargs: STTOptions<TModel>;
+}
+export declare class STT<TModel extends STTModels> extends BaseSTT {
+    private opts;
+    private streams;
+    constructor(opts: {
+        model: TModel;
+        language?: STTLanguages | string;
+        baseURL?: string;
+        encoding?: STTEncoding;
+        sampleRate?: number;
+        apiKey?: string;
+        apiSecret?: string;
+        extraKwargs?: STTOptions<TModel>;
+    });
+    get label(): string;
+    protected _recognize(_: AudioBuffer): Promise<SpeechEvent>;
+    updateOptions(opts: Partial<Pick<InferenceSTTOptions<TModel>, 'model' | 'language'>>): void;
+    stream(options?: {
+        language?: STTLanguages | string;
+        connOptions?: APIConnectOptions;
+    }): SpeechStream<TModel>;
+}
+export declare class SpeechStream<TModel extends STTModels> extends BaseSpeechStream {
+    #private;
+    private opts;
+    private requestId;
+    private speaking;
+    private speechDuration;
+    private reconnectEvent;
+    constructor(sttImpl: STT<TModel>, opts: InferenceSTTOptions<TModel>, connOptions: APIConnectOptions);
+    get label(): string;
+    updateOptions(opts: Partial<Pick<InferenceSTTOptions<TModel>, 'model' | 'language'>>): void;
+    protected run(): Promise<void>;
+    private processTranscript;
+}
+//# sourceMappingURL=stt.d.ts.map
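
A note on the `STTOptions<TModel>` conditional type declared above: it is what gives `extraKwargs` provider-specific typing, so a Deepgram model narrows it to `DeepgramOptions`, a Cartesia model to `CartesiaOptions`, an AssemblyAI model to `AssemblyaiOptions`, and anything else falls back to `Record<string, unknown>`. A small illustration follows; the import path is an assumption (the declarations live in `dist/inference/stt.d.ts`), and the variable names are ours.

```ts
// Assumed import path; adjust to however the package exports the inference module.
import type { STTOptions } from '@livekit/agents/dist/inference/stt.js';

// Deepgram model -> DeepgramOptions: endpointing, keyterms, etc. are accepted.
const deepgramExtras: STTOptions<'deepgram/nova-3'> = {
  endpointing: 25,
  keyterms: ['LiveKit'],
};

// AssemblyAI model -> AssemblyaiOptions.
const assemblyaiExtras: STTOptions<'assemblyai/universal-streaming'> = {
  format_turns: true,
  end_of_turn_confidence_threshold: 0.01,
};

// Other model strings fall through to Record<string, unknown>, assuming AnyModels
// (declared in ./utils.d.ts, not shown in this hunk) admits arbitrary strings.
const customExtras: STTOptions<'someprovider/some-model'> = { any_flag: true };
```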

package/dist/inference/stt.d.ts
@@ -0,0 +1,79 @@
+import { STT as BaseSTT, SpeechStream as BaseSpeechStream, type SpeechEvent } from '../stt/index.js';
+import { type APIConnectOptions } from '../types.js';
+import { type AudioBuffer } from '../utils.js';
+import { type AnyModels } from './utils.js';
+export type DeepgramModels = 'deepgram' | 'deepgram/nova-3' | 'deepgram/nova-3-general' | 'deepgram/nova-3-medical' | 'deepgram/nova-2' | 'deepgram/nova-2-general' | 'deepgram/nova-2-medical' | 'deepgram/nova-2-phonecall';
+export type CartesiaModels = 'cartesia' | 'cartesia/ink-whisper';
+export type AssemblyaiModels = 'assemblyai' | 'assemblyai/universal-streaming';
+export interface CartesiaOptions {
+    min_volume?: number;
+    max_silence_duration_secs?: number;
+}
+export interface DeepgramOptions {
+    filler_words?: boolean;
+    interim_results?: boolean;
+    endpointing?: number;
+    punctuate?: boolean;
+    smart_format?: boolean;
+    keywords?: Array<[string, number]>;
+    keyterms?: string[];
+    profanity_filter?: boolean;
+    numerals?: boolean;
+    mip_opt_out?: boolean;
+}
+export interface AssemblyaiOptions {
+    format_turns?: boolean;
+    end_of_turn_confidence_threshold?: number;
+    min_end_of_turn_silence_when_confident?: number;
+    max_turn_silence?: number;
+    keyterms_prompt?: string[];
+}
+export type STTModels = DeepgramModels | CartesiaModels | AssemblyaiModels | AnyModels;
+export type STTOptions<TModel extends STTModels> = TModel extends DeepgramModels ? DeepgramOptions : TModel extends CartesiaModels ? CartesiaOptions : TModel extends AssemblyaiModels ? AssemblyaiOptions : Record<string, unknown>;
+export type STTLanguages = 'en' | 'de' | 'es' | 'fr' | 'ja' | 'pt' | 'zh';
+export type STTEncoding = 'pcm_s16le';
+export interface InferenceSTTOptions<TModel extends STTModels> {
+    model: TModel;
+    language?: STTLanguages | string;
+    encoding: STTEncoding;
+    sampleRate: number;
+    baseURL: string;
+    apiKey: string;
+    apiSecret: string;
+    extraKwargs: STTOptions<TModel>;
+}
+export declare class STT<TModel extends STTModels> extends BaseSTT {
+    private opts;
+    private streams;
+    constructor(opts: {
+        model: TModel;
+        language?: STTLanguages | string;
+        baseURL?: string;
+        encoding?: STTEncoding;
+        sampleRate?: number;
+        apiKey?: string;
+        apiSecret?: string;
+        extraKwargs?: STTOptions<TModel>;
+    });
+    get label(): string;
+    protected _recognize(_: AudioBuffer): Promise<SpeechEvent>;
+    updateOptions(opts: Partial<Pick<InferenceSTTOptions<TModel>, 'model' | 'language'>>): void;
+    stream(options?: {
+        language?: STTLanguages | string;
+        connOptions?: APIConnectOptions;
+    }): SpeechStream<TModel>;
+}
+export declare class SpeechStream<TModel extends STTModels> extends BaseSpeechStream {
+    #private;
+    private opts;
+    private requestId;
+    private speaking;
+    private speechDuration;
+    private reconnectEvent;
+    constructor(sttImpl: STT<TModel>, opts: InferenceSTTOptions<TModel>, connOptions: APIConnectOptions);
+    get label(): string;
+    updateOptions(opts: Partial<Pick<InferenceSTTOptions<TModel>, 'model' | 'language'>>): void;
+    protected run(): Promise<void>;
+    private processTranscript;
+}
+//# sourceMappingURL=stt.d.ts.map
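
Taken together with the constructor defaults in `stt.cjs`, the declarations above suggest the following usage. This is a sketch, not documentation: the import path is an assumption, and feeding audio into the stream goes through the base `SpeechStream` API from `../stt/index.js`, which is unchanged in this release and not shown here.

```ts
// Sketch under assumptions: import path is guessed; only members declared in
// dist/inference/stt.d.ts (constructor, stream(), updateOptions(), label) are used.
import { STT } from '@livekit/agents/dist/inference/stt.js';

// apiKey/apiSecret fall back to LIVEKIT_INFERENCE_API_KEY/SECRET and then
// LIVEKIT_API_KEY/SECRET; baseURL falls back to LIVEKIT_INFERENCE_URL and then
// wss://agent-gateway.livekit.cloud/v1 (see the stt.cjs constructor above).
const stt = new STT({
  model: 'deepgram/nova-3',
  language: 'en',
  extraKwargs: { interim_results: true }, // typed as DeepgramOptions for this model
});

// Opens a streaming recognition session against the agent gateway; audio frames
// go in through the base SpeechStream input and interim/final transcript events
// come back out as SpeechEvents.
const stream = stt.stream({ language: 'en' });
console.log(stream.label); // "inference.SpeechStream"

// Later option changes are forwarded to every open stream.
stt.updateOptions({ language: 'de' });
```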