@livekit/agents-plugin-sarvam 1.0.45
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/LICENSE +201 -0
- package/README.md +110 -0
- package/dist/index.cjs +52 -0
- package/dist/index.cjs.map +1 -0
- package/dist/index.d.cts +4 -0
- package/dist/index.d.ts +4 -0
- package/dist/index.d.ts.map +1 -0
- package/dist/index.js +29 -0
- package/dist/index.js.map +1 -0
- package/dist/models.cjs +17 -0
- package/dist/models.cjs.map +1 -0
- package/dist/models.d.cts +36 -0
- package/dist/models.d.ts +36 -0
- package/dist/models.d.ts.map +1 -0
- package/dist/models.js +1 -0
- package/dist/models.js.map +1 -0
- package/dist/stt.cjs +499 -0
- package/dist/stt.cjs.map +1 -0
- package/dist/stt.d.cts +104 -0
- package/dist/stt.d.ts +104 -0
- package/dist/stt.d.ts.map +1 -0
- package/dist/stt.js +483 -0
- package/dist/stt.js.map +1 -0
- package/dist/stt.test.cjs +18 -0
- package/dist/stt.test.cjs.map +1 -0
- package/dist/stt.test.d.cts +2 -0
- package/dist/stt.test.d.ts +2 -0
- package/dist/stt.test.d.ts.map +1 -0
- package/dist/stt.test.js +17 -0
- package/dist/stt.test.js.map +1 -0
- package/dist/tts.cjs +405 -0
- package/dist/tts.cjs.map +1 -0
- package/dist/tts.d.cts +111 -0
- package/dist/tts.d.ts +111 -0
- package/dist/tts.d.ts.map +1 -0
- package/dist/tts.js +385 -0
- package/dist/tts.js.map +1 -0
- package/dist/tts.test.cjs +17 -0
- package/dist/tts.test.cjs.map +1 -0
- package/dist/tts.test.d.cts +2 -0
- package/dist/tts.test.d.ts +2 -0
- package/dist/tts.test.d.ts.map +1 -0
- package/dist/tts.test.js +16 -0
- package/dist/tts.test.js.map +1 -0
- package/package.json +54 -0
- package/src/index.ts +34 -0
- package/src/models.ts +135 -0
- package/src/stt.test.ts +23 -0
- package/src/stt.ts +770 -0
- package/src/tts.test.ts +22 -0
- package/src/tts.ts +571 -0
package/dist/stt.d.cts
ADDED
|
@@ -0,0 +1,104 @@
|
|
|
1
|
+
// NOTE: generated type declarations for dist/stt.cjs (CommonJS build of src/stt.ts).
import { type APIConnectOptions, type AudioBuffer, stt } from '@livekit/agents';
import type { STTLanguages, STTModels, STTModes, STTV2Languages, STTV3Languages } from './models.js';
/** Options shared by every Sarvam STT model. */
interface STTBaseOptions {
    /** Sarvam API key. Defaults to $SARVAM_API_KEY */
    apiKey?: string;
    /**
     * Whether to use native WebSocket streaming for `stream()`.
     * Set to `false` to prefer non-streaming REST recognition (used by Agent via StreamAdapter + VAD).
     * Default: `true`.
     */
    streaming?: boolean;
    /** Increase VAD sensitivity (WS only). Maps to `high_vad_sensitivity` query param. */
    highVadSensitivity?: boolean;
    /** Enable flush signal events from server (WS only). Maps to `flush_signal` query param. */
    flushSignal?: boolean;
}
/**
 * Options specific to saarika:v2.5.
 * saarika:v2.5 will be deprecated soon — prefer {@link STTV3Options} with `saaras:v3` for new integrations.
 * All v2.5 language codes are also supported by v3.
 * @see {@link https://docs.sarvam.ai/api-reference-docs/speech-to-text/transcribe | Sarvam STT API docs}
 */
export interface STTV2Options extends STTBaseOptions {
    model: 'saarika:v2.5';
    /** Language code (BCP-47). Default: 'en-IN'. Set to 'unknown' for auto-detection. */
    languageCode?: STTV2Languages | string;
    /** Return chunk-level timestamps in REST response */
    withTimestamps?: boolean;
}
/**
 * Options specific to saaras:v2.5 (dedicated translate endpoint).
 * Uses the `/speech-to-text-translate` endpoint for Indic-to-English translation.
 * Auto-detects the source language; does not accept language codes or timestamps.
 * @see {@link https://docs.sarvam.ai/api-reference-docs/speech-to-text-translate/translate | Sarvam STT Translate docs}
 */
export interface STTTranslateOptions extends STTBaseOptions {
    model: 'saaras:v2.5';
    /** Conversation context to boost model accuracy */
    prompt?: string;
    /** Mode for translate WS. Default: 'translate'. */
    mode?: STTModes | string;
}
/**
 * Options specific to saaras:v3 (recommended).
 * @see {@link https://docs.sarvam.ai/api-reference-docs/speech-to-text/transcribe | Sarvam STT API docs}
 */
export interface STTV3Options extends STTBaseOptions {
    model?: 'saaras:v3';
    /** Language code (BCP-47). Default: 'en-IN'. Set to 'unknown' for auto-detection. */
    languageCode?: STTV3Languages | string;
    /** Transcription mode (v3 only). Default: 'transcribe' */
    mode?: STTModes | string;
    /** Conversation context to boost model accuracy */
    prompt?: string;
    /** Return chunk-level timestamps in REST response */
    withTimestamps?: boolean;
}
/** Combined options — discriminated by `model` field */
export type STTOptions = STTV2Options | STTTranslateOptions | STTV3Options;
/** Internal shape after defaults/validation have been applied by the implementation. */
interface ResolvedSTTOptions {
    apiKey: string;
    model: STTModels;
    streaming: boolean;
    languageCode?: STTLanguages | string;
    mode?: STTModes | string;
    prompt?: string;
    withTimestamps?: boolean;
    highVadSensitivity?: boolean;
    flushSignal?: boolean;
}
export declare class STT extends stt.STT {
    private opts;
    label: string;
    /**
     * Create a new instance of Sarvam AI STT.
     *
     * @remarks
     * `apiKey` must be set to your Sarvam API key, either using the argument or by setting the
     * `SARVAM_API_KEY` environment variable.
     *
     * Supported models:
     * - `saaras:v3` (default, recommended) — supports all 22 languages, modes, prompt, timestamps, and uses `/speech-to-text`.
     * - `saaras:v2.5` — Indic-to-English translation via `/speech-to-text-translate`. Auto-detects source language. Supports prompt.
     * - `saarika:v2.5` — will be deprecated soon. Supports timestamps. All its languages are available in `saaras:v3`.
     *
     * @see {@link https://docs.sarvam.ai/api-reference-docs/speech-to-text/transcribe | Sarvam STT API docs}
     * @see {@link https://docs.sarvam.ai/api-reference-docs/speech-to-text-translate/translate | Sarvam STT Translate docs}
     */
    constructor(opts?: Partial<STTOptions>);
    /** Re-resolve options; model-specific fields are reset when `model` changes. */
    updateOptions(opts: Partial<STTOptions>): void;
    /** One-shot REST recognition of a buffered audio segment. */
    _recognize(buffer: AudioBuffer, abortSignal?: AbortSignal): Promise<stt.SpeechEvent>;
    /** Open a WebSocket streaming session; throws when constructed with `streaming: false`. */
    stream(options?: {
        connOptions?: APIConnectOptions;
    }): SpeechStream;
}
export declare class SpeechStream extends stt.SpeechStream {
    #private;
    label: string;
    constructor(sttInstance: STT, opts: ResolvedSTTOptions, connOptions?: APIConnectOptions);
    /** Re-resolve options and trigger a WebSocket reconnect with the new settings. */
    updateOptions(opts: Partial<STTOptions>): void;
    protected run(): Promise<void>;
}
export {};
//# sourceMappingURL=stt.d.ts.map
|
package/dist/stt.d.ts
ADDED
|
@@ -0,0 +1,104 @@
|
|
|
1
|
+
// NOTE: generated type declarations for dist/stt.js (ESM build of src/stt.ts).
import { type APIConnectOptions, type AudioBuffer, stt } from '@livekit/agents';
import type { STTLanguages, STTModels, STTModes, STTV2Languages, STTV3Languages } from './models.js';
/** Options shared by every Sarvam STT model. */
interface STTBaseOptions {
    /** Sarvam API key. Defaults to $SARVAM_API_KEY */
    apiKey?: string;
    /**
     * Whether to use native WebSocket streaming for `stream()`.
     * Set to `false` to prefer non-streaming REST recognition (used by Agent via StreamAdapter + VAD).
     * Default: `true`.
     */
    streaming?: boolean;
    /** Increase VAD sensitivity (WS only). Maps to `high_vad_sensitivity` query param. */
    highVadSensitivity?: boolean;
    /** Enable flush signal events from server (WS only). Maps to `flush_signal` query param. */
    flushSignal?: boolean;
}
/**
 * Options specific to saarika:v2.5.
 * saarika:v2.5 will be deprecated soon — prefer {@link STTV3Options} with `saaras:v3` for new integrations.
 * All v2.5 language codes are also supported by v3.
 * @see {@link https://docs.sarvam.ai/api-reference-docs/speech-to-text/transcribe | Sarvam STT API docs}
 */
export interface STTV2Options extends STTBaseOptions {
    model: 'saarika:v2.5';
    /** Language code (BCP-47). Default: 'en-IN'. Set to 'unknown' for auto-detection. */
    languageCode?: STTV2Languages | string;
    /** Return chunk-level timestamps in REST response */
    withTimestamps?: boolean;
}
/**
 * Options specific to saaras:v2.5 (dedicated translate endpoint).
 * Uses the `/speech-to-text-translate` endpoint for Indic-to-English translation.
 * Auto-detects the source language; does not accept language codes or timestamps.
 * @see {@link https://docs.sarvam.ai/api-reference-docs/speech-to-text-translate/translate | Sarvam STT Translate docs}
 */
export interface STTTranslateOptions extends STTBaseOptions {
    model: 'saaras:v2.5';
    /** Conversation context to boost model accuracy */
    prompt?: string;
    /** Mode for translate WS. Default: 'translate'. */
    mode?: STTModes | string;
}
/**
 * Options specific to saaras:v3 (recommended).
 * @see {@link https://docs.sarvam.ai/api-reference-docs/speech-to-text/transcribe | Sarvam STT API docs}
 */
export interface STTV3Options extends STTBaseOptions {
    model?: 'saaras:v3';
    /** Language code (BCP-47). Default: 'en-IN'. Set to 'unknown' for auto-detection. */
    languageCode?: STTV3Languages | string;
    /** Transcription mode (v3 only). Default: 'transcribe' */
    mode?: STTModes | string;
    /** Conversation context to boost model accuracy */
    prompt?: string;
    /** Return chunk-level timestamps in REST response */
    withTimestamps?: boolean;
}
/** Combined options — discriminated by `model` field */
export type STTOptions = STTV2Options | STTTranslateOptions | STTV3Options;
/** Internal shape after defaults/validation have been applied by the implementation. */
interface ResolvedSTTOptions {
    apiKey: string;
    model: STTModels;
    streaming: boolean;
    languageCode?: STTLanguages | string;
    mode?: STTModes | string;
    prompt?: string;
    withTimestamps?: boolean;
    highVadSensitivity?: boolean;
    flushSignal?: boolean;
}
export declare class STT extends stt.STT {
    private opts;
    label: string;
    /**
     * Create a new instance of Sarvam AI STT.
     *
     * @remarks
     * `apiKey` must be set to your Sarvam API key, either using the argument or by setting the
     * `SARVAM_API_KEY` environment variable.
     *
     * Supported models:
     * - `saaras:v3` (default, recommended) — supports all 22 languages, modes, prompt, timestamps, and uses `/speech-to-text`.
     * - `saaras:v2.5` — Indic-to-English translation via `/speech-to-text-translate`. Auto-detects source language. Supports prompt.
     * - `saarika:v2.5` — will be deprecated soon. Supports timestamps. All its languages are available in `saaras:v3`.
     *
     * @see {@link https://docs.sarvam.ai/api-reference-docs/speech-to-text/transcribe | Sarvam STT API docs}
     * @see {@link https://docs.sarvam.ai/api-reference-docs/speech-to-text-translate/translate | Sarvam STT Translate docs}
     */
    constructor(opts?: Partial<STTOptions>);
    /** Re-resolve options; model-specific fields are reset when `model` changes. */
    updateOptions(opts: Partial<STTOptions>): void;
    /** One-shot REST recognition of a buffered audio segment. */
    _recognize(buffer: AudioBuffer, abortSignal?: AbortSignal): Promise<stt.SpeechEvent>;
    /** Open a WebSocket streaming session; throws when constructed with `streaming: false`. */
    stream(options?: {
        connOptions?: APIConnectOptions;
    }): SpeechStream;
}
export declare class SpeechStream extends stt.SpeechStream {
    #private;
    label: string;
    constructor(sttInstance: STT, opts: ResolvedSTTOptions, connOptions?: APIConnectOptions);
    /** Re-resolve options and trigger a WebSocket reconnect with the new settings. */
    updateOptions(opts: Partial<STTOptions>): void;
    protected run(): Promise<void>;
}
export {};
//# sourceMappingURL=stt.d.ts.map
|
|
@@ -0,0 +1 @@
|
|
|
1
|
+
{"version":3,"file":"stt.d.ts","sourceRoot":"","sources":["../src/stt.ts"],"names":[],"mappings":"AAGA,OAAO,EACL,KAAK,iBAAiB,EACtB,KAAK,WAAW,EAOhB,GAAG,EAEJ,MAAM,iBAAiB,CAAC;AAGzB,OAAO,KAAK,EACV,YAAY,EACZ,SAAS,EACT,QAAQ,EACR,cAAc,EACd,cAAc,EACf,MAAM,aAAa,CAAC;AAkBrB,UAAU,cAAc;IACtB,kDAAkD;IAClD,MAAM,CAAC,EAAE,MAAM,CAAC;IAChB;;;;OAIG;IACH,SAAS,CAAC,EAAE,OAAO,CAAC;IACpB,sFAAsF;IACtF,kBAAkB,CAAC,EAAE,OAAO,CAAC;IAC7B,4FAA4F;IAC5F,WAAW,CAAC,EAAE,OAAO,CAAC;CACvB;AAED;;;;;GAKG;AACH,MAAM,WAAW,YAAa,SAAQ,cAAc;IAClD,KAAK,EAAE,cAAc,CAAC;IACtB,qFAAqF;IACrF,YAAY,CAAC,EAAE,cAAc,GAAG,MAAM,CAAC;IACvC,qDAAqD;IACrD,cAAc,CAAC,EAAE,OAAO,CAAC;CAC1B;AAED;;;;;GAKG;AACH,MAAM,WAAW,mBAAoB,SAAQ,cAAc;IACzD,KAAK,EAAE,aAAa,CAAC;IACrB,mDAAmD;IACnD,MAAM,CAAC,EAAE,MAAM,CAAC;IAChB,mDAAmD;IACnD,IAAI,CAAC,EAAE,QAAQ,GAAG,MAAM,CAAC;CAC1B;AAED;;;GAGG;AACH,MAAM,WAAW,YAAa,SAAQ,cAAc;IAClD,KAAK,CAAC,EAAE,WAAW,CAAC;IACpB,qFAAqF;IACrF,YAAY,CAAC,EAAE,cAAc,GAAG,MAAM,CAAC;IACvC,0DAA0D;IAC1D,IAAI,CAAC,EAAE,QAAQ,GAAG,MAAM,CAAC;IACzB,mDAAmD;IACnD,MAAM,CAAC,EAAE,MAAM,CAAC;IAChB,qDAAqD;IACrD,cAAc,CAAC,EAAE,OAAO,CAAC;CAC1B;AAED,wDAAwD;AACxD,MAAM,MAAM,UAAU,GAAG,YAAY,GAAG,mBAAmB,GAAG,YAAY,CAAC;AAM3E,UAAU,kBAAkB;IAC1B,MAAM,EAAE,MAAM,CAAC;IACf,KAAK,EAAE,SAAS,CAAC;IACjB,SAAS,EAAE,OAAO,CAAC;IAEnB,YAAY,CAAC,EAAE,YAAY,GAAG,MAAM,CAAC;IAErC,IAAI,CAAC,EAAE,QAAQ,GAAG,MAAM,CAAC;IAEzB,MAAM,CAAC,EAAE,MAAM,CAAC;IAEhB,cAAc,CAAC,EAAE,OAAO,CAAC;IAEzB,kBAAkB,CAAC,EAAE,OAAO,CAAC;IAC7B,WAAW,CAAC,EAAE,OAAO,CAAC;CACvB;AAgOD,qBAAa,GAAI,SAAQ,GAAG,CAAC,GAAG;IAC9B,OAAO,CAAC,IAAI,CAAqB;IACjC,KAAK,SAAgB;IAErB;;;;;;;;;;;;;;OAcG;gBACS,IAAI,GAAE,OAAO,CAAC,UAAU,CAAM;IAU1C,aAAa,CAAC,IAAI,EAAE,OAAO,CAAC,UAAU,CAAC;IAoBjC,UAAU,CAAC,MAAM,EAAE,WAAW,EAAE,WAAW,CAAC,EAAE,WAAW,GAAG,OAAO,CAAC,GAAG,CAAC,WAAW,CAAC;IA+C1F,MAAM,CAAC,OAAO,CAAC,EAAE;QAAE,WAAW,CAAC,EAAE,iBAAiB,CAAA;KAAE,GAAG,YAAY;CAQpE;AAMD,qBAAa,YAAa,SAAQ,GAAG,CAAC,YAAY;;IAOhD,KAAK,SAAyB;gBAElB,WAAW,EAAE,GAAG,EAAE,IAAI,EAAE,kBAAkB,EAAE,WAAW,CAAC,EAAE,iBAAiB;IAOvF,aAAa,CAAC,IAAI,EAAE,OAAO,CAAC,UAAU,CAAC;
cAoBvB,GAAG;CAqRpB"}
|
package/dist/stt.js
ADDED
|
@@ -0,0 +1,483 @@
|
|
|
1
|
+
import {
|
|
2
|
+
AudioByteStream,
|
|
3
|
+
AudioEnergyFilter,
|
|
4
|
+
Future,
|
|
5
|
+
Task,
|
|
6
|
+
log,
|
|
7
|
+
mergeFrames,
|
|
8
|
+
stt,
|
|
9
|
+
waitForAbort
|
|
10
|
+
} from "@livekit/agents";
|
|
11
|
+
import { WebSocket } from "ws";
|
|
12
|
+
// REST endpoints: saaras:v2.5 uses the dedicated translate route, all other
// models use /speech-to-text (see getRestUrl()).
const SARVAM_STT_REST_URL = "https://api.sarvam.ai/speech-to-text";
const SARVAM_STT_TRANSLATE_REST_URL = "https://api.sarvam.ai/speech-to-text-translate";
// WebSocket endpoints mirror the same REST/translate split (see getWsUrl()).
const SARVAM_STT_WS_URL = "wss://api.sarvam.ai/speech-to-text/ws";
const SARVAM_STT_TRANSLATE_WS_URL = "wss://api.sarvam.ai/speech-to-text-translate/ws";
// Audio format sent upstream: 16 kHz mono PCM (s16le).
const SAMPLE_RATE = 16e3;
const NUM_CHANNELS = 1;
// Per-model defaults applied by resolveOptions().
const SAARIKA_DEFAULTS = {
  languageCode: "en-IN"
};
const SAARAS_V3_DEFAULTS = {
  languageCode: "en-IN",
  mode: "transcribe"
};
const SAARAS_TRANSLATE_DEFAULTS = {
  mode: "translate"
};
// Language codes accepted by saarika:v2.5; resolveOptions() falls back to the
// default ('en-IN') when an unsupported code is supplied for that model.
const STTV2_LANGUAGE_SET = /* @__PURE__ */ new Set([
  "unknown",
  "hi-IN",
  "bn-IN",
  "kn-IN",
  "ml-IN",
  "mr-IN",
  "od-IN",
  "pa-IN",
  "ta-IN",
  "te-IN",
  "en-IN",
  "gu-IN"
]);
|
42
|
+
/**
 * Normalize user-supplied STT options into a fully-resolved options object.
 * Applies per-model defaults and validates the saarika:v2.5 language code.
 * Throws when no API key is available (argument or $SARVAM_API_KEY).
 */
function resolveOptions(opts) {
  const key = opts.apiKey ?? process.env.SARVAM_API_KEY;
  if (!key) {
    throw new Error("Sarvam API key is required, whether as an argument or as $SARVAM_API_KEY");
  }
  const model = opts.model ?? "saaras:v3";
  const resolved = {
    apiKey: key,
    model,
    streaming: opts.streaming ?? true,
    highVadSensitivity: opts.highVadSensitivity,
    flushSignal: opts.flushSignal
  };
  switch (model) {
    case "saaras:v2.5":
      // Translate endpoint auto-detects the source language; only the
      // prompt and mode options apply.
      resolved.prompt = opts.prompt;
      resolved.mode = opts.mode ?? SAARAS_TRANSLATE_DEFAULTS.mode;
      break;
    case "saaras:v3":
      resolved.languageCode = opts.languageCode ?? SAARAS_V3_DEFAULTS.languageCode;
      resolved.mode = opts.mode ?? SAARAS_V3_DEFAULTS.mode;
      resolved.prompt = opts.prompt;
      resolved.withTimestamps = opts.withTimestamps;
      break;
    default: {
      // saarika:v2.5 — silently fall back to the default language when the
      // requested code is outside its supported set.
      const requested = opts.languageCode ?? SAARIKA_DEFAULTS.languageCode;
      resolved.languageCode = STTV2_LANGUAGE_SET.has(requested)
        ? requested
        : SAARIKA_DEFAULTS.languageCode;
      resolved.withTimestamps = opts.withTimestamps;
    }
  }
  return resolved;
}
|
|
75
|
+
/** Pick the REST endpoint: saaras:v2.5 → translate route, otherwise /speech-to-text. */
function getRestUrl(model) {
  if (model === "saaras:v2.5") {
    return SARVAM_STT_TRANSLATE_REST_URL;
  }
  return SARVAM_STT_REST_URL;
}
|
|
78
|
+
/** Pick the WebSocket endpoint: saaras:v2.5 → translate route, otherwise the standard WS. */
function getWsUrl(model) {
  if (model === "saaras:v2.5") {
    return SARVAM_STT_TRANSLATE_WS_URL;
  }
  return SARVAM_STT_WS_URL;
}
|
|
81
|
+
/**
 * Build the full WebSocket URL (endpoint + query string) for a streaming session.
 * VAD signals and the PCM input format are always requested; the remaining
 * parameters are appended only when set.
 */
function buildWsUrl(opts) {
  const params = new URLSearchParams({
    model: opts.model,
    vad_signals: "true",
    sample_rate: String(SAMPLE_RATE),
    input_audio_codec: "pcm_s16le"
  });
  // NOTE(review): the WS API takes `language-code` (hyphen) while REST takes
  // `language_code` (underscore) — confirm against Sarvam streaming docs.
  if (opts.model !== "saaras:v2.5" && opts.languageCode != null) {
    params.set("language-code", opts.languageCode);
  }
  if (opts.mode != null) {
    params.set("mode", opts.mode);
  }
  if (opts.highVadSensitivity != null) {
    params.set("high_vad_sensitivity", String(opts.highVadSensitivity));
  }
  if (opts.flushSignal != null) {
    params.set("flush_signal", String(opts.flushSignal));
  }
  return `${getWsUrl(opts.model)}?${params.toString()}`;
}
|
|
102
|
+
/**
 * Assemble the multipart body for a REST recognition request.
 * Field applicability varies by model: `language_code` and `with_timestamps`
 * are never sent for the translate model, `mode` only for saaras:v3, and
 * `prompt` only for the saaras models.
 */
function buildFormData(wavBlob, opts) {
  const form = new FormData();
  form.append("file", wavBlob, "audio.wav");
  form.append("model", opts.model);
  const isTranslate = opts.model === "saaras:v2.5";
  if (!isTranslate && opts.languageCode != null) {
    form.append("language_code", opts.languageCode);
  }
  if (opts.model === "saaras:v3" && opts.mode != null) {
    form.append("mode", opts.mode);
  }
  if ((isTranslate || opts.model === "saaras:v3") && opts.prompt != null) {
    form.append("prompt", opts.prompt);
  }
  if (!isTranslate && opts.withTimestamps) {
    form.append("with_timestamps", "true");
  }
  return form;
}
|
|
120
|
+
/**
 * Wrap a merged PCM frame in a canonical 44-byte WAV (RIFF) container.
 * The frame data is treated as 16-bit little-endian PCM, which is the format
 * this module sends and receives (SAMPLE_RATE / pcm_s16le).
 */
function createWav(frame) {
  const BITS_PER_SAMPLE = 16;
  const BYTES_PER_SAMPLE = BITS_PER_SAMPLE / 8;
  const dataSize = frame.data.byteLength;
  const blockAlign = frame.channels * BYTES_PER_SAMPLE;
  const header = Buffer.alloc(44);
  let off = 0;
  off += header.write("RIFF", off);
  header.writeUInt32LE(36 + dataSize, off); off += 4; // RIFF chunk size
  off += header.write("WAVE", off);
  off += header.write("fmt ", off);
  header.writeUInt32LE(16, off); off += 4;            // fmt sub-chunk size
  header.writeUInt16LE(1, off); off += 2;             // audio format: PCM
  header.writeUInt16LE(frame.channels, off); off += 2;
  header.writeUInt32LE(frame.sampleRate, off); off += 4;
  header.writeUInt32LE(frame.sampleRate * blockAlign, off); off += 4; // byte rate
  header.writeUInt16LE(blockAlign, off); off += 2;
  header.writeUInt16LE(BITS_PER_SAMPLE, off); off += 2;
  off += header.write("data", off);
  header.writeUInt32LE(dataSize, off);
  // Re-view the sample data without copying, then append it after the header.
  const samples = Buffer.from(frame.data.buffer, frame.data.byteOffset, dataSize);
  return Buffer.concat([header, samples]);
}
|
|
141
|
+
class STT extends stt.STT {
  // Fully-resolved options (see resolveOptions()).
  opts;
  label = "sarvam.STT";
  /**
   * Create a new instance of Sarvam AI STT.
   *
   * @remarks
   * `apiKey` must be set to your Sarvam API key, either using the argument or by setting the
   * `SARVAM_API_KEY` environment variable.
   *
   * Supported models:
   * - `saaras:v3` (default, recommended) — supports all 22 languages, modes, prompt, timestamps, and uses `/speech-to-text`.
   * - `saaras:v2.5` — Indic-to-English translation via `/speech-to-text-translate`. Auto-detects source language. Supports prompt.
   * - `saarika:v2.5` — will be deprecated soon. Supports timestamps. All its languages are available in `saaras:v3`.
   *
   * @see {@link https://docs.sarvam.ai/api-reference-docs/speech-to-text/transcribe | Sarvam STT API docs}
   * @see {@link https://docs.sarvam.ai/api-reference-docs/speech-to-text-translate/translate | Sarvam STT Translate docs}
   */
  constructor(opts = {}) {
    const resolved = resolveOptions(opts);
    // Capabilities are fixed at construction: this plugin emits only final
    // transcripts, never interim or aligned results.
    super({
      streaming: resolved.streaming,
      interimResults: false,
      alignedTranscript: false
    });
    this.opts = resolved;
  }
  // Re-resolve options. When the model changes, model-specific fields (mode,
  // prompt, withTimestamps) are dropped and only cross-model settings carry
  // over; languageCode is kept unless switching to the translate model,
  // which does not accept one.
  updateOptions(opts) {
    const modelChanging = opts.model != null && opts.model !== this.opts.model;
    const base = modelChanging ? {
      apiKey: this.opts.apiKey,
      streaming: this.opts.streaming,
      ...this.opts.highVadSensitivity != null ? { highVadSensitivity: this.opts.highVadSensitivity } : {},
      ...this.opts.flushSignal != null ? { flushSignal: this.opts.flushSignal } : {},
      ...this.opts.languageCode != null && opts.model !== "saaras:v2.5" ? { languageCode: this.opts.languageCode } : {}
    } : { ...this.opts };
    this.opts = resolveOptions({ ...base, ...opts });
  }
  // One-shot REST recognition: merge the buffered frames, wrap them in a WAV
  // container, and POST the multipart form to the model's REST endpoint.
  async _recognize(buffer, abortSignal) {
    const frame = mergeFrames(buffer);
    const wavBuffer = createWav(frame);
    const wavBlob = new Blob([new Uint8Array(wavBuffer)], { type: "audio/wav" });
    const formData = buildFormData(wavBlob, this.opts);
    const response = await fetch(getRestUrl(this.opts.model), {
      method: "POST",
      headers: {
        "api-subscription-key": this.opts.apiKey
      },
      body: formData,
      signal: abortSignal ?? null
    });
    if (!response.ok) {
      const errorBody = await response.text();
      throw new Error(`Sarvam STT API error ${response.status}: ${errorBody}`);
    }
    const data = await response.json();
    // Segment bounds come from optional chunk-level timestamps: first chunk
    // start and last chunk end; both default to 0 when absent.
    let startTime = 0;
    let endTime = 0;
    if (data.timestamps) {
      const starts = data.timestamps.start_time_seconds;
      const ends = data.timestamps.end_time_seconds;
      if (starts.length > 0) startTime = starts[0] ?? 0;
      if (ends.length > 0) endTime = ends[ends.length - 1] ?? 0;
    }
    return {
      type: stt.SpeechEventType.FINAL_TRANSCRIPT,
      requestId: data.request_id ?? void 0,
      alternatives: [
        {
          text: data.transcript || "",
          language: data.language_code ?? this.opts.languageCode ?? "unknown",
          startTime,
          endTime,
          // language_probability doubles as a confidence proxy when present.
          confidence: data.language_probability ?? 0
        }
      ]
    };
  }
  // Open a WebSocket streaming session. Only valid when this instance was
  // constructed with streaming enabled (the default).
  stream(options) {
    if (!this.capabilities.streaming) {
      throw new Error(
        "Sarvam STT streaming is disabled (`streaming: false`). Use recognize() for REST or wrap with stt.StreamAdapter + VAD for streaming behavior."
      );
    }
    return new SpeechStream(this, this.opts, options == null ? void 0 : options.connOptions);
  }
}
|
|
228
|
+
class SpeechStream extends stt.SpeechStream {
  // Resolved options for the current WS session.
  #opts;
  // Drops frames with negligible energy before they are sent upstream.
  #audioEnergyFilter;
  #logger = log();
  // True between an emitted START_OF_SPEECH and its matching END_OF_SPEECH.
  #speaking = false;
  // Resolved by updateOptions() to force the current session to end and reconnect.
  #resetWS = new Future();
  // Last request id seen from the server (written but not read here — presumably for debugging).
  #requestId = "";
  label = "sarvam.SpeechStream";
  constructor(sttInstance, opts, connOptions) {
    super(sttInstance, SAMPLE_RATE, connOptions);
    this.#opts = opts;
    this.closed = false;
    this.#audioEnergyFilter = new AudioEnergyFilter();
  }
  // Re-resolve options (same model-change reset semantics as STT.updateOptions)
  // and signal the running session to reconnect with the new settings.
  updateOptions(opts) {
    const modelChanging = opts.model != null && opts.model !== this.#opts.model;
    const base = modelChanging ? {
      apiKey: this.#opts.apiKey,
      ...this.#opts.highVadSensitivity != null ? { highVadSensitivity: this.#opts.highVadSensitivity } : {},
      ...this.#opts.flushSignal != null ? { flushSignal: this.#opts.flushSignal } : {},
      ...this.#opts.languageCode != null && opts.model !== "saaras:v2.5" ? { languageCode: this.#opts.languageCode } : {}
    } : { ...this.#opts };
    this.#opts = resolveOptions({ ...base, ...opts });
    this.#resetWS.resolve();
  }
  // Reconnect loop: each iteration opens one WebSocket and runs a session
  // until the input closes, options change, or the connection errors.
  async run() {
    const maxRetry = 32;
    let retries = 0;
    while (!this.input.closed && !this.closed) {
      const wsUrl = buildWsUrl(this.#opts);
      this.#logger.info(`Sarvam STT connecting to: ${wsUrl}`);
      const ws = new WebSocket(wsUrl, {
        headers: { "api-subscription-key": this.#opts.apiKey }
      });
      let sessionStart = 0;
      try {
        // Wait for the socket to open; error/close before open rejects.
        await new Promise((resolve, reject) => {
          ws.once("open", () => resolve());
          ws.once("error", (err) => reject(err));
          ws.once(
            "close",
            (code) => reject(new Error(`WebSocket closed with code ${code}`))
          );
        });
        sessionStart = Date.now();
        await this.#runWS(ws);
        retries = 0;
      } catch (e) {
        ws.removeAllListeners();
        ws.close();
        if (!this.closed && !this.input.closed) {
          // A session that survived more than 5s counts as healthy, so reset
          // the retry budget rather than capping long streams at maxRetry.
          if (sessionStart > 0 && Date.now() - sessionStart > 5e3) {
            retries = 0;
          }
          if (retries >= maxRetry) {
            throw new Error(`Failed to connect to Sarvam STT after ${retries} attempts: ${e}`);
          }
          // Linear backoff capped at 10s; the first retry is immediate (0s).
          const delay = Math.min(retries * 5, 10);
          retries++;
          this.#logger.warn(
            `Failed to connect to Sarvam STT, retrying in ${delay}s: ${e} (${retries}/${maxRetry})`
          );
          await new Promise((resolve) => setTimeout(resolve, delay * 1e3));
        } else {
          this.#logger.warn(
            `Sarvam STT disconnected, connection is closed: ${e} (inputClosed: ${this.input.closed}, isClosed: ${this.closed})`
          );
        }
      }
    }
    this.closed = true;
  }
  // Drive one WebSocket session: pump audio upstream and consume transcript /
  // VAD-event messages until the input ends, options change, or the socket drops.
  async #runWS(ws) {
    this.#resetWS = new Future();
    this.#speaking = false;
    // Set once teardown begins so the close handler below does not treat an
    // intentional shutdown as an error.
    let closing = false;
    const sessionController = new AbortController();
    // The translate model takes its prompt as an initial config message.
    if (this.#opts.model === "saaras:v2.5" && this.#opts.prompt != null) {
      ws.send(JSON.stringify({ type: "config", prompt: this.#opts.prompt }));
    }
    // Fails the session when the server closes the socket unexpectedly.
    const wsMonitor = Task.from(async (controller) => {
      const closed = new Promise((_, reject) => {
        ws.once("close", (code, reason) => {
          if (!closing) {
            this.#logger.error(`WebSocket closed with code ${code}: ${reason}`);
            reject(new Error("WebSocket closed"));
          }
        });
      });
      await Promise.race([closed, waitForAbort(controller.signal)]);
    });
    // Reads input frames, rechunks to 50ms, and sends them as base64 PCM.
    const sendTask = async () => {
      const samples50Ms = Math.floor(SAMPLE_RATE / 20);
      const stream = new AudioByteStream(SAMPLE_RATE, NUM_CHANNELS, samples50Ms);
      const abortPromise = waitForAbort(this.abortSignal);
      const sessionAbort = waitForAbort(sessionController.signal);
      try {
        while (!this.closed) {
          const result = await Promise.race([this.input.next(), abortPromise, sessionAbort]);
          // undefined means one of the abort promises won the race.
          if (result === void 0) return;
          if (result.done) break;
          const data = result.value;
          let frames;
          if (data === SpeechStream.FLUSH_SENTINEL) {
            frames = stream.flush();
          } else if (data.sampleRate !== SAMPLE_RATE || data.channels !== NUM_CHANNELS) {
            throw new Error(
              `Expected ${SAMPLE_RATE}Hz/${NUM_CHANNELS}ch, got ${data.sampleRate}Hz/${data.channels}ch`
            );
          } else {
            frames = stream.write(
              data.data.buffer.slice(
                data.data.byteOffset,
                data.data.byteOffset + data.data.byteLength
              )
            );
          }
          for (const frame of frames) {
            // Skip frames below the energy threshold to save bandwidth.
            if (this.#audioEnergyFilter.pushFrame(frame)) {
              const pcmBuffer = Buffer.from(
                frame.data.buffer,
                frame.data.byteOffset,
                frame.data.byteLength
              );
              const base64Audio = pcmBuffer.toString("base64");
              ws.send(
                JSON.stringify({
                  audio: {
                    data: base64Audio,
                    encoding: "audio/wav",
                    sample_rate: SAMPLE_RATE
                  }
                })
              );
            }
          }
          if (data === SpeechStream.FLUSH_SENTINEL) {
            ws.send(JSON.stringify({ type: "flush" }));
          }
        }
      } finally {
        closing = true;
        // Best-effort end-of-stream notification; the socket may already be gone.
        try {
          ws.send(
            JSON.stringify({
              type: "end_of_stream",
              audio: { data: "", encoding: "audio/wav", sample_rate: SAMPLE_RATE }
            })
          );
        } catch {
        }
        wsMonitor.cancel();
      }
    };
    // Parses server messages into SpeechEvents pushed onto the output queue.
    const listenTask = Task.from(async (controller) => {
      const putMessage = (event) => {
        if (!this.queue.closed) {
          try {
            this.queue.put(event);
          } catch {
          }
        }
      };
      const listenMessage = new Promise((resolve, reject) => {
        ws.once("close", () => resolve());
        ws.on("message", (msg) => {
          var _a;
          try {
            const raw = msg.toString();
            this.#logger.debug(`Sarvam STT raw WS message: ${raw.substring(0, 500)}`);
            const json = JSON.parse(raw);
            const msgType = json["type"] ?? "";
            if (msgType === "events") {
              // Server-side VAD signals, mapped to START/END_OF_SPEECH events
              // (deduplicated via #speaking).
              const eventData = json["data"] ?? {};
              const signalType = eventData.signal_type;
              if (signalType === "START_SPEECH") {
                if (!this.#speaking) {
                  this.#speaking = true;
                  putMessage({ type: stt.SpeechEventType.START_OF_SPEECH });
                }
              } else if (signalType === "END_SPEECH") {
                if (this.#speaking) {
                  this.#speaking = false;
                  putMessage({ type: stt.SpeechEventType.END_OF_SPEECH });
                }
              }
            } else if (msgType === "data") {
              const td = json["data"] ?? {};
              const transcript = td.transcript ?? "";
              const language = td.language_code ?? this.#opts.languageCode ?? "unknown";
              const requestId = td.request_id ?? "";
              const confidence = td.language_probability ?? 0;
              this.#requestId = requestId;
              if (td.metrics) {
                this.#logger.debug(
                  `Sarvam STT metrics: audio_duration=${td.metrics.audio_duration}s, latency=${td.metrics.processing_latency}s`
                );
              }
              if (transcript) {
                // Synthesize START_OF_SPEECH if a transcript arrives before
                // (or without) a VAD start signal.
                if (!this.#speaking) {
                  this.#speaking = true;
                  putMessage({ type: stt.SpeechEventType.START_OF_SPEECH });
                }
                putMessage({
                  type: stt.SpeechEventType.FINAL_TRANSCRIPT,
                  requestId,
                  alternatives: [
                    {
                      text: transcript,
                      language,
                      startTime: 0,
                      endTime: ((_a = td.metrics) == null ? void 0 : _a.audio_duration) ?? 0,
                      confidence
                    }
                  ]
                });
              }
            } else if (msgType === "error") {
              // Error payload shape varies; probe nested and top-level fields.
              const nested = json["data"];
              const errorInfo = (nested == null ? void 0 : nested.message) ?? (nested == null ? void 0 : nested.error) ?? json["error"] ?? json["message"] ?? "Unknown error";
              const errorCode = (nested == null ? void 0 : nested.code) ?? json["code"] ?? "";
              this.#logger.error(`Sarvam STT WebSocket error [${errorCode}]: ${errorInfo}`);
              reject(new Error(`Sarvam STT API error [${errorCode}]: ${errorInfo}`));
              return;
            }
            if (this.closed || closing) {
              resolve();
            }
          } catch (err) {
            this.#logger.error(`Error processing Sarvam STT message: ${msg}`);
            reject(err);
          }
        });
      });
      await Promise.race([listenMessage, waitForAbort(controller.signal)]);
    }, this.abortController);
    try {
      // The session ends when either updateOptions() requests a reset or all
      // three sub-tasks finish.
      await Promise.race([
        this.#resetWS.await,
        Promise.all([sendTask(), listenTask.result, wsMonitor.result])
      ]);
    } finally {
      closing = true;
      sessionController.abort();
      wsMonitor.cancel();
      ws.close();
      // Swallow late listener rejections after teardown.
      listenTask.result.catch(() => {
      });
    }
  }
}
|
|
479
|
+
export {
|
|
480
|
+
STT,
|
|
481
|
+
SpeechStream
|
|
482
|
+
};
|
|
483
|
+
//# sourceMappingURL=stt.js.map
|