speechflow 0.9.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/README.md ADDED
@@ -0,0 +1,239 @@
+
+ <img src="https://raw.githubusercontent.com/rse/speechflow/master/src/speechflow-logo.svg" width="400" align="right" alt=""/>
+
+ SpeechFlow
+ ==========
+
+ **Speech Processing Flow Graph**
+
+ [![github (author stars)](https://img.shields.io/github/stars/rse?logo=github&label=author%20stars&color=%233377aa)](https://github.com/rse)
+ [![github (author followers)](https://img.shields.io/github/followers/rse?label=author%20followers&logo=github&color=%234477aa)](https://github.com/rse)
+ [![github (project stdver)](https://img.shields.io/github/package-json/x-stdver/rse/speechflow?logo=github&label=project%20stdver&color=%234477aa&cacheSeconds=900)](https://github.com/rse/speechflow)
+ [![github (project release)](https://img.shields.io/github/package-json/x-release/rse/speechflow?logo=github&label=project%20release&color=%234477aa&cacheSeconds=900)](https://github.com/rse/speechflow)
+
+ About
+ -----
+
+ **SpeechFlow** is a command-line tool for establishing a directed
+ data-flow graph of audio and text processing nodes. This allows various
+ speech processing tasks to be performed in a flexible way.
+
+ Installation
+ ------------
+
+ ```
+ $ npm install -g speechflow
+ ```
+
+ Usage
+ -----
+
+ ```
+ $ speechflow
+     [-h|--help]
+     [-V|--version]
+     [-v|--verbose <level>]
+     [-e|--expression <expression>]
+     [-f|--expression-file <expression-file>]
+     [-c|--config <key>@<yaml-config-file>]
+     [<argument> [...]]
+ ```
+
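+ For example, the stdin-to-stdout translation graph shown in the next
+ section can be run directly from the command line by passing the graph
+ expression via `-e` (a minimal sketch; it assumes a DeepL API key is
+ available, see the node descriptions below):
+
+ ```
+ $ speechflow -e 'file(path: "-", mode: "r", type: "text") | deepl(src: "de", dst: "en-US") | file(path: "-", mode: "w", type: "text")'
+ ```
+
+ Longer expressions can equally be stored in a file and passed via
+ `-f|--expression-file`.
+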
+ Processing Graph Examples
+ -------------------------
+
+ - Capture audio from microphone to file:
+
+ ```
+ device(device: "wasapi:VoiceMeeter Out B1", mode: "r") |
+ file(path: "capture.pcm", mode: "w", type: "audio")
+ ```
+
+ - Transcribe an audio file into a text file:
+
+ ```
+ file(path: argv.0, mode: "r", type: "audio") |
+ deepgram(language: "en") |
+ file(path: argv.1, mode: "w", type: "text")
+ ```
+
+ - Translate stdin to stdout:
+
+ ```
+ file(path: "-", mode: "r", type: "text") |
+ deepl(src: "de", dst: "en-US") |
+ file(path: "-", mode: "w", type: "text")
+ ```
+
+ - Pass audio through from microphone to speaker and, in parallel, record it to a file:
+
+ ```
+ device(device: "wasapi:VoiceMeeter Out B1", mode: "r") | {
+     file(path: "capture.pcm", mode: "w", type: "audio"),
+     device(device: "wasapi:VoiceMeeter VAIO3 Input", mode: "w")
+ }
+ ```
+
+ - Real-time translation from German to English, including capture of all inputs and outputs:
+
+ ```
+ device(device: "wasapi:VoiceMeeter Out B1", mode: "r") | {
+     file(path: "translation-audio-de.pcm", mode: "w", type: "audio"),
+     deepgram(language: "de") |
+     file(path: "translation-text-de.txt", mode: "w", type: "text")
+ } | {
+     deepl(src: "de", dst: "en-US") |
+     file(path: "translation-text-en.txt", mode: "w", type: "text")
+ } | {
+     elevenlabs(language: "en") | {
+         file(path: "translation-audio-en.pcm", mode: "w", type: "audio"),
+         device(device: "wasapi:VoiceMeeter VAIO3 Input", mode: "w")
+     }
+ }
+ ```
+
+ Processing Node Types
+ ---------------------
+
+ Currently **SpeechFlow** provides the following processing nodes:
+
+ - Node: **file**<br/>
+ Purpose: **File and StdIO source/sink**<br/>
+ Example: `file(path: "capture.pcm", mode: "w", type: "audio")`
+
+ | Port | Payload |
+ | ------- | ----------- |
+ | input | text, audio |
+ | output | text, audio |
+
+ | Parameter | Position | Default | Requirement |
+ | ---------- | --------- | -------- | --------------------- |
+ | **path** | 0 | *none* | *none* |
+ | **mode** | 1 | "r" | `/^(?:r\|w\|rw)$/` |
+ | **type** | 2 | "audio" | `/^(?:audio\|text)$/` |
+
+ - Node: **websocket**<br/>
+ Purpose: **WebSocket source/sink**<br/>
+ Example: `websocket(connect: "ws://127.0.0.1:12345", type: "text")`
+
+ | Port | Payload |
+ | ------- | ----------- |
+ | input | text, audio |
+ | output | text, audio |
+
+ | Parameter | Position | Default | Requirement |
+ | ----------- | --------- | -------- | --------------------- |
+ | **listen** | *none* | *none* | `/^(?:\|ws:\/\/(.+?):(\d+))$/` |
+ | **connect** | *none* | *none* | `/^(?:\|ws:\/\/(.+?):(\d+)(?:\/.*)?)$/` |
+ | **type** | *none* | "audio" | `/^(?:audio\|text)$/` |
+
+ - Node: **device**<br/>
+ Purpose: **Microphone/speaker device source/sink**<br/>
+ Example: `device(device: "wasapi:VoiceMeeter Out B1", mode: "r")`
+
+ | Port | Payload |
+ | ------- | ----------- |
+ | input | audio |
+ | output | audio |
+
+ | Parameter | Position | Default | Requirement |
+ | ----------- | --------- | -------- | ------------------ |
+ | **device** | 0 | *none* | `/^(.+?):(.+)$/` |
+ | **mode** | 1 | "rw" | `/^(?:r\|w\|rw)$/` |
+
+ - Node: **deepgram**<br/>
+ Purpose: **Deepgram Speech-to-Text conversion**<br/>
+ Example: `deepgram(language: "de")`<br/>
+ Notice: this node requires an API key! (See the environment variable example after this list.)
+
+ | Port | Payload |
+ | ------- | ----------- |
+ | input | audio |
+ | output | text |
+
+ | Parameter | Position | Default | Requirement |
+ | ------------ | --------- | -------- | ------------------ |
+ | **key** | *none* | env.SPEECHFLOW\_KEY\_DEEPGRAM | *none* |
+ | **model** | 0 | "nova-2" | *none* |
+ | **version** | 1 | "latest" | *none* |
+ | **language** | 2 | "de" | *none* |
+
+ - Node: **deepl**<br/>
+ Purpose: **DeepL Text-to-Text translation**<br/>
+ Example: `deepl(src: "de", dst: "en-US")`<br/>
+ Notice: this node requires an API key!
+
+ | Port | Payload |
+ | ------- | ----------- |
+ | input | text |
+ | output | text |
+
+ | Parameter | Position | Default | Requirement |
+ | ------------ | --------- | -------- | ------------------ |
+ | **key** | *none* | env.SPEECHFLOW\_KEY\_DEEPL | *none* |
+ | **src** | 0 | "de" | `/^(?:de\|en-US)$/` |
+ | **dst** | 1 | "en-US" | `/^(?:de\|en-US)$/` |
+
+ - Node: **elevenlabs**<br/>
+ Purpose: **ElevenLabs Text-to-Speech conversion**<br/>
+ Example: `elevenlabs(language: "en")`<br/>
+ Notice: this node requires an API key!
+
+ | Port | Payload |
+ | ------- | ----------- |
+ | input | text |
+ | output | audio |
+
+ | Parameter | Position | Default | Requirement |
+ | ------------ | --------- | -------- | ------------------ |
+ | **key** | *none* | env.SPEECHFLOW\_KEY\_ELEVENLABS | *none* |
+ | **voice** | 0 | "Brian" | *none* |
+ | **language** | 1 | "de" | *none* |
+
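+ All API-key based nodes (**deepgram**, **deepl**, **elevenlabs**) read
+ their keys by default from the environment variables listed in the
+ tables above; alternatively a key can be passed explicitly via the
+ `key` parameter. A minimal sketch for a POSIX shell (the key values are
+ placeholders):
+
+ ```
+ $ export SPEECHFLOW_KEY_DEEPGRAM="<your-deepgram-api-key>"
+ $ export SPEECHFLOW_KEY_DEEPL="<your-deepl-api-key>"
+ $ export SPEECHFLOW_KEY_ELEVENLABS="<your-elevenlabs-api-key>"
+ ```
+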
+ Graph Expression Language
+ -------------------------
+
+ The **SpeechFlow** graph expression language is based on
+ [**FlowLink**](https://npmjs.org/flowlink), whose language follows this
+ BNF-style grammar:
+
+ ```
+ expr         ::= parallel
+                | sequential
+                | node
+                | group
+ parallel     ::= sequential ("," sequential)+
+ sequential   ::= node ("|" node)+
+ node         ::= id ("(" (param ("," param)*)? ")")?
+ param        ::= array | object | variable | template | string | number | value
+ group        ::= "{" expr "}"
+ id           ::= /[a-zA-Z_][a-zA-Z0-9_-]*/
+ variable     ::= id
+ array        ::= "[" (param ("," param)*)? "]"
+ object       ::= "{" (id ":" param ("," id ":" param)*)? "}"
+ template     ::= "`" ("${" variable "}" / ("\\`"|.))* "`"
+ string       ::= /"(\\"|.)*"/
+                | /'(\\'|.)*'/
+ number       ::= /[+-]?/ number-value
+ number-value ::= "0b" /[01]+/
+                | "0o" /[0-7]+/
+                | "0x" /[0-9a-fA-F]+/
+                | /[0-9]*\.[0-9]+([eE][+-]?[0-9]+)?/
+                | /[0-9]+/
+ value        ::= "true" | "false" | "null" | "NaN" | "undefined"
+ ```
+
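+ As a reading aid (an informal decomposition, not part of the grammar
+ itself): the stdin-to-stdout translation example from above is a
+ `sequential`, i.e. three `node`s joined by `|`, where each `node` is an
+ `id` followed by a parenthesized, comma-separated parameter list:
+
+ ```
+ file(path: "-", mode: "r", type: "text") |
+ deepl(src: "de", dst: "en-US") |
+ file(path: "-", mode: "w", type: "text")
+ ```
+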
+ History
+ -------
+
+ **SpeechFlow**, as a technical cut-through, was initially created in
+ March 2024 for use in the msg Filmstudio context. In April 2025 it was
+ refined into a more complete toolkit and could then be used in
+ production for the first time.
+
+ Copyright & License
+ -------------------
+
+ Copyright &copy; 2024-2025 [Dr. Ralf S. Engelschall](mailto:rse@engelschall.com)<br/>
+ Licensed under [GPL 3.0](https://spdx.org/licenses/GPL-3.0-only)
+
@@ -0,0 +1,135 @@
+ "use strict";
+ /*
+ ** SpeechFlow - Speech Processing Flow Graph
+ ** Copyright (c) 2024-2025 Dr. Ralf S. Engelschall <rse@engelschall.com>
+ ** Licensed under GPL 3.0 <https://spdx.org/licenses/GPL-3.0-only>
+ */
+ var __createBinding = (this && this.__createBinding) || (Object.create ? (function(o, m, k, k2) {
+     if (k2 === undefined) k2 = k;
+     var desc = Object.getOwnPropertyDescriptor(m, k);
+     if (!desc || ("get" in desc ? !m.__esModule : desc.writable || desc.configurable)) {
+         desc = { enumerable: true, get: function() { return m[k]; } };
+     }
+     Object.defineProperty(o, k2, desc);
+ }) : (function(o, m, k, k2) {
+     if (k2 === undefined) k2 = k;
+     o[k2] = m[k];
+ }));
+ var __setModuleDefault = (this && this.__setModuleDefault) || (Object.create ? (function(o, v) {
+     Object.defineProperty(o, "default", { enumerable: true, value: v });
+ }) : function(o, v) {
+     o["default"] = v;
+ });
+ var __importStar = (this && this.__importStar) || (function () {
+     var ownKeys = function(o) {
+         ownKeys = Object.getOwnPropertyNames || function (o) {
+             var ar = [];
+             for (var k in o) if (Object.prototype.hasOwnProperty.call(o, k)) ar[ar.length] = k;
+             return ar;
+         };
+         return ownKeys(o);
+     };
+     return function (mod) {
+         if (mod && mod.__esModule) return mod;
+         var result = {};
+         if (mod != null) for (var k = ownKeys(mod), i = 0; i < k.length; i++) if (k[i] !== "default") __createBinding(result, mod, k[i]);
+         __setModuleDefault(result, mod);
+         return result;
+     };
+ })();
+ var __importDefault = (this && this.__importDefault) || function (mod) {
+     return (mod && mod.__esModule) ? mod : { "default": mod };
+ };
+ Object.defineProperty(exports, "__esModule", { value: true });
+ const node_events_1 = require("node:events");
+ const node_stream_1 = __importDefault(require("node:stream"));
+ const Deepgram = __importStar(require("@deepgram/sdk"));
+ const speechflow_node_1 = __importDefault(require("./speechflow-node"));
+ class SpeechFlowNodeDeepgram extends speechflow_node_1.default {
+     dg = null;
+     constructor(id, opts, args) {
+         super(id, opts, args);
+         this.configure({
+             key: { type: "string", val: process.env.SPEECHFLOW_KEY_DEEPGRAM },
+             model: { type: "string", val: "nova-2", pos: 0 }, /* FIXME: nova-3 multilingual */
+             version: { type: "string", val: "latest", pos: 1 },
+             language: { type: "string", val: "de", pos: 2 }
+         });
+     }
+     async open() {
+         this.input = "audio";
+         this.output = "text";
+         this.stream = null;
+         /* sanity check situation */
+         if (this.config.audioBitDepth !== 16 || !this.config.audioLittleEndian)
+             throw new Error("Deepgram node currently supports PCM-S16LE audio only");
+         /* connect to Deepgram API */
+         const queue = new node_events_1.EventEmitter();
+         const deepgram = Deepgram.createClient(this.params.key);
+         this.dg = deepgram.listen.live({
+             model: this.params.model,
+             version: this.params.version,
+             language: this.params.language,
+             channels: this.config.audioChannels,
+             sample_rate: this.config.audioSampleRate,
+             encoding: "linear16",
+             multichannel: false,
+             // endpointing: false, /* FIXME: ? */
+             interim_results: false,
+             smart_format: true,
+             punctuate: true,
+             filler_words: true,
+             diarize: true,
+             numerals: true,
+             paragraphs: true,
+             profanity_filter: true,
+             utterances: false,
+         });
+         await new Promise((resolve) => {
+             this.dg.on(Deepgram.LiveTranscriptionEvents.Open, () => {
+                 this.log("info", "Deepgram: connection open");
+                 resolve(true);
+             });
+         });
+         /* hooks onto Deepgram API events */
+         this.dg.on(Deepgram.LiveTranscriptionEvents.Close, () => {
+             this.log("info", "Deepgram: connection close");
+         });
+         this.dg.on(Deepgram.LiveTranscriptionEvents.Transcript, async (data) => {
+             const text = data.channel?.alternatives[0].transcript ?? "";
+             if (text === "")
+                 return;
+             queue.emit("text", text);
+         });
+         this.dg.on(Deepgram.LiveTranscriptionEvents.Error, (error) => {
+             this.log("error", `Deepgram: ${error}`);
+         });
+         /* provide Duplex stream and internally attach to Deepgram API */
+         const dg = this.dg;
+         this.stream = new node_stream_1.default.Duplex({
+             write(chunk, encoding, callback) {
+                 const data = chunk.buffer.slice(chunk.byteOffset, chunk.byteOffset + chunk.byteLength);
+                 if (data.byteLength === 0)
+                     queue.emit("text", "");
+                 else
+                     dg.send(data);
+                 callback();
+             },
+             read(size) {
+                 queue.once("text", (text) => {
+                     if (text !== "")
+                         this.push(text);
+                 });
+             }
+         });
+     }
+     async close() {
+         if (this.stream !== null) {
+             this.stream.destroy();
+             this.stream = null;
+         }
+         if (this.dg !== null)
+             this.dg.requestClose();
+     }
+ }
+ exports.default = SpeechFlowNodeDeepgram;
@@ -0,0 +1,105 @@
+ "use strict";
+ /*
+ ** SpeechFlow - Speech Processing Flow Graph
+ ** Copyright (c) 2024-2025 Dr. Ralf S. Engelschall <rse@engelschall.com>
+ ** Licensed under GPL 3.0 <https://spdx.org/licenses/GPL-3.0-only>
+ */
+ var __createBinding = (this && this.__createBinding) || (Object.create ? (function(o, m, k, k2) {
+     if (k2 === undefined) k2 = k;
+     var desc = Object.getOwnPropertyDescriptor(m, k);
+     if (!desc || ("get" in desc ? !m.__esModule : desc.writable || desc.configurable)) {
+         desc = { enumerable: true, get: function() { return m[k]; } };
+     }
+     Object.defineProperty(o, k2, desc);
+ }) : (function(o, m, k, k2) {
+     if (k2 === undefined) k2 = k;
+     o[k2] = m[k];
+ }));
+ var __setModuleDefault = (this && this.__setModuleDefault) || (Object.create ? (function(o, v) {
+     Object.defineProperty(o, "default", { enumerable: true, value: v });
+ }) : function(o, v) {
+     o["default"] = v;
+ });
+ var __importStar = (this && this.__importStar) || (function () {
+     var ownKeys = function(o) {
+         ownKeys = Object.getOwnPropertyNames || function (o) {
+             var ar = [];
+             for (var k in o) if (Object.prototype.hasOwnProperty.call(o, k)) ar[ar.length] = k;
+             return ar;
+         };
+         return ownKeys(o);
+     };
+     return function (mod) {
+         if (mod && mod.__esModule) return mod;
+         var result = {};
+         if (mod != null) for (var k = ownKeys(mod), i = 0; i < k.length; i++) if (k[i] !== "default") __createBinding(result, mod, k[i]);
+         __setModuleDefault(result, mod);
+         return result;
+     };
+ })();
+ var __importDefault = (this && this.__importDefault) || function (mod) {
+     return (mod && mod.__esModule) ? mod : { "default": mod };
+ };
+ Object.defineProperty(exports, "__esModule", { value: true });
+ const node_stream_1 = __importDefault(require("node:stream"));
+ const node_events_1 = require("node:events");
+ const speechflow_node_1 = __importDefault(require("./speechflow-node"));
+ const DeepL = __importStar(require("deepl-node"));
+ class SpeechFlowNodeDeepL extends speechflow_node_1.default {
+     translator = null;
+     constructor(id, opts, args) {
+         super(id, opts, args);
+         this.input = "text";
+         this.output = "text";
+         this.stream = null;
+         this.configure({
+             key: { type: "string", val: process.env.SPEECHFLOW_KEY_DEEPL },
+             src: { type: "string", pos: 0, val: "de", match: /^(?:de|en-US)$/ },
+             dst: { type: "string", pos: 1, val: "en-US", match: /^(?:de|en-US)$/ }
+         });
+     }
+     async open() {
+         /* instantiate DeepL API SDK */
+         this.translator = new DeepL.Translator(this.params.key);
+         /* provide text-to-text translation */
+         const translate = async (text) => {
+             const result = await this.translator.translateText(text, this.params.src, this.params.dst, {
+                 splitSentences: "off"
+             });
+             return (result?.text ?? text);
+         };
+         /* establish a duplex stream and connect it to the translation */
+         const queue = new node_events_1.EventEmitter();
+         this.stream = new node_stream_1.default.Duplex({
+             write(chunk, encoding, callback) {
+                 const data = chunk.toString();
+                 if (data === "") {
+                     queue.emit("result", "");
+                     callback();
+                 }
+                 else {
+                     translate(data).then((result) => {
+                         queue.emit("result", result);
+                         callback();
+                     }).catch((err) => {
+                         callback(err);
+                     });
+                 }
+             },
+             read(size) {
+                 queue.once("result", (result) => {
+                     this.push(result);
+                 });
+             }
+         });
+     }
+     async close() {
+         if (this.stream !== null) {
+             this.stream.destroy();
+             this.stream = null;
+         }
+         if (this.translator !== null)
+             this.translator = null;
+     }
+ }
+ exports.default = SpeechFlowNodeDeepL;
@@ -0,0 +1,95 @@
+ "use strict";
+ /*
+ ** SpeechFlow - Speech Processing Flow Graph
+ ** Copyright (c) 2024-2025 Dr. Ralf S. Engelschall <rse@engelschall.com>
+ ** Licensed under GPL 3.0 <https://spdx.org/licenses/GPL-3.0-only>
+ */
+ var __importDefault = (this && this.__importDefault) || function (mod) {
+     return (mod && mod.__esModule) ? mod : { "default": mod };
+ };
+ Object.defineProperty(exports, "__esModule", { value: true });
+ const naudiodon_1 = __importDefault(require("@gpeng/naudiodon"));
+ const speechflow_node_1 = __importDefault(require("./speechflow-node"));
+ const speechflow_util_1 = __importDefault(require("./speechflow-util"));
+ class SpeechFlowNodeDevice extends speechflow_node_1.default {
+     io = null;
+     constructor(id, opts, args) {
+         super(id, opts, args);
+         this.configure({
+             device: { type: "string", pos: 0, match: /^(.+?):(.+)$/ },
+             mode: { type: "string", pos: 1, val: "rw", match: /^(?:r|w|rw)$/ }
+         });
+     }
+     async open() {
+         /* determine device */
+         const device = speechflow_util_1.default.audioDeviceFromURL(this.params.mode, this.params.device);
+         /* sanity check sample rate compatibility
+            (for simplicity, we still do not resample on input/output) */
+         if (device.defaultSampleRate !== this.config.audioSampleRate)
+             throw new Error(`device audio sample rate ${device.defaultSampleRate} is ` +
+                 `incompatible with required sample rate ${this.config.audioSampleRate}`);
+         /* establish device connection
+            Notice: "naudiodon" actually implements Stream.{Readable,Writable,Duplex}, but
+            declares just its sub-interface NodeJS.{Readable,Writable,Duplex}Stream,
+            so it is correct to cast it back to Stream.{Readable,Writable,Duplex} */
+         if (device.maxInputChannels > 0 && device.maxOutputChannels > 0) {
+             this.log("info", `resolved "${this.params.device}" to duplex device "${device.id}"`);
+             this.input = "audio";
+             this.output = "audio";
+             this.io = naudiodon_1.default.AudioIO({
+                 inOptions: {
+                     deviceId: device.id,
+                     channelCount: this.config.audioChannels,
+                     sampleRate: this.config.audioSampleRate,
+                     sampleFormat: this.config.audioBitDepth
+                 },
+                 outOptions: {
+                     deviceId: device.id,
+                     channelCount: this.config.audioChannels,
+                     sampleRate: this.config.audioSampleRate,
+                     sampleFormat: this.config.audioBitDepth
+                 }
+             });
+             this.stream = this.io;
+         }
+         else if (device.maxInputChannels > 0 && device.maxOutputChannels === 0) {
+             this.log("info", `resolved "${this.params.device}" to input device "${device.id}"`);
+             this.input = "none";
+             this.output = "audio";
+             this.io = naudiodon_1.default.AudioIO({
+                 inOptions: {
+                     deviceId: device.id,
+                     channelCount: this.config.audioChannels,
+                     sampleRate: this.config.audioSampleRate,
+                     sampleFormat: this.config.audioBitDepth
+                 }
+             });
+             this.stream = this.io;
+         }
+         else if (device.maxInputChannels === 0 && device.maxOutputChannels > 0) {
+             this.log("info", `resolved "${this.params.device}" to output device "${device.id}"`);
+             this.input = "audio";
+             this.output = "none";
+             this.io = naudiodon_1.default.AudioIO({
+                 outOptions: {
+                     deviceId: device.id,
+                     channelCount: this.config.audioChannels,
+                     sampleRate: this.config.audioSampleRate,
+                     sampleFormat: this.config.audioBitDepth
+                 }
+             });
+             this.stream = this.io;
+         }
+         else
+             throw new Error(`device "${device.id}" does not have any input or output channels`);
+         /* pass-through errors */
+         this.io.on("error", (err) => {
+             this.emit("error", err);
+         });
+     }
+     async close() {
+         if (this.io !== null)
+             this.io.quit();
+     }
+ }
+ exports.default = SpeechFlowNodeDevice;