speechflow 0.9.4 → 0.9.7

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (107)
  1. package/CHANGELOG.md +19 -0
  2. package/README.md +227 -54
  3. package/dst/speechflow-node-a2a-ffmpeg.d.ts +13 -0
  4. package/dst/speechflow-node-a2a-ffmpeg.js +152 -0
  5. package/dst/speechflow-node-a2a-wav.d.ts +11 -0
  6. package/dst/speechflow-node-a2a-wav.js +170 -0
  7. package/dst/speechflow-node-a2t-deepgram.d.ts +12 -0
  8. package/dst/speechflow-node-a2t-deepgram.js +220 -0
  9. package/dst/speechflow-node-deepgram.d.ts +3 -1
  10. package/dst/speechflow-node-deepgram.js +86 -22
  11. package/dst/speechflow-node-deepl.d.ts +3 -1
  12. package/dst/speechflow-node-deepl.js +25 -20
  13. package/dst/speechflow-node-device.d.ts +3 -1
  14. package/dst/speechflow-node-device.js +53 -2
  15. package/dst/speechflow-node-elevenlabs.d.ts +4 -1
  16. package/dst/speechflow-node-elevenlabs.js +88 -49
  17. package/dst/speechflow-node-ffmpeg.d.ts +3 -1
  18. package/dst/speechflow-node-ffmpeg.js +42 -4
  19. package/dst/speechflow-node-file.d.ts +3 -1
  20. package/dst/speechflow-node-file.js +84 -13
  21. package/dst/speechflow-node-format.d.ts +11 -0
  22. package/dst/speechflow-node-format.js +80 -0
  23. package/dst/speechflow-node-gemma.d.ts +3 -1
  24. package/dst/speechflow-node-gemma.js +84 -23
  25. package/dst/speechflow-node-mqtt.d.ts +13 -0
  26. package/dst/speechflow-node-mqtt.js +181 -0
  27. package/dst/speechflow-node-opus.d.ts +12 -0
  28. package/dst/speechflow-node-opus.js +135 -0
  29. package/dst/speechflow-node-subtitle.d.ts +12 -0
  30. package/dst/speechflow-node-subtitle.js +96 -0
  31. package/dst/speechflow-node-t2a-elevenlabs.d.ts +13 -0
  32. package/dst/speechflow-node-t2a-elevenlabs.js +182 -0
  33. package/dst/speechflow-node-t2t-deepl.d.ts +12 -0
  34. package/dst/speechflow-node-t2t-deepl.js +133 -0
  35. package/dst/speechflow-node-t2t-format.d.ts +11 -0
  36. package/dst/speechflow-node-t2t-format.js +80 -0
  37. package/dst/speechflow-node-t2t-gemma.d.ts +13 -0
  38. package/dst/speechflow-node-t2t-gemma.js +213 -0
  39. package/dst/speechflow-node-t2t-opus.d.ts +12 -0
  40. package/dst/speechflow-node-t2t-opus.js +135 -0
  41. package/dst/speechflow-node-t2t-subtitle.d.ts +12 -0
  42. package/dst/speechflow-node-t2t-subtitle.js +96 -0
  43. package/dst/speechflow-node-trace.d.ts +11 -0
  44. package/dst/speechflow-node-trace.js +88 -0
  45. package/dst/speechflow-node-wav.d.ts +11 -0
  46. package/dst/speechflow-node-wav.js +170 -0
  47. package/dst/speechflow-node-websocket.d.ts +3 -1
  48. package/dst/speechflow-node-websocket.js +149 -49
  49. package/dst/speechflow-node-whisper-common.d.ts +34 -0
  50. package/dst/speechflow-node-whisper-common.js +7 -0
  51. package/dst/speechflow-node-whisper-ggml.d.ts +1 -0
  52. package/dst/speechflow-node-whisper-ggml.js +97 -0
  53. package/dst/speechflow-node-whisper-onnx.d.ts +1 -0
  54. package/dst/speechflow-node-whisper-onnx.js +131 -0
  55. package/dst/speechflow-node-whisper-worker-ggml.d.ts +1 -0
  56. package/dst/speechflow-node-whisper-worker-ggml.js +97 -0
  57. package/dst/speechflow-node-whisper-worker-onnx.d.ts +1 -0
  58. package/dst/speechflow-node-whisper-worker-onnx.js +131 -0
  59. package/dst/speechflow-node-whisper-worker.d.ts +1 -0
  60. package/dst/speechflow-node-whisper-worker.js +116 -0
  61. package/dst/speechflow-node-whisper-worker2.d.ts +1 -0
  62. package/dst/speechflow-node-whisper-worker2.js +82 -0
  63. package/dst/speechflow-node-whisper.d.ts +19 -0
  64. package/dst/speechflow-node-whisper.js +604 -0
  65. package/dst/speechflow-node-x2x-trace.d.ts +11 -0
  66. package/dst/speechflow-node-x2x-trace.js +88 -0
  67. package/dst/speechflow-node-xio-device.d.ts +13 -0
  68. package/dst/speechflow-node-xio-device.js +205 -0
  69. package/dst/speechflow-node-xio-file.d.ts +11 -0
  70. package/dst/speechflow-node-xio-file.js +176 -0
  71. package/dst/speechflow-node-xio-mqtt.d.ts +13 -0
  72. package/dst/speechflow-node-xio-mqtt.js +181 -0
  73. package/dst/speechflow-node-xio-websocket.d.ts +13 -0
  74. package/dst/speechflow-node-xio-websocket.js +275 -0
  75. package/dst/speechflow-node.d.ts +25 -7
  76. package/dst/speechflow-node.js +74 -9
  77. package/dst/speechflow-utils.d.ts +23 -0
  78. package/dst/speechflow-utils.js +194 -0
  79. package/dst/speechflow.js +146 -43
  80. package/etc/biome.jsonc +12 -4
  81. package/etc/stx.conf +65 -0
  82. package/package.d/@ericedouard+vad-node-realtime+0.2.0.patch +18 -0
  83. package/package.json +49 -31
  84. package/sample.yaml +61 -23
  85. package/src/lib.d.ts +6 -1
  86. package/src/{speechflow-node-ffmpeg.ts → speechflow-node-a2a-ffmpeg.ts} +10 -4
  87. package/src/speechflow-node-a2a-wav.ts +143 -0
  88. package/src/speechflow-node-a2t-deepgram.ts +199 -0
  89. package/src/speechflow-node-t2a-elevenlabs.ts +160 -0
  90. package/src/{speechflow-node-deepl.ts → speechflow-node-t2t-deepl.ts} +36 -25
  91. package/src/speechflow-node-t2t-format.ts +85 -0
  92. package/src/{speechflow-node-gemma.ts → speechflow-node-t2t-gemma.ts} +89 -25
  93. package/src/speechflow-node-t2t-opus.ts +111 -0
  94. package/src/speechflow-node-t2t-subtitle.ts +101 -0
  95. package/src/speechflow-node-x2x-trace.ts +92 -0
  96. package/src/{speechflow-node-device.ts → speechflow-node-xio-device.ts} +25 -3
  97. package/src/speechflow-node-xio-file.ts +153 -0
  98. package/src/speechflow-node-xio-mqtt.ts +154 -0
  99. package/src/speechflow-node-xio-websocket.ts +248 -0
  100. package/src/speechflow-node.ts +78 -13
  101. package/src/speechflow-utils.ts +212 -0
  102. package/src/speechflow.ts +150 -43
  103. package/etc/nps.yaml +0 -40
  104. package/src/speechflow-node-deepgram.ts +0 -133
  105. package/src/speechflow-node-elevenlabs.ts +0 -116
  106. package/src/speechflow-node-file.ts +0 -108
  107. package/src/speechflow-node-websocket.ts +0 -179
@@ -0,0 +1,88 @@
"use strict";
/*
** SpeechFlow - Speech Processing Flow Graph
** Copyright (c) 2024-2025 Dr. Ralf S. Engelschall <rse@engelschall.com>
** Licensed under GPL 3.0 <https://spdx.org/licenses/GPL-3.0-only>
*/
var __importDefault = (this && this.__importDefault) || function (mod) {
    return (mod && mod.__esModule) ? mod : { "default": mod };
};
Object.defineProperty(exports, "__esModule", { value: true });
/* standard dependencies */
const node_stream_1 = __importDefault(require("node:stream"));
/* internal dependencies */
const speechflow_node_1 = __importDefault(require("./speechflow-node"));
/* SpeechFlow node for data flow tracing: passes every chunk through
   unchanged and logs a one-line summary of it */
class SpeechFlowNodeTrace extends speechflow_node_1.default {
    /* declare official node name */
    static name = "trace";
    /* construct node */
    constructor(id, cfg, opts, args) {
        super(id, cfg, opts, args);
        /* declare node configuration parameters:
           - type: payload kind this node traces ("audio" buffers or "text" strings)
           - name: optional instance name used as a log prefix */
        this.configure({
            type: { type: "string", pos: 0, val: "audio", match: /^(?:audio|text)$/ },
            name: { type: "string", pos: 1 }
        });
        /* declare node input/output format (pure pass-through: output equals input) */
        this.input = this.params.type;
        this.output = this.params.type;
    }
    /* open node */
    async open() {
        /* wrapper for local logging which prefixes the optional instance name */
        const log = (level, msg) => {
            if (this.params.name !== undefined)
                this.log(level, `[${this.params.name}]: ${msg}`);
            else
                this.log(level, msg);
        };
        /* provide a pass-through Duplex stream which merely traces the chunks
           (NOTE: previous comment wrongly claimed a Deepgram API attachment —
           this node performs no external communication at all) */
        const type = this.params.type;
        this.stream = new node_stream_1.default.Transform({
            writableObjectMode: true,
            readableObjectMode: true,
            decodeStrings: false,
            transform(chunk, encoding, callback) {
                let error;
                /* format a timestamp for logging (chunk timestamps are assumed to be
                   Luxon-style objects providing toFormat() — confirm against writer nodes) */
                const fmt = (t) => t.toFormat("hh:mm:ss.SSS");
                if (Buffer.isBuffer(chunk.payload)) {
                    /* Buffer payloads are only valid for audio chunks */
                    if (type === "audio")
                        log("info", `writing ${type} chunk: start=${fmt(chunk.timestampStart)} ` +
                            `end=${fmt(chunk.timestampEnd)} kind=${chunk.kind} type=${chunk.type} ` +
                            `payload-type=Buffer payload-bytes=${chunk.payload.byteLength}`);
                    else
                        error = new Error(`writing ${type} chunk: seen Buffer instead of String chunk type`);
                }
                else {
                    /* String payloads are only valid for text chunks */
                    if (type === "text")
                        /* BUGFIX: a space was missing after "type=${chunk.type}", which glued
                           the field together with "payload-type=String" in the log output */
                        log("info", `writing ${type} chunk: start=${fmt(chunk.timestampStart)} ` +
                            `end=${fmt(chunk.timestampEnd)} kind=${chunk.kind} type=${chunk.type} ` +
                            `payload-type=String payload-length=${chunk.payload.length} ` +
                            `payload-encoding=${encoding} payload-content="${chunk.payload.toString()}"`);
                    else
                        error = new Error(`writing ${type} chunk: seen String instead of Buffer chunk type`);
                }
                if (error !== undefined)
                    callback(error);
                else {
                    /* pass the chunk through unchanged */
                    this.push(chunk, encoding);
                    callback();
                }
            },
            final(callback) {
                /* signal end-of-stream downstream */
                this.push(null);
                callback();
            }
        });
    }
    /* close node */
    async close() {
        /* close stream */
        if (this.stream !== null) {
            this.stream.destroy();
            this.stream = null;
        }
    }
}
exports.default = SpeechFlowNodeTrace;
@@ -0,0 +1,13 @@
import SpeechFlowNode from "./speechflow-node";
/**
 * SpeechFlow node providing audio input/output via a hardware audio device
 * (type declarations only; behavior lives in the corresponding .js file).
 */
export default class SpeechFlowNodeDevice extends SpeechFlowNode {
    /** official node name used in flow-graph expressions */
    static name: string;
    /** handle of the underlying audio I/O connection — presumably the
        PortAudio-based backend; confirm against the implementation */
    private io;
    constructor(id: string, cfg: {
        [id: string]: any;
    }, opts: {
        [id: string]: any;
    }, args: any[]);
    /** resolve an "api:device-name" pseudo-URL to a concrete audio device */
    private audioDeviceFromURL;
    open(): Promise<void>;
    close(): Promise<void>;
}
@@ -0,0 +1,205 @@
"use strict";
/*
** SpeechFlow - Speech Processing Flow Graph
** Copyright (c) 2024-2025 Dr. Ralf S. Engelschall <rse@engelschall.com>
** Licensed under GPL 3.0 <https://spdx.org/licenses/GPL-3.0-only>
*/
var __createBinding = (this && this.__createBinding) || (Object.create ? (function(o, m, k, k2) {
    if (k2 === undefined) k2 = k;
    var desc = Object.getOwnPropertyDescriptor(m, k);
    if (!desc || ("get" in desc ? !m.__esModule : desc.writable || desc.configurable)) {
        desc = { enumerable: true, get: function() { return m[k]; } };
    }
    Object.defineProperty(o, k2, desc);
}) : (function(o, m, k, k2) {
    if (k2 === undefined) k2 = k;
    o[k2] = m[k];
}));
var __setModuleDefault = (this && this.__setModuleDefault) || (Object.create ? (function(o, v) {
    Object.defineProperty(o, "default", { enumerable: true, value: v });
}) : function(o, v) {
    o["default"] = v;
});
var __importStar = (this && this.__importStar) || (function () {
    var ownKeys = function(o) {
        ownKeys = Object.getOwnPropertyNames || function (o) {
            var ar = [];
            for (var k in o) if (Object.prototype.hasOwnProperty.call(o, k)) ar[ar.length] = k;
            return ar;
        };
        return ownKeys(o);
    };
    return function (mod) {
        if (mod && mod.__esModule) return mod;
        var result = {};
        if (mod != null) for (var k = ownKeys(mod), i = 0; i < k.length; i++) if (k[i] !== "default") __createBinding(result, mod, k[i]);
        __setModuleDefault(result, mod);
        return result;
    };
})();
var __importDefault = (this && this.__importDefault) || function (mod) {
    return (mod && mod.__esModule) ? mod : { "default": mod };
};
Object.defineProperty(exports, "__esModule", { value: true });
/* standard dependencies */
const node_stream_1 = __importDefault(require("node:stream"));
/* external dependencies */
const naudiodon_1 = __importDefault(require("@gpeng/naudiodon"));
/* internal dependencies */
const speechflow_node_1 = __importDefault(require("./speechflow-node"));
const utils = __importStar(require("./speechflow-utils"));
/* SpeechFlow node for audio device access (capture and/or playback via PortAudio) */
class SpeechFlowNodeDevice extends speechflow_node_1.default {
    /* declare official node name */
    static name = "device";
    /* internal state: handle of the PortAudio I/O connection */
    io = null;
    /* construct node */
    constructor(id, cfg, opts, args) {
        super(id, cfg, opts, args);
        /* declare node configuration parameters:
           - device: "api:name" pseudo-URL selecting host API and device
           - mode:   "r" (capture), "w" (playback) or "rw" (both) */
        this.configure({
            device: { type: "string", pos: 0, match: /^(.+?):(.+)$/ },
            mode: { type: "string", pos: 1, val: "rw", match: /^(?:r|w|rw)$/ }
        });
        /* declare node input/output format according to the mode */
        if (this.params.mode === "rw") {
            this.input = "audio";
            this.output = "audio";
        }
        else if (this.params.mode === "r") {
            this.input = "none";
            this.output = "audio";
        }
        else if (this.params.mode === "w") {
            this.input = "audio";
            this.output = "none";
        }
    }
    /* INTERNAL: utility function for finding audio device by pseudo-URL notation
       ("api:name", e.g. "wasapi:VoiceMeeter") — throws on unknown API or device */
    audioDeviceFromURL(mode, url) {
        /* parse URL */
        const m = url.match(/^(.+?):(.+)$/);
        if (m === null)
            throw new Error(`invalid audio device URL "${url}"`);
        const [, type, name] = m;
        /* determine audio API (case-insensitive match on host API type) */
        const apis = naudiodon_1.default.getHostAPIs();
        const api = apis.HostAPIs.find((api) => api.type.toLowerCase() === type.toLowerCase());
        if (!api)
            throw new Error(`invalid audio API type "${type}"`);
        /* determine device of audio API */
        const devices = naudiodon_1.default.getDevices();
        for (const device of devices)
            /* BUGFIX: the log message was missing its closing parenthesis */
            this.log("info", `found audio device "${device.name}" ` +
                `(inputs: ${device.maxInputChannels}, outputs: ${device.maxOutputChannels})`);
        /* pick the first device of the requested API whose name matches (regex match)
           and which has the channel directions required by the requested mode */
        const device = devices.find((device) => {
            return (((mode === "r" && device.maxInputChannels > 0)
                || (mode === "w" && device.maxOutputChannels > 0)
                || (mode === "rw" && device.maxInputChannels > 0 && device.maxOutputChannels > 0)
                || (mode === "any" && (device.maxInputChannels > 0 || device.maxOutputChannels > 0)))
                && device.name.match(name)
                && device.hostAPIName === api.name);
        });
        if (!device)
            throw new Error(`invalid audio device "${name}" (of audio API type "${type}")`);
        return device;
    }
    /* open node */
    async open() {
        /* determine device */
        const device = this.audioDeviceFromURL(this.params.mode, this.params.device);
        /* sanity check sample rate compatibility
           (we still do not resample in input/output for simplification reasons) */
        if (device.defaultSampleRate !== this.config.audioSampleRate)
            throw new Error(`audio device sample rate ${device.defaultSampleRate} is ` +
                `incompatible with required sample rate ${this.config.audioSampleRate}`);
        /* establish device connection
           Notice: "naudion" actually implements Stream.{Readable,Writable,Duplex}, but
           declares just its sub-interface NodeJS.{Readable,Writable,Duplex}Stream,
           so it is correct to cast it back to Stream.{Readable,Writable,Duplex} */
        /* FIXME: the underlying PortAudio outputs verbose/debugging messages */
        if (this.params.mode === "rw") {
            /* input/output device */
            if (device.maxInputChannels === 0)
                throw new Error(`device "${device.id}" does not have any input channels (required by read/write mode)`);
            if (device.maxOutputChannels === 0)
                throw new Error(`device "${device.id}" does not have any output channels (required by read/write mode)`);
            this.log("info", `resolved "${this.params.device}" to duplex device "${device.id}"`);
            this.io = naudiodon_1.default.AudioIO({
                inOptions: {
                    deviceId: device.id,
                    channelCount: this.config.audioChannels,
                    sampleRate: this.config.audioSampleRate,
                    sampleFormat: this.config.audioBitDepth
                },
                outOptions: {
                    deviceId: device.id,
                    channelCount: this.config.audioChannels,
                    sampleRate: this.config.audioSampleRate,
                    sampleFormat: this.config.audioBitDepth
                }
            });
            this.stream = this.io;
            /* convert regular stream into object-mode stream
               (wrapper1 feeds the device's writable side, wrapper2 chunks its readable side) */
            const wrapper1 = utils.createTransformStreamForWritableSide();
            const wrapper2 = utils.createTransformStreamForReadableSide("audio", () => this.timeZero);
            this.stream = node_stream_1.default.compose(wrapper1, this.stream, wrapper2);
        }
        else if (this.params.mode === "r") {
            /* input device */
            if (device.maxInputChannels === 0)
                throw new Error(`device "${device.id}" does not have any input channels (required by read mode)`);
            this.log("info", `resolved "${this.params.device}" to input device "${device.id}"`);
            this.io = naudiodon_1.default.AudioIO({
                inOptions: {
                    deviceId: device.id,
                    channelCount: this.config.audioChannels,
                    sampleRate: this.config.audioSampleRate,
                    sampleFormat: this.config.audioBitDepth
                }
            });
            this.stream = this.io;
            /* convert regular stream into object-mode stream */
            const wrapper = utils.createTransformStreamForReadableSide("audio", () => this.timeZero);
            this.stream.pipe(wrapper);
            this.stream = wrapper;
        }
        else if (this.params.mode === "w") {
            /* output device */
            if (device.maxOutputChannels === 0)
                throw new Error(`device "${device.id}" does not have any output channels (required by write mode)`);
            this.log("info", `resolved "${this.params.device}" to output device "${device.id}"`);
            this.io = naudiodon_1.default.AudioIO({
                outOptions: {
                    deviceId: device.id,
                    channelCount: this.config.audioChannels,
                    sampleRate: this.config.audioSampleRate,
                    sampleFormat: this.config.audioBitDepth
                }
            });
            this.stream = this.io;
            /* convert regular stream into object-mode stream */
            const wrapper = utils.createTransformStreamForWritableSide();
            wrapper.pipe(this.stream);
            this.stream = wrapper;
        }
        else
            throw new Error(`device "${device.id}" does not have any input or output channels`);
        /* pass-through PortAudio errors */
        this.io.on("error", (err) => {
            this.emit("error", err);
        });
        /* start PortAudio */
        this.io.start();
    }
    /* close node */
    async close() {
        /* shutdown PortAudio */
        if (this.io !== null) {
            this.io.quit();
            this.io = null;
        }
    }
}
exports.default = SpeechFlowNodeDevice;
@@ -0,0 +1,11 @@
import SpeechFlowNode from "./speechflow-node";
/**
 * SpeechFlow node providing file and standard-I/O based data input/output
 * (type declarations only; behavior lives in the corresponding .js file).
 */
export default class SpeechFlowNodeFile extends SpeechFlowNode {
    /** official node name used in flow-graph expressions */
    static name: string;
    constructor(id: string, cfg: {
        [id: string]: any;
    }, opts: {
        [id: string]: any;
    }, args: any[]);
    open(): Promise<void>;
    close(): Promise<void>;
}
@@ -0,0 +1,176 @@
"use strict";
/*
** SpeechFlow - Speech Processing Flow Graph
** Copyright (c) 2024-2025 Dr. Ralf S. Engelschall <rse@engelschall.com>
** Licensed under GPL 3.0 <https://spdx.org/licenses/GPL-3.0-only>
*/
var __createBinding = (this && this.__createBinding) || (Object.create ? (function(o, m, k, k2) {
    if (k2 === undefined) k2 = k;
    var desc = Object.getOwnPropertyDescriptor(m, k);
    if (!desc || ("get" in desc ? !m.__esModule : desc.writable || desc.configurable)) {
        desc = { enumerable: true, get: function() { return m[k]; } };
    }
    Object.defineProperty(o, k2, desc);
}) : (function(o, m, k, k2) {
    if (k2 === undefined) k2 = k;
    o[k2] = m[k];
}));
var __setModuleDefault = (this && this.__setModuleDefault) || (Object.create ? (function(o, v) {
    Object.defineProperty(o, "default", { enumerable: true, value: v });
}) : function(o, v) {
    o["default"] = v;
});
var __importStar = (this && this.__importStar) || (function () {
    var ownKeys = function(o) {
        ownKeys = Object.getOwnPropertyNames || function (o) {
            var ar = [];
            for (var k in o) if (Object.prototype.hasOwnProperty.call(o, k)) ar[ar.length] = k;
            return ar;
        };
        return ownKeys(o);
    };
    return function (mod) {
        if (mod && mod.__esModule) return mod;
        var result = {};
        if (mod != null) for (var k = ownKeys(mod), i = 0; i < k.length; i++) if (k[i] !== "default") __createBinding(result, mod, k[i]);
        __setModuleDefault(result, mod);
        return result;
    };
})();
var __importDefault = (this && this.__importDefault) || function (mod) {
    return (mod && mod.__esModule) ? mod : { "default": mod };
};
Object.defineProperty(exports, "__esModule", { value: true });
/* standard dependencies */
const node_fs_1 = __importDefault(require("node:fs"));
const node_stream_1 = __importDefault(require("node:stream"));
/* internal dependencies */
const speechflow_node_1 = __importDefault(require("./speechflow-node"));
const utils = __importStar(require("./speechflow-utils"));
/* SpeechFlow node for file access: reads and/or writes audio or text data
   from/to a file, or from/to standard input/output when path is "-" */
class SpeechFlowNodeFile extends speechflow_node_1.default {
    /* declare official node name */
    static name = "file";
    /* construct node */
    constructor(id, cfg, opts, args) {
        super(id, cfg, opts, args);
        /* declare node configuration parameters:
           - path: file path, or "-" for standard input/output
           - mode: "r" (read), "w" (write) or "rw" (both)
           - type: payload kind ("audio" buffers or "text" strings) */
        this.configure({
            path: { type: "string", pos: 0 },
            mode: { type: "string", pos: 1, val: "r", match: /^(?:r|w|rw)$/ },
            type: { type: "string", pos: 2, val: "audio", match: /^(?:audio|text)$/ }
        });
        /* declare node input/output format according to the mode */
        if (this.params.mode === "rw") {
            this.input = this.params.type;
            this.output = this.params.type;
        }
        else if (this.params.mode === "r") {
            this.input = "none";
            this.output = this.params.type;
        }
        else if (this.params.mode === "w") {
            this.input = this.params.type;
            this.output = "none";
        }
    }
    /* open node */
    async open() {
        if (this.params.mode === "rw") {
            if (this.params.path === "-") {
                /* standard I/O */
                if (this.params.type === "audio") {
                    /* NOTE(review): setEncoding() is called without an argument here,
                       presumably to keep binary audio data un-decoded — confirm this
                       clears rather than sets a default encoding */
                    process.stdin.setEncoding();
                    process.stdout.setEncoding();
                }
                else {
                    process.stdin.setEncoding(this.config.textEncoding);
                    process.stdout.setEncoding(this.config.textEncoding);
                }
                this.stream = node_stream_1.default.Duplex.from({
                    readable: process.stdin,
                    writable: process.stdout
                });
            }
            else {
                /* file I/O
                   NOTE(review): "rw" mode opens a read stream and a write stream on the
                   SAME path simultaneously — the write stream truncates the file on open,
                   so this only makes sense for special files (FIFOs etc.); verify intent */
                if (this.params.type === "audio") {
                    this.stream = node_stream_1.default.Duplex.from({
                        readable: node_fs_1.default.createReadStream(this.params.path),
                        writable: node_fs_1.default.createWriteStream(this.params.path)
                    });
                }
                else {
                    this.stream = node_stream_1.default.Duplex.from({
                        readable: node_fs_1.default.createReadStream(this.params.path, { encoding: this.config.textEncoding }),
                        writable: node_fs_1.default.createWriteStream(this.params.path, { encoding: this.config.textEncoding })
                    });
                }
            }
            /* convert regular stream into object-mode stream
               (wrapper1 feeds the writable side, wrapper2 chunks the readable side) */
            const wrapper1 = utils.createTransformStreamForWritableSide();
            const wrapper2 = utils.createTransformStreamForReadableSide(this.params.type, () => this.timeZero);
            this.stream = node_stream_1.default.compose(wrapper1, this.stream, wrapper2);
        }
        else if (this.params.mode === "r") {
            if (this.params.path === "-") {
                /* standard I/O */
                if (this.params.type === "audio")
                    process.stdin.setEncoding();
                else
                    process.stdin.setEncoding(this.config.textEncoding);
                this.stream = process.stdin;
            }
            else {
                /* file I/O */
                if (this.params.type === "audio")
                    this.stream = node_fs_1.default.createReadStream(this.params.path);
                else
                    this.stream = node_fs_1.default.createReadStream(this.params.path, { encoding: this.config.textEncoding });
            }
            /* convert regular stream into object-mode stream */
            const wrapper = utils.createTransformStreamForReadableSide(this.params.type, () => this.timeZero);
            this.stream.pipe(wrapper);
            this.stream = wrapper;
        }
        else if (this.params.mode === "w") {
            if (this.params.path === "-") {
                /* standard I/O
                   NOTE(review): setEncoding() on process.stdout is a Readable-side API;
                   it works when stdout is a Socket/Duplex — confirm for piped stdout */
                if (this.params.type === "audio")
                    process.stdout.setEncoding();
                else
                    process.stdout.setEncoding(this.config.textEncoding);
                this.stream = process.stdout;
            }
            else {
                /* file I/O */
                if (this.params.type === "audio")
                    this.stream = node_fs_1.default.createWriteStream(this.params.path);
                else
                    this.stream = node_fs_1.default.createWriteStream(this.params.path, { encoding: this.config.textEncoding });
            }
            /* convert regular stream into object-mode stream */
            const wrapper = utils.createTransformStreamForWritableSide();
            wrapper.pipe(this.stream);
            this.stream = wrapper;
        }
        else
            throw new Error(`invalid file mode "${this.params.mode}"`);
    }
    /* close node */
    async close() {
        /* shutdown stream */
        if (this.stream !== null) {
            /* flush pending data: end() the writable side and wait for its callback */
            await new Promise((resolve) => {
                if (this.stream instanceof node_stream_1.default.Writable || this.stream instanceof node_stream_1.default.Duplex)
                    this.stream.end(() => { resolve(); });
                else
                    resolve();
            });
            /* destroy the stream unless it wraps process stdin/stdout,
               which must stay usable for the rest of the process */
            if (this.params.path !== "-")
                this.stream.destroy();
            this.stream = null;
        }
    }
}
exports.default = SpeechFlowNodeFile;
@@ -0,0 +1,13 @@
import SpeechFlowNode from "./speechflow-node";
/**
 * SpeechFlow node providing data input/output via an MQTT broker
 * (type declarations only; behavior lives in the corresponding .js file).
 */
export default class SpeechFlowNodeMQTT extends SpeechFlowNode {
    /** official node name used in flow-graph expressions */
    static name: string;
    /** handle of the MQTT broker connection — presumably an MQTT client
        instance; confirm against the implementation */
    private broker;
    /** client identifier used when connecting to the broker */
    private clientId;
    constructor(id: string, cfg: {
        [id: string]: any;
    }, opts: {
        [id: string]: any;
    }, args: any[]);
    open(): Promise<void>;
    close(): Promise<void>;
}
+ }