speechflow 0.9.5 → 0.9.8

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (109)
  1. package/CHANGELOG.md +24 -0
  2. package/README.md +220 -53
  3. package/dst/speechflow-node-a2a-ffmpeg.d.ts +13 -0
  4. package/dst/speechflow-node-a2a-ffmpeg.js +152 -0
  5. package/dst/speechflow-node-a2a-wav.d.ts +11 -0
  6. package/dst/speechflow-node-a2a-wav.js +170 -0
  7. package/dst/speechflow-node-a2t-deepgram.d.ts +12 -0
  8. package/dst/speechflow-node-a2t-deepgram.js +220 -0
  9. package/dst/speechflow-node-deepgram.d.ts +3 -1
  10. package/dst/speechflow-node-deepgram.js +86 -22
  11. package/dst/speechflow-node-deepl.d.ts +3 -1
  12. package/dst/speechflow-node-deepl.js +25 -20
  13. package/dst/speechflow-node-device.d.ts +3 -1
  14. package/dst/speechflow-node-device.js +53 -2
  15. package/dst/speechflow-node-elevenlabs.d.ts +3 -1
  16. package/dst/speechflow-node-elevenlabs.js +37 -42
  17. package/dst/speechflow-node-ffmpeg.d.ts +3 -1
  18. package/dst/speechflow-node-ffmpeg.js +42 -4
  19. package/dst/speechflow-node-file.d.ts +3 -1
  20. package/dst/speechflow-node-file.js +84 -13
  21. package/dst/speechflow-node-format.d.ts +11 -0
  22. package/dst/speechflow-node-format.js +80 -0
  23. package/dst/speechflow-node-gemma.d.ts +3 -1
  24. package/dst/speechflow-node-gemma.js +84 -23
  25. package/dst/speechflow-node-mqtt.d.ts +13 -0
  26. package/dst/speechflow-node-mqtt.js +181 -0
  27. package/dst/speechflow-node-opus.d.ts +12 -0
  28. package/dst/speechflow-node-opus.js +135 -0
  29. package/dst/speechflow-node-subtitle.d.ts +12 -0
  30. package/dst/speechflow-node-subtitle.js +96 -0
  31. package/dst/speechflow-node-t2a-elevenlabs.d.ts +13 -0
  32. package/dst/speechflow-node-t2a-elevenlabs.js +182 -0
  33. package/dst/speechflow-node-t2t-deepl.d.ts +12 -0
  34. package/dst/speechflow-node-t2t-deepl.js +133 -0
  35. package/dst/speechflow-node-t2t-format.d.ts +11 -0
  36. package/dst/speechflow-node-t2t-format.js +80 -0
  37. package/dst/speechflow-node-t2t-gemma.d.ts +13 -0
  38. package/dst/speechflow-node-t2t-gemma.js +213 -0
  39. package/dst/speechflow-node-t2t-opus.d.ts +12 -0
  40. package/dst/speechflow-node-t2t-opus.js +135 -0
  41. package/dst/speechflow-node-t2t-subtitle.d.ts +12 -0
  42. package/dst/speechflow-node-t2t-subtitle.js +96 -0
  43. package/dst/speechflow-node-trace.d.ts +11 -0
  44. package/dst/speechflow-node-trace.js +88 -0
  45. package/dst/speechflow-node-wav.d.ts +11 -0
  46. package/dst/speechflow-node-wav.js +170 -0
  47. package/dst/speechflow-node-websocket.d.ts +3 -1
  48. package/dst/speechflow-node-websocket.js +149 -49
  49. package/dst/speechflow-node-whisper-common.d.ts +34 -0
  50. package/dst/speechflow-node-whisper-common.js +7 -0
  51. package/dst/speechflow-node-whisper-ggml.d.ts +1 -0
  52. package/dst/speechflow-node-whisper-ggml.js +97 -0
  53. package/dst/speechflow-node-whisper-onnx.d.ts +1 -0
  54. package/dst/speechflow-node-whisper-onnx.js +131 -0
  55. package/dst/speechflow-node-whisper-worker-ggml.d.ts +1 -0
  56. package/dst/speechflow-node-whisper-worker-ggml.js +97 -0
  57. package/dst/speechflow-node-whisper-worker-onnx.d.ts +1 -0
  58. package/dst/speechflow-node-whisper-worker-onnx.js +131 -0
  59. package/dst/speechflow-node-whisper-worker.d.ts +1 -0
  60. package/dst/speechflow-node-whisper-worker.js +116 -0
  61. package/dst/speechflow-node-whisper-worker2.d.ts +1 -0
  62. package/dst/speechflow-node-whisper-worker2.js +82 -0
  63. package/dst/speechflow-node-whisper.d.ts +19 -0
  64. package/dst/speechflow-node-whisper.js +604 -0
  65. package/dst/speechflow-node-x2x-trace.d.ts +11 -0
  66. package/dst/speechflow-node-x2x-trace.js +88 -0
  67. package/dst/speechflow-node-xio-device.d.ts +13 -0
  68. package/dst/speechflow-node-xio-device.js +205 -0
  69. package/dst/speechflow-node-xio-file.d.ts +11 -0
  70. package/dst/speechflow-node-xio-file.js +176 -0
  71. package/dst/speechflow-node-xio-mqtt.d.ts +13 -0
  72. package/dst/speechflow-node-xio-mqtt.js +181 -0
  73. package/dst/speechflow-node-xio-websocket.d.ts +13 -0
  74. package/dst/speechflow-node-xio-websocket.js +275 -0
  75. package/dst/speechflow-node.d.ts +24 -6
  76. package/dst/speechflow-node.js +63 -6
  77. package/dst/speechflow-utils.d.ts +23 -0
  78. package/dst/speechflow-utils.js +194 -0
  79. package/dst/speechflow.js +146 -43
  80. package/etc/biome.jsonc +12 -4
  81. package/etc/speechflow.bat +6 -0
  82. package/etc/speechflow.sh +5 -0
  83. package/etc/speechflow.yaml +71 -0
  84. package/etc/stx.conf +65 -0
  85. package/package.d/@ericedouard+vad-node-realtime+0.2.0.patch +18 -0
  86. package/package.json +49 -31
  87. package/src/lib.d.ts +6 -1
  88. package/src/{speechflow-node-ffmpeg.ts → speechflow-node-a2a-ffmpeg.ts} +10 -4
  89. package/src/speechflow-node-a2a-wav.ts +143 -0
  90. package/src/speechflow-node-a2t-deepgram.ts +199 -0
  91. package/src/{speechflow-node-elevenlabs.ts → speechflow-node-t2a-elevenlabs.ts} +38 -45
  92. package/src/{speechflow-node-deepl.ts → speechflow-node-t2t-deepl.ts} +36 -25
  93. package/src/speechflow-node-t2t-format.ts +85 -0
  94. package/src/{speechflow-node-gemma.ts → speechflow-node-t2t-gemma.ts} +89 -25
  95. package/src/speechflow-node-t2t-opus.ts +111 -0
  96. package/src/speechflow-node-t2t-subtitle.ts +101 -0
  97. package/src/speechflow-node-x2x-trace.ts +92 -0
  98. package/src/{speechflow-node-device.ts → speechflow-node-xio-device.ts} +25 -3
  99. package/src/speechflow-node-xio-file.ts +153 -0
  100. package/src/speechflow-node-xio-mqtt.ts +154 -0
  101. package/src/speechflow-node-xio-websocket.ts +248 -0
  102. package/src/speechflow-node.ts +63 -6
  103. package/src/speechflow-utils.ts +212 -0
  104. package/src/speechflow.ts +150 -43
  105. package/etc/nps.yaml +0 -40
  106. package/sample.yaml +0 -39
  107. package/src/speechflow-node-deepgram.ts +0 -133
  108. package/src/speechflow-node-file.ts +0 -108
  109. package/src/speechflow-node-websocket.ts +0 -179

package/dst/speechflow-node-device.d.ts
@@ -2,7 +2,9 @@ import SpeechFlowNode from "./speechflow-node";
  export default class SpeechFlowNodeDevice extends SpeechFlowNode {
  static name: string;
  private io;
- constructor(id: string, opts: {
+ constructor(id: string, cfg: {
+ [id: string]: any;
+ }, opts: {
  [id: string]: any;
  }, args: any[]);
  private audioDeviceFromURL;
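
Note: this constructor change recurs across every node in this release: a per-node
configuration object "cfg" is inserted between "id" and "opts" and forwarded to the
base class. A minimal sketch of a node written against the new signature (the example
class is hypothetical; only the signature itself is taken from the diff):

    import SpeechFlowNode from "./speechflow-node"

    /* hypothetical example node using the 0.9.8 constructor signature */
    class SpeechFlowNodeExample extends SpeechFlowNode {
        constructor (id: string, cfg: { [ id: string ]: any }, opts: { [ id: string ]: any }, args: any[]) {
            super(id, cfg, opts, args)
            /* node-specific parameters would be declared here via this.configure({ ... }) */
        }
    }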

package/dst/speechflow-node-device.js
@@ -4,14 +4,50 @@
  ** Copyright (c) 2024-2025 Dr. Ralf S. Engelschall <rse@engelschall.com>
  ** Licensed under GPL 3.0 <https://spdx.org/licenses/GPL-3.0-only>
  */
+ var __createBinding = (this && this.__createBinding) || (Object.create ? (function(o, m, k, k2) {
+ if (k2 === undefined) k2 = k;
+ var desc = Object.getOwnPropertyDescriptor(m, k);
+ if (!desc || ("get" in desc ? !m.__esModule : desc.writable || desc.configurable)) {
+ desc = { enumerable: true, get: function() { return m[k]; } };
+ }
+ Object.defineProperty(o, k2, desc);
+ }) : (function(o, m, k, k2) {
+ if (k2 === undefined) k2 = k;
+ o[k2] = m[k];
+ }));
+ var __setModuleDefault = (this && this.__setModuleDefault) || (Object.create ? (function(o, v) {
+ Object.defineProperty(o, "default", { enumerable: true, value: v });
+ }) : function(o, v) {
+ o["default"] = v;
+ });
+ var __importStar = (this && this.__importStar) || (function () {
+ var ownKeys = function(o) {
+ ownKeys = Object.getOwnPropertyNames || function (o) {
+ var ar = [];
+ for (var k in o) if (Object.prototype.hasOwnProperty.call(o, k)) ar[ar.length] = k;
+ return ar;
+ };
+ return ownKeys(o);
+ };
+ return function (mod) {
+ if (mod && mod.__esModule) return mod;
+ var result = {};
+ if (mod != null) for (var k = ownKeys(mod), i = 0; i < k.length; i++) if (k[i] !== "default") __createBinding(result, mod, k[i]);
+ __setModuleDefault(result, mod);
+ return result;
+ };
+ })();
  var __importDefault = (this && this.__importDefault) || function (mod) {
  return (mod && mod.__esModule) ? mod : { "default": mod };
  };
  Object.defineProperty(exports, "__esModule", { value: true });
+ /* standard dependencies */
+ const node_stream_1 = __importDefault(require("node:stream"));
  /* external dependencies */
  const naudiodon_1 = __importDefault(require("@gpeng/naudiodon"));
  /* internal dependencies */
  const speechflow_node_1 = __importDefault(require("./speechflow-node"));
+ const utils = __importStar(require("./speechflow-utils"));
  /* SpeechFlow node for device access */
  class SpeechFlowNodeDevice extends speechflow_node_1.default {
  /* declare official node name */
@@ -19,8 +55,8 @@ class SpeechFlowNodeDevice extends speechflow_node_1.default {
  /* internal state */
  io = null;
  /* construct node */
- constructor(id, opts, args) {
- super(id, opts, args);
+ constructor(id, cfg, opts, args) {
+ super(id, cfg, opts, args);
  /* declare node configuration parameters */
  this.configure({
  device: { type: "string", pos: 0, match: /^(.+?):(.+)$/ },
@@ -54,6 +90,9 @@ class SpeechFlowNodeDevice extends speechflow_node_1.default {
  throw new Error(`invalid audio API type "${type}"`);
  /* determine device of audio API */
  const devices = naudiodon_1.default.getDevices();
+ for (const device of devices)
+ this.log("info", `found audio device "${device.name}" ` +
+ `(inputs: ${device.maxInputChannels}, outputs: ${device.maxOutputChannels}`);
  const device = devices.find((device) => {
  return (((mode === "r" && device.maxInputChannels > 0)
  || (mode === "w" && device.maxOutputChannels > 0)
@@ -102,6 +141,10 @@ class SpeechFlowNodeDevice extends speechflow_node_1.default {
  }
  });
  this.stream = this.io;
+ /* convert regular stream into object-mode stream */
+ const wrapper1 = utils.createTransformStreamForWritableSide();
+ const wrapper2 = utils.createTransformStreamForReadableSide("audio", () => this.timeZero);
+ this.stream = node_stream_1.default.compose(wrapper1, this.stream, wrapper2);
  }
  else if (this.params.mode === "r") {
  /* input device */
@@ -117,6 +160,10 @@ class SpeechFlowNodeDevice extends speechflow_node_1.default {
  }
  });
  this.stream = this.io;
+ /* convert regular stream into object-mode stream */
+ const wrapper = utils.createTransformStreamForReadableSide("audio", () => this.timeZero);
+ this.stream.pipe(wrapper);
+ this.stream = wrapper;
  }
  else if (this.params.mode === "w") {
  /* output device */
@@ -132,6 +179,10 @@ class SpeechFlowNodeDevice extends speechflow_node_1.default {
  }
  });
  this.stream = this.io;
+ /* convert regular stream into object-mode stream */
+ const wrapper = utils.createTransformStreamForWritableSide();
+ wrapper.pipe(this.stream);
+ this.stream = wrapper;
  }
  else
  throw new Error(`device "${device.id}" does not have any input or output channels`);
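
The pattern added in the hunks above recurs throughout this release: the raw byte stream of
the underlying I/O (here the naudiodon device) is wrapped so that the node exposes an
object-mode stream of typed chunks. The createTransformStreamForWritableSide() and
createTransformStreamForReadableSide() helpers come from the new speechflow-utils module,
which is not shown in this excerpt; the sketch below only illustrates the composition idea
with hypothetical stand-ins for those helpers, assuming chunks of the shape { type, payload }:

    import stream from "node:stream"

    /* hypothetical stand-in for the readable-side wrapper:
       raw Buffers from the device become object-mode chunks */
    const wrapReadable = (type: string) => new stream.Transform({
        readableObjectMode: true,
        transform (chunk: Buffer, _enc, cb) {
            cb(null, { type, payload: chunk })
        }
    })

    /* hypothetical stand-in for the writable-side wrapper:
       object-mode chunks are unwrapped into raw Buffers for the device */
    const unwrapWritable = () => new stream.Transform({
        writableObjectMode: true,
        transform (chunk: { payload: Buffer }, _enc, cb) {
            cb(null, chunk.payload)
        }
    })

    /* given a plain binary duplex (e.g. the naudiodon AudioIO stream), compose it
       into an object-mode duplex: objects in -> bytes -> objects out */
    declare const audioIO: stream.Duplex
    const objectModeStream = stream.compose(unwrapWritable(), audioIO, wrapReadable("audio"))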

package/dst/speechflow-node-elevenlabs.d.ts
@@ -3,7 +3,9 @@ export default class SpeechFlowNodeElevenlabs extends SpeechFlowNode {
  static name: string;
  private elevenlabs;
  private static speexInitialized;
- constructor(id: string, opts: {
+ constructor(id: string, cfg: {
+ [id: string]: any;
+ }, opts: {
  [id: string]: any;
  }, args: any[]);
  open(): Promise<void>;

package/dst/speechflow-node-elevenlabs.js
@@ -43,27 +43,13 @@ var __importDefault = (this && this.__importDefault) || function (mod) {
  Object.defineProperty(exports, "__esModule", { value: true });
  /* standard dependencies */
  const node_stream_1 = __importDefault(require("node:stream"));
- const node_events_1 = require("node:events");
  /* external dependencies */
- const ElevenLabs = __importStar(require("elevenlabs"));
+ const ElevenLabs = __importStar(require("@elevenlabs/elevenlabs-js"));
  const get_stream_1 = require("get-stream");
  const speex_resampler_1 = __importDefault(require("speex-resampler"));
  /* internal dependencies */
  const speechflow_node_1 = __importDefault(require("./speechflow-node"));
- /*
- const elevenlabsVoices = {
- "drew": { name: "Drew", model: "eleven_multilingual_v2", lang: [ "en", "de" ] },
- "george": { name: "George", model: "eleven_multilingual_v2", lang: [ "en", "de" ] },
- "bill": { name: "Bill", model: "eleven_multilingual_v2", lang: [ "en", "de" ] },
- "daniel": { name: "Daniel", model: "eleven_multilingual_v1", lang: [ "en", "de" ] },
- "brian": { name: "Brian", model: "eleven_turbo_v2", lang: [ "en" ] },
- "sarah": { name: "Sarah", model: "eleven_multilingual_v2", lang: [ "en", "de" ] },
- "racel": { name: "Racel", model: "eleven_multilingual_v2", lang: [ "en", "de" ] },
- "grace": { name: "Grace", model: "eleven_multilingual_v1", lang: [ "en", "de" ] },
- "matilda": { name: "Matilda", model: "eleven_multilingual_v1", lang: [ "en", "de" ] },
- "alice": { name: "Alice", model: "eleven_turbo_v2", lang: [ "en" ] }
- }
- */
+ /* SpeechFlow node for Elevenlabs text-to-speech conversion */
  class SpeechFlowNodeElevenlabs extends speechflow_node_1.default {
  /* declare official node name */
  static name = "elevenlabs";
@@ -71,8 +57,8 @@ class SpeechFlowNodeElevenlabs extends speechflow_node_1.default {
  elevenlabs = null;
  static speexInitialized = false;
  /* construct node */
- constructor(id, opts, args) {
- super(id, opts, args);
+ constructor(id, cfg, opts, args) {
+ super(id, cfg, opts, args);
  /* declare node configuration parameters */
  this.configure({
  key: { type: "string", val: process.env.SPEECHFLOW_KEY_ELEVENLABS },
@@ -100,7 +86,7 @@ class SpeechFlowNodeElevenlabs extends speechflow_node_1.default {
  "growing_business": 44100,
  "enterprise": 44100
  };
- const sub = await this.elevenlabs.user.getSubscription();
+ const sub = await this.elevenlabs.user.subscription.get();
  const tier = (sub.tier ?? "free");
  this.log("info", `determined ElevenLabs tier: "${tier}"`);
  let maxSampleRate = 16000;
@@ -125,13 +111,14 @@ class SpeechFlowNodeElevenlabs extends speechflow_node_1.default {
  "eleven_multilingual_v2" :
  "eleven_flash_v2_5";
  const speechStream = (text) => {
- return this.elevenlabs.textToSpeech.convert(voice.voice_id, {
+ this.log("info", `ElevenLabs: send text "${text}"`);
+ return this.elevenlabs.textToSpeech.convert(voice.voiceId, {
  text,
- model_id: model,
- language_code: this.params.language,
- output_format: `pcm_${maxSampleRate}`,
+ modelId: model,
+ languageCode: this.params.language,
+ outputFormat: `pcm_${maxSampleRate}`,
  seed: 815, /* arbitrary, but fixated by us */
- voice_settings: {
+ voiceSettings: {
  speed: this.params.speed
  }
  }, {
@@ -139,8 +126,6 @@ class SpeechFlowNodeElevenlabs extends speechflow_node_1.default {
  maxRetries: 10
  });
  };
- /* internal queue of results */
- const queue = new node_events_1.EventEmitter();
  /* establish resampler from ElevenLabs's maximum 24Khz
  output to our standard audio sample rate (48KHz) */
  if (!SpeechFlowNodeElevenlabs.speexInitialized) {
@@ -149,26 +134,36 @@ class SpeechFlowNodeElevenlabs extends speechflow_node_1.default {
  SpeechFlowNodeElevenlabs.speexInitialized = true;
  }
  const resampler = new speex_resampler_1.default(1, maxSampleRate, this.config.audioSampleRate, 7);
- /* create duplex stream and connect it to the ElevenLabs API */
- this.stream = new node_stream_1.default.Duplex({
- write(chunk, encoding, callback) {
- const data = chunk.toString();
- speechStream(data).then((stream) => {
- (0, get_stream_1.getStreamAsBuffer)(stream).then((buffer) => {
- const bufferResampled = resampler.processChunk(buffer);
- queue.emit("audio", bufferResampled);
- callback();
+ /* create transform stream and connect it to the ElevenLabs API */
+ const log = (level, msg) => { this.log(level, msg); };
+ this.stream = new node_stream_1.default.Transform({
+ writableObjectMode: true,
+ readableObjectMode: true,
+ decodeStrings: false,
+ transform(chunk, encoding, callback) {
+ if (Buffer.isBuffer(chunk.payload))
+ callback(new Error("invalid chunk payload type"));
+ else {
+ speechStream(chunk.payload).then((stream) => {
+ (0, get_stream_1.getStreamAsBuffer)(stream).then((buffer) => {
+ const bufferResampled = resampler.processChunk(buffer);
+ log("info", `ElevenLabs: received audio (buffer length: ${buffer.byteLength})`);
+ const chunkNew = chunk.clone();
+ chunkNew.type = "audio";
+ chunkNew.payload = bufferResampled;
+ this.push(chunkNew);
+ callback();
+ }).catch((error) => {
+ callback(error);
+ });
  }).catch((error) => {
  callback(error);
  });
- }).catch((error) => {
- callback(error);
- });
+ }
  },
- read(size) {
- queue.once("audio", (buffer) => {
- this.push(buffer, "binary");
- });
+ final(callback) {
+ this.push(null);
+ callback();
  }
  });
  }
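
Two changes combine in the hunks above: the node migrates from the deprecated "elevenlabs"
package to "@elevenlabs/elevenlabs-js" (camelCase request fields, user.subscription.get()),
and the old Duplex-plus-EventEmitter queue is replaced by a single object-mode Transform
that resolves one asynchronous API call per text chunk. The general shape of such a
transform, with a hypothetical synthesize() standing in for the ElevenLabs call and the
resampling step:

    import stream from "node:stream"

    /* hypothetical stand-in for the ElevenLabs text-to-speech call plus resampling */
    declare function synthesize (text: string): Promise<Buffer>

    const tts = new stream.Transform({
        writableObjectMode: true,
        readableObjectMode: true,
        decodeStrings:      false,
        transform (chunk: { type: string, payload: string | Buffer }, _enc, callback) {
            if (Buffer.isBuffer(chunk.payload))
                callback(new Error("invalid chunk payload type"))
            else
                synthesize(chunk.payload)
                    .then((audio) => { this.push({ ...chunk, type: "audio", payload: audio }); callback() })
                    .catch((err)  => { callback(err) })
        },
        final (callback) {
            this.push(null)   /* end the readable side once all writes are flushed */
            callback()
        }
    })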

package/dst/speechflow-node-ffmpeg.d.ts
@@ -3,7 +3,9 @@ export default class SpeechFlowNodeFFmpeg extends SpeechFlowNode {
  static name: string;
  private ffmpegBinary;
  private ffmpeg;
- constructor(id: string, opts: {
+ constructor(id: string, cfg: {
+ [id: string]: any;
+ }, opts: {
  [id: string]: any;
  }, args: any[]);
  open(): Promise<void>;

package/dst/speechflow-node-ffmpeg.js
@@ -4,6 +4,39 @@
  ** Copyright (c) 2024-2025 Dr. Ralf S. Engelschall <rse@engelschall.com>
  ** Licensed under GPL 3.0 <https://spdx.org/licenses/GPL-3.0-only>
  */
+ var __createBinding = (this && this.__createBinding) || (Object.create ? (function(o, m, k, k2) {
+ if (k2 === undefined) k2 = k;
+ var desc = Object.getOwnPropertyDescriptor(m, k);
+ if (!desc || ("get" in desc ? !m.__esModule : desc.writable || desc.configurable)) {
+ desc = { enumerable: true, get: function() { return m[k]; } };
+ }
+ Object.defineProperty(o, k2, desc);
+ }) : (function(o, m, k, k2) {
+ if (k2 === undefined) k2 = k;
+ o[k2] = m[k];
+ }));
+ var __setModuleDefault = (this && this.__setModuleDefault) || (Object.create ? (function(o, v) {
+ Object.defineProperty(o, "default", { enumerable: true, value: v });
+ }) : function(o, v) {
+ o["default"] = v;
+ });
+ var __importStar = (this && this.__importStar) || (function () {
+ var ownKeys = function(o) {
+ ownKeys = Object.getOwnPropertyNames || function (o) {
+ var ar = [];
+ for (var k in o) if (Object.prototype.hasOwnProperty.call(o, k)) ar[ar.length] = k;
+ return ar;
+ };
+ return ownKeys(o);
+ };
+ return function (mod) {
+ if (mod && mod.__esModule) return mod;
+ var result = {};
+ if (mod != null) for (var k = ownKeys(mod), i = 0; i < k.length; i++) if (k[i] !== "default") __createBinding(result, mod, k[i]);
+ __setModuleDefault(result, mod);
+ return result;
+ };
+ })();
  var __importDefault = (this && this.__importDefault) || function (mod) {
  return (mod && mod.__esModule) ? mod : { "default": mod };
  };
@@ -15,6 +48,7 @@ const ffmpeg_1 = __importDefault(require("@rse/ffmpeg"));
  const ffmpeg_stream_1 = require("ffmpeg-stream");
  /* internal dependencies */
  const speechflow_node_1 = __importDefault(require("./speechflow-node"));
+ const utils = __importStar(require("./speechflow-utils"));
  /* SpeechFlow node for FFmpeg */
  class SpeechFlowNodeFFmpeg extends speechflow_node_1.default {
  /* declare official node name */
@@ -23,8 +57,8 @@ class SpeechFlowNodeFFmpeg extends speechflow_node_1.default {
  ffmpegBinary = ffmpeg_1.default.supported ? ffmpeg_1.default.binary : "ffmpeg";
  ffmpeg = null;
  /* construct node */
- constructor(id, opts, args) {
- super(id, opts, args);
+ constructor(id, cfg, opts, args) {
+ super(id, cfg, opts, args);
  /* declare node configuration parameters */
  this.configure({
  src: { type: "string", pos: 0, val: "pcm", match: /^(?:pcm|wav|mp3|opus)$/ },
@@ -87,9 +121,13 @@ class SpeechFlowNodeFFmpeg extends speechflow_node_1.default {
  this.ffmpeg.run();
  /* establish a duplex stream and connect it to FFmpeg */
  this.stream = node_stream_1.default.Duplex.from({
- readable: streamOutput,
- writable: streamInput
+ writable: streamInput,
+ readable: streamOutput
  });
+ /* wrap streams with conversions for chunk vs plain audio */
+ const wrapper1 = utils.createTransformStreamForWritableSide();
+ const wrapper2 = utils.createTransformStreamForReadableSide("audio", () => this.timeZero);
+ this.stream = node_stream_1.default.compose(wrapper1, this.stream, wrapper2);
  }
  /* close node */
  async close() {

package/dst/speechflow-node-file.d.ts
@@ -1,7 +1,9 @@
  import SpeechFlowNode from "./speechflow-node";
  export default class SpeechFlowNodeFile extends SpeechFlowNode {
  static name: string;
- constructor(id: string, opts: {
+ constructor(id: string, cfg: {
+ [id: string]: any;
+ }, opts: {
  [id: string]: any;
  }, args: any[]);
  open(): Promise<void>;

package/dst/speechflow-node-file.js
@@ -4,6 +4,39 @@
  ** Copyright (c) 2024-2025 Dr. Ralf S. Engelschall <rse@engelschall.com>
  ** Licensed under GPL 3.0 <https://spdx.org/licenses/GPL-3.0-only>
  */
+ var __createBinding = (this && this.__createBinding) || (Object.create ? (function(o, m, k, k2) {
+ if (k2 === undefined) k2 = k;
+ var desc = Object.getOwnPropertyDescriptor(m, k);
+ if (!desc || ("get" in desc ? !m.__esModule : desc.writable || desc.configurable)) {
+ desc = { enumerable: true, get: function() { return m[k]; } };
+ }
+ Object.defineProperty(o, k2, desc);
+ }) : (function(o, m, k, k2) {
+ if (k2 === undefined) k2 = k;
+ o[k2] = m[k];
+ }));
+ var __setModuleDefault = (this && this.__setModuleDefault) || (Object.create ? (function(o, v) {
+ Object.defineProperty(o, "default", { enumerable: true, value: v });
+ }) : function(o, v) {
+ o["default"] = v;
+ });
+ var __importStar = (this && this.__importStar) || (function () {
+ var ownKeys = function(o) {
+ ownKeys = Object.getOwnPropertyNames || function (o) {
+ var ar = [];
+ for (var k in o) if (Object.prototype.hasOwnProperty.call(o, k)) ar[ar.length] = k;
+ return ar;
+ };
+ return ownKeys(o);
+ };
+ return function (mod) {
+ if (mod && mod.__esModule) return mod;
+ var result = {};
+ if (mod != null) for (var k = ownKeys(mod), i = 0; i < k.length; i++) if (k[i] !== "default") __createBinding(result, mod, k[i]);
+ __setModuleDefault(result, mod);
+ return result;
+ };
+ })();
  var __importDefault = (this && this.__importDefault) || function (mod) {
  return (mod && mod.__esModule) ? mod : { "default": mod };
  };
@@ -13,13 +46,14 @@ const node_fs_1 = __importDefault(require("node:fs"));
  const node_stream_1 = __importDefault(require("node:stream"));
  /* internal dependencies */
  const speechflow_node_1 = __importDefault(require("./speechflow-node"));
+ const utils = __importStar(require("./speechflow-utils"));
  /* SpeechFlow node for file access */
  class SpeechFlowNodeFile extends speechflow_node_1.default {
  /* declare official node name */
  static name = "file";
  /* construct node */
- constructor(id, opts, args) {
- super(id, opts, args);
+ constructor(id, cfg, opts, args) {
+ super(id, cfg, opts, args);
  /* declare node configuration parameters */
  this.configure({
  path: { type: "string", pos: 0 },
@@ -42,12 +76,17 @@ class SpeechFlowNodeFile extends speechflow_node_1.default {
  }
  /* open node */
  async open() {
- const encoding = this.params.type === "text" ? this.config.textEncoding : "binary";
  if (this.params.mode === "rw") {
  if (this.params.path === "-") {
  /* standard I/O */
- process.stdin.setEncoding(encoding);
- process.stdout.setEncoding(encoding);
+ if (this.params.type === "audio") {
+ process.stdin.setEncoding();
+ process.stdout.setEncoding();
+ }
+ else {
+ process.stdin.setEncoding(this.config.textEncoding);
+ process.stdout.setEncoding(this.config.textEncoding);
+ }
  this.stream = node_stream_1.default.Duplex.from({
  readable: process.stdin,
  writable: process.stdout
@@ -55,33 +94,65 @@
  }
  else {
  /* file I/O */
- this.stream = node_stream_1.default.Duplex.from({
- readable: node_fs_1.default.createReadStream(this.params.path, { encoding }),
- writable: node_fs_1.default.createWriteStream(this.params.path, { encoding })
- });
+ if (this.params.type === "audio") {
+ this.stream = node_stream_1.default.Duplex.from({
+ readable: node_fs_1.default.createReadStream(this.params.path),
+ writable: node_fs_1.default.createWriteStream(this.params.path)
+ });
+ }
+ else {
+ this.stream = node_stream_1.default.Duplex.from({
+ readable: node_fs_1.default.createReadStream(this.params.path, { encoding: this.config.textEncoding }),
+ writable: node_fs_1.default.createWriteStream(this.params.path, { encoding: this.config.textEncoding })
+ });
+ }
  }
+ /* convert regular stream into object-mode stream */
+ const wrapper1 = utils.createTransformStreamForWritableSide();
+ const wrapper2 = utils.createTransformStreamForReadableSide(this.params.type, () => this.timeZero);
+ this.stream = node_stream_1.default.compose(wrapper1, this.stream, wrapper2);
  }
  else if (this.params.mode === "r") {
  if (this.params.path === "-") {
  /* standard I/O */
- process.stdin.setEncoding(encoding);
+ if (this.params.type === "audio")
+ process.stdin.setEncoding();
+ else
+ process.stdin.setEncoding(this.config.textEncoding);
  this.stream = process.stdin;
  }
  else {
  /* file I/O */
- this.stream = node_fs_1.default.createReadStream(this.params.path, { encoding });
+ if (this.params.type === "audio")
+ this.stream = node_fs_1.default.createReadStream(this.params.path);
+ else
+ this.stream = node_fs_1.default.createReadStream(this.params.path, { encoding: this.config.textEncoding });
  }
+ /* convert regular stream into object-mode stream */
+ const wrapper = utils.createTransformStreamForReadableSide(this.params.type, () => this.timeZero);
+ this.stream.pipe(wrapper);
+ this.stream = wrapper;
  }
  else if (this.params.mode === "w") {
  if (this.params.path === "-") {
  /* standard I/O */
- process.stdout.setEncoding(encoding);
+ if (this.params.type === "audio")
+ process.stdout.setEncoding();
+ else
+ process.stdout.setEncoding(this.config.textEncoding);
  this.stream = process.stdout;
  }
  else {
  /* file I/O */
- this.stream = node_fs_1.default.createWriteStream(this.params.path, { encoding });
+ if (this.params.type === "audio")
+ this.stream = node_fs_1.default.createWriteStream(this.params.path);
+ else
+ this.stream = node_fs_1.default.createWriteStream(this.params.path, { encoding: this.config.textEncoding });
  }
+ /* convert regular stream into object-mode stream */
+ const wrapper = utils.createTransformStreamForWritableSide();
+ wrapper.pipe(this.stream);
+ this.stream = wrapper;
  }
  else
  throw new Error(`invalid file mode "${this.params.mode}"`);
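
For the one-directional modes the same idea is applied without compose(): a plain fs or
stdio stream is piped into (or out of) a single wrapper so that the node still presents an
object-mode interface. Roughly, for the read case (wrapper semantics again inferred from
their usage in the diff, not taken from speechflow-utils itself):

    import fs from "node:fs"
    import stream from "node:stream"

    /* plain text source: a file, or equally process.stdin */
    const source = fs.createReadStream("input.txt", { encoding: "utf-8" })

    /* hypothetical readable-side wrapper: raw text in, { type, payload } chunks out */
    const wrapper = new stream.Transform({
        readableObjectMode: true,
        transform (chunk, _enc, cb) {
            cb(null, { type: "text", payload: chunk })
        }
    })

    /* after piping, "wrapper" plays the role of this.stream inside the node */
    source.pipe(wrapper)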

package/dst/speechflow-node-format.d.ts
@@ -0,0 +1,11 @@
+ import SpeechFlowNode from "./speechflow-node";
+ export default class SpeechFlowNodeFormat extends SpeechFlowNode {
+ static name: string;
+ constructor(id: string, cfg: {
+ [id: string]: any;
+ }, opts: {
+ [id: string]: any;
+ }, args: any[]);
+ open(): Promise<void>;
+ close(): Promise<void>;
+ }

package/dst/speechflow-node-format.js
@@ -0,0 +1,80 @@
+ "use strict";
+ /*
+ ** SpeechFlow - Speech Processing Flow Graph
+ ** Copyright (c) 2024-2025 Dr. Ralf S. Engelschall <rse@engelschall.com>
+ ** Licensed under GPL 3.0 <https://spdx.org/licenses/GPL-3.0-only>
+ */
+ var __importDefault = (this && this.__importDefault) || function (mod) {
+ return (mod && mod.__esModule) ? mod : { "default": mod };
+ };
+ Object.defineProperty(exports, "__esModule", { value: true });
+ /* standard dependencies */
+ const node_stream_1 = __importDefault(require("node:stream"));
+ /* external dependencies */
+ const wrap_text_1 = __importDefault(require("wrap-text"));
+ /* internal dependencies */
+ const speechflow_node_1 = __importDefault(require("./speechflow-node"));
+ /* SpeechFlow node for text-to-text formatting */
+ class SpeechFlowNodeFormat extends speechflow_node_1.default {
+ /* declare official node name */
+ static name = "format";
+ /* construct node */
+ constructor(id, cfg, opts, args) {
+ super(id, cfg, opts, args);
+ /* declare node configuration parameters */
+ this.configure({
+ width: { type: "number", val: 80 }
+ });
+ /* declare node input/output format */
+ this.input = "text";
+ this.output = "text";
+ }
+ /* open node */
+ async open() {
+ /* provide text-to-text formatter */
+ const format = async (text) => {
+ text = (0, wrap_text_1.default)(text, this.params.width);
+ text = text.replace(/([^\n])$/, "$1\n");
+ return text;
+ };
+ /* establish a duplex stream and connect it to DeepL translation */
+ this.stream = new node_stream_1.default.Transform({
+ readableObjectMode: true,
+ writableObjectMode: true,
+ decodeStrings: false,
+ transform(chunk, encoding, callback) {
+ if (Buffer.isBuffer(chunk.payload))
+ callback(new Error("invalid chunk payload type"));
+ else {
+ if (chunk.payload === "") {
+ this.push(chunk);
+ callback();
+ }
+ else {
+ format(chunk.payload).then((payload) => {
+ const chunkNew = chunk.clone();
+ chunkNew.payload = payload;
+ this.push(chunkNew);
+ callback();
+ }).catch((err) => {
+ callback(err);
+ });
+ }
+ }
+ },
+ final(callback) {
+ this.push(null);
+ callback();
+ }
+ });
+ }
+ /* open node */
+ async close() {
+ /* close stream */
+ if (this.stream !== null) {
+ this.stream.destroy();
+ this.stream = null;
+ }
+ }
+ }
+ exports.default = SpeechFlowNodeFormat;
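
The formatter this new node applies to each text chunk is simply a hard word-wrap plus a
guaranteed trailing newline. As a standalone function (assuming the wrap-text default
export takes (text, width), exactly as the compiled code above calls it):

    import wrap from "wrap-text"

    function format (text: string, width = 80): string {
        text = wrap(text, width)                  /* hard-wrap lines at the configured width */
        return text.replace(/([^\n])$/, "$1\n")   /* ensure the chunk ends with a newline */
    }

    /* example: wraps into lines of at most 20 characters, newline-terminated */
    console.log(format("the quick brown fox jumps over the lazy dog", 20))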

package/dst/speechflow-node-gemma.d.ts
@@ -3,7 +3,9 @@ export default class SpeechFlowNodeGemma extends SpeechFlowNode {
  static name: string;
  private ollama;
  private setup;
- constructor(id: string, opts: {
+ constructor(id: string, cfg: {
+ [id: string]: any;
+ }, opts: {
  [id: string]: any;
  }, args: any[]);
  open(): Promise<void>;