speechflow 2.0.4 → 2.1.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (56)
  1. package/CHANGELOG.md +25 -0
  2. package/README.md +47 -15
  3. package/etc/speechflow.yaml +20 -48
  4. package/etc/stx.conf +2 -2
  5. package/package.json +6 -6
  6. package/speechflow-cli/dst/speechflow-node-a2a-gtcrn-wt.d.ts +1 -0
  7. package/speechflow-cli/dst/speechflow-node-a2a-gtcrn-wt.js +60 -0
  8. package/speechflow-cli/dst/speechflow-node-a2a-gtcrn-wt.js.map +1 -0
  9. package/speechflow-cli/dst/speechflow-node-a2a-gtcrn.d.ts +15 -0
  10. package/speechflow-cli/dst/speechflow-node-a2a-gtcrn.js +234 -0
  11. package/speechflow-cli/dst/speechflow-node-a2a-gtcrn.js.map +1 -0
  12. package/speechflow-cli/dst/speechflow-node-a2a-meter.js +2 -2
  13. package/speechflow-cli/dst/speechflow-node-a2a-meter.js.map +1 -1
  14. package/speechflow-cli/dst/speechflow-node-a2t-deepgram.js +42 -21
  15. package/speechflow-cli/dst/speechflow-node-a2t-deepgram.js.map +1 -1
  16. package/speechflow-cli/dst/speechflow-node-t2t-deepl.js +1 -1
  17. package/speechflow-cli/dst/speechflow-node-t2t-deepl.js.map +1 -1
  18. package/speechflow-cli/dst/speechflow-node-t2t-profanity.js +26 -6
  19. package/speechflow-cli/dst/speechflow-node-t2t-profanity.js.map +1 -1
  20. package/speechflow-cli/dst/speechflow-node-t2t-sentence.d.ts +3 -2
  21. package/speechflow-cli/dst/speechflow-node-t2t-sentence.js +135 -51
  22. package/speechflow-cli/dst/speechflow-node-t2t-sentence.js.map +1 -1
  23. package/speechflow-cli/dst/speechflow-node-t2t-simulator.d.ts +11 -0
  24. package/speechflow-cli/dst/speechflow-node-t2t-simulator.js +128 -0
  25. package/speechflow-cli/dst/speechflow-node-t2t-simulator.js.map +1 -0
  26. package/speechflow-cli/dst/speechflow-util-queue.d.ts +9 -3
  27. package/speechflow-cli/dst/speechflow-util-queue.js +39 -17
  28. package/speechflow-cli/dst/speechflow-util-queue.js.map +1 -1
  29. package/speechflow-cli/etc/oxlint.jsonc +1 -0
  30. package/speechflow-cli/package.d/sherpa-onnx+1.12.25.patch +12 -0
  31. package/speechflow-cli/package.json +32 -26
  32. package/speechflow-cli/src/lib.d.ts +30 -4
  33. package/speechflow-cli/src/speechflow-node-a2a-gtcrn-wt.ts +68 -0
  34. package/speechflow-cli/src/speechflow-node-a2a-gtcrn.ts +219 -0
  35. package/speechflow-cli/src/speechflow-node-a2a-meter.ts +2 -2
  36. package/speechflow-cli/src/speechflow-node-a2t-deepgram.ts +48 -27
  37. package/speechflow-cli/src/speechflow-node-t2t-deepl.ts +1 -1
  38. package/speechflow-cli/src/speechflow-node-t2t-profanity.ts +30 -11
  39. package/speechflow-cli/src/speechflow-node-t2t-sentence.ts +152 -60
  40. package/speechflow-cli/src/speechflow-util-queue.ts +44 -19
  41. package/speechflow-ui-db/dst/app-font-fa-brands-400.woff2 +0 -0
  42. package/speechflow-ui-db/dst/app-font-fa-regular-400.woff2 +0 -0
  43. package/speechflow-ui-db/dst/app-font-fa-solid-900.woff2 +0 -0
  44. package/speechflow-ui-db/dst/app-font-fa-v4compatibility.woff2 +0 -0
  45. package/speechflow-ui-db/dst/index.css +1 -1
  46. package/speechflow-ui-db/dst/index.js +47 -46
  47. package/speechflow-ui-db/package.json +21 -18
  48. package/speechflow-ui-db/src/app.vue +64 -19
  49. package/speechflow-ui-st/dst/app-font-fa-brands-400.woff2 +0 -0
  50. package/speechflow-ui-st/dst/app-font-fa-regular-400.woff2 +0 -0
  51. package/speechflow-ui-st/dst/app-font-fa-solid-900.woff2 +0 -0
  52. package/speechflow-ui-st/dst/app-font-fa-v4compatibility.woff2 +0 -0
  53. package/speechflow-ui-st/dst/index.css +1 -1
  54. package/speechflow-ui-st/dst/index.js +65 -64
  55. package/speechflow-ui-st/package.json +22 -19
  56. package/speechflow-ui-st/src/app.vue +9 -8
package/CHANGELOG.md CHANGED
@@ -2,6 +2,31 @@
2
2
  ChangeLog
3
3
  =========
4
4
 
5
+ 2.1.1 (2026-02-18)
6
+ ------------------
7
+
8
+ - IMPROVEMENT: add "endpointing" parameter to "a2t-deepgram" node
9
+ - IMPROVEMENT: allow queue handling to be silenced
10
+ - IMPROVEMENT: improve optical appearance of dashboard
11
+ - CLEANUP: remove "preview" feature on "t2t-sentence"
12
+ - BUGFIX: fix "t2t-sentence" processing for intermediate/final kinds and merging functionality
13
+ - UPDATE: upgrade NPM dependencies
14
+
15
+ 2.1.0 (2026-01-27)
16
+ ------------------
17
+
18
+ - BUGFIX: correctly support English ("en") as the source language in t2t-deepl node
19
+ - BUGFIX: improve rendering of LUFS-M audio meter in Dashboard
20
+ - BUGFIX: fix a2a-meter node by correcting the internal chunk buffer handling
21
+ - IMPROVEMENT: add "keywords" parameter to node "a2t-deepgram"
22
+ - IMPROVEMENT: improve readability of subtitle rendering
23
+ - IMPROVEMENT: improve profanity filtering node
24
+ - IMPROVEMENT: add a2a-gtcrn node for GTCRN-based local noise suppression
25
+ - IMPROVEMENT: support intermediate/final text chunk tagging in t2t-sentence node
26
+ - IMPROVEMENT: support timeout handling for incomplete text chunks in t2t-sentence node
27
+ - IMPROVEMENT: support keywords also for Nova-3 mode in a2t-deepgram node
28
+ - UPDATE: upgrade NPM dependencies
29
+
5
30
  2.0.4 (2026-01-16)
6
31
  ------------------
7
32
 
package/README.md CHANGED
@@ -31,7 +31,7 @@ speech-to-speech).
31
31
  - local Voice Activity Detection (VAD),
32
32
  - local voice gender recognition,
33
33
  - local audio LUFS-S/RMS metering,
34
- - local audio Speex and RNNoise noise suppression,
34
+ - local audio Speex, RNNoise, and GTCRN noise suppression,
35
35
  - local audio compressor and expander dynamics processing,
36
36
  - local audio gain adjustment,
37
37
  - local audio pitch shifting and time stretching,
@@ -356,6 +356,7 @@ First a short overview of the available processing nodes:
356
356
  **a2a-gender**,
357
357
  **a2a-speex**,
358
358
  **a2a-rnnoise**,
359
+ **a2a-gtcrn**,
359
360
  **a2a-compressor**,
360
361
  **a2a-expander**,
361
362
  **a2a-gain**,
@@ -717,6 +718,27 @@ The following nodes process audio chunks only.
717
718
  | Parameter | Position | Default | Requirement |
718
719
  | --------- | --------- | -------- | ------------------------ |
719
720
 
721
+ - Node: **a2a-gtcrn**<br/>
722
+ Purpose: **GTCRN Deep Learning Noise Suppression node**<br/>
723
+ Example: `a2a-gtcrn()`
724
+
725
+ > This node uses GTCRN (Gated Temporal Convolutional Recurrent Network)
726
+ > to perform deep learning based noise suppression and speech denoising.
727
+ > It detects and removes noise from the audio stream while preserving
728
+ > speech quality. The GTCRN ONNX model is automatically downloaded from
729
+ > the sherpa-onnx project on first use. NOTICE: This node internally
730
+ > operates at 16KHz sample rate only. Audio is automatically resampled
731
+ > from SpeechFlow's internal 48KHz to 16KHz for processing, and then
732
+ > resampled back to 48KHz for output.
733
+
734
+ | Port | Payload |
735
+ | ------- | ----------- |
736
+ | input | audio |
737
+ | output | audio |
738
+
739
+ | Parameter | Position | Default | Requirement |
740
+ | --------- | --------- | -------- | ------------------------ |
741
+
720
742
  - Node: **a2a-compressor**<br/>
721
743
  Purpose: **audio compressor node**<br/>
722
744
  Example: `a2a-compressor(thresholdDb: -18)`
@@ -867,26 +889,30 @@ The following nodes convert audio to text chunks.
867
889
 
868
890
  - Node: **a2t-deepgram**<br/>
869
891
  Purpose: **Deepgram Speech-to-Text conversion**<br/>
870
- Example: `a2t-deepgram(language: "de")`<br/>
892
+ Example: `a2t-deepgram(language: "de", keywords: "SpeechFlow, TypeScript")`<br/>
871
893
  Notice: this node requires an API key!
872
894
 
873
895
  > This node performs Speech-to-Text (S2T) conversion, i.e., it
874
896
  > recognizes speech in the input audio stream and outputs a
875
- > corresponding text stream.
897
+ > corresponding text stream. The optional `keywords` parameter
898
+ > accepts a comma or space-separated list of words to boost
899
+ > during recognition, improving accuracy for domain-specific terminology.
876
900
 
877
901
  | Port | Payload |
878
902
  | ------- | ----------- |
879
903
  | input | audio |
880
904
  | output | text |
881
905
 
882
- | Parameter | Position | Default | Requirement |
883
- | ------------ | --------- | -------- | ------------------ |
884
- | **key** | *none* | env.SPEECHFLOW\_DEEPGRAM\_KEY | *none* |
885
- | **keyAdm** | *none* | env.SPEECHFLOW\_DEEPGRAM\_KEY\_ADM | *none* |
886
- | **model** | 0 | "nova-2" | *none* |
887
- | **version** | 1 | "latest" | *none* |
888
- | **language** | 2 | "multi" | *none* |
889
- | **interim** | 3 | false | *none* |
906
+ | Parameter | Position | Default | Requirement |
907
+ | ---------------- | --------- | -------- | ------------------ |
908
+ | **key** | *none* | env.SPEECHFLOW\_DEEPGRAM\_KEY | *none* |
909
+ | **keyAdm** | *none* | env.SPEECHFLOW\_DEEPGRAM\_KEY\_ADM | *none* |
910
+ | **model** | 0 | "nova-2" | *none* |
911
+ | **version** | 1 | "latest" | *none* |
912
+ | **language** | 2 | "multi" | *none* |
913
+ | **interim** | 3 | false | *none* |
914
+ | **endpointing** | 4 | 0 | *none* |
915
+ | **keywords** | 5 | "" | *none* |
890
916
 
891
917
  - Node: **a2t-google**<br/>
892
918
  Purpose: **Google Cloud Speech-to-Text conversion**<br/>
@@ -1131,12 +1157,15 @@ The following nodes process text chunks only.
1131
1157
 
1132
1158
  - Node: **t2t-sentence**<br/>
1133
1159
  Purpose: **sentence splitting/merging**<br/>
1134
- Example: `t2t-sentence()`<br/>
1160
+ Example: `t2t-sentence(timeout: 3000)`<br/>
1135
1161
 
1136
1162
  > This node allows you to ensure that a text stream is split or merged
1137
1163
  > into complete sentences. It is primarily intended to be used after
1138
1164
  > the "a2t-deepgram" node and before "t2t-deepl" or "t2a-elevenlabs" nodes in
1139
- > order to improve overall quality.
1165
+ > order to improve overall quality. Intermediate text chunks are passed
1166
+ > through immediately, while final chunks are queued for sentence splitting.
1167
+ > If an incomplete sentence remains in the queue longer than the timeout,
1168
+ > it is promoted to a final chunk and emitted.
1140
1169
 
1141
1170
  | Port | Payload |
1142
1171
  | ------- | ----------- |
@@ -1145,6 +1174,8 @@ The following nodes process text chunks only.
1145
1174
 
1146
1175
  | Parameter | Position | Default | Requirement |
1147
1176
  | ------------ | --------- | -------- | ------------------ |
1177
+ | **timeout** | 0 | 3000 | *none* |
1178
+ | **interim** | 1 | false | *none* |
1148
1179
 
1149
1180
  - Node: **t2t-subtitle**<br/>
1150
1181
  Purpose: **SRT/VTT Subtitle Generation**<br/>
@@ -1411,8 +1442,9 @@ History
1411
1442
  **SpeechFlow**, as a technical cut-through, was initially created in
1412
1443
  March 2024 for use in the msg Filmstudio context. It was later refined
1413
1444
  into a more complete toolkit in April 2025 and this way the first time
1414
- could be used in production. It was fully refactored in July 2025 in
1415
- order to support timestamps in the streams processing.
1445
+ could be used. It was fully refactored in July 2025 in order to support
1446
+ timestamps in the streams processing. In February 2026 it was the first
1447
+ time used in production in the msg Filmstudio.
1416
1448
 
1417
1449
  Copyright & License
1418
1450
  -------------------
@@ -88,58 +88,31 @@ studio-transcription: |
88
88
  # Real-time studio translation from German to English,
89
89
  # including the capturing of all involved inputs and outputs:
90
90
  studio-translation: |
91
- xio-device(device: env.SPEECHFLOW_DEVICE_MIC, mode: "r") | {
92
- a2a-gender() | {
93
- a2a-meter(interval: 250, dashboard: "meter1") |
94
- a2a-wav(mode: "encode") |
95
- xio-file(path: "program-de.wav", mode: "w", type: "audio"),
96
- a2t-deepgram(language: "de", interim: true) | {
97
- x2x-trace(name: "trace1", type: "text", dashboard: "text1") |
98
- t2t-subtitle(format: "vtt", words: true) |
99
- xio-file(path: "program-de.vtt", mode: "w", type: "text"),
91
+ xio-device(device: env.SPEECHFLOW_DEVICE_MIC, mode: "r", chunk: 200) | {
92
+ a2a-meter(interval: 50, dashboard: "audio-de", mode: "sink"),
93
+ a2t-deepgram(language: "de", model: "nova-3", interim: true, keywords: env.SPEECHFLOW_KEYWORDS) | {
94
+ t2t-profanity(lang: "de") | {
95
+ x2x-trace(type: "text", dashboard: "text-de-interim", mode: "sink"),
96
+ t2t-subtitle(mode: "render", addr: env.SPEECHFLOW_IPADDR, port: 8585),
100
97
  t2t-sentence() | {
101
- x2x-trace(name: "trace2", type: "text", notify: true, dashboard: "text2") |
98
+ x2x-filter(name: "final", type: "text", var: "kind", op: "==", val: "final") | {
102
99
  t2t-format(width: 80) |
103
- xio-file(path: "program-de.txt", mode: "w", type: "text"),
100
+ xio-file(path: `${env.SPEECHFLOW_DATADIR}/program-de.txt`, mode: "w", type: "text"),
101
+ t2t-subtitle(mode: "export", format: "srt") |
102
+ xio-file(path: `${env.SPEECHFLOW_DATADIR}/program-de.srt`, mode: "w", type: "text"),
103
+ x2x-trace(type: "text", dashboard: "text-de-final", mode: "sink") /* DE-Final */
104
+ },
104
105
  t2t-deepl(src: "de", dst: "en") | {
105
- x2x-trace(name: "trace3", type: "text", dashboard: "text3") | {
106
+ x2x-trace(type: "text", dashboard: "text-en-interim", mode: "sink"),
107
+ t2t-subtitle(mode: "render", addr: env.SPEECHFLOW_IPADDR, port: 8686),
108
+ x2x-filter(name: "final", type: "text", var: "kind", op: "==", val: "final") | {
106
109
  t2t-format(width: 80) |
107
- xio-file(path: "program-en.txt", mode: "w", type: "text"),
108
- t2t-subtitle(format: "vtt", words: false) |
109
- xio-file(path: "program-en.vtt", mode: "w", type: "text"),
110
- {
111
- x2x-filter(name: "S2T-male", type: "text", var: "meta:gender", op: "==", val: "male") |
112
- t2a-elevenlabs(voice: "Mark", optimize: "latency", speed: 1.05, language: "en"),
113
- x2x-filter(name: "S2T-female", type: "text", var: "meta:gender", op: "==", val: "female") |
114
- t2a-elevenlabs(voice: "Brittney", optimize: "latency", speed: 1.05, language: "en")
115
- } | {
116
- a2a-meter(interval: 250, dashboard: "meter2"),
117
- a2a-wav(mode: "encode") |
118
- xio-file(path: "program-en.wav", mode: "w", type: "audio"),
119
- xio-device(device: env.SPEECHFLOW_DEVICE_SPK, mode: "w")
120
- }
121
- }
122
- }
123
- }
124
- }
125
- }
126
- }
127
-
128
- # Test-drive for development
129
- test: |
130
- xio-device(device: env.SPEECHFLOW_DEVICE_MIC, mode: "r", chunk: 200) | {
131
- a2a-meter(interval: 100, dashboard: "meter1", mode: "sink"),
132
- a2a-gender() | {
133
- a2t-deepgram(language: "de", model: "nova-2", interim: true) | {
134
- x2x-trace(type: "text", mode: "sink", dashboard: "text1"),
135
- t2t-subtitle(mode: "render", addr: "127.0.0.1", port: 8585),
136
- x2x-filter(name: "final", type: "text", var: "kind", op: "==", val: "final") | {
137
- t2t-sentence() | {
138
- x2x-trace(type: "text", dashboard: "text2", mode: "sink"),
139
- t2t-deepl(src: "de", dst: "en") | {
140
- x2x-trace(type: "text", dashboard: "text3", mode: "sink"),
110
+ xio-file(path: `${env.SPEECHFLOW_DATADIR}/program-en.txt`, mode: "w", type: "text"),
111
+ t2t-subtitle(mode: "export", format: "srt") |
112
+ xio-file(path: `${env.SPEECHFLOW_DATADIR}/program-en.srt`, mode: "w", type: "text"),
113
+ x2x-trace(type: "text", dashboard: "text-en-final", mode: "sink"),
141
114
  t2a-elevenlabs(voice: "Mark", optimize: "latency", speed: 1.05, language: "en") | {
142
- a2a-meter(interval: 100, dashboard: "meter2", mode: "sink"),
115
+ a2a-meter(interval: 50, dashboard: "audio-en", mode: "sink"),
143
116
  xio-device(device: env.SPEECHFLOW_DEVICE_SPK, mode: "w")
144
117
  }
145
118
  }
@@ -148,4 +121,3 @@ test: |
148
121
  }
149
122
  }
150
123
  }
151
-
package/etc/stx.conf CHANGED
@@ -68,8 +68,8 @@ run:dev
68
68
  # [top-level] test drive
69
69
  test
70
70
  node --enable-source-maps speechflow-cli/dst/speechflow.js \
71
- -v info -c test@etc/speechflow.yaml \
72
- -d audio:meter1:DE,text:text1:DE-Interim,text:text2:DE-Final,text:text3:EN,audio:meter2:EN
71
+ -v info -c studio-translation@etc/speechflow.yaml \
72
+ -d audio:audio-de:DE,text:text-de-interim:DE-Interim,text:text-en-interim:EN-Interim,text:text-de-final:DE-Final,text:text-en-final:EN-Final,audio:audio-en:EN
73
73
 
74
74
  # [top-level] remove all generated artifacts (reverse of "npm start build")
75
75
  clean
package/package.json CHANGED
@@ -1,8 +1,8 @@
1
1
  {
2
2
  "name": "speechflow",
3
- "version": "2.0.4",
4
- "x-stdver": "2.0.3-GA",
5
- "x-release": "2025-12-24",
3
+ "version": "2.1.1",
4
+ "x-stdver": "2.1.1-GA",
5
+ "x-release": "2026-02-18",
6
6
  "homepage": "https://github.com/rse/speechflow",
7
7
  "description": "Speech Processing Flow Graph",
8
8
  "keywords": [ "speech", "audio", "flow", "graph" ],
@@ -23,11 +23,11 @@
23
23
  "nodemon": "3.1.11",
24
24
  "watch": "1.0.2",
25
25
  "concurrently": "9.2.1",
26
- "wait-on": "9.0.3",
26
+ "wait-on": "9.0.4",
27
27
  "cross-env": "10.1.0",
28
28
  "shx": "0.4.0",
29
- "secretlint": "11.3.0",
30
- "@secretlint/secretlint-rule-preset-recommend": "11.3.0"
29
+ "secretlint": "11.3.1",
30
+ "@secretlint/secretlint-rule-preset-recommend": "11.3.1"
31
31
  },
32
32
  "engines": {
33
33
  "npm": ">=10.0.0",
@@ -0,0 +1,60 @@
1
+ "use strict";
2
+ /*
3
+ ** SpeechFlow - Speech Processing Flow Graph
4
+ ** Copyright (c) 2024-2025 Dr. Ralf S. Engelschall <rse@engelschall.com>
5
+ ** Licensed under GPL 3.0 <https://spdx.org/licenses/GPL-3.0-only>
6
+ */
7
+ var __importDefault = (this && this.__importDefault) || function (mod) {
8
+ return (mod && mod.__esModule) ? mod : { "default": mod };
9
+ };
10
+ Object.defineProperty(exports, "__esModule", { value: true });
11
+ /* standard dependencies */
12
+ const node_worker_threads_1 = require("node:worker_threads");
13
+ /* external dependencies */
14
+ const sherpa_onnx_1 = __importDefault(require("sherpa-onnx"));
15
+ /* receive model path from parent thread */
16
+ const modelPath = node_worker_threads_1.workerData.modelPath;
17
+ /* GTCRN state */
18
+ let denoiser;
19
+ /* helper: log message to parent */
20
+ const log = (level, message) => {
21
+ node_worker_threads_1.parentPort.postMessage({ type: "log", level, message });
22
+ };
23
+ (async () => {
24
+ try {
25
+ /* create denoiser */
26
+ const config = {
27
+ model: {
28
+ gtcrn: {
29
+ model: modelPath
30
+ }
31
+ },
32
+ numThreads: 1
33
+ };
34
+ denoiser = sherpa_onnx_1.default.createOfflineSpeechDenoiser(config);
35
+ log("info", "GTCRN denoiser initialized");
36
+ node_worker_threads_1.parentPort.postMessage({ type: "ready" });
37
+ }
38
+ catch (err) {
39
+ node_worker_threads_1.parentPort.postMessage({ type: "failed", message: `failed to initialize GTCRN: ${err}` });
40
+ process.exit(1);
41
+ }
42
+ })();
43
+ /* receive messages */
44
+ node_worker_threads_1.parentPort.on("message", (msg) => {
45
+ if (msg.type === "process") {
46
+ const { id, samples } = msg;
47
+ /* process with GTCRN denoiser
48
+ NOTICE: GTCRN can also resample out input, but will always
49
+ produces 16KHz output, so we already fixate 16KHz input here! */
50
+ const result = denoiser.run(samples, 16000);
51
+ /* copy to transferable ArrayBuffer and send back to parent */
52
+ const samplesDenoised = new Float32Array(result.samples);
53
+ node_worker_threads_1.parentPort.postMessage({ type: "process-done", id, data: samplesDenoised }, [samplesDenoised.buffer]);
54
+ }
55
+ else if (msg.type === "close") {
56
+ /* shutdown this process */
57
+ process.exit(0);
58
+ }
59
+ });
60
+ //# sourceMappingURL=speechflow-node-a2a-gtcrn-wt.js.map
@@ -0,0 +1 @@
1
+ {"version":3,"file":"speechflow-node-a2a-gtcrn-wt.js","sourceRoot":"","sources":["../src/speechflow-node-a2a-gtcrn-wt.ts"],"names":[],"mappings":";AAAA;;;;EAIE;;;;;AAEF,6BAA6B;AAC7B,6DAAgE;AAEhE,6BAA6B;AAC7B,8DAAwD;AAMxD,6CAA6C;AAC7C,MAAM,SAAS,GAAW,gCAAU,CAAC,SAAS,CAAA;AAE9C,mBAAmB;AACnB,IAAI,QAAyC,CAAA;AAE7C,qCAAqC;AACrC,MAAM,GAAG,GAAG,CAAC,KAAa,EAAE,OAAe,EAAE,EAAE;IAC3C,gCAAW,CAAC,WAAW,CAAC,EAAE,IAAI,EAAE,KAAK,EAAE,KAAK,EAAE,OAAO,EAAE,CAAC,CAAA;AAC5D,CAAC,CAGA;AAAA,CAAC,KAAK,IAAI,EAAE;IACT,IAAI,CAAC;QACD,uBAAuB;QACvB,MAAM,MAAM,GAA6B;YACrC,KAAK,EAAE;gBACH,KAAK,EAAE;oBACH,KAAK,EAAE,SAAS;iBACnB;aACJ;YACD,UAAU,EAAE,CAAC;SAChB,CAAA;QACD,QAAQ,GAAG,qBAAU,CAAC,2BAA2B,CAAC,MAAM,CAAC,CAAA;QACzD,GAAG,CAAC,MAAM,EAAE,4BAA4B,CAAC,CAAA;QACzC,gCAAW,CAAC,WAAW,CAAC,EAAE,IAAI,EAAE,OAAO,EAAE,CAAC,CAAA;IAC9C,CAAC;IACD,OAAO,GAAG,EAAE,CAAC;QACT,gCAAW,CAAC,WAAW,CAAC,EAAE,IAAI,EAAE,QAAQ,EAAE,OAAO,EAAE,+BAA+B,GAAG,EAAE,EAAE,CAAC,CAAA;QAC1F,OAAO,CAAC,IAAI,CAAC,CAAC,CAAC,CAAA;IACnB,CAAC;AACL,CAAC,CAAC,EAAE,CAAA;AAEJ,wBAAwB;AACxB,gCAAW,CAAC,EAAE,CAAC,SAAS,EAAE,CAAC,GAAG,EAAE,EAAE;IAC9B,IAAI,GAAG,CAAC,IAAI,KAAK,SAAS,EAAE,CAAC;QACzB,MAAM,EAAE,EAAE,EAAE,OAAO,EAAE,GAAG,GAAG,CAAA;QAE3B;;6EAEqE;QACrE,MAAM,MAAM,GAAG,QAAQ,CAAC,GAAG,CAAC,OAAO,EAAE,KAAK,CAAC,CAAA;QAE3C,gEAAgE;QAChE,MAAM,eAAe,GAAG,IAAI,YAAY,CAAC,MAAM,CAAC,OAAO,CAAC,CAAA;QACxD,gCAAW,CAAC,WAAW,CAAC,EAAE,IAAI,EAAE,cAAc,EAAE,EAAE,EAAE,IAAI,EAAE,eAAe,EAAE,EAAE,CAAE,eAAe,CAAC,MAAM,CAAE,CAAC,CAAA;IAC5G,CAAC;SACI,IAAI,GAAG,CAAC,IAAI,KAAK,OAAO,EAAE,CAAC;QAC5B,6BAA6B;QAC7B,OAAO,CAAC,IAAI,CAAC,CAAC,CAAC,CAAA;IACnB,CAAC;AACL,CAAC,CAAC,CAAA"}
@@ -0,0 +1,15 @@
1
+ import SpeechFlowNode from "./speechflow-node";
2
+ export default class SpeechFlowNodeA2AGTCRN extends SpeechFlowNode {
3
+ static name: string;
4
+ private closing;
5
+ private worker;
6
+ private resamplerDown;
7
+ private resamplerUp;
8
+ constructor(id: string, cfg: {
9
+ [id: string]: any;
10
+ }, opts: {
11
+ [id: string]: any;
12
+ }, args: any[]);
13
+ open(): Promise<void>;
14
+ close(): Promise<void>;
15
+ }
@@ -0,0 +1,234 @@
1
+ "use strict";
2
+ /*
3
+ ** SpeechFlow - Speech Processing Flow Graph
4
+ ** Copyright (c) 2024-2025 Dr. Ralf S. Engelschall <rse@engelschall.com>
5
+ ** Licensed under GPL 3.0 <https://spdx.org/licenses/GPL-3.0-only>
6
+ */
7
+ var __createBinding = (this && this.__createBinding) || (Object.create ? (function(o, m, k, k2) {
8
+ if (k2 === undefined) k2 = k;
9
+ var desc = Object.getOwnPropertyDescriptor(m, k);
10
+ if (!desc || ("get" in desc ? !m.__esModule : desc.writable || desc.configurable)) {
11
+ desc = { enumerable: true, get: function() { return m[k]; } };
12
+ }
13
+ Object.defineProperty(o, k2, desc);
14
+ }) : (function(o, m, k, k2) {
15
+ if (k2 === undefined) k2 = k;
16
+ o[k2] = m[k];
17
+ }));
18
+ var __setModuleDefault = (this && this.__setModuleDefault) || (Object.create ? (function(o, v) {
19
+ Object.defineProperty(o, "default", { enumerable: true, value: v });
20
+ }) : function(o, v) {
21
+ o["default"] = v;
22
+ });
23
+ var __importStar = (this && this.__importStar) || (function () {
24
+ var ownKeys = function(o) {
25
+ ownKeys = Object.getOwnPropertyNames || function (o) {
26
+ var ar = [];
27
+ for (var k in o) if (Object.prototype.hasOwnProperty.call(o, k)) ar[ar.length] = k;
28
+ return ar;
29
+ };
30
+ return ownKeys(o);
31
+ };
32
+ return function (mod) {
33
+ if (mod && mod.__esModule) return mod;
34
+ var result = {};
35
+ if (mod != null) for (var k = ownKeys(mod), i = 0; i < k.length; i++) if (k[i] !== "default") __createBinding(result, mod, k[i]);
36
+ __setModuleDefault(result, mod);
37
+ return result;
38
+ };
39
+ })();
40
+ var __importDefault = (this && this.__importDefault) || function (mod) {
41
+ return (mod && mod.__esModule) ? mod : { "default": mod };
42
+ };
43
+ Object.defineProperty(exports, "__esModule", { value: true });
44
+ /* standard dependencies */
45
+ const node_fs_1 = __importDefault(require("node:fs"));
46
+ const node_path_1 = __importDefault(require("node:path"));
47
+ const node_stream_1 = __importDefault(require("node:stream"));
48
+ const node_worker_threads_1 = require("node:worker_threads");
49
+ /* external dependencies */
50
+ const axios_1 = __importDefault(require("axios"));
51
+ const speex_resampler_1 = __importDefault(require("speex-resampler"));
52
+ /* internal dependencies */
53
+ const speechflow_node_1 = __importDefault(require("./speechflow-node"));
54
+ const util = __importStar(require("./speechflow-util"));
55
+ /* SpeechFlow node for GTCRN based noise suppression in audio-to-audio passing */
56
+ class SpeechFlowNodeA2AGTCRN extends speechflow_node_1.default {
57
+ /* declare official node name */
58
+ static name = "a2a-gtcrn";
59
+ /* internal state */
60
+ closing = false;
61
+ worker = null;
62
+ resamplerDown = null;
63
+ resamplerUp = null;
64
+ /* construct node */
65
+ constructor(id, cfg, opts, args) {
66
+ super(id, cfg, opts, args);
67
+ /* declare node configuration parameters */
68
+ this.configure({});
69
+ /* declare node input/output format */
70
+ this.input = "audio";
71
+ this.output = "audio";
72
+ }
73
+ /* open node */
74
+ async open() {
75
+ /* clear destruction flag */
76
+ this.closing = false;
77
+ /* ensure GTCRN ONNX model is available */
78
+ const modelUrl = "https://github.com/k2-fsa/sherpa-onnx/" +
79
+ "releases/download/speech-enhancement-models/gtcrn_simple.onnx";
80
+ const modelDir = node_path_1.default.join(this.config.cacheDir, "gtcrn");
81
+ const modelPath = node_path_1.default.resolve(modelDir, "gtcrn_simple.onnx");
82
+ const stat = await node_fs_1.default.promises.stat(modelPath).catch(() => null);
83
+ if (stat === null) {
84
+ this.log("info", `GTCRN model downloading from "${modelUrl}"`);
85
+ await node_fs_1.default.promises.mkdir(modelDir, { recursive: true });
86
+ const response = await axios_1.default.get(modelUrl, {
87
+ responseType: "arraybuffer",
88
+ onDownloadProgress: (progressEvent) => {
89
+ if (progressEvent.total) {
90
+ const percent = (progressEvent.loaded / progressEvent.total) * 100;
91
+ this.log("info", `GTCRN model download: ${percent.toFixed(1)}%`);
92
+ }
93
+ }
94
+ });
95
+ await node_fs_1.default.promises.writeFile(modelPath, Buffer.from(response.data));
96
+ this.log("info", `GTCRN model downloaded to "${modelPath}"`);
97
+ }
98
+ /* establish resamplers from SpeechFlow's internal 48KHz
99
+ to GTCRN's required 16KHz format and back */
100
+ this.resamplerDown = new speex_resampler_1.default(1, this.config.audioSampleRate, 16000, 7);
101
+ this.resamplerUp = new speex_resampler_1.default(1, 16000, this.config.audioSampleRate, 7);
102
+ /* initialize worker */
103
+ this.worker = new node_worker_threads_1.Worker(node_path_1.default.resolve(__dirname, "speechflow-node-a2a-gtcrn-wt.js"), {
104
+ workerData: { modelPath }
105
+ });
106
+ this.worker.on("error", (err) => {
107
+ this.log("error", `GTCRN worker thread error: ${err}`);
108
+ this.stream?.emit("error", err);
109
+ });
110
+ this.worker.on("exit", (code) => {
111
+ if (code !== 0)
112
+ this.log("error", `GTCRN worker thread exited with error code ${code}`);
113
+ else
114
+ this.log("info", `GTCRN worker thread exited with regular code ${code}`);
115
+ });
116
+ /* wait for worker to be ready */
117
+ await new Promise((resolve, reject) => {
118
+ const timeout = setTimeout(() => {
119
+ reject(new Error("GTCRN worker thread initialization timeout"));
120
+ }, 60 * 1000);
121
+ const onMessage = (msg) => {
122
+ if (typeof msg === "object" && msg !== null && msg.type === "log")
123
+ this.log(msg.level, msg.message);
124
+ else if (typeof msg === "object" && msg !== null && msg.type === "ready") {
125
+ clearTimeout(timeout);
126
+ this.worker.off("message", onMessage);
127
+ resolve();
128
+ }
129
+ else if (typeof msg === "object" && msg !== null && msg.type === "failed") {
130
+ clearTimeout(timeout);
131
+ this.worker.off("message", onMessage);
132
+ reject(new Error(msg.message ?? "GTCRN worker thread initialization failed"));
133
+ }
134
+ };
135
+ this.worker.on("message", onMessage);
136
+ this.worker.once("error", (err) => {
137
+ clearTimeout(timeout);
138
+ reject(err);
139
+ });
140
+ });
141
+ /* receive message from worker */
142
+ const pending = new Map();
143
+ this.worker.on("exit", () => {
144
+ pending.clear();
145
+ });
146
+ this.worker.on("message", (msg) => {
147
+ if (typeof msg === "object" && msg !== null && msg.type === "process-done") {
148
+ const cb = pending.get(msg.id);
149
+ pending.delete(msg.id);
150
+ if (cb)
151
+ cb(msg.data);
152
+ else
153
+ this.log("warning", `GTCRN worker thread sent back unexpected id: ${msg.id}`);
154
+ }
155
+ else if (typeof msg === "object" && msg !== null && msg.type === "log")
156
+ this.log(msg.level, msg.message);
157
+ else
158
+ this.log("warning", `GTCRN worker thread sent unexpected message: ${JSON.stringify(msg)}`);
159
+ });
160
+ /* send message to worker */
161
+ let seq = 0;
162
+ const workerProcess = async (samples) => {
163
+ if (this.closing)
164
+ return samples;
165
+ const id = `${seq++}`;
166
+ return new Promise((resolve) => {
167
+ pending.set(id, (result) => { resolve(result); });
168
+ this.worker.postMessage({ type: "process", id, samples }, [samples.buffer]);
169
+ });
170
+ };
171
+ /* establish a transform stream */
172
+ const self = this;
173
+ this.stream = new node_stream_1.default.Transform({
174
+ readableObjectMode: true,
175
+ writableObjectMode: true,
176
+ decodeStrings: false,
177
+ transform(chunk, encoding, callback) {
178
+ if (self.closing) {
179
+ callback(new Error("stream already destroyed"));
180
+ return;
181
+ }
182
+ if (!Buffer.isBuffer(chunk.payload))
183
+ callback(new Error("invalid chunk payload type"));
184
+ else {
185
+ /* resample Buffer from 48KHz (SpeechFlow) to 16KHz (GTCRN) */
186
+ const resampledDown = self.resamplerDown.processChunk(chunk.payload);
187
+ /* convert Buffer into Float32Array */
188
+ const payload = util.convertBufToF32(resampledDown);
189
+ /* process with GTCRN */
190
+ workerProcess(payload).then((result) => {
191
+ /* convert Float32Array into Buffer */
192
+ const buf = util.convertF32ToBuf(result);
193
+ /* resample Buffer from 16KHz (GTCRN) back to 48KHz (SpeechFlow) */
194
+ const resampledUp = self.resamplerUp.processChunk(buf);
195
+ /* update chunk */
196
+ chunk.payload = resampledUp;
197
+ /* forward updated chunk */
198
+ this.push(chunk);
199
+ callback();
200
+ }).catch((err) => {
201
+ const error = util.ensureError(err);
202
+ self.log("warning", `processing of chunk failed: ${error.message}`);
203
+ callback(error);
204
+ });
205
+ }
206
+ },
207
+ final(callback) {
208
+ callback();
209
+ }
210
+ });
211
+ }
212
+ /* close node */
213
+ async close() {
214
+ /* indicate closing */
215
+ this.closing = true;
216
+ /* shutdown worker */
217
+ if (this.worker !== null) {
218
+ this.worker.terminate();
219
+ this.worker = null;
220
+ }
221
+ /* shutdown stream */
222
+ if (this.stream !== null) {
223
+ await util.destroyStream(this.stream);
224
+ this.stream = null;
225
+ }
226
+ /* destroy resamplers */
227
+ if (this.resamplerDown !== null)
228
+ this.resamplerDown = null;
229
+ if (this.resamplerUp !== null)
230
+ this.resamplerUp = null;
231
+ }
232
+ }
233
+ exports.default = SpeechFlowNodeA2AGTCRN;
234
+ //# sourceMappingURL=speechflow-node-a2a-gtcrn.js.map
@@ -0,0 +1 @@
1
+ {"version":3,"file":"speechflow-node-a2a-gtcrn.js","sourceRoot":"","sources":["../src/speechflow-node-a2a-gtcrn.ts"],"names":[],"mappings":";AAAA;;;;EAIE;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;AAEF,6BAA6B;AAC7B,sDAAyD;AACzD,0DAA2D;AAC3D,8DAA6D;AAC7D,6DAAqE;AAErE,6BAA6B;AAC7B,kDAAuD;AACvD,sEAAiE;AAEjE,6BAA6B;AAC7B,wEAAmE;AACnE,wDAAmE;AAEnE,mFAAmF;AACnF,MAAqB,sBAAuB,SAAQ,yBAAc;IAC9D,kCAAkC;IAC3B,MAAM,CAAC,IAAI,GAAG,WAAW,CAAA;IAEhC,sBAAsB;IACd,OAAO,GAAG,KAAK,CAAA;IACf,MAAM,GAAkB,IAAI,CAAA;IAC5B,aAAa,GAA0B,IAAI,CAAA;IAC3C,WAAW,GAA4B,IAAI,CAAA;IAEnD,sBAAsB;IACtB,YAAa,EAAU,EAAE,GAA4B,EAAE,IAA6B,EAAE,IAAW;QAC7F,KAAK,CAAC,EAAE,EAAE,GAAG,EAAE,IAAI,EAAE,IAAI,CAAC,CAAA;QAE1B,6CAA6C;QAC7C,IAAI,CAAC,SAAS,CAAC,EAAE,CAAC,CAAA;QAElB,wCAAwC;QACxC,IAAI,CAAC,KAAK,GAAI,OAAO,CAAA;QACrB,IAAI,CAAC,MAAM,GAAG,OAAO,CAAA;IACzB,CAAC;IAED,iBAAiB;IACjB,KAAK,CAAC,IAAI;QACN,8BAA8B;QAC9B,IAAI,CAAC,OAAO,GAAG,KAAK,CAAA;QAEpB,4CAA4C;QAC5C,MAAM,QAAQ,GAAI,wCAAwC;YACtD,+DAA+D,CAAA;QACnE,MAAM,QAAQ,GAAI,mBAAI,CAAC,IAAI,CAAC,IAAI,CAAC,MAAM,CAAC,QAAQ,EAAE,OAAO,CAAC,CAAA;QAC1D,MAAM,SAAS,GAAG,mBAAI,CAAC,OAAO,CAAC,QAAQ,EAAE,mBAAmB,CAAC,CAAA;QAC7D,MAAM,IAAI,GAAG,MAAM,iBAAE,CAAC,QAAQ,CAAC,IAAI,CAAC,SAAS,CAAC,CAAC,KAAK,CAAC,GAAG,EAAE,CAAC,IAAI,CAAC,CAAA;QAChE,IAAI,IAAI,KAAK,IAAI,EAAE,CAAC;YAChB,IAAI,CAAC,GAAG,CAAC,MAAM,EAAE,iCAAiC,QAAQ,GAAG,CAAC,CAAA;YAC9D,MAAM,iBAAE,CAAC,QAAQ,CAAC,KAAK,CAAC,QAAQ,EAAE,EAAE,SAAS,EAAE,IAAI,EAAE,CAAC,CAAA;YACtD,MAAM,QAAQ,GAAG,MAAM,eAAK,CAAC,GAAG,CAAC,QAAQ,EAAE;gBACvC,YAAY,EAAE,aAAa;gBAC3B,kBAAkB,EAAE,CAAC,aAAa,EAAE,EAAE;oBAClC,IAAI,aAAa,CAAC,KAAK,EAAE,CAAC;wBACtB,MAAM,OAAO,GAAG,CAAC,aAAa,CAAC,MAAM,GAAG,aAAa,CAAC,KAAK,CAAC,GAAG,GAAG,CAAA;wBAClE,IAAI,CAAC,GAAG,CAAC,MAAM,EAAE,yBAAyB,OAAO,CAAC,OAAO,CAAC,CAAC,CAAC,GAAG,CAAC,CAAA;oBACpE,CAAC;gBACL,CAAC;aACJ,CAAC,CAAA;YACF,MAAM,iBAAE,CAAC,QAAQ,CAAC,SAAS,CAAC,SAAS,EAAE,MAAM,CAAC,IAAI,CAAC,QAAQ,CAAC,IAAI,CAAC,CAAC,CAAA;YAClE,IAAI,CAAC,GAAG,CAAC,MAAM,EAAE,8BAA8B,SAAS,GAAG,CAAC,CAAA;QAChE,CAAC;QAED;yDACiD;QACjD,IAAI,CAAC,aAAa,GAAG,IAAI,yBAAc,CA
AC,CAAC,EAAE,IAAI,CAAC,MAAM,CAAC,eAAe,EAAE,KAAK,EAAE,CAAC,CAAC,CAAA;QACjF,IAAI,CAAC,WAAW,GAAK,IAAI,yBAAc,CAAC,CAAC,EAAE,KAAK,EAAE,IAAI,CAAC,MAAM,CAAC,eAAe,EAAE,CAAC,CAAC,CAAA;QAEjF,yBAAyB;QACzB,IAAI,CAAC,MAAM,GAAG,IAAI,4BAAM,CAAC,mBAAI,CAAC,OAAO,CAAC,SAAS,EAAE,iCAAiC,CAAC,EAAE;YACjF,UAAU,EAAE,EAAE,SAAS,EAAE;SAC5B,CAAC,CAAA;QACF,IAAI,CAAC,MAAM,CAAC,EAAE,CAAC,OAAO,EAAE,CAAC,GAAG,EAAE,EAAE;YAC5B,IAAI,CAAC,GAAG,CAAC,OAAO,EAAE,8BAA8B,GAAG,EAAE,CAAC,CAAA;YACtD,IAAI,CAAC,MAAM,EAAE,IAAI,CAAC,OAAO,EAAE,GAAG,CAAC,CAAA;QACnC,CAAC,CAAC,CAAA;QACF,IAAI,CAAC,MAAM,CAAC,EAAE,CAAC,MAAM,EAAE,CAAC,IAAI,EAAE,EAAE;YAC5B,IAAI,IAAI,KAAK,CAAC;gBACV,IAAI,CAAC,GAAG,CAAC,OAAO,EAAE,8CAA8C,IAAI,EAAE,CAAC,CAAA;;gBAEvE,IAAI,CAAC,GAAG,CAAC,MAAM,EAAE,gDAAgD,IAAI,EAAE,CAAC,CAAA;QAChF,CAAC,CAAC,CAAA;QAEF,mCAAmC;QACnC,MAAM,IAAI,OAAO,CAAO,CAAC,OAAO,EAAE,MAAM,EAAE,EAAE;YACxC,MAAM,OAAO,GAAG,UAAU,CAAC,GAAG,EAAE;gBAC5B,MAAM,CAAC,IAAI,KAAK,CAAC,4CAA4C,CAAC,CAAC,CAAA;YACnE,CAAC,EAAE,EAAE,GAAG,IAAI,CAAC,CAAA;YACb,MAAM,SAAS,GAAG,CAAC,GAAQ,EAAE,EAAE;gBAC3B,IAAI,OAAO,GAAG,KAAK,QAAQ,IAAI,GAAG,KAAK,IAAI,IAAI,GAAG,CAAC,IAAI,KAAK,KAAK;oBAC7D,IAAI,CAAC,GAAG,CAAC,GAAG,CAAC,KAAK,EAAE,GAAG,CAAC,OAAO,CAAC,CAAA;qBAC/B,IAAI,OAAO,GAAG,KAAK,QAAQ,IAAI,GAAG,KAAK,IAAI,IAAI,GAAG,CAAC,IAAI,KAAK,OAAO,EAAE,CAAC;oBACvE,YAAY,CAAC,OAAO,CAAC,CAAA;oBACrB,IAAI,CAAC,MAAO,CAAC,GAAG,CAAC,SAAS,EAAE,SAAS,CAAC,CAAA;oBACtC,OAAO,EAAE,CAAA;gBACb,CAAC;qBACI,IAAI,OAAO,GAAG,KAAK,QAAQ,IAAI,GAAG,KAAK,IAAI,IAAI,GAAG,CAAC,IAAI,KAAK,QAAQ,EAAE,CAAC;oBACxE,YAAY,CAAC,OAAO,CAAC,CAAA;oBACrB,IAAI,CAAC,MAAO,CAAC,GAAG,CAAC,SAAS,EAAE,SAAS,CAAC,CAAA;oBACtC,MAAM,CAAC,IAAI,KAAK,CAAC,GAAG,CAAC,OAAO,IAAI,2CAA2C,CAAC,CAAC,CAAA;gBACjF,CAAC;YACL,CAAC,CAAA;YACD,IAAI,CAAC,MAAO,CAAC,EAAE,CAAC,SAAS,EAAE,SAAS,CAAC,CAAA;YACrC,IAAI,CAAC,MAAO,CAAC,IAAI,CAAC,OAAO,EAAE,CAAC,GAAG,EAAE,EAAE;gBAC/B,YAAY,CAAC,OAAO,CAAC,CAAA;gBACrB,MAAM,CAAC,GAAG,CAAC,CAAA;YACf,CAAC,CAAC,CAAA;QACN,CAAC,CAAC,CAAA;QAEF,mCAAmC;QACnC,MAAM,OAAO,GAAG,IAAI,GAAG,EAAoD,CAAA;QAC3E,IAAI,CAAC,MAAM,CAAC,EAAE,CAAC,MA
AM,EAAE,GAAG,EAAE;YACxB,OAAO,CAAC,KAAK,EAAE,CAAA;QACnB,CAAC,CAAC,CAAA;QACF,IAAI,CAAC,MAAM,CAAC,EAAE,CAAC,SAAS,EAAE,CAAC,GAAQ,EAAE,EAAE;YACnC,IAAI,OAAO,GAAG,KAAK,QAAQ,IAAI,GAAG,KAAK,IAAI,IAAI,GAAG,CAAC,IAAI,KAAK,cAAc,EAAE,CAAC;gBACzE,MAAM,EAAE,GAAG,OAAO,CAAC,GAAG,CAAC,GAAG,CAAC,EAAE,CAAC,CAAA;gBAC9B,OAAO,CAAC,MAAM,CAAC,GAAG,CAAC,EAAE,CAAC,CAAA;gBACtB,IAAI,EAAE;oBACF,EAAE,CAAC,GAAG,CAAC,IAAI,CAAC,CAAA;;oBAEZ,IAAI,CAAC,GAAG,CAAC,SAAS,EAAE,gDAAgD,GAAG,CAAC,EAAE,EAAE,CAAC,CAAA;YACrF,CAAC;iBACI,IAAI,OAAO,GAAG,KAAK,QAAQ,IAAI,GAAG,KAAK,IAAI,IAAI,GAAG,CAAC,IAAI,KAAK,KAAK;gBAClE,IAAI,CAAC,GAAG,CAAC,GAAG,CAAC,KAAK,EAAE,GAAG,CAAC,OAAO,CAAC,CAAA;;gBAEhC,IAAI,CAAC,GAAG,CAAC,SAAS,EAAE,gDAAgD,IAAI,CAAC,SAAS,CAAC,GAAG,CAAC,EAAE,CAAC,CAAA;QAClG,CAAC,CAAC,CAAA;QAEF,8BAA8B;QAC9B,IAAI,GAAG,GAAG,CAAC,CAAA;QACX,MAAM,aAAa,GAAG,KAAK,EAAE,OAAkC,EAAE,EAAE;YAC/D,IAAI,IAAI,CAAC,OAAO;gBACZ,OAAO,OAAO,CAAA;YAClB,MAAM,EAAE,GAAG,GAAG,GAAG,EAAE,EAAE,CAAA;YACrB,OAAO,IAAI,OAAO,CAA4B,CAAC,OAAO,EAAE,EAAE;gBACtD,OAAO,CAAC,GAAG,CAAC,EAAE,EAAE,CAAC,MAAM,EAAE,EAAE,GAAG,OAAO,CAAC,MAAM,CAAC,CAAA,CAAC,CAAC,CAAC,CAAA;gBAChD,IAAI,CAAC,MAAO,CAAC,WAAW,CAAC,EAAE,IAAI,EAAE,SAAS,EAAE,EAAE,EAAE,OAAO,EAAE,EAAE,CAAE,OAAO,CAAC,MAAM,CAAE,CAAC,CAAA;YAClF,CAAC,CAAC,CAAA;QACN,CAAC,CAAA;QAED,oCAAoC;QACpC,MAAM,IAAI,GAAG,IAAI,CAAA;QACjB,IAAI,CAAC,MAAM,GAAG,IAAI,qBAAM,CAAC,SAAS,CAAC;YAC/B,kBAAkB,EAAE,IAAI;YACxB,kBAAkB,EAAE,IAAI;YACxB,aAAa,EAAO,KAAK;YACzB,SAAS,CAAE,KAA4C,EAAE,QAAQ,EAAE,QAAQ;gBACvE,IAAI,IAAI,CAAC,OAAO,EAAE,CAAC;oBACf,QAAQ,CAAC,IAAI,KAAK,CAAC,0BAA0B,CAAC,CAAC,CAAA;oBAC/C,OAAM;gBACV,CAAC;gBACD,IAAI,CAAC,MAAM,CAAC,QAAQ,CAAC,KAAK,CAAC,OAAO,CAAC;oBAC/B,QAAQ,CAAC,IAAI,KAAK,CAAC,4BAA4B,CAAC,CAAC,CAAA;qBAChD,CAAC;oBACF,gEAAgE;oBAChE,MAAM,aAAa,GAAG,IAAI,CAAC,aAAc,CAAC,YAAY,CAAC,KAAK,CAAC,OAAO,CAAC,CAAA;oBAErE,wCAAwC;oBACxC,MAAM,OAAO,GAAG,IAAI,CAAC,eAAe,CAAC,aAAa,CAAC,CAAA;oBAEnD,0BAA0B;oBAC1B,aAAa,CAAC,OAAO,CAAC,CAAC,IAAI,CAAC,CAAC,MAAiC,EAAE,EAAE;wBAC9D,wCAAwC;wBACxC,MAAM,GAAG,GAAG,IAAI,CAAC,eAAe,CAAC,MAAM,CAAC,CAAA;wBAExC
,qEAAqE;wBACrE,MAAM,WAAW,GAAG,IAAI,CAAC,WAAY,CAAC,YAAY,CAAC,GAAG,CAAC,CAAA;wBAEvD,oBAAoB;wBACpB,KAAK,CAAC,OAAO,GAAG,WAAW,CAAA;wBAE3B,6BAA6B;wBAC7B,IAAI,CAAC,IAAI,CAAC,KAAK,CAAC,CAAA;wBAChB,QAAQ,EAAE,CAAA;oBACd,CAAC,CAAC,CAAC,KAAK,CAAC,CAAC,GAAY,EAAE,EAAE;wBACtB,MAAM,KAAK,GAAG,IAAI,CAAC,WAAW,CAAC,GAAG,CAAC,CAAA;wBACnC,IAAI,CAAC,GAAG,CAAC,SAAS,EAAE,+BAA+B,KAAK,CAAC,OAAO,EAAE,CAAC,CAAA;wBACnE,QAAQ,CAAC,KAAK,CAAC,CAAA;oBACnB,CAAC,CAAC,CAAA;gBACN,CAAC;YACL,CAAC;YACD,KAAK,CAAE,QAAQ;gBACX,QAAQ,EAAE,CAAA;YACd,CAAC;SACJ,CAAC,CAAA;IACN,CAAC;IAED,kBAAkB;IAClB,KAAK,CAAC,KAAK;QACP,wBAAwB;QACxB,IAAI,CAAC,OAAO,GAAG,IAAI,CAAA;QAEnB,uBAAuB;QACvB,IAAI,IAAI,CAAC,MAAM,KAAK,IAAI,EAAE,CAAC;YACvB,IAAI,CAAC,MAAM,CAAC,SAAS,EAAE,CAAA;YACvB,IAAI,CAAC,MAAM,GAAG,IAAI,CAAA;QACtB,CAAC;QAED,uBAAuB;QACvB,IAAI,IAAI,CAAC,MAAM,KAAK,IAAI,EAAE,CAAC;YACvB,MAAM,IAAI,CAAC,aAAa,CAAC,IAAI,CAAC,MAAM,CAAC,CAAA;YACrC,IAAI,CAAC,MAAM,GAAG,IAAI,CAAA;QACtB,CAAC;QAED,0BAA0B;QAC1B,IAAI,IAAI,CAAC,aAAa,KAAK,IAAI;YAC3B,IAAI,CAAC,aAAa,GAAG,IAAI,CAAA;QAC7B,IAAI,IAAI,CAAC,WAAW,KAAK,IAAI;YACzB,IAAI,CAAC,WAAW,GAAG,IAAI,CAAA;IAC/B,CAAC;;AApML,yCAqMC"}
@@ -103,10 +103,10 @@ class SpeechFlowNodeA2AMeter extends speechflow_node_1.default {
103
103
  return;
104
104
  /* grab the accumulated chunk data */
105
105
  const chunkData = this.chunkBuffer;
106
- this.chunkBuffer = new Float32Array(0);
106
+ this.chunkBuffer = chunkData.subarray(samplesPerChunk);
107
107
  /* update internal audio sample sliding window for LUFS-M */
108
108
  if (chunkData.length > sampleWindow.length)
109
- sampleWindow.set(chunkData.subarray(chunkData.length - sampleWindow.length), 0);
109
+ sampleWindow.set(chunkData.subarray(0, sampleWindow.length), 0);
110
110
  else {
111
111
  sampleWindow.set(sampleWindow.subarray(chunkData.length), 0);
112
112
  sampleWindow.set(chunkData, sampleWindow.length - chunkData.length);