speechflow 1.6.6 → 1.7.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/CHANGELOG.md +17 -0
- package/README.md +28 -14
- package/etc/secretlint.json +7 -0
- package/etc/speechflow.yaml +13 -4
- package/etc/stx.conf +3 -2
- package/package.json +9 -7
- package/speechflow-cli/dst/speechflow-main-api.js +3 -3
- package/speechflow-cli/dst/speechflow-main-api.js.map +1 -1
- package/speechflow-cli/dst/speechflow-main-cli.js +2 -2
- package/speechflow-cli/dst/speechflow-main-cli.js.map +1 -1
- package/speechflow-cli/dst/speechflow-main-graph.js +10 -9
- package/speechflow-cli/dst/speechflow-main-graph.js.map +1 -1
- package/speechflow-cli/dst/speechflow-main-status.js +35 -1
- package/speechflow-cli/dst/speechflow-main-status.js.map +1 -1
- package/speechflow-cli/dst/speechflow-node-a2a-ffmpeg.js +2 -2
- package/speechflow-cli/dst/speechflow-node-a2a-ffmpeg.js.map +1 -1
- package/speechflow-cli/dst/speechflow-node-a2a-filler.js +46 -17
- package/speechflow-cli/dst/speechflow-node-a2a-filler.js.map +1 -1
- package/speechflow-cli/dst/speechflow-node-a2a-gain.js +0 -5
- package/speechflow-cli/dst/speechflow-node-a2a-gain.js.map +1 -1
- package/speechflow-cli/dst/speechflow-node-a2a-gender.js +1 -2
- package/speechflow-cli/dst/speechflow-node-a2a-gender.js.map +1 -1
- package/speechflow-cli/dst/speechflow-node-a2a-mute.js +0 -5
- package/speechflow-cli/dst/speechflow-node-a2a-mute.js.map +1 -1
- package/speechflow-cli/dst/speechflow-node-a2a-rnnoise.js +0 -5
- package/speechflow-cli/dst/speechflow-node-a2a-rnnoise.js.map +1 -1
- package/speechflow-cli/dst/speechflow-node-a2a-speex.js +0 -5
- package/speechflow-cli/dst/speechflow-node-a2a-speex.js.map +1 -1
- package/speechflow-cli/dst/speechflow-node-a2a-wav.js +0 -1
- package/speechflow-cli/dst/speechflow-node-a2a-wav.js.map +1 -1
- package/speechflow-cli/dst/speechflow-node-a2t-amazon.js +16 -13
- package/speechflow-cli/dst/speechflow-node-a2t-amazon.js.map +1 -1
- package/speechflow-cli/dst/speechflow-node-a2t-deepgram.js +21 -16
- package/speechflow-cli/dst/speechflow-node-a2t-deepgram.js.map +1 -1
- package/speechflow-cli/dst/speechflow-node-a2t-openai.js +75 -46
- package/speechflow-cli/dst/speechflow-node-a2t-openai.js.map +1 -1
- package/speechflow-cli/dst/speechflow-node-t2a-elevenlabs.js +39 -39
- package/speechflow-cli/dst/speechflow-node-t2a-elevenlabs.js.map +1 -1
- package/speechflow-cli/dst/speechflow-node-t2a-kokoro.js +0 -1
- package/speechflow-cli/dst/speechflow-node-t2a-kokoro.js.map +1 -1
- package/speechflow-cli/dst/speechflow-node-t2t-amazon.js +1 -2
- package/speechflow-cli/dst/speechflow-node-t2t-amazon.js.map +1 -1
- package/speechflow-cli/dst/speechflow-node-t2t-deepl.js +0 -1
- package/speechflow-cli/dst/speechflow-node-t2t-deepl.js.map +1 -1
- package/speechflow-cli/dst/speechflow-node-t2t-format.js +0 -1
- package/speechflow-cli/dst/speechflow-node-t2t-format.js.map +1 -1
- package/speechflow-cli/dst/speechflow-node-t2t-google.js +0 -1
- package/speechflow-cli/dst/speechflow-node-t2t-google.js.map +1 -1
- package/speechflow-cli/dst/speechflow-node-t2t-modify.js +0 -1
- package/speechflow-cli/dst/speechflow-node-t2t-modify.js.map +1 -1
- package/speechflow-cli/dst/speechflow-node-t2t-ollama.js +0 -1
- package/speechflow-cli/dst/speechflow-node-t2t-ollama.js.map +1 -1
- package/speechflow-cli/dst/speechflow-node-t2t-openai.js +0 -1
- package/speechflow-cli/dst/speechflow-node-t2t-openai.js.map +1 -1
- package/speechflow-cli/dst/speechflow-node-t2t-subtitle.js +173 -29
- package/speechflow-cli/dst/speechflow-node-t2t-subtitle.js.map +1 -1
- package/speechflow-cli/dst/speechflow-node-x2x-filter.js +0 -1
- package/speechflow-cli/dst/speechflow-node-x2x-filter.js.map +1 -1
- package/speechflow-cli/dst/speechflow-node-x2x-trace.js +0 -5
- package/speechflow-cli/dst/speechflow-node-x2x-trace.js.map +1 -1
- package/speechflow-cli/dst/speechflow-node-xio-device.js +2 -2
- package/speechflow-cli/dst/speechflow-node-xio-device.js.map +1 -1
- package/speechflow-cli/dst/speechflow-node-xio-file.js +3 -3
- package/speechflow-cli/dst/speechflow-node-xio-file.js.map +1 -1
- package/speechflow-cli/dst/speechflow-node-xio-mqtt.js +7 -2
- package/speechflow-cli/dst/speechflow-node-xio-mqtt.js.map +1 -1
- package/speechflow-cli/dst/speechflow-node-xio-websocket.js +14 -4
- package/speechflow-cli/dst/speechflow-node-xio-websocket.js.map +1 -1
- package/speechflow-cli/dst/speechflow-node.js +1 -1
- package/speechflow-cli/dst/speechflow-node.js.map +1 -1
- package/speechflow-cli/dst/speechflow-util-audio.js +3 -3
- package/speechflow-cli/dst/speechflow-util-audio.js.map +1 -1
- package/speechflow-cli/dst/speechflow-util-misc.d.ts +2 -0
- package/speechflow-cli/dst/speechflow-util-misc.js +26 -0
- package/speechflow-cli/dst/speechflow-util-misc.js.map +1 -0
- package/speechflow-cli/dst/speechflow-util-queue.d.ts +9 -2
- package/speechflow-cli/dst/speechflow-util-queue.js +33 -12
- package/speechflow-cli/dst/speechflow-util-queue.js.map +1 -1
- package/speechflow-cli/dst/speechflow-util-stream.d.ts +2 -2
- package/speechflow-cli/dst/speechflow-util-stream.js +13 -17
- package/speechflow-cli/dst/speechflow-util-stream.js.map +1 -1
- package/speechflow-cli/dst/speechflow-util.d.ts +1 -0
- package/speechflow-cli/dst/speechflow-util.js +1 -0
- package/speechflow-cli/dst/speechflow-util.js.map +1 -1
- package/speechflow-cli/etc/oxlint.jsonc +7 -1
- package/speechflow-cli/etc/stx.conf +1 -0
- package/speechflow-cli/package.json +31 -30
- package/speechflow-cli/src/speechflow-main-api.ts +3 -6
- package/speechflow-cli/src/speechflow-main-cli.ts +2 -2
- package/speechflow-cli/src/speechflow-main-graph.ts +10 -11
- package/speechflow-cli/src/speechflow-main-status.ts +2 -2
- package/speechflow-cli/src/speechflow-node-a2a-ffmpeg.ts +4 -2
- package/speechflow-cli/src/speechflow-node-a2a-filler.ts +57 -20
- package/speechflow-cli/src/speechflow-node-a2a-gain.ts +0 -5
- package/speechflow-cli/src/speechflow-node-a2a-gender.ts +1 -2
- package/speechflow-cli/src/speechflow-node-a2a-mute.ts +0 -5
- package/speechflow-cli/src/speechflow-node-a2a-rnnoise.ts +0 -5
- package/speechflow-cli/src/speechflow-node-a2a-speex.ts +0 -5
- package/speechflow-cli/src/speechflow-node-a2a-wav.ts +0 -1
- package/speechflow-cli/src/speechflow-node-a2t-amazon.ts +21 -16
- package/speechflow-cli/src/speechflow-node-a2t-deepgram.ts +24 -16
- package/speechflow-cli/src/speechflow-node-a2t-openai.ts +86 -54
- package/speechflow-cli/src/speechflow-node-t2a-elevenlabs.ts +41 -38
- package/speechflow-cli/src/speechflow-node-t2a-kokoro.ts +0 -1
- package/speechflow-cli/src/speechflow-node-t2t-amazon.ts +1 -2
- package/speechflow-cli/src/speechflow-node-t2t-deepl.ts +0 -1
- package/speechflow-cli/src/speechflow-node-t2t-format.ts +0 -1
- package/speechflow-cli/src/speechflow-node-t2t-google.ts +0 -1
- package/speechflow-cli/src/speechflow-node-t2t-modify.ts +0 -1
- package/speechflow-cli/src/speechflow-node-t2t-ollama.ts +0 -1
- package/speechflow-cli/src/speechflow-node-t2t-openai.ts +0 -1
- package/speechflow-cli/src/speechflow-node-t2t-subtitle.ts +205 -33
- package/speechflow-cli/src/speechflow-node-x2x-filter.ts +0 -1
- package/speechflow-cli/src/speechflow-node-x2x-trace.ts +0 -5
- package/speechflow-cli/src/speechflow-node-xio-device.ts +2 -2
- package/speechflow-cli/src/speechflow-node-xio-file.ts +3 -3
- package/speechflow-cli/src/speechflow-node-xio-mqtt.ts +7 -2
- package/speechflow-cli/src/speechflow-node-xio-websocket.ts +14 -4
- package/speechflow-cli/src/speechflow-node.ts +1 -1
- package/speechflow-cli/src/speechflow-util-audio.ts +3 -3
- package/speechflow-cli/src/speechflow-util-misc.ts +23 -0
- package/speechflow-cli/src/speechflow-util-queue.ts +36 -16
- package/speechflow-cli/src/speechflow-util-stream.ts +24 -21
- package/speechflow-cli/src/speechflow-util.ts +1 -0
- package/speechflow-ui-db/dst/app-font-fa-brands-400.woff2 +0 -0
- package/speechflow-ui-db/dst/app-font-fa-regular-400.woff2 +0 -0
- package/speechflow-ui-db/dst/app-font-fa-solid-900.woff2 +0 -0
- package/speechflow-ui-db/dst/app-font-fa-v4compatibility.woff2 +0 -0
- package/speechflow-ui-db/dst/index.css +1 -5
- package/speechflow-ui-db/dst/index.js +17 -58
- package/speechflow-ui-db/etc/stx.conf +5 -16
- package/speechflow-ui-db/package.json +20 -19
- package/speechflow-ui-st/dst/app-font-fa-brands-400.woff2 +0 -0
- package/speechflow-ui-st/dst/app-font-fa-regular-400.woff2 +0 -0
- package/speechflow-ui-st/dst/app-font-fa-solid-900.woff2 +0 -0
- package/speechflow-ui-st/dst/app-font-fa-v4compatibility.woff2 +0 -0
- package/speechflow-ui-st/dst/index.css +1 -5
- package/speechflow-ui-st/dst/index.js +36 -79
- package/speechflow-ui-st/etc/stx.conf +5 -16
- package/speechflow-ui-st/package.json +21 -20
- package/speechflow-cli/dst/speechflow-node-a2a-pitch2-wt.d.ts +0 -1
- package/speechflow-cli/dst/speechflow-node-a2a-pitch2-wt.js +0 -149
- package/speechflow-cli/dst/speechflow-node-a2a-pitch2-wt.js.map +0 -1
- package/speechflow-cli/dst/speechflow-node-a2a-pitch2.d.ts +0 -13
- package/speechflow-cli/dst/speechflow-node-a2a-pitch2.js +0 -202
- package/speechflow-cli/dst/speechflow-node-a2a-pitch2.js.map +0 -1

package/speechflow-cli/dst/speechflow-util.js:

```diff
@@ -23,4 +23,5 @@ __exportStar(require("./speechflow-util-audio"), exports);
 __exportStar(require("./speechflow-util-error"), exports);
 __exportStar(require("./speechflow-util-stream"), exports);
 __exportStar(require("./speechflow-util-queue"), exports);
+__exportStar(require("./speechflow-util-misc"), exports);
 //# sourceMappingURL=speechflow-util.js.map
```

package/speechflow-cli/dst/speechflow-util.js.map:

```diff
@@ -1 +1 @@
-{"version":3,"file":"speechflow-util.js","sourceRoot":"","sources":["../src/speechflow-util.ts"],"names":[],"mappings":";AAAA;;;;EAIE;;;;;;;;;;;;;;;;AAEF,0DAAuC;AACvC,0DAAuC;AACvC,2DAAwC;AACxC,0DAAuC"}
+{"version":3,"file":"speechflow-util.js","sourceRoot":"","sources":["../src/speechflow-util.ts"],"names":[],"mappings":";AAAA;;;;EAIE;;;;;;;;;;;;;;;;AAEF,0DAAuC;AACvC,0DAAuC;AACvC,2DAAwC;AACxC,0DAAuC;AACvC,yDAAsC"}
```

package/speechflow-cli/etc/oxlint.jsonc:

```diff
@@ -120,7 +120,13 @@
         "no-barrel-file": "off",
         "no-process-exit": "off",
         "no-cycle": "off",
-        "class-methods-use-this": "off"
+        "class-methods-use-this": "off",
+        "no-named-export": "off",
+        "prefer-at": "off",
+        "no-param-reassign": "off",
+        "prefer-top-level-await": "off",
+        "no-process-env": "off",
+        "no-warning-comments": "off"
     }
 }
```

package/speechflow-cli/etc/stx.conf:

```diff
@@ -23,6 +23,7 @@ dev
 
 # [speechflow-cli] static code analysis (linting)
 lint:
+    check-dependencies && \
     tsc --project etc/tsconfig.json --noEmit && \
     oxlint --config etc/oxlint.jsonc src/*.ts && \
     biome lint --diagnostic-level=warn --config-path=etc/biome.jsonc src/*.ts && \
```

package/speechflow-cli/package.json:

```diff
@@ -18,34 +18,34 @@
         "cli-io": "0.9.13",
         "yargs": "18.0.0",
         "flowlink": "1.2.3",
-        "js-yaml": "4.1.
+        "js-yaml": "4.1.1",
         "@gpeng/naudiodon": "2.4.1",
         "@deepgram/sdk": "4.11.2",
-        "deepl-node": "1.
-        "@elevenlabs/elevenlabs-js": "2.
+        "deepl-node": "1.22.0",
+        "@elevenlabs/elevenlabs-js": "2.26.0",
         "get-stream": "9.0.1",
-        "@dotenvx/dotenvx": "1.51.
+        "@dotenvx/dotenvx": "1.51.1",
         "speex-resampler": "3.0.1",
         "@sapphi-red/speex-preprocess-wasm": "0.4.0",
         "@shiguredo/rnnoise-wasm": "2025.1.5",
-        "@aws-sdk/client-transcribe-streaming": "3.
-        "@aws-sdk/client-translate": "3.
-        "@aws-sdk/client-polly": "3.
-        "@google-cloud/translate": "9.
-        "node-web-audio-api": "1.0.
+        "@aws-sdk/client-transcribe-streaming": "3.946.0",
+        "@aws-sdk/client-translate": "3.946.0",
+        "@aws-sdk/client-polly": "3.946.0",
+        "@google-cloud/translate": "9.3.0",
+        "node-web-audio-api": "1.0.7",
         "object-path": "0.11.8",
-        "glob": "
+        "glob": "13.0.0",
         "ws": "8.18.3",
         "bufferutil": "4.0.9",
         "utf-8-validate": "6.0.5",
-        "@hapi/hapi": "21.4.
+        "@hapi/hapi": "21.4.4",
         "@hapi/boom": "10.0.1",
         "@hapi/inert": "7.1.0",
         "hapi-plugin-header": "1.1.8",
         "hapi-plugin-websocket": "2.4.11",
         "@opensumi/reconnecting-websocket": "4.4.0",
-        "ollama": "0.6.
-        "openai": "
+        "ollama": "0.6.3",
+        "openai": "6.10.0",
         "@rse/ffmpeg": "1.4.2",
         "ffmpeg-stream": "1.0.1",
         "installed-packages": "1.0.13",
@@ -53,11 +53,11 @@
         "wav": "1.0.2",
         "mqtt": "5.14.1",
         "cbor2": "2.0.1",
-        "arktype": "2.1.
+        "arktype": "2.1.28",
         "pure-uuid": "1.8.1",
         "wavefile": "11.0.0",
         "audio-inspect": "0.0.4",
-        "@huggingface/transformers": "3.
+        "@huggingface/transformers": "3.8.1",
         "kokoro-js": "1.2.1",
         "@ericedouard/vad-node-realtime": "0.2.0",
         "osc-js": "2.4.1",
@@ -69,22 +69,22 @@
         "@rse/stx": "1.1.2"
     },
     "devDependencies": {
-        "eslint": "9.
-        "@eslint/js": "9.
+        "eslint": "9.39.1",
+        "@eslint/js": "9.39.1",
         "neostandard": "0.12.2",
         "eslint-plugin-promise": "7.2.1",
         "eslint-plugin-import": "2.32.0",
         "eslint-plugin-node": "11.1.0",
-        "typescript-eslint": "8.
-        "@typescript-eslint/eslint-plugin": "8.
-        "@typescript-eslint/parser": "8.
-        "oxlint": "1.
-        "eslint-plugin-oxlint": "1.
+        "typescript-eslint": "8.48.1",
+        "@typescript-eslint/eslint-plugin": "8.48.1",
+        "@typescript-eslint/parser": "8.48.1",
+        "oxlint": "1.31.0",
+        "eslint-plugin-oxlint": "1.31.0",
         "@biomejs/biome": "2.0.6",
         "eslint-config-biome": "2.1.3",
 
-        "@types/node": "24.
-        "@types/yargs": "17.0.
+        "@types/node": "24.10.1",
+        "@types/yargs": "17.0.35",
         "@types/js-yaml": "4.0.9",
         "@types/object-path": "0.11.4",
         "@types/ws": "8.18.1",
@@ -93,14 +93,15 @@
         "@types/luxon": "3.7.1",
         "@types/wrap-text": "1.0.2",
 
-        "
+        "check-dependencies": "2.0.0",
+        "patch-package": "8.0.1",
         "stmux": "1.8.11",
-        "nodemon": "3.1.
+        "nodemon": "3.1.11",
         "shx": "0.4.0",
-        "@yao-pkg/pkg": "6.
-        "typescript": "5.9.
-        "delay-cli": "
-        "cross-env": "10.
+        "@yao-pkg/pkg": "6.10.1",
+        "typescript": "5.9.3",
+        "delay-cli": "3.0.0",
+        "cross-env": "10.1.0"
     },
     "overrides": {
         "@huggingface/transformers": { "onnxruntime-node": "1.23.0-dev.20250703-7fc6235861" }
```

package/speechflow-cli/src/speechflow-main-api.ts:

```diff
@@ -69,8 +69,7 @@ export class APIServer {
             else {
                 await Promise.race<void>([
                     foundNode.receiveRequest(argList),
-
-                    reject(new Error("timeout")), 10 * 1000))
+                    util.timeout(10 * 1000)
                 ]).catch((err: Error) => {
                     this.cli.log("warning", `external request to node <${name}> failed: ${err.message}`)
                     throw err
@@ -255,8 +254,7 @@ export class APIServer {
         for (const n of graph.getGraphNodes()) {
             Promise.race<void>([
                 n.receiveDashboard(info.type, info.id, info.kind, info.value),
-
-                reject(new Error("timeout")), 10 * 1000))
+                util.timeout(10 * 1000)
             ]).catch((err: Error) => {
                 this.cli.log("warning", `sending dashboard info to node <${n.id}> failed: ${err.message}`)
             })
@@ -297,8 +295,7 @@ export class APIServer {
         }
         await Promise.race([
             Promise.all(closePromises),
-
-            setTimeout(() => reject(new Error("timeout for all peers")), 5 * 1000))
+            util.timeout(5 * 1000)
         ]).catch((error: unknown) => {
            this.cli.log("warning", `HAPI: WebSockets failed to close: ${util.ensureError(error).message}`)
        })
```
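
These hunks (and the matching ones in speechflow-main-graph.ts and speechflow-main-status.ts below) replace inline `new Promise(...)`/`setTimeout(...)` timeout constructions with a shared `util.timeout()` helper, presumably exported via the new speechflow-util-misc module. That module's body is not included in this diff, so the following is only a minimal sketch of what such a helper usually looks like; the name and signature are taken from the call sites, everything else is assumption:

```ts
/* sketch only: a reject-after-delay helper for Promise.race(),
   matching the util.timeout(ms) call sites above (implementation assumed) */
export function timeout (ms: number): Promise<never> {
    return new Promise<never>((_resolve, reject) => {
        setTimeout(() => reject(new Error("timeout")), ms)
    })
}

/* usage, mirroring the first hunk above */
// await Promise.race<void>([ foundNode.receiveRequest(argList), timeout(10 * 1000) ])
```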

package/speechflow-cli/src/speechflow-main-cli.ts:

```diff
@@ -69,7 +69,7 @@ export class CLIContext {
             "[-p|--port <tcp-port>] " +
             "[-C|--cache <directory>] " +
             "[-d|--dashboard <type>:<id>:<name>[,...]] " +
-            "[-o|--osc <ip-address>:<udp-port> " +
+            "[-o|--osc <ip-address>:<udp-port>] " +
             "[-e|--expression <expression>] " +
             "[-f|--file <file>] " +
             "[-c|--config <id>@<yaml-config-file>] " +
@@ -252,7 +252,7 @@ export class CLIContext {
         if (this.debug)
             process.stderr.write(`${pkg.name}: ${chalk.red("ERROR")}: ${err.message}\n${err.stack}\n`)
         else
-            process.stderr.write(`${pkg.name}: ${chalk.red("ERROR")}: ${err.message}`)
+            process.stderr.write(`${pkg.name}: ${chalk.red("ERROR")}: ${err.message}\n`)
         }
         process.exit(1)
     }
```

package/speechflow-cli/src/speechflow-main-graph.ts:

```diff
@@ -76,7 +76,8 @@ export class NodeGraph {
                 if (!objectPath.has(variables, id))
                     throw new Error(`failed to resolve variable "${id}"`)
                 const value = objectPath.get(variables, id)
-
+                const sensitive = /(?:key|secret|token|password)/i.test(id)
+                this.cli.log("info", `resolve variable: "${id}" -> "${sensitive ? "***" : value}"`)
                 return value
             },
             createNode: (id: string, opts: { [ id: string ]: any }, args: any[]) => {
@@ -100,8 +101,8 @@ export class NodeGraph {
                     process.exit(1)
                 }
                 const params = Object.keys(node.params).map((key) => {
-                    if (key.
-                        return `${key}:
+                    if (/(?:key|secret|token|password)/i.test(key))
+                        return `${key}: ***`
                     else
                         return `${key}: ${JSON.stringify(node.params[key])}`
                 }).join(", ")
@@ -173,8 +174,7 @@ export class NodeGraph {
             node.setTimeZero(this.timeZero)
             await Promise.race<void>([
                 node.open(),
-
-                reject(new Error("timeout")), 30 * 1000))
+                util.timeout(30 * 1000)
             ]).catch((err: Error) => {
                 this.cli.log("error", `<${node.id}>: failed to open node <${node.id}>: ${err.message}`)
                 throw new Error(`failed to open node <${node.id}>: ${err.message}`)
@@ -224,11 +224,11 @@ export class NodeGraph {
                     this.shutdown("finished", args, api)
                 }
             }
-            node.stream.on("end", () => {
-                deactivateNode(node, `readable stream side of node <${node.id}> raised "end" event`)
-            })
             node.stream.on("finish", () => {
-                deactivateNode(node, `writable stream side of node <${node.id}> raised "finish" event`)
+                deactivateNode(node, `writable stream side (input) of node <${node.id}> raised "finish" event`)
+            })
+            node.stream.on("end", () => {
+                deactivateNode(node, `readable stream side (output) of node <${node.id}> raised "end" event`)
             })
         }
 
@@ -270,8 +270,7 @@ export class NodeGraph {
             this.cli.log("info", `close node <${node.id}>`)
             await Promise.race<void>([
                 node.close(),
-
-                reject(new Error("timeout")), 10 * 1000))
+                util.timeout(10 * 1000)
             ]).catch((err: Error) => {
                 this.cli.log("warning", `node <${node.id}> failed to close: ${err.message}`)
             })
```
@@ -15,6 +15,7 @@ import chalk from "chalk"
|
|
|
15
15
|
/* internal dependencies */
|
|
16
16
|
import SpeechFlowNode from "./speechflow-node"
|
|
17
17
|
import { NodeConfig } from "./speechflow-main-config"
|
|
18
|
+
import * as util from "./speechflow-util"
|
|
18
19
|
|
|
19
20
|
/* the node status manager */
|
|
20
21
|
export class NodeStatusManager {
|
|
@@ -48,8 +49,7 @@ export class NodeStatusManager {
|
|
|
48
49
|
node._accessBus = accessBus
|
|
49
50
|
const status = await Promise.race<{ [ key: string ]: string | number }>([
|
|
50
51
|
node.status(),
|
|
51
|
-
|
|
52
|
-
reject(new Error("timeout")), 10 * 1000))
|
|
52
|
+
util.timeout(10 * 1000)
|
|
53
53
|
]).catch((err: Error) => {
|
|
54
54
|
this.cli.log("warning", `[${node.id}]: failed to gather status of node <${node.id}>: ${err.message}`)
|
|
55
55
|
return {} as { [ key: string ]: string | number }
|
|

package/speechflow-cli/src/speechflow-node-a2a-ffmpeg.ts:

```diff
@@ -99,7 +99,7 @@ export default class SpeechFlowNodeA2AFFMPEG extends SpeechFlowNode {
         })
 
         /* wrap streams with conversions for chunk vs plain audio */
-        const wrapper1 = util.createTransformStreamForWritableSide()
+        const wrapper1 = util.createTransformStreamForWritableSide("audio", 1)
         const wrapper2 = util.createTransformStreamForReadableSide("audio", () => this.timeZero)
         this.stream = Stream.compose(wrapper1, ffmpegStream, wrapper2)
     }
@@ -114,7 +114,9 @@ export default class SpeechFlowNodeA2AFFMPEG extends SpeechFlowNode {
 
         /* shutdown FFmpeg */
         if (this.ffmpeg !== null) {
-            util.run(
+            util.run("stopping FFmpeg process",
+                () => this.ffmpeg!.kill(),
+                () => {})
             this.ffmpeg = null
         }
     }
```
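
Both this FFmpeg hunk and the Amazon Transcribe hunk further below route cleanup calls through `util.run(...)`, once with a leading label string and once without. The helper itself is not part of this diff; the sketch below is one plausible shape that would satisfy both call sites (the overload and the unused label are assumptions):

```ts
/* sketch only: run a cleanup action and hand any synchronous error to a
   handler instead of letting it propagate (real implementation not shown;
   the optional label would presumably feed logging, omitted here) */
export function run (action: () => void, onError: (error: Error) => void): void
export function run (label: string, action: () => void, onError: (error: Error) => void): void
export function run (...args: any[]): void {
    const [ action, onError ] = typeof args[0] === "string"
        ? [ args[1], args[2] ] : [ args[0], args[1] ]
    try {
        action()
    }
    catch (error) {
        onError(error instanceof Error ? error : new Error(String(error)))
    }
}
```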
@@ -15,6 +15,8 @@ import * as util from "./speechflow-util"
|
|
|
15
15
|
|
|
16
16
|
class AudioFiller extends EventEmitter {
|
|
17
17
|
private emittedEndSamples = 0 /* stream position in samples already emitted */
|
|
18
|
+
private maxInputEndSamples = 0
|
|
19
|
+
private lastMeta: Map<string, any> | undefined = undefined
|
|
18
20
|
private readonly bytesPerSample = 2 /* PCM I16 */
|
|
19
21
|
private readonly bytesPerFrame: number
|
|
20
22
|
private readonly sampleTolerance = 0.5 /* tolerance for floating-point sample comparisons */
|
|
@@ -25,12 +27,12 @@ class AudioFiller extends EventEmitter {
|
|
|
25
27
|
}
|
|
26
28
|
|
|
27
29
|
/* optional helper to allow subscribing with strong typing */
|
|
28
|
-
public on(event: "chunk", listener: (chunk: SpeechFlowChunk) => void): this
|
|
30
|
+
public on(event: "chunk", listener: (chunk: SpeechFlowChunk, type: string) => void): this
|
|
29
31
|
public on(event: string, listener: (...args: any[]) => void): this {
|
|
30
32
|
return super.on(event, listener)
|
|
31
33
|
}
|
|
32
34
|
|
|
33
|
-
/* convert fractional samples
|
|
35
|
+
/* convert fractional samples from duration */
|
|
34
36
|
private samplesFromDuration(duration: Duration): number {
|
|
35
37
|
const seconds = duration.as("seconds")
|
|
36
38
|
const samples = seconds * this.sampleRate
|
|
@@ -51,8 +53,9 @@ class AudioFiller extends EventEmitter {
|
|
|
51
53
|
const payload = Buffer.alloc(frames * this.bytesPerFrame) /* already zeroed */
|
|
52
54
|
const timestampStart = this.durationFromSamples(fromSamples)
|
|
53
55
|
const timestampEnd = this.durationFromSamples(toSamples)
|
|
54
|
-
const chunk = new SpeechFlowChunk(timestampStart, timestampEnd,
|
|
55
|
-
|
|
56
|
+
const chunk = new SpeechFlowChunk(timestampStart, timestampEnd,
|
|
57
|
+
"final", "audio", payload, meta ? new Map(meta) : undefined)
|
|
58
|
+
this.emit("chunk", chunk, "silence")
|
|
56
59
|
}
|
|
57
60
|
|
|
58
61
|
/* add a chunk of audio for processing */
|
|
@@ -62,6 +65,12 @@ class AudioFiller extends EventEmitter {
|
|
|
62
65
|
if (endSamp < startSamp)
|
|
63
66
|
throw new Error("invalid timestamps")
|
|
64
67
|
|
|
68
|
+
/* track maximum input end timestamp and last metadata for trailing silence */
|
|
69
|
+
if (endSamp > this.maxInputEndSamples) {
|
|
70
|
+
this.maxInputEndSamples = endSamp
|
|
71
|
+
this.lastMeta = chunk.meta ? new Map(chunk.meta) : undefined
|
|
72
|
+
}
|
|
73
|
+
|
|
65
74
|
/* if chunk starts beyond what we've emitted, insert silence for the gap */
|
|
66
75
|
if (startSamp > this.emittedEndSamples + this.sampleTolerance) {
|
|
67
76
|
this.emitSilence(this.emittedEndSamples, startSamp, chunk.meta)
|
|
@@ -95,12 +104,20 @@ class AudioFiller extends EventEmitter {
|
|
|
95
104
|
const outEndSamples = outStartSamples + Math.floor(payload.length / this.bytesPerFrame)
|
|
96
105
|
const timestampStart = this.durationFromSamples(outStartSamples)
|
|
97
106
|
const timestampEnd = this.durationFromSamples(outEndSamples)
|
|
98
|
-
const c = new SpeechFlowChunk(timestampStart, timestampEnd,
|
|
99
|
-
|
|
107
|
+
const c = new SpeechFlowChunk(timestampStart, timestampEnd,
|
|
108
|
+
"final", "audio", payload, new Map(chunk.meta))
|
|
109
|
+
this.emit("chunk", c, "content")
|
|
100
110
|
|
|
101
111
|
/* advance emitted cursor */
|
|
102
112
|
this.emittedEndSamples = Math.max(this.emittedEndSamples, outEndSamples)
|
|
103
113
|
}
|
|
114
|
+
|
|
115
|
+
/* signal end of processing and emit trailing silence */
|
|
116
|
+
public done (): void {
|
|
117
|
+
/* emit trailing silence if there's a gap between emitted and max input */
|
|
118
|
+
if (this.maxInputEndSamples > this.emittedEndSamples + this.sampleTolerance)
|
|
119
|
+
this.emitSilence(this.emittedEndSamples, this.maxInputEndSamples, this.lastMeta)
|
|
120
|
+
}
|
|
104
121
|
}
|
|
105
122
|
|
|
106
123
|
/* SpeechFlow node for filling audio gaps */
|
|
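
AudioFiller works entirely in sample units: Luxon `Duration` timestamps are converted to sample counts, and whenever a chunk starts more than `sampleTolerance` samples past the position already emitted, the gap is synthesized as zero-filled PCM. The new `maxInputEndSamples`/`done()` pair extends the same idea to trailing silence at end of stream. A small standalone illustration of that arithmetic (sample rate and mono I16 frame layout chosen for the example, not taken from the package):

```ts
import { Duration } from "luxon"

const sampleRate    = 48000 /* example: 48 kHz */
const bytesPerFrame = 2     /* example: PCM I16, mono */

const samplesFromDuration = (d: Duration) =>
    Math.round(d.as("seconds") * sampleRate)

/* previously emitted audio ends at 1.0 s, the next chunk starts at 1.5 s ... */
const emittedEnd = samplesFromDuration(Duration.fromMillis(1000)) /* 48000 samples */
const nextStart  = samplesFromDuration(Duration.fromMillis(1500)) /* 72000 samples */

/* ... so the 24000-frame gap is emitted as a zero-filled (silent) buffer */
const gapFrames = nextStart - emittedEnd
const silence   = Buffer.alloc(gapFrames * bytesPerFrame) /* 48000 zero bytes */
```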

package/speechflow-cli/src/speechflow-node-a2a-filler.ts (continued):

```diff
@@ -137,12 +154,13 @@ export default class SpeechFlowNodeA2AFiller extends SpeechFlowNode {
         this.sendQueue = new util.AsyncQueue<SpeechFlowChunk | null>()
 
         /* shift chunks from filler to send queue */
-        this.filler.on("chunk", (chunk) => {
+        this.filler.on("chunk", (chunk, type) => {
             this.sendQueue?.write(chunk)
         })
 
         /* establish a duplex stream */
         const self = this
+        const reads = new util.PromiseSet<void>()
         this.stream = new Stream.Duplex({
             readableObjectMode: true,
             writableObjectMode: true,
@@ -154,8 +172,6 @@ export default class SpeechFlowNodeA2AFiller extends SpeechFlowNode {
                     callback(new Error("invalid chunk payload type"))
                 else {
                     try {
-                        if (self.closing || self.filler === null)
-                            throw new Error("stream already destroyed")
                         self.filler.add(chunk)
                         callback()
                     }
@@ -164,12 +180,37 @@ export default class SpeechFlowNodeA2AFiller extends SpeechFlowNode {
                     }
                 }
             },
+            async final (callback) {
+                /* short-circuit processing in case of own closing */
+                if (self.closing) {
+                    callback()
+                    return
+                }
+
+                /* signal end of stream */
+                if (self.filler !== null && self.sendQueue !== null) {
+                    /* optionally emit trailing silence
+                       (we have to wait for its internal "emit" operation to happen) */
+                    self.filler.done()
+                    await util.sleep(10)
+
+                    /* signal end of stream */
+                    self.sendQueue.write(null)
+                }
+
+                /* await all read operations */
+                await reads.awaitAll()
+
+                /* signal end of streaming */
+                this.push(null)
+                callback()
+            },
             read (size) {
                 if (self.closing || self.sendQueue === null) {
                     this.push(null)
                     return
                 }
-                self.sendQueue.read().then((chunk) => {
+                reads.add(self.sendQueue.read().then((chunk) => {
                     if (self.closing || self.sendQueue === null) {
                         this.push(null)
                         return
@@ -178,22 +219,18 @@ export default class SpeechFlowNodeA2AFiller extends SpeechFlowNode {
                         self.log("info", "received EOF signal")
                         this.push(null)
                     }
+                    else if (!(chunk.payload instanceof Buffer)) {
+                        self.log("warning", "invalid chunk (expected audio buffer)")
+                        this.push(null)
+                    }
                     else {
-                        self.log("debug", `received data (${chunk.payload.
+                        self.log("debug", `received data (${chunk.payload.byteLength} bytes)`)
                         this.push(chunk)
                     }
                 }).catch((error: unknown) => {
                     if (!self.closing && self.sendQueue !== null)
                         self.log("error", `queue read error: ${util.ensureError(error).message}`)
-                })
-            },
-            final (callback) {
-                if (self.closing) {
-                    callback()
-                    return
-                }
-                this.push(null)
-                callback()
+                }))
             }
         })
     }
```
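
The reworked filler (and, below, the Amazon Transcribe node) now registers every pending queue read in a `util.PromiseSet`, so that the new `async final()` can wait for all outstanding read operations before pushing the end-of-stream marker. The class itself is not visible in this diff (speechflow-util-queue.ts changed by +36 -16 lines), so this is only a plausible sketch of the bookkeeping implied by the call sites:

```ts
/* sketch only: track in-flight promises and allow awaiting them all
   (the actual speechflow-util-queue implementation is not shown here) */
export class PromiseSet<T> {
    private pending = new Set<Promise<T>>()
    add (promise: Promise<T>): Promise<T> {
        this.pending.add(promise)
        promise.then(
            () => { this.pending.delete(promise) },
            () => { this.pending.delete(promise) })
        return promise
    }
    async awaitAll (): Promise<void> {
        /* settle whatever is currently in flight */
        await Promise.allSettled([ ...this.pending ])
    }
}
```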

package/speechflow-cli/src/speechflow-node-a2a-gender.ts:

```diff
@@ -363,8 +363,7 @@ export default class SpeechFlowNodeA2AGender extends SpeechFlowNode {
         if (this.classifier !== null) {
             try {
                 const disposePromise = this.classifier.dispose()
-
-                await Promise.race([ disposePromise, timeoutPromise ])
+                await Promise.race([ disposePromise, util.sleep(5000) ])
             }
             catch (error) {
                 this.log("warning", `error during classifier cleanup: ${error}`)
```

package/speechflow-cli/src/speechflow-node-a2t-amazon.ts:

```diff
@@ -202,8 +202,9 @@ export default class SpeechFlowNodeA2TAmazon extends SpeechFlowNode {
         /* remember opening time to receive time zero offset */
         this.timeOpen = DateTime.now()
 
-        /* provide Duplex stream and internally attach to
+        /* provide Duplex stream and internally attach to Amazon Transcribe API */
         const self = this
+        const reads = new util.PromiseSet<void>()
         this.stream = new Stream.Duplex({
             writableObjectMode: true,
             readableObjectMode: true,
@@ -231,12 +232,29 @@ export default class SpeechFlowNodeA2TAmazon extends SpeechFlowNode {
                     callback()
                 }
             },
+            async final (callback) {
+                if (self.closing || self.client === null) {
+                    callback()
+                    return
+                }
+
+                /* await all read operations */
+                await reads.awaitAll()
+
+                util.run(
+                    () => self.client!.destroy(),
+                    (error: Error) => self.log("warning", `error closing Amazon Transcribe connection: ${error}`)
+                )
+                audioQueue.push(null) /* do not push null to stream, let Amazon Transcribe do it */
+                audioQueue.destroy()
+                callback()
+            },
             read (size) {
                 if (self.closing || self.queue === null) {
                     this.push(null)
                     return
                 }
-                self.queue.read().then((chunk) => {
+                reads.add(self.queue.read().then((chunk) => {
                     if (self.closing || self.queue === null) {
                         this.push(null)
                         return
@@ -252,20 +270,7 @@ export default class SpeechFlowNodeA2TAmazon extends SpeechFlowNode {
                 }).catch((error: unknown) => {
                     if (!self.closing && self.queue !== null)
                         self.log("error", `queue read error: ${util.ensureError(error).message}`)
-                })
-            },
-            final (callback) {
-                if (self.closing || self.client === null) {
-                    callback()
-                    return
-                }
-                util.run(
-                    () => self.client!.destroy(),
-                    (error: Error) => self.log("warning", `error closing Amazon Transcribe connection: ${error}`)
-                )
-                audioQueue.push(null) /* do not push null to stream, let Amazon Transcribe do it */
-                audioQueue.destroy()
-                callback()
+                }))
             }
         })
     }
```