speechflow 1.4.5 → 1.5.0
This diff shows the changes between publicly released versions of the package, as they appear in their respective public registries, and is provided for informational purposes only.
- package/CHANGELOG.md +28 -0
- package/README.md +220 -7
- package/etc/claude.md +70 -0
- package/etc/speechflow.yaml +5 -3
- package/etc/stx.conf +7 -0
- package/package.json +7 -6
- package/speechflow-cli/dst/speechflow-node-a2a-compressor-wt.d.ts +1 -0
- package/speechflow-cli/dst/speechflow-node-a2a-compressor-wt.js +155 -0
- package/speechflow-cli/dst/speechflow-node-a2a-compressor-wt.js.map +1 -0
- package/speechflow-cli/dst/speechflow-node-a2a-compressor.d.ts +15 -0
- package/speechflow-cli/dst/speechflow-node-a2a-compressor.js +287 -0
- package/speechflow-cli/dst/speechflow-node-a2a-compressor.js.map +1 -0
- package/speechflow-cli/dst/speechflow-node-a2a-dynamics-wt.d.ts +1 -0
- package/speechflow-cli/dst/speechflow-node-a2a-dynamics-wt.js +208 -0
- package/speechflow-cli/dst/speechflow-node-a2a-dynamics-wt.js.map +1 -0
- package/speechflow-cli/dst/speechflow-node-a2a-dynamics.d.ts +15 -0
- package/speechflow-cli/dst/speechflow-node-a2a-dynamics.js +312 -0
- package/speechflow-cli/dst/speechflow-node-a2a-dynamics.js.map +1 -0
- package/speechflow-cli/dst/speechflow-node-a2a-expander-wt.d.ts +1 -0
- package/speechflow-cli/dst/speechflow-node-a2a-expander-wt.js +161 -0
- package/speechflow-cli/dst/speechflow-node-a2a-expander-wt.js.map +1 -0
- package/speechflow-cli/dst/speechflow-node-a2a-expander.d.ts +13 -0
- package/speechflow-cli/dst/speechflow-node-a2a-expander.js +208 -0
- package/speechflow-cli/dst/speechflow-node-a2a-expander.js.map +1 -0
- package/speechflow-cli/dst/speechflow-node-a2a-ffmpeg.js +13 -3
- package/speechflow-cli/dst/speechflow-node-a2a-ffmpeg.js.map +1 -1
- package/speechflow-cli/dst/speechflow-node-a2a-filler.d.ts +14 -0
- package/speechflow-cli/dst/speechflow-node-a2a-filler.js +233 -0
- package/speechflow-cli/dst/speechflow-node-a2a-filler.js.map +1 -0
- package/speechflow-cli/dst/speechflow-node-a2a-gain.d.ts +12 -0
- package/speechflow-cli/dst/speechflow-node-a2a-gain.js +125 -0
- package/speechflow-cli/dst/speechflow-node-a2a-gain.js.map +1 -0
- package/speechflow-cli/dst/speechflow-node-a2a-gender.d.ts +0 -1
- package/speechflow-cli/dst/speechflow-node-a2a-gender.js +28 -12
- package/speechflow-cli/dst/speechflow-node-a2a-gender.js.map +1 -1
- package/speechflow-cli/dst/speechflow-node-a2a-meter.d.ts +1 -0
- package/speechflow-cli/dst/speechflow-node-a2a-meter.js +12 -8
- package/speechflow-cli/dst/speechflow-node-a2a-meter.js.map +1 -1
- package/speechflow-cli/dst/speechflow-node-a2a-mute.js +2 -1
- package/speechflow-cli/dst/speechflow-node-a2a-mute.js.map +1 -1
- package/speechflow-cli/dst/speechflow-node-a2a-rnnoise-wt.d.ts +1 -0
- package/speechflow-cli/dst/speechflow-node-a2a-rnnoise-wt.js +55 -0
- package/speechflow-cli/dst/speechflow-node-a2a-rnnoise-wt.js.map +1 -0
- package/speechflow-cli/dst/speechflow-node-a2a-rnnoise.d.ts +14 -0
- package/speechflow-cli/dst/speechflow-node-a2a-rnnoise.js +184 -0
- package/speechflow-cli/dst/speechflow-node-a2a-rnnoise.js.map +1 -0
- package/speechflow-cli/dst/speechflow-node-a2a-speex.d.ts +14 -0
- package/speechflow-cli/dst/speechflow-node-a2a-speex.js +156 -0
- package/speechflow-cli/dst/speechflow-node-a2a-speex.js.map +1 -0
- package/speechflow-cli/dst/speechflow-node-a2a-vad.js +3 -3
- package/speechflow-cli/dst/speechflow-node-a2a-vad.js.map +1 -1
- package/speechflow-cli/dst/speechflow-node-a2a-wav.js +22 -17
- package/speechflow-cli/dst/speechflow-node-a2a-wav.js.map +1 -1
- package/speechflow-cli/dst/speechflow-node-a2t-awstranscribe.d.ts +18 -0
- package/speechflow-cli/dst/speechflow-node-a2t-awstranscribe.js +317 -0
- package/speechflow-cli/dst/speechflow-node-a2t-awstranscribe.js.map +1 -0
- package/speechflow-cli/dst/speechflow-node-a2t-deepgram.js +15 -13
- package/speechflow-cli/dst/speechflow-node-a2t-deepgram.js.map +1 -1
- package/speechflow-cli/dst/speechflow-node-a2t-openaitranscribe.d.ts +19 -0
- package/speechflow-cli/dst/speechflow-node-a2t-openaitranscribe.js +351 -0
- package/speechflow-cli/dst/speechflow-node-a2t-openaitranscribe.js.map +1 -0
- package/speechflow-cli/dst/speechflow-node-t2a-awspolly.d.ts +16 -0
- package/speechflow-cli/dst/speechflow-node-t2a-awspolly.js +171 -0
- package/speechflow-cli/dst/speechflow-node-t2a-awspolly.js.map +1 -0
- package/speechflow-cli/dst/speechflow-node-t2a-elevenlabs.js +19 -14
- package/speechflow-cli/dst/speechflow-node-t2a-elevenlabs.js.map +1 -1
- package/speechflow-cli/dst/speechflow-node-t2a-kokoro.js +11 -6
- package/speechflow-cli/dst/speechflow-node-t2a-kokoro.js.map +1 -1
- package/speechflow-cli/dst/speechflow-node-t2t-awstranslate.d.ts +13 -0
- package/speechflow-cli/dst/speechflow-node-t2t-awstranslate.js +141 -0
- package/speechflow-cli/dst/speechflow-node-t2t-awstranslate.js.map +1 -0
- package/speechflow-cli/dst/speechflow-node-t2t-deepl.js +13 -15
- package/speechflow-cli/dst/speechflow-node-t2t-deepl.js.map +1 -1
- package/speechflow-cli/dst/speechflow-node-t2t-format.js +10 -15
- package/speechflow-cli/dst/speechflow-node-t2t-format.js.map +1 -1
- package/speechflow-cli/dst/speechflow-node-t2t-ollama.js +44 -31
- package/speechflow-cli/dst/speechflow-node-t2t-ollama.js.map +1 -1
- package/speechflow-cli/dst/speechflow-node-t2t-openai.js +44 -45
- package/speechflow-cli/dst/speechflow-node-t2t-openai.js.map +1 -1
- package/speechflow-cli/dst/speechflow-node-t2t-sentence.js +8 -8
- package/speechflow-cli/dst/speechflow-node-t2t-sentence.js.map +1 -1
- package/speechflow-cli/dst/speechflow-node-t2t-subtitle.js +10 -12
- package/speechflow-cli/dst/speechflow-node-t2t-subtitle.js.map +1 -1
- package/speechflow-cli/dst/speechflow-node-t2t-transformers.js +22 -27
- package/speechflow-cli/dst/speechflow-node-t2t-transformers.js.map +1 -1
- package/speechflow-cli/dst/speechflow-node-x2x-filter.d.ts +1 -0
- package/speechflow-cli/dst/speechflow-node-x2x-filter.js +50 -15
- package/speechflow-cli/dst/speechflow-node-x2x-filter.js.map +1 -1
- package/speechflow-cli/dst/speechflow-node-x2x-trace.js +17 -18
- package/speechflow-cli/dst/speechflow-node-x2x-trace.js.map +1 -1
- package/speechflow-cli/dst/speechflow-node-xio-device.js +13 -21
- package/speechflow-cli/dst/speechflow-node-xio-device.js.map +1 -1
- package/speechflow-cli/dst/speechflow-node-xio-mqtt.d.ts +1 -0
- package/speechflow-cli/dst/speechflow-node-xio-mqtt.js +22 -16
- package/speechflow-cli/dst/speechflow-node-xio-mqtt.js.map +1 -1
- package/speechflow-cli/dst/speechflow-node-xio-websocket.js +19 -19
- package/speechflow-cli/dst/speechflow-node-xio-websocket.js.map +1 -1
- package/speechflow-cli/dst/speechflow-node.d.ts +6 -3
- package/speechflow-cli/dst/speechflow-node.js +13 -2
- package/speechflow-cli/dst/speechflow-node.js.map +1 -1
- package/speechflow-cli/dst/speechflow-utils-audio-wt.d.ts +1 -0
- package/speechflow-cli/dst/speechflow-utils-audio-wt.js +124 -0
- package/speechflow-cli/dst/speechflow-utils-audio-wt.js.map +1 -0
- package/speechflow-cli/dst/speechflow-utils-audio.d.ts +13 -0
- package/speechflow-cli/dst/speechflow-utils-audio.js +137 -0
- package/speechflow-cli/dst/speechflow-utils-audio.js.map +1 -0
- package/speechflow-cli/dst/speechflow-utils.d.ts +18 -0
- package/speechflow-cli/dst/speechflow-utils.js +123 -35
- package/speechflow-cli/dst/speechflow-utils.js.map +1 -1
- package/speechflow-cli/dst/speechflow.js +69 -14
- package/speechflow-cli/dst/speechflow.js.map +1 -1
- package/speechflow-cli/etc/oxlint.jsonc +112 -11
- package/speechflow-cli/etc/stx.conf +2 -2
- package/speechflow-cli/etc/tsconfig.json +1 -1
- package/speechflow-cli/package.d/@shiguredo+rnnoise-wasm+2025.1.5.patch +25 -0
- package/speechflow-cli/package.json +102 -94
- package/speechflow-cli/src/lib.d.ts +24 -0
- package/speechflow-cli/src/speechflow-node-a2a-compressor-wt.ts +151 -0
- package/speechflow-cli/src/speechflow-node-a2a-compressor.ts +303 -0
- package/speechflow-cli/src/speechflow-node-a2a-expander-wt.ts +158 -0
- package/speechflow-cli/src/speechflow-node-a2a-expander.ts +212 -0
- package/speechflow-cli/src/speechflow-node-a2a-ffmpeg.ts +13 -3
- package/speechflow-cli/src/speechflow-node-a2a-filler.ts +223 -0
- package/speechflow-cli/src/speechflow-node-a2a-gain.ts +98 -0
- package/speechflow-cli/src/speechflow-node-a2a-gender.ts +31 -17
- package/speechflow-cli/src/speechflow-node-a2a-meter.ts +13 -9
- package/speechflow-cli/src/speechflow-node-a2a-mute.ts +3 -2
- package/speechflow-cli/src/speechflow-node-a2a-rnnoise-wt.ts +62 -0
- package/speechflow-cli/src/speechflow-node-a2a-rnnoise.ts +164 -0
- package/speechflow-cli/src/speechflow-node-a2a-speex.ts +137 -0
- package/speechflow-cli/src/speechflow-node-a2a-vad.ts +3 -3
- package/speechflow-cli/src/speechflow-node-a2a-wav.ts +20 -13
- package/speechflow-cli/src/speechflow-node-a2t-awstranscribe.ts +308 -0
- package/speechflow-cli/src/speechflow-node-a2t-deepgram.ts +15 -13
- package/speechflow-cli/src/speechflow-node-a2t-openaitranscribe.ts +337 -0
- package/speechflow-cli/src/speechflow-node-t2a-awspolly.ts +187 -0
- package/speechflow-cli/src/speechflow-node-t2a-elevenlabs.ts +19 -14
- package/speechflow-cli/src/speechflow-node-t2a-kokoro.ts +12 -7
- package/speechflow-cli/src/speechflow-node-t2t-awstranslate.ts +152 -0
- package/speechflow-cli/src/speechflow-node-t2t-deepl.ts +13 -15
- package/speechflow-cli/src/speechflow-node-t2t-format.ts +10 -15
- package/speechflow-cli/src/speechflow-node-t2t-ollama.ts +55 -42
- package/speechflow-cli/src/speechflow-node-t2t-openai.ts +58 -58
- package/speechflow-cli/src/speechflow-node-t2t-sentence.ts +10 -10
- package/speechflow-cli/src/speechflow-node-t2t-subtitle.ts +15 -16
- package/speechflow-cli/src/speechflow-node-t2t-transformers.ts +27 -32
- package/speechflow-cli/src/speechflow-node-x2x-filter.ts +20 -16
- package/speechflow-cli/src/speechflow-node-x2x-trace.ts +20 -19
- package/speechflow-cli/src/speechflow-node-xio-device.ts +15 -23
- package/speechflow-cli/src/speechflow-node-xio-mqtt.ts +23 -16
- package/speechflow-cli/src/speechflow-node-xio-websocket.ts +19 -19
- package/speechflow-cli/src/speechflow-node.ts +21 -8
- package/speechflow-cli/src/speechflow-utils-audio-wt.ts +172 -0
- package/speechflow-cli/src/speechflow-utils-audio.ts +147 -0
- package/speechflow-cli/src/speechflow-utils.ts +125 -32
- package/speechflow-cli/src/speechflow.ts +74 -17
- package/speechflow-ui-db/dst/index.js +31 -31
- package/speechflow-ui-db/etc/eslint.mjs +0 -1
- package/speechflow-ui-db/etc/tsc-client.json +3 -3
- package/speechflow-ui-db/package.json +11 -10
- package/speechflow-ui-db/src/app.vue +20 -6
- package/speechflow-ui-st/dst/index.js +26 -26
- package/speechflow-ui-st/etc/eslint.mjs +0 -1
- package/speechflow-ui-st/etc/tsc-client.json +3 -3
- package/speechflow-ui-st/package.json +11 -10
- package/speechflow-ui-st/src/app.vue +5 -12
@@ -65,18 +65,18 @@ export default class SpeechFlowNodeSubtitle extends SpeechFlowNode {
         if (typeof chunk.payload !== "string")
             throw new Error("chunk payload type must be string")
         const convertSingle = (
-            start:
-            end:
-            text:
-            word?:
-
+            start: Duration,
+            end: Duration,
+            text: string,
+            word?: string,
+            occurrence?: number
         ) => {
             if (word) {
-
+                occurrence ??= 1
                 let match = 1
                 word = word.replace(/[.*+?^${}()|[\]\\]/g, "\\$&")
                 text = text.replaceAll(new RegExp(`\\b${word}\\b`, "g"), (m) => {
-                    if (match++ ===
+                    if (match++ === occurrence)
                         return `<b>${m}</b>`
                     else
                         return m
@@ -102,12 +102,12 @@ export default class SpeechFlowNodeSubtitle extends SpeechFlowNode {
             output += convertSingle(chunk.timestampStart, chunk.timestampEnd, chunk.payload)
             const words = (chunk.meta.get("words") ?? []) as
                 { word: string, start: Duration, end: Duration }[]
-            const
+            const occurrences = new Map<string, number>()
             for (const word of words) {
-                let
-
-
-                output += convertSingle(word.start, word.end, chunk.payload, word.word,
+                let occurrence = occurrences.get(word.word) ?? 0
+                occurrence++
+                occurrences.set(word.word, occurrence)
+                output += convertSingle(word.start, word.end, chunk.payload, word.word, occurrence)
             }
         }
         else
@@ -222,9 +222,8 @@ export default class SpeechFlowNodeSubtitle extends SpeechFlowNode {
                     }
                 }
             },
-            handler: (request: HAPI.Request, h: HAPI.ResponseToolkit) =>
-
-            }
+            handler: (request: HAPI.Request, h: HAPI.ResponseToolkit) =>
+                h.response({}).code(204)
         })
 
         await this.hapi.start()
@@ -259,7 +258,7 @@ export default class SpeechFlowNodeSubtitle extends SpeechFlowNode {
         }
     }
 
-    /*
+    /* close node */
     async close () {
         /* close stream */
         if (this.stream !== null) {
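The hunks above extend `convertSingle` with an `occurrence` parameter so that, when the same word appears several times in a chunk, only the matching occurrence is wrapped in `<b>…</b>`; the second hunk counts occurrences per word and feeds that counter in. A minimal standalone sketch of just the highlighting logic, with an illustrative function name (not the node's actual API):

```ts
// Minimal sketch of the occurrence-based highlighting seen in the diff above.
// Only the regex/counter idea is reproduced; the name is illustrative.
function highlightOccurrence (text: string, word: string, occurrence = 1): string {
    let match = 1
    const escaped = word.replace(/[.*+?^${}()|[\]\\]/g, "\\$&")  /* escape regex metacharacters */
    return text.replaceAll(new RegExp(`\\b${escaped}\\b`, "g"), (m) =>
        match++ === occurrence ? `<b>${m}</b>` : m)
}

// highlightOccurrence("the cat and the dog", "the", 2)
// yields "the cat and <b>the</b> dog"
```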
@@ -46,11 +46,11 @@ export default class SpeechFlowNodeTransformers extends SpeechFlowNode {
                 "Preserve the original meaning, tone, and nuance.\n" +
                 "Directly translate text from English (EN) to fluent and natural German (DE) language.\n",
             chat: [
-                { role: "user",
+                { role: "user", content: "I love my wife." },
                 { role: "assistant", content: "Ich liebe meine Frau." },
-                { role: "user",
+                { role: "user", content: "The weather is wonderful." },
                 { role: "assistant", content: "Das Wetter ist wunderschön." },
-                { role: "user",
+                { role: "user", content: "The life is awesome." },
                 { role: "assistant", content: "Das Leben ist einfach großartig." }
             ]
         },
@@ -65,19 +65,19 @@ export default class SpeechFlowNodeTransformers extends SpeechFlowNode {
                 "Do not chat.\n" +
                 "Do not show any explanations.\n" +
                 "Do not show any introduction.\n" +
-                "Do not show any preamble
-                "Do not show any prolog
-                "Do not show any epilog
+                "Do not show any preamble.\n" +
+                "Do not show any prolog.\n" +
+                "Do not show any epilog.\n" +
                 "Get to the point.\n" +
                 "Preserve the original meaning, tone, and nuance.\n" +
                 "Directly translate text from German (DE) to fluent and natural English (EN) language.\n",
             chat: [
-                { role: "user",
+                { role: "user", content: "Ich liebe meine Frau." },
                 { role: "assistant", content: "I love my wife." },
-                { role: "user",
+                { role: "user", content: "Das Wetter ist wunderschön." },
                 { role: "assistant", content: "The weather is wonderful." },
-                { role: "user",
-                { role: "assistant", content: "The
+                { role: "user", content: "Das Leben ist einfach großartig." },
+                { role: "assistant", content: "The life is awesome." }
             ]
         }
     }
@@ -114,7 +114,7 @@ export default class SpeechFlowNodeTransformers extends SpeechFlowNode {
                 artifact += `:${progress.file}`
             let percent = 0
             if (typeof progress.loaded === "number" && typeof progress.total === "number")
-                percent = (progress.loaded
+                percent = (progress.loaded / progress.total) * 100
            else if (typeof progress.progress === "number")
                percent = progress.progress
            if (percent > 0)
@@ -123,7 +123,7 @@ export default class SpeechFlowNodeTransformers extends SpeechFlowNode {
         const interval = setInterval(() => {
             for (const [ artifact, percent ] of progressState) {
                 this.log("info", `downloaded ${percent.toFixed(2)}% of artifact "${artifact}"`)
-                if (percent >=
+                if (percent >= 100.0)
                     progressState.delete(artifact)
             }
         }, 1000)
@@ -163,9 +163,8 @@ export default class SpeechFlowNodeTransformers extends SpeechFlowNode {
         const translate = async (text: string) => {
             if (this.params.model === "OPUS") {
                 const result = await this.translator!(text)
-
-
-                (result as Transformers.TranslationSingle).translation_text
+                const single = Array.isArray(result) ? result[0] : result
+                return (single as Transformers.TranslationSingle).translation_text
             }
             else if (this.params.model === "SmolLM3") {
                 const key = `SmolLM3:${this.params.src}-${this.params.dst}`
@@ -184,13 +183,11 @@ export default class SpeechFlowNodeTransformers extends SpeechFlowNode {
                         skip_special_tokens: true
                     })
                 })
-                const
-
-
-                const response = typeof generatedText === "string" ?
+                const single = Array.isArray(result) ? result[0] : result
+                const generatedText = (single as Transformers.TextGenerationSingle).generated_text
+                return typeof generatedText === "string" ?
                     generatedText :
                     generatedText.at(-1)!.content
-                return response
             }
             else
                 throw new Error("invalid model")
@@ -205,21 +202,19 @@ export default class SpeechFlowNodeTransformers extends SpeechFlowNode {
             transform (chunk: SpeechFlowChunk, encoding, callback) {
                 if (Buffer.isBuffer(chunk.payload))
                     callback(new Error("invalid chunk payload type"))
+                else if (chunk.payload === "") {
+                    this.push(chunk)
+                    callback()
+                }
                 else {
-
+                    translate(chunk.payload).then((payload) => {
+                        chunk = chunk.clone()
+                        chunk.payload = payload
                         this.push(chunk)
                         callback()
-                }
-
-
-                    chunk = chunk.clone()
-                    chunk.payload = payload
-                    this.push(chunk)
-                    callback()
-                }).catch((err) => {
-                    callback(err)
-                })
-                }
+                    }).catch((err) => {
+                        callback(err)
+                    })
                 }
             },
             final (callback) {
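Several of the transformers hunks above normalize the pipeline result, which may be either a single object or an array of objects, before reading `translation_text` or `generated_text`. A minimal sketch of that normalization, with an illustrative helper name and a result type reduced to the one field used here:

```ts
// Minimal sketch of the "single result or array of results" normalization the
// hunks above apply to transformers.js pipeline output. The helper name and the
// reduced type are illustrative, not part of the package.
type TranslationSingle = { translation_text: string }

function firstTranslation (result: TranslationSingle | TranslationSingle[]): TranslationSingle {
    return Array.isArray(result) ? result[0] : result  /* take the first entry if an array came back */
}

// const single     = firstTranslation(await translator(text))
// const translated = single.translation_text
```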
@@ -9,12 +9,16 @@ import Stream from "node:stream"
 
 /* internal dependencies */
 import SpeechFlowNode, { SpeechFlowChunk } from "./speechflow-node"
+import * as utils from "./speechflow-utils"
 
 /* SpeechFlow node for data flow filtering (based on meta information) */
 export default class SpeechFlowNodeFilter extends SpeechFlowNode {
     /* declare official node name */
     public static name = "filter"
 
+    /* cached regular expression instance */
+    private cachedRegExp = new utils.CachedRegExp()
+
     /* construct node */
     constructor (id: string, cfg: { [ id: string ]: any }, opts: { [ id: string ]: any }, args: any[]) {
         super(id, cfg, opts, args)
@@ -50,33 +54,33 @@ export default class SpeechFlowNodeFilter extends SpeechFlowNode {
                 val2 instanceof RegExp ?
                     val2 :
                     typeof val2 === "string" ?
-
-
+                        this.cachedRegExp.compile(val2) :
+                        this.cachedRegExp.compile(val2.toString()))
+            if (regexp === null) {
+                /* fallback to literal string comparison on invalid regex */
+                this.log("warning", `invalid regular expression: "${val2}"`)
+                return (op === "~~" ? (str === val2) : (str !== val2))
+            }
             return (op === "~~" ? regexp.test(str) : !regexp.test(str))
         }
         else {
             /* non-equal comparison */
-            const coerceNum = (val: any) =>
-
-                typeof val === "string" && val.match(/^[\d+-]+$/) ? parseInt(val) : (
+            const coerceNum = (val: any) =>
+                typeof val === "number" ? val : (
+                    typeof val === "string" && val.match(/^[\d+-]+$/) ? Number.parseInt(val, 10) : (
                 typeof val === "string" && val.match(/^[\d.+-]+$/) ?
-                    parseFloat(val) :
+                    Number.parseFloat(val) :
                     Number(val)
                 )
             )
-            }
            const num1 = coerceNum(val1)
            const num2 = coerceNum(val2)
            return (
-                op === "<"
-
-
-
-
-                (num1 >= num2) :
-                op === ">" ?
-                (num1 > num2) :
-                false
+                op === "<" ? (num1 < num2) :
+                op === "<=" ? (num1 <= num2) :
+                op === ">=" ? (num1 >= num2) :
+                op === ">" ? (num1 > num2) :
+                false
            )
        }
    }
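The filter node now compiles user-supplied patterns through `utils.CachedRegExp` and falls back to a literal string comparison when `compile()` returns `null`. The cache itself lives in `speechflow-utils.ts` and is not part of this diff, so the following is only a hypothetical sketch of the contract the node appears to rely on:

```ts
// Hypothetical sketch of a regular-expression cache with the contract used by
// the filter node above: compile() returns a RegExp, or null for invalid
// patterns. The real utils.CachedRegExp implementation is not shown in this
// diff, so the details here are assumptions.
class CachedRegExpSketch {
    private cache = new Map<string, RegExp | null>()
    compile (pattern: string): RegExp | null {
        let regexp = this.cache.get(pattern)
        if (regexp === undefined) {
            try   { regexp = new RegExp(pattern) }
            catch { regexp = null }               /* remember invalid patterns, too */
            this.cache.set(pattern, regexp)
        }
        return regexp
    }
}
```

Caching the compiled (or failed) pattern avoids re-parsing the same expression for every chunk that flows through the filter.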
@@ -48,6 +48,23 @@ export default class SpeechFlowNodeTrace extends SpeechFlowNode {
             this.log(level, msg)
         }
 
+        /* helper functions for formatting */
+        const fmtTime = (t: Duration) => t.toFormat("hh:mm:ss.SSS")
+        const fmtMeta = (meta: Map<string, any>) => {
+            if (meta.size === 0)
+                return "none"
+            else
+                return `{ ${Array.from(meta.entries())
+                    .map(([ k, v ]) => `${k}: ${JSON.stringify(v)}`)
+                    .join(", ")
+                } }`
+        }
+        const fmtChunkBase = (chunk: SpeechFlowChunk) =>
+            `chunk: type=${chunk.type} ` +
+            `kind=${chunk.kind} ` +
+            `start=${fmtTime(chunk.timestampStart)} ` +
+            `end=${fmtTime(chunk.timestampEnd)} `
+
         /* provide Transform stream */
         const self = this
         this.stream = new Stream.Transform({
@@ -57,22 +74,9 @@ export default class SpeechFlowNodeTrace extends SpeechFlowNode {
             highWaterMark: 1,
             transform (chunk: SpeechFlowChunk, encoding, callback) {
                 let error: Error | undefined
-                const fmtTime = (t: Duration) => t.toFormat("hh:mm:ss.SSS")
-                const fmtMeta = (meta: Map<string, any>) => {
-                    if (meta.size === 0)
-                        return "none"
-                    else
-                        return `{ ${Array.from(meta.entries())
-                            .map(([ k, v ]) => `${k}: ${JSON.stringify(v)}`)
-                            .join(", ")
-                        } }`
-                }
                 if (Buffer.isBuffer(chunk.payload)) {
                     if (self.params.type === "audio")
-                        log("debug",
-                            `kind=${chunk.kind} ` +
-                            `start=${fmtTime(chunk.timestampStart)} ` +
-                            `end=${fmtTime(chunk.timestampEnd)} ` +
+                        log("debug", fmtChunkBase(chunk) +
                             `payload-type=Buffer payload-length=${chunk.payload.byteLength} ` +
                             `meta=${fmtMeta(chunk.meta)}`)
                     else
@@ -80,15 +84,12 @@ export default class SpeechFlowNodeTrace extends SpeechFlowNode {
                 }
                 else {
                     if (self.params.type === "text") {
-                        log("debug",
-                            `kind=${chunk.kind} ` +
-                            `start=${fmtTime(chunk.timestampStart)} ` +
-                            `end=${fmtTime(chunk.timestampEnd)} ` +
+                        log("debug", fmtChunkBase(chunk) +
                             `payload-type=String payload-length=${chunk.payload.length} ` +
                             `payload-content="${chunk.payload.toString()}" ` +
                             `meta=${fmtMeta(chunk.meta)}`)
                         if (self.params.dashboard !== "")
-                            self.
+                            self.sendDashboard("text", self.params.dashboard, chunk.kind, chunk.payload.toString())
                     }
                     else
                         error = new Error(`${self.params.type} chunk: seen String instead of Buffer chunk type`)
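The trace hunks hoist the `fmtTime`/`fmtMeta`/`fmtChunkBase` helpers out of the per-chunk `transform()` callback so both the audio and text branches share them. For reference, a standalone copy of `fmtMeta` (rewritten as a ternary, otherwise the same logic as above) showing the output format it produces:

```ts
// Standalone copy of the hoisted fmtMeta helper from the hunks above, kept here
// only to make its output format visible; Duration/fmtTime and the chunk type
// are omitted in this sketch.
const fmtMeta = (meta: Map<string, any>) =>
    meta.size === 0
        ? "none"
        : `{ ${Array.from(meta.entries())
            .map(([ k, v ]) => `${k}: ${JSON.stringify(v)}`)
            .join(", ")} }`

// fmtMeta(new Map())                                     yields "none"
// fmtMeta(new Map([[ "lang", "de" ], [ "score", 0.9 ]])) yields '{ lang: "de", score: 0.9 }'
```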
@@ -69,17 +69,15 @@ export default class SpeechFlowNodeDevice extends SpeechFlowNode {
         const devices = PortAudio.getDevices()
         for (const device of devices)
             this.log("info", `found audio device "${device.name}" ` +
-                `(inputs: ${device.maxInputChannels}, outputs: ${device.maxOutputChannels}`)
-        const device = devices.find((device) =>
-
-            (
-
-
-
-
-
-            )
-        })
+                `(inputs: ${device.maxInputChannels}, outputs: ${device.maxOutputChannels})`)
+        const device = devices.find((device) => (
+            ( ( mode === "r" && device.maxInputChannels > 0)
+             || (mode === "w" && device.maxOutputChannels > 0)
+             || (mode === "rw" && device.maxInputChannels > 0 && device.maxOutputChannels > 0)
+             || (mode === "any" && (device.maxInputChannels > 0 || device.maxOutputChannels > 0)))
+            && device.name.match(name)
+            && device.hostAPIName === api.name
+        ))
         if (!device)
             throw new Error(`invalid audio device "${name}" (of audio API type "${type}")`)
         return device
@@ -197,20 +195,14 @@ export default class SpeechFlowNodeDevice extends SpeechFlowNode {
     async close () {
         /* shutdown PortAudio */
         if (this.io !== null) {
-            await new Promise<void>((resolve
-                this.io!.abort((
-
-                    reject(err)
-                    else
-                        resolve()
+            await new Promise<void>((resolve) => {
+                this.io!.abort(() => {
+                    resolve()
                 })
             })
-            await new Promise<void>((resolve
-                this.io!.quit((
-
-                    reject(err)
-                    else
-                        resolve()
+            await new Promise<void>((resolve) => {
+                this.io!.quit(() => {
+                    resolve()
                 })
            })
            this.io = null
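The rewritten device lookup above selects the first PortAudio device whose channel counts match the requested mode, in addition to matching its name and host API. A standalone sketch of just the mode predicate, with the device type reduced to the fields the predicate actually touches:

```ts
// Standalone sketch of the mode-based device predicate from the hunks above.
// The Device interface is reduced to the fields used here; name and host-API
// matching from the original code are intentionally left out of this sketch.
type Mode = "r" | "w" | "rw" | "any"
interface Device {
    name: string
    hostAPIName: string
    maxInputChannels: number
    maxOutputChannels: number
}

const matchesMode = (device: Device, mode: Mode): boolean =>
    (mode === "r"   && device.maxInputChannels  > 0) ||
    (mode === "w"   && device.maxOutputChannels > 0) ||
    (mode === "rw"  && device.maxInputChannels  > 0 && device.maxOutputChannels > 0) ||
    (mode === "any" && (device.maxInputChannels > 0 || device.maxOutputChannels > 0))
```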
@@ -23,6 +23,7 @@ export default class SpeechFlowNodeMQTT extends SpeechFlowNode {
     /* internal state */
     private broker: MQTT.MqttClient | null = null
     private clientId: string = (new UUID(1)).format()
+    private chunkQueue: utils.SingleQueue<SpeechFlowChunk> | null = null
 
     /* construct node */
     constructor (id: string, cfg: { [ id: string ]: any }, opts: { [ id: string ]: any }, args: any[]) {
@@ -63,6 +64,10 @@ export default class SpeechFlowNodeMQTT extends SpeechFlowNode {
             throw new Error("writing to MQTT requires a topicWrite parameter")
         if ((this.params.mode === "r" || this.params.mode === "rw") && this.params.topicRead === "")
             throw new Error("reading from MQTT requires a topicRead parameter")
+        if (this.params.username !== "" && this.params.password === "")
+            throw new Error("username provided but password is missing")
+        if (this.params.username === "" && this.params.password !== "")
+            throw new Error("password provided but username is missing")
 
         /* connect remotely to a MQTT broker */
         this.broker = MQTT.connect(this.params.url, {
@@ -85,7 +90,7 @@ export default class SpeechFlowNodeMQTT extends SpeechFlowNode {
             if (this.params.mode !== "w" && !packet.sessionPresent)
                 this.broker!.subscribe([ this.params.topicRead ], (err) => {
                     if (err)
-                        this.log("
+                        this.log("warning", `failed to subscribe to MQTT topic "${this.params.topicRead}": ${err.message}`)
                 })
         })
         this.broker.on("reconnect", () => {
@@ -94,49 +99,48 @@ export default class SpeechFlowNodeMQTT extends SpeechFlowNode {
         this.broker.on("disconnect", (packet: MQTT.IDisconnectPacket) => {
             this.log("info", `connection closed to MQTT ${this.params.url}`)
         })
-
+        this.chunkQueue = new utils.SingleQueue<SpeechFlowChunk>()
         this.broker.on("message", (topic: string, payload: Buffer, packet: MQTT.IPublishPacket) => {
-            if (topic !== this.params.topicRead)
+            if (topic !== this.params.topicRead || this.params.mode === "w")
                 return
             try {
                 const chunk = utils.streamChunkDecode(payload)
-                chunkQueue
+                this.chunkQueue!.write(chunk)
            }
            catch (_err: any) {
                this.log("warning", `received invalid CBOR chunk from MQTT ${this.params.url}`)
            }
        })
-        const
-        const topicWrite = this.params.topicWrite
-        const type = this.params.type
-        const mode = this.params.mode
+        const self = this
        this.stream = new Stream.Duplex({
            writableObjectMode: true,
            readableObjectMode: true,
            decodeStrings: false,
            highWaterMark: 1,
            write (chunk: SpeechFlowChunk, encoding, callback) {
-                if (mode === "r")
+                if (self.params.mode === "r")
                    callback(new Error("write operation on read-only node"))
-                else if (chunk.type !== type)
-                    callback(new Error(`written chunk is not of ${type} type`))
-                else if (!broker
+                else if (chunk.type !== self.params.type)
+                    callback(new Error(`written chunk is not of ${self.params.type} type`))
+                else if (!self.broker!.connected)
                    callback(new Error("still no MQTT connection available"))
                else {
                    const data = Buffer.from(utils.streamChunkEncode(chunk))
-                    broker
+                    self.broker!.publish(self.params.topicWrite, data, { qos: 2, retain: false }, (err) => {
                        if (err)
-                            callback(new Error(`failed to publish to MQTT topic "${topicWrite}": ${err}`))
+                            callback(new Error(`failed to publish to MQTT topic "${self.params.topicWrite}": ${err}`))
                        else
                            callback()
                    })
                }
            },
            read (size: number) {
-                if (mode === "w")
+                if (self.params.mode === "w")
                    throw new Error("read operation on write-only node")
-                chunkQueue
+                self.chunkQueue!.read().then((chunk) => {
                    this.push(chunk, "binary")
+                }).catch((err: Error) => {
+                    self.log("warning", `read on chunk queue operation failed: ${err}`)
                })
            }
        })
@@ -144,6 +148,9 @@ export default class SpeechFlowNodeMQTT extends SpeechFlowNode {
 
     /* close node */
     async close () {
+        /* clear chunk queue reference */
+        this.chunkQueue = null
+
         /* close MQTT broker */
         if (this.broker !== null) {
             if (this.broker.connected)
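The MQTT node now buffers incoming messages in a `utils.SingleQueue<SpeechFlowChunk>` that the Duplex stream's `read()` side awaits, instead of pushing directly from the broker callback. `SingleQueue` is implemented in `speechflow-utils.ts` and is not shown in this diff, so the following is only a hypothetical sketch of the write/read contract the node appears to rely on:

```ts
// Hypothetical sketch of a single-consumer async queue with the contract used
// above: write() enqueues a chunk, read() resolves with the next one. The real
// utils.SingleQueue implementation is not part of this diff, so this is an
// assumption about its behavior, not a copy of it.
class SingleQueueSketch<T> {
    private items: T[] = []
    private waiters: Array<(item: T) => void> = []
    write (item: T): void {
        const waiter = this.waiters.shift()
        if (waiter) waiter(item)           /* hand the item directly to a pending read() */
        else        this.items.push(item)  /* otherwise buffer it until the next read()  */
    }
    read (): Promise<T> {
        const item = this.items.shift()
        if (item !== undefined)
            return Promise.resolve(item)
        return new Promise<T>((resolve) => { this.waiters.push(resolve) })
    }
}
```

This decoupling keeps the broker's `message` handler synchronous while `read()` pulls chunks on demand; the WebSocket hunks below use the same pattern, including the new `.catch()` handlers on the queue reads.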
@@ -66,7 +66,7 @@ export default class SpeechFlowNodeWebsocket extends SpeechFlowNode {
         const chunkQueue = new utils.SingleQueue<SpeechFlowChunk>()
         this.server = new ws.WebSocketServer({
             host: url.hostname,
-            port: Number.parseInt(url.port),
+            port: Number.parseInt(url.port, 10),
             path: url.pathname
         })
         this.server.on("listening", () => {
@@ -108,18 +108,17 @@ export default class SpeechFlowNodeWebsocket extends SpeechFlowNode {
         this.server.on("error", (error) => {
             this.log("error", `error of some connection on URL ${this.params.listen}: ${error.message}`)
         })
-        const
-        const mode = this.params.mode
+        const self = this
         this.stream = new Stream.Duplex({
             writableObjectMode: true,
             readableObjectMode: true,
             decodeStrings: false,
             highWaterMark: 1,
             write (chunk: SpeechFlowChunk, encoding, callback) {
-                if (mode === "r")
+                if (self.params.mode === "r")
                     callback(new Error("write operation on read-only node"))
-                else if (chunk.type !== type)
-                    callback(new Error(`written chunk is not of ${type} type`))
+                else if (chunk.type !== self.params.type)
+                    callback(new Error(`written chunk is not of ${self.params.type} type`))
                 else if (websockets.size === 0)
                     callback(new Error("still no Websocket connections available"))
                 else {
@@ -137,17 +136,18 @@ export default class SpeechFlowNodeWebsocket extends SpeechFlowNode {
                 }
                 Promise.all(results).then(() => {
                     callback()
-                }).catch((
-                    const error = new Error(errors.map((e) => e.message).join("; "))
+                }).catch((error: Error) => {
                     callback(error)
                 })
             }
         },
         read (size: number) {
-            if (mode === "w")
+            if (self.params.mode === "w")
                 throw new Error("read operation on write-only node")
             chunkQueue.read().then((chunk) => {
                 this.push(chunk, "binary")
+            }).catch((err: Error) => {
+                self.log("warning", `read on chunk queue operation failed: ${err}`)
            })
        }
    })
@@ -188,33 +188,33 @@ export default class SpeechFlowNodeWebsocket extends SpeechFlowNode {
             const chunk = utils.streamChunkDecode(buffer)
             chunkQueue.write(chunk)
         })
-
-
-        const type = this.params.type
-        const mode = this.params.mode
+        this.client.binaryType = "arraybuffer"
+        const self = this
         this.stream = new Stream.Duplex({
             writableObjectMode: true,
             readableObjectMode: true,
             decodeStrings: false,
             highWaterMark: 1,
             write (chunk: SpeechFlowChunk, encoding, callback) {
-                if (mode === "r")
+                if (self.params.mode === "r")
                     callback(new Error("write operation on read-only node"))
-                else if (chunk.type !== type)
-                    callback(new Error(`written chunk is not of ${type} type`))
-                else if (!client
+                else if (chunk.type !== self.params.type)
+                    callback(new Error(`written chunk is not of ${self.params.type} type`))
+                else if (!self.client!.OPEN)
                     callback(new Error("still no Websocket connection available"))
                 else {
                     const data = utils.streamChunkEncode(chunk)
-                    client
+                    self.client!.send(data)
                     callback()
                 }
             },
             read (size: number) {
-                if (mode === "w")
+                if (self.params.mode === "w")
                     throw new Error("read operation on write-only node")
                 chunkQueue.read().then((chunk) => {
                     this.push(chunk, "binary")
+                }).catch((err: Error) => {
+                    self.log("warning", `read on chunk queue operation failed: ${err}`)
                 })
             }
         })
@@ -5,7 +5,7 @@
 */
 
 /* standard dependencies */
-import Events from "node:events"
+import Events, { EventEmitter } from "node:events"
 import Stream from "node:stream"
 
 /* external dependencies */
@@ -62,6 +62,7 @@ export default class SpeechFlowNode extends Events.EventEmitter {
     timeOpen: DateTime<boolean> | undefined
     timeZero: DateTime<boolean> = DateTime.fromMillis(0)
     timeZeroOffset: Duration<boolean> = Duration.fromMillis(0)
+    _accessBus: ((name: string) => EventEmitter) | null = null
 
     /* the default constructor */
     constructor (
@@ -87,20 +88,32 @@ export default class SpeechFlowNode extends Events.EventEmitter {
     }
 
     /* receive external request */
-    async receiveRequest (args: any[]) {
+    async receiveRequest (args: any[]): Promise<void> {
         /* no-op */
     }
 
     /* send external response */
-    sendResponse (args: any[]) {
+    sendResponse (args: any[]): void {
         this.emit("send-response", args)
     }
 
-    /*
-
-
-
-
+    /* receive dashboard information */
+    async receiveDashboard (type: "audio" | "text", id: string, kind: "final" | "intermediate", value: number | string): Promise<void> {
+        /* no-op */
+    }
+
+    /* send dashboard information */
+    sendDashboard (type: "audio", id: string, kind: "final" | "intermediate", value: number): void
+    sendDashboard (type: "text", id: string, kind: "final" | "intermediate", value: string): void
+    sendDashboard (type: "audio" | "text", id: string, kind: "final" | "intermediate", value: number | string): void {
+        this.emit("send-dashboard", { type, id, kind, value })
+    }
+
+    /* access communication bus */
+    accessBus (name: string): EventEmitter {
+        if (this._accessBus === null)
+            throw new Error("access to communication bus still not possible")
+        return this._accessBus(name)
     }
 
     /* INTERNAL: utility function: create "params" attribute from constructor of sub-classes */