speechflow 1.3.2 → 1.4.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (154)
  1. package/CHANGELOG.md +17 -0
  2. package/etc/stx.conf +54 -58
  3. package/package.json +25 -106
  4. package/speechflow-cli/etc/stx.conf +77 -0
  5. package/speechflow-cli/package.json +116 -0
  6. package/speechflow-cli/src/speechflow-node-a2a-meter.ts +217 -0
  7. package/{src → speechflow-cli/src}/speechflow-node-a2a-vad.ts +14 -21
  8. package/{src → speechflow-cli/src}/speechflow-node-a2t-deepgram.ts +21 -38
  9. package/{src → speechflow-cli/src}/speechflow-node-t2a-elevenlabs.ts +10 -16
  10. package/speechflow-cli/src/speechflow-node-t2t-subtitle.ts +276 -0
  11. package/{src → speechflow-cli/src}/speechflow-node-x2x-filter.ts +5 -1
  12. package/{src → speechflow-cli/src}/speechflow-node-x2x-trace.ts +15 -7
  13. package/{src → speechflow-cli/src}/speechflow-node.ts +7 -0
  14. package/{src → speechflow-cli/src}/speechflow.ts +81 -25
  15. package/speechflow-ui-db/etc/eslint.mjs +106 -0
  16. package/speechflow-ui-db/etc/htmllint.json +55 -0
  17. package/speechflow-ui-db/etc/stx.conf +79 -0
  18. package/speechflow-ui-db/etc/stylelint.js +46 -0
  19. package/speechflow-ui-db/etc/stylelint.yaml +33 -0
  20. package/speechflow-ui-db/etc/tsc-client.json +30 -0
  21. package/speechflow-ui-db/etc/tsc.node.json +9 -0
  22. package/speechflow-ui-db/etc/vite-client.mts +63 -0
  23. package/speechflow-ui-db/package.d/htmllint-cli+0.0.7.patch +20 -0
  24. package/speechflow-ui-db/package.json +75 -0
  25. package/speechflow-ui-db/src/app-icon.ai +1989 -4
  26. package/speechflow-ui-db/src/app-icon.svg +26 -0
  27. package/speechflow-ui-db/src/app.styl +64 -0
  28. package/speechflow-ui-db/src/app.vue +221 -0
  29. package/speechflow-ui-db/src/index.html +23 -0
  30. package/speechflow-ui-db/src/index.ts +26 -0
  31. package/{dst/speechflow.d.ts → speechflow-ui-db/src/lib.d.ts} +5 -3
  32. package/speechflow-ui-db/src/tsconfig.json +3 -0
  33. package/speechflow-ui-st/etc/eslint.mjs +106 -0
  34. package/speechflow-ui-st/etc/htmllint.json +55 -0
  35. package/speechflow-ui-st/etc/stx.conf +79 -0
  36. package/speechflow-ui-st/etc/stylelint.js +46 -0
  37. package/speechflow-ui-st/etc/stylelint.yaml +33 -0
  38. package/speechflow-ui-st/etc/tsc-client.json +30 -0
  39. package/speechflow-ui-st/etc/tsc.node.json +9 -0
  40. package/speechflow-ui-st/etc/vite-client.mts +63 -0
  41. package/speechflow-ui-st/package.d/htmllint-cli+0.0.7.patch +20 -0
  42. package/speechflow-ui-st/package.json +79 -0
  43. package/speechflow-ui-st/src/app-icon.ai +1989 -4
  44. package/speechflow-ui-st/src/app-icon.svg +26 -0
  45. package/speechflow-ui-st/src/app.styl +64 -0
  46. package/speechflow-ui-st/src/app.vue +142 -0
  47. package/speechflow-ui-st/src/index.html +23 -0
  48. package/speechflow-ui-st/src/index.ts +26 -0
  49. package/speechflow-ui-st/src/lib.d.ts +9 -0
  50. package/speechflow-ui-st/src/tsconfig.json +3 -0
  51. package/dst/speechflow-node-a2a-ffmpeg.d.ts +0 -13
  52. package/dst/speechflow-node-a2a-ffmpeg.js +0 -153
  53. package/dst/speechflow-node-a2a-ffmpeg.js.map +0 -1
  54. package/dst/speechflow-node-a2a-gender.d.ts +0 -20
  55. package/dst/speechflow-node-a2a-gender.js +0 -349
  56. package/dst/speechflow-node-a2a-gender.js.map +0 -1
  57. package/dst/speechflow-node-a2a-meter.d.ts +0 -14
  58. package/dst/speechflow-node-a2a-meter.js +0 -196
  59. package/dst/speechflow-node-a2a-meter.js.map +0 -1
  60. package/dst/speechflow-node-a2a-mute.d.ts +0 -17
  61. package/dst/speechflow-node-a2a-mute.js +0 -117
  62. package/dst/speechflow-node-a2a-mute.js.map +0 -1
  63. package/dst/speechflow-node-a2a-vad.d.ts +0 -19
  64. package/dst/speechflow-node-a2a-vad.js +0 -383
  65. package/dst/speechflow-node-a2a-vad.js.map +0 -1
  66. package/dst/speechflow-node-a2a-wav.d.ts +0 -11
  67. package/dst/speechflow-node-a2a-wav.js +0 -211
  68. package/dst/speechflow-node-a2a-wav.js.map +0 -1
  69. package/dst/speechflow-node-a2t-deepgram.d.ts +0 -19
  70. package/dst/speechflow-node-a2t-deepgram.js +0 -345
  71. package/dst/speechflow-node-a2t-deepgram.js.map +0 -1
  72. package/dst/speechflow-node-t2a-elevenlabs.d.ts +0 -18
  73. package/dst/speechflow-node-t2a-elevenlabs.js +0 -244
  74. package/dst/speechflow-node-t2a-elevenlabs.js.map +0 -1
  75. package/dst/speechflow-node-t2a-kokoro.d.ts +0 -14
  76. package/dst/speechflow-node-t2a-kokoro.js +0 -155
  77. package/dst/speechflow-node-t2a-kokoro.js.map +0 -1
  78. package/dst/speechflow-node-t2t-deepl.d.ts +0 -15
  79. package/dst/speechflow-node-t2t-deepl.js +0 -146
  80. package/dst/speechflow-node-t2t-deepl.js.map +0 -1
  81. package/dst/speechflow-node-t2t-format.d.ts +0 -11
  82. package/dst/speechflow-node-t2t-format.js +0 -82
  83. package/dst/speechflow-node-t2t-format.js.map +0 -1
  84. package/dst/speechflow-node-t2t-ollama.d.ts +0 -13
  85. package/dst/speechflow-node-t2t-ollama.js +0 -247
  86. package/dst/speechflow-node-t2t-ollama.js.map +0 -1
  87. package/dst/speechflow-node-t2t-openai.d.ts +0 -13
  88. package/dst/speechflow-node-t2t-openai.js +0 -227
  89. package/dst/speechflow-node-t2t-openai.js.map +0 -1
  90. package/dst/speechflow-node-t2t-sentence.d.ts +0 -17
  91. package/dst/speechflow-node-t2t-sentence.js +0 -250
  92. package/dst/speechflow-node-t2t-sentence.js.map +0 -1
  93. package/dst/speechflow-node-t2t-subtitle.d.ts +0 -12
  94. package/dst/speechflow-node-t2t-subtitle.js +0 -166
  95. package/dst/speechflow-node-t2t-subtitle.js.map +0 -1
  96. package/dst/speechflow-node-t2t-transformers.d.ts +0 -14
  97. package/dst/speechflow-node-t2t-transformers.js +0 -265
  98. package/dst/speechflow-node-t2t-transformers.js.map +0 -1
  99. package/dst/speechflow-node-x2x-filter.d.ts +0 -11
  100. package/dst/speechflow-node-x2x-filter.js +0 -117
  101. package/dst/speechflow-node-x2x-filter.js.map +0 -1
  102. package/dst/speechflow-node-x2x-trace.d.ts +0 -11
  103. package/dst/speechflow-node-x2x-trace.js +0 -104
  104. package/dst/speechflow-node-x2x-trace.js.map +0 -1
  105. package/dst/speechflow-node-xio-device.d.ts +0 -13
  106. package/dst/speechflow-node-xio-device.js +0 -230
  107. package/dst/speechflow-node-xio-device.js.map +0 -1
  108. package/dst/speechflow-node-xio-file.d.ts +0 -11
  109. package/dst/speechflow-node-xio-file.js +0 -216
  110. package/dst/speechflow-node-xio-file.js.map +0 -1
  111. package/dst/speechflow-node-xio-mqtt.d.ts +0 -13
  112. package/dst/speechflow-node-xio-mqtt.js +0 -188
  113. package/dst/speechflow-node-xio-mqtt.js.map +0 -1
  114. package/dst/speechflow-node-xio-websocket.d.ts +0 -13
  115. package/dst/speechflow-node-xio-websocket.js +0 -278
  116. package/dst/speechflow-node-xio-websocket.js.map +0 -1
  117. package/dst/speechflow-node.d.ts +0 -63
  118. package/dst/speechflow-node.js +0 -177
  119. package/dst/speechflow-node.js.map +0 -1
  120. package/dst/speechflow-utils.d.ts +0 -74
  121. package/dst/speechflow-utils.js +0 -519
  122. package/dst/speechflow-utils.js.map +0 -1
  123. package/dst/speechflow.js +0 -787
  124. package/dst/speechflow.js.map +0 -1
  125. package/src/speechflow-node-a2a-meter.ts +0 -177
  126. package/src/speechflow-node-t2t-subtitle.ts +0 -149
  127. /package/{etc → speechflow-cli/etc}/biome.jsonc +0 -0
  128. /package/{etc → speechflow-cli/etc}/eslint.mjs +0 -0
  129. /package/{etc → speechflow-cli/etc}/oxlint.jsonc +0 -0
  130. /package/{etc → speechflow-cli/etc}/speechflow.bat +0 -0
  131. /package/{etc → speechflow-cli/etc}/speechflow.sh +0 -0
  132. /package/{etc → speechflow-cli/etc}/speechflow.yaml +0 -0
  133. /package/{etc → speechflow-cli/etc}/tsconfig.json +0 -0
  134. /package/{package.d → speechflow-cli/package.d}/@ericedouard+vad-node-realtime+0.2.0.patch +0 -0
  135. /package/{src → speechflow-cli/src}/lib.d.ts +0 -0
  136. /package/{src → speechflow-cli/src}/speechflow-logo.ai +0 -0
  137. /package/{src → speechflow-cli/src}/speechflow-logo.svg +0 -0
  138. /package/{src → speechflow-cli/src}/speechflow-node-a2a-ffmpeg.ts +0 -0
  139. /package/{src → speechflow-cli/src}/speechflow-node-a2a-gender.ts +0 -0
  140. /package/{src → speechflow-cli/src}/speechflow-node-a2a-mute.ts +0 -0
  141. /package/{src → speechflow-cli/src}/speechflow-node-a2a-wav.ts +0 -0
  142. /package/{src → speechflow-cli/src}/speechflow-node-t2a-kokoro.ts +0 -0
  143. /package/{src → speechflow-cli/src}/speechflow-node-t2t-deepl.ts +0 -0
  144. /package/{src → speechflow-cli/src}/speechflow-node-t2t-format.ts +0 -0
  145. /package/{src → speechflow-cli/src}/speechflow-node-t2t-ollama.ts +0 -0
  146. /package/{src → speechflow-cli/src}/speechflow-node-t2t-openai.ts +0 -0
  147. /package/{src → speechflow-cli/src}/speechflow-node-t2t-sentence.ts +0 -0
  148. /package/{src → speechflow-cli/src}/speechflow-node-t2t-transformers.ts +0 -0
  149. /package/{src → speechflow-cli/src}/speechflow-node-xio-device.ts +0 -0
  150. /package/{src → speechflow-cli/src}/speechflow-node-xio-file.ts +0 -0
  151. /package/{src → speechflow-cli/src}/speechflow-node-xio-mqtt.ts +0 -0
  152. /package/{src → speechflow-cli/src}/speechflow-node-xio-websocket.ts +0 -0
  153. /package/{src → speechflow-cli/src}/speechflow-utils.ts +0 -0
  154. /package/{tsconfig.json → speechflow-cli/tsconfig.json} +0 -0
@@ -0,0 +1,276 @@
1
+ /*
2
+ ** SpeechFlow - Speech Processing Flow Graph
3
+ ** Copyright (c) 2024-2025 Dr. Ralf S. Engelschall <rse@engelschall.com>
4
+ ** Licensed under GPL 3.0 <https://spdx.org/licenses/GPL-3.0-only>
5
+ */
6
+
7
+ /* standard dependencies */
8
+ import path from "node:path"
9
+ import http from "node:http"
10
+ import Stream from "node:stream"
11
+
12
+ /* external dependencies */
13
+ import { Duration } from "luxon"
14
+ import * as HAPI from "@hapi/hapi"
15
+ import Inert from "@hapi/inert"
16
+ import WebSocket from "ws"
17
+ import HAPIWebSocket from "hapi-plugin-websocket"
18
+
19
+ /* internal dependencies */
20
+ import SpeechFlowNode, { SpeechFlowChunk } from "./speechflow-node"
21
+
22
+ type wsPeerCtx = {
23
+ peer: string
24
+ }
25
+ type wsPeerInfo = {
26
+ ctx: wsPeerCtx
27
+ ws: WebSocket
28
+ req: http.IncomingMessage
29
+ }
30
+
31
+ /* SpeechFlow node for subtitle (text-to-text) "translations" */
32
+ export default class SpeechFlowNodeSubtitle extends SpeechFlowNode {
33
+ /* declare official node name */
34
+ public static name = "subtitle"
35
+
36
+ /* internal state */
37
+ private sequenceNo = 1
38
+ private hapi: HAPI.Server | null = null
39
+
40
+ /* construct node */
41
+ constructor (id: string, cfg: { [ id: string ]: any }, opts: { [ id: string ]: any }, args: any[]) {
42
+ super(id, cfg, opts, args)
43
+
44
+ /* declare node configuration parameters */
45
+ this.configure({
46
+ format: { type: "string", pos: 0, val: "srt", match: /^(?:srt|vtt)$/ },
47
+ words: { type: "boolean", val: false },
48
+ mode: { type: "string", val: "export", match: /^(?:export|render)$/ },
49
+ addr: { type: "string", val: "127.0.0.1" },
50
+ port: { type: "number", val: 8585 }
51
+ })
52
+
53
+ /* declare node input/output format */
54
+ this.input = "text"
55
+ this.output = this.params.mode === "export" ? "text" : "none"
56
+ }
57
+
58
+ /* open node */
59
+ async open () {
60
+ if (this.params.mode === "export") {
61
+ this.sequenceNo = 1
62
+
63
+ /* provide text-to-subtitle conversion */
64
+ const convert = async (chunk: SpeechFlowChunk) => {
65
+ if (typeof chunk.payload !== "string")
66
+ throw new Error("chunk payload type must be string")
67
+ const convertSingle = (
68
+ start: Duration,
69
+ end: Duration,
70
+ text: string,
71
+ word?: string,
72
+ occurence?: number
73
+ ) => {
74
+ if (word) {
75
+ occurence ??= 1
76
+ let match = 1
77
+ word = word.replace(/[.*+?^${}()|[\]\\]/g, "\\$&")
78
+ text = text.replaceAll(new RegExp(`\\b${word}\\b`, "g"), (m) => {
79
+ if (match++ === occurence)
80
+ return `<b>${m}</b>`
81
+ else
82
+ return m
83
+ })
84
+ }
85
+ if (this.params.format === "srt") {
86
+ const startFmt = start.toFormat("hh:mm:ss,SSS")
87
+ const endFmt = end.toFormat("hh:mm:ss,SSS")
88
+ text = `${this.sequenceNo++}\n` +
89
+ `${startFmt} --> ${endFmt}\n` +
90
+ `${text}\n\n`
91
+ }
92
+ else if (this.params.format === "vtt") {
93
+ const startFmt = start.toFormat("hh:mm:ss.SSS")
94
+ const endFmt = end.toFormat("hh:mm:ss.SSS")
95
+ text = `${startFmt} --> ${endFmt}\n` +
96
+ `${text}\n\n`
97
+ }
98
+ return text
99
+ }
100
+ let output = ""
101
+ if (this.params.words) {
102
+ output += convertSingle(chunk.timestampStart, chunk.timestampEnd, chunk.payload)
103
+ const words = (chunk.meta.get("words") ?? []) as
104
+ { word: string, start: Duration, end: Duration }[]
105
+ const occurences = new Map<string, number>()
106
+ for (const word of words) {
107
+ let occurence = occurences.get(word.word) ?? 0
108
+ occurence++
109
+ occurences.set(word.word, occurence)
110
+ output += convertSingle(word.start, word.end, chunk.payload, word.word, occurence)
111
+ }
112
+ }
113
+ else
114
+ output += convertSingle(chunk.timestampStart, chunk.timestampEnd, chunk.payload)
115
+ return output
116
+ }
117
+
118
+ /* establish a duplex stream */
119
+ const self = this
120
+ let firstChunk = true
121
+ this.stream = new Stream.Transform({
122
+ readableObjectMode: true,
123
+ writableObjectMode: true,
124
+ decodeStrings: false,
125
+ highWaterMark: 1,
126
+ transform (chunk: SpeechFlowChunk, encoding, callback) {
127
+ if (firstChunk && self.params.format === "vtt") {
128
+ this.push(new SpeechFlowChunk(
129
+ Duration.fromMillis(0), Duration.fromMillis(0),
130
+ "final", "text",
131
+ "WEBVTT\n\n"
132
+ ))
133
+ firstChunk = false
134
+ }
135
+ if (Buffer.isBuffer(chunk.payload))
136
+ callback(new Error("invalid chunk payload type"))
137
+ else {
138
+ if (chunk.payload === "") {
139
+ this.push(chunk)
140
+ callback()
141
+ }
142
+ else {
143
+ convert(chunk).then((payload) => {
144
+ const chunkNew = chunk.clone()
145
+ chunkNew.payload = payload
146
+ this.push(chunkNew)
147
+ callback()
148
+ }).catch((err) => {
149
+ callback(err)
150
+ })
151
+ }
152
+ }
153
+ },
154
+ final (callback) {
155
+ this.push(null)
156
+ callback()
157
+ }
158
+ })
159
+ }
160
+ else if (this.params.mode === "render") {
161
+ /* establish REST/WebSocket API */
162
+ const wsPeers = new Map<string, wsPeerInfo>()
163
+ this.hapi = new HAPI.Server({
164
+ address: this.params.addr,
165
+ port: this.params.port
166
+ })
167
+ await this.hapi.register({ plugin: Inert })
168
+ await this.hapi.register({ plugin: HAPIWebSocket })
169
+ this.hapi.events.on({ name: "request", channels: [ "error" ] }, (request: HAPI.Request, event: HAPI.RequestEvent, tags: { [key: string]: true }) => {
170
+ if (event.error instanceof Error)
171
+ this.log("error", `HAPI: request-error: ${event.error.message}`)
172
+ else
173
+ this.log("error", `HAPI: request-error: ${event.error}`)
174
+ })
175
+ this.hapi.events.on("log", (event: HAPI.LogEvent, tags: { [key: string]: true }) => {
176
+ if (tags.error) {
177
+ const err = event.error
178
+ if (err instanceof Error)
179
+ this.log("error", `HAPI: log: ${err.message}`)
180
+ else
181
+ this.log("error", `HAPI: log: ${err}`)
182
+ }
183
+ })
184
+ this.hapi.route({
185
+ method: "GET",
186
+ path: "/{param*}",
187
+ handler: {
188
+ directory: {
189
+ path: path.join(__dirname, "../../speechflow-ui-st/dst"),
190
+ redirectToSlash: true,
191
+ index: true
192
+ }
193
+ }
194
+ })
195
+ this.hapi.route({
196
+ method: "POST",
197
+ path: "/api",
198
+ options: {
199
+ payload: {
200
+ output: "data",
201
+ parse: true,
202
+ allow: "application/json"
203
+ },
204
+ plugins: {
205
+ websocket: {
206
+ autoping: 30 * 1000,
207
+ connect: (args: any) => {
208
+ const ctx: wsPeerCtx = args.ctx
209
+ const ws: WebSocket = args.ws
210
+ const req: http.IncomingMessage = args.req
211
+ const peer = `${req.socket.remoteAddress}:${req.socket.remotePort}`
212
+ ctx.peer = peer
213
+ wsPeers.set(peer, { ctx, ws, req })
214
+ this.log("info", `HAPI: WebSocket: connect: peer ${peer}`)
215
+ },
216
+ disconnect: (args: any) => {
217
+ const ctx: wsPeerCtx = args.ctx
218
+ const peer = ctx.peer
219
+ wsPeers.delete(peer)
220
+ this.log("info", `HAPI: WebSocket: disconnect: peer ${peer}`)
221
+ }
222
+ }
223
+ }
224
+ },
225
+ handler: (request: HAPI.Request, h: HAPI.ResponseToolkit) => {
226
+ return h.response({}).code(204)
227
+ }
228
+ })
229
+
230
+ await this.hapi.start()
231
+ this.log("info", `HAPI: started REST/WebSocket network service: http://${this.params.addr}:${this.params.port}`)
232
+
233
+ const emit = (chunk: SpeechFlowChunk) => {
234
+ const data = JSON.stringify(chunk)
235
+ for (const info of wsPeers.values())
236
+ info.ws.send(data)
237
+ }
238
+
239
+ this.stream = new Stream.Writable({
240
+ objectMode: true,
241
+ decodeStrings: false,
242
+ highWaterMark: 1,
243
+ write (chunk: SpeechFlowChunk, encoding, callback) {
244
+ if (Buffer.isBuffer(chunk.payload))
245
+ callback(new Error("invalid chunk payload type"))
246
+ else {
247
+ if (chunk.payload === "")
248
+ callback()
249
+ else {
250
+ emit(chunk)
251
+ callback()
252
+ }
253
+ }
254
+ },
255
+ final (callback) {
256
+ callback()
257
+ }
258
+ })
259
+ }
260
+ }
261
+
262
+ /* open node */
263
+ async close () {
264
+ /* close stream */
265
+ if (this.stream !== null) {
266
+ this.stream.destroy()
267
+ this.stream = null
268
+ }
269
+
270
+ /* shutdown HAPI */
271
+ if (this.hapi !== null) {
272
+ await this.hapi.stop()
273
+ this.hapi = null
274
+ }
275
+ }
276
+ }
@@ -23,7 +23,7 @@ export default class SpeechFlowNodeFilter extends SpeechFlowNode {
23
23
  this.configure({
24
24
  type: { type: "string", pos: 0, val: "audio", match: /^(?:audio|text)$/ },
25
25
  name: { type: "string", pos: 1, val: "filter", match: /^.+?$/ },
26
- var: { type: "string", pos: 2, val: "", match: /^(?:meta:.+|payload:(?:length|text)|time:(?:start|end))$/ },
26
+ var: { type: "string", pos: 2, val: "", match: /^(?:meta:.+|payload:(?:length|text)|time:(?:start|end)|kind|type)$/ },
27
27
  op: { type: "string", pos: 3, val: "==", match: /^(?:<|<=|==|!=|~~|!~|>=|>)$/ },
28
28
  val: { type: "string", pos: 4, val: "", match: /^.*$/ }
29
29
  })
@@ -94,6 +94,10 @@ export default class SpeechFlowNodeFilter extends SpeechFlowNode {
94
94
  const m = self.params.var.match(/^meta:(.+)$/)
95
95
  if (m !== null)
96
96
  val1 = chunk.meta.get(m[1]) ?? ""
97
+ else if (self.params.var === "kind")
98
+ val1 = chunk.kind
99
+ else if (self.params.var === "type")
100
+ val1 = chunk.type
97
101
  else if (self.params.var === "payload:length")
98
102
  val1 = chunk.payload.length
99
103
  else if (self.params.var === "payload:text")
@@ -24,10 +24,15 @@ export default class SpeechFlowNodeTrace extends SpeechFlowNode {
24
24
 
25
25
  /* declare node configuration parameters */
26
26
  this.configure({
27
- type: { type: "string", pos: 0, val: "audio", match: /^(?:audio|text)$/ },
28
- name: { type: "string", pos: 1, val: "trace" }
27
+ type: { type: "string", pos: 0, val: "audio", match: /^(?:audio|text)$/ },
28
+ name: { type: "string", pos: 1, val: "trace" },
29
+ dashboard: { type: "string", val: "" }
29
30
  })
30
31
 
32
+ /* sanity check parameters */
33
+ if (this.params.dashboard !== "" && this.params.type === "audio")
34
+ throw new Error("only trace nodes of type \"text\" can export to dashboard")
35
+
31
36
  /* declare node input/output format */
32
37
  this.input = this.params.type
33
38
  this.output = this.params.type
@@ -44,7 +49,7 @@ export default class SpeechFlowNodeTrace extends SpeechFlowNode {
44
49
  }
45
50
 
46
51
  /* provide Transform stream */
47
- const type = this.params.type
52
+ const self = this
48
53
  this.stream = new Stream.Transform({
49
54
  writableObjectMode: true,
50
55
  readableObjectMode: true,
@@ -63,7 +68,7 @@ export default class SpeechFlowNodeTrace extends SpeechFlowNode {
63
68
  } }`
64
69
  }
65
70
  if (Buffer.isBuffer(chunk.payload)) {
66
- if (type === "audio")
71
+ if (self.params.type === "audio")
67
72
  log("debug", `chunk: type=${chunk.type} ` +
68
73
  `kind=${chunk.kind} ` +
69
74
  `start=${fmtTime(chunk.timestampStart)} ` +
@@ -71,10 +76,10 @@ export default class SpeechFlowNodeTrace extends SpeechFlowNode {
71
76
  `payload-type=Buffer payload-length=${chunk.payload.byteLength} ` +
72
77
  `meta=${fmtMeta(chunk.meta)}`)
73
78
  else
74
- error = new Error(`${type} chunk: seen Buffer instead of String chunk type`)
79
+ error = new Error(`${self.params.type} chunk: seen Buffer instead of String chunk type`)
75
80
  }
76
81
  else {
77
- if (type === "text")
82
+ if (self.params.type === "text") {
78
83
  log("debug", `chunk: type=${chunk.type} ` +
79
84
  `kind=${chunk.kind} ` +
80
85
  `start=${fmtTime(chunk.timestampStart)} ` +
@@ -82,8 +87,11 @@ export default class SpeechFlowNodeTrace extends SpeechFlowNode {
82
87
  `payload-type=String payload-length=${chunk.payload.length} ` +
83
88
  `payload-content="${chunk.payload.toString()}" ` +
84
89
  `meta=${fmtMeta(chunk.meta)}`)
90
+ if (self.params.dashboard !== "")
91
+ self.dashboardInfo("text", self.params.dashboard, chunk.kind, chunk.payload.toString())
92
+ }
85
93
  else
86
- error = new Error(`${type} chunk: seen String instead of Buffer chunk type`)
94
+ error = new Error(`${self.params.type} chunk: seen String instead of Buffer chunk type`)
87
95
  }
88
96
  if (error !== undefined)
89
97
  callback(error)
@@ -96,6 +96,13 @@ export default class SpeechFlowNode extends Events.EventEmitter {
96
96
  this.emit("send-response", args)
97
97
  }
98
98
 
99
+ /* emit dashboard information */
100
+ dashboardInfo (type: "audio", id: string, kind: "final" | "intermediate", value: number): void
101
+ dashboardInfo (type: "text", id: string, kind: "final" | "intermediate", value: string): void
102
+ dashboardInfo (type: "audio" | "text", id: string, kind: "final" | "intermediate", value: number | string): void {
103
+ this.emit("dashboard-info", { type, id, kind, value })
104
+ }
105
+
99
106
  /* INTERNAL: utility function: create "params" attribute from constructor of sub-classes */
100
107
  configure (spec: { [ id: string ]: { type: string, pos?: number, val?: any, match?: RegExp | ((x: any) => boolean) } }) {
101
108
  for (const name of Object.keys(spec)) {
@@ -11,6 +11,7 @@ import Stream from "node:stream"
11
11
  import { EventEmitter } from "node:events"
12
12
  import http from "node:http"
13
13
  import * as HAPI from "@hapi/hapi"
14
+ import Inert from "@hapi/inert"
14
15
  import WebSocket from "ws"
15
16
  import HAPIWebSocket from "hapi-plugin-websocket"
16
17
  import HAPIHeader from "hapi-plugin-header"
@@ -32,7 +33,7 @@ import chalk from "chalk"
32
33
 
33
34
  /* internal dependencies */
34
35
  import SpeechFlowNode from "./speechflow-node"
35
- import pkg from "../package.json"
36
+ import pkg from "../../package.json"
36
37
 
37
38
  /* central CLI context */
38
39
  let cli: CLIio | null = null
@@ -67,6 +68,7 @@ type wsPeerInfo = {
67
68
  "[-a|--address <ip-address>] " +
68
69
  "[-p|--port <tcp-port>] " +
69
70
  "[-C|--cache <directory>] " +
71
+ "[-d|--dashboard <type>:<id>:<name>[,...]] " +
70
72
  "[-e|--expression <expression>] " +
71
73
  "[-f|--file <file>] " +
72
74
  "[-c|--config <id>@<yaml-config-file>] " +
@@ -125,6 +127,15 @@ type wsPeerInfo = {
125
127
  default: path.join(dataDir, "cache"),
126
128
  describe: "directory for cached files (primarily AI model files)"
127
129
  })
130
+ .option("d", {
131
+ alias: "dashboard",
132
+ type: "string",
133
+ array: false,
134
+ coerce,
135
+ nargs: 1,
136
+ default: "",
137
+ describe: "list of dashboard block types and names"
138
+ })
128
139
  .option("e", {
129
140
  alias: "expression",
130
141
  type: "string",
@@ -319,7 +330,7 @@ type wsPeerInfo = {
319
330
  const node = new nodes[name](name, cfg, {}, [])
320
331
  const status = await Promise.race<{ [ key: string ]: string | number }>([
321
332
  node.status(),
322
- new Promise((resolve, reject) => setTimeout(() =>
333
+ new Promise<never>((resolve, reject) => setTimeout(() =>
323
334
  reject(new Error("timeout")), 10 * 1000))
324
335
  ]).catch((err: Error) => {
325
336
  cli!.log("warning", `[${node.id}]: failed to gather status of node <${node.id}>: ${err.message}`)
@@ -352,12 +363,9 @@ type wsPeerInfo = {
352
363
  ast = flowlink.compile(config)
353
364
  }
354
365
  catch (err) {
355
- if (err instanceof Error && err.name === "FlowLinkError")
356
- cli!.log("error", `failed to parse SpeechFlow configuration: ${err.toString()}`)
357
- else if (err instanceof Error)
358
- cli!.log("error", `failed to parse SpeechFlow configuration: ${err.message}`)
359
- else
360
- cli!.log("error", "failed to parse SpeechFlow configuration: internal error")
366
+ const errorMsg = err instanceof Error && err.name === "FlowLinkError"
367
+ ? err.toString() : (err instanceof Error ? err.message : "internal error")
368
+ cli!.log("error", `failed to parse SpeechFlow configuration: ${errorMsg}`)
361
369
  process.exit(1)
362
370
  }
363
371
  try {
@@ -374,10 +382,11 @@ type wsPeerInfo = {
374
382
  throw new Error(`unknown node <${id}>`)
375
383
  let node: SpeechFlowNode
376
384
  try {
377
- let num = nodeNums.get(nodes[id]) ?? 0
378
- nodeNums.set(nodes[id], ++num)
385
+ const NodeClass = nodes[id]
386
+ let num = nodeNums.get(NodeClass) ?? 0
387
+ nodeNums.set(NodeClass, ++num)
379
388
  const name = num === 1 ? id : `${id}:${num}`
380
- node = new nodes[id](name, cfg, opts, args)
389
+ node = new NodeClass(name, cfg, opts, args)
381
390
  }
382
391
  catch (err) {
383
392
  /* fatal error */
@@ -400,12 +409,9 @@ type wsPeerInfo = {
400
409
  })
401
410
  }
402
411
  catch (err) {
403
- if (err instanceof Error && err.name === "FlowLinkError")
404
- cli!.log("error", `failed to materialize SpeechFlow configuration: ${err.toString()}`)
405
- else if (err instanceof Error)
406
- cli!.log("error", `failed to materialize SpeechFlow configuration: ${err.message}`)
407
- else
408
- cli!.log("error", "failed to materialize SpeechFlow configuration: internal error")
412
+ const errorMsg = err instanceof Error && err.name === "FlowLinkError"
413
+ ? err.toString() : (err instanceof Error ? err.message : "internal error")
414
+ cli!.log("error", `failed to materialize SpeechFlow configuration: ${errorMsg}`)
409
415
  process.exit(1)
410
416
  }
411
417
 
@@ -456,7 +462,7 @@ type wsPeerInfo = {
456
462
  node.setTimeZero(timeZero)
457
463
  await Promise.race<void>([
458
464
  node.open(),
459
- new Promise((resolve, reject) => setTimeout(() =>
465
+ new Promise<never>((resolve, reject) => setTimeout(() =>
460
466
  reject(new Error("timeout")), 10 * 1000))
461
467
  ]).catch((err: Error) => {
462
468
  cli!.log("error", `[${node.id}]: failed to open node <${node.id}>: ${err.message}`)
@@ -535,7 +541,7 @@ type wsPeerInfo = {
535
541
  else {
536
542
  await Promise.race<void>([
537
543
  foundNode.receiveRequest(args),
538
- new Promise((resolve, reject) => setTimeout(() =>
544
+ new Promise<never>((resolve, reject) => setTimeout(() =>
539
545
  reject(new Error("timeout")), 10 * 1000))
540
546
  ]).catch((err: Error) => {
541
547
  cli!.log("warning", `external request to node <${name}> failed: ${err.message}`)
@@ -549,6 +555,7 @@ type wsPeerInfo = {
549
555
  address: args.a,
550
556
  port: args.p
551
557
  })
558
+ await hapi.register({ plugin: Inert })
552
559
  await hapi.register({ plugin: HAPIHeader, options: { Server: `${pkg.name}/${pkg.version}` } })
553
560
  await hapi.register({ plugin: HAPIWebSocket })
554
561
  hapi.events.on("response", (request: HAPI.Request) => {
@@ -582,6 +589,29 @@ type wsPeerInfo = {
582
589
  cli!.log("error", `HAPI: log: ${err}`)
583
590
  }
584
591
  })
592
+ hapi.route({
593
+ method: "GET",
594
+ path: "/{param*}",
595
+ handler: {
596
+ directory: {
597
+ path: path.join(__dirname, "../../speechflow-ui-db/dst"),
598
+ redirectToSlash: true,
599
+ index: true
600
+ }
601
+ }
602
+ })
603
+ hapi.route({
604
+ method: "GET",
605
+ path: "/api/dashboard",
606
+ handler: (request: HAPI.Request, h: HAPI.ResponseToolkit) => {
607
+ const config = []
608
+ for (const block of args.d.split(",")) {
609
+ const [ type, id, name ] = block.split(":")
610
+ config.push({ type, id, name })
611
+ }
612
+ return h.response(config).code(200)
613
+ }
614
+ })
585
615
  hapi.route({
586
616
  method: "GET",
587
617
  path: "/api/{req}/{node}/{params*}",
@@ -589,10 +619,13 @@ type wsPeerInfo = {
589
619
  },
590
620
  handler: (request: HAPI.Request, h: HAPI.ResponseToolkit) => {
591
621
  const peer = request.info.remoteAddress
622
+ const params = request.params.params as string ?? ""
623
+ if (params.length > 1000)
624
+ return h.response({ response: "ERROR", data: "parameters too long" }).code(400)
592
625
  const req = {
593
626
  request: request.params.req,
594
627
  node: request.params.node,
595
- args: (request.params.params as string ?? "").split("/").filter((seg) => seg !== "")
628
+ args: params.split("/").filter((seg) => seg !== "")
596
629
  }
597
630
  cli!.log("info", `HAPI: peer ${peer}: GET: ${JSON.stringify(req)}`)
598
631
  return consumeExternalRequest(req).then(() => {
@@ -607,9 +640,10 @@ type wsPeerInfo = {
607
640
  path: "/api",
608
641
  options: {
609
642
  payload: {
610
- output: "data",
611
- parse: true,
612
- allow: "application/json"
643
+ output: "data",
644
+ parse: true,
645
+ allow: "application/json",
646
+ maxBytes: 1 * 1024 * 1024
613
647
  },
614
648
  plugins: {
615
649
  websocket: {
@@ -625,8 +659,10 @@ type wsPeerInfo = {
625
659
  },
626
660
  disconnect: (args: any) => {
627
661
  const ctx: wsPeerCtx = args.ctx
662
+ const ws: WebSocket = args.ws
628
663
  const peer = ctx.peer
629
664
  wsPeers.delete(peer)
665
+ ws.removeAllListeners()
630
666
  cli!.log("info", `HAPI: WebSocket: disconnect: peer ${peer}`)
631
667
  }
632
668
  }
@@ -654,13 +690,33 @@ type wsPeerInfo = {
654
690
  node.on("send-response", (args: any[]) => {
655
691
  const data = JSON.stringify({ response: "NOTIFY", node: node.id, args })
656
692
  for (const [ peer, info ] of wsPeers.entries()) {
657
- cli!.log("info", `HAPI: peer ${peer}: ${data}`)
693
+ cli!.log("debug", `HAPI: remote peer ${peer}: sending ${data}`)
658
694
  if (info.ws.readyState === WebSocket.OPEN)
659
695
  info.ws.send(data)
660
696
  }
661
697
  })
662
698
  }
663
699
 
700
+ /* hook for dashboardInfo method of nodes */
701
+ for (const node of graphNodes) {
702
+ node.on("dashboard-info", (info: {
703
+ type: string,
704
+ id: string,
705
+ kind: "final" | "intermediate",
706
+ value: string | number
707
+ }) => {
708
+ const data = JSON.stringify({
709
+ response: "DASHBOARD",
710
+ node: "",
711
+ args: [ info.type, info.id, info.kind, info.value ]
712
+ })
713
+ for (const [ peer, info ] of wsPeers.entries()) {
714
+ cli!.log("debug", `HAPI: dashboard peer ${peer}: send ${data}`)
715
+ info.ws.send(data)
716
+ }
717
+ })
718
+ }
719
+
664
720
  /* start of internal stream processing */
665
721
  cli!.log("info", "**** everything established -- stream processing in SpeechFlow graph starts ****")
666
722
 
@@ -742,7 +798,7 @@ type wsPeerInfo = {
742
798
  cli!.log("info", `close node <${node.id}>`)
743
799
  await Promise.race<void>([
744
800
  node.close(),
745
- new Promise((resolve, reject) => setTimeout(() =>
801
+ new Promise<never>((resolve, reject) => setTimeout(() =>
746
802
  reject(new Error("timeout")), 10 * 1000))
747
803
  ]).catch((err: Error) => {
748
804
  cli!.log("warning", `node <${node.id}> failed to close: ${err.message}`)