speechflow 1.3.0 → 1.3.2

Files changed (83)
  1. package/CHANGELOG.md +15 -0
  2. package/README.md +165 -22
  3. package/dst/speechflow-node-a2a-gender.d.ts +2 -0
  4. package/dst/speechflow-node-a2a-gender.js +137 -59
  5. package/dst/speechflow-node-a2a-gender.js.map +1 -1
  6. package/dst/speechflow-node-a2a-meter.d.ts +3 -1
  7. package/dst/speechflow-node-a2a-meter.js +79 -35
  8. package/dst/speechflow-node-a2a-meter.js.map +1 -1
  9. package/dst/speechflow-node-a2a-mute.d.ts +1 -0
  10. package/dst/speechflow-node-a2a-mute.js +37 -11
  11. package/dst/speechflow-node-a2a-mute.js.map +1 -1
  12. package/dst/speechflow-node-a2a-vad.d.ts +3 -0
  13. package/dst/speechflow-node-a2a-vad.js +194 -96
  14. package/dst/speechflow-node-a2a-vad.js.map +1 -1
  15. package/dst/speechflow-node-a2a-wav.js +27 -11
  16. package/dst/speechflow-node-a2a-wav.js.map +1 -1
  17. package/dst/speechflow-node-a2t-deepgram.d.ts +4 -0
  18. package/dst/speechflow-node-a2t-deepgram.js +141 -43
  19. package/dst/speechflow-node-a2t-deepgram.js.map +1 -1
  20. package/dst/speechflow-node-t2a-elevenlabs.d.ts +2 -0
  21. package/dst/speechflow-node-t2a-elevenlabs.js +61 -12
  22. package/dst/speechflow-node-t2a-elevenlabs.js.map +1 -1
  23. package/dst/speechflow-node-t2a-kokoro.d.ts +1 -0
  24. package/dst/speechflow-node-t2a-kokoro.js +10 -4
  25. package/dst/speechflow-node-t2a-kokoro.js.map +1 -1
  26. package/dst/speechflow-node-t2t-deepl.js +8 -4
  27. package/dst/speechflow-node-t2t-deepl.js.map +1 -1
  28. package/dst/speechflow-node-t2t-format.js +2 -2
  29. package/dst/speechflow-node-t2t-format.js.map +1 -1
  30. package/dst/speechflow-node-t2t-ollama.js +1 -1
  31. package/dst/speechflow-node-t2t-ollama.js.map +1 -1
  32. package/dst/speechflow-node-t2t-openai.js +1 -1
  33. package/dst/speechflow-node-t2t-openai.js.map +1 -1
  34. package/dst/speechflow-node-t2t-sentence.d.ts +1 -1
  35. package/dst/speechflow-node-t2t-sentence.js +35 -24
  36. package/dst/speechflow-node-t2t-sentence.js.map +1 -1
  37. package/dst/speechflow-node-t2t-subtitle.js +85 -17
  38. package/dst/speechflow-node-t2t-subtitle.js.map +1 -1
  39. package/dst/speechflow-node-t2t-transformers.js +2 -2
  40. package/dst/speechflow-node-t2t-transformers.js.map +1 -1
  41. package/dst/speechflow-node-x2x-filter.js +4 -4
  42. package/dst/speechflow-node-x2x-trace.js +1 -1
  43. package/dst/speechflow-node-x2x-trace.js.map +1 -1
  44. package/dst/speechflow-node-xio-device.js +12 -8
  45. package/dst/speechflow-node-xio-device.js.map +1 -1
  46. package/dst/speechflow-node-xio-file.js +9 -3
  47. package/dst/speechflow-node-xio-file.js.map +1 -1
  48. package/dst/speechflow-node-xio-mqtt.js +5 -2
  49. package/dst/speechflow-node-xio-mqtt.js.map +1 -1
  50. package/dst/speechflow-node-xio-websocket.js +11 -11
  51. package/dst/speechflow-node-xio-websocket.js.map +1 -1
  52. package/dst/speechflow-utils.d.ts +5 -0
  53. package/dst/speechflow-utils.js +77 -44
  54. package/dst/speechflow-utils.js.map +1 -1
  55. package/dst/speechflow.js +104 -34
  56. package/dst/speechflow.js.map +1 -1
  57. package/etc/eslint.mjs +1 -2
  58. package/etc/speechflow.yaml +18 -7
  59. package/etc/stx.conf +3 -3
  60. package/package.json +14 -13
  61. package/src/speechflow-node-a2a-gender.ts +148 -64
  62. package/src/speechflow-node-a2a-meter.ts +87 -40
  63. package/src/speechflow-node-a2a-mute.ts +39 -11
  64. package/src/speechflow-node-a2a-vad.ts +206 -100
  65. package/src/speechflow-node-a2a-wav.ts +27 -11
  66. package/src/speechflow-node-a2t-deepgram.ts +148 -45
  67. package/src/speechflow-node-t2a-elevenlabs.ts +65 -12
  68. package/src/speechflow-node-t2a-kokoro.ts +11 -4
  69. package/src/speechflow-node-t2t-deepl.ts +9 -4
  70. package/src/speechflow-node-t2t-format.ts +2 -2
  71. package/src/speechflow-node-t2t-ollama.ts +1 -1
  72. package/src/speechflow-node-t2t-openai.ts +1 -1
  73. package/src/speechflow-node-t2t-sentence.ts +38 -27
  74. package/src/speechflow-node-t2t-subtitle.ts +62 -15
  75. package/src/speechflow-node-t2t-transformers.ts +4 -3
  76. package/src/speechflow-node-x2x-filter.ts +4 -4
  77. package/src/speechflow-node-x2x-trace.ts +1 -1
  78. package/src/speechflow-node-xio-device.ts +12 -8
  79. package/src/speechflow-node-xio-file.ts +9 -3
  80. package/src/speechflow-node-xio-mqtt.ts +5 -2
  81. package/src/speechflow-node-xio-websocket.ts +12 -12
  82. package/src/speechflow-utils.ts +78 -44
  83. package/src/speechflow.ts +117 -36

package/src/speechflow-node-t2t-sentence.ts

@@ -29,12 +29,12 @@ export default class SpeechFlowNodeSentence extends SpeechFlowNode {
     public static name = "sentence"
 
     /* internal state */
-    private static speexInitialized = false
     private queue = new utils.Queue<TextQueueElement>()
     private queueRecv = this.queue.pointerUse("recv")
     private queueSplit = this.queue.pointerUse("split")
     private queueSend = this.queue.pointerUse("send")
     private destroyed = false
+    private workingOffTimer: ReturnType<typeof setTimeout> | null = null
 
     /* construct node */
     constructor (id: string, cfg: { [ id: string ]: any }, opts: { [ id: string ]: any }, args: any[]) {

@@ -53,11 +53,7 @@ export default class SpeechFlowNodeSentence extends SpeechFlowNode {
         /* clear destruction flag */
         this.destroyed = false
 
-        /* pass-through logging */
-        const log = (level: string, msg: string) => { this.log(level, msg) }
-
         /* work off queued audio frames */
-        let workingOffTimer: ReturnType<typeof setTimeout> | null = null
         let workingOff = false
         const workOffQueue = async () => {
             if (this.destroyed)

@@ -67,17 +63,14 @@ export default class SpeechFlowNodeSentence extends SpeechFlowNode {
             if (workingOff)
                 return
             workingOff = true
-            if (workingOffTimer !== null) {
-                clearTimeout(workingOffTimer)
-                workingOffTimer = null
+            if (this.workingOffTimer !== null) {
+                clearTimeout(this.workingOffTimer)
+                this.workingOffTimer = null
             }
             this.queue.off("write", workOffQueue)
 
-            const position = this.queueSplit.position()
-            const maxPosition = this.queueSplit.maxPosition()
-            log("info", `SPLIT: ${maxPosition - position} elements in queue`)
-
-            while (true) {
+            /* try to work off one or more chunks */
+            while (!this.destroyed) {
                 const element = this.queueSplit.peek()
                 if (element === undefined)
                     break

@@ -139,10 +132,12 @@ export default class SpeechFlowNodeSentence extends SpeechFlowNode {
                 }
             }
 
-            /* re-initiate working off round */
+            /* re-initiate working off round (if still not destroyed) */
             workingOff = false
-            workingOffTimer = setTimeout(workOffQueue, 100)
-            this.queue.once("write", workOffQueue)
+            if (!this.destroyed) {
+                this.workingOffTimer = setTimeout(workOffQueue, 100)
+                this.queue.once("write", workOffQueue)
+            }
         }
         this.queue.once("write", workOffQueue)
 

@@ -156,12 +151,14 @@ export default class SpeechFlowNodeSentence extends SpeechFlowNode {
 
             /* receive text chunk (writable side of stream) */
             write (chunk: SpeechFlowChunk, encoding, callback) {
-                if (Buffer.isBuffer(chunk.payload))
+                if (self.destroyed)
+                    callback(new Error("stream already destroyed"))
+                else if (Buffer.isBuffer(chunk.payload))
                     callback(new Error("expected text input as string chunks"))
                 else if (chunk.payload.length === 0)
                     callback()
                 else {
-                    log("info", `received text: ${JSON.stringify(chunk.payload)}`)
+                    self.log("info", `received text: ${JSON.stringify(chunk.payload)}`)
                     self.queueRecv.append({ type: "text-frame", chunk })
                     callback()
                 }

@@ -169,6 +166,10 @@
 
             /* receive no more text chunks (writable side of stream) */
             final (callback) {
+                if (self.destroyed) {
+                    callback()
+                    return
+                }
                 /* signal end of file */
                 self.queueRecv.append({ type: "text-eof" })
                 callback()

@@ -176,11 +177,12 @@
 
             /* send text chunk(s) (readable side of stream) */
             read (_size) {
-                /* flush pending audio chunks */
-                const position = self.queueSend.position()
-                const maxPosition = self.queueSend.maxPosition()
-                log("info", `SEND: ${maxPosition - position} elements in queue`)
+                /* flush pending text chunks */
                 const flushPendingChunks = () => {
+                    if (self.destroyed) {
+                        this.push(null)
+                        return
+                    }
                     const element = self.queueSend.peek()
                     if (element !== undefined
                         && element.type === "text-eof") {

@@ -202,13 +204,13 @@
                             else if (element.type === "text-frame"
                                 && element.complete !== true)
                                 break
-                            log("info", `send text: ${JSON.stringify(element.chunk.payload)}`)
+                            self.log("info", `send text: ${JSON.stringify(element.chunk.payload)}`)
                             this.push(element.chunk)
                             self.queueSend.walk(+1)
                             self.queue.trim()
                         }
                     }
-                    else
+                    else if (!self.destroyed)
                         self.queue.once("write", flushPendingChunks)
                 }
                 flushPendingChunks()

@@ -218,13 +220,22 @@
 
     /* close node */
     async close () {
+        /* indicate destruction */
+        this.destroyed = true
+
+        /* clean up timer */
+        if (this.workingOffTimer !== null) {
+            clearTimeout(this.workingOffTimer)
+            this.workingOffTimer = null
+        }
+
+        /* remove any pending event listeners */
+        this.queue.removeAllListeners("write")
+
         /* close stream */
         if (this.stream !== null) {
             this.stream.destroy()
             this.stream = null
         }
-
-        /* indicate destruction */
-        this.destroyed = true
     }
 }
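
Taken together, the sentence-node changes implement one teardown pattern: the re-arming timer becomes an instance field, every work-off and flush path checks a destroyed flag, and close() flips that flag first, then cancels the timer and removes the pending "write" listeners before destroying the stream. A minimal, self-contained sketch of that pattern (the WorkOffLoop class and its plain EventEmitter queue are illustrative stand-ins, not speechflow's actual utils.Queue API):

    /* sketch only: a self-re-arming work-off loop with a race-free close() */
    import { EventEmitter } from "node:events"

    class WorkOffLoop {
        private destroyed = false
        private timer: ReturnType<typeof setTimeout> | null = null
        constructor (private queue: EventEmitter, private work: () => Promise<void>) {
            this.queue.once("write", this.run)
        }
        private run = async () => {
            if (this.destroyed)
                return
            if (this.timer !== null) {
                clearTimeout(this.timer)
                this.timer = null
            }
            await this.work()
            /* re-arm only while still alive, so close() cannot race a fresh round */
            if (!this.destroyed) {
                this.timer = setTimeout(this.run, 100)
                this.queue.once("write", this.run)
            }
        }
        close () {
            this.destroyed = true                     /* 1. flip the flag first      */
            if (this.timer !== null) {                /* 2. cancel the pending timer */
                clearTimeout(this.timer)
                this.timer = null
            }
            this.queue.removeAllListeners("write")    /* 3. drop pending listeners   */
        }
    }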

package/src/speechflow-node-t2t-subtitle.ts

@@ -7,6 +7,9 @@
 /* standard dependencies */
 import Stream from "node:stream"
 
+/* external dependencies */
+import { Duration } from "luxon"
+
 /* internal dependencies */
 import SpeechFlowNode, { SpeechFlowChunk } from "./speechflow-node"
 

@@ -24,7 +27,8 @@ export default class SpeechFlowNodeSubtitle extends SpeechFlowNode {
 
         /* declare node configuration parameters */
         this.configure({
-            format: { type: "string", pos: 0, val: "srt", match: /^(?:srt|vtt)$/ }
+            format: { type: "string", pos: 0, val: "srt", match: /^(?:srt|vtt)$/ },
+            words:  { type: "boolean", val: false }
         })
 
         /* declare node input/output format */

@@ -40,31 +44,74 @@
         const convert = async (chunk: SpeechFlowChunk) => {
             if (typeof chunk.payload !== "string")
                 throw new Error("chunk payload type must be string")
-            let text = chunk.payload
-            if (this.params.format === "srt") {
-                const start = chunk.timestampStart.toFormat("hh:mm:ss,SSS")
-                const end = chunk.timestampEnd.toFormat("hh:mm:ss,SSS")
-                text = `${this.sequenceNo++}\n` +
-                    `${start} --> ${end}\n` +
-                    `${text}\n\n`
+            const convertSingle = (
+                start: Duration,
+                end: Duration,
+                text: string,
+                word?: string,
+                occurence?: number
+            ) => {
+                if (word) {
+                    occurence ??= 1
+                    let match = 1
+                    word = word.replace(/[.*+?^${}()|[\]\\]/g, "\\$&")
+                    text = text.replaceAll(new RegExp(`\\b${word}\\b`, "g"), (m) => {
+                        if (match++ === occurence)
+                            return `<b>${m}</b>`
+                        else
+                            return m
+                    })
+                }
+                if (this.params.format === "srt") {
+                    const startFmt = start.toFormat("hh:mm:ss,SSS")
+                    const endFmt = end.toFormat("hh:mm:ss,SSS")
+                    text = `${this.sequenceNo++}\n` +
+                        `${startFmt} --> ${endFmt}\n` +
+                        `${text}\n\n`
+                }
+                else if (this.params.format === "vtt") {
+                    const startFmt = start.toFormat("hh:mm:ss.SSS")
+                    const endFmt = end.toFormat("hh:mm:ss.SSS")
+                    text = `${startFmt} --> ${endFmt}\n` +
+                        `${text}\n\n`
+                }
+                return text
             }
-            else if (this.params.format === "vtt") {
-                const start = chunk.timestampStart.toFormat("hh:mm:ss.SSS")
-                const end = chunk.timestampEnd.toFormat("hh:mm:ss.SSS")
-                text = `${this.sequenceNo++}\n` +
-                    `${start} --> ${end}\n` +
-                    `${text}\n\n`
+            let output = ""
+            if (this.params.words) {
+                output += convertSingle(chunk.timestampStart, chunk.timestampEnd, chunk.payload)
+                const words = (chunk.meta.get("words") ?? []) as
+                    { word: string, start: Duration, end: Duration }[]
+                const occurences = new Map<string, number>()
+                for (const word of words) {
+                    let occurence = occurences.get(word.word) ?? 0
+                    occurence++
+                    occurences.set(word.word, occurence)
+                    output += convertSingle(word.start, word.end, chunk.payload, word.word, occurence)
+                }
             }
-            return text
+            else
+                output += convertSingle(chunk.timestampStart, chunk.timestampEnd, chunk.payload)
+            return output
         }
 
         /* establish a duplex stream */
+        const self = this
+        let firstChunk = true
         this.stream = new Stream.Transform({
             readableObjectMode: true,
             writableObjectMode: true,
             decodeStrings: false,
             highWaterMark: 1,
             transform (chunk: SpeechFlowChunk, encoding, callback) {
+                if (firstChunk && self.params.format === "vtt") {
+                    this.push(new SpeechFlowChunk(
+                        Duration.fromMillis(0), Duration.fromMillis(0),
+                        "final", "text",
+                        "WEBVTT\n\n"
+                    ))
+                    firstChunk = false
+                }
                 if (Buffer.isBuffer(chunk.payload))
                     callback(new Error("invalid chunk payload type"))
                 else {
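
For orientation, here is the kind of SRT output the new `words` mode produces for one transcribed segment: a cue with the plain segment text, followed by one cue per word whose timing narrows to that word and whose occurrence is wrapped in `<b>...</b>`. Timings, text, and the starting sequence number are illustrative, not taken from the package:

    1
    00:00:01,000 --> 00:00:02,500
    hello world

    2
    00:00:01,000 --> 00:00:01,400
    <b>hello</b> world

    3
    00:00:01,600 --> 00:00:02,500
    hello <b>world</b>

In `vtt` mode the very first chunk pushed downstream is now the mandatory `WEBVTT` file header, cue timestamps use `.` instead of `,` as the millisecond separator, and the per-cue sequence numbers are omitted.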

package/src/speechflow-node-t2t-transformers.ts

@@ -104,7 +104,7 @@ export default class SpeechFlowNodeTransformers extends SpeechFlowNode {
 
     /* open node */
     async open () {
-        let model: string = ""
+        let model = ""
 
         /* track download progress when instantiating Transformers engine and model */
        const progressState = new Map<string, number>()

@@ -138,7 +138,6 @@
                 progress_callback: progressCallback
             })
             this.translator = await pipeline
-            clearInterval(interval)
             if (this.translator === null)
                 throw new Error("failed to instantiate translator pipeline")
         }

@@ -151,13 +150,15 @@
                 progress_callback: progressCallback
             })
             this.generator = await pipeline
-            clearInterval(interval)
             if (this.generator === null)
                 throw new Error("failed to instantiate generator pipeline")
         }
         else
             throw new Error("invalid model")
 
+        /* clear progress interval again */
+        clearInterval(interval)
+
         /* provide text-to-text translation */
         const translate = async (text: string) => {
             if (this.params.model === "OPUS") {
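
The progress-logging interval is now cleared once, after whichever pipeline branch ran, instead of separately inside each branch. If one also wanted the interval cleared when pipeline instantiation throws, a try/finally around the branch would do it; a sketch under that assumption (reportProgress is a hypothetical stand-in for the node's progress-logging closure):

    const reportProgress = () => { /* log accumulated download progress */ }
    const interval = setInterval(reportProgress, 1000)
    try {
        /* ...instantiate whichever Transformers pipeline the model requires... */
    }
    finally {
        /* runs on success and on throw alike */
        clearInterval(interval)
    }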

package/src/speechflow-node-x2x-filter.ts

@@ -94,13 +94,13 @@ export default class SpeechFlowNodeFilter extends SpeechFlowNode {
                 const m = self.params.var.match(/^meta:(.+)$/)
                 if (m !== null)
                     val1 = chunk.meta.get(m[1]) ?? ""
-                else if (self.params.key === "payload:length")
+                else if (self.params.var === "payload:length")
                     val1 = chunk.payload.length
-                else if (self.params.key === "payload:text")
+                else if (self.params.var === "payload:text")
                     val1 = (self.params.type === "text" ? chunk.payload as string : "")
-                else if (self.params.key === "time:start")
+                else if (self.params.var === "time:start")
                     val1 = chunk.timestampStart.toMillis()
-                else if (self.params.key === "time:end")
+                else if (self.params.var === "time:end")
                     val1 = chunk.timestampEnd.toMillis()
                 if (comparison(val1, self.params.op, val2)) {
                     self.log("info", `[${self.params.name}]: passing through ${chunk.type} chunk`)
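
The filter fix is a one-word bug: the non-`meta:` selectors compared against the wrong parameter (`key` instead of `var`), so they never matched the configured variable; they now read `self.params.var` like the `meta:` branch. Summarized as a standalone function, the selectors the `var` parameter supports evaluate as follows (chunk shape simplified; assumes luxon Duration timestamps as in the node above, not the package's literal code):

    import { Duration } from "luxon"

    interface Chunk {
        payload:        string | Buffer
        timestampStart: Duration
        timestampEnd:   Duration
        meta:           Map<string, unknown>
    }

    /* sketch of the corrected selector dispatch */
    function evaluateVar (selector: string, chunk: Chunk): unknown {
        const m = selector.match(/^meta:(.+)$/)
        if (m !== null)                    return chunk.meta.get(m[1]) ?? ""
        if (selector === "payload:length") return chunk.payload.length
        if (selector === "payload:text")   return typeof chunk.payload === "string" ? chunk.payload : ""
        if (selector === "time:start")     return chunk.timestampStart.toMillis()
        if (selector === "time:end")       return chunk.timestampEnd.toMillis()
        return undefined
    }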

package/src/speechflow-node-x2x-trace.ts

@@ -75,7 +75,7 @@ export default class SpeechFlowNodeTrace extends SpeechFlowNode {
             }
             else {
                 if (type === "text")
-                    log("debug", `${type} chunk: type=${chunk.type} ` +
+                    log("debug", `chunk: type=${chunk.type} ` +
                         `kind=${chunk.kind} ` +
                         `start=${fmtTime(chunk.timestampStart)} ` +
                         `end=${fmtTime(chunk.timestampEnd)} ` +

package/src/speechflow-node-xio-device.ts

@@ -159,8 +159,7 @@ export default class SpeechFlowNodeDevice extends SpeechFlowNode {
 
             /* convert regular stream into object-mode stream */
             const wrapper = utils.createTransformStreamForReadableSide("audio", () => this.timeZero)
-            this.stream.pipe(wrapper)
-            this.stream = wrapper
+            this.stream = Stream.compose(this.stream, wrapper)
         }
         else if (this.params.mode === "w") {
             /* output device */

@@ -180,8 +179,7 @@
 
             /* convert regular stream into object-mode stream */
             const wrapper = utils.createTransformStreamForWritableSide()
-            wrapper.pipe(this.stream)
-            this.stream = wrapper
+            this.stream = Stream.compose(wrapper, this.stream)
         }
         else
             throw new Error(`device "${device.id}" does not have any input or output channels`)
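
Both device wrapping sites now use Stream.compose() instead of piping into the wrapper and reassigning this.stream. compose() returns a single Duplex that internally pipelines its arguments, so errors and destroy() propagate across both halves, which a bare .pipe() does not guarantee. A minimal illustration of the shape (the two transforms are illustrative, not speechflow code):

    import Stream from "node:stream"

    const upper = new Stream.Transform({
        transform (chunk, _enc, cb) { cb(null, String(chunk).toUpperCase()) }
    })
    const exclaim = new Stream.Transform({
        transform (chunk, _enc, cb) { cb(null, String(chunk) + "!") }
    })

    /* one Duplex: writes enter "upper", reads leave "exclaim" */
    const combined = Stream.compose(upper, exclaim)
    combined.on("data", (data) => console.log(String(data)))   /* "HELLO!" */
    combined.end("hello")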

@@ -200,13 +198,19 @@
         /* shutdown PortAudio */
         if (this.io !== null) {
             await new Promise<void>((resolve, reject) => {
-                this.io!.abort(() => {
-                    resolve()
+                this.io!.abort((err?: Error) => {
+                    if (err)
+                        reject(err)
+                    else
+                        resolve()
                 })
             })
             await new Promise<void>((resolve, reject) => {
-                this.io!.quit(() => {
-                    resolve()
+                this.io!.quit((err?: Error) => {
+                    if (err)
+                        reject(err)
+                    else
+                        resolve()
                 })
             })
             this.io = null
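
The PortAudio shutdown calls previously resolved unconditionally, silently discarding any error handed to the callback; they now reject, so close() actually fails when abort()/quit() do. The same pattern, extracted into a small helper (the io object here is a hypothetical stand-in for the node's PortAudio handle):

    /* wrap a node-style (err?) callback API into a Promise */
    const callAsPromise = (fn: (cb: (err?: Error) => void) => void) =>
        new Promise<void>((resolve, reject) => {
            fn((err?: Error) => {
                if (err) reject(err)
                else     resolve()
            })
        })

    /* hypothetical callback-style handle, standing in for the PortAudio io object */
    declare const io: { abort (cb: (err?: Error) => void): void, quit (cb: (err?: Error) => void): void }

    await callAsPromise((cb) => io.abort(cb))
    await callAsPromise((cb) => io.quit(cb))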

package/src/speechflow-node-xio-file.ts

@@ -178,9 +178,15 @@ export default class SpeechFlowNodeFile extends SpeechFlowNode {
     async close () {
         /* shutdown stream */
         if (this.stream !== null) {
-            await new Promise<void>((resolve) => {
-                if (this.stream instanceof Stream.Writable || this.stream instanceof Stream.Duplex)
-                    this.stream.end(() => { resolve() })
+            await new Promise<void>((resolve, reject) => {
+                if (this.stream instanceof Stream.Writable || this.stream instanceof Stream.Duplex) {
+                    this.stream.end((err?: Error) => {
+                        if (err)
+                            reject(err)
+                        else
+                            resolve()
+                    })
+                }
                 else
                     resolve()
             })

package/src/speechflow-node-xio-mqtt.ts

@@ -83,7 +83,10 @@ export default class SpeechFlowNodeMQTT extends SpeechFlowNode {
         this.broker.on("connect", (packet: MQTT.IConnackPacket) => {
             this.log("info", `connection opened to MQTT ${this.params.url}`)
             if (this.params.mode !== "w" && !packet.sessionPresent)
-                this.broker!.subscribe([ this.params.topicRead ], () => {})
+                this.broker!.subscribe([ this.params.topicRead ], (err) => {
+                    if (err)
+                        this.log("error", `failed to subscribe to MQTT topic "${this.params.topicRead}": ${err.message}`)
+                })
         })
         this.broker.on("reconnect", () => {
             this.log("info", `connection re-opened to MQTT ${this.params.url}`)
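
The subscription callback is no longer a no-op, so broker-side failures at least get logged. MQTT.js also reports per-topic results through the callback's second "granted" argument, where a broker that refuses a subscription answers with return code 128; if stricter handling were wanted, a check could look roughly like this (client setup and topic name are illustrative):

    import * as MQTT from "mqtt"

    const client = MQTT.connect("mqtt://localhost:1883")
    client.subscribe([ "speechflow/text" ], (err, granted) => {
        if (err)
            console.error(`subscribe failed: ${err.message}`)
        else for (const grant of granted ?? [])
            if (grant.qos === 128)
                console.error(`broker rejected subscription to "${grant.topic}"`)
    })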

@@ -141,7 +144,7 @@
 
     /* close node */
     async close () {
-        /* close Websocket server */
+        /* close MQTT broker */
         if (this.broker !== null) {
             if (this.broker.connected)
                 this.broker.end()

package/src/speechflow-node-xio-websocket.ts

@@ -64,15 +64,15 @@ export default class SpeechFlowNodeWebsocket extends SpeechFlowNode {
             const url = new URL(this.params.listen)
             const websockets = new Set<ws.WebSocket>()
             const chunkQueue = new utils.SingleQueue<SpeechFlowChunk>()
-            const server = new ws.WebSocketServer({
+            this.server = new ws.WebSocketServer({
                 host: url.hostname,
                 port: Number.parseInt(url.port),
                 path: url.pathname
             })
-            server.on("listening", () => {
+            this.server.on("listening", () => {
                 this.log("info", `listening on URL ${this.params.listen}`)
             })
-            server.on("connection", (ws, request) => {
+            this.server.on("connection", (ws, request) => {
                 const peer = `${request.socket.remoteAddress}:${request.socket.remotePort}`
                 this.log("info", `connection opened on URL ${this.params.listen} by peer ${peer}`)
                 websockets.add(ws)
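
Keeping the ws.WebSocketServer on this.server rather than in a local const presumably lets the node's close() shut the listener down; that part of the change is outside this excerpt, but the shutdown it enables would look roughly like this (a sketch, not speechflow's actual close()):

    import * as ws from "ws"

    class Example {
        server: ws.WebSocketServer | null = null
        async close () {
            if (this.server !== null) {
                await new Promise<void>((resolve, reject) => {
                    this.server!.close((err?: Error) => err ? reject(err) : resolve())
                })
                this.server = null
            }
        }
    }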

@@ -105,7 +105,7 @@
                     chunkQueue.write(chunk)
                 })
             })
-            server.on("error", (error) => {
+            this.server.on("error", (error) => {
                 this.log("error", `error of some connection on URL ${this.params.listen}: ${error.message}`)
             })
             const type = this.params.type

@@ -124,7 +124,7 @@
                         callback(new Error("still no Websocket connections available"))
                     else {
                         const data = utils.streamChunkEncode(chunk)
-                        const results = []
+                        const results: Promise<void>[] = []
                         for (const websocket of websockets.values()) {
                             results.push(new Promise<void>((resolve, reject) => {
                                 websocket.send(data, (error) => {

@@ -175,12 +175,12 @@
             const chunkQueue = new utils.SingleQueue<SpeechFlowChunk>()
             this.client.addEventListener("message", (ev: MessageEvent) => {
                 if (this.params.mode === "w") {
-                    this.log("warning", `connection to URL ${this.params.listen}: ` +
+                    this.log("warning", `connection to URL ${this.params.connect}: ` +
                         "received remote data on write-only node")
                     return
                 }
                 if (!(ev.data instanceof ArrayBuffer)) {
-                    this.log("warning", `connection to URL ${this.params.listen}: ` +
+                    this.log("warning", `connection to URL ${this.params.connect}: ` +
                         "received non-binary message")
                     return
                 }

@@ -204,15 +204,15 @@
                         callback(new Error(`written chunk is not of ${type} type`))
                     else if (!client.OPEN)
                         callback(new Error("still no Websocket connection available"))
-                    const data = utils.streamChunkEncode(chunk)
-                    client.send(data)
-                    callback()
+                    else {
+                        const data = utils.streamChunkEncode(chunk)
+                        client.send(data)
+                        callback()
+                    }
                 },
                 read (size: number) {
                     if (mode === "w")
                         throw new Error("read operation on write-only node")
-                    if (!client.OPEN)
-                        throw new Error("still no Websocket connection available")
                     chunkQueue.read().then((chunk) => {
                         this.push(chunk, "binary")
                     })
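
The write-side fix matters because the old code fell through after calling callback(new Error(...)) and then invoked callback() again after sending, and a stream's write callback must be called exactly once per chunk (Node raises ERR_MULTIPLE_CALLBACK otherwise). On the read side, the not-yet-OPEN guard no longer throws, presumably because chunkQueue.read() simply resolves once data arrives. The corrected write shape, reduced to its essentials:

    import Stream from "node:stream"

    const sink = new Stream.Writable({
        write (chunk, _enc, callback) {
            if (chunk.length === 0)
                callback(new Error("empty chunk"))
            else {
                /* ...deliver the chunk... */
                callback()            /* exactly one invocation per chunk */
            }
        }
    })
    sink.write(Buffer.from("ok"))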