speechflow 2.0.1 → 2.0.3

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (118)
  1. package/CHANGELOG.md +15 -0
  2. package/etc/claude.md +1 -1
  3. package/package.json +4 -4
  4. package/speechflow-cli/dst/speechflow-main-api.js.map +1 -1
  5. package/speechflow-cli/dst/speechflow-main-graph.js +4 -4
  6. package/speechflow-cli/dst/speechflow-main-graph.js.map +1 -1
  7. package/speechflow-cli/dst/speechflow-main.js +1 -1
  8. package/speechflow-cli/dst/speechflow-main.js.map +1 -1
  9. package/speechflow-cli/dst/speechflow-node-a2a-compressor.js +6 -6
  10. package/speechflow-cli/dst/speechflow-node-a2a-compressor.js.map +1 -1
  11. package/speechflow-cli/dst/speechflow-node-a2a-filler.js.map +1 -1
  12. package/speechflow-cli/dst/speechflow-node-a2a-mute.js +2 -2
  13. package/speechflow-cli/dst/speechflow-node-a2a-mute.js.map +1 -1
  14. package/speechflow-cli/dst/speechflow-node-a2a-rnnoise.js +1 -1
  15. package/speechflow-cli/dst/speechflow-node-a2t-amazon.js.map +1 -1
  16. package/speechflow-cli/dst/speechflow-node-a2t-google.js +8 -8
  17. package/speechflow-cli/dst/speechflow-node-a2t-google.js.map +1 -1
  18. package/speechflow-cli/dst/speechflow-node-t2a-elevenlabs.js +9 -9
  19. package/speechflow-cli/dst/speechflow-node-t2a-elevenlabs.js.map +1 -1
  20. package/speechflow-cli/dst/speechflow-node-t2a-google.js +3 -3
  21. package/speechflow-cli/dst/speechflow-node-t2a-google.js.map +1 -1
  22. package/speechflow-cli/dst/speechflow-node-t2a-supertonic.d.ts +2 -3
  23. package/speechflow-cli/dst/speechflow-node-t2a-supertonic.js +93 -466
  24. package/speechflow-cli/dst/speechflow-node-t2a-supertonic.js.map +1 -1
  25. package/speechflow-cli/dst/speechflow-node-t2t-amazon.js +4 -4
  26. package/speechflow-cli/dst/speechflow-node-t2t-amazon.js.map +1 -1
  27. package/speechflow-cli/dst/speechflow-node-t2t-deepl.js +1 -1
  28. package/speechflow-cli/dst/speechflow-node-t2t-deepl.js.map +1 -1
  29. package/speechflow-cli/dst/speechflow-node-t2t-format.js +1 -1
  30. package/speechflow-cli/dst/speechflow-node-t2t-format.js.map +1 -1
  31. package/speechflow-cli/dst/speechflow-node-t2t-google.js +1 -1
  32. package/speechflow-cli/dst/speechflow-node-t2t-google.js.map +1 -1
  33. package/speechflow-cli/dst/speechflow-node-t2t-modify.js +1 -1
  34. package/speechflow-cli/dst/speechflow-node-t2t-modify.js.map +1 -1
  35. package/speechflow-cli/dst/speechflow-node-t2t-opus.js +1 -1
  36. package/speechflow-cli/dst/speechflow-node-t2t-opus.js.map +1 -1
  37. package/speechflow-cli/dst/speechflow-node-t2t-punctuation.js +1 -2
  38. package/speechflow-cli/dst/speechflow-node-t2t-punctuation.js.map +1 -1
  39. package/speechflow-cli/dst/speechflow-node-t2t-spellcheck.js +1 -2
  40. package/speechflow-cli/dst/speechflow-node-t2t-spellcheck.js.map +1 -1
  41. package/speechflow-cli/dst/speechflow-node-t2t-subtitle.js +2 -2
  42. package/speechflow-cli/dst/speechflow-node-t2t-subtitle.js.map +1 -1
  43. package/speechflow-cli/dst/speechflow-node-t2t-summary.js +3 -4
  44. package/speechflow-cli/dst/speechflow-node-t2t-summary.js.map +1 -1
  45. package/speechflow-cli/dst/speechflow-node-t2t-translate.js +1 -2
  46. package/speechflow-cli/dst/speechflow-node-t2t-translate.js.map +1 -1
  47. package/speechflow-cli/dst/speechflow-node-x2x-filter.js.map +1 -1
  48. package/speechflow-cli/dst/speechflow-node-xio-exec.js +2 -2
  49. package/speechflow-cli/dst/speechflow-node-xio-exec.js.map +1 -1
  50. package/speechflow-cli/dst/speechflow-node-xio-file.js +2 -2
  51. package/speechflow-cli/dst/speechflow-node-xio-file.js.map +1 -1
  52. package/speechflow-cli/dst/speechflow-node-xio-vban.js.map +1 -1
  53. package/speechflow-cli/dst/speechflow-node-xio-webrtc.js +1 -1
  54. package/speechflow-cli/dst/speechflow-util-audio.d.ts +1 -0
  55. package/speechflow-cli/dst/speechflow-util-audio.js +10 -3
  56. package/speechflow-cli/dst/speechflow-util-audio.js.map +1 -1
  57. package/speechflow-cli/dst/speechflow-util-llm.d.ts +0 -1
  58. package/speechflow-cli/dst/speechflow-util-llm.js +4 -8
  59. package/speechflow-cli/dst/speechflow-util-llm.js.map +1 -1
  60. package/speechflow-cli/dst/speechflow-util-queue.js.map +1 -1
  61. package/speechflow-cli/dst/speechflow-util-stream.js +4 -5
  62. package/speechflow-cli/dst/speechflow-util-stream.js.map +1 -1
  63. package/speechflow-cli/etc/eslint.mjs +1 -3
  64. package/speechflow-cli/etc/oxlint.jsonc +4 -1
  65. package/speechflow-cli/etc/stx.conf +0 -1
  66. package/speechflow-cli/package.json +16 -19
  67. package/speechflow-cli/src/lib.d.ts +5 -1
  68. package/speechflow-cli/src/speechflow-main-api.ts +4 -4
  69. package/speechflow-cli/src/speechflow-main-cli.ts +1 -1
  70. package/speechflow-cli/src/speechflow-main-graph.ts +16 -16
  71. package/speechflow-cli/src/speechflow-main-nodes.ts +1 -1
  72. package/speechflow-cli/src/speechflow-main-status.ts +2 -2
  73. package/speechflow-cli/src/speechflow-main.ts +1 -1
  74. package/speechflow-cli/src/speechflow-node-a2a-compressor-wt.ts +3 -3
  75. package/speechflow-cli/src/speechflow-node-a2a-compressor.ts +6 -6
  76. package/speechflow-cli/src/speechflow-node-a2a-expander-wt.ts +2 -2
  77. package/speechflow-cli/src/speechflow-node-a2a-filler.ts +4 -4
  78. package/speechflow-cli/src/speechflow-node-a2a-gender.ts +1 -1
  79. package/speechflow-cli/src/speechflow-node-a2a-mute.ts +2 -2
  80. package/speechflow-cli/src/speechflow-node-a2a-pitch.ts +1 -1
  81. package/speechflow-cli/src/speechflow-node-a2a-rnnoise-wt.ts +2 -2
  82. package/speechflow-cli/src/speechflow-node-a2a-rnnoise.ts +1 -1
  83. package/speechflow-cli/src/speechflow-node-a2t-amazon.ts +2 -2
  84. package/speechflow-cli/src/speechflow-node-a2t-google.ts +8 -8
  85. package/speechflow-cli/src/speechflow-node-t2a-elevenlabs.ts +9 -9
  86. package/speechflow-cli/src/speechflow-node-t2a-google.ts +3 -3
  87. package/speechflow-cli/src/speechflow-node-t2a-supertonic.ts +103 -577
  88. package/speechflow-cli/src/speechflow-node-t2t-amazon.ts +4 -4
  89. package/speechflow-cli/src/speechflow-node-t2t-deepl.ts +1 -1
  90. package/speechflow-cli/src/speechflow-node-t2t-format.ts +1 -1
  91. package/speechflow-cli/src/speechflow-node-t2t-google.ts +2 -2
  92. package/speechflow-cli/src/speechflow-node-t2t-modify.ts +2 -2
  93. package/speechflow-cli/src/speechflow-node-t2t-opus.ts +1 -1
  94. package/speechflow-cli/src/speechflow-node-t2t-punctuation.ts +1 -2
  95. package/speechflow-cli/src/speechflow-node-t2t-spellcheck.ts +1 -2
  96. package/speechflow-cli/src/speechflow-node-t2t-subtitle.ts +2 -2
  97. package/speechflow-cli/src/speechflow-node-t2t-summary.ts +3 -4
  98. package/speechflow-cli/src/speechflow-node-t2t-translate.ts +1 -2
  99. package/speechflow-cli/src/speechflow-node-x2x-filter.ts +4 -4
  100. package/speechflow-cli/src/speechflow-node-xio-exec.ts +2 -2
  101. package/speechflow-cli/src/speechflow-node-xio-file.ts +2 -2
  102. package/speechflow-cli/src/speechflow-node-xio-vban.ts +4 -2
  103. package/speechflow-cli/src/speechflow-node-xio-webrtc.ts +1 -1
  104. package/speechflow-cli/src/speechflow-util-audio.ts +11 -3
  105. package/speechflow-cli/src/speechflow-util-llm.ts +4 -9
  106. package/speechflow-cli/src/speechflow-util-queue.ts +1 -1
  107. package/speechflow-cli/src/speechflow-util-stream.ts +4 -5
  108. package/speechflow-ui-db/dst/index.js +13 -13
  109. package/speechflow-ui-db/etc/oxlint.jsonc +137 -0
  110. package/speechflow-ui-db/etc/stx.conf +4 -3
  111. package/speechflow-ui-db/package.json +9 -6
  112. package/speechflow-ui-st/dst/index.js +27 -27
  113. package/speechflow-ui-st/etc/oxlint.jsonc +137 -0
  114. package/speechflow-ui-st/etc/stx.conf +4 -3
  115. package/speechflow-ui-st/package.json +9 -6
  116. package/speechflow-cli/etc/biome.jsonc +0 -46
  117. package/speechflow-ui-db/src/lib.d.ts +0 -9
  118. package/speechflow-ui-st/src/lib.d.ts +0 -9
@@ -22,7 +22,7 @@
  "@gpeng/naudiodon": "2.4.1",
  "execa": "9.6.1",
  "shell-parser": "1.0.0",
- "@deepgram/sdk": "4.11.2",
+ "@deepgram/sdk": "4.11.3",
  "deepl-node": "1.24.0",
  "@elevenlabs/elevenlabs-js": "2.28.0",
  "get-stream": "9.0.1",
@@ -30,9 +30,9 @@
  "speex-resampler": "3.0.1",
  "@sapphi-red/speex-preprocess-wasm": "0.4.0",
  "@shiguredo/rnnoise-wasm": "2025.1.5",
- "@aws-sdk/client-transcribe-streaming": "3.952.0",
- "@aws-sdk/client-translate": "3.952.0",
- "@aws-sdk/client-polly": "3.952.0",
+ "@aws-sdk/client-transcribe-streaming": "3.958.0",
+ "@aws-sdk/client-translate": "3.958.0",
+ "@aws-sdk/client-polly": "3.958.0",
  "@google-cloud/translate": "9.3.0",
  "@google-cloud/speech": "7.2.1",
  "@google-cloud/text-to-speech": "6.4.0",
@@ -40,8 +40,8 @@
  "object-path": "0.11.8",
  "glob": "13.0.0",
  "ws": "8.18.3",
- "bufferutil": "4.0.9",
- "utf-8-validate": "6.0.5",
+ "bufferutil": "4.1.0",
+ "utf-8-validate": "6.0.6",
  "@hapi/hapi": "21.4.4",
  "@hapi/boom": "10.0.1",
  "@hapi/inert": "7.1.0",
@@ -49,9 +49,9 @@
  "hapi-plugin-websocket": "2.4.11",
  "@opensumi/reconnecting-websocket": "4.4.0",
  "ollama": "0.6.3",
- "openai": "6.13.0",
+ "openai": "6.15.0",
  "@anthropic-ai/sdk": "0.71.2",
- "@google/genai": "1.33.0",
+ "@google/genai": "1.34.0",
  "@rse/ffmpeg": "1.4.2",
  "ffmpeg-stream": "1.0.1",
  "installed-packages": "1.0.13",
@@ -61,7 +61,7 @@
  "vban": "1.5.6",
  "cbor2": "2.0.1",
  "arktype": "2.1.29",
- "pure-uuid": "1.8.1",
+ "pure-uuid": "2.0.0",
  "wavefile": "11.0.0",
  "audio-inspect": "0.0.4",
  "@huggingface/transformers": "3.8.1",
@@ -79,7 +79,7 @@
  "@soundtouchjs/audio-worklet": "0.2.1",
  "werift": "0.22.2",
  "@discordjs/opus": "0.10.0",
- "@rse/stx": "1.1.2"
+ "@rse/stx": "1.1.4"
  },
  "devDependencies": {
  "eslint": "9.39.2",
@@ -88,13 +88,11 @@
  "eslint-plugin-promise": "7.2.1",
  "eslint-plugin-import": "2.32.0",
  "eslint-plugin-node": "11.1.0",
- "typescript-eslint": "8.50.0",
- "@typescript-eslint/eslint-plugin": "8.50.0",
- "@typescript-eslint/parser": "8.50.0",
- "oxlint": "1.33.0",
- "eslint-plugin-oxlint": "1.33.0",
- "@biomejs/biome": "2.0.6",
- "eslint-config-biome": "2.1.3",
+ "typescript-eslint": "8.50.1",
+ "@typescript-eslint/eslint-plugin": "8.50.1",
+ "@typescript-eslint/parser": "8.50.1",
+ "oxlint": "1.35.0",
+ "eslint-plugin-oxlint": "1.35.0",

  "@types/node": "24.10.1",
  "@types/yargs": "17.0.35",
@@ -117,10 +115,9 @@
  "cross-env": "10.1.0"
  },
  "overrides": {
- "@huggingface/transformers": { "onnxruntime-node": "1.23.2" }
+ "@huggingface/transformers": { "onnxruntime-node": "1.22.0" }
  },
  "upd": [
- "!@biomejs/biome",
  "!onnxruntime-node",
  "!@types/node"
  ],
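Note on the last hunk above: besides dropping the Biome tooling, it changes the onnxruntime-node version that npm's "overrides" field pins for the transitive dependency pulled in by @huggingface/transformers (1.23.2 down to 1.22.0). A minimal package.json sketch of how such an override is declared, using only the versions visible in this diff:

    {
      "dependencies": {
        "@huggingface/transformers": "3.8.1"
      },
      "overrides": {
        "@huggingface/transformers": { "onnxruntime-node": "1.22.0" }
      }
    }

npm applies the override wherever the named package appears below @huggingface/transformers in the dependency tree, so the pinned native runtime wins over whatever the library itself requests.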
@@ -4,6 +4,8 @@
  ** Licensed under GPL 3.0 <https://spdx.org/licenses/GPL-3.0-only>
  */

+ /* eslint-disable no-unused-vars */
+
  /* type definitions for Stream.compose */
  declare module "node:stream" {
  import { Stream, Duplex } from "node:stream"
@@ -30,7 +32,7 @@ declare interface AudioParamDescriptor {
  maxValue?: number
  automationRate?: "a-rate" | "k-rate"
  }
- declare function registerProcessor(
+ declare function registerProcessor (
  name: string,
  processorCtor: new (...args: any[]) => AudioWorkletProcessor
  ): void
@@ -40,3 +42,5 @@ declare module "shell-parser" {
  export default function shellParser (command: string): string[]
  }

+ /* eslint-enable no-unused-vars */
+
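The lib.d.ts hunks above are ambient declarations: they supply TypeScript typings for worklet globals and for the untyped shell-parser package. A minimal sketch of how such a declaration is consumed (the command string is illustrative):

    /* with the ambient "shell-parser" declaration in scope, the otherwise
       untyped package can be imported and used with full type checking */
    import shellParser from "shell-parser"

    const argv: string[] = shellParser("ffmpeg -i input.wav output.mp3")
    console.log(argv)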
@@ -44,7 +44,7 @@ export class APIServer {
  ) {}

  /* start API server service */
- async start(args: CLIOptions, graph: NodeGraph): Promise<void> {
+ async start (args: CLIOptions, graph: NodeGraph): Promise<void> {
  /* define external request/response structure */
  const requestValidator = arktype.type({
  request: "string",
@@ -67,7 +67,7 @@ export class APIServer {
  throw new Error(`external request failed: no such node <${name}>`)
  }
  else {
- await Promise.race<void>([
+ await Promise.race([
  foundNode.receiveRequest(argList),
  util.timeout(10 * 1000)
  ]).catch((err: Error) => {
@@ -253,7 +253,7 @@ export class APIServer {
  peerInfo.ws.send(data)
  }
  for (const n of graph.getGraphNodes()) {
- Promise.race<void>([
+ Promise.race([
  n.receiveDashboard(info.type, info.id, info.kind, info.value),
  util.timeout(10 * 1000)
  ]).catch((err: Error) => {
@@ -267,7 +267,7 @@ export class APIServer {
  }

  /* stop API server service */
- async stop(args: CLIOptions): Promise<void> {
+ async stop (args: CLIOptions): Promise<void> {
  /* shutdown HAPI service */
  if (this.hapi) {
  this.cli.log("info", `HAPI: stopping REST/WebSocket network service: http://${args.a}:${args.p}`)
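The Promise.race hunks above only drop the explicit <void> type argument; the guard pattern itself is unchanged: a node operation is raced against util.timeout so a hung node cannot stall the server. A minimal, self-contained sketch of that pattern, assuming util.timeout(ms) behaves like a promise that rejects once ms milliseconds elapse (the helper names here are illustrative, not the package's own):

    /* illustrative stand-in for util.timeout: reject after the given delay */
    const timeout = (ms: number): Promise<never> =>
        new Promise((_resolve, reject) =>
            setTimeout(() => reject(new Error(`timeout after ${ms} ms`)), ms))

    /* race an operation against the timeout; both failure paths land in catch */
    async function guarded<T> (operation: Promise<T>, ms: number): Promise<T | void> {
        return Promise.race([ operation, timeout(ms) ])
            .catch((err: Error) => { console.error(err.message) })
    }

    /* usage: resolves long before the 10 second guard fires */
    await guarded(Promise.resolve("ok"), 10 * 1000)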
@@ -257,4 +257,4 @@ export class CLIContext {
  }
  process.exit(1)
  }
- }
+ }
@@ -56,8 +56,8 @@ export class NodeGraph {
  ): Promise<void> {
  /* internal helper for extracting error messages */
  const flowlinkErrorMsg = (err: unknown): string =>
- err instanceof Error && err.name === "FlowLinkError"
- ? err.toString() : (err instanceof Error ? err.message : "internal error")
+ err instanceof Error && err.name === "FlowLinkError" ?
+ err.toString() : (err instanceof Error ? err.message : "internal error")

  /* instantiate FlowLink parser */
  const flowlink = new FlowLink<SpeechFlowNode>({
@@ -157,7 +157,7 @@ export class NodeGraph {
  }

  /* graph establishment: PASS 3: open nodes */
- async openNodes(): Promise<void> {
+ async openNodes (): Promise<void> {
  this.timeZero = DateTime.now()
  for (const node of this.graphNodes) {
  /* connect node events */
@@ -171,7 +171,7 @@ export class NodeGraph {
  /* open node */
  this.cli.log("info", `open node <${node.id}>`)
  node.setTimeZero(this.timeZero)
- await Promise.race<void>([
+ await Promise.race([
  node.open(),
  util.timeout(30 * 1000)
  ]).catch((err: Error) => {
@@ -182,7 +182,7 @@ export class NodeGraph {
  }

  /* graph establishment: PASS 4: connect node streams */
- async connectStreams() {
+ async connectStreams () {
  for (const node of this.graphNodes) {
  if (node.stream === null)
  throw new Error(`stream of node <${node.id}> still not initialized`)
@@ -202,7 +202,7 @@ export class NodeGraph {
  }

  /* graph establishment: PASS 5: track stream finishing */
- trackFinishing(args: CLIOptions, api: APIServer): void {
+ trackFinishing (args: CLIOptions, api: APIServer): void {
  this.finishEvents.removeAllListeners()
  this.finishEvents.setMaxListeners(this.graphNodes.size + 10)
  for (const node of this.graphNodes) {
@@ -236,15 +236,15 @@ export class NodeGraph {
  }

  /* graph destruction: PASS 1: end node streams */
- async endStreams(): Promise<void> {
+ async endStreams (): Promise<void> {
  /* end all writable streams and wait for them to drain */
  const drainPromises: Promise<void>[] = []
  for (const node of this.graphNodes) {
  if (node.stream === null)
  continue
  const stream = node.stream
- if ((stream instanceof Stream.Writable || stream instanceof Stream.Duplex) &&
- (!stream.writableEnded && !stream.destroyed)) {
+ if ((stream instanceof Stream.Writable || stream instanceof Stream.Duplex)
+ && (!stream.writableEnded && !stream.destroyed)) {
  drainPromises.push(
  Promise.race([
  new Promise<void>((resolve) => {
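The endStreams hunk only reflows the boolean operators; the logic stays the same: each writable or duplex stream that is still open is ended, and the graph waits for it to drain, bounded by a deadline. A self-contained sketch of that idea (the helper name is illustrative, not the package's own):

    import Stream from "node:stream"

    /* end a writable stream and wait for "finish", but never longer than ms */
    function endWithDeadline (stream: Stream.Writable, ms: number): Promise<void> {
        const finished = new Promise<void>((resolve) => {
            stream.once("finish", () => resolve())
            stream.end()
        })
        const deadline = new Promise<void>((resolve) => setTimeout(resolve, ms))
        return Promise.race([ finished, deadline ])
    }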
@@ -261,7 +261,7 @@ export class NodeGraph {
  }

  /* graph destruction: PASS 2: disconnect node streams */
- async disconnectStreams(): Promise<void> {
+ async disconnectStreams (): Promise<void> {
  for (const node of this.graphNodes) {
  if (node.stream === null) {
  this.cli.log("warning", `stream of node <${node.id}> no longer initialized`)
@@ -289,10 +289,10 @@ export class NodeGraph {
  }

  /* graph destruction: PASS 3: close nodes */
- async closeNodes(): Promise<void> {
+ async closeNodes (): Promise<void> {
  for (const node of this.graphNodes) {
  this.cli.log("info", `close node <${node.id}>`)
- await Promise.race<void>([
+ await Promise.race([
  node.close(),
  util.timeout(10 * 1000)
  ]).catch((err: Error) => {
@@ -302,7 +302,7 @@ export class NodeGraph {
  }

  /* graph destruction: PASS 4: disconnect nodes */
- disconnectNodes(): void {
+ disconnectNodes (): void {
  for (const node of this.graphNodes) {
  this.cli.log("info", `disconnect node <${node.id}>`)
  const connectionsIn = Array.from(node.connectionsIn)
@@ -313,7 +313,7 @@ export class NodeGraph {
  }

  /* graph destruction: PASS 5: destroy nodes */
- destroyNodes(): void {
+ destroyNodes (): void {
  for (const node of this.graphNodes) {
  this.cli.log("info", `destroy node <${node.id}>`)
  this.graphNodes.delete(node)
@@ -321,7 +321,7 @@ export class NodeGraph {
  }

  /* setup signal handling for shutdown */
- setupSignalHandlers(args: CLIOptions, api: APIServer): void {
+ setupSignalHandlers (args: CLIOptions, api: APIServer): void {
  /* internal helper functions */
  const shutdownHandler = (signal: string) =>
  this.shutdown(signal, args, api)
@@ -356,7 +356,7 @@ export class NodeGraph {
  }

  /* shutdown procedure */
- async shutdown(signal: string, args: CLIOptions, api: APIServer): Promise<void> {
+ async shutdown (signal: string, args: CLIOptions, api: APIServer): Promise<void> {
  if (this.shuttingDown)
  return
  this.shuttingDown = true
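The setupSignalHandlers and shutdown hunks change only method-signature spacing; the underlying pattern is a signal-driven, idempotent shutdown guarded by a flag, as the shuttingDown check above shows. A minimal sketch of that pattern (variable and log text are illustrative):

    let shuttingDown = false

    async function shutdown (signal: string): Promise<void> {
        if (shuttingDown)          /* ignore repeated signals */
            return
        shuttingDown = true
        console.log(`received ${signal}, shutting down`)
        /* ... end streams, close nodes, stop API server ... */
        process.exit(0)
    }

    for (const signal of [ "SIGINT", "SIGTERM" ])
        process.on(signal, () => { void shutdown(signal) })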
@@ -59,4 +59,4 @@ export class NodeRegistry {
  }
  }
  }
- }
+ }
@@ -24,7 +24,7 @@ export class NodeStatusManager {
  ) {}

  /* gather and show status of all nodes */
- async showNodeStatus(
+ async showNodeStatus (
  nodes: { [ id: string ]: typeof SpeechFlowNode },
  cfg: NodeConfig,
  accessBus: (name: string) => EventEmitter
@@ -63,4 +63,4 @@ export class NodeStatusManager {
  const output = table.toString()
  process.stdout.write(output + "\n")
  }
- }
+ }
@@ -84,7 +84,7 @@ export default class Main {
  process.exit(0)
  }

- /* global library initialization */
+ /* initialize global libraries */
  await SpeexResampler.initPromise

  /* initialize graph processor */
@@ -15,7 +15,7 @@ class CompressorProcessor extends AudioWorkletProcessor {
  public reduction = 0

  /* eslint no-undef: off */
- static get parameterDescriptors(): AudioParamDescriptor[] {
+ static get parameterDescriptors (): AudioParamDescriptor[] {
  return [
  { name: "threshold", defaultValue: -23, minValue: -100, maxValue: 0, automationRate: "k-rate" }, // dBFS
  { name: "ratio", defaultValue: 4.0, minValue: 1.0, maxValue: 20, automationRate: "k-rate" }, // compression ratio
@@ -63,7 +63,7 @@ class CompressorProcessor extends AudioWorkletProcessor {
  }

  /* process a single sample frame */
- process(
+ process (
  inputs: Float32Array[][],
  outputs: Float32Array[][],
  parameters: Record<string, Float32Array>
@@ -125,4 +125,4 @@ class CompressorProcessor extends AudioWorkletProcessor {
  }

  /* register the new audio nodes */
- registerProcessor("compressor", CompressorProcessor)
+ registerProcessor("compressor", CompressorProcessor)
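The compressor worklet hunks are whitespace-only, but together they outline the full AudioWorkletProcessor contract: static parameterDescriptors declares the tunable parameters, process() handles one 128-frame render quantum, and registerProcessor() publishes the class under a node name. A minimal gain processor following the same conventions (worklet global scope assumed; the "gain" parameter is illustrative, not part of this package):

    class GainProcessor extends AudioWorkletProcessor {
        static get parameterDescriptors (): AudioParamDescriptor[] {
            return [
                { name: "gain", defaultValue: 1.0, minValue: 0.0, maxValue: 4.0, automationRate: "k-rate" }
            ]
        }
        process (
            inputs: Float32Array[][],
            outputs: Float32Array[][],
            parameters: Record<string, Float32Array>
        ): boolean {
            const input  = inputs[0]
            const output = outputs[0]
            const gain   = parameters.gain[0]   /* k-rate: one value per render quantum */
            for (let ch = 0; ch < input.length; ch++)
                for (let i = 0; i < input[ch].length; i++)
                    output[ch][i] = input[ch][i] * gain
            return true                         /* keep the processor alive */
        }
    }
    registerProcessor("gain", GainProcessor)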
@@ -69,10 +69,10 @@ class AudioCompressor extends util.WebAudio {
  await this.audioContext.audioWorklet.addModule(url)

  /* determine operation modes */
- const needsCompressor = (this.type === "standalone" && this.mode === "compress") ||
- (this.type === "sidechain" && this.mode === "measure")
- const needsGain = (this.type === "standalone" && this.mode === "compress") ||
- (this.type === "sidechain" && this.mode === "adjust")
+ const needsCompressor = (this.type === "standalone" && this.mode === "compress")
+ || (this.type === "sidechain" && this.mode === "measure")
+ const needsGain = (this.type === "standalone" && this.mode === "compress")
+ || (this.type === "sidechain" && this.mode === "adjust")

  /* create compressor worklet node */
  if (needsCompressor) {
@@ -250,8 +250,8 @@ export default class SpeechFlowNodeA2ACompressor extends SpeechFlowNode {
  callback(new Error("stream already destroyed"))
  return
  }
- if ((self.params.type === "standalone" && self.params.mode === "compress") ||
- (self.params.type === "sidechain" && self.params.mode === "adjust") ) {
+ if ((self.params.type === "standalone" && self.params.mode === "compress")
+ || (self.params.type === "sidechain" && self.params.mode === "adjust")) {
  /* take over compressed data */
  const payload = util.convertI16ToBuf(result)
  chunk.payload = payload
@@ -14,7 +14,7 @@ class ExpanderProcessor extends AudioWorkletProcessor {
  private sampleRate: number

  /* eslint no-undef: off */
- static get parameterDescriptors(): AudioParamDescriptor[] {
+ static get parameterDescriptors (): AudioParamDescriptor[] {
  return [
  { name: "threshold", defaultValue: -45, minValue: -100, maxValue: 0, automationRate: "k-rate" }, // dBFS
  { name: "floor", defaultValue: -64, minValue: -100, maxValue: 0, automationRate: "k-rate" }, // dBFS minimum output level
@@ -63,7 +63,7 @@ class ExpanderProcessor extends AudioWorkletProcessor {
  }

  /* process a single sample frame */
- process(
+ process (
  inputs: Float32Array[][],
  outputs: Float32Array[][],
  parameters: Record<string, Float32Array>
@@ -29,20 +29,20 @@ class AudioFiller extends EventEmitter {
  }

  /* optional helper to allow subscribing with strong typing */
- public on(event: "chunk", listener: (chunk: SpeechFlowChunk, type: string) => void): this
- public on(event: string, listener: (...args: any[]) => void): this {
+ public on (event: "chunk", listener: (chunk: SpeechFlowChunk, type: string) => void): this
+ public on (event: string, listener: (...args: any[]) => void): this {
  return super.on(event, listener)
  }

  /* convert fractional samples from duration */
- private samplesFromDuration(duration: Duration): number {
+ private samplesFromDuration (duration: Duration): number {
  const seconds = duration.as("seconds")
  const samples = seconds * this.sampleRate
  return samples
  }

  /* convert duration to fractional samples */
- private durationFromSamples(samples: number): Duration {
+ private durationFromSamples (samples: number): Duration {
  const seconds = samples / this.sampleRate
  return Duration.fromObject({ seconds })
  }
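The AudioFiller hunk adjusts only spacing around two helpers: a typed on() overload for "chunk" events and the duration/sample conversions (seconds times sample rate, and back). A self-contained sketch of both, assuming luxon Durations as in the code above; the Chunk type and the 48 kHz default rate are illustrative:

    import { EventEmitter } from "node:events"
    import { Duration } from "luxon"

    type Chunk = { payload: Buffer }   /* illustrative stand-in for SpeechFlowChunk */

    class TypedFiller extends EventEmitter {
        constructor (private sampleRate = 48000) { super() }

        /* overload narrows the listener type for "chunk" events */
        public on (event: "chunk", listener: (chunk: Chunk, type: string) => void): this
        public on (event: string, listener: (...args: any[]) => void): this {
            return super.on(event, listener)
        }

        /* duration -> fractional sample count */
        samplesFromDuration (duration: Duration): number {
            return duration.as("seconds") * this.sampleRate
        }

        /* fractional sample count -> duration */
        durationFromSamples (samples: number): Duration {
            return Duration.fromObject({ seconds: samples / this.sampleRate })
        }
    }

    /* example: 20 ms at 48 kHz corresponds to 960 samples */
    const filler = new TypedFiller()
    console.log(filler.samplesFromDuration(Duration.fromMillis(20)))   /* 960 */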
@@ -376,4 +376,4 @@ export default class SpeechFlowNodeA2AGender extends SpeechFlowNode {
  this.queue.pointerDelete("ac")
  this.queue.pointerDelete("send")
  }
- }
+ }
@@ -44,8 +44,8 @@ export default class SpeechFlowNodeA2AMute extends SpeechFlowNode {
  throw new Error("mute: node already destroyed")
  try {
  if (params.length === 2 && params[0] === "mode") {
- if (typeof params[1] !== "string" ||
- !params[1].match(/^(?:none|silenced|unplugged)$/))
+ if (typeof params[1] !== "string"
+ || !params[1].match(/^(?:none|silenced|unplugged)$/))
  throw new Error("mute: invalid mode argument in external request")
  const muteMode = params[1] as MuteMode
  this.setMuteMode(muteMode)
@@ -219,4 +219,4 @@ export default class SpeechFlowNodeA2APitch extends SpeechFlowNode {
  this.stream = null
  }
  }
- }
+ }
@@ -14,7 +14,7 @@ import { type DenoiseState, Rnnoise } from "@shiguredo/rnnoise-wasm"
  let rnnoise: Rnnoise
  let denoiseState: DenoiseState

- /* global initialization */
+ /* initialize globals */
  ;(async () => {
  try {
  rnnoise = await Rnnoise.load()
@@ -60,4 +60,4 @@ parentPort!.on("message", (msg) => {
  process.exit(0)
  }
  }
- })
+ })
@@ -63,7 +63,7 @@ export default class SpeechFlowNodeA2ARNNoise extends SpeechFlowNode {
  else if (typeof msg === "object" && msg !== null && msg.type === "failed")
  reject(new Error(msg.message ?? "RNNoise worker thread initialization failed"))
  else
- reject(new Error(`RNNoise worker thread sent unexpected message on startup`))
+ reject(new Error("RNNoise worker thread sent unexpected message on startup"))
  })
  this.worker!.once("error", (err) => {
  clearTimeout(timeout)
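The RNNoise hunks touch a worker-thread startup handshake: the worker reports success or failure via parentPort, and the parent resolves or rejects a one-shot promise guarded by a timeout. A minimal sketch of both sides of such a handshake (single self-spawning module; the message shape and timings are illustrative, not the package's own protocol):

    import { Worker, parentPort, isMainThread } from "node:worker_threads"

    if (isMainThread) {
        /* parent side: wait for the worker's first message or give up */
        const worker = new Worker(new URL(import.meta.url))
        await new Promise<void>((resolve, reject) => {
            const timeout = setTimeout(() => reject(new Error("worker startup timed out")), 5000)
            worker.once("message", (msg: { type: string, message?: string }) => {
                clearTimeout(timeout)
                if (msg.type === "ready") resolve()
                else reject(new Error(msg.message ?? "worker initialization failed"))
            })
            worker.once("error", (err) => { clearTimeout(timeout); reject(err) })
        })
    }
    else {
        /* worker side: report successful initialization */
        parentPort!.postMessage({ type: "ready" })
    }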
@@ -43,7 +43,7 @@ class AsyncQueue<T> {
  }
  this.queue.length = 0
  }
- async *[Symbol.asyncIterator](): AsyncIterator<T> {
+ async * [Symbol.asyncIterator] (): AsyncIterator<T> {
  while (true) {
  if (this.queue.length > 0) {
  const v = this.queue.shift()
@@ -128,7 +128,7 @@ export default class SpeechFlowNodeA2TAmazon extends SpeechFlowNode {

  /* create an AudioStream for Amazon Transcribe */
  const audioQueue = new AsyncQueue<Uint8Array>()
- const audioStream = (async function *(q: AsyncQueue<Uint8Array>): AsyncIterable<AudioStream> {
+ const audioStream = (async function * (q: AsyncQueue<Uint8Array>): AsyncIterable<AudioStream> {
  for await (const chunk of q) {
  yield { AudioEvent: { AudioChunk: chunk } }
  }
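These two hunks change only generator spacing; the mechanism is an async-iterable queue that a wrapping async generator drains with for await, turning pushed buffers into a stream of events. A self-contained sketch of that mechanism (the class and event shape are illustrative, not the Amazon Transcribe types):

    /* a queue that can be consumed with "for await" */
    class PushQueue<T> {
        private items: T[] = []
        private waiters: ((value: T) => void)[] = []
        push (item: T): void {
            const waiter = this.waiters.shift()
            if (waiter) waiter(item)
            else this.items.push(item)
        }
        async * [Symbol.asyncIterator] (): AsyncIterator<T> {
            while (true) {
                if (this.items.length > 0)
                    yield this.items.shift()!
                else
                    yield await new Promise<T>((resolve) => this.waiters.push(resolve))
            }
        }
    }

    /* wrap each queued buffer into an event object, as the Transcribe hunk does */
    const queue = new PushQueue<Uint8Array>()
    const events = (async function * (q: PushQueue<Uint8Array>) {
        for await (const chunk of q)
            yield { AudioEvent: { AudioChunk: chunk } }
    })(queue)

    queue.push(new Uint8Array([ 1, 2, 3 ]))
    console.log((await events.next()).value)   /* { AudioEvent: { AudioChunk: Uint8Array(3) } } */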
@@ -125,18 +125,18 @@ export default class SpeechFlowNodeA2TGoogle extends SpeechFlowNode {
  const words: { word: string, start: Duration, end: Duration }[] = []
  if (alternative.words && alternative.words.length > 0) {
  for (const wordInfo of alternative.words) {
- const wordStart = wordInfo.startTime
- ? Duration.fromMillis(
+ const wordStart = wordInfo.startTime ?
+ Duration.fromMillis(
  (Number(wordInfo.startTime.seconds ?? 0) * 1000) +
  (Number(wordInfo.startTime.nanos ?? 0) / 1000000)
- ).plus(this.timeZeroOffset)
- : Duration.fromMillis(0)
- const wordEnd = wordInfo.endTime
- ? Duration.fromMillis(
+ ).plus(this.timeZeroOffset) :
+ Duration.fromMillis(0)
+ const wordEnd = wordInfo.endTime ?
+ Duration.fromMillis(
  (Number(wordInfo.endTime.seconds ?? 0) * 1000) +
  (Number(wordInfo.endTime.nanos ?? 0) / 1000000)
- ).plus(this.timeZeroOffset)
- : Duration.fromMillis(0)
+ ).plus(this.timeZeroOffset) :
+ Duration.fromMillis(0)
  words.push({
  word: wordInfo.word ?? "",
  start: wordStart,
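The reformatted ternaries above all wrap the same conversion: Google's word timings arrive as protobuf-style { seconds, nanos } durations, which are turned into milliseconds (seconds * 1000 + nanos / 1,000,000) and shifted by the node's time-zero offset. A small sketch of that arithmetic, assuming luxon Durations as in the code above (the ProtoDuration type and offset value are illustrative):

    import { Duration } from "luxon"

    /* protobuf-style duration as delivered by the Google Speech API */
    interface ProtoDuration { seconds?: number | string, nanos?: number }

    function toDuration (t: ProtoDuration | undefined, offset: Duration): Duration {
        if (!t)
            return Duration.fromMillis(0)
        const millis = (Number(t.seconds ?? 0) * 1000) + (Number(t.nanos ?? 0) / 1000000)
        return Duration.fromMillis(millis).plus(offset)
    }

    /* example: 1 s + 500,000,000 ns = 1500 ms, shifted by a 250 ms offset -> 1750 ms */
    console.log(toDuration({ seconds: 1, nanos: 500000000 }, Duration.fromMillis(250)).toMillis())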
@@ -56,9 +56,9 @@ export default class SpeechFlowNodeT2AElevenlabs extends SpeechFlowNode {
  try {
  const elevenlabs = new ElevenLabs.ElevenLabsClient({ apiKey: this.params.key })
  const subscription = await elevenlabs.user.subscription.get()
- const percent = subscription.characterLimit > 0
- ? subscription.characterCount / subscription.characterLimit
- : 0
+ const percent = subscription.characterLimit > 0 ?
+ subscription.characterCount / subscription.characterLimit :
+ 0
  return { usage: `${percent.toFixed(2)}%` }
  }
  catch (_error) {
@@ -103,15 +103,15 @@ export default class SpeechFlowNodeT2AElevenlabs extends SpeechFlowNode {
  throw new Error(`invalid ElevenLabs voice "${this.params.voice}"`)
  }
  const labels = voice.labels ?? {}
- const info = Object.keys(labels).length > 0
- ? ", " + Object.entries(labels).map(([ key, val ]) => `${key}: "${val}"`).join(", ")
- : ""
+ const info = Object.keys(labels).length > 0 ?
+ ", " + Object.entries(labels).map(([ key, val ]) => `${key}: "${val}"`).join(", ") :
+ ""
  this.log("info", `selected voice: name: "${voice.name}"${info}`)

  /* perform text-to-speech operation with Elevenlabs API */
- const model = this.params.optimize === "quality"
- ? "eleven_turbo_v2_5"
- : "eleven_flash_v2_5"
+ const model = this.params.optimize === "quality" ?
+ "eleven_turbo_v2_5" :
+ "eleven_flash_v2_5"
  const speechStream = (text: string) => {
  this.log("info", `ElevenLabs: send text "${text}"`)
  return this.elevenlabs!.textToSpeech.convert(voice.voiceId, {
@@ -103,9 +103,9 @@ export default class SpeechFlowNodeT2AGoogle extends SpeechFlowNode {
  throw new Error("no audio content returned from Google TTS")

  /* convert response to buffer */
- const buffer = Buffer.isBuffer(response.audioContent)
- ? response.audioContent
- : Buffer.from(response.audioContent)
+ const buffer = Buffer.isBuffer(response.audioContent) ?
+ response.audioContent :
+ Buffer.from(response.audioContent)
  this.log("info", `Google TTS: received audio (buffer length: ${buffer.byteLength})`)

  /* resample from Google's sample rate to our standard rate */