speechflow 1.7.1 → 2.0.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/CHANGELOG.md +18 -0
- package/README.md +387 -119
- package/etc/claude.md +5 -5
- package/etc/speechflow.yaml +2 -2
- package/package.json +3 -3
- package/speechflow-cli/dst/speechflow-main-graph.d.ts +1 -0
- package/speechflow-cli/dst/speechflow-main-graph.js +28 -5
- package/speechflow-cli/dst/speechflow-main-graph.js.map +1 -1
- package/speechflow-cli/dst/speechflow-node-a2a-wav.js +24 -4
- package/speechflow-cli/dst/speechflow-node-a2a-wav.js.map +1 -1
- package/speechflow-cli/dst/speechflow-node-a2t-google.d.ts +17 -0
- package/speechflow-cli/dst/speechflow-node-a2t-google.js +320 -0
- package/speechflow-cli/dst/speechflow-node-a2t-google.js.map +1 -0
- package/speechflow-cli/dst/speechflow-node-t2a-google.d.ts +15 -0
- package/speechflow-cli/dst/speechflow-node-t2a-google.js +218 -0
- package/speechflow-cli/dst/speechflow-node-t2a-google.js.map +1 -0
- package/speechflow-cli/dst/speechflow-node-t2a-openai.d.ts +15 -0
- package/speechflow-cli/dst/speechflow-node-t2a-openai.js +195 -0
- package/speechflow-cli/dst/speechflow-node-t2a-openai.js.map +1 -0
- package/speechflow-cli/dst/speechflow-node-t2a-supertonic.d.ts +17 -0
- package/speechflow-cli/dst/speechflow-node-t2a-supertonic.js +608 -0
- package/speechflow-cli/dst/speechflow-node-t2a-supertonic.js.map +1 -0
- package/speechflow-cli/dst/speechflow-node-t2t-amazon.js.map +1 -1
- package/speechflow-cli/dst/{speechflow-node-t2t-transformers.d.ts → speechflow-node-t2t-opus.d.ts} +1 -3
- package/speechflow-cli/dst/speechflow-node-t2t-opus.js +159 -0
- package/speechflow-cli/dst/speechflow-node-t2t-opus.js.map +1 -0
- package/speechflow-cli/dst/speechflow-node-t2t-profanity.d.ts +11 -0
- package/speechflow-cli/dst/speechflow-node-t2t-profanity.js +118 -0
- package/speechflow-cli/dst/speechflow-node-t2t-profanity.js.map +1 -0
- package/speechflow-cli/dst/speechflow-node-t2t-punctuation.d.ts +13 -0
- package/speechflow-cli/dst/speechflow-node-t2t-punctuation.js +220 -0
- package/speechflow-cli/dst/speechflow-node-t2t-punctuation.js.map +1 -0
- package/speechflow-cli/dst/{speechflow-node-t2t-openai.d.ts → speechflow-node-t2t-spellcheck.d.ts} +2 -2
- package/speechflow-cli/dst/{speechflow-node-t2t-openai.js → speechflow-node-t2t-spellcheck.js} +47 -99
- package/speechflow-cli/dst/speechflow-node-t2t-spellcheck.js.map +1 -0
- package/speechflow-cli/dst/speechflow-node-t2t-subtitle.js +3 -6
- package/speechflow-cli/dst/speechflow-node-t2t-subtitle.js.map +1 -1
- package/speechflow-cli/dst/speechflow-node-t2t-summary.d.ts +16 -0
- package/speechflow-cli/dst/speechflow-node-t2t-summary.js +241 -0
- package/speechflow-cli/dst/speechflow-node-t2t-summary.js.map +1 -0
- package/speechflow-cli/dst/{speechflow-node-t2t-ollama.d.ts → speechflow-node-t2t-translate.d.ts} +2 -2
- package/speechflow-cli/dst/{speechflow-node-t2t-transformers.js → speechflow-node-t2t-translate.js} +53 -115
- package/speechflow-cli/dst/speechflow-node-t2t-translate.js.map +1 -0
- package/speechflow-cli/dst/speechflow-node-xio-exec.d.ts +12 -0
- package/speechflow-cli/dst/speechflow-node-xio-exec.js +223 -0
- package/speechflow-cli/dst/speechflow-node-xio-exec.js.map +1 -0
- package/speechflow-cli/dst/speechflow-node-xio-file.d.ts +1 -0
- package/speechflow-cli/dst/speechflow-node-xio-file.js +79 -66
- package/speechflow-cli/dst/speechflow-node-xio-file.js.map +1 -1
- package/speechflow-cli/dst/speechflow-node-xio-vban.d.ts +17 -0
- package/speechflow-cli/dst/speechflow-node-xio-vban.js +330 -0
- package/speechflow-cli/dst/speechflow-node-xio-vban.js.map +1 -0
- package/speechflow-cli/dst/speechflow-node-xio-webrtc.d.ts +39 -0
- package/speechflow-cli/dst/speechflow-node-xio-webrtc.js +500 -0
- package/speechflow-cli/dst/speechflow-node-xio-webrtc.js.map +1 -0
- package/speechflow-cli/dst/speechflow-util-audio.js +4 -5
- package/speechflow-cli/dst/speechflow-util-audio.js.map +1 -1
- package/speechflow-cli/dst/speechflow-util-error.d.ts +1 -0
- package/speechflow-cli/dst/speechflow-util-error.js +5 -0
- package/speechflow-cli/dst/speechflow-util-error.js.map +1 -1
- package/speechflow-cli/dst/speechflow-util-llm.d.ts +35 -0
- package/speechflow-cli/dst/speechflow-util-llm.js +363 -0
- package/speechflow-cli/dst/speechflow-util-llm.js.map +1 -0
- package/speechflow-cli/dst/speechflow-util.d.ts +1 -0
- package/speechflow-cli/dst/speechflow-util.js +1 -0
- package/speechflow-cli/dst/speechflow-util.js.map +1 -1
- package/speechflow-cli/etc/oxlint.jsonc +2 -1
- package/speechflow-cli/package.json +34 -17
- package/speechflow-cli/src/lib.d.ts +5 -0
- package/speechflow-cli/src/speechflow-main-graph.ts +31 -5
- package/speechflow-cli/src/speechflow-node-a2a-wav.ts +24 -4
- package/speechflow-cli/src/speechflow-node-a2t-google.ts +322 -0
- package/speechflow-cli/src/speechflow-node-t2a-google.ts +206 -0
- package/speechflow-cli/src/speechflow-node-t2a-openai.ts +179 -0
- package/speechflow-cli/src/speechflow-node-t2a-supertonic.ts +701 -0
- package/speechflow-cli/src/speechflow-node-t2t-amazon.ts +2 -1
- package/speechflow-cli/src/speechflow-node-t2t-opus.ts +136 -0
- package/speechflow-cli/src/speechflow-node-t2t-profanity.ts +93 -0
- package/speechflow-cli/src/speechflow-node-t2t-punctuation.ts +201 -0
- package/speechflow-cli/src/{speechflow-node-t2t-openai.ts → speechflow-node-t2t-spellcheck.ts} +48 -107
- package/speechflow-cli/src/speechflow-node-t2t-subtitle.ts +3 -6
- package/speechflow-cli/src/speechflow-node-t2t-summary.ts +229 -0
- package/speechflow-cli/src/speechflow-node-t2t-translate.ts +181 -0
- package/speechflow-cli/src/speechflow-node-xio-exec.ts +210 -0
- package/speechflow-cli/src/speechflow-node-xio-file.ts +92 -79
- package/speechflow-cli/src/speechflow-node-xio-vban.ts +325 -0
- package/speechflow-cli/src/speechflow-node-xio-webrtc.ts +533 -0
- package/speechflow-cli/src/speechflow-util-audio.ts +5 -5
- package/speechflow-cli/src/speechflow-util-error.ts +9 -0
- package/speechflow-cli/src/speechflow-util-llm.ts +367 -0
- package/speechflow-cli/src/speechflow-util.ts +1 -0
- package/speechflow-ui-db/package.json +9 -9
- package/speechflow-ui-st/package.json +9 -9
- package/speechflow-cli/dst/speechflow-node-t2t-ollama.js +0 -293
- package/speechflow-cli/dst/speechflow-node-t2t-ollama.js.map +0 -1
- package/speechflow-cli/dst/speechflow-node-t2t-openai.js.map +0 -1
- package/speechflow-cli/dst/speechflow-node-t2t-transformers.js.map +0 -1
- package/speechflow-cli/src/speechflow-node-t2t-ollama.ts +0 -281
- package/speechflow-cli/src/speechflow-node-t2t-transformers.ts +0 -247
package/speechflow-cli/src/speechflow-node-t2a-supertonic.ts
@@ -0,0 +1,701 @@
+/*
+** SpeechFlow - Speech Processing Flow Graph
+** Copyright (c) 2024-2025 Dr. Ralf S. Engelschall <rse@engelschall.com>
+** Licensed under GPL 3.0 <https://spdx.org/licenses/GPL-3.0-only>
+*/
+
+/* standard dependencies */
+import fs from "node:fs"
+import path from "node:path"
+import Stream from "node:stream"
+
+/* external dependencies */
+import { mkdirp } from "mkdirp"
+import * as HF from "@huggingface/hub"
+import SpeexResampler from "speex-resampler"
+import { Duration } from "luxon"
+
+/* @ts-expect-error no type available */
+import * as ORT from "onnxruntime-node"
+
+/* internal dependencies */
+import SpeechFlowNode, { SpeechFlowChunk } from "./speechflow-node"
+import * as util from "./speechflow-util"
+
+/* ==== SUPERTONIC TTS IMPLEMENTATION ==== */
+
+/* type for voice style tensors */
+interface SupertonicStyle {
+    ttl: ORT.Tensor
+    dp: ORT.Tensor
+}
+
+/* type for TTS configuration */
+interface SupertonicConfig {
+    ae: {
+        sample_rate: number
+        base_chunk_size: number
+        chunk_compress_factor: number
+    }
+    ttl: {
+        latent_dim: number
+        chunk_compress_factor: number
+    }
+}
+
+/* convert lengths to binary mask */
+function lengthToMask (lengths: number[], maxLen: number | null = null): number[][][] {
+    /* handle empty input */
+    if (lengths.length === 0)
+        return []
+
+    /* determine maximum length */
+    maxLen = maxLen ?? Math.max(...lengths)
+
+    /* build mask array */
+    const mask: number[][][] = []
+    for (let i = 0; i < lengths.length; i++) {
+        const row: number[] = []
+        for (let j = 0; j < maxLen; j++)
+            row.push(j < lengths[i] ? 1.0 : 0.0)
+        mask.push([ row ])
+    }
+    return mask
+}
+
+/* get latent mask from wav lengths */
+function getLatentMask (wavLengths: number[], baseChunkSize: number, chunkCompressFactor: number): number[][][] {
+    /* calculate latent size and lengths */
+    const latentSize = baseChunkSize * chunkCompressFactor
+    const latentLengths = wavLengths.map((len) =>
+        Math.floor((len + latentSize - 1) / latentSize))
+
+    /* generate mask from latent lengths */
+    return lengthToMask(latentLengths)
+}
+
+/* convert array to ONNX tensor */
+function arrayToTensor (array: number[] | number[][] | number[][][], dims: number[]): ORT.Tensor {
+    /* flatten array and create float32 tensor */
+    const flat = array.flat(Infinity) as number[]
+    return new ORT.Tensor("float32", Float32Array.from(flat), dims)
+}
+
+/* convert int array to ONNX tensor */
+function intArrayToTensor (array: number[][], dims: number[]): ORT.Tensor {
+    /* flatten array and create int64 tensor */
+    const flat = array.flat(Infinity) as number[]
+    return new ORT.Tensor("int64", BigInt64Array.from(flat.map(BigInt)), dims)
+}
+
+/* chunk text into manageable segments */
+function chunkText (text: string, maxLen = 300): string[] {
+    /* validate input type */
+    if (typeof text !== "string")
+        throw new Error(`chunkText expects a string, got ${typeof text}`)
+
+    /* split by paragraph (two or more newlines) */
+    const paragraphs = text.trim().split(/\n\s*\n+/).filter((p) => p.trim())
+
+    /* process each paragraph into chunks */
+    const chunks: string[] = []
+    for (let paragraph of paragraphs) {
+        paragraph = paragraph.trim()
+        if (!paragraph)
+            continue
+
+        /* split by sentence boundaries (period, question mark, exclamation mark followed by space)
+           but exclude common abbreviations like Mr., Mrs., Dr., etc. and single capital letters like F. */
+        const sentences = paragraph.split(/(?<!Mr\.|Mrs\.|Ms\.|Dr\.|Prof\.|Sr\.|Jr\.|Ph\.D\.|etc\.|e\.g\.|i\.e\.|vs\.|Inc\.|Ltd\.|Co\.|Corp\.|St\.|Ave\.|Blvd\.)(?<!\b[A-Z]\.)(?<=[.!?])\s+/)
+
+        /* accumulate sentences into chunks respecting max length */
+        let currentChunk = ""
+        for (const sentence of sentences) {
+            if (currentChunk.length + sentence.length + 1 <= maxLen)
+                currentChunk += (currentChunk ? " " : "") + sentence
+            else {
+                if (currentChunk)
+                    chunks.push(currentChunk.trim())
+                currentChunk = sentence
+            }
+        }
+
+        /* push remaining chunk */
+        if (currentChunk)
+            chunks.push(currentChunk.trim())
+    }
+    return chunks
+}
+
+/* unicode text processor class */
+class SupertonicTextProcessor {
+    private indexer: Record<number, number>
+
+    constructor (unicodeIndexerJsonPath: string) {
+        /* load and parse unicode indexer JSON */
+        try {
+            this.indexer = JSON.parse(fs.readFileSync(unicodeIndexerJsonPath, "utf8"))
+        }
+        catch (err) {
+            throw new Error(`failed to parse unicode indexer JSON "${unicodeIndexerJsonPath}"`, { cause: err })
+        }
+    }
+
+    private preprocessText (text: string): string {
+        /* normalize text */
+        text = text.normalize("NFKD")
+
+        /* remove emojis (wide Unicode range) */
+        const emojiPattern = /[\u{1F600}-\u{1F64F}\u{1F300}-\u{1F5FF}\u{1F680}-\u{1F6FF}\u{1F700}-\u{1F77F}\u{1F780}-\u{1F7FF}\u{1F800}-\u{1F8FF}\u{1F900}-\u{1F9FF}\u{1FA00}-\u{1FA6F}\u{1FA70}-\u{1FAFF}\u{2600}-\u{26FF}\u{2700}-\u{27BF}\u{1F1E6}-\u{1F1FF}]+/gu
+        text = text.replace(emojiPattern, "")
+
+        /* replace various dashes and symbols */
+        const replacements: Record<string, string> = {
+            "–": "-",
+            "‑": "-",
+            "—": "-",
+            "¯": " ",
+            "_": " ",
+            "\u201C": "\"",
+            "\u201D": "\"",
+            "\u2018": "'",
+            "\u2019": "'",
+            "´": "'",
+            "`": "'",
+            "[": " ",
+            "]": " ",
+            "|": " ",
+            "/": " ",
+            "#": " ",
+            "→": " ",
+            "←": " "
+        }
+        for (const [ k, v ] of Object.entries(replacements))
+            text = text.replaceAll(k, v)
+
+        /* remove combining diacritics */
+        text = text.replace(/[\u0302\u0303\u0304\u0305\u0306\u0307\u0308\u030A\u030B\u030C\u0327\u0328\u0329\u032A\u032B\u032C\u032D\u032E\u032F]/g, "")
+
+        /* remove special symbols */
+        text = text.replace(/[♥☆♡©\\]/g, "")
+
+        /* replace known expressions */
+        const exprReplacements: Record<string, string> = {
+            "@": " at ",
+            "e.g.,": "for example, ",
+            "i.e.,": "that is, "
+        }
+        for (const [ k, v ] of Object.entries(exprReplacements))
+            text = text.replaceAll(k, v)
+
+        /* fix spacing around punctuation */
+        text = text.replace(/ ,/g, ",")
+        text = text.replace(/ \./g, ".")
+        text = text.replace(/ !/g, "!")
+        text = text.replace(/ \?/g, "?")
+        text = text.replace(/ ;/g, ";")
+        text = text.replace(/ :/g, ":")
+        text = text.replace(/ '/g, "'")
+
+        /* remove duplicate quotes */
+        text = text.replace(/""+/g, "\"")
+        text = text.replace(/''+/g, "'")
+        text = text.replace(/``+/g, "`")
+
+        /* remove extra spaces */
+        text = text.replace(/\s+/g, " ").trim()
+
+        /* if text doesn't end with punctuation, add a period */
+        if (!/[.!?;:,'"')\]}…。」』】〉》›»]$/.test(text))
+            text += "."
+        return text
+    }
+
+    private textToUnicodeValues (text: string): number[] {
+        /* convert text characters to unicode code points */
+        return Array.from(text).map((char) => char.charCodeAt(0))
+    }
+
+    call (textList: string[]): { textIds: number[][], textMask: number[][][] } {
+        /* handle empty input */
+        if (textList.length === 0)
+            return { textIds: [], textMask: [] }
+
+        /* preprocess all texts */
+        const processedTexts = textList.map((t) => this.preprocessText(t))
+        const textIdsLengths = processedTexts.map((t) => t.length)
+        const maxLen = Math.max(...textIdsLengths)
+
+        /* convert texts to indexed token arrays */
+        const textIds: number[][] = []
+        for (let i = 0; i < processedTexts.length; i++) {
+            const row = Array.from<number>({ length: maxLen }).fill(0)
+            const unicodeVals = this.textToUnicodeValues(processedTexts[i])
+            for (let j = 0; j < unicodeVals.length; j++)
+                row[j] = this.indexer[unicodeVals[j]] ?? 0
+            textIds.push(row)
+        }
+
+        /* generate text mask from lengths */
+        const textMask = lengthToMask(textIdsLengths)
+        return { textIds, textMask }
+    }
+}
+
+/* Supertonic TTS engine class */
+class SupertonicTTS {
+    public sampleRate: number
+
+    private cfgs: SupertonicConfig
+    private textProcessor: SupertonicTextProcessor
+    private dpOrt: ORT.InferenceSession
+    private textEncOrt: ORT.InferenceSession
+    private vectorEstOrt: ORT.InferenceSession
+    private vocoderOrt: ORT.InferenceSession
+    private baseChunkSize: number
+    private chunkCompressFactor: number
+    private latentDim: number
+
+    constructor (
+        cfgs: SupertonicConfig,
+        textProcessor: SupertonicTextProcessor,
+        dpOrt: ORT.InferenceSession,
+        textEncOrt: ORT.InferenceSession,
+        vectorEstOrt: ORT.InferenceSession,
+        vocoderOrt: ORT.InferenceSession
+    ) {
+        /* store configuration and dependencies */
+        this.cfgs = cfgs
+        this.textProcessor = textProcessor
+        this.dpOrt = dpOrt
+        this.textEncOrt = textEncOrt
+        this.vectorEstOrt = vectorEstOrt
+        this.vocoderOrt = vocoderOrt
+
+        /* extract configuration values */
+        this.sampleRate = cfgs.ae.sample_rate
+        this.baseChunkSize = cfgs.ae.base_chunk_size
+        this.chunkCompressFactor = cfgs.ttl.chunk_compress_factor
+        this.latentDim = cfgs.ttl.latent_dim
+    }
+
+    private sampleNoisyLatent (duration: number[]): { noisyLatent: number[][][], latentMask: number[][][] } {
+        /* calculate dimensions for latent space */
+        const wavLenMax = Math.max(...duration) * this.sampleRate
+        const wavLengths = duration.map((d) => Math.floor(d * this.sampleRate))
+        const chunkSize = this.baseChunkSize * this.chunkCompressFactor
+        const latentLen = Math.floor((wavLenMax + chunkSize - 1) / chunkSize)
+        const latentDimExpanded = this.latentDim * this.chunkCompressFactor
+
+        /* generate random noise (pre-allocate arrays for performance) */
+        const noisyLatent: number[][][] = Array.from({ length: duration.length })
+        for (let b = 0; b < duration.length; b++) {
+            const batch: number[][] = Array.from({ length: latentDimExpanded })
+            for (let d = 0; d < latentDimExpanded; d++) {
+                const row: number[] = Array.from({ length: latentLen })
+                for (let t = 0; t < latentLen; t++) {
+
+                    /* Box-Muller transform for normal distribution */
+                    const eps = 1e-10
+                    const u1 = Math.max(eps, Math.random())
+                    const u2 = Math.random()
+                    row[t] = Math.sqrt(-2.0 * Math.log(u1)) * Math.cos(2.0 * Math.PI * u2)
+                }
+                batch[d] = row
+            }
+            noisyLatent[b] = batch
+        }
+
+        /* apply mask */
+        const latentMask = getLatentMask(wavLengths, this.baseChunkSize, this.chunkCompressFactor)
+        for (let b = 0; b < noisyLatent.length; b++) {
+            for (let d = 0; d < noisyLatent[b].length; d++) {
+                for (let t = 0; t < noisyLatent[b][d].length; t++)
+                    noisyLatent[b][d][t] *= latentMask[b][0][t]
+            }
+        }
+        return { noisyLatent, latentMask }
+    }
+
+    private async infer (textList: string[], style: SupertonicStyle, totalStep: number, speed: number): Promise<{ wav: number[], duration: number[] }> {
+        /* validate batch size matches style vectors */
+        if (textList.length !== style.ttl.dims[0])
+            throw new Error("Number of texts must match number of style vectors")
+
+        /* process text into token IDs and masks */
+        const batchSize = textList.length
+        const { textIds, textMask } = this.textProcessor.call(textList)
+        const textIdsShape = [ batchSize, textIds[0].length ]
+        const textMaskShape = [ batchSize, 1, textMask[0][0].length ]
+        const textMaskTensor = arrayToTensor(textMask, textMaskShape)
+
+        /* run duration predictor model */
+        const dpResult = await this.dpOrt.run({
+            text_ids: intArrayToTensor(textIds, textIdsShape),
+            style_dp: style.dp,
+            text_mask: textMaskTensor
+        })
+        const predictedDurations = Array.from(dpResult.duration.data as Float32Array)
+
+        /* apply speed factor to duration */
+        for (let i = 0; i < predictedDurations.length; i++)
+            predictedDurations[i] /= speed
+
+        /* run text encoder model */
+        const textEncResult = await this.textEncOrt.run({
+            text_ids: intArrayToTensor(textIds, textIdsShape),
+            style_ttl: style.ttl,
+            text_mask: textMaskTensor
+        })
+        const textEmbTensor = textEncResult.text_emb
+
+        /* sample initial noisy latent vectors */
+        const { noisyLatent, latentMask } = this.sampleNoisyLatent(predictedDurations)
+        const latentShape = [ batchSize, noisyLatent[0].length, noisyLatent[0][0].length ]
+        const latentMaskShape = [ batchSize, 1, latentMask[0][0].length ]
+        const latentMaskTensor = arrayToTensor(latentMask, latentMaskShape)
+
+        /* prepare step tensors */
+        const totalStepArray = Array.from<number>({ length: batchSize }).fill(totalStep)
+        const scalarShape = [ batchSize ]
+        const totalStepTensor = arrayToTensor(totalStepArray, scalarShape)
+
+        /* iteratively denoise latent vectors */
+        for (let step = 0; step < totalStep; step++) {
+            const currentStepArray = Array.from<number>({ length: batchSize }).fill(step)
+
+            /* run vector estimator model */
+            const vectorEstResult = await this.vectorEstOrt.run({
+                noisy_latent: arrayToTensor(noisyLatent, latentShape),
+                text_emb: textEmbTensor,
+                style_ttl: style.ttl,
+                text_mask: textMaskTensor,
+                latent_mask: latentMaskTensor,
+                total_step: totalStepTensor,
+                current_step: arrayToTensor(currentStepArray, scalarShape)
+            })
+            const denoisedLatent = Array.from(vectorEstResult.denoised_latent.data as Float32Array)
+
+            /* update latent with the denoised output */
+            let idx = 0
+            for (let b = 0; b < noisyLatent.length; b++)
+                for (let d = 0; d < noisyLatent[b].length; d++)
+                    for (let t = 0; t < noisyLatent[b][d].length; t++)
+                        noisyLatent[b][d][t] = denoisedLatent[idx++]
+        }
+
+        /* run vocoder to generate audio waveform */
+        const vocoderResult = await this.vocoderOrt.run({
+            latent: arrayToTensor(noisyLatent, latentShape)
+        })
+        const wav = Array.from(vocoderResult.wav_tts.data as Float32Array)
+        return { wav, duration: predictedDurations }
+    }
+
+    async synthesize (text: string, style: SupertonicStyle, totalStep: number, speed: number, silenceDuration = 0.3): Promise<{ wav: number[], duration: number }> {
+        /* validate single speaker mode */
+        if (style.ttl.dims[0] !== 1)
+            throw new Error("Single speaker text to speech only supports single style")
+
+        /* chunk text into segments */
+        const textList = chunkText(text)
+        if (textList.length === 0)
+            return { wav: [], duration: 0 }
+
+        /* synthesize each chunk and concatenate with silence */
+        const wavParts: number[][] = []
+        let totalDuration = 0
+        for (const chunk of textList) {
+            const { wav, duration } = await this.infer([ chunk ], style, totalStep, speed)
+
+            /* insert silence between chunks */
+            if (wavParts.length > 0) {
+                const silenceLen = Math.floor(silenceDuration * this.sampleRate)
+                wavParts.push(Array.from<number>({ length: silenceLen }).fill(0))
+                totalDuration += silenceDuration
+            }
+            wavParts.push(wav)
+            totalDuration += duration[0]
+        }
+        return { wav: wavParts.flat(), duration: totalDuration }
+    }
+
+    async release (): Promise<void> {
+        /* release all ONNX inference sessions */
+        await Promise.all([
+            this.dpOrt.release(),
+            this.textEncOrt.release(),
+            this.vectorEstOrt.release(),
+            this.vocoderOrt.release()
+        ])
+    }
+}
+
+/* type for voice style JSON file */
+interface VoiceStyleJSON {
+    style_ttl: { dims: number[], data: number[][][] }
+    style_dp: { dims: number[], data: number[][][] }
+}
+
+/* load voice style from JSON file */
+async function loadVoiceStyle (voiceStylePath: string): Promise<SupertonicStyle> {
+    /* read and parse voice style JSON */
+    let voiceStyle: VoiceStyleJSON
+    try {
+        voiceStyle = JSON.parse(await fs.promises.readFile(voiceStylePath, "utf8")) as VoiceStyleJSON
+    }
+    catch (err) {
+        throw new Error(`failed to parse voice style JSON "${voiceStylePath}"`, { cause: err })
+    }
+
+    /* extract dimensions and data */
+    const ttlDims = voiceStyle.style_ttl.dims
+    const dpDims = voiceStyle.style_dp.dims
+    const ttlData = voiceStyle.style_ttl.data.flat(Infinity) as number[]
+    const dpData = voiceStyle.style_dp.data.flat(Infinity) as number[]
+
+    /* create ONNX tensors for style vectors */
+    const ttlStyle = new ORT.Tensor("float32", Float32Array.from(ttlData), ttlDims)
+    const dpStyle = new ORT.Tensor("float32", Float32Array.from(dpData), dpDims)
+    return { ttl: ttlStyle, dp: dpStyle }
+}
+
+/* load TTS engine from ONNX models */
+async function loadSupertonic (assetsDir: string): Promise<SupertonicTTS> {
+    /* load configuration */
+    const cfgPath = path.join(assetsDir, "onnx", "tts.json")
+    let cfgs: SupertonicConfig
+    try {
+        cfgs = JSON.parse(await fs.promises.readFile(cfgPath, "utf8"))
+    }
+    catch (err) {
+        throw new Error(`failed to parse TTS config JSON "${cfgPath}"`, { cause: err })
+    }
+
+    /* load text processor */
+    const unicodeIndexerPath = path.join(assetsDir, "onnx", "unicode_indexer.json")
+    const textProcessor = new SupertonicTextProcessor(unicodeIndexerPath)
+
+    /* load ONNX models */
+    const opts: ORT.InferenceSession.SessionOptions = {}
+    const [ dpOrt, textEncOrt, vectorEstOrt, vocoderOrt ] = await Promise.all([
+        ORT.InferenceSession.create(path.join(assetsDir, "onnx", "duration_predictor.onnx"), opts),
+        ORT.InferenceSession.create(path.join(assetsDir, "onnx", "text_encoder.onnx"), opts),
+        ORT.InferenceSession.create(path.join(assetsDir, "onnx", "vector_estimator.onnx"), opts),
+        ORT.InferenceSession.create(path.join(assetsDir, "onnx", "vocoder.onnx"), opts)
+    ])
+    return new SupertonicTTS(cfgs, textProcessor, dpOrt, textEncOrt, vectorEstOrt, vocoderOrt)
+}
+
+/* ==== SPEECHFLOW NODE IMPLEMENTATION ==== */
+
+/* SpeechFlow node for Supertonic text-to-speech conversion */
+export default class SpeechFlowNodeT2ASupertonic extends SpeechFlowNode {
+    /* declare official node name */
+    public static name = "t2a-supertonic"
+
+    /* internal state */
+    private supertonic: SupertonicTTS | null = null
+    private style: SupertonicStyle | null = null
+    private resampler: SpeexResampler | null = null
+    private closing = false
+
+    /* construct node */
+    constructor (id: string, cfg: { [ id: string ]: any }, opts: { [ id: string ]: any }, args: any[]) {
+        super(id, cfg, opts, args)
+
+        /* declare node configuration parameters */
+        this.configure({
+            voice: { type: "string", val: "M1", pos: 0, match: /^(?:M1|M2|F1|F2)$/ },
+            speed: { type: "number", val: 1.40, pos: 1, match: (n: number) => n >= 0.5 && n <= 2.0 },
+            steps: { type: "number", val: 20, pos: 2, match: (n: number) => n >= 1 && n <= 20 }
+        })
+
+        /* declare node input/output format */
+        this.input = "text"
+        this.output = "audio"
+    }
+
+    /* one-time status of node */
+    async status () {
+        return {}
+    }
+
+    /* download HuggingFace assets */
+    private async downloadAssets () {
+        /* define HuggingFace repository and required files */
+        const assetRepo = "Supertone/supertonic"
+        const assetFiles = [
+            "voice_styles/F1.json",
+            "voice_styles/F2.json",
+            "voice_styles/M1.json",
+            "voice_styles/M2.json",
+            "onnx/tts.json",
+            "onnx/duration_predictor.onnx",
+            "onnx/text_encoder.onnx",
+            "onnx/unicode_indexer.json",
+            "onnx/vector_estimator.onnx",
+            "onnx/vocoder.onnx",
+        ]
+
+        /* create asset directories */
+        const assetDir = path.join(this.config.cacheDir, "supertonic")
+        await mkdirp(path.join(assetDir, "voice_styles"), { mode: 0o750 })
+        await mkdirp(path.join(assetDir, "onnx"), { mode: 0o750 })
+
+        /* download missing asset files */
+        for (const assetFile of assetFiles) {
+            const url = `${assetRepo}/${assetFile}`
+            const file = path.join(assetDir, assetFile)
+            const stat = await fs.promises.stat(file).catch((_err) => null)
+            if (stat === null || !stat.isFile()) {
+                this.log("info", `downloading from HuggingFace "${url}"`)
+                const response = await HF.downloadFile({ repo: assetRepo, path: assetFile })
+                if (!response)
+                    throw new Error(`failed to download from HuggingFace "${url}"`)
+                const buffer = Buffer.from(await response.arrayBuffer())
+                await fs.promises.writeFile(file, buffer)
+            }
+        }
+        return assetDir
+    }
+
+    /* open node */
+    async open () {
+        this.closing = false
+
+        /* download assets */
+        const assetsDir = await this.downloadAssets()
+
+        /* download ONNX models */
+        this.log("info", `loading ONNX models (asset dir: "${assetsDir}")`)
+        this.supertonic = await loadSupertonic(assetsDir)
+        this.log("info", `loaded ONNX models (sample rate: ${this.supertonic.sampleRate}Hz)`)
+
+        /* load voice style */
+        const voiceStylePath = path.join(assetsDir, "voice_styles", `${this.params.voice}.json`)
+        if (!fs.existsSync(voiceStylePath))
+            throw new Error(`voice style not found: ${voiceStylePath}`)
+        this.log("info", `loading voice style "${this.params.voice}"`)
+        this.style = await loadVoiceStyle(voiceStylePath)
+        this.log("info", `loaded voice style "${this.params.voice}"`)
+
+        /* establish resampler from Supertonic's output sample rate to our standard audio sample rate (48kHz) */
+        this.resampler = new SpeexResampler(1, this.supertonic.sampleRate, this.config.audioSampleRate, 7)
+
+        /* perform text-to-speech operation with Supertonic */
+        const text2speech = async (text: string) => {
+            /* synthesize speech from text */
+            this.log("info", `Supertonic: input: "${text}"`)
+            const { wav, duration } = await this.supertonic!.synthesize(
+                text,
+                this.style!,
+                this.params.steps,
+                this.params.speed
+            )
+            this.log("info", `Supertonic: synthesized ${duration.toFixed(2)}s of audio`)
+
+            /* convert audio samples from PCM/F32 to PCM/I16 */
+            const buffer1 = Buffer.alloc(wav.length * 2)
+            for (let i = 0; i < wav.length; i++) {
+                const sample = Math.max(-1, Math.min(1, wav[i]))
+                buffer1.writeInt16LE(sample * 0x7FFF, i * 2)
+            }
+
+            /* resample audio samples from 44.1kHz to 48kHz */
+            const buffer2 = this.resampler!.processChunk(buffer1)
+            return buffer2
+        }
+
+        /* create transform stream and connect it to the Supertonic TTS */
+        const self = this
+        this.stream = new Stream.Transform({
+            writableObjectMode: true,
+            readableObjectMode: true,
+            decodeStrings: false,
+            highWaterMark: 1,
+            async transform (chunk: SpeechFlowChunk, encoding, callback) {
+                if (self.closing)
+                    callback(new Error("stream already destroyed"))
+                else if (Buffer.isBuffer(chunk.payload))
+                    callback(new Error("invalid chunk payload type"))
+                else {
+                    let processTimeout: ReturnType<typeof setTimeout> | null = setTimeout(() => {
+                        processTimeout = null
+                        callback(new Error("Supertonic TTS timeout"))
+                    }, 120 * 1000)
+                    const clearProcessTimeout = () => {
+                        if (processTimeout !== null) {
+                            clearTimeout(processTimeout)
+                            processTimeout = null
+                        }
+                    }
+                    try {
+                        if (self.closing) {
+                            clearProcessTimeout()
+                            callback(new Error("stream destroyed during processing"))
+                            return
+                        }
+                        const buffer = await text2speech(chunk.payload as string)
+                        if (self.closing) {
+                            clearProcessTimeout()
+                            callback(new Error("stream destroyed during processing"))
+                            return
+                        }
+                        self.log("info", `Supertonic: received audio (buffer length: ${buffer.byteLength})`)
+
+                        /* calculate actual audio duration from PCM buffer size */
+                        const durationMs = util.audioBufferDuration(buffer,
+                            self.config.audioSampleRate, self.config.audioBitDepth) * 1000
+
+                        /* create new chunk with recalculated timestamps */
+                        const chunkNew = chunk.clone()
+                        chunkNew.type = "audio"
+                        chunkNew.payload = buffer
+                        chunkNew.timestampEnd = Duration.fromMillis(chunkNew.timestampStart.toMillis() + durationMs)
+
+                        /* push chunk and complete transform */
+                        clearProcessTimeout()
+                        this.push(chunkNew)
+                        callback()
+                    }
+                    catch (error) {
+
+                        /* handle processing errors */
+                        clearProcessTimeout()
+                        callback(util.ensureError(error, "Supertonic processing failed"))
+                    }
+                }
+            },
+            final (callback) {
+                callback()
+            }
+        })
+    }
+
+    /* close node */
+    async close () {
+        /* indicate closing */
+        this.closing = true
+
+        /* shutdown stream */
+        if (this.stream !== null) {
+            await util.destroyStream(this.stream)
+            this.stream = null
+        }
+
+        /* destroy voice style */
+        if (this.style !== null)
+            this.style = null
+
+        /* destroy resampler */
+        if (this.resampler !== null)
+            this.resampler = null
+
+        /* destroy Supertonic TTS */
+        if (this.supertonic !== null) {
+            await this.supertonic.release()
+            this.supertonic = null
+        }
+    }
+}
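
For orientation, the shape helpers in the new file have compact, verifiable semantics. A minimal worked example (values derived by hand from the code above, not output of the package):

    /* lengthToMask pads each batch entry to the batch maximum and returns shape [batch, 1, maxLen] */
    lengthToMask([ 2, 3 ])            /* → [ [ [ 1, 1, 0 ] ], [ [ 1, 1, 1 ] ] ] */

    /* getLatentMask first compresses each waveform length by
       ceil(len / (baseChunkSize * chunkCompressFactor)), then masks the result */
    getLatentMask([ 1024 ], 256, 2)   /* → lengthToMask([ 2 ]) = [ [ [ 1, 1 ] ] ] */

    /* chunkText splits on sentence boundaries (skipping common abbreviations)
       and greedily packs sentences into chunks of at most maxLen characters */
    chunkText("One. Two. Three.", 10) /* → [ "One. Two.", "Three." ] */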
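
Taken together, the primitives above form a small pipeline: create the four ONNX inference sessions, load a voice style, synthesize PCM/F32 samples at the model's native sample rate, then convert to PCM/I16 and resample, which is exactly how the node's open() method wires them up. A minimal standalone sketch, assuming loadSupertonic() and loadVoiceStyle() were exported (in the package they are module-private and reachable only through the t2a-supertonic node):

    import SpeexResampler from "speex-resampler"

    async function demo (assetsDir: string): Promise<Buffer> {
        /* create the four ONNX sessions plus config and text processor */
        const tts = await loadSupertonic(assetsDir)

        /* load one of the bundled voice styles (M1, M2, F1 or F2) */
        const style = await loadVoiceStyle(`${assetsDir}/voice_styles/M1.json`)

        /* synthesize with the node's defaults: 20 denoising steps, 1.40x speed */
        const { wav, duration } = await tts.synthesize("Hello, world.", style, 20, 1.40)
        console.log(`synthesized ${duration.toFixed(2)}s at ${tts.sampleRate}Hz`)

        /* convert PCM/F32 [-1,1] to PCM/I16 and resample to 48kHz, as open() does */
        const pcm = Buffer.alloc(wav.length * 2)
        for (let i = 0; i < wav.length; i++)
            pcm.writeInt16LE(Math.max(-1, Math.min(1, wav[i])) * 0x7FFF, i * 2)
        const resampler = new SpeexResampler(1, tts.sampleRate, 48000, 7)
        const out = resampler.processChunk(pcm)

        /* release the ONNX sessions */
        await tts.release()
        return out
    }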