speechflow 0.9.8 → 0.9.9
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/CHANGELOG.md +10 -0
- package/LICENSE.txt +674 -0
- package/README.md +66 -16
- package/dst/speechflow-node-a2a-vad.d.ts +16 -0
- package/dst/speechflow-node-a2a-vad.js +431 -0
- package/dst/speechflow-node-t2a-kokoro.d.ts +13 -0
- package/dst/speechflow-node-t2a-kokoro.js +147 -0
- package/dst/speechflow-node-t2t-gemma.js +23 -3
- package/dst/speechflow-node-t2t-ollama.d.ts +13 -0
- package/dst/speechflow-node-t2t-ollama.js +245 -0
- package/dst/speechflow-node-t2t-openai.d.ts +13 -0
- package/dst/speechflow-node-t2t-openai.js +225 -0
- package/dst/speechflow-node-t2t-opus.js +1 -1
- package/dst/speechflow-node-t2t-transformers.d.ts +14 -0
- package/dst/speechflow-node-t2t-transformers.js +260 -0
- package/dst/speechflow-node-x2x-trace.js +2 -2
- package/dst/speechflow.js +86 -40
- package/etc/speechflow.yaml +9 -2
- package/etc/stx.conf +1 -1
- package/package.json +7 -6
- package/src/speechflow-node-t2a-kokoro.ts +160 -0
- package/src/{speechflow-node-t2t-gemma.ts → speechflow-node-t2t-ollama.ts} +44 -10
- package/src/speechflow-node-t2t-openai.ts +246 -0
- package/src/speechflow-node-t2t-transformers.ts +244 -0
- package/src/speechflow-node-x2x-trace.ts +2 -2
- package/src/speechflow.ts +86 -40
- package/src/speechflow-node-t2t-opus.ts +0 -111
|
@@ -0,0 +1,147 @@
|
|
|
1
|
+
"use strict";
/*
** SpeechFlow - Speech Processing Flow Graph
** Copyright (c) 2024-2025 Dr. Ralf S. Engelschall <rse@engelschall.com>
** Licensed under GPL 3.0 <https://spdx.org/licenses/GPL-3.0-only>
*/
var __importDefault = (this && this.__importDefault) || function (mod) {
    return (mod && mod.__esModule) ? mod : { "default": mod };
};
Object.defineProperty(exports, "__esModule", { value: true });
/* standard dependencies */
const node_stream_1 = __importDefault(require("node:stream"));
/* external dependencies */
const kokoro_js_1 = require("kokoro-js");
const speex_resampler_1 = __importDefault(require("speex-resampler"));
/* internal dependencies */
const speechflow_node_1 = __importDefault(require("./speechflow-node"));
/* SpeechFlow node for Kokoro text-to-speech conversion
   (consumes "text" chunks, produces PCM/I16/48KHz "audio" chunks) */
class SpeechFlowNodeKokoro extends speechflow_node_1.default {
    /* declare official node name */
    static name = "kokoro";
    /* internal state */
    kokoro = null;                   /* Kokoro TTS engine (instantiated in open()) */
    static speexInitialized = false; /* process-wide one-shot Speex initialization flag */
    /* construct node */
    constructor(id, cfg, opts, args) {
        super(id, cfg, opts, args);
        /* declare node configuration parameters */
        this.configure({
            voice: { type: "string", val: "Aoede", pos: 0, match: /^(?:Aoede|Heart|Puck|Fenrir)$/ },
            language: { type: "string", val: "en", pos: 1, match: /^(?:en)$/ },
            speed: { type: "number", val: 1.25, pos: 2, match: (n) => n >= 1.0 && n <= 1.30 },
        });
        /* declare node input/output format */
        this.input = "text";
        this.output = "audio";
    }
    /* open node */
    async open() {
        /* establish Kokoro */
        const model = "onnx-community/Kokoro-82M-v1.0-ONNX";
        const progressState = new Map();
        const progressCallback = (progress) => {
            let artifact = model;
            if (typeof progress.file === "string")
                artifact += `:${progress.file}`;
            let percent = 0;
            if (typeof progress.loaded === "number" && typeof progress.total === "number")
                percent = progress.loaded / progress.total * 100;
            else if (typeof progress.progress === "number")
                percent = progress.progress;
            if (percent > 0)
                progressState.set(artifact, percent);
        };
        /* periodically report download progress and forget artifacts
           once they have been completely downloaded */
        const interval = setInterval(() => {
            for (const [artifact, percent] of progressState) {
                this.log("info", `downloaded ${percent.toFixed(2)}% of artifact "${artifact}"`);
                /* BUGFIX: "percent" is on a 0-100 scale, so an artifact is
                   complete at 100, not at 1.0 (the old threshold dropped
                   entries after just 1% and suppressed further reports) */
                if (percent >= 100)
                    progressState.delete(artifact);
            }
        }, 1000);
        try {
            this.kokoro = await kokoro_js_1.KokoroTTS.from_pretrained(model, {
                dtype: "q4f16",
                progress_callback: progressCallback
            });
        }
        finally {
            /* BUGFIX: clear the timer even when from_pretrained() rejects,
               otherwise the dangling interval keeps the process alive */
            clearInterval(interval);
        }
        if (this.kokoro === null)
            throw new Error("failed to instantiate Kokoro");
        /* establish resampler from Kokoro's maximum 24Khz
           output to our standard audio sample rate (48KHz) */
        if (!SpeechFlowNodeKokoro.speexInitialized) {
            /* at least once initialize resampler */
            await speex_resampler_1.default.initPromise;
            SpeechFlowNodeKokoro.speexInitialized = true;
        }
        const resampler = new speex_resampler_1.default(1, 24000, this.config.audioSampleRate, 7);
        /* determine voice for text-to-speech operation
           (maps public voice names onto Kokoro voice identifiers) */
        const voices = {
            "Aoede": "af_aoede",
            "Heart": "af_heart",
            "Puck": "am_puck",
            "Fenrir": "am_fenrir"
        };
        const voice = (voices[this.params.voice]);
        if (voice === undefined)
            throw new Error(`invalid Kokoro voice "${this.params.voice}"`);
        /* perform text-to-speech operation with the Kokoro engine
           (comment fixed: this node uses Kokoro, not the Elevenlabs API) */
        const text2speech = async (text) => {
            this.log("info", `Kokoro: input: "${text}"`);
            const audio = await this.kokoro.generate(text, {
                speed: this.params.speed,
                voice: voice
            });
            if (audio.sampling_rate !== 24000)
                throw new Error("expected 24KHz sampling rate in Kokoro output");
            /* convert audio samples from PCM/F32/24Khz to PCM/I16/24KHz */
            const samples = audio.audio;
            const buffer1 = Buffer.alloc(samples.length * 2);
            for (let i = 0; i < samples.length; i++) {
                const sample = Math.max(-1, Math.min(1, samples[i]));
                /* BUGFIX: round to the nearest integer instead of relying on
                   implicit truncation, to minimize quantization bias */
                buffer1.writeInt16LE(Math.round(sample * 0x7FFF), i * 2);
            }
            /* resample audio samples from PCM/I16/24Khz to PCM/I16/48KHz */
            const buffer2 = resampler.processChunk(buffer1);
            return buffer2;
        };
        /* create transform stream and connect it to the Kokoro engine */
        const log = (level, msg) => { this.log(level, msg); };
        this.stream = new node_stream_1.default.Transform({
            writableObjectMode: true,
            readableObjectMode: true,
            decodeStrings: false,
            transform(chunk, encoding, callback) {
                /* text input is expected, so a Buffer payload is an error */
                if (Buffer.isBuffer(chunk.payload))
                    callback(new Error("invalid chunk payload type"));
                else {
                    text2speech(chunk.payload).then((buffer) => {
                        log("info", `Kokoro: received audio (buffer length: ${buffer.byteLength})`);
                        chunk = chunk.clone();
                        chunk.type = "audio";
                        chunk.payload = buffer;
                        this.push(chunk);
                        callback();
                    }).catch((err) => {
                        callback(err);
                    });
                }
            },
            final(callback) {
                this.push(null);
                callback();
            }
        });
    }
    /* close node */
    async close() {
        /* destroy stream */
        if (this.stream !== null) {
            this.stream.destroy();
            this.stream = null;
        }
        /* destroy Kokoro engine (dropping the reference frees it for GC) */
        if (this.kokoro !== null)
            this.kokoro = null;
    }
}
exports.default = SpeechFlowNodeKokoro;
|
|
@@ -93,7 +93,8 @@ class SpeechFlowNodeGemma extends speechflow_node_1.default {
|
|
|
93
93
|
"Do not show any prolog.\n" +
|
|
94
94
|
"Do not show any epilog.\n" +
|
|
95
95
|
"Get to the point.\n" +
|
|
96
|
-
"
|
|
96
|
+
"Preserve the original meaning, tone, and nuance.\n" +
|
|
97
|
+
"Directly translate text from English (EN) to fluent and natural German (DE) language.\n",
|
|
97
98
|
chat: [
|
|
98
99
|
{ role: "user", content: "I love my wife." },
|
|
99
100
|
{ role: "system", content: "Ich liebe meine Frau." },
|
|
@@ -115,7 +116,8 @@ class SpeechFlowNodeGemma extends speechflow_node_1.default {
|
|
|
115
116
|
"Do not show any prolog. \n" +
|
|
116
117
|
"Do not show any epilog. \n" +
|
|
117
118
|
"Get to the point.\n" +
|
|
118
|
-
"
|
|
119
|
+
"Preserve the original meaning, tone, and nuance.\n" +
|
|
120
|
+
"Directly translate text from German (DE) to fluent and natural English (EN) language.\n",
|
|
119
121
|
chat: [
|
|
120
122
|
{ role: "user", content: "Ich liebe meine Frau." },
|
|
121
123
|
{ role: "system", content: "I love my wife." },
|
|
@@ -132,9 +134,16 @@ class SpeechFlowNodeGemma extends speechflow_node_1.default {
|
|
|
132
134
|
/* declare node configuration parameters */
|
|
133
135
|
this.configure({
|
|
134
136
|
api: { type: "string", val: "http://127.0.0.1:11434", match: /^https?:\/\/.+?:\d+$/ },
|
|
137
|
+
model: { type: "string", val: "gemma3:4b-it-q4_K_M", match: /^.+$/ },
|
|
135
138
|
src: { type: "string", pos: 0, val: "de", match: /^(?:de|en)$/ },
|
|
136
139
|
dst: { type: "string", pos: 1, val: "en", match: /^(?:de|en)$/ }
|
|
137
140
|
});
|
|
141
|
+
/* tell effective mode */
|
|
142
|
+
if (this.params.src === this.params.dst)
|
|
143
|
+
this.log("info", `Gemma/Ollama: operation mode: spellchecking for language "${this.params.src}"`);
|
|
144
|
+
else
|
|
145
|
+
this.log("info", `Gemma/Ollama: operation mode: translation from language "${this.params.src}"` +
|
|
146
|
+
` to language "${this.params.dst}"`);
|
|
138
147
|
/* declare node input/output format */
|
|
139
148
|
this.input = "text";
|
|
140
149
|
this.output = "text";
|
|
@@ -143,12 +152,23 @@ class SpeechFlowNodeGemma extends speechflow_node_1.default {
|
|
|
143
152
|
async open() {
|
|
144
153
|
/* instantiate Ollama API */
|
|
145
154
|
this.ollama = new ollama_1.Ollama({ host: this.params.api });
|
|
155
|
+
/* ensure the model is available */
|
|
156
|
+
const model = this.params.model;
|
|
157
|
+
const models = await this.ollama.list();
|
|
158
|
+
const exists = models.models.some((m) => m.name === model);
|
|
159
|
+
if (!exists) {
|
|
160
|
+
this.log("info", `Gemma/Ollama: model "${model}" still not present in Ollama -- ` +
|
|
161
|
+
"automatically downloading model");
|
|
162
|
+
await this.ollama.pull({ model });
|
|
163
|
+
}
|
|
164
|
+
else
|
|
165
|
+
this.log("info", `Gemma/Ollama: model "${model}" already present in Ollama`);
|
|
146
166
|
/* provide text-to-text translation */
|
|
147
167
|
const translate = async (text) => {
|
|
148
168
|
const key = `${this.params.src}-${this.params.dst}`;
|
|
149
169
|
const cfg = this.setup[key];
|
|
150
170
|
const response = await this.ollama.chat({
|
|
151
|
-
model
|
|
171
|
+
model,
|
|
152
172
|
messages: [
|
|
153
173
|
{ role: "system", content: cfg.systemPrompt },
|
|
154
174
|
...cfg.chat,
|
|
@@ -0,0 +1,13 @@
|
|
|
1
|
+
/* Type declarations for the SpeechFlow "ollama" text-to-text node
   (machine-generated companion of src/speechflow-node-t2t-ollama.ts). */
import SpeechFlowNode from "./speechflow-node";
export default class SpeechFlowNodeOllama extends SpeechFlowNode {
    /* official node name ("ollama") as used in flow-graph expressions */
    static name: string;
    /* Ollama API client instance (created in open(); type elided in declaration output) */
    private ollama;
    /* system prompt and few-shot chat setup, keyed by language pair (type elided) */
    private setup;
    constructor(id: string, cfg: {
        [id: string]: any;
    }, opts: {
        [id: string]: any;
    }, args: any[]);
    open(): Promise<void>;
    close(): Promise<void>;
}
|
|
@@ -0,0 +1,245 @@
|
|
|
1
|
+
"use strict";
/*
** SpeechFlow - Speech Processing Flow Graph
** Copyright (c) 2024-2025 Dr. Ralf S. Engelschall <rse@engelschall.com>
** Licensed under GPL 3.0 <https://spdx.org/licenses/GPL-3.0-only>
*/
var __importDefault = (this && this.__importDefault) || function (mod) {
    return (mod && mod.__esModule) ? mod : { "default": mod };
};
Object.defineProperty(exports, "__esModule", { value: true });
/* standard dependencies */
const node_stream_1 = __importDefault(require("node:stream"));
/* external dependencies */
const ollama_1 = require("ollama");
/* internal dependencies */
const speechflow_node_1 = __importDefault(require("./speechflow-node"));
/* SpeechFlow node for Ollama text-to-text translation */
class SpeechFlowNodeOllama extends speechflow_node_1.default {
    /* declare official node name */
    static name = "ollama";
    /* internal state */
    ollama = null;
    /* internal LLM setup: system prompt plus few-shot chat examples,
       keyed by "<src>-<dst>" language pair (same language = spellchecking) */
    setup = {
        /* English (EN) spellchecking only */
        "en-en": {
            systemPrompt: "You are a proofreader and spellchecker for English.\n" +
                "Output only the corrected text.\n" +
                "Do NOT use markdown.\n" +
                "Do NOT give any explanations.\n" +
                "Do NOT give any introduction.\n" +
                "Do NOT give any comments.\n" +
                "Do NOT give any preamble.\n" +
                "Do NOT give any prolog.\n" +
                "Do NOT give any epilog.\n" +
                /* BUGFIX: prompt typo "gammar" corrected to "grammar" */
                "Do NOT change the grammar.\n" +
                "Do NOT use synonyms for words.\n" +
                "Keep all words.\n" +
                "Fill in missing commas.\n" +
                "Fill in missing points.\n" +
                "Fill in missing question marks.\n" +
                "Fill in missing hyphens.\n" +
                "Focus ONLY on the word spelling.\n" +
                "The text you have to correct is:\n",
            chat: [
                { role: "user", content: "I luve my wyfe" },
                { role: "system", content: "I love my wife." },
                { role: "user", content: "The weether is wunderfull!" },
                { role: "system", content: "The weather is wonderful!" },
                /* NOTE: "live" is intentionally kept in the correction --
                   the spellchecker must not substitute words */
                { role: "user", content: "The live awesome but I'm hungry." },
                { role: "system", content: "The live is awesome, but I'm hungry." }
            ]
        },
        /* German (DE) spellchecking only */
        "de-de": {
            systemPrompt: "Du bist ein Korrekturleser und Rechtschreibprüfer für Deutsch.\n" +
                "Gib nur den korrigierten Text aus.\n" +
                "Benutze KEIN Markdown.\n" +
                "Gib KEINE Erklärungen.\n" +
                "Gib KEINE Einleitung.\n" +
                "Gib KEINE Kommentare.\n" +
                "Gib KEINE Preamble.\n" +
                "Gib KEINEN Prolog.\n" +
                "Gib KEINEN Epilog.\n" +
                "Ändere NICHT die Grammatik.\n" +
                "Verwende KEINE Synonyme für Wörter.\n" +
                "Behalte alle Wörter bei.\n" +
                "Füge fehlende Kommas ein.\n" +
                "Füge fehlende Punkte ein.\n" +
                "Füge fehlende Fragezeichen ein.\n" +
                "Füge fehlende Bindestriche ein.\n" +
                "Füge fehlende Gedankenstriche ein.\n" +
                "Fokussiere dich NUR auf die Rechtschreibung der Wörter.\n" +
                "Der von dir zu korrigierende Text ist:\n",
            chat: [
                { role: "user", content: "Ich ljebe meine Frao" },
                { role: "system", content: "Ich liebe meine Frau." },
                { role: "user", content: "Die Wedter ist wunderschoen." },
                { role: "system", content: "Das Wetter ist wunderschön." },
                { role: "user", content: "Das Leben einfach großartig aber ich bin hungrig." },
                { role: "system", content: "Das Leben ist einfach großartig, aber ich bin hungrig." }
            ]
        },
        /* English (EN) to German (DE) translation */
        "en-de": {
            systemPrompt: "You are a translator.\n" +
                "Output only the requested text.\n" +
                "Do not use markdown.\n" +
                "Do not chat.\n" +
                "Do not show any explanations.\n" +
                "Do not show any introduction.\n" +
                "Do not show any preamble.\n" +
                "Do not show any prolog.\n" +
                "Do not show any epilog.\n" +
                "Get to the point.\n" +
                "Preserve the original meaning, tone, and nuance.\n" +
                "Directly translate text from English (EN) to fluent and natural German (DE) language.\n",
            chat: [
                { role: "user", content: "I love my wife." },
                { role: "system", content: "Ich liebe meine Frau." },
                { role: "user", content: "The weather is wonderful." },
                { role: "system", content: "Das Wetter ist wunderschön." },
                /* NOTE(review): "The live is awesome." is not fluent English
                   ("Life is awesome.") -- confirm whether this few-shot
                   example is intentional before changing prompt behavior */
                { role: "user", content: "The live is awesome." },
                { role: "system", content: "Das Leben ist einfach großartig." }
            ]
        },
        /* German (DE) to English (EN) translation */
        "de-en": {
            systemPrompt: "You are a translator.\n" +
                "Output only the requested text.\n" +
                "Do not use markdown.\n" +
                "Do not chat.\n" +
                "Do not show any explanations.\n" +
                "Do not show any introduction.\n" +
                "Do not show any preamble. \n" +
                "Do not show any prolog. \n" +
                "Do not show any epilog. \n" +
                "Get to the point.\n" +
                "Preserve the original meaning, tone, and nuance.\n" +
                "Directly translate text from German (DE) to fluent and natural English (EN) language.\n",
            chat: [
                { role: "user", content: "Ich liebe meine Frau." },
                { role: "system", content: "I love my wife." },
                { role: "user", content: "Das Wetter ist wunderschön." },
                { role: "system", content: "The weather is wonderful." },
                { role: "user", content: "Das Leben ist einfach großartig." },
                /* NOTE(review): "The live is awesome." is not fluent English
                   ("Life is awesome.") -- confirm whether this few-shot
                   example is intentional before changing prompt behavior */
                { role: "system", content: "The live is awesome." }
            ]
        }
    };
    /* construct node */
    constructor(id, cfg, opts, args) {
        super(id, cfg, opts, args);
        /* declare node configuration parameters */
        this.configure({
            api: { type: "string", val: "http://127.0.0.1:11434", match: /^https?:\/\/.+?:\d+$/ },
            model: { type: "string", val: "gemma3:4b-it-q4_K_M", match: /^.+$/ },
            src: { type: "string", pos: 0, val: "de", match: /^(?:de|en)$/ },
            dst: { type: "string", pos: 1, val: "en", match: /^(?:de|en)$/ }
        });
        /* tell effective mode (same language pair means spellchecking only) */
        if (this.params.src === this.params.dst)
            this.log("info", `Ollama: operation mode: spellchecking for language "${this.params.src}"`);
        else
            this.log("info", `Ollama: operation mode: translation from language "${this.params.src}"` +
                ` to language "${this.params.dst}"`);
        /* declare node input/output format */
        this.input = "text";
        this.output = "text";
    }
    /* open node */
    async open() {
        /* instantiate Ollama API */
        this.ollama = new ollama_1.Ollama({ host: this.params.api });
        /* ensure the model is available */
        const model = this.params.model;
        const models = await this.ollama.list();
        const exists = models.models.some((m) => m.name === model);
        if (!exists) {
            this.log("info", `Ollama: model "${model}" still not present in Ollama -- ` +
                "automatically downloading model");
            /* periodically report pull progress of the current artifact */
            let artifact = "";
            let percent = 0;
            const interval = setInterval(() => {
                this.log("info", `downloaded ${percent.toFixed(2)}% of artifact "${artifact}"`);
            }, 1000);
            try {
                const progress = await this.ollama.pull({ model, stream: true });
                for await (const event of progress) {
                    if (event.digest)
                        artifact = event.digest;
                    if (event.completed && event.total)
                        percent = (event.completed / event.total) * 100;
                }
            }
            finally {
                /* BUGFIX: clear the timer even when the pull fails,
                   otherwise the dangling interval keeps the process alive */
                clearInterval(interval);
            }
        }
        else
            this.log("info", `Ollama: model "${model}" already present in Ollama`);
        /* provide text-to-text translation */
        const translate = async (text) => {
            const key = `${this.params.src}-${this.params.dst}`;
            const cfg = this.setup[key];
            const response = await this.ollama.chat({
                model,
                messages: [
                    { role: "system", content: cfg.systemPrompt },
                    ...cfg.chat,
                    { role: "user", content: text }
                ],
                keep_alive: "10m",
                options: {
                    repeat_penalty: 1.1,
                    temperature: 0.7,
                    seed: 1,
                    top_k: 10,
                    top_p: 0.5
                }
            });
            return response.message.content;
        };
        /* establish a duplex stream and connect it to Ollama */
        this.stream = new node_stream_1.default.Transform({
            readableObjectMode: true,
            writableObjectMode: true,
            decodeStrings: false,
            transform(chunk, encoding, callback) {
                /* text input is expected, so a Buffer payload is an error */
                if (Buffer.isBuffer(chunk.payload))
                    callback(new Error("invalid chunk payload type"));
                else {
                    /* pass empty chunks through untranslated */
                    if (chunk.payload === "") {
                        this.push(chunk);
                        callback();
                    }
                    else {
                        translate(chunk.payload).then((payload) => {
                            const chunkNew = chunk.clone();
                            chunkNew.payload = payload;
                            this.push(chunkNew);
                            callback();
                        }).catch((err) => {
                            callback(err);
                        });
                    }
                }
            },
            final(callback) {
                this.push(null);
                callback();
            }
        });
    }
    /* close node */
    async close() {
        /* close stream */
        if (this.stream !== null) {
            this.stream.destroy();
            this.stream = null;
        }
        /* shutdown Ollama (abort any in-flight requests) */
        if (this.ollama !== null) {
            this.ollama.abort();
            this.ollama = null;
        }
    }
}
exports.default = SpeechFlowNodeOllama;
|
|
@@ -0,0 +1,13 @@
|
|
|
1
|
+
/* Type declarations for the SpeechFlow "openai" text-to-text node
   (machine-generated companion of src/speechflow-node-t2t-openai.ts,
   whose implementation is not part of this view -- contract details
   should be confirmed against that source). */
import SpeechFlowNode from "./speechflow-node";
export default class SpeechFlowNodeOpenAI extends SpeechFlowNode {
    /* official node name, presumably "openai" -- confirm against implementation */
    static name: string;
    /* OpenAI API client handle (type elided in declaration output) */
    private openai;
    /* internal prompt/chat setup (type elided in declaration output) */
    private setup;
    constructor(id: string, cfg: {
        [id: string]: any;
    }, opts: {
        [id: string]: any;
    }, args: any[]);
    open(): Promise<void>;
    close(): Promise<void>;
}
|