@ferchy/n8n-nodes-aimc-toolkit 0.1.8 → 0.1.10
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/README.md
CHANGED
@@ -4,12 +4,13 @@ AIMC Toolkit is a community node package for n8n with focused nodes:
 
 - **AIMC Code**: run JavaScript with a practical toolbox of libraries.
 - **AIMC Media**: FFmpeg-powered media operations without extra glue nodes.
-- **AIMC TTS**: CPU-friendly text-to-speech using Piper.
 
 ## Why I Built This
 
 n8n is powerful, but real workflows often need basic utilities (validation, parsing, HTTP) and media tasks (convert, compress, merge). I built AIMC Toolkit to remove the busywork and keep workflows short, readable, and fast.
 
+**Inspiration**: The original idea was sparked by Kenkaii’s SuperCode. I’m grateful for that work and built AIMC Toolkit as my own version, tailored to my needs and expanded with improvements over time.
+
 ## Who This Is For
 
 - Automation builders who want fewer nodes and faster iterations.
@@ -22,7 +23,6 @@ n8n is powerful, but real workflows often need basic utilities (validation, pars
 - **Fewer nodes, cleaner flows**: consolidate multiple steps into one code node.
 - **Media ready**: convert, compress, merge, and inspect media in one place.
 - **Practical libraries**: parsing, validation, dates, and web utilities built in.
-- **Local voice**: generate speech on a CPU server without paid APIs.
 
 ## Installation
 
@@ -74,6 +74,7 @@ brew install ffmpeg
 - Run once for all items or once per item.
 - Access libraries as globals (`axios`, `_`, `zod`) or via `libs`.
 - Built-in helpers (`utils.now`, `utils.safeJson`, `utils.toArray`).
+- **AIMC Connect Mode**: optional AI connectors for LLMs, tools, memory, and more.
 
 **Example: normalize data**
 ```javascript
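The bullet list above only names the library globals and `utils` helpers; a brief sketch of how they might be combined inside AIMC Code (only `_`, `items`, `result`, and `utils.now()` come from this package's docs and code, while the `status` and `email` fields are invented sample data):

```javascript
// Hedged sketch for the AIMC Code node. `_` (lodash) and `utils.now()` are
// documented globals; the input fields used here are hypothetical.
const rows = items.map((item) => item.json);
const grouped = _.groupBy(rows, 'status');
result = {
  checkedAt: utils.now(), // ISO timestamp helper (see the sandbox code later in this diff)
  activeCount: (grouped.active || []).length,
  emails: _.uniq(rows.map((row) => row.email).filter(Boolean)),
};
```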
@@ -105,6 +106,20 @@ result = {
 }
 ```
 
+**AIMC Connect Mode (AI)**
+Enable **AIMC Connect Mode** to attach AI connectors. When enabled, your code can use:
+
+- `ai` (object with all connectors)
+- `aiModel`, `aiTools`, `aiMemory`, `aiVectorStore`, `aiChain`, `aiDocument`
+
+Example:
+```javascript
+if (aiModel) {
+  const response = await aiModel.invoke('Summarize this input.');
+  return { summary: response };
+}
+```
+
 ### AIMC Media
 
 **Operations**
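The README addition above exercises only `aiModel`; a minimal follow-up sketch that checks which connectors are actually attached through the `ai` object before calling the model (connector keys match the sandbox wiring shown later in this diff, and the prompt text is invented):

```javascript
// Inside AIMC Code with AIMC Connect Mode enabled. Only `ai` and `aiModel.invoke()`
// are documented in this diff; the rest is illustrative.
const attached = Object.keys(ai).filter((key) => ai[key] !== undefined);
if (!aiModel) {
  return { attached, error: 'Connect a Language Model to run this example' };
}
const reply = await aiModel.invoke('Reply with the single word "ready".');
return { attached, reply };
```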
@@ -147,31 +162,6 @@ Audio File Path: /path/to/audio.mp3
 **Large Files**
 Use **Input Mode = File Path** to avoid loading big files into memory.
 
-### AIMC TTS (Piper)
-
-**What it does**
-Generate speech locally using Piper (CPU-friendly, no external API). Output is WAV audio.
-
-**Requirements**
-- Python 3 installed
-- Piper installed: `pip install piper-tts`
-- A downloaded voice model
-
-**Download a voice**
-```bash
-python3 -m piper.download_voices en_US-lessac-medium
-```
-
-**Example**
-```
-Text: Hello from AIMC Toolkit
-Voice Name: en_US-lessac-medium
-Output Mode: Binary
-```
-
-**Tips**
-- Use **Output Mode = File Path** for large audio.
-- For custom voice storage, set **Data Dir**.
 
 
 ## Library Reference (AIMC Code)
package/dist/nodes/AimcCode/AimcCode.node.js
CHANGED
@@ -147,6 +147,7 @@ function normalizeResult(result, fallbackItems) {
     ];
 }
 function buildSandbox(params) {
+    var _a, _b, _c, _d, _e, _f, _g, _h, _j;
     const cache = {};
     const sandbox = {
         items: params.items,
@@ -161,6 +162,16 @@ function buildSandbox(params) {
                 : params.items.map((entry) => entry.json),
         },
         params: params.nodeParams,
+        ai: params.aiContext || {},
+        aiModel: (_a = params.aiContext) === null || _a === void 0 ? void 0 : _a.languageModel,
+        aiTools: (_b = params.aiContext) === null || _b === void 0 ? void 0 : _b.tools,
+        aiMemory: (_c = params.aiContext) === null || _c === void 0 ? void 0 : _c.memory,
+        aiChain: (_d = params.aiContext) === null || _d === void 0 ? void 0 : _d.chain,
+        aiDocument: (_e = params.aiContext) === null || _e === void 0 ? void 0 : _e.document,
+        aiEmbedding: (_f = params.aiContext) === null || _f === void 0 ? void 0 : _f.embedding,
+        aiOutputParser: (_g = params.aiContext) === null || _g === void 0 ? void 0 : _g.outputParser,
+        aiTextSplitter: (_h = params.aiContext) === null || _h === void 0 ? void 0 : _h.textSplitter,
+        aiVectorStore: (_j = params.aiContext) === null || _j === void 0 ? void 0 : _j.vectorStore,
         utils: {
             now: () => new Date().toISOString(),
             sleep: (ms) => new Promise((resolve) => setTimeout(resolve, ms)),
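The `(_a = params.aiContext) === null || _a === void 0 ? void 0 : _a.languageModel` chains added above are TypeScript's down-leveled form of optional chaining; the pre-build source presumably reads close to the sketch below (property names are taken from the compiled output, the wrapper function is only for illustration):

```javascript
// Inferred modern-JS equivalent of the new sandbox fields; not the literal source.
function aiSandboxFields(params) {
  return {
    ai: params.aiContext || {},
    aiModel: params.aiContext?.languageModel,
    aiTools: params.aiContext?.tools,
    aiMemory: params.aiContext?.memory,
    aiChain: params.aiContext?.chain,
    aiDocument: params.aiContext?.document,
    aiEmbedding: params.aiContext?.embedding,
    aiOutputParser: params.aiContext?.outputParser,
    aiTextSplitter: params.aiContext?.textSplitter,
    aiVectorStore: params.aiContext?.vectorStore,
  };
}
```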
@@ -294,7 +305,32 @@ class AimcCode {
             defaults: {
                 name: 'AIMC Code',
             },
-            inputs:
+            inputs: `={{
+                ((values, aiMode) => {
+                    const connectorTypes = {
+                        '${"ai_chain"}': 'Chain',
+                        '${"ai_document"}': 'Document',
+                        '${"ai_embedding"}': 'Embedding',
+                        '${"ai_languageModel"}': 'Language Model',
+                        '${"ai_memory"}': 'Memory',
+                        '${"ai_outputParser"}': 'Output Parser',
+                        '${"ai_textSplitter"}': 'Text Splitter',
+                        '${"ai_tool"}': 'Tool',
+                        '${"ai_vectorStore"}': 'Vector Store',
+                        '${"main"}': 'Main'
+                    };
+                    const baseInputs = [{ displayName: '', type: '${"main"}' }];
+                    if (aiMode && values) {
+                        return baseInputs.concat(values.map(value => ({
+                            type: value.type,
+                            required: value.required,
+                            maxConnections: value.maxConnections === -1 ? undefined : value.maxConnections,
+                            displayName: connectorTypes[value.type] !== 'Main' ? connectorTypes[value.type] : undefined
+                        })));
+                    }
+                    return baseInputs;
+                })($parameter.aiConnections?.input, $parameter.aiConnectMode)
+            }}`,
             outputs: ['main'],
             properties: [
                 {
@@ -333,6 +369,67 @@ class AimcCode {
                         maxValue: 300,
                     },
                 },
+                {
+                    displayName: 'AIMC Connect Mode',
+                    name: 'aiConnectMode',
+                    type: 'boolean',
+                    default: false,
+                    description: 'Enable AI connector inputs for models, tools, memory, and more.',
+                },
+                {
+                    displayName: 'AI Connections',
+                    name: 'aiConnections',
+                    placeholder: 'Add AI Connection',
+                    type: 'fixedCollection',
+                    displayOptions: {
+                        show: {
+                            aiConnectMode: [true],
+                        },
+                    },
+                    typeOptions: {
+                        multipleValues: true,
+                    },
+                    default: {},
+                    options: [
+                        {
+                            name: 'input',
+                            displayName: 'Input',
+                            values: [
+                                {
+                                    displayName: 'Type',
+                                    name: 'type',
+                                    type: 'options',
+                                    options: [
+                                        { name: 'Chain', value: 'ai_chain' },
+                                        { name: 'Document', value: 'ai_document' },
+                                        { name: 'Embedding', value: 'ai_embedding' },
+                                        { name: 'Language Model', value: 'ai_languageModel' },
+                                        { name: 'Memory', value: 'ai_memory' },
+                                        { name: 'Output Parser', value: 'ai_outputParser' },
+                                        { name: 'Text Splitter', value: 'ai_textSplitter' },
+                                        { name: 'Tool', value: 'ai_tool' },
+                                        { name: 'Vector Store', value: 'ai_vectorStore' },
+                                    ],
+                                    default: 'ai_languageModel',
+                                },
+                                {
+                                    displayName: 'Max Connections',
+                                    name: 'maxConnections',
+                                    type: 'number',
+                                    default: 1,
+                                    description: 'Set -1 for unlimited connections',
+                                },
+                                {
+                                    displayName: 'Required',
+                                    name: 'required',
+                                    type: 'boolean',
+                                    default: false,
+                                    description: 'Whether this connection is required',
+                                },
+                            ],
+                        },
+                    ],
+                },
                 {
                     displayName: 'JavaScript Code',
                     name: 'code',
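To see what the dynamic `inputs` expression from the previous hunk produces for a given `aiConnections` value, the same IIFE logic can be evaluated outside n8n (the mapping and shape are copied from the expression; the sample configuration is invented):

```javascript
// Local re-evaluation of the inputs expression with a hypothetical parameter value.
const connectorTypes = {
  ai_languageModel: 'Language Model',
  ai_tool: 'Tool',
  main: 'Main',
  // ...remaining entries as in the expression above.
};
const buildInputs = (values, aiMode) => {
  const baseInputs = [{ displayName: '', type: 'main' }];
  if (aiMode && values) {
    return baseInputs.concat(values.map((value) => ({
      type: value.type,
      required: value.required,
      maxConnections: value.maxConnections === -1 ? undefined : value.maxConnections,
      displayName: connectorTypes[value.type] !== 'Main' ? connectorTypes[value.type] : undefined,
    })));
  }
  return baseInputs;
};

// One required language model plus unlimited tools:
console.log(buildInputs([
  { type: 'ai_languageModel', required: true, maxConnections: 1 },
  { type: 'ai_tool', required: false, maxConnections: -1 },
], true));
// -> [ { displayName: '', type: 'main' },
//      { type: 'ai_languageModel', required: true, maxConnections: 1, displayName: 'Language Model' },
//      { type: 'ai_tool', required: false, maxConnections: undefined, displayName: 'Tool' } ]
```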
@@ -410,8 +507,38 @@ class AimcCode {
         const language = this.getNodeParameter('language', 0, 'javascript');
         const mode = this.getNodeParameter('mode', 0, 'runOnceForAllItems');
         const timeoutSeconds = this.getNodeParameter('timeoutSeconds', 0, 30);
+        const aiConnectMode = this.getNodeParameter('aiConnectMode', 0, false);
         const timeoutMs = Math.max(1, timeoutSeconds) * 1000;
         const nodeParams = this.getNode().parameters;
+        const aiContext = {};
+        const fetchAi = async (type) => {
+            if (!aiConnectMode) {
+                return undefined;
+            }
+            const getter = this
+                .getInputConnectionData;
+            if (!getter) {
+                return undefined;
+            }
+            try {
+                const value = await getter(type, 0);
+                return Array.isArray(value) ? value[0] : value;
+            }
+            catch {
+                return undefined;
+            }
+        };
+        if (aiConnectMode) {
+            aiContext.chain = await fetchAi('ai_chain');
+            aiContext.document = await fetchAi('ai_document');
+            aiContext.embedding = await fetchAi('ai_embedding');
+            aiContext.languageModel = await fetchAi('ai_languageModel');
+            aiContext.memory = await fetchAi('ai_memory');
+            aiContext.outputParser = await fetchAi('ai_outputParser');
+            aiContext.textSplitter = await fetchAi('ai_textSplitter');
+            aiContext.tools = await fetchAi('ai_tool');
+            aiContext.vectorStore = await fetchAi('ai_vectorStore');
+        }
         if (language === 'python') {
             const pythonCode = this.getNodeParameter('pythonCode', 0, '');
             if (!pythonCode.trim()) {
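With `aiContext` resolved as above and injected into the sandbox, per-item code in the node can call the connected model directly; a sketch assuming a LangChain-style model that returns either a string or a message object with a `content` field (only `aiModel.invoke()` is shown in this diff, everything else is illustrative):

```javascript
// Run Once for All Items, AIMC Connect Mode enabled, a Language Model attached.
if (!aiModel) {
  return [{ json: { error: 'No language model connected' } }];
}
const output = [];
for (const item of items) {
  const text = String(item.json.text ?? ''); // `text` is a hypothetical input field
  const reply = await aiModel.invoke(`Summarize in one sentence: ${text}`);
  const summary = typeof reply === 'string' ? reply : reply?.content;
  output.push({ json: { ...item.json, summary } });
}
return output;
```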
@@ -477,6 +604,7 @@ class AimcCode {
                 item,
                 mode,
                 nodeParams,
+                aiContext,
             });
             const result = await runCode(sandbox);
             const normalized = normalizeResult(result, [item]);
@@ -488,6 +616,7 @@ class AimcCode {
             items,
             mode,
             nodeParams,
+            aiContext,
         });
         const result = await runCode(sandbox);
         const normalized = normalizeResult(result, items);
package/package.json
CHANGED
@@ -1,6 +1,6 @@
 {
   "name": "@ferchy/n8n-nodes-aimc-toolkit",
-  "version": "0.1.8",
+  "version": "0.1.10",
   "description": "AIMC Toolkit nodes for n8n: code execution and media operations.",
   "license": "MIT",
   "author": "Ferchy",
@@ -28,8 +28,7 @@
     "n8nNodesApiVersion": 1,
     "nodes": [
       "dist/nodes/AimcCode/AimcCode.node.js",
-      "dist/nodes/AimcMedia/AimcMedia.node.js",
-      "dist/nodes/AimcTts/AimcTts.node.js"
+      "dist/nodes/AimcMedia/AimcMedia.node.js"
     ]
   },
   "dependencies": {
package/dist/nodes/AimcTts/AimcTts.node.js
DELETED
@@ -1,276 +0,0 @@
-"use strict";
-var __createBinding = (this && this.__createBinding) || (Object.create ? (function(o, m, k, k2) {
-    if (k2 === undefined) k2 = k;
-    var desc = Object.getOwnPropertyDescriptor(m, k);
-    if (!desc || ("get" in desc ? !m.__esModule : desc.writable || desc.configurable)) {
-      desc = { enumerable: true, get: function() { return m[k]; } };
-    }
-    Object.defineProperty(o, k2, desc);
-}) : (function(o, m, k, k2) {
-    if (k2 === undefined) k2 = k;
-    o[k2] = m[k];
-}));
-var __setModuleDefault = (this && this.__setModuleDefault) || (Object.create ? (function(o, v) {
-    Object.defineProperty(o, "default", { enumerable: true, value: v });
-}) : function(o, v) {
-    o["default"] = v;
-});
-var __importStar = (this && this.__importStar) || (function () {
-    var ownKeys = function(o) {
-        ownKeys = Object.getOwnPropertyNames || function (o) {
-            var ar = [];
-            for (var k in o) if (Object.prototype.hasOwnProperty.call(o, k)) ar[ar.length] = k;
-            return ar;
-        };
-        return ownKeys(o);
-    };
-    return function (mod) {
-        if (mod && mod.__esModule) return mod;
-        var result = {};
-        if (mod != null) for (var k = ownKeys(mod), i = 0; i < k.length; i++) if (k[i] !== "default") __createBinding(result, mod, k[i]);
-        __setModuleDefault(result, mod);
-        return result;
-    };
-})();
-Object.defineProperty(exports, "__esModule", { value: true });
-exports.AimcTts = void 0;
-const n8n_workflow_1 = require("n8n-workflow");
-const fs = __importStar(require("fs"));
-const path = __importStar(require("path"));
-const os = __importStar(require("os"));
-const child_process_1 = require("child_process");
-const util_1 = require("util");
-const execFileAsync = (0, util_1.promisify)(child_process_1.execFile);
-let piperChecked = null;
-async function ensurePiperAvailable(pythonPath) {
-    if (piperChecked) {
-        if (!piperChecked.ok) {
-            throw new Error(piperChecked.message || 'Piper not available');
-        }
-        return;
-    }
-    try {
-        await execFileAsync(pythonPath, ['-m', 'piper', '--help'], {
-            timeout: 10000,
-            maxBuffer: 1024 * 1024,
-        });
-        piperChecked = { ok: true };
-    }
-    catch (error) {
-        const message = error instanceof Error && error.message
-            ? error.message
-            : 'Piper not available';
-        piperChecked = { ok: false, message };
-        throw new Error(message);
-    }
-}
-async function createTempDir() {
-    return fs.promises.mkdtemp(path.join(os.tmpdir(), 'aimc-tts-'));
-}
-class AimcTts {
-    constructor() {
-        this.description = {
-            displayName: 'AIMC TTS',
-            name: 'aimcTts',
-            icon: 'file:aimc-tts.svg',
-            group: ['transform'],
-            version: 1,
-            description: 'Text-to-speech using Piper (CPU-friendly).',
-            defaults: {
-                name: 'AIMC TTS',
-            },
-            inputs: ['main'],
-            outputs: ['main'],
-            properties: [
-                {
-                    displayName: 'Text',
-                    name: 'text',
-                    type: 'string',
-                    default: '',
-                    typeOptions: {
-                        rows: 4,
-                    },
-                    description: 'Text to synthesize. Supports expressions from input data.',
-                },
-                {
-                    displayName: 'Voice Name',
-                    name: 'voice',
-                    type: 'string',
-                    default: 'en_US-lessac-medium',
-                    description: 'Piper voice name (downloaded via piper).',
-                },
-                {
-                    displayName: 'Data Dir',
-                    name: 'dataDir',
-                    type: 'string',
-                    default: '',
-                    description: 'Optional custom voice data directory.',
-                },
-                {
-                    displayName: 'Python Path',
-                    name: 'pythonPath',
-                    type: 'string',
-                    default: 'python3',
-                    description: 'Path to Python binary with piper-tts installed.',
-                },
-                {
-                    displayName: 'Sentence Silence (seconds)',
-                    name: 'sentenceSilence',
-                    type: 'number',
-                    default: 0,
-                    typeOptions: {
-                        minValue: 0,
-                        maxValue: 10,
-                    },
-                    description: 'Silence added after each sentence except the last.',
-                },
-                {
-                    displayName: 'Volume',
-                    name: 'volume',
-                    type: 'number',
-                    default: 1,
-                    typeOptions: {
-                        minValue: 0.1,
-                        maxValue: 3,
-                    },
-                    description: 'Volume multiplier (1.0 = default).',
-                },
-                {
-                    displayName: 'Disable Normalization',
-                    name: 'noNormalize',
-                    type: 'boolean',
-                    default: false,
-                    description: 'Disable automatic volume normalization.',
-                },
-                {
-                    displayName: 'Timeout (Seconds)',
-                    name: 'timeoutSeconds',
-                    type: 'number',
-                    default: 60,
-                    typeOptions: {
-                        minValue: 10,
-                        maxValue: 600,
-                    },
-                },
-                {
-                    displayName: 'Output Mode',
-                    name: 'outputMode',
-                    type: 'options',
-                    options: [
-                        { name: 'Binary', value: 'binary' },
-                        { name: 'File Path', value: 'filePath' },
-                    ],
-                    default: 'binary',
-                },
-                {
-                    displayName: 'Output Binary Property',
-                    name: 'outputBinaryProperty',
-                    type: 'string',
-                    default: 'data',
-                    displayOptions: {
-                        show: {
-                            outputMode: ['binary'],
-                        },
-                    },
-                },
-                {
-                    displayName: 'Output File Path',
-                    name: 'outputFilePath',
-                    type: 'string',
-                    default: '',
-                    placeholder: '/path/to/output.wav',
-                    displayOptions: {
-                        show: {
-                            outputMode: ['filePath'],
-                        },
-                    },
-                },
-            ],
-        };
-    }
-    async execute() {
-        const items = this.getInputData();
-        const results = [];
-        for (let index = 0; index < items.length; index++) {
-            const item = items[index];
-            const text = this.getNodeParameter('text', index, '');
-            const voice = this.getNodeParameter('voice', index);
-            const dataDir = this.getNodeParameter('dataDir', index, '');
-            const pythonPath = this.getNodeParameter('pythonPath', index, 'python3');
-            const sentenceSilence = this.getNodeParameter('sentenceSilence', index, 0);
-            const volume = this.getNodeParameter('volume', index, 1);
-            const noNormalize = this.getNodeParameter('noNormalize', index, false);
-            const timeoutSeconds = this.getNodeParameter('timeoutSeconds', index, 60);
-            const outputMode = this.getNodeParameter('outputMode', index, 'binary');
-            const outputBinaryProperty = this.getNodeParameter('outputBinaryProperty', index, 'data');
-            const outputFilePath = this.getNodeParameter('outputFilePath', index, '');
-            if (!text || !text.trim()) {
-                throw new n8n_workflow_1.NodeOperationError(this.getNode(), 'Text is required.');
-            }
-            try {
-                await ensurePiperAvailable(pythonPath);
-            }
-            catch (error) {
-                const message = error instanceof Error ? error.message : 'Piper not available';
-                throw new n8n_workflow_1.NodeOperationError(this.getNode(), `Piper not found. Install piper-tts and voices. ${message}`);
-            }
-            let tempDir = null;
-            let outputPath = outputFilePath;
-            if (outputMode === 'filePath' && !outputPath) {
-                throw new n8n_workflow_1.NodeOperationError(this.getNode(), 'Output File Path is required.');
-            }
-            if (outputMode === 'binary') {
-                tempDir = await createTempDir();
-                outputPath = path.join(tempDir, 'speech.wav');
-            }
-            const args = ['-m', 'piper', '-m', voice, '-f', outputPath];
-            if (dataDir) {
-                args.push('--data-dir', dataDir);
-            }
-            if (sentenceSilence > 0) {
-                args.push('--sentence-silence', sentenceSilence.toString());
-            }
-            if (volume && volume !== 1) {
-                args.push('--volume', volume.toString());
-            }
-            if (noNormalize) {
-                args.push('--no-normalize');
-            }
-            args.push('--', text);
-            try {
-                await execFileAsync(pythonPath, args, {
-                    timeout: Math.max(10, timeoutSeconds) * 1000,
-                    maxBuffer: 1024 * 1024,
-                });
-                const outputItem = {
-                    json: {
-                        ...item.json,
-                        tts: {
-                            voice,
-                            outputPath: outputMode === 'filePath' ? outputPath : undefined,
-                        },
-                    },
-                };
-                if (outputMode === 'binary') {
-                    const data = await fs.promises.readFile(outputPath);
-                    const binaryData = await this.helpers.prepareBinaryData(data, 'speech.wav');
-                    outputItem.binary = {
-                        [outputBinaryProperty]: binaryData,
-                    };
-                }
-                results.push(outputItem);
-            }
-            catch (error) {
-                const message = error instanceof Error ? error.message : 'Unknown error';
-                throw new n8n_workflow_1.NodeOperationError(this.getNode(), `TTS failed: ${message}`);
-            }
-            finally {
-                if (tempDir) {
-                    await fs.promises.rm(tempDir, { recursive: true, force: true });
-                }
-            }
-        }
-        return [results];
-    }
-}
-exports.AimcTts = AimcTts;
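With the dedicated TTS node gone in 0.1.10, the same Piper call the deleted node made can presumably still be scripted from AIMC Code, assuming the sandbox (or `libs`) exposes Node's `child_process`, which this diff does not confirm; the CLI arguments mirror the removed implementation, while the voice and output path are placeholders:

```javascript
// Hedged sketch: reproduce the deleted node's `python3 -m piper` invocation.
// Availability of require/child_process inside the AIMC Code sandbox is an assumption.
const { execFile } = require('child_process');
const { promisify } = require('util');
const execFileAsync = promisify(execFile);

const voice = 'en_US-lessac-medium'; // placeholder voice, as in the old README
const outputPath = '/tmp/speech.wav'; // placeholder output path
await execFileAsync('python3', [
  '-m', 'piper',
  '-m', voice,
  '-f', outputPath,
  '--', 'Hello from AIMC Toolkit',
], { timeout: 60000, maxBuffer: 1024 * 1024 });
return { outputPath };
```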
package/dist/nodes/AimcTts/aimc-tts.svg
DELETED
@@ -1,19 +0,0 @@
-<svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 64 64" width="64" height="64">
-  <defs>
-    <linearGradient id="aimc-tts-bg" x1="0" y1="0" x2="1" y2="1">
-      <stop offset="0%" stop-color="#0A1021"/>
-      <stop offset="100%" stop-color="#0B1B3A"/>
-    </linearGradient>
-    <linearGradient id="aimc-tts-glow" x1="0" y1="0" x2="1" y2="1">
-      <stop offset="0%" stop-color="#7DD3FC"/>
-      <stop offset="100%" stop-color="#0EA5E9"/>
-    </linearGradient>
-  </defs>
-  <rect x="6" y="6" width="52" height="52" rx="10" fill="url(#aimc-tts-bg)"/>
-  <rect x="26" y="18" width="12" height="22" rx="6" fill="url(#aimc-tts-glow)"/>
-  <path d="M22 30C22 36 26 40 32 40C38 40 42 36 42 30" stroke="#38BDF8" stroke-width="2.5" fill="none" stroke-linecap="round"/>
-  <path d="M24 46H40" stroke="#1E3A8A" stroke-width="2" stroke-linecap="round"/>
-  <path d="M32 40V46" stroke="#1E3A8A" stroke-width="2" stroke-linecap="round"/>
-  <path d="M14 24C18 20 20 20 22 24" stroke="#1E3A8A" stroke-width="1.5" fill="none" stroke-linecap="round"/>
-  <path d="M50 24C46 20 44 20 42 24" stroke="#1E3A8A" stroke-width="1.5" fill="none" stroke-linecap="round"/>
-</svg>