@lee-jisoo/n8n-nodes-mediafx 1.6.0 → 1.6.1
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/fonts/DejaVuSans.ttf +0 -0
- package/dist/fonts/Inter-Regular.ttf +0 -0
- package/dist/fonts/NanumGothic-Regular.ttf +0 -0
- package/dist/fonts/NotoSansKR-Regular.ttf +0 -0
- package/dist/fonts/Pretendard-Regular.otf +0 -0
- package/dist/fonts/Roboto-Regular.ttf +0 -0
- package/dist/nodes/MediaFX/MediaFX.node.d.ts +12 -0
- package/dist/nodes/MediaFX/MediaFX.node.js +559 -0
- package/dist/nodes/MediaFX/mediafx.png +0 -0
- package/dist/nodes/MediaFX/operations/addSubtitle.d.ts +2 -0
- package/dist/nodes/MediaFX/operations/addSubtitle.js +146 -0
- package/dist/nodes/MediaFX/operations/addText.d.ts +2 -0
- package/dist/nodes/MediaFX/operations/addText.js +108 -0
- package/dist/nodes/MediaFX/operations/extractAudio.d.ts +2 -0
- package/dist/nodes/MediaFX/operations/extractAudio.js +57 -0
- package/dist/nodes/MediaFX/operations/imageToVideo.d.ts +5 -0
- package/dist/nodes/MediaFX/operations/imageToVideo.js +65 -0
- package/dist/nodes/MediaFX/operations/index.d.ts +13 -0
- package/dist/nodes/MediaFX/operations/index.js +29 -0
- package/dist/nodes/MediaFX/operations/merge.d.ts +2 -0
- package/dist/nodes/MediaFX/operations/merge.js +121 -0
- package/dist/nodes/MediaFX/operations/mixAudio.d.ts +2 -0
- package/dist/nodes/MediaFX/operations/mixAudio.js +141 -0
- package/dist/nodes/MediaFX/operations/multiVideoTransition.d.ts +2 -0
- package/dist/nodes/MediaFX/operations/multiVideoTransition.js +245 -0
- package/dist/nodes/MediaFX/operations/overlayVideo.d.ts +16 -0
- package/dist/nodes/MediaFX/operations/overlayVideo.js +240 -0
- package/dist/nodes/MediaFX/operations/separateAudio.d.ts +17 -0
- package/dist/nodes/MediaFX/operations/separateAudio.js +78 -0
- package/dist/nodes/MediaFX/operations/singleVideoFade.d.ts +2 -0
- package/dist/nodes/MediaFX/operations/singleVideoFade.js +60 -0
- package/dist/nodes/MediaFX/operations/speed.d.ts +12 -0
- package/dist/nodes/MediaFX/operations/speed.js +110 -0
- package/dist/nodes/MediaFX/operations/stampImage.d.ts +2 -0
- package/dist/nodes/MediaFX/operations/stampImage.js +146 -0
- package/dist/nodes/MediaFX/operations/trim.d.ts +2 -0
- package/dist/nodes/MediaFX/operations/trim.js +49 -0
- package/dist/nodes/MediaFX/properties/audio.properties.d.ts +2 -0
- package/dist/nodes/MediaFX/properties/audio.properties.js +394 -0
- package/dist/nodes/MediaFX/properties/font.properties.d.ts +2 -0
- package/dist/nodes/MediaFX/properties/font.properties.js +186 -0
- package/dist/nodes/MediaFX/properties/image.properties.d.ts +2 -0
- package/dist/nodes/MediaFX/properties/image.properties.js +333 -0
- package/dist/nodes/MediaFX/properties/resources.properties.d.ts +2 -0
- package/dist/nodes/MediaFX/properties/resources.properties.js +34 -0
- package/dist/nodes/MediaFX/properties/subtitle.properties.d.ts +2 -0
- package/dist/nodes/MediaFX/properties/subtitle.properties.js +306 -0
- package/dist/nodes/MediaFX/properties/video.properties.d.ts +2 -0
- package/dist/nodes/MediaFX/properties/video.properties.js +1135 -0
- package/dist/nodes/MediaFX/utils/ffmpegVersion.d.ts +14 -0
- package/dist/nodes/MediaFX/utils/ffmpegVersion.js +97 -0
- package/dist/nodes/MediaFX/utils.d.ts +43 -0
- package/dist/nodes/MediaFX/utils.js +410 -0
- package/package.json +1 -1
- package/CHANGELOG.md +0 -65
package/dist/nodes/MediaFX/operations/mixAudio.js (new file)
@@ -0,0 +1,141 @@

```js
"use strict";
Object.defineProperty(exports, "__esModule", { value: true });
exports.executeMixAudio = void 0;
const n8n_workflow_1 = require("n8n-workflow");
const utils_1 = require("../utils");
const ffmpeg = require("fluent-ffmpeg");
async function executeMixAudio(videoPath, audioPath, videoVolume, audioVolume, matchLength, advancedMixing, itemIndex) {
    // Verify FFmpeg is available before proceeding
    try {
        (0, utils_1.verifyFfmpegAvailability)();
    }
    catch (error) {
        throw new n8n_workflow_1.NodeOperationError(this.getNode(), `FFmpeg is not available: ${error.message}`, { itemIndex });
    }
    const outputPath = (0, utils_1.getTempFile)('.mp4');
    const { enablePartialMix = false, startTime = 0, duration, loop = false, enableFadeIn = false, fadeInDuration = 1, enableFadeOut = false, fadeOutDuration = 1, } = advancedMixing;
    // Check if both inputs have audio
    const videoHasAudio = await (0, utils_1.fileHasAudio)(videoPath);
    const audioFileHasAudio = await (0, utils_1.fileHasAudio)(audioPath);
    // If the "audio" file doesn't have audio, throw an error
    if (!audioFileHasAudio) {
        throw new n8n_workflow_1.NodeOperationError(this.getNode(), 'The secondary audio source does not contain any audio stream', { itemIndex });
    }
    // If video doesn't have audio, we'll create a silent audio track for it
    let actualVideoPath = videoPath;
    let videoCleanup = null;
    if (!videoHasAudio) {
        const videoDuration = await (0, utils_1.getDuration)(videoPath);
        const { filePath: silentAudioPath, cleanup } = await (0, utils_1.createSilentAudio)(videoDuration);
        videoCleanup = cleanup;
        // Create a temporary video with silent audio
        const tempVideoWithAudio = (0, utils_1.getTempFile)('.mp4');
        const addSilentCommand = ffmpeg()
            .input(videoPath)
            .input(silentAudioPath)
            .outputOptions(['-map', '0:v', '-map', '1:a', '-c:v', 'copy', '-shortest'])
            .save(tempVideoWithAudio);
        await (0, utils_1.runFfmpeg)(addSilentCommand);
        await cleanup(); // Clean up the silent audio file
        actualVideoPath = tempVideoWithAudio;
        videoCleanup = () => require('fs-extra').remove(tempVideoWithAudio);
    }
    const command = ffmpeg().input(actualVideoPath).input(audioPath);
    if (enablePartialMix) {
        // Get audio duration to determine processing strategy
        const audioDuration = await (0, utils_1.getDuration)(audioPath);
        // If duration is not provided (null/undefined), use audio duration
        const actualDuration = duration || audioDuration;
        let audioProcessingChain = '[1:a]';
        // Step 1: Handle looping if needed
        if (loop && audioDuration < actualDuration) {
            audioProcessingChain += 'aloop=loop=-1:size=2e9,';
        }
        // Step 2: Trim to duration
        if (loop && audioDuration < actualDuration || audioDuration >= actualDuration) {
            audioProcessingChain += `atrim=duration=${actualDuration},`;
        }
        // Step 3: Reset timestamps
        audioProcessingChain += 'asetpts=PTS-STARTPTS,';
        // Step 4: Apply fade effects
        const fadeFilters = [];
        if (enableFadeIn) {
            fadeFilters.push(`afade=t=in:st=0:d=${fadeInDuration}`);
        }
        if (enableFadeOut) {
            const fadeOutStart = Math.max(0, actualDuration - fadeOutDuration);
            fadeFilters.push(`afade=t=out:st=${fadeOutStart}:d=${fadeOutDuration}`);
        }
        if (fadeFilters.length > 0) {
            audioProcessingChain += fadeFilters.join(',') + ',';
        }
        // Step 5: Set volume and add delay
        audioProcessingChain += `volume=${audioVolume},adelay=${startTime * 1000}|${startTime * 1000}[overlay_audio]`;
        const filterComplex = audioProcessingChain + ';' +
            `[0:a]volume=${videoVolume}[main_audio];` +
            `[main_audio][overlay_audio]amix=inputs=2:duration=first:dropout_transition=0[mixed_audio]`;
        command
            .complexFilter(filterComplex)
            .outputOptions(['-map', '0:v', '-map', '[mixed_audio]', '-c:v copy']);
    }
    else {
        // Standard full audio mix with fade effects
        let audioProcessingChain = '[1:a]';
        // Calculate the effective duration based on matchLength
        const videoDuration = await (0, utils_1.getDuration)(actualVideoPath);
        const audioDuration = await (0, utils_1.getDuration)(audioPath);
        let effectiveDuration;
        switch (matchLength) {
            case 'shortest':
                effectiveDuration = Math.min(videoDuration, audioDuration);
                break;
            case 'longest':
                effectiveDuration = Math.max(videoDuration, audioDuration);
                break;
            case 'first':
            default:
                effectiveDuration = videoDuration;
                break;
        }
        // Apply fade effects if enabled
        const fadeFilters = [];
        if (enableFadeIn) {
            fadeFilters.push(`afade=t=in:st=0:d=${fadeInDuration}`);
        }
        if (enableFadeOut) {
            // Calculate fade out start based on effective duration, not original audio duration
            const fadeOutStart = Math.max(0, effectiveDuration - fadeOutDuration);
            fadeFilters.push(`afade=t=out:st=${fadeOutStart}:d=${fadeOutDuration}`);
        }
        if (fadeFilters.length > 0) {
            audioProcessingChain += fadeFilters.join(',') + ',';
        }
        // Apply volume and create labeled output
        audioProcessingChain += `volume=${audioVolume}[a1]`;
        const filterComplex = audioProcessingChain + ';' +
            `[0:a]volume=${videoVolume}[a0];` +
            `[a0][a1]amix=inputs=2:duration=${matchLength}[a]`;
        command
            .complexFilter(filterComplex)
            .outputOptions(['-map', '0:v', '-map', '[a]', '-c:v copy']);
    }
    command.save(outputPath);
    try {
        await (0, utils_1.runFfmpeg)(command);
        // Clean up temporary video file if we created one
        if (videoCleanup) {
            await videoCleanup();
        }
        return outputPath;
    }
    catch (error) {
        // Clean up output file if creation failed
        await require('fs-extra').remove(outputPath).catch(() => { });
        // Clean up temporary video file if we created one
        if (videoCleanup) {
            await videoCleanup().catch(() => { });
        }
        throw new n8n_workflow_1.NodeOperationError(this.getNode(), `Error mixing audio: ${error.message}`, { itemIndex });
    }
}
exports.executeMixAudio = executeMixAudio;
```
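The partial-mix branch above builds its `filter_complex` by string concatenation across Steps 1-5. A minimal standalone sketch (TypeScript, not package code; all values illustrative) of the string it assembles for a 10 s overlay track mixed in 5 s into the main video with a 1 s fade-out:

```ts
// Standalone sketch: rebuilds the partial-mix filter_complex from the code above.
// Values are illustrative; the real node derives them from node parameters.
const audioVolume = 0.5;
const videoVolume = 1;
const startTime = 5;        // seconds into the main video (adelay is in ms)
const actualDuration = 10;  // seconds of overlay audio to keep
const fadeOutDuration = 1;

let chain = '[1:a]';
chain += `atrim=duration=${actualDuration},`;  // Step 2: trim overlay audio
chain += 'asetpts=PTS-STARTPTS,';              // Step 3: reset timestamps after trim
chain += `afade=t=out:st=${actualDuration - fadeOutDuration}:d=${fadeOutDuration},`; // Step 4
chain += `volume=${audioVolume},adelay=${startTime * 1000}|${startTime * 1000}[overlay_audio]`; // Step 5

const filterComplex =
    chain + ';' +
    `[0:a]volume=${videoVolume}[main_audio];` +
    '[main_audio][overlay_audio]amix=inputs=2:duration=first:dropout_transition=0[mixed_audio]';

console.log(filterComplex);
// [1:a]atrim=duration=10,asetpts=PTS-STARTPTS,afade=t=out:st=9:d=1,volume=0.5,adelay=5000|5000[overlay_audio];[0:a]volume=1[main_audio];[main_audio][overlay_audio]amix=inputs=2:duration=first:dropout_transition=0[mixed_audio]
```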
package/dist/nodes/MediaFX/operations/multiVideoTransition.js (new file)
@@ -0,0 +1,245 @@

```js
"use strict";
var __createBinding = (this && this.__createBinding) || (Object.create ? (function(o, m, k, k2) {
    if (k2 === undefined) k2 = k;
    var desc = Object.getOwnPropertyDescriptor(m, k);
    if (!desc || ("get" in desc ? !m.__esModule : desc.writable || desc.configurable)) {
      desc = { enumerable: true, get: function() { return m[k]; } };
    }
    Object.defineProperty(o, k2, desc);
}) : (function(o, m, k, k2) {
    if (k2 === undefined) k2 = k;
    o[k2] = m[k];
}));
var __setModuleDefault = (this && this.__setModuleDefault) || (Object.create ? (function(o, v) {
    Object.defineProperty(o, "default", { enumerable: true, value: v });
}) : function(o, v) {
    o["default"] = v;
});
var __importStar = (this && this.__importStar) || function (mod) {
    if (mod && mod.__esModule) return mod;
    var result = {};
    if (mod != null) for (var k in mod) if (k !== "default" && Object.prototype.hasOwnProperty.call(mod, k)) __createBinding(result, mod, k);
    __setModuleDefault(result, mod);
    return result;
};
Object.defineProperty(exports, "__esModule", { value: true });
exports.executeMultiVideoTransition = void 0;
const n8n_workflow_1 = require("n8n-workflow");
const ffmpeg = require("fluent-ffmpeg");
const utils_1 = require("../utils");
const ffmpegVersion_1 = require("../utils/ffmpegVersion");
const fs = __importStar(require("fs-extra"));
async function executeMultiVideoTransition(inputs, transition, duration, outputFormat, itemIndex) {
    if (inputs.length < 2) {
        throw new n8n_workflow_1.NodeOperationError(this.getNode(), 'Transition (Apply) operation requires at least two source videos.', { itemIndex });
    }
    // Check if the requested transition is supported
    const transitionSupport = await (0, ffmpegVersion_1.checkTransitionSupport)(transition);
    let effectiveTransition = transition;
    if (!transitionSupport.supported) {
        if (transitionSupport.alternative) {
            effectiveTransition = transitionSupport.alternative;
            console.warn(transitionSupport.message);
        }
        else {
            throw new n8n_workflow_1.NodeOperationError(this.getNode(), transitionSupport.message || 'Unsupported transition effect', { itemIndex });
        }
    }
    const outputPath = (0, utils_1.getTempFile)(`.${outputFormat}`);
    const command = ffmpeg();
    inputs.forEach((input) => command.addInput(input));
    const durations = await Promise.all(inputs.map((input) => (0, utils_1.getDuration)(input)));
    if (durations.some((d) => d === null)) {
        throw new n8n_workflow_1.NodeOperationError(this.getNode(), 'Could not get duration of one or more videos.', {
            itemIndex,
        });
    }
    // Check if all input videos have audio
    const audioChecks = await Promise.all(inputs.map((input) => (0, utils_1.fileHasAudio)(input)));
    const allHaveAudio = audioChecks.every((hasAudio) => hasAudio);
    const someHaveAudio = audioChecks.some((hasAudio) => hasAudio);
    // Warn if only some videos have audio
    if (someHaveAudio && !allHaveAudio) {
        console.warn('Warning: Not all input videos have audio tracks. Audio will be excluded from the output.');
    }
    // Get video dimensions using ffprobe to determine common resolution
    const getDimensions = async (inputPath) => {
        return new Promise((resolve, reject) => {
            ffmpeg.ffprobe(inputPath, (err, metadata) => {
                if (err) {
                    reject(err);
                    return;
                }
                const videoStream = metadata.streams.find(s => s.codec_type === 'video');
                if (!videoStream || !videoStream.width || !videoStream.height) {
                    reject(new Error('Could not get video dimensions'));
                    return;
                }
                resolve({ width: videoStream.width, height: videoStream.height });
            });
        });
    };
    // Get dimensions of all input videos
    const dimensions = await Promise.all(inputs.map(input => getDimensions(input)));
    // Find the maximum dimensions to use as target resolution
    const targetWidth = Math.max(...dimensions.map(d => d.width));
    const targetHeight = Math.max(...dimensions.map(d => d.height));
    console.log(`[MediaFX] Target resolution for transition: ${targetWidth}x${targetHeight}`);
    // Initialize video streams with scaling to common resolution
    const filterGraph = inputs.map((_, i) => {
        const dim = dimensions[i];
        if (dim.width !== targetWidth || dim.height !== targetHeight) {
            // Scale and pad to maintain aspect ratio
            return `[${i}:v]scale=${targetWidth}:${targetHeight}:force_original_aspect_ratio=decrease,pad=${targetWidth}:${targetHeight}:(ow-iw)/2:(oh-ih)/2:black,settb=AVTB[v${i}]`;
        }
        else {
            // No scaling needed
            return `[${i}:v]settb=AVTB[v${i}]`;
        }
    });
    // Initialize audio streams only if audio exists
    if (allHaveAudio) {
        inputs.forEach((_, i) => {
            filterGraph.push(`[${i}:a]aformat=sample_fmts=fltp:sample_rates=44100:channel_layouts=stereo[a${i}]`);
        });
    }
    let lastVideoOut = 'v0';
    let lastAudioOut = 'a0';
    let cumulativeDuration = durations[0];
    // Use different filter strategies based on transition type
    if (effectiveTransition === 'fade' || effectiveTransition === 'fadeblack' || effectiveTransition === 'fadewhite') {
        // Fallback implementation for fade transitions without xfade
        // Simply concatenate videos with fade effects applied to each segment
        const videoSegments = [];
        const audioSegments = [];
        for (let i = 0; i < inputs.length; i++) {
            const videoDuration = durations[i];
            const isFirst = i === 0;
            const isLast = i === inputs.length - 1;
            const videoSegment = `vseg${i}`;
            if (isFirst && isLast) {
                // Only one video - no transitions needed
                videoSegments.push(`v${i}`);
            }
            else if (isFirst) {
                // First video: fade out at the end
                const fadeOutStart = videoDuration - duration;
                filterGraph.push(`[v${i}]fade=t=out:st=${fadeOutStart}:d=${duration}[${videoSegment}]`);
                videoSegments.push(videoSegment);
            }
            else if (isLast) {
                // Last video: fade in at the beginning
                filterGraph.push(`[v${i}]fade=t=in:st=0:d=${duration}[${videoSegment}]`);
                videoSegments.push(videoSegment);
            }
            else {
                // Middle videos: fade in at start, fade out at end
                const fadeOutStart = videoDuration - duration;
                filterGraph.push(`[v${i}]fade=t=in:st=0:d=${duration},fade=t=out:st=${fadeOutStart}:d=${duration}[${videoSegment}]`);
                videoSegments.push(videoSegment);
            }
        }
        // Concatenate all video segments
        if (videoSegments.length === 1) {
            lastVideoOut = videoSegments[0];
        }
        else {
            lastVideoOut = 'vconcat';
            const concatFilter = videoSegments.map(seg => `[${seg}]`).join('') +
                `concat=n=${videoSegments.length}:v=1:a=0[${lastVideoOut}]`;
            filterGraph.push(concatFilter);
        }
        // Handle audio if it exists
        if (allHaveAudio) {
            for (let i = 0; i < inputs.length; i++) {
                const audioDuration = durations[i];
                const isFirst = i === 0;
                const isLast = i === inputs.length - 1;
                const audioSegment = `aseg${i}`;
                if (isFirst && isLast) {
                    audioSegments.push(`a${i}`);
                }
                else if (isFirst) {
                    const fadeOutStart = audioDuration - duration;
                    filterGraph.push(`[a${i}]afade=t=out:st=${fadeOutStart}:d=${duration}[${audioSegment}]`);
                    audioSegments.push(audioSegment);
                }
                else if (isLast) {
                    filterGraph.push(`[a${i}]afade=t=in:st=0:d=${duration}[${audioSegment}]`);
                    audioSegments.push(audioSegment);
                }
                else {
                    const fadeOutStart = audioDuration - duration;
                    filterGraph.push(`[a${i}]afade=t=in:st=0:d=${duration},afade=t=out:st=${fadeOutStart}:d=${duration}[${audioSegment}]`);
                    audioSegments.push(audioSegment);
                }
            }
            // Concatenate all audio segments
            if (audioSegments.length === 1) {
                lastAudioOut = audioSegments[0];
            }
            else {
                lastAudioOut = 'aconcat';
                const audioConcatFilter = audioSegments.map(seg => `[${seg}]`).join('') +
                    `concat=n=${audioSegments.length}:v=0:a=1[${lastAudioOut}]`;
                filterGraph.push(audioConcatFilter);
            }
        }
    }
    else {
        // Use xfade for supported transitions
        for (let i = 1; i < inputs.length; i++) {
            const nextVideo = `v${i}`;
            const nextAudio = `a${i}`;
            const currentVideoOut = `vout${i}`;
            const currentAudioOut = `aout${i}`;
            const offset = cumulativeDuration - duration;
            filterGraph.push(`[${lastVideoOut}][${nextVideo}]xfade=transition=${effectiveTransition}:duration=${duration}:offset=${offset}[${currentVideoOut}]`);
            if (allHaveAudio) {
                filterGraph.push(`[${lastAudioOut}][${nextAudio}]acrossfade=d=${duration}[${currentAudioOut}]`);
            }
            lastVideoOut = currentVideoOut;
            if (allHaveAudio) {
                lastAudioOut = currentAudioOut;
            }
            cumulativeDuration += durations[i] - duration;
        }
    }
    // Build output options based on available streams
    const outputOptions = ['-map', `[${lastVideoOut}]`];
    if (allHaveAudio) {
        outputOptions.push('-map', `[${lastAudioOut}]`);
    }
    command
        .complexFilter(filterGraph)
        .outputOptions(outputOptions)
        .videoCodec('libx264');
    if (allHaveAudio) {
        command.audioCodec('aac');
    }
    command.save(outputPath);
    try {
        await (0, utils_1.runFfmpeg)(command);
        // If we used a fallback transition, add a note to the result
        if (effectiveTransition !== transition && transitionSupport.message) {
            console.log(`Transition fallback: ${transitionSupport.message}`);
        }
        return outputPath;
    }
    catch (error) {
        // Clean up output file if creation failed
        await fs.remove(outputPath).catch(() => { });
        const errorMessage = error.message;
        let helpfulMessage = 'Error applying transition.';
        // Check for specific error patterns
        if (errorMessage.includes('No such filter: \'xfade\'')) {
            helpfulMessage = `Your FFmpeg version doesn't support the 'xfade' filter (requires FFmpeg 4.3+). ` +
                `The transition '${transition}' requires xfade. Please upgrade FFmpeg or use basic transitions like 'fade'.`;
        }
        else if (errorMessage.includes('Invalid argument')) {
            helpfulMessage = `Invalid transition parameters. The '${transition}' effect may not be supported by your FFmpeg version.`;
        }
        throw new n8n_workflow_1.NodeOperationError(this.getNode(), `${helpfulMessage} FFmpeg error: ${errorMessage}`, { itemIndex });
    }
}
exports.executeMultiVideoTransition = executeMultiVideoTransition;
```
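The xfade branch keys each transition's `offset` off the running output length (`cumulativeDuration`), which shrinks by one transition's worth per joint. A standalone sketch of that bookkeeping (illustrative clip lengths, not package code):

```ts
// Standalone sketch of the xfade offset arithmetic used above.
// Three clips of 6 s, 4 s and 5 s joined with 1 s transitions: each xfade
// starts `transitionDuration` seconds before the end of what has been
// concatenated so far.
const durations = [6, 4, 5];  // illustrative clip lengths in seconds
const transitionDuration = 1;

let cumulative = durations[0];
const offsets: number[] = [];
for (let i = 1; i < durations.length; i++) {
    offsets.push(cumulative - transitionDuration);   // where this xfade begins
    cumulative += durations[i] - transitionDuration; // output grows by clip minus overlap
}

console.log(offsets);    // [5, 8]  -> the two xfade filters use offset=5 and offset=8
console.log(cumulative); // 13      -> final length: 6 + 4 + 5 minus two 1 s overlaps
```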
package/dist/nodes/MediaFX/operations/overlayVideo.d.ts (new file)
@@ -0,0 +1,16 @@

```ts
import { IExecuteFunctions, IDataObject } from 'n8n-workflow';
export interface OverlayVideoOptions {
    x: string | number;
    y: string | number;
    width: number;
    height: number;
    opacity: number;
    enableTimeControl: boolean;
    startTime: number;
    endTime: number;
    blendMode: string;
    audioHandling: 'main' | 'overlay' | 'mix' | 'none';
    mainVolume: number;
    overlayVolume: number;
}
export declare function executeOverlayVideo(this: IExecuteFunctions, mainVideoPath: string, overlayVideoPath: string, options: IDataObject, itemIndex: number): Promise<string>;
```
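Note that `executeOverlayVideo` accepts a loosely typed `IDataObject` rather than the exported `OverlayVideoOptions`, and the implementation below also reads keys the interface doesn't declare (`positionMode`, `sizeMode`, `paddingX`/`paddingY`, `widthPercent`, ...). A hedged sketch of an options object the implementation would accept, with every value illustrative:

```ts
import type { IDataObject } from 'n8n-workflow';

// Illustrative options: picture-in-picture in the bottom-right corner at 25%
// of the main video's width, shown from t=2s to t=10s, keeping the main audio.
const overlayOptions: IDataObject = {
    positionMode: 'alignment',
    horizontalAlign: 'right',
    verticalAlign: 'bottom',
    paddingX: 20,
    paddingY: 20,
    sizeMode: 'percentage',
    widthPercent: 25,
    heightMode: 'auto',    // keep aspect ratio (FFmpeg scale height = -1)
    opacity: 0.9,
    enableTimeControl: true,
    startTime: 2,
    endTime: 10,
    audioHandling: 'main', // keep only the main video's audio track
};
```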
package/dist/nodes/MediaFX/operations/overlayVideo.js (new file)
@@ -0,0 +1,240 @@

```js
"use strict";
var __createBinding = (this && this.__createBinding) || (Object.create ? (function(o, m, k, k2) {
    if (k2 === undefined) k2 = k;
    var desc = Object.getOwnPropertyDescriptor(m, k);
    if (!desc || ("get" in desc ? !m.__esModule : desc.writable || desc.configurable)) {
      desc = { enumerable: true, get: function() { return m[k]; } };
    }
    Object.defineProperty(o, k2, desc);
}) : (function(o, m, k, k2) {
    if (k2 === undefined) k2 = k;
    o[k2] = m[k];
}));
var __setModuleDefault = (this && this.__setModuleDefault) || (Object.create ? (function(o, v) {
    Object.defineProperty(o, "default", { enumerable: true, value: v });
}) : function(o, v) {
    o["default"] = v;
});
var __importStar = (this && this.__importStar) || function (mod) {
    if (mod && mod.__esModule) return mod;
    var result = {};
    if (mod != null) for (var k in mod) if (k !== "default" && Object.prototype.hasOwnProperty.call(mod, k)) __createBinding(result, mod, k);
    __setModuleDefault(result, mod);
    return result;
};
Object.defineProperty(exports, "__esModule", { value: true });
exports.executeOverlayVideo = void 0;
const n8n_workflow_1 = require("n8n-workflow");
const ffmpeg = require("fluent-ffmpeg");
const utils_1 = require("../utils");
const fs = __importStar(require("fs-extra"));
async function executeOverlayVideo(mainVideoPath, overlayVideoPath, options, itemIndex) {
    var _a, _b, _c, _d, _e, _f, _g, _h, _j, _k, _l, _m, _o, _p, _q, _r, _s, _t, _u, _v, _w;
    // Verify FFmpeg is available before proceeding
    try {
        (0, utils_1.verifyFfmpegAvailability)();
    }
    catch (error) {
        throw new n8n_workflow_1.NodeOperationError(this.getNode(), `FFmpeg is not available: ${error.message}`, { itemIndex });
    }
    // Extract output format from options (default to mp4)
    const outputFormat = options.outputFormat || 'mp4';
    const outputPath = (0, utils_1.getTempFile)(`.${outputFormat}`);
    // Extract options with defaults
    // Position options
    const positionMode = (_a = options.positionMode) !== null && _a !== void 0 ? _a : 'alignment';
    const horizontalAlign = (_b = options.horizontalAlign) !== null && _b !== void 0 ? _b : 'center';
    const verticalAlign = (_c = options.verticalAlign) !== null && _c !== void 0 ? _c : 'middle';
    const paddingX = (_d = options.paddingX) !== null && _d !== void 0 ? _d : 0;
    const paddingY = (_e = options.paddingY) !== null && _e !== void 0 ? _e : 0;
    const customX = (_f = options.x) !== null && _f !== void 0 ? _f : '0';
    const customY = (_g = options.y) !== null && _g !== void 0 ? _g : '0';
    // Size options
    const sizeMode = (_h = options.sizeMode) !== null && _h !== void 0 ? _h : 'percentage';
    const widthPercent = (_j = options.widthPercent) !== null && _j !== void 0 ? _j : 50;
    const heightMode = (_k = options.heightMode) !== null && _k !== void 0 ? _k : 'auto';
    const heightPercent = (_l = options.heightPercent) !== null && _l !== void 0 ? _l : 50;
    const widthPixels = (_m = options.widthPixels) !== null && _m !== void 0 ? _m : -1;
    const heightPixels = (_o = options.heightPixels) !== null && _o !== void 0 ? _o : -1;
    const opacity = (_p = options.opacity) !== null && _p !== void 0 ? _p : 1.0;
    const enableTimeControl = (_q = options.enableTimeControl) !== null && _q !== void 0 ? _q : false;
    const startTime = (_r = options.startTime) !== null && _r !== void 0 ? _r : 0;
    const endTime = (_s = options.endTime) !== null && _s !== void 0 ? _s : 0;
    const blendMode = (_t = options.blendMode) !== null && _t !== void 0 ? _t : 'normal';
    const audioHandling = (_u = options.audioHandling) !== null && _u !== void 0 ? _u : 'main';
    const mainVolume = (_v = options.mainVolume) !== null && _v !== void 0 ? _v : 1.0;
    const overlayVolume = (_w = options.overlayVolume) !== null && _w !== void 0 ? _w : 1.0;
    try {
        // Get durations for calculations
        const mainDuration = await (0, utils_1.getDuration)(mainVideoPath);
        const overlayDuration = await (0, utils_1.getDuration)(overlayVideoPath);
        // Get main video resolution for percentage calculations
        const mainVideoInfo = await (0, utils_1.getVideoStreamInfo)(mainVideoPath);
        const mainWidth = (mainVideoInfo === null || mainVideoInfo === void 0 ? void 0 : mainVideoInfo.width) || 1920;
        const mainHeight = (mainVideoInfo === null || mainVideoInfo === void 0 ? void 0 : mainVideoInfo.height) || 1080;
        // Calculate actual overlay dimensions based on size mode
        let scaleWidth = -1;
        let scaleHeight = -1;
        if (sizeMode === 'percentage') {
            // Calculate width as percentage of main video
            scaleWidth = Math.round(mainWidth * (widthPercent / 100));
            if (heightMode === 'auto') {
                // Keep aspect ratio - use -1 for FFmpeg to auto-calculate
                scaleHeight = -1;
            }
            else {
                // Calculate height as percentage of main video
                scaleHeight = Math.round(mainHeight * (heightPercent / 100));
            }
        }
        else if (sizeMode === 'pixels') {
            scaleWidth = widthPixels;
            scaleHeight = heightPixels;
        }
        // If sizeMode === 'original', leave both as -1 (no scaling)
        const needsScaling = sizeMode !== 'original';
        // Calculate position based on position mode
        let posX;
        let posY;
        if (positionMode === 'alignment') {
            // Use FFmpeg expressions for alignment
            // overlay_w and overlay_h refer to the scaled overlay dimensions
            switch (horizontalAlign) {
                case 'left':
                    posX = String(paddingX);
                    break;
                case 'center':
                    posX = `(main_w-overlay_w)/2`;
                    break;
                case 'right':
                    posX = `main_w-overlay_w-${paddingX}`;
                    break;
                default:
                    posX = String(paddingX);
            }
            switch (verticalAlign) {
                case 'top':
                    posY = String(paddingY);
                    break;
                case 'middle':
                    posY = `(main_h-overlay_h)/2`;
                    break;
                case 'bottom':
                    posY = `main_h-overlay_h-${paddingY}`;
                    break;
                default:
                    posY = String(paddingY);
            }
            // Add padding offset for center alignment
            if (horizontalAlign === 'center' && paddingX !== 0) {
                posX = `(main_w-overlay_w)/2+${paddingX}`;
            }
            if (verticalAlign === 'middle' && paddingY !== 0) {
                posY = `(main_h-overlay_h)/2+${paddingY}`;
            }
        }
        else {
            // Custom coordinates mode - use provided x, y values
            posX = String(customX);
            posY = String(customY);
        }
        // Calculate actual end time
        const actualEndTime = enableTimeControl
            ? (endTime > 0 ? endTime : mainDuration)
            : mainDuration;
        // Build the video filter chain
        let videoFilterChain = '';
        // Process overlay video (scale if needed)
        const needsOpacity = opacity < 1.0;
        let overlayProcessing = '[1:v]';
        if (needsScaling) {
            overlayProcessing += `scale=${scaleWidth}:${scaleHeight}`;
        }
        if (needsOpacity) {
            if (needsScaling) {
                overlayProcessing += ',';
            }
            overlayProcessing += `colorchannelmixer=aa=${opacity}`;
        }
        // Apply blend mode filter if not normal
        if (blendMode !== 'normal' && blendMode !== 'over') {
            if (needsScaling || needsOpacity) {
                overlayProcessing += ',';
            }
            // For blend modes, we'll use the blend filter differently
        }
        if (needsScaling || needsOpacity) {
            overlayProcessing += '[ovr]';
            videoFilterChain = overlayProcessing + ';';
            videoFilterChain += '[0:v][ovr]';
        }
        else {
            videoFilterChain = '[0:v][1:v]';
        }
        // Build overlay filter with position
        // eof_action=pass: continue showing main video after overlay ends
        // repeatlast=0: don't repeat the last frame of overlay
        videoFilterChain += `overlay=x=${posX}:y=${posY}:eof_action=pass:repeatlast=0`;
        // Add time control if enabled
        if (enableTimeControl) {
            videoFilterChain += `:enable='between(t,${startTime},${actualEndTime})'`;
        }
        videoFilterChain += '[outv]';
        // Build audio filter chain based on audio handling option
        let audioFilterChain = '';
        let outputMaps = ['-map', '[outv]'];
        switch (audioHandling) {
            case 'main':
                // Use only main video's audio
                outputMaps.push('-map', '0:a?');
                break;
            case 'overlay':
                // Use only overlay video's audio
                outputMaps.push('-map', '1:a?');
                break;
            case 'mix':
                // Mix both audio tracks (use longest duration)
                audioFilterChain = `;[0:a]volume=${mainVolume}[a0];[1:a]volume=${overlayVolume}[a1];[a0][a1]amix=inputs=2:duration=longest[outa]`;
                outputMaps.push('-map', '[outa]');
                break;
            case 'none':
                // No audio output
                break;
            default:
                outputMaps.push('-map', '0:a?');
        }
        const fullFilterComplex = videoFilterChain + audioFilterChain;
        console.log('=== OVERLAY VIDEO DEBUG ===');
        console.log('Main video:', mainVideoPath);
        console.log('Overlay video:', overlayVideoPath);
        console.log('Main duration:', mainDuration);
        console.log('Overlay duration:', overlayDuration);
        console.log('Main resolution:', { mainWidth, mainHeight });
        console.log('Position mode:', positionMode);
        console.log('Calculated position:', { posX, posY });
        console.log('Size mode:', sizeMode);
        console.log('Calculated scale:', { scaleWidth, scaleHeight, needsScaling });
        console.log('Options:', { opacity, enableTimeControl, startTime, actualEndTime, blendMode, audioHandling });
        console.log('Filter complex:', fullFilterComplex);
        console.log('Output maps:', outputMaps);
        console.log('===========================');
        const command = ffmpeg()
            .input(mainVideoPath)
            .input(overlayVideoPath)
            .complexFilter(fullFilterComplex)
            .outputOptions(outputMaps)
            .outputOptions(['-c:v', 'libx264', '-preset', 'fast', '-crf', '23'])
            .output(outputPath);
        await (0, utils_1.runFfmpeg)(command);
        return outputPath;
    }
    catch (error) {
        // Clean up output file if creation failed
        await fs.remove(outputPath).catch(() => { });
        console.error('=== OVERLAY VIDEO ERROR ===');
        console.error('Error details:', error);
        console.error('===========================');
        throw new n8n_workflow_1.NodeOperationError(this.getNode(), `Failed to overlay video: ${error instanceof Error ? error.message : 'Unknown error'}`, { itemIndex });
    }
}
exports.executeOverlayVideo = executeOverlayVideo;
```
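For the illustrative options shown after the .d.ts above, the scale/opacity/overlay path assembles a chain like this standalone sketch (TypeScript, values assumed rather than emitted by the package):

```ts
// Standalone sketch: the filter_complex string the branch above produces for a
// scaled, semi-transparent, bottom-right overlay shown between t=2s and t=10s.
const scaleWidth = 480; // 25% of an assumed 1920px-wide main video
const scaleHeight = -1; // heightMode 'auto': FFmpeg keeps the aspect ratio
const opacity = 0.9;
const posX = 'main_w-overlay_w-20'; // right-aligned with 20px padding
const posY = 'main_h-overlay_h-20'; // bottom-aligned with 20px padding

const videoFilterChain =
    `[1:v]scale=${scaleWidth}:${scaleHeight},colorchannelmixer=aa=${opacity}[ovr];` +
    `[0:v][ovr]overlay=x=${posX}:y=${posY}:eof_action=pass:repeatlast=0` +
    `:enable='between(t,2,10)'[outv]`;

console.log(videoFilterChain);
// [1:v]scale=480:-1,colorchannelmixer=aa=0.9[ovr];[0:v][ovr]overlay=x=main_w-overlay_w-20:y=main_h-overlay_h-20:eof_action=pass:repeatlast=0:enable='between(t,2,10)'[outv]
```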
package/dist/nodes/MediaFX/operations/separateAudio.d.ts (new file)
@@ -0,0 +1,17 @@

```ts
import { IExecuteFunctions } from 'n8n-workflow';
export interface SeparateAudioResult {
    videoPath: string;
    audioPath: string;
}
/**
 * Separates audio from video, returning both a muted video and the extracted audio track.
 *
 * @param input - Path to the input video file
 * @param videoFormat - Output format for the muted video (e.g., 'mp4', 'mov')
 * @param audioFormat - Output format for the extracted audio (e.g., 'mp3', 'aac', 'wav')
 * @param audioCodec - Audio codec to use (e.g., 'copy', 'libmp3lame', 'aac')
 * @param audioBitrate - Audio bitrate (e.g., '192k', '320k')
 * @param itemIndex - Current item index for error handling
 * @returns Object containing paths to both the muted video and extracted audio
 */
export declare function executeSeparateAudio(this: IExecuteFunctions, input: string, videoFormat: string, audioFormat: string, audioCodec: string, audioBitrate: string, itemIndex: number): Promise<SeparateAudioResult>;
```
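Since the declaration binds `this: IExecuteFunctions`, callers presumably invoke the operation with `.call(...)` so it can raise `NodeOperationError` via `this.getNode()`. A hedged usage sketch with hypothetical formats and a hypothetical wrapper:

```ts
import type { IExecuteFunctions } from 'n8n-workflow';
import { executeSeparateAudio, SeparateAudioResult } from './operations/separateAudio';

// Hypothetical helper (not in the package) showing the calling convention.
async function separate(ctx: IExecuteFunctions, inputPath: string): Promise<SeparateAudioResult> {
    return executeSeparateAudio.call(
        ctx,          // IExecuteFunctions context, bound as `this`
        inputPath,    // source video
        'mp4',        // muted video container (illustrative)
        'mp3',        // extracted audio container (illustrative)
        'libmp3lame', // audio codec (illustrative)
        '192k',       // audio bitrate (illustrative)
        0,            // itemIndex for error reporting
    );
    // Returns { videoPath, audioPath } pointing at the two temp output files.
}
```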