@t3lnet/sceneforge 1.0.4 → 1.0.5
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +4 -4
- package/cli/cli.js +80 -0
- package/cli/commands/add-audio-to-steps.js +328 -0
- package/cli/commands/concat-final-videos.js +480 -0
- package/cli/commands/doctor.js +102 -0
- package/cli/commands/generate-voiceover.js +304 -0
- package/cli/commands/pipeline.js +314 -0
- package/cli/commands/record-demo.js +305 -0
- package/cli/commands/setup.js +218 -0
- package/cli/commands/split-video.js +236 -0
- package/cli/utils/args.js +15 -0
- package/cli/utils/media.js +81 -0
- package/cli/utils/paths.js +93 -0
- package/cli/utils/sanitize.js +19 -0
- package/package.json +6 -1
package/README.md
CHANGED
|
@@ -27,13 +27,13 @@ await runDemoFromFile("./examples/create-dxf-quote.yaml", {
|
|
|
27
27
|
});
|
|
28
28
|
```
|
|
29
29
|
|
|
30
|
-
## CLI
|
|
30
|
+
## CLI
|
|
31
31
|
|
|
32
|
-
The CLI is
|
|
32
|
+
The CLI is included in this package. After install you can run `sceneforge` via `npx` or your package manager:
|
|
33
33
|
|
|
34
34
|
```bash
|
|
35
|
-
|
|
36
|
-
|
|
35
|
+
npx sceneforge record --definition examples/create-dxf-quote.yaml --base-url http://localhost:5173
|
|
36
|
+
npx sceneforge pipeline --definition examples/create-dxf-quote.yaml --base-url http://localhost:5173 --clean
|
|
37
37
|
```
|
|
38
38
|
|
|
39
39
|
## Extension (optional)
|
package/cli/cli.js
ADDED
|
@@ -0,0 +1,80 @@
|
|
|
1
|
+
#!/usr/bin/env node
|
|
2
|
+
import { runSplitVideoCommand } from "./commands/split-video.js";
|
|
3
|
+
import { runGenerateVoiceoverCommand } from "./commands/generate-voiceover.js";
|
|
4
|
+
import { runAddAudioCommand } from "./commands/add-audio-to-steps.js";
|
|
5
|
+
import { runConcatCommand } from "./commands/concat-final-videos.js";
|
|
6
|
+
import { runRecordDemoCommand } from "./commands/record-demo.js";
|
|
7
|
+
import { runPipelineCommand } from "./commands/pipeline.js";
|
|
8
|
+
import { runDoctorCommand } from "./commands/doctor.js";
|
|
9
|
+
import { runSetupCommand } from "./commands/setup.js";
|
|
10
|
+
|
|
/**
 * Print top-level CLI usage, listing every available subcommand.
 */
function printHelp() {
  const helpText = `
SceneForge CLI

Usage:
  sceneforge <command> [options]

Commands:
  record       Run a demo definition with Playwright and generate scripts
  setup        Run a setup definition and save storage state
  pipeline     Run the full pipeline (record → split → voiceover → add-audio → concat)
  split        Split recorded demo videos into per-step clips
  voiceover    Generate voiceover audio with ElevenLabs
  add-audio    Add audio tracks to per-step clips
  concat       Concatenate clips into final demo videos
  doctor       Run diagnostics for ffmpeg/ffprobe/env

Run "sceneforge <command> --help" for command-specific options.
`;
  console.log(helpText);
}
// Dispatch table: every accepted command name (including aliases) mapped to
// its handler. A Map avoids accidental prototype-key lookups.
const commandHandlers = new Map([
  ["record", runRecordDemoCommand],
  ["run", runRecordDemoCommand],
  ["generate", runRecordDemoCommand],
  ["setup", runSetupCommand],
  ["login", runSetupCommand],
  ["pipeline", runPipelineCommand],
  ["run-pipeline", runPipelineCommand],
  ["split", runSplitVideoCommand],
  ["split-video", runSplitVideoCommand],
  ["split-video-by-steps", runSplitVideoCommand],
  ["voiceover", runGenerateVoiceoverCommand],
  ["generate-voiceover", runGenerateVoiceoverCommand],
  ["add-audio", runAddAudioCommand],
  ["add-audio-to-steps", runAddAudioCommand],
  ["concat", runConcatCommand],
  ["concat-final-videos", runConcatCommand],
  ["doctor", runDoctorCommand],
  ["diagnostics", runDoctorCommand],
]);

const [command, ...rest] = process.argv.slice(2);

// No command, or an explicit help request: print usage and exit successfully.
if (!command || command === "help" || command === "--help" || command === "-h") {
  printHelp();
  process.exit(0);
}

const handler = commandHandlers.get(command.toLowerCase());

if (handler) {
  await handler(rest);
} else {
  // Echo the user's original spelling, not the lowercased form.
  console.error(`[error] Unknown command: ${command}`);
  printHelp();
  process.exit(1);
}
|
@@ -0,0 +1,328 @@
|
|
|
1
|
+
import * as fs from "fs/promises";
|
|
2
|
+
import * as path from "path";
|
|
3
|
+
import { checkFFmpeg, getMediaDuration, runFFmpeg } from "../utils/media.js";
|
|
4
|
+
import { getFlagValue, hasFlag } from "../utils/args.js";
|
|
5
|
+
import { getOutputPaths, resolveRoot, readJson } from "../utils/paths.js";
|
|
6
|
+
import { sanitizeFileSegment } from "../utils/sanitize.js";
|
|
7
|
+
|
|
/**
 * Print usage information for the add-audio command.
 */
function printHelp() {
  const helpText = `
Add audio to individual video step clips

Usage:
  sceneforge add-audio [options]

Options:
  --demo <name>        Process a specific demo by name
  --all                Process all demos with video steps and audio
  --padding <sec>      Extra padding after audio ends (default: 0.3)
  --root <path>        Project root (defaults to cwd)
  --output-dir <path>  Output directory (defaults to e2e/output or output)
  --help, -h           Show this help message

Output:
  Creates step_XX_<stepId>_with_audio.mp4 files in the videos/<demo>/ folder

Examples:
  sceneforge add-audio --demo create-quote
  sceneforge add-audio --all
`;
  console.log(helpText);
}
/**
 * Mux a voiceover audio track onto a single step clip.
 *
 * Behavior depends on how the audio (plus trailing padding) compares to the
 * clip length:
 *  1. Audio fits inside the video  → silence-pad the audio to the clip length.
 *  2. Audio is longer and the next step's clip is available → extend this clip
 *     by holding the next step's first frame, then concat the two.
 *  3. Audio is longer, no next clip → freeze this clip's last frame.
 *
 * @param {string} videoPath     source step clip
 * @param {string} audioPath     voiceover audio for the step
 * @param {string} outputPath    destination .mp4
 * @param {number} padding       seconds of silence kept after the audio ends
 * @param {string|null} nextVideoPath next step's clip, when available
 */
async function addAudioToStep(videoPath, audioPath, outputPath, padding, nextVideoPath) {
  const videoDuration = await getMediaDuration(videoPath);
  const audioDuration = await getMediaDuration(audioPath);
  const targetDuration = audioDuration + padding;

  // Shared output encoding settings (H.264 video, 192k AAC audio).
  const encodeArgs = ["-c:v", "libx264", "-preset", "fast", "-c:a", "aac", "-b:a", "192k"];

  if (targetDuration <= videoDuration) {
    // Case 1: pad the audio with silence so it spans the whole clip.
    const padDuration = Math.max(0, videoDuration - audioDuration);
    await runFFmpeg([
      "-y",
      "-i", videoPath,
      "-i", audioPath,
      "-filter_complex", `[1:a]apad=pad_dur=${padDuration}[a]`,
      "-map", "0:v",
      "-map", "[a]",
      "-t", String(videoDuration),
      ...encodeArgs,
      outputPath,
    ]);
    return;
  }

  const extensionNeeded = targetDuration - videoDuration;

  if (nextVideoPath) {
    // Case 2: borrow the first frame of the next step's clip and hold it for
    // the remaining time, then concatenate after the current clip.
    const stillDuration = 0.04; // 0.04 s ≈ one frame at 25 fps
    const filterGraph =
      `[1:v]trim=start=0:end=${stillDuration},setpts=PTS-STARTPTS,` +
      `tpad=stop_mode=clone:stop_duration=${extensionNeeded},` +
      `trim=duration=${extensionNeeded}[next_still];` +
      `[0:v][next_still]concat=n=2:v=1:a=0[outv]`;

    await runFFmpeg([
      "-y",
      "-i", videoPath,
      "-i", nextVideoPath,
      "-i", audioPath,
      "-filter_complex", filterGraph,
      "-map", "[outv]",
      "-map", "2:a",
      ...encodeArgs,
      "-t", String(targetDuration),
      outputPath,
    ]);
    return;
  }

  // Case 3: freeze the clip's own last frame for the remaining time.
  await runFFmpeg([
    "-y",
    "-i", videoPath,
    "-i", audioPath,
    "-filter_complex", `[0:v]tpad=stop_mode=clone:stop_duration=${extensionNeeded}[v]`,
    "-map", "[v]",
    "-map", "1:a",
    ...encodeArgs,
    "-t", String(targetDuration),
    outputPath,
  ]);
}
/**
 * Add voiceover audio to every step clip of one demo.
 *
 * Reads videos/<demo>/steps-manifest.json and audio/<demo>/manifest.json,
 * writes step_XX_<stepId>_with_audio.mp4 clips next to the originals, and
 * records each expected output path back into the steps manifest under
 * `stepsWithAudio`.
 *
 * @param {string} demoName demo folder name
 * @param {{videosDir: string, audioDir: string, tempDir: string}} paths
 * @param {number} padding seconds of silence kept after each audio segment
 */
async function processDemo(demoName, paths, padding) {
  console.log(`\n[audio] Processing: ${demoName}\n`);

  // Derive the zero-padded index, sanitized id, and with-audio output path
  // for a step (used both in the main loop and when rewriting the manifest).
  const describeStep = (step) => {
    const paddedIndex = String(step.stepIndex + 1).padStart(2, "0");
    const safeStepId = sanitizeFileSegment(
      step.safeStepId || step.stepId,
      `step-${step.stepIndex + 1}`
    );
    const outputPath = path.join(
      paths.videosDir,
      demoName,
      `step_${paddedIndex}_${safeStepId}_with_audio.mp4`
    );
    return { paddedIndex, safeStepId, outputPath };
  };

  const stepsManifestPath = path.join(paths.videosDir, demoName, "steps-manifest.json");
  let stepsManifest;
  try {
    stepsManifest = await readJson(stepsManifestPath);
  } catch {
    console.error(`[audio] ✗ Steps manifest not found: ${stepsManifestPath}`);
    console.error("[audio] Run sceneforge split first");
    return;
  }

  const audioManifestPath = path.join(paths.audioDir, demoName, "manifest.json");
  let audioManifest;
  try {
    audioManifest = await readJson(audioManifestPath);
  } catch {
    console.error(`[audio] ✗ Audio manifest not found: ${audioManifestPath}`);
    console.error("[audio] Run sceneforge voiceover first");
    return;
  }

  console.log(`[audio] Steps: ${stepsManifest.steps.length}`);
  console.log(`[audio] Audio segments: ${audioManifest.segments.length}`);

  await fs.mkdir(paths.tempDir, { recursive: true });

  const outputFiles = [];

  for (const [index, step] of stepsManifest.steps.entries()) {
    const nextStep = stepsManifest.steps[index + 1];
    const { paddedIndex, outputPath } = describeStep(step);
    const audioSegment = audioManifest.segments.find((segment) => segment.stepId === step.stepId);

    if (!audioSegment) {
      console.log(`[audio] ${paddedIndex}. ${step.stepId}: ⚠ No audio found, skipping`);
      continue;
    }

    try {
      await fs.access(step.videoFile);
    } catch {
      console.error(`[audio] ${paddedIndex}. ${step.stepId}: ✗ Video not found`);
      continue;
    }

    const videoDuration = await getMediaDuration(step.videoFile);
    const audioDuration = await getMediaDuration(audioSegment.audioFile);
    const needsExtension = audioDuration + padding > videoDuration;
    const needsAudioPad = !needsExtension && audioDuration < videoDuration;

    // Only use the next step's clip when it actually exists on disk.
    let nextVideoPath = null;
    if (needsExtension && nextStep?.videoFile) {
      try {
        await fs.access(nextStep.videoFile);
        nextVideoPath = nextStep.videoFile;
      } catch {
        nextVideoPath = null;
      }
    }
    const extensionDuration = Math.max(0, audioDuration + padding - videoDuration);

    // Describe which of the three muxing strategies will be applied.
    let plan = "";
    if (needsExtension) {
      plan = nextVideoPath
        ? ` → hold next step first frame for ${extensionDuration.toFixed(2)}s`
        : ` → freeze last frame for ${extensionDuration.toFixed(2)}s`;
    } else if (needsAudioPad) {
      plan = ` → pad audio with silence for ${(videoDuration - audioDuration).toFixed(2)}s`;
    }
    console.log(
      `[audio] ${paddedIndex}. ${step.stepId}: video ${videoDuration.toFixed(2)}s, audio ${audioDuration.toFixed(2)}s${plan}`
    );

    try {
      await addAudioToStep(step.videoFile, audioSegment.audioFile, outputPath, padding, nextVideoPath);
      outputFiles.push(outputPath);
    } catch (error) {
      console.error(`[audio] ${paddedIndex}. ${step.stepId}: ✗ Failed to process`);
      console.error(error);
    }
  }

  // Persist the expected with-audio paths for every step back into the
  // steps manifest (paths are recorded even for steps skipped above).
  const updatedManifest = {
    ...stepsManifest,
    stepsWithAudio: stepsManifest.steps.map((step) => {
      const { safeStepId, outputPath } = describeStep(step);
      return { ...step, safeStepId, videoFileWithAudio: outputPath };
    }),
  };

  await fs.writeFile(stepsManifestPath, JSON.stringify(updatedManifest, null, 2));

  console.log(`\n[audio] ✓ Processed ${outputFiles.length} steps with audio`);
  console.log(`[audio] Output: ${path.join(paths.videosDir, demoName)}`);
}
/**
 * Run audio muxing for every demo that has both a steps manifest
 * (videos/<demo>/steps-manifest.json) and an audio manifest
 * (audio/<demo>/manifest.json), then remove the shared temp directory.
 *
 * @param {{videosDir: string, audioDir: string, tempDir: string}} paths
 * @param {number} padding seconds of silence kept after each audio segment
 */
async function processAll(paths, padding) {
  console.log("\n[audio] Processing all demos...\n");

  try {
    const videoDirs = await fs.readdir(paths.videosDir);
    const demosToProcess = [];

    for (const dir of videoDirs) {
      const stepsManifestPath = path.join(paths.videosDir, dir, "steps-manifest.json");
      const audioManifestPath = path.join(paths.audioDir, dir, "manifest.json");

      try {
        await fs.access(stepsManifestPath);
        await fs.access(audioManifestPath);
        demosToProcess.push(dir);
      } catch {
        // Skip demos missing manifests
      }
    }

    if (demosToProcess.length === 0) {
      console.log("[audio] No demos ready for audio addition");
      console.log("[audio] Make sure you've run:");
      console.log("[audio] 1. sceneforge split");
      console.log("[audio] 2. sceneforge voiceover");
      return;
    }

    console.log(`[audio] Found ${demosToProcess.length} demo(s) to process\n`);

    for (const demo of demosToProcess) {
      await processDemo(demo, paths, padding);
    }

    await fs.rm(paths.tempDir, { recursive: true, force: true });

    console.log("\n[audio] All demos processed!");
  } catch (error) {
    console.error("[audio] Error:", error);
    // Fix: previously the error was only logged and the process still exited
    // with code 0, hiding failures from callers/CI. Surface the failure.
    process.exitCode = 1;
  }
}
/**
 * CLI entry point for `sceneforge add-audio`.
 *
 * Flags:
 *   --demo <name>        process a single demo
 *   --all                process every demo that has both manifests
 *   --padding <sec>      trailing silence after each audio segment (default 0.3)
 *   --root <path>        project root (defaults to cwd)
 *   --output-dir <path>  output directory override
 *   --help, -h           show usage
 *
 * With neither --demo nor --all, prints usage. Exits with code 1 when
 * FFmpeg is missing or --padding is invalid.
 *
 * @param {string[]} [argv] arguments (defaults to process.argv.slice(2))
 */
export async function runAddAudioCommand(argv) {
  const args = argv ?? process.argv.slice(2);
  const help = hasFlag(args, "--help") || hasFlag(args, "-h");
  const demo = getFlagValue(args, "--demo");
  const all = hasFlag(args, "--all");
  const paddingRaw = getFlagValue(args, "--padding") ?? "0.3";
  const padding = Number.parseFloat(paddingRaw);
  const root = getFlagValue(args, "--root");
  const outputDir = getFlagValue(args, "--output-dir");

  if (help) {
    printHelp();
    return;
  }

  // Fix: a non-numeric or negative --padding previously produced NaN (or a
  // negative duration) that propagated silently into the ffmpeg filter
  // graphs; reject it up front with a clear error.
  if (!Number.isFinite(padding) || padding < 0) {
    console.error(`[error] Invalid --padding value: ${paddingRaw}`);
    process.exit(1);
  }

  const hasFFmpeg = await checkFFmpeg();
  if (!hasFFmpeg) {
    console.error("[error] FFmpeg is not installed");
    process.exit(1);
  }

  const rootDir = resolveRoot(root);
  const paths = await getOutputPaths(rootDir, outputDir);

  if (demo) {
    await processDemo(demo, paths, padding);
    await fs.rm(paths.tempDir, { recursive: true, force: true });
    return;
  }

  if (all) {
    await processAll(paths, padding);
    return;
  }

  // Neither --demo nor --all was given: show usage.
  printHelp();
}