@vargai/sdk 0.1.1 → 0.1.2

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (46)
  1. package/.github/workflows/ci.yml +23 -0
  2. package/.husky/README.md +102 -0
  3. package/.husky/commit-msg +9 -0
  4. package/.husky/pre-commit +12 -0
  5. package/.husky/pre-push +9 -0
  6. package/.size-limit.json +8 -0
  7. package/.test-hooks.ts +5 -0
  8. package/CONTRIBUTING.md +150 -0
  9. package/LICENSE.md +53 -0
  10. package/README.md +7 -0
  11. package/action/captions/index.ts +202 -12
  12. package/action/captions/tiktok.ts +538 -0
  13. package/action/cut/index.ts +119 -0
  14. package/action/fade/index.ts +116 -0
  15. package/action/merge/index.ts +177 -0
  16. package/action/remove/index.ts +184 -0
  17. package/action/split/index.ts +133 -0
  18. package/action/transition/index.ts +154 -0
  19. package/action/trim/index.ts +117 -0
  20. package/bun.lock +299 -8
  21. package/cli/index.ts +1 -1
  22. package/commitlint.config.js +22 -0
  23. package/index.ts +12 -0
  24. package/lib/ass.ts +547 -0
  25. package/lib/fal.ts +75 -1
  26. package/lib/ffmpeg.ts +400 -0
  27. package/lib/higgsfield/example.ts +22 -29
  28. package/lib/higgsfield/index.ts +3 -2
  29. package/lib/higgsfield/soul.ts +0 -5
  30. package/lib/remotion/SKILL.md +240 -21
  31. package/lib/remotion/cli.ts +34 -0
  32. package/package.json +20 -3
  33. package/pipeline/cookbooks/scripts/animate-frames-parallel.ts +83 -0
  34. package/pipeline/cookbooks/scripts/combine-scenes.sh +53 -0
  35. package/pipeline/cookbooks/scripts/generate-frames-parallel.ts +98 -0
  36. package/pipeline/cookbooks/scripts/still-to-video.sh +37 -0
  37. package/pipeline/cookbooks/text-to-tiktok.md +669 -0
  38. package/scripts/.gitkeep +0 -0
  39. package/service/music/index.ts +29 -14
  40. package/tsconfig.json +1 -1
  41. package/HIGGSFIELD_REWRITE_SUMMARY.md +0 -300
  42. package/TEST_RESULTS.md +0 -122
  43. package/output.txt +0 -1
  44. package/scripts/produce-menopause-campaign.sh +0 -202
  45. package/test-import.ts +0 -7
  46. package/test-services.ts +0 -97

package/action/captions/tiktok.ts (new file)
@@ -0,0 +1,538 @@
+ #!/usr/bin/env bun
+
+ /**
+  * TikTok-style word-by-word captions with animations
+  *
+  * Features:
+  * - Progressive word appearance (words appear one by one)
+  * - Bounce animation on active word
+  * - Color switching (yellow inactive → white active)
+  * - Fade in/out transitions
+  * - TikTok safe zones for mobile video
+  *
+  * Based on Python reference: sdk-py-reference/varg-cli/lib/caption.py
+  */
+
+ import { existsSync, unlinkSync } from "node:fs";
+ import { tmpdir } from "node:os";
+ import { join } from "node:path";
+ import ffmpeg from "fluent-ffmpeg";
+ import {
+   type ASSDocument,
+   type ASSEvent,
+   type ASSStyle,
+   bounceTag,
+   colorTag,
+   colorToBGR,
+   createDocument,
+   createEvent,
+   createTikTokStyle,
+   fadeTag,
+   resetTag,
+   saveASS,
+   splitIntoLines,
+ } from "../../lib/ass";
+
+ // ============ TIKTOK SAFE ZONES ============
+
+ /**
+  * TikTok canvas constants for 9:16 aspect ratio
+  */
+ export const TIKTOK_CANVAS = {
+   WIDTH: 1080,
+   HEIGHT: 1920,
+   SAFE_ZONE_WIDTH: 840,
+   SAFE_ZONE_HEIGHT: 1280,
+   MARGIN_TOP: 120,
+   MARGIN_BOTTOM: 240,
+   MARGIN_LEFT: 60,
+   MARGIN_RIGHT: 120,
+ } as const;
+
+ /**
+  * Position zones for TikTok (pixels from edge for 1920px height)
+  */
+ export const TIKTOK_POSITIONS = {
+   "upper-middle": { alignment: 8, marginV: 300 }, // Golden zone for hooks (top center)
+   middle: { alignment: 5, marginV: 0 }, // Middle center
+   "lower-middle": { alignment: 2, marginV: 400 }, // Above navigation (bottom center)
+   top: { alignment: 8, marginV: 120 }, // Top center
+   bottom: { alignment: 2, marginV: 240 }, // Bottom center
+ } as const;
+
+ export type TikTokPosition = keyof typeof TIKTOK_POSITIONS;
+
+ // ============ TYPES ============
+
+ export interface TikTokWordStyle {
+   /** Font size in pixels (default: 80, min 60-80 for 1080x1920) */
+   fontsize?: number;
+   /** Color of active/highlighted word (default: 'white') */
+   activeColor?: string | [number, number, number];
+   /** Color of inactive words (default: 'tiktok_yellow' #FEE715) */
+   inactiveColor?: string | [number, number, number];
+   /** Font name (default: 'Helvetica Bold') */
+   font?: string;
+   /** Stroke/outline color RGB (default: [0, 0, 0] black) */
+   strokeColor?: [number, number, number];
+   /** Stroke width in pixels (default: 8 for 4.5:1 contrast) */
+   strokeWidth?: number;
+   /** Vertical position (default: 'upper-middle') */
+   position?: TikTokPosition;
+   /** Scale multiplier for bounce effect (default: 1.12) */
+   bounceScale?: number;
+   /** Max characters per line (default: 27) */
+   maxCharsPerLine?: number;
+   /** Enable bounce animation (default: true) */
+   useBounce?: boolean;
+   /** Fade in/out duration in seconds (default: 0.15) */
+   fadeDuration?: number;
+   /** Pause between caption segments in seconds (default: 0.1) */
+   pauseBetweenSegments?: number;
+   /** Animation transition duration in milliseconds (default: 50) */
+   animationDuration?: number;
+ }
+
+ export interface TikTokWordData {
+   word: string;
+   start: number; // seconds
+   end: number; // seconds
+ }
+
+ export interface TikTokCaptionItem {
+   /** Full caption text */
+   text: string;
+   /** Start time in seconds */
+   start: number;
+   /** End time in seconds */
+   end: number;
+   /** Optional word-level timing (auto-splits if not provided) */
+   words?: TikTokWordData[];
+ }
+
+ export interface AddTikTokCaptionsOptions {
+   videoPath: string;
+   output: string;
+   captions: TikTokCaptionItem[];
+   style?: TikTokWordStyle;
+ }
+
+ // ============ DEFAULT STYLE ============
+
+ const DEFAULT_TIKTOK_STYLE: Required<TikTokWordStyle> = {
+   fontsize: 80,
+   activeColor: "white",
+   inactiveColor: "tiktok_yellow",
+   font: "Helvetica Bold",
+   strokeColor: [0, 0, 0],
+   strokeWidth: 8,
+   position: "upper-middle",
+   bounceScale: 1.12,
+   maxCharsPerLine: 27,
+   useBounce: true,
+   fadeDuration: 0.15,
+   pauseBetweenSegments: 0.1,
+   animationDuration: 50,
+ };
+
+ // ============ VIDEO UTILITIES ============
+
+ /**
+  * Get video dimensions using ffprobe
+  */
+ async function getVideoDimensions(
+   videoPath: string,
+ ): Promise<{ width: number; height: number }> {
+   return new Promise((resolve, reject) => {
+     ffmpeg.ffprobe(videoPath, (err, metadata) => {
+       if (err) {
+         console.warn(
+           "[tiktok] could not get video dimensions, defaulting to 1080x1920",
+         );
+         resolve({ width: 1080, height: 1920 });
+         return;
+       }
+
+       const videoStream = metadata.streams.find(
+         (s) => s.codec_type === "video",
+       );
+       if (videoStream?.width && videoStream?.height) {
+         resolve({ width: videoStream.width, height: videoStream.height });
+       } else {
+         resolve({ width: 1080, height: 1920 });
+       }
+     });
+   });
+ }
+
+ /**
+  * Calculate margin based on position and video height
+  */
+ function calculateMarginV(
+   position: TikTokPosition,
+   videoHeight: number,
+ ): number {
+   const scale = videoHeight / TIKTOK_CANVAS.HEIGHT;
+   const posConfig = TIKTOK_POSITIONS[position];
+   return Math.round(posConfig.marginV * scale);
+ }
+
+ /**
+  * Get alignment for position
+  */
+ function getAlignment(position: TikTokPosition): number {
+   return TIKTOK_POSITIONS[position].alignment;
+ }
+
+ // ============ CAPTION GENERATION ============
+
+ /**
+  * Split text into words with even timing
+  */
+ function autoSplitWords(
+   text: string,
+   start: number,
+   end: number,
+ ): TikTokWordData[] {
+   const words = text.split(/\s+/).filter((w) => w.length > 0);
+   if (words.length === 0) return [];
+
+   const duration = end - start;
+   const durationPerWord = duration / words.length;
+
+   return words.map((word, i) => ({
+     word,
+     start: start + i * durationPerWord,
+     end: start + (i + 1) * durationPerWord,
+   }));
+ }
+
+ /**
+  * Build ASS text for a word with styling
+  */
+ function buildWordText(
+   word: string,
+   isActive: boolean,
+   style: Required<TikTokWordStyle>,
+   wordDurationMs: number,
+ ): string {
+   const color = isActive ? style.activeColor : style.inactiveColor;
+   const colorStr = colorTag(color);
+
+   if (isActive && style.useBounce) {
+     const scale = Math.round(style.bounceScale * 100);
+     const bounce = bounceTag(wordDurationMs, scale, style.animationDuration);
+     return `${bounce}${colorStr}${word}${resetTag()}`;
+   }
+
+   return `${colorStr}${word}${resetTag()}`;
+ }
+
+ /**
+  * Generate ASS events for TikTok-style word-by-word captions
+  */
+ function generateTikTokEvents(
+   captions: TikTokCaptionItem[],
+   style: Required<TikTokWordStyle>,
+ ): ASSEvent[] {
+   const events: ASSEvent[] = [];
+   const fadeInMs = Math.round(style.fadeDuration * 1000);
+   const fadeOutMs = Math.round(style.fadeDuration * 1000);
+
+   let prevSegmentEnd = 0;
+
+   for (const caption of captions) {
+     const { end, text } = caption;
+     let start = caption.start;
+
+     // Add pause after previous segment
+     if (start < prevSegmentEnd + style.pauseBetweenSegments) {
+       start = prevSegmentEnd + style.pauseBetweenSegments;
+     }
+
+     // Get word timings (auto-split if not provided)
+     const wordsData = caption.words ?? autoSplitWords(text, start, end);
+     if (wordsData.length === 0) continue;
+
+     // Split words into lines
+     const lines = splitIntoLines(wordsData, style.maxCharsPerLine);
+
+     // Process each line
+     for (const lineWords of lines) {
+       const firstWord = lineWords[0];
+       const lastWord = lineWords[lineWords.length - 1];
+       if (!firstWord || !lastWord) continue;
+
+       const lineEnd = lastWord.end;
+
+       // Create event for each word (progressive appearance)
+       for (let wordIdx = 0; wordIdx < lineWords.length; wordIdx++) {
+         const wordData = lineWords[wordIdx];
+         if (!wordData) continue;
+
+         const wordStart = wordData.start;
+         const wordEnd = wordData.end;
+         const wordDurationMs = Math.round((wordEnd - wordStart) * 1000);
+
+         // Build text showing only words up to current (progressive)
+         const lineParts: string[] = [];
+
+         for (let i = 0; i <= wordIdx; i++) {
+           const wd = lineWords[i];
+           if (!wd) continue;
+
+           const isActive = i === wordIdx;
+           const wdDurationMs = Math.round((wd.end - wd.start) * 1000);
+
+           lineParts.push(buildWordText(wd.word, isActive, style, wdDurationMs));
+         }
+
+         let lineText = lineParts.join(" ");
+
+         // First word: add fade in
+         if (wordIdx === 0) {
+           // Set initial color before fade to prevent flash
+           const activeColorStr = colorTag(style.activeColor);
+           lineText = `${activeColorStr}${fadeTag(fadeInMs, 0)}${lineText}`;
+         }
+
+         // Last word: add fade out and extend duration
+         let eventEnd = wordEnd;
+         if (wordIdx === lineWords.length - 1) {
+           const activeColorStr = colorTag(style.activeColor);
+           lineText = `${activeColorStr}${fadeTag(0, fadeOutMs)}${lineParts.join(" ")}`;
+           eventEnd = wordEnd + style.fadeDuration;
+         }
+
+         events.push(createEvent(wordStart, eventEnd, lineText, "TikTok"));
+       }
+
+       prevSegmentEnd = lineEnd;
+     }
+   }
+
+   return events;
+ }
+
+ /**
+  * Create ASS style for TikTok captions
+  */
+ function createTikTokASSStyle(
+   style: Required<TikTokWordStyle>,
+   videoWidth: number,
+   videoHeight: number,
+ ): ASSStyle {
+   const scale = videoWidth / TIKTOK_CANVAS.WIDTH;
+   const marginV = calculateMarginV(style.position, videoHeight);
+   const alignment = getAlignment(style.position);
+
+   return createTikTokStyle("TikTok", {
+     fontname: style.font,
+     fontsize: Math.round(style.fontsize * scale),
+     primarycolor: colorToBGR(style.inactiveColor), // Yellow base
+     secondarycolor: colorToBGR(style.activeColor), // White highlight
+     outlinecolor: colorToBGR(style.strokeColor),
+     outline: Math.round(style.strokeWidth * scale),
+     spacing: Math.round(3 * scale),
+     alignment,
+     marginL: Math.round(TIKTOK_CANVAS.MARGIN_LEFT * scale),
+     marginR: Math.round(TIKTOK_CANVAS.MARGIN_RIGHT * scale),
+     marginV,
+   });
+ }
+
+ // ============ MAIN FUNCTION ============
+
+ /**
+  * Add TikTok-style word-by-word captions to a video
+  *
+  * Features:
+  * - Progressive word appearance
+  * - Bounce animation on active word
+  * - Yellow → White color switching
+  * - Fade in/out transitions
+  * - TikTok safe zone positioning
+  *
+  * @example
+  * ```typescript
+  * await addTikTokCaptions({
+  *   videoPath: 'video.mp4',
+  *   output: 'captioned.mp4',
+  *   captions: [
+  *     {
+  *       text: 'Follow the Apostles Diet',
+  *       start: 0,
+  *       end: 3,
+  *       words: [
+  *         { word: 'Follow', start: 0, end: 0.5 },
+  *         { word: 'the', start: 0.5, end: 0.8 },
+  *         { word: 'Apostles', start: 0.8, end: 1.5 },
+  *         { word: 'Diet', start: 1.5, end: 3 },
+  *       ]
+  *     }
+  *   ],
+  *   style: { position: 'upper-middle', bounceScale: 1.15 }
+  * });
+  * ```
+  */
+ export async function addTikTokCaptions(
+   options: AddTikTokCaptionsOptions,
+ ): Promise<string> {
+   const { videoPath, output, captions, style: userStyle } = options;
+
+   if (!videoPath) {
+     throw new Error("videoPath is required");
+   }
+   if (!output) {
+     throw new Error("output is required");
+   }
+   if (!existsSync(videoPath)) {
+     throw new Error(`video file not found: ${videoPath}`);
+   }
+   if (!captions || captions.length === 0) {
+     throw new Error("captions array is required and must not be empty");
+   }
+
+   console.log("[tiktok] adding TikTok-style captions...");
+
+   // Merge style with defaults
+   const style: Required<TikTokWordStyle> = {
+     ...DEFAULT_TIKTOK_STYLE,
+     ...userStyle,
+   };
+
+   // Get video dimensions
+   const { width, height } = await getVideoDimensions(videoPath);
+   console.log(`[tiktok] video dimensions: ${width}x${height}`);
+
+   // Create ASS style
+   const assStyle = createTikTokASSStyle(style, width, height);
+
+   // Generate events
+   const events = generateTikTokEvents(captions, style);
+   console.log(`[tiktok] generated ${events.length} caption events`);
+
+   // Create ASS document
+   const doc = createDocument(
+     width,
+     height,
+     [assStyle],
+     events,
+     "TikTok Captions",
+   );
+
+   // Save to temp file
+   const assPath = join(tmpdir(), `tiktok-captions-${Date.now()}.ass`);
+   saveASS(doc, assPath);
+
+   console.log("[tiktok] rendering video with ffmpeg...");
+
+   // Apply with ffmpeg
+   return new Promise((resolve, reject) => {
+     ffmpeg(videoPath)
+       .videoFilters(`ass=${assPath}`)
+       .outputOptions(["-c:a", "copy"]) // copy audio without re-encoding
+       .output(output)
+       .on("end", () => {
+         // Clean up temp file
+         try {
+           unlinkSync(assPath);
+         } catch {
+           // Ignore cleanup errors
+         }
+         console.log(`[tiktok] saved to ${output}`);
+         resolve(output);
+       })
+       .on("error", (err) => {
+         // Clean up temp file
+         try {
+           unlinkSync(assPath);
+         } catch {
+           // Ignore cleanup errors
+         }
+         console.error("[tiktok] error:", err);
+         reject(err);
+       })
+       .run();
+   });
+ }
+
+ // ============ CLI ============
+
+ if (import.meta.main) {
+   const args = process.argv.slice(2);
+
+   if (args.length < 2) {
+     console.log(`
+ TikTok-Style Word-by-Word Captions
+ ==================================
+
+ Usage:
+   bun run action/captions/tiktok.ts <videoPath> <outputPath> [options]
+
+ Options:
+   --text <text>       Caption text (will auto-split words)
+   --start <seconds>   Start time (default: 0)
+   --end <seconds>     End time (required if --text provided)
+   --position <pos>    Position: upper-middle, middle, lower-middle, top, bottom
+   --bounce <scale>    Bounce scale (default: 1.12)
+   --no-bounce         Disable bounce animation
+
+ Example:
+   bun run action/captions/tiktok.ts video.mp4 output.mp4 \\
+     --text "Follow the Apostles Diet" \\
+     --start 0 --end 3 \\
+     --position upper-middle
+ `);
+     process.exit(0);
+   }
+
+   const videoPath = args[0] as string;
+   const outputPath = args[1] || videoPath.replace(/(\.[^.]+)$/, "_tiktok$1");
+
+   // Parse options
+   let text = "";
+   let start = 0;
+   let end = 5;
+   let position: TikTokPosition = "upper-middle";
+   let bounceScale = 1.12;
+   let useBounce = true;
+
+   for (let i = 2; i < args.length; i++) {
+     const arg = args[i];
+     if (arg === "--text" && args[i + 1]) {
+       text = args[++i] as string;
+     } else if (arg === "--start" && args[i + 1]) {
+       start = Number.parseFloat(args[++i] as string);
+     } else if (arg === "--end" && args[i + 1]) {
+       end = Number.parseFloat(args[++i] as string);
+     } else if (arg === "--position" && args[i + 1]) {
+       position = args[++i] as TikTokPosition;
+     } else if (arg === "--bounce" && args[i + 1]) {
+       bounceScale = Number.parseFloat(args[++i] as string);
+     } else if (arg === "--no-bounce") {
+       useBounce = false;
+     }
+   }
+
+   if (!text) {
+     text = "This is a TikTok style caption demo";
+   }
+
+   const captions: TikTokCaptionItem[] = [{ text, start, end }];
+
+   addTikTokCaptions({
+     videoPath,
+     output: outputPath,
+     captions,
+     style: { position, bounceScale, useBounce },
+   })
+     .then(() => {
+       console.log("[tiktok] done!");
+       process.exit(0);
+     })
+     .catch((err) => {
+       console.error("[tiktok] failed:", err);
+       process.exit(1);
+     });
+ }
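
The JSDoc example above supplies explicit word timings; a minimal sketch of the other path, where no `words` array is given and `autoSplitWords` divides the caption duration evenly, is shown below. The import path and file names are hypothetical illustrations, not part of the published package contents.

```typescript
// Sketch only: drives addTikTokCaptions from the hunk above with
// auto-split word timing. "intro.mp4" / "intro-captioned.mp4" are
// hypothetical paths; adjust the import to where the module lives.
import { addTikTokCaptions } from "./action/captions/tiktok";

await addTikTokCaptions({
  videoPath: "intro.mp4",
  output: "intro-captioned.mp4",
  captions: [
    // No `words` array: the 0–2.5 s span is split evenly across the words.
    { text: "Five tips for better sleep", start: 0, end: 2.5 },
  ],
  style: { position: "lower-middle", useBounce: false },
});
```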

package/action/cut/index.ts (new file)
@@ -0,0 +1,119 @@
+ #!/usr/bin/env bun
+
+ /**
+  * cut action
+  * split video at specific timestamps into separate clips
+  */
+
+ import { existsSync } from "node:fs";
+ import { basename, dirname, extname, join } from "node:path";
+ import type { ActionMeta } from "../../cli/types";
+ import { splitAtTimestamps } from "../../lib/ffmpeg";
+
+ export const meta: ActionMeta = {
+   name: "cut",
+   type: "action",
+   description: "split video at specific timestamps into separate clips",
+   inputType: "video",
+   outputType: "video",
+   schema: {
+     input: {
+       type: "object",
+       required: ["video", "timestamps"],
+       properties: {
+         video: {
+           type: "string",
+           format: "file-path",
+           description: "input video file",
+         },
+         timestamps: {
+           type: "string",
+           description:
+             "comma-separated cut points in seconds (e.g., '10,30,60')",
+         },
+         "output-prefix": {
+           type: "string",
+           description: "prefix for output files (default: input filename)",
+         },
+       },
+     },
+     output: {
+       type: "string",
+       format: "file-path",
+       description: "comma-separated list of output paths",
+     },
+   },
+   async run(options) {
+     const {
+       video,
+       timestamps,
+       "output-prefix": outputPrefix,
+     } = options as {
+       video: string;
+       timestamps: string;
+       "output-prefix"?: string;
+     };
+     return cut({ video, timestamps, outputPrefix });
+   },
+ };
+
+ export interface CutOptions {
+   video: string;
+   timestamps: string;
+   outputPrefix?: string;
+ }
+
+ export interface CutResult {
+   outputs: string[];
+   count: number;
+ }
+
+ /**
+  * split video at timestamps into separate clips
+  */
+ export async function cut(options: CutOptions): Promise<CutResult> {
+   const { video, timestamps, outputPrefix } = options;
+
+   if (!video) {
+     throw new Error("video is required");
+   }
+   if (!timestamps) {
+     throw new Error("timestamps are required");
+   }
+   if (!existsSync(video)) {
+     throw new Error(`video file not found: ${video}`);
+   }
+
+   // Parse timestamps from comma-separated string
+   const timestampValues = timestamps
+     .split(",")
+     .map((t) => Number.parseFloat(t.trim()))
+     .filter((t) => !Number.isNaN(t));
+
+   if (timestampValues.length === 0) {
+     throw new Error("at least one valid timestamp is required");
+   }
+
+   // Generate output prefix if not provided
+   const prefix =
+     outputPrefix || join(dirname(video), basename(video, extname(video)));
+
+   console.log(`[cut] splitting at timestamps: ${timestampValues.join(", ")}s`);
+
+   const outputs = await splitAtTimestamps({
+     input: video,
+     timestamps: timestampValues,
+     outputPrefix: prefix,
+   });
+
+   return {
+     outputs,
+     count: outputs.length,
+   };
+ }
+
+ // cli
+ if (import.meta.main) {
+   const { runCli } = await import("../../cli/runner");
+   runCli(meta);
+ }
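
Unlike the captions module, the cut action carries no usage example in its source, so here is a hedged sketch of calling the exported `cut()` function directly. Only the `CutOptions` and `CutResult` shapes visible in the hunk above are assumed; the input path and output prefix are made up for illustration.

```typescript
// Sketch only: programmatic use of the cut() export from the hunk above.
// "podcast.mp4" and the "clips/episode" prefix are hypothetical.
import { cut } from "./action/cut";

const { outputs, count } = await cut({
  video: "podcast.mp4",
  timestamps: "10,30,60",        // cut points in seconds, comma-separated
  outputPrefix: "clips/episode", // optional; defaults next to the input file
});

console.log(`wrote ${count} clips:`, outputs);
```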