scrolltube 2.1.2 → 2.1.3

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/AGENTS.md CHANGED
@@ -11,7 +11,8 @@ You are a ScrollTube Implementation Expert. Your goal is to transform static med
11
11
  ### Step 1: Asset Preparation (The CLI)
12
12
  Before writing any UI code, you MUST process the raw assets (video or images) into a ScrollTube project.
13
13
  ```bash
14
- npx stube create <input_path> --cloud --depth --prompt "main subject" [-s 2]
14
+ npx scrolltube create <input_path> --cloud --depth --track "main subject"
15
+
15
16
  ```
16
17
  - **Why?** This generates optimized multi-resolution frames, optional AI-tracked subject coordinates, and optional depth maps.
17
18
  - **Output**: A directory containing `scrolltube.json` and variant folders (`mobile/`, `desktop/`).
package/README.md CHANGED
@@ -28,7 +28,8 @@ Transform your video into a ScrollTube project from your terminal:
28
28
 
29
29
  ```bash
30
30
  # This will extract frames, track the subject, and generate optimized variants and depth maps.
31
- npx stube create "your-video.mp4" --name "my-project" --track "apple" --cloud --depth
31
+ npx scrolltube create "your-video.mp4" --name "my-project" --track "apple" --cloud --depth
32
+
32
33
  ```
33
34
 
34
35
  #### Programmatic Usage (Browser/Node)
@@ -0,0 +1,2 @@
1
+ #!/usr/bin/env node
2
+ import 'dotenv/config';
@@ -0,0 +1,240 @@
1
+ #!/usr/bin/env node
2
+ "use strict";
3
+ var __createBinding = (this && this.__createBinding) || (Object.create ? (function(o, m, k, k2) {
4
+ if (k2 === undefined) k2 = k;
5
+ var desc = Object.getOwnPropertyDescriptor(m, k);
6
+ if (!desc || ("get" in desc ? !m.__esModule : desc.writable || desc.configurable)) {
7
+ desc = { enumerable: true, get: function() { return m[k]; } };
8
+ }
9
+ Object.defineProperty(o, k2, desc);
10
+ }) : (function(o, m, k, k2) {
11
+ if (k2 === undefined) k2 = k;
12
+ o[k2] = m[k];
13
+ }));
14
+ var __setModuleDefault = (this && this.__setModuleDefault) || (Object.create ? (function(o, v) {
15
+ Object.defineProperty(o, "default", { enumerable: true, value: v });
16
+ }) : function(o, v) {
17
+ o["default"] = v;
18
+ });
19
+ var __importStar = (this && this.__importStar) || (function () {
20
+ var ownKeys = function(o) {
21
+ ownKeys = Object.getOwnPropertyNames || function (o) {
22
+ var ar = [];
23
+ for (var k in o) if (Object.prototype.hasOwnProperty.call(o, k)) ar[ar.length] = k;
24
+ return ar;
25
+ };
26
+ return ownKeys(o);
27
+ };
28
+ return function (mod) {
29
+ if (mod && mod.__esModule) return mod;
30
+ var result = {};
31
+ if (mod != null) for (var k = ownKeys(mod), i = 0; i < k.length; i++) if (k[i] !== "default") __createBinding(result, mod, k[i]);
32
+ __setModuleDefault(result, mod);
33
+ return result;
34
+ };
35
+ })();
36
+ var __importDefault = (this && this.__importDefault) || function (mod) {
37
+ return (mod && mod.__esModule) ? mod : { "default": mod };
38
+ };
39
+ Object.defineProperty(exports, "__esModule", { value: true });
40
+ const commander_1 = require("commander");
41
+ const chalk_1 = __importDefault(require("chalk"));
42
+ const fs = __importStar(require("fs-extra"));
43
+ const path = __importStar(require("path"));
44
+ const child_process_1 = require("child_process");
45
+ const ffmpeg_static_1 = __importDefault(require("ffmpeg-static"));
46
+ const pipeline_1 = require("../pipeline");
47
+ const readline = __importStar(require("readline"));
48
+ require("dotenv/config");
49
+ const pkg = require('../../package.json');
50
+ /**
51
+ * MEDIA QUERY BUILDER
52
+ */
53
+ function buildVariantsFromIds(input) {
54
+ const result = [];
55
+ const orientations = ['portrait', 'landscape'];
56
+ // 1. Process each "Target" resolution
57
+ input.forEach(item => {
58
+ let res = 0;
59
+ if (typeof item === 'number')
60
+ res = item;
61
+ else if (typeof item === 'string')
62
+ res = parseInt(item);
63
+ else if (item.height)
64
+ res = item.height; // Assume height is the defining metric
65
+ if (!res || isNaN(res))
66
+ return;
67
+ orientations.forEach(orient => {
68
+ const isPortrait = orient === 'portrait';
69
+ const width = isPortrait ? res : Math.round(res * (16 / 9));
70
+ const height = isPortrait ? Math.round(res * (16 / 9)) : res;
71
+ result.push({
72
+ id: `${res}p_${orient.substring(0, 1)}`,
73
+ width,
74
+ height,
75
+ orientation: orient,
76
+ aspectRatio: isPortrait ? '9:16' : '16:9',
77
+ media: `(orientation: ${orient})` // Minimal fallback
78
+ });
79
+ });
80
+ });
81
+ // 2. Sort by height (ascending) so the engine finds the first one that fits
82
+ return result.sort((a, b) => a.height - b.height);
83
+ }
84
+ /**
85
+ * CONFIG LOADER
86
+ * Looks for scrolltube.config.js/ts in the current working directory.
87
+ */
88
+ async function loadProjectConfig() {
89
+ const possiblePaths = [
90
+ path.join(process.cwd(), 'scrolltube.cli.config.js'),
91
+ path.join(process.cwd(), 'scrolltube.cli.config.cjs'),
92
+ path.join(process.cwd(), 'scrolltube.cli.config.ts')
93
+ ];
94
+ for (const p of possiblePaths) {
95
+ if (fs.existsSync(p)) {
96
+ try {
97
+ // For simplicity in CLI we handle commonjs/esm basics
98
+ // If it's TS, it might need jiti or ts-node, but let's assume JS for now
99
+ // or use a simple dynamic import if supported.
100
+ return require(p);
101
+ }
102
+ catch (e) {
103
+ console.warn(chalk_1.default.yellow(`⚠️ Found config at ${p} but failed to load it. Skipping...`));
104
+ }
105
+ }
106
+ }
107
+ return null;
108
+ }
109
+ /**
110
+ * Robust FFmpeg Detection
111
+ * Prioritizes bundled static binary, then system PATH.
112
+ */
113
+ function getFFmpegPath() {
114
+ // 1. Try bundled ffmpeg-static
115
+ if (ffmpeg_static_1.default)
116
+ return ffmpeg_static_1.default;
117
+ // 2. Try system PATH
118
+ try {
119
+ (0, child_process_1.execSync)('ffmpeg -version', { stdio: 'ignore' });
120
+ return 'ffmpeg';
121
+ }
122
+ catch (e) {
123
+ return null;
124
+ }
125
+ }
126
+ const program = new commander_1.Command();
127
+ program
128
+ .name('scrolltube')
129
+ .description('ScrollTube CLI - Immersive Web SDK')
130
+ .version(pkg.version);
131
+ /**
132
+ * Interactive Helper
133
+ */
134
+ async function prompt(question, defaultValue) {
135
+ const rl = readline.createInterface({
136
+ input: process.stdin,
137
+ output: process.stdout
138
+ });
139
+ return new Promise(resolve => {
140
+ rl.question(`${chalk_1.default.cyan('?')} ${question}${defaultValue ? ` (${defaultValue})` : ''}: `, (answer) => {
141
+ rl.close();
142
+ resolve(answer.trim() || defaultValue || '');
143
+ });
144
+ });
145
+ }
146
+ program
147
+ .command('create')
148
+ .description('ONE-STEP: Transform video/images into a responsive ScrollTube')
149
+ .argument('[input]', 'Path to input video or directory of images')
150
+ .option('-o, --output <dir>', 'Output directory (deprecated, use --name)')
151
+ .option('-p, --track <text>', 'Text prompt for subject tracking', 'main subject')
152
+ .option('-n, --name <string>', 'Name of the project')
153
+ .option('-v, --variants <string>', 'Comma-separated target resolutions (e.g. 720,1080)')
154
+ .option('-s, --step <number>', 'Process every Nth frame (default: 1)', '1')
155
+ .option('--cloud', 'Use Fal.ai for tracking and refinement', false)
156
+ .option('--depth', 'Generate a 3D depth map for the displacement effect (Requires --cloud)', false)
157
+ .action(async (inputArg, opts) => {
158
+ console.log(chalk_1.default.bold.blue('\n🎞️ ScrollTube Asset Pipeline\n'));
159
+ // 0. PRE-FLIGHT CHECK
160
+ const ffmpegPath = getFFmpegPath();
161
+ if (!ffmpegPath) {
162
+ console.error(chalk_1.default.red('\n❌ FFmpeg not found!'));
163
+ console.log(chalk_1.default.yellow('This CLI requires FFmpeg to process videos.'));
164
+ console.log('Please install it manually or ensure regular npm install was successful.');
165
+ process.exit(1);
166
+ }
167
+ const projectConfig = await loadProjectConfig();
168
+ let input = inputArg;
169
+ let track = opts.track;
170
+ let projectName = opts.name;
171
+ let useTracking = opts.cloud; // Default to cloud if flag set
172
+ let useDepth = opts.depth;
173
+ let customVariants = projectConfig?.variants || (opts.variants ? buildVariantsFromIds(opts.variants.split(',')) : null);
174
+ // 1. INPUT VALIDATION (Immediate)
175
+ while (!input || !fs.existsSync(input)) {
176
+ if (input && !fs.existsSync(input)) {
177
+ console.error(chalk_1.default.red(`\n❌ Error: Input path "${input}" does not exist.`));
178
+ }
179
+ input = await prompt('Path to input video or directory of images');
180
+ }
181
+ // 2. PROJECT NAME & SETTINGS
182
+ if (!projectName) {
183
+ projectName = await prompt('Project name', 'scrolltube-project');
184
+ }
185
+ let step = parseInt(opts.step) || 1;
186
+ if (!inputArg) {
187
+ const stepInput = await prompt('Process every Nth frame (Step size)', '1');
188
+ step = parseInt(stepInput) || 1;
189
+ }
190
+ // AI Tracking logic preserved in CLI wrapper...
191
+ // ...
192
+ const pipeline = new pipeline_1.AssetPipeline({
193
+ apiKey: process.env.FAL_KEY,
194
+ onProgress: (p) => {
195
+ // You could add a progress bar here
196
+ }
197
+ });
198
+ try {
199
+ await pipeline.create({
200
+ input: input,
201
+ name: projectName,
202
+ track: useTracking ? track : undefined,
203
+ depth: useDepth,
204
+ variants: customVariants || [720, 1080],
205
+ step: step
206
+ });
207
+ console.log(chalk_1.default.bold.green(`\n✅ Project Created Successfully!`));
208
+ console.log(chalk_1.default.white(`📍 Output: ${projectName}`));
209
+ console.log(chalk_1.default.white(`📜 Config: scrolltube.json`));
210
+ console.log(chalk_1.default.cyan(`\nNext: Import the .json into your <ScrollTubeProvider />\n`));
211
+ }
212
+ catch (err) {
213
+ console.error(chalk_1.default.red(`\n❌ Error during pipeline: ${err.message}`));
214
+ process.exit(1);
215
+ }
216
+ });
217
+ // NEW UPDATE COMMAND
218
+ program
219
+ .command('update')
220
+ .description('Rerun extraction and tracking on an existing project')
221
+ .argument('<dir>', 'Project directory')
222
+ .option('-p, --track <text>', 'Additional subject to track')
223
+ .action(async (dir, opts) => {
224
+ console.log(chalk_1.default.bold.yellow('\n♻️ ScrollTube Update Pipeline\n'));
225
+ const projectPath = path.resolve(dir);
226
+ const configPath = path.join(projectPath, 'scrolltube.json');
227
+ if (!fs.existsSync(configPath)) {
228
+ console.error(chalk_1.default.red('❌ Not a valid ScrollTube project directory (missing scrolltube.json).'));
229
+ process.exit(1);
230
+ }
231
+ const config = await fs.readJson(configPath);
232
+ if (config.version !== pkg.version) {
233
+ console.warn(chalk_1.default.yellow(`⚠️ Version Mismatch: Project is ${config.version}, CLI is ${pkg.version}`));
234
+ }
235
+ console.log(chalk_1.default.red('⚠️ UNDER CONSTRUCTION'));
236
+ console.log(chalk_1.default.yellow('The "update" command is currently being refactored for the Universal Pipeline.'));
237
+ console.log(chalk_1.default.dim('Please use "scrolltube create" to regenerate your project for now.\n'));
238
+ process.exit(0);
239
+ });
240
+ program.parse(process.argv);
@@ -9,6 +9,7 @@ export interface ProjectConfiguration {
9
9
  settings: ProjectSettings;
10
10
  assets: SequenceAsset[];
11
11
  timeline: TimelineDefinition;
12
+ source?: string;
12
13
  }
13
14
  export interface ProjectSettings {
14
15
  baseResolution: {
@@ -8,7 +8,12 @@ export declare class BrowserDriver implements IPipelineDriver {
8
8
  exists(path: string): Promise<boolean>;
9
9
  readdir(dirPath: string): Promise<string[]>;
10
10
  remove(path: string): Promise<void>;
11
+ copyFile(src: string, dest: string): Promise<void>;
11
12
  join(...parts: string[]): string;
13
+ getVideoDimensions(input: string | File | Blob): Promise<{
14
+ width: number;
15
+ height: number;
16
+ }>;
12
17
  resolve(...parts: string[]): string;
13
18
  /**
14
19
  * EXTRACT FRAMES (via ffmpeg.wasm)
@@ -80,9 +80,38 @@ class BrowserDriver {
80
80
  }
81
81
  }
82
82
  }
83
+ async copyFile(src, dest) {
84
+ const data = await this.readFile(src);
85
+ await this.writeFile(dest, data);
86
+ }
83
87
  join(...parts) {
84
88
  return parts.join('/').replace(/\/+/g, '/');
85
89
  }
90
+ async getVideoDimensions(input) {
91
+ return new Promise((resolve, reject) => {
92
+ let url;
93
+ if (typeof input === 'string') {
94
+ // Fallback for paths if they represent URLs in browser
95
+ url = input;
96
+ }
97
+ else {
98
+ url = URL.createObjectURL(input);
99
+ }
100
+ const video = document.createElement('video');
101
+ video.onloadedmetadata = () => {
102
+ const dimensions = { width: video.videoWidth, height: video.videoHeight };
103
+ if (typeof input !== 'string')
104
+ URL.revokeObjectURL(url);
105
+ resolve(dimensions);
106
+ };
107
+ video.onerror = (e) => {
108
+ if (typeof input !== 'string')
109
+ URL.revokeObjectURL(url);
110
+ reject(new Error('Failed to load video metadata in browser.'));
111
+ };
112
+ video.src = url;
113
+ });
114
+ }
86
115
  resolve(...parts) {
87
116
  return this.join(...parts);
88
117
  }
@@ -109,7 +138,7 @@ class BrowserDriver {
109
138
  const inputName = 'input.mp4';
110
139
  await ffmpeg.writeFile(inputName, await fetchFile(videoSource));
111
140
  // Extract as PNGs/WebPs (WebP might be faster if supported in the WASM build)
112
- await ffmpeg.exec(['-i', inputName, `${outputDir}/frame_%04d.png`]);
141
+ await ffmpeg.exec(['-hide_banner', '-loglevel', 'error', '-i', inputName, `${outputDir}/frame_%04d.png`]);
113
142
  // Move files from FFmpeg VFS to our Map FS
114
143
  const files = await ffmpeg.listDir(outputDir);
115
144
  for (const file of files) {
@@ -17,5 +17,5 @@ export declare class AssetPipeline {
17
17
  create(opts: CreateCommandOptions): Promise<ProjectConfiguration | Uint8Array<ArrayBufferLike>>;
18
18
  private normalizeVariants;
19
19
  private processVariants;
20
- saveConfig(variants: AssetVariant[], outDir: string): Promise<ProjectConfiguration>;
20
+ saveConfig(variants: AssetVariant[], outDir: string, sourcePath?: string): Promise<ProjectConfiguration>;
21
21
  }
@@ -32,9 +32,13 @@ var __importStar = (this && this.__importStar) || (function () {
32
32
  return result;
33
33
  };
34
34
  })();
35
+ var __importDefault = (this && this.__importDefault) || function (mod) {
36
+ return (mod && mod.__esModule) ? mod : { "default": mod };
37
+ };
35
38
  Object.defineProperty(exports, "__esModule", { value: true });
36
39
  exports.AssetPipeline = void 0;
37
40
  const cloud_service_1 = require("./cloud-service");
41
+ const chalk_1 = __importDefault(require("chalk"));
38
42
  class AssetPipeline {
39
43
  driver;
40
44
  options;
@@ -88,11 +92,76 @@ class AssetPipeline {
88
92
  const tempDir = this.driver.join(outDir, '.temp-frames');
89
93
  const framesDir = this.driver.join(tempDir, 'frames');
90
94
  const depthsDir = this.driver.join(tempDir, 'depths');
95
+ // Inform users of agents using this command, that this may take a few minutes to complete.
96
+ console.log(chalk_1.default.yellow('\n⚠️ This may take a few minutes to complete.\n'));
91
97
  this.report('initializing', 0, `Creating project: ${name}`);
92
98
  await this.driver.mkdir(outDir);
93
99
  await this.driver.mkdir(tempDir);
94
100
  await this.driver.mkdir(framesDir);
95
101
  await this.driver.mkdir(depthsDir);
102
+ // 0. PRE-FLIGHT: DIMENSIONS & VARIANT FILTERING
103
+ this.report('initializing', 5, 'Detecting source dimensions...');
104
+ let sourceDimensions = { width: 1920, height: 1080 }; // Default fallback
105
+ try {
106
+ sourceDimensions = await this.driver.getVideoDimensions(input);
107
+ console.log(chalk_1.default.cyan(`🎞️ Source Resolution: ${sourceDimensions.width}x${sourceDimensions.height}`));
108
+ }
109
+ catch (e) {
110
+ console.warn(chalk_1.default.yellow(`⚠️ Could not detect source dimensions. Proceeding with defaults.`));
111
+ }
112
+ const requestedVariants = this.normalizeVariants(opts.variants);
113
+ const validVariants = requestedVariants.filter(v => {
114
+ const isTooLarge = v.width > sourceDimensions.width || v.height > sourceDimensions.height;
115
+ if (isTooLarge) {
116
+ console.warn(chalk_1.default.yellow(`⚠️ Skipping variant ${v.id} (${v.width}x${v.height}) as it exceeds source resolution. (Upscaling is disabled)`));
117
+ return false;
118
+ }
119
+ return true;
120
+ });
121
+ if (validVariants.length === 0 && requestedVariants.length > 0) {
122
+ console.warn(chalk_1.default.bold.red(`\n❌ All requested variants were too large for the source video!`));
123
+ console.log(chalk_1.default.white(`Hint: Upscale your video first, or request smaller variants.\n`));
124
+ // Re-add at least one matching the source? No, let's let the user decide or use a safe fallback.
125
+ // For now, let's use the source resolution as a single variant if everything else failed.
126
+ const sourceVariant = {
127
+ id: 'source-res',
128
+ width: sourceDimensions.width,
129
+ height: sourceDimensions.height,
130
+ orientation: sourceDimensions.width > sourceDimensions.height ? 'landscape' : 'portrait',
131
+ aspectRatio: `${sourceDimensions.width}:${sourceDimensions.height}`,
132
+ media: '(min-width: 0px)'
133
+ };
134
+ validVariants.push(sourceVariant);
135
+ console.log(chalk_1.default.blue(`ℹ️ Falling back to source resolution variant: ${sourceDimensions.width}x${sourceDimensions.height}`));
136
+ }
137
+ // 0. SAVE SOURCE (Copy input video to the project directory)
138
+ let sourceRelPath = '';
139
+ try {
140
+ let sourceFileName = 'video-source';
141
+ let extension = '.mp4';
142
+ if (typeof input === 'string') {
143
+ const parts = input.split('.');
144
+ if (parts.length > 1)
145
+ extension = `.${parts.pop()}`;
146
+ sourceFileName += extension;
147
+ await this.driver.copyFile(input, this.driver.join(outDir, sourceFileName));
148
+ sourceRelPath = `./${sourceFileName}`;
149
+ }
150
+ else if (input && input.arrayBuffer) {
151
+ // Handle File/Blob (Browser)
152
+ const fileName = input.name || 'source.mp4';
153
+ const parts = fileName.split('.');
154
+ if (parts.length > 1)
155
+ extension = `.${parts.pop()}`;
156
+ sourceFileName += extension;
157
+ const buffer = await input.arrayBuffer();
158
+ await this.driver.writeFile(this.driver.join(outDir, sourceFileName), new Uint8Array(buffer));
159
+ sourceRelPath = `./${sourceFileName}`;
160
+ }
161
+ }
162
+ catch (e) {
163
+ console.warn(chalk_1.default.yellow(`⚠️ Could not save a local copy of source video: ${e instanceof Error ? e.message : String(e)}`));
164
+ }
96
165
  // 1. FRAME EXTRACTION
97
166
  this.report('extracting', 10, 'Extracting frames from source...');
98
167
  await this.driver.extractFrames(input, framesDir);
@@ -127,12 +196,12 @@ class AssetPipeline {
127
196
  const variants = await this.processVariants(tempDir, trackingData, {
128
197
  step,
129
198
  depth: isDepthActive,
130
- variants: this.normalizeVariants(opts.variants),
199
+ variants: validVariants,
131
200
  outDir
132
201
  });
133
202
  // 4. SAVE CONFIG
134
203
  this.report('saving', 90, 'Finalizing project configuration...');
135
- const config = await this.saveConfig(variants, outDir);
204
+ const config = await this.saveConfig(variants, outDir, sourceRelPath);
136
205
  // Cleanup
137
206
  await this.driver.remove(tempDir);
138
207
  this.report('saving', 100, 'Project ready!');
@@ -206,7 +275,7 @@ class AssetPipeline {
206
275
  }
207
276
  return assetVariants;
208
277
  }
209
- async saveConfig(variants, outDir) {
278
+ async saveConfig(variants, outDir, sourcePath) {
210
279
  const pkg = require('../../package.json');
211
280
  const config = {
212
281
  version: pkg.version,
@@ -216,10 +285,13 @@ class AssetPipeline {
216
285
  totalDuration: "300vh",
217
286
  scenes: [{
218
287
  id: "scene-1", assetId: "main-sequence", startProgress: 0, duration: 1,
219
- assetRange: [0, variants[0].frameCount - 1], layers: []
288
+ assetRange: [0, (variants.length > 0 ? variants[0].frameCount : 1) - 1], layers: []
220
289
  }]
221
290
  }
222
291
  };
292
+ if (sourcePath) {
293
+ config.source = sourcePath;
294
+ }
223
295
  await this.driver.writeFile(this.driver.join(outDir, 'scrolltube.json'), JSON.stringify(config, null, 2));
224
296
  return config;
225
297
  }
@@ -8,8 +8,14 @@ export declare class NodeDriver implements IPipelineDriver {
8
8
  exists(filePath: string): Promise<boolean>;
9
9
  readdir(dirPath: string): Promise<string[]>;
10
10
  remove(filePath: string): Promise<void>;
11
+ copyFile(src: string, dest: string): Promise<void>;
11
12
  join(...parts: string[]): string;
12
13
  resolve(...parts: string[]): string;
14
+ getVideoDimensions(input: string): Promise<{
15
+ width: number;
16
+ height: number;
17
+ }>;
18
+ private parseDimensions;
13
19
  extractFrames(videoSource: string, outputDir: string, onProgress?: (percent: number) => void): Promise<void>;
14
20
  processImage(input: Uint8Array | string, config: VariantConfig, options?: {
15
21
  grayscale?: boolean;
@@ -70,19 +70,45 @@ class NodeDriver {
70
70
  async remove(filePath) {
71
71
  await fs.remove(filePath);
72
72
  }
73
+ async copyFile(src, dest) {
74
+ await fs.copy(src, dest);
75
+ }
73
76
  join(...parts) {
74
77
  return path.join(...parts);
75
78
  }
76
79
  resolve(...parts) {
77
80
  return path.resolve(...parts);
78
81
  }
82
+ async getVideoDimensions(input) {
83
+ return new Promise((resolve, reject) => {
84
+ try {
85
+ const result = (0, child_process_1.execSync)(`"${this.ffmpegPath}" -i "${input}"`, { stdio: 'pipe' }).toString();
86
+ // ffmpeg outputs info to stderr, which execSync might throw on if -i is used without an output file
87
+ this.parseDimensions(result, resolve, reject);
88
+ }
89
+ catch (err) {
90
+ // execSync throws if exit code != 0, but ffmpeg -i returns 1 because no output file
91
+ const output = err.stderr ? err.stderr.toString() : (err.stdout ? err.stdout.toString() : '');
92
+ this.parseDimensions(output, resolve, reject);
93
+ }
94
+ });
95
+ }
96
+ parseDimensions(output, resolve, reject) {
97
+ const match = output.match(/, (\d{2,5})x(\d{2,5})/);
98
+ if (match) {
99
+ resolve({ width: parseInt(match[1]), height: parseInt(match[2]) });
100
+ }
101
+ else {
102
+ reject(new Error('Could not parse video dimensions.'));
103
+ }
104
+ }
79
105
  async extractFrames(videoSource, outputDir, onProgress) {
80
106
  return new Promise((resolve, reject) => {
81
107
  // For simplicity, we use execSync in a promise or spawn for progress
82
108
  try {
83
109
  // ffmpeg -i input output%04d.png
84
110
  // For now, let's keep it simple like existing CLI
85
- (0, child_process_1.execSync)(`"${this.ffmpegPath}" -i "${videoSource}" "${outputDir}/frame_%04d.png"`, { stdio: 'inherit' });
111
+ (0, child_process_1.execSync)(`"${this.ffmpegPath}" -hide_banner -loglevel error -i "${videoSource}" "${outputDir}/frame_%04d.png"`, { stdio: 'inherit' });
86
112
  resolve();
87
113
  }
88
114
  catch (err) {
@@ -18,8 +18,13 @@ export interface IPipelineDriver {
18
18
  exists(path: string): Promise<boolean>;
19
19
  readdir(path: string): Promise<string[]>;
20
20
  remove(path: string): Promise<void>;
21
+ copyFile(src: string, dest: string): Promise<void>;
21
22
  join(...parts: string[]): string;
22
23
  resolve(...parts: string[]): string;
24
+ getVideoDimensions(input: string | File | Blob): Promise<{
25
+ width: number;
26
+ height: number;
27
+ }>;
23
28
  extractFrames(videoSource: string | File | Blob, outputDir: string, onProgress?: (percent: number) => void): Promise<void>;
24
29
  processImage(input: Uint8Array | string, config: VariantConfig, options: {
25
30
  grayscale?: boolean;
@@ -10,7 +10,8 @@ For an AI to successfully build an experience, it must follow these two phases:
10
10
  The agent should first use the CLI to process raw media into an intelligent ScrollTube project.
11
11
  ```bash
12
12
  # Example: Process a video with AI tracking and depth maps
13
- npx stube create input.mp4 --name "my-project" --track "main product" --cloud --depth
13
+ npx scrolltube create input.mp4 --name "my-project" --track "main product" --cloud --depth
14
+
14
15
  ```
15
16
  **Output:** A directory containing optimized frames, depth maps, and a `scrolltube.json` file.
16
17
 
@@ -47,7 +48,8 @@ Paste this into your chat to turn an AI into a ScrollTube specialist:
47
48
  You are the ScrollTube Implementation Specialist. Your goal is to design immersive scroll experiences.
48
49
 
49
50
  Workflow:
50
- 1. CLI FIRST: Start by suggesting `npx stube create` to process assets.
51
+ 1. CLI FIRST: Start by suggesting `npx scrolltube create` to process assets.
52
+
51
53
  2. ENGINE AWARE: Use 'ScrollTubeProvider' to sync the engine with React state.
52
54
  3. SUBJECT PINS: Use 'SubjectLayer' to attach UI to the product coordinates found by the AI tracker.
53
55
  4. DYNAMIC UI: Use the 'progress' (0-1) or 'frame' count from 'useScrollTube' for custom triggers.
@@ -15,6 +15,8 @@ The engine expects a `ProjectConfiguration` object (defined in `src/core/types.t
15
15
  - **`settings`**: Base resolutions, scroll modes, and base path.
16
16
  - **`assets`**: An array of `SequenceAsset` with multiple `variants` (Mobile vs Desktop).
17
17
  - **`timeline`**: A map of `scenes` and `layers`.
18
+ - **`source`**: (New) Relative path to the original source video file, preserved for future edits or variant regenerations.
19
+
18
20
 
19
21
  ---
20
22
 
@@ -18,12 +18,17 @@ This means you have a single source of truth for your processing logic, while be
18
18
 
19
19
  ## 2. CLI Usage
20
20
 
21
- The `npx stube create` command is the primary wrapper for the pipeline on your local machine.
21
+ The `npx scrolltube create` command is the primary wrapper for the pipeline on your local machine.
22
+
22
23
 
23
24
  ```bash
24
- npx stube create <input> [options]
25
+ npx scrolltube create <input> [options]
25
26
  ```
26
27
 
28
+ > [!TIP]
29
+ > **Interactive Mode**: If you omit the input path or provide an invalid one, the CLI will now prompt you to try again instead of exiting.
30
+
31
+
27
32
  ### Options:
28
33
  - `-n, --name <string>`: Project folder name.
29
34
  - `-p, --track <text>`: Target object to track (e.g. "red car").
@@ -56,14 +61,16 @@ const zip = await pipeline.create({
56
61
  ```
57
62
 
58
63
  ### Core Pipeline Steps:
59
- 1. **Auto-Upload**: If you provide a local `.mp4`, it's automatically uploaded to the cloud for processing.
60
- 2. **Extraction**: Converts video files into high-quality image sequences.
64
+ 1. **Source Preservation**: Automatically saves a copy of your original video as `video-source.[ext]` in the project directory for future-proofing.
65
+ 2. **Extraction**: Converts video files into high-quality image sequences (with minimal terminal noise).
61
66
  3. **AI Tracking**: Identifies the main subject (using **SAM 3**). Our engine now features **Sticky Tracking**—if the subject is obscured for a few frames, the coordinates hold their last known position.
62
- 4. **Variant Generation**:
67
+ 4. **Upscale Protection**: Automatically detects source dimensions and filters out any requested variants that would require upscaling, ensuring maximum performance and visual quality.
68
+ 5. **Variant Generation**:
63
69
  - **Smart Crop**: Centers the images based on the tracked subject.
64
70
  - **Resolution Factory**: Creates Portrait (9:16) and Landscape (16:9) pairs for each target resolution (e.g. 720p, 1080p).
65
71
  - **Compression**: Optimized `.webp` generation via Sharp (Node) or Canvas (Browser).
66
- 5. **Metadata Export**: Generates the final `scrolltube.json` with **root-relative paths** for easier deployment.
72
+ 6. **Metadata Export**: Generates the final `scrolltube.json` with **root-relative paths** and source file references.
73
+
67
74
 
68
75
  ---
69
76
 
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "scrolltube",
3
- "version": "2.1.2",
3
+ "version": "2.1.3",
4
4
  "description": "ScrollTube is a web-based tool for scroll-triggered animations.",
5
5
  "main": "dist/core/scrolltube.umd.min.js",
6
6
  "module": "dist/core/scrolltube.umd.min.js",
@@ -23,7 +23,7 @@
23
23
  },
24
24
  "types": "dist/core/index.d.ts",
25
25
  "bin": {
26
- "stube": "dist/cli/index.js"
26
+ "scrolltube": "dist/cli/index.js"
27
27
  },
28
28
  "files": [
29
29
  "dist",