scrolltube 2.1.2 → 2.1.4

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -32,9 +32,13 @@ var __importStar = (this && this.__importStar) || (function () {
32
32
  return result;
33
33
  };
34
34
  })();
35
+ var __importDefault = (this && this.__importDefault) || function (mod) {
36
+ return (mod && mod.__esModule) ? mod : { "default": mod };
37
+ };
35
38
  Object.defineProperty(exports, "__esModule", { value: true });
36
39
  exports.AssetPipeline = void 0;
37
40
  const cloud_service_1 = require("./cloud-service");
41
+ const chalk_1 = __importDefault(require("chalk"));
38
42
  class AssetPipeline {
39
43
  driver;
40
44
  options;
@@ -88,11 +92,76 @@ class AssetPipeline {
88
92
  const tempDir = this.driver.join(outDir, '.temp-frames');
89
93
  const framesDir = this.driver.join(tempDir, 'frames');
90
94
  const depthsDir = this.driver.join(tempDir, 'depths');
95
+ // Inform users or agents using this command that this may take a few minutes to complete.
96
+ console.log(chalk_1.default.yellow('\n⚠️ This may take a few minutes to complete.\n'));
91
97
  this.report('initializing', 0, `Creating project: ${name}`);
92
98
  await this.driver.mkdir(outDir);
93
99
  await this.driver.mkdir(tempDir);
94
100
  await this.driver.mkdir(framesDir);
95
101
  await this.driver.mkdir(depthsDir);
102
+ // 0. PRE-FLIGHT: DIMENSIONS & VARIANT FILTERING
103
+ this.report('initializing', 5, 'Detecting source dimensions...');
104
+ let sourceDimensions = { width: 1920, height: 1080 }; // Default fallback
105
+ try {
106
+ sourceDimensions = await this.driver.getVideoDimensions(input);
107
+ console.log(chalk_1.default.cyan(`🎞️ Source Resolution: ${sourceDimensions.width}x${sourceDimensions.height}`));
108
+ }
109
+ catch (e) {
110
+ console.warn(chalk_1.default.yellow(`⚠️ Could not detect source dimensions. Proceeding with defaults.`));
111
+ }
112
+ const requestedVariants = this.normalizeVariants(opts.variants);
113
+ const validVariants = requestedVariants.filter(v => {
114
+ const isTooLarge = v.width > sourceDimensions.width || v.height > sourceDimensions.height;
115
+ if (isTooLarge) {
116
+ console.warn(chalk_1.default.yellow(`⚠️ Skipping variant ${v.id} (${v.width}x${v.height}) as it exceeds source resolution. (Upscaling is disabled)`));
117
+ return false;
118
+ }
119
+ return true;
120
+ });
121
+ if (validVariants.length === 0 && requestedVariants.length > 0) {
122
+ console.warn(chalk_1.default.bold.red(`\n❌ All requested variants were too large for the source video!`));
123
+ console.log(chalk_1.default.white(`Hint: Upscale your video first, or request smaller variants.\n`));
124
+ // Re-add at least one matching the source? No, let's let the user decide or use a safe fallback.
125
+ // For now, let's use the source resolution as a single variant if everything else failed.
126
+ const sourceVariant = {
127
+ id: 'source-res',
128
+ width: sourceDimensions.width,
129
+ height: sourceDimensions.height,
130
+ orientation: sourceDimensions.width > sourceDimensions.height ? 'landscape' : 'portrait',
131
+ aspectRatio: `${sourceDimensions.width}:${sourceDimensions.height}`,
132
+ media: '(min-width: 0px)'
133
+ };
134
+ validVariants.push(sourceVariant);
135
+ console.log(chalk_1.default.blue(`ℹ️ Falling back to source resolution variant: ${sourceDimensions.width}x${sourceDimensions.height}`));
136
+ }
137
+ // 0. SAVE SOURCE (Copy input video to the project directory)
138
+ let sourceRelPath = '';
139
+ try {
140
+ let sourceFileName = 'video-source';
141
+ let extension = '.mp4';
142
+ if (typeof input === 'string') {
143
+ const parts = input.split('.');
144
+ if (parts.length > 1)
145
+ extension = `.${parts.pop()}`;
146
+ sourceFileName += extension;
147
+ await this.driver.copyFile(input, this.driver.join(outDir, sourceFileName));
148
+ sourceRelPath = `./${sourceFileName}`;
149
+ }
150
+ else if (input && input.arrayBuffer) {
151
+ // Handle File/Blob (Browser)
152
+ const fileName = input.name || 'source.mp4';
153
+ const parts = fileName.split('.');
154
+ if (parts.length > 1)
155
+ extension = `.${parts.pop()}`;
156
+ sourceFileName += extension;
157
+ const buffer = await input.arrayBuffer();
158
+ await this.driver.writeFile(this.driver.join(outDir, sourceFileName), new Uint8Array(buffer));
159
+ sourceRelPath = `./${sourceFileName}`;
160
+ }
161
+ }
162
+ catch (e) {
163
+ console.warn(chalk_1.default.yellow(`⚠️ Could not save a local copy of source video: ${e instanceof Error ? e.message : String(e)}`));
164
+ }
96
165
  // 1. FRAME EXTRACTION
97
166
  this.report('extracting', 10, 'Extracting frames from source...');
98
167
  await this.driver.extractFrames(input, framesDir);
@@ -127,12 +196,12 @@ class AssetPipeline {
127
196
  const variants = await this.processVariants(tempDir, trackingData, {
128
197
  step,
129
198
  depth: isDepthActive,
130
- variants: this.normalizeVariants(opts.variants),
199
+ variants: validVariants,
131
200
  outDir
132
201
  });
133
202
  // 4. SAVE CONFIG
134
203
  this.report('saving', 90, 'Finalizing project configuration...');
135
- const config = await this.saveConfig(variants, outDir);
204
+ const config = await this.saveConfig(variants, outDir, sourceRelPath);
136
205
  // Cleanup
137
206
  await this.driver.remove(tempDir);
138
207
  this.report('saving', 100, 'Project ready!');
@@ -206,7 +275,7 @@ class AssetPipeline {
206
275
  }
207
276
  return assetVariants;
208
277
  }
209
- async saveConfig(variants, outDir) {
278
+ async saveConfig(variants, outDir, sourcePath) {
210
279
  const pkg = require('../../package.json');
211
280
  const config = {
212
281
  version: pkg.version,
@@ -216,10 +285,13 @@ class AssetPipeline {
216
285
  totalDuration: "300vh",
217
286
  scenes: [{
218
287
  id: "scene-1", assetId: "main-sequence", startProgress: 0, duration: 1,
219
- assetRange: [0, variants[0].frameCount - 1], layers: []
288
+ assetRange: [0, (variants.length > 0 ? variants[0].frameCount : 1) - 1], layers: []
220
289
  }]
221
290
  }
222
291
  };
292
+ if (sourcePath) {
293
+ config.source = sourcePath;
294
+ }
223
295
  await this.driver.writeFile(this.driver.join(outDir, 'scrolltube.json'), JSON.stringify(config, null, 2));
224
296
  return config;
225
297
  }
@@ -8,8 +8,14 @@ export declare class NodeDriver implements IPipelineDriver {
8
8
  exists(filePath: string): Promise<boolean>;
9
9
  readdir(dirPath: string): Promise<string[]>;
10
10
  remove(filePath: string): Promise<void>;
11
+ copyFile(src: string, dest: string): Promise<void>;
11
12
  join(...parts: string[]): string;
12
13
  resolve(...parts: string[]): string;
14
+ getVideoDimensions(input: string): Promise<{
15
+ width: number;
16
+ height: number;
17
+ }>;
18
+ private parseDimensions;
13
19
  extractFrames(videoSource: string, outputDir: string, onProgress?: (percent: number) => void): Promise<void>;
14
20
  processImage(input: Uint8Array | string, config: VariantConfig, options?: {
15
21
  grayscale?: boolean;
@@ -70,19 +70,45 @@ class NodeDriver {
70
70
  async remove(filePath) {
71
71
  await fs.remove(filePath);
72
72
  }
73
+ async copyFile(src, dest) {
74
+ await fs.copy(src, dest);
75
+ }
73
76
  join(...parts) {
74
77
  return path.join(...parts);
75
78
  }
76
79
  resolve(...parts) {
77
80
  return path.resolve(...parts);
78
81
  }
82
+ async getVideoDimensions(input) {
83
+ return new Promise((resolve, reject) => {
84
+ try {
85
+ const result = (0, child_process_1.execSync)(`"${this.ffmpegPath}" -i "${input}"`, { stdio: 'pipe' }).toString();
86
+ // ffmpeg outputs info to stderr, which execSync might throw on if -i is used without an output file
87
+ this.parseDimensions(result, resolve, reject);
88
+ }
89
+ catch (err) {
90
+ // execSync throws if exit code != 0, but ffmpeg -i returns 1 because no output file
91
+ const output = err.stderr ? err.stderr.toString() : (err.stdout ? err.stdout.toString() : '');
92
+ this.parseDimensions(output, resolve, reject);
93
+ }
94
+ });
95
+ }
96
+ parseDimensions(output, resolve, reject) {
97
+ const match = output.match(/, (\d{2,5})x(\d{2,5})/);
98
+ if (match) {
99
+ resolve({ width: parseInt(match[1]), height: parseInt(match[2]) });
100
+ }
101
+ else {
102
+ reject(new Error('Could not parse video dimensions.'));
103
+ }
104
+ }
79
105
  async extractFrames(videoSource, outputDir, onProgress) {
80
106
  return new Promise((resolve, reject) => {
81
107
  // For simplicity, we use execSync in a promise or spawn for progress
82
108
  try {
83
109
  // ffmpeg -i input output%04d.png
84
110
  // For now, let's keep it simple like existing CLI
85
- (0, child_process_1.execSync)(`"${this.ffmpegPath}" -i "${videoSource}" "${outputDir}/frame_%04d.png"`, { stdio: 'inherit' });
111
+ (0, child_process_1.execSync)(`"${this.ffmpegPath}" -hide_banner -loglevel error -i "${videoSource}" "${outputDir}/frame_%04d.png"`, { stdio: 'inherit' });
86
112
  resolve();
87
113
  }
88
114
  catch (err) {
@@ -18,8 +18,13 @@ export interface IPipelineDriver {
18
18
  exists(path: string): Promise<boolean>;
19
19
  readdir(path: string): Promise<string[]>;
20
20
  remove(path: string): Promise<void>;
21
+ copyFile(src: string, dest: string): Promise<void>;
21
22
  join(...parts: string[]): string;
22
23
  resolve(...parts: string[]): string;
24
+ getVideoDimensions(input: string | File | Blob): Promise<{
25
+ width: number;
26
+ height: number;
27
+ }>;
23
28
  extractFrames(videoSource: string | File | Blob, outputDir: string, onProgress?: (percent: number) => void): Promise<void>;
24
29
  processImage(input: Uint8Array | string, config: VariantConfig, options: {
25
30
  grayscale?: boolean;
@@ -10,9 +10,10 @@ For an AI to successfully build an experience, it must follow these two phases:
10
10
  The agent should first use the CLI to process raw media into an intelligent ScrollTube project.
11
11
  ```bash
12
12
  # Example: Process a video with AI tracking and depth maps
13
- npx stube create input.mp4 --name "my-project" --track "main product" --cloud --depth
13
+ npx scrolltube create input.mp4 --name "my-project" --track "main product" --cloud --depth
14
+
14
15
  ```
15
- **Output:** A directory containing optimized frames, depth maps, and a `scrolltube.json` file.
16
  **Output:** A directory containing optimized frames, depth maps in per-resolution variants, a `scrolltube.json` file, and a copy of the raw source media.
16
17
 
17
18
  ### Phase B: Immersive Implementation (React)
18
19
  The agent then uses the generated JSON to build the UI components.
@@ -30,7 +31,7 @@ export const MyExperience = () => (
30
31
  {/* Pins content to the tracked subject automatically */}
31
32
  <SubjectLayer offset={{ x: 10, y: -5 }}>
32
33
  <div className="info-box">
33
- Premium Tubesmanship
34
+ Hello From ScrollTube
34
35
  </div>
35
36
  </SubjectLayer>
36
37
  </div>
@@ -47,7 +48,8 @@ Paste this into your chat to turn an AI into a ScrollTube specialist:
47
48
  You are the ScrollTube Implementation Specialist. Your goal is to design immersive scroll experiences.
48
49
 
49
50
  Workflow:
50
- 1. CLI FIRST: Start by suggesting `npx stube create` to process assets.
51
+ 1. CLI FIRST: Start by suggesting `npx scrolltube create` to process assets.
52
+
51
53
  2. ENGINE AWARE: Use 'ScrollTubeProvider' to sync the engine with React state.
52
54
  3. SUBJECT PINS: Use 'SubjectLayer' to attach UI to the product coordinates found by the AI tracker.
53
55
  4. DYNAMIC UI: Use the 'progress' (0-1) or 'frame' count from 'useScrollTube' for custom triggers.
@@ -70,3 +72,9 @@ This workflow enables a powerful business model:
70
72
  3. **The AI Agent** uses that intelligence to write the perfectly synced creative layer.
71
73
 
72
74
  You provide the **SDK**, the AI provides the **Implementation**.
75
@@ -15,6 +15,8 @@ The engine expects a `ProjectConfiguration` object (defined in `src/core/types.t
15
15
  - **`settings`**: Base resolutions, scroll modes, and base path.
16
16
  - **`assets`**: An array of `SequenceAsset` with multiple `variants` (Mobile vs Desktop).
17
17
  - **`timeline`**: A map of `scenes` and `layers`.
18
+ - **`source`**: (New) Relative path to the original source video file, preserved for future edits or variant regenerations.
19
+
18
20
 
19
21
  ---
20
22
 
@@ -18,12 +18,17 @@ This means you have a single source of truth for your processing logic, while be
18
18
 
19
19
  ## 2. CLI Usage
20
20
 
21
- The `npx stube create` command is the primary wrapper for the pipeline on your local machine.
21
+ The `npx scrolltube create` command is the primary wrapper for the pipeline on your local machine.
22
+
22
23
 
23
24
  ```bash
24
- npx stube create <input> [options]
25
+ npx scrolltube create <input> [options]
25
26
  ```
26
27
 
28
+ > [!TIP]
29
+ > **Interactive Mode**: If you omit the input path or provide an invalid one, the CLI will now prompt you to try again instead of exiting.
30
+
31
+
27
32
  ### Options:
28
33
  - `-n, --name <string>`: Project folder name.
29
34
  - `-p, --track <text>`: Target object to track (e.g. "red car").
@@ -56,14 +61,16 @@ const zip = await pipeline.create({
56
61
  ```
57
62
 
58
63
  ### Core Pipeline Steps:
59
- 1. **Auto-Upload**: If you provide a local `.mp4`, it's automatically uploaded to the cloud for processing.
60
- 2. **Extraction**: Converts video files into high-quality image sequences.
64
+ 1. **Source Preservation**: Automatically saves a copy of your original video as `video-source.[ext]` in the project directory for future-proofing.
65
+ 2. **Extraction**: Converts video files into high-quality image sequences (with minimal terminal noise).
61
66
  3. **AI Tracking**: Identifies the main subject (using **SAM 3**). Our engine now features **Sticky Tracking**—if the subject is obscured for a few frames, the coordinates hold their last known position.
62
- 4. **Variant Generation**:
67
+ 4. **Upscale Protection**: Automatically detects source dimensions and filters out any requested variants that would require upscaling, ensuring maximum performance and visual quality.
68
+ 5. **Variant Generation**:
63
69
  - **Smart Crop**: Centers the images based on the tracked subject.
64
70
  - **Resolution Factory**: Creates Portrait (9:16) and Landscape (16:9) pairs for each target resolution (e.g. 720p, 1080p).
65
71
  - **Compression**: Optimized `.webp` generation via Sharp (Node) or Canvas (Browser).
66
- 5. **Metadata Export**: Generates the final `scrolltube.json` with **root-relative paths** for easier deployment.
72
+ 6. **Metadata Export**: Generates the final `scrolltube.json` with **root-relative paths** and source file references.
73
+
67
74
 
68
75
  ---
69
76
 
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "scrolltube",
3
- "version": "2.1.2",
3
+ "version": "2.1.4",
4
4
  "description": "ScrollTube is a web-based tool for scroll-triggered animations.",
5
5
  "main": "dist/core/scrolltube.umd.min.js",
6
6
  "module": "dist/core/scrolltube.umd.min.js",
@@ -23,7 +23,7 @@
23
23
  },
24
24
  "types": "dist/core/index.d.ts",
25
25
  "bin": {
26
- "stube": "dist/cli/index.js"
26
+ "scrolltube": "dist/cli/index.js"
27
27
  },
28
28
  "files": [
29
29
  "dist",