@conceptcraft/mindframes 0.1.12 → 0.1.13
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/index.js +140 -92
- package/dist/index.js.map +1 -1
- package/package.json +1 -1
package/dist/index.js
CHANGED
@@ -1087,6 +1087,12 @@ async function searchImages(searchRequest) {
     body: searchRequest
   });
 }
+async function generateImage(generateRequest) {
+  return request("/api/cli/images/generate", {
+    method: "POST",
+    body: generateRequest
+  });
+}
 async function searchVideos(searchRequest) {
   return request("/api/cli/videos/search", {
     method: "POST",
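
The new `generateImage` wrapper follows the same pattern as the existing `searchImages`/`searchVideos` helpers. For orientation, a minimal usage sketch modeled on the thumbnail task added further down in this diff; the `{ success, data }` response shape and the `downloadFile3` helper are taken from that call site, and the prompt value is purely illustrative:

    // Illustrative only - mirrors the generateImage call site added later in this diff.
    const result = await generateImage({
      prompt: "YouTube thumbnail, bold text overlay, high contrast, 16:9 aspect ratio",
      options: { width: 1280, height: 720 }
    });
    if (result.success && result.data.url) {
      // result.data.url points at the generated image; result.data.cost tracks spend.
      await downloadFile3(result.data.url, "public/thumbnail.jpg");
    }
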
@@ -3067,8 +3073,8 @@ function generateMainSkillContent(context) {
   const { name, cmd: cmd2 } = context;
   const envPrefix = name.toUpperCase().replace(/[^A-Z0-9]/g, "_");
   return `---
-name: ${
-description: ${name} CLI for AI-powered content creation. Use when user needs to create presentations, generate video assets (voiceover, music, images, stock videos), use text-to-speech, mix audio, search stock media, or manage branding. This is the main entry point - load specialized skills (${
+name: ${cmd2}
+description: ${name} CLI for AI-powered content creation. Use when user needs to create presentations, generate video assets (voiceover, music, images, stock videos), use text-to-speech, mix audio, search stock media, or manage branding. This is the main entry point - load specialized skills (${cmd2}-video, ${cmd2}-presentation) for detailed workflows.
 ---
 
 # ${name} CLI
@@ -3187,7 +3193,6 @@ cat <<'EOF' | ${cmd2} video create --output ./public
 "imageQuery": "call to action button"
 }
 ],
-"voice": "Kore",
 "voiceSettings": {
 "speed": 0.95,
 "stability": 0.4,
@@ -3226,10 +3231,10 @@ ${cmd2} tts generate -t "Hello world" -o output.mp3
 # With voice selection
 ${cmd2} tts generate -t "Welcome to the demo" -v Rachel -o welcome.mp3
 
-# With provider and settings
+# With provider and settings (Gemini)
 ${cmd2} tts generate \\
   -t "Professional narration" \\
-  -v
+  -v Puck \\
   -p gemini \\
   -s 0.9 \\
   -o narration.mp3
@@ -3239,8 +3244,11 @@ ${cmd2} tts voices
 ${cmd2} tts voices --provider elevenlabs
 \`\`\`
 
-**Providers:** \`
-**
+**Providers:** \`elevenlabs\` (default), \`gemini\`, \`openai\`
+**Voices by provider:**
+- ElevenLabs: \`Rachel\`, \`Josh\`, \`Adam\`, \`Bella\` (or voice IDs)
+- Gemini: \`Kore\`, \`Puck\`, \`Charon\`, \`Aoede\`
+- OpenAI: \`alloy\`, \`nova\`, \`echo\`, \`onyx\`
 **Speed range:** 0.25 - 4.0 (default: 1.0)
 
 ---
@@ -3424,7 +3432,7 @@ cat scenes.json | ${cmd2} video create -o product-demo/public
 
 # 4. Render and add thumbnail (version files: v1, v2, v3...)
 cd product-demo
-
+npx remotion render FullVideo out/FullVideo-v1.mp4
 ${cmd2} video thumbnail out/FullVideo-v1.mp4 --frame 60
 \`\`\`
 
@@ -3484,20 +3492,6 @@ ${cmd2} --version # Version info
 }
 
 // src/commands/skill/rules/video/content.ts
-var THUMBNAIL_RULES = `Consider creating separate thumbnail component with Remotion (can be captured with remotion still, not used in actual video).
-
-**High-CTR principles:**
-- Expressive faces (emotion, not neutral) boost CTR 20-30%
-- High contrast, bold colors (yellow, orange stand out)
-- Simple: 3 main elements max (face + text + 1 visual)
-- Mobile-first: readable at 320px width (70% of views)
-- Minimal text: 3-5 words, bold legible fonts (60-80px)
-- Rule of thirds composition
-
-**Specs:** 1280x720, <2MB, 16:9 ratio
-
-Can capture: \`pnpm exec remotion still ThumbnailScene out/thumb.png\`
-`;
 var MOTION_DESIGN_GUIDELINES = `# Motion Design Principles
 
 **Core Philosophy:** "Atomic, Kinetic Construction" - nothing is static. Elements arrive and leave via physics-based transitions.
@@ -3611,7 +3605,7 @@ var ASSET_USAGE_GUIDELINES = `# Asset Usage & Optimization
 function generateVideoSkillContent(context) {
   const { name, cmd: cmd2 } = context;
   return `---
-name: ${
+name: ${cmd2}-video
 description: Use when user asks to create videos (product demos, explainers, social content, promos). Handles video asset generation, Remotion implementation, and thumbnail embedding.
 ---
 
@@ -3634,23 +3628,22 @@ ${cmd2} login
 
 ### 1. Generate Assets
 
-Generate voiceover, music, and
+Generate voiceover, music, images, and thumbnail:
 
 \`\`\`bash
 cat <<SCENES | ${cmd2} video create --output ./public
 {
 "scenes": [
-{
-"name": "Hook",
+{
+"name": "Hook",
 "script": "Watch how we transformed this complex workflow into a single click.",
 "imageQuery": "modern dashboard interface dark theme"
 },
-{
-"name": "Demo",
+{
+"name": "Demo",
 "script": "Our AI analyzes your data in real-time, surfacing insights that matter."
 }
-]
-"voice": "Kore"
+]
 }
 SCENES
 \`\`\`
@@ -3658,7 +3651,9 @@ SCENES
 **Output:**
 - \`public/audio/*.wav\` - scene voiceovers
 - \`public/audio/music.mp3\` - background music
-- \`public/
+- \`public/images/*.jpg\` - scene images (if imageQuery provided)
+- \`public/thumbnail.jpg\` - auto-generated thumbnail
+- \`public/video-manifest.json\` - **complete timeline ready to use**
 
 ### 2. Initialize Remotion (MANDATORY)
 
@@ -3672,29 +3667,47 @@ cd my-video
 
 ### 3. Render Video
 
-**IMPORTANT:**
--
--
+**IMPORTANT:**
+- The \`video-manifest.json\` already contains the complete \`timeline\` - **no need to build or transform anything**
+- Just pass \`timeline\` directly to the composition
+- Always version output files: \`out/video-v1.mp4\`, \`out/video-v2.mp4\`, etc.
 
-**
+**YouTube (landscape 16:9, NO captions):**
 \`\`\`bash
-
+npx remotion render YouTubeVideo out/youtube-v1.mp4 \\
+  --props='{"timeline":'$(cat public/video-manifest.json | jq -c .timeline)',"showCaptions":false}'
 \`\`\`
 
-**
+**TikTok/Reels/Shorts (vertical 9:16, WITH captions):**
 \`\`\`bash
-
+npx remotion render TikTokVideo out/tiktok-v1.mp4 \\
   --props='{"timeline":'$(cat public/video-manifest.json | jq -c .timeline)',"showCaptions":true}'
 \`\`\`
 
-###
+### Video Compositions
+
+| Composition | Dimensions | Captions | Use Case |
+|-------------|------------|----------|----------|
+| \`YouTubeVideo\` | 1920x1080 (16:9) | No | YouTube, Vimeo, traditional video |
+| \`TikTokVideo\` | 1080x1920 (9:16) | Yes (word-by-word) | TikTok, Reels, Shorts |
+
+**Same timeline, different output formats.** Both use the exact same \`timeline\` from \`video-manifest.json\`.
 
-
+### 4. Embed Thumbnail
+
+Thumbnail is auto-generated during \`video create\`. Inject into final video:
 
 \`\`\`bash
-${cmd2} video thumbnail out/
+${cmd2} video thumbnail out/youtube-v1.mp4 --image public/thumbnail.jpg
 \`\`\`
 
+**High-CTR thumbnail principles:**
+- Expressive faces boost CTR 20-30%
+- High contrast, bold colors (yellow, orange)
+- Simple: 3 elements max
+- Mobile-first: readable at 320px
+- Specs: 1280x720, <2MB
+
 ---
 
 ${MOTION_DESIGN_GUIDELINES}
@@ -3715,7 +3728,7 @@ ${ASSET_USAGE_GUIDELINES}
 function generatePresentationSkillContent(context) {
   const { name, cmd: cmd2 } = context;
   return `---
-name: ${
+name: ${cmd2}-presentation
 description: Use when user asks to create presentations (slides, decks, pitch decks). Generates AI-powered presentations with structured content and professional design.
 ---
 
@@ -4745,7 +4758,7 @@ function getExtension(url) {
   }
   return "jpg";
 }
-var createCommand3 = new Command19("create").description("Create video assets (voiceover per scene, music, images)").option("-s, --script <text>", "Narration script (legacy single-script mode)").option("--script-file <path>", "Path to script file (legacy) or scenes JSON").option("-t, --topic <text>", "Topic for image search").option("-v, --voice <name>", "TTS voice (
+var createCommand3 = new Command19("create").description("Create video assets (voiceover per scene, music, images)").option("-s, --script <text>", "Narration script (legacy single-script mode)").option("--script-file <path>", "Path to script file (legacy) or scenes JSON").option("-t, --topic <text>", "Topic for image search").option("-v, --voice <name>", "TTS voice ID (ElevenLabs: Rachel, Josh, Adam; OpenAI: alloy, nova; Gemini: Kore, Puck)").option("-m, --music-prompt <text>", "Music description").option("-n, --num-images <number>", "Number of images to search/download", "5").option("-o, --output <dir>", "Output directory", "./public").option("-f, --format <format>", "Output format: human, json, quiet", "human").action(async (options) => {
   const format = options.format;
   const spinner = format === "human" ? ora12("Initializing...").start() : null;
   try {
@@ -4956,7 +4969,7 @@ var createCommand3 = new Command19("create").description("Create video assets (v
       timeline.elements.length > 0 ? Math.max(...timeline.elements.map((e) => e.endMs)) : 0
     );
     const actualVideoDuration = videoEndTimeMs / 1e3;
-    const musicDuration = Math.min(
+    const musicDuration = Math.min(300, Math.ceil(actualVideoDuration));
     console.log(`[Music Generation] Requesting music:`, {
       prompt: musicPrompt,
       requestedDuration: musicDuration,
@@ -4965,71 +4978,107 @@ var createCommand3 = new Command19("create").description("Create video assets (v
       timelineDurationMs: videoEndTimeMs
     });
     let musicInfo;
-
-
-
-
-
-
+    let thumbnailPath;
+    if (spinner) spinner.text = "Generating music and thumbnail...";
+    const firstScene = scenes[0];
+    const thumbnailPrompt = `YouTube thumbnail, bold text overlay, high contrast, vibrant colors, 16:9 aspect ratio: ${firstScene?.text?.slice(0, 100) || "video thumbnail"}`;
+    const parallelTasks = [];
+    const musicTask = (async () => {
+      if (musicDuration < 3) {
+        if (format === "human") {
+          spinner?.stop();
+          warn(`Video duration (${actualVideoDuration.toFixed(1)}s) is too short for music generation (minimum 3s).`);
+          spinner?.start();
+        }
+        return null;
       }
-      } else {
       try {
-
-        let musicResult = await generateMusic({
+        let musicResult2 = await generateMusic({
           prompt: musicPrompt,
           duration: musicDuration
         });
-        if (
-
-
-          () => checkMusicStatus(musicResult.requestId),
+        if (musicResult2.status !== "completed" && musicResult2.status !== "failed") {
+          musicResult2 = await pollForCompletion(
+            () => checkMusicStatus(musicResult2.requestId),
             60,
             2e3
           );
         }
-        if (
-          throw new Error(
+        if (musicResult2.status === "failed") {
+          throw new Error(musicResult2.error || "Unknown error");
         }
         const musicPath = join2(audioDir, "music.mp3");
-        if (
-          await downloadFile3(
+        if (musicResult2.audioUrl) {
+          await downloadFile3(musicResult2.audioUrl, musicPath);
         }
-
-        const actualMusicDuration = musicResult.duration || musicDuration;
-        console.log(`[Music Generation] Received music:`, {
-          requestedDuration: musicDuration,
-          returnedDuration: musicResult.duration,
-          actualUsedDuration: actualMusicDuration,
-          totalAudioDuration: totalDuration,
-          difference: actualMusicDuration - totalDuration,
-          audioUrl: musicResult.audioUrl?.substring(0, 50) + "..."
-        });
-        musicInfo = {
+        return {
          path: "audio/music.mp3",
-          duration:
+          duration: musicResult2.duration || musicDuration,
          prompt: musicPrompt,
-          cost:
+          cost: musicResult2.cost || 0
        };
+      } catch (err) {
        if (format === "human") {
          spinner?.stop();
-
-
-
-
+          warn(`Music generation failed: ${err.message}`);
+          spinner?.start();
+        }
+        return null;
+      }
+    })();
+    parallelTasks.push(musicTask);
+    const thumbnailTask = (async () => {
+      try {
+        const result = await generateImage({
+          prompt: thumbnailPrompt,
+          options: {
+            width: 1280,
+            height: 720
          }
+        });
+        if (result.success && result.data.url) {
+          const thumbPath = join2(options.output, "thumbnail.jpg");
+          await downloadFile3(result.data.url, thumbPath);
+          totalCost += result.data.cost || 0;
+          return thumbPath;
+        }
+        return null;
+      } catch (err) {
+        if (format === "human") {
+          spinner?.stop();
+          warn(`Thumbnail generation failed: ${err.message}`);
          spinner?.start();
        }
-
+        return null;
+      }
+    })();
+    parallelTasks.push(thumbnailTask);
+    const [musicResult, thumbResult] = await Promise.all(parallelTasks);
+    if (musicResult) {
+      musicInfo = musicResult;
+      totalCost += musicResult.cost || 0;
+      if (format === "human") {
        spinner?.stop();
-
-
-
+        success(`Music: ${join2(audioDir, "music.mp3")} (${musicResult.duration}s)`);
+        if (musicResult.duration < actualVideoDuration) {
+          warn(`Music duration (${musicResult.duration.toFixed(1)}s) is shorter than video duration (${actualVideoDuration.toFixed(1)}s).`);
+        }
+        spinner?.start();
+      }
+    }
+    if (thumbResult) {
+      thumbnailPath = thumbResult;
+      if (format === "human") {
+        spinner?.stop();
+        success(`Thumbnail: ${thumbnailPath}`);
+        spinner?.start();
      }
    }
    if (spinner) spinner.text = "Writing manifest...";
    const totalDurationInFrames = Math.round(actualVideoDuration * DEFAULT_FPS);
    const manifest = {
      music: musicInfo,
+      thumbnail: thumbnailPath ? "thumbnail.jpg" : void 0,
      images: allImages,
      videos: allVideos,
      scenes,
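
Net effect of this hunk: music and thumbnail generation now run concurrently via `Promise.all`, and the written `video-manifest.json` gains an optional `thumbnail` entry. A rough sketch of the resulting manifest shape, using only the fields visible in this hunk (values are illustrative; other fields such as the `timeline` referenced by the skill docs are written elsewhere in this command and omitted here):

    // Illustrative shape only - field list taken from the manifest object above.
    const manifest = {
      music: {                      // present only when music generation succeeded
        path: "audio/music.mp3",
        duration: 42,               // seconds; request is capped at 300 by Math.min(300, ...)
        prompt: "upbeat corporate", // hypothetical value
        cost: 0
      },
      thumbnail: "thumbnail.jpg",   // undefined when thumbnail generation failed
      images: [/* allImages */],
      videos: [/* allVideos */],
      scenes: [/* per-scene entries */]
      // ...plus the timeline data assembled elsewhere in this command
    };
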
@@ -5117,7 +5166,7 @@ var searchCommand2 = new Command19("search").description("Search for stock video
     process.exit(EXIT_CODES.GENERAL_ERROR);
   }
 });
-var initCommand = new Command19("init").description("Create a new Remotion video project from template").argument("<name>", "Project directory name").option("-t, --template <repo>", "GitHub repo (user/repo)", DEFAULT_TEMPLATE).option("--type <type>", "Video type: landscape (16:9) or tiktok (9:16)", "landscape").option("--no-install", "Skip
+var initCommand = new Command19("init").description("Create a new Remotion video project from template").argument("<name>", "Project directory name").option("-t, --template <repo>", "GitHub repo (user/repo)", DEFAULT_TEMPLATE).option("--type <type>", "Video type: landscape (16:9) or tiktok (9:16)", "landscape").option("--no-install", "Skip npm install").option("-f, --format <format>", "Output format: human, json, quiet", "human").action(async (name, options) => {
   const format = options.format;
   const spinner = format === "human" ? ora12("Initializing video project...").start() : null;
   try {
@@ -5157,7 +5206,7 @@ var initCommand = new Command19("init").description("Create a new Remotion video
     if (options.install) {
       if (spinner) spinner.text = "Installing dependencies...";
       await new Promise((resolvePromise, reject) => {
-        const child = spawn("
+        const child = spawn("npm", ["install"], {
          cwd: targetDir,
          stdio: "pipe",
          shell: true
@@ -5166,7 +5215,7 @@ var initCommand = new Command19("init").description("Create a new Remotion video
           if (code === 0) {
             resolvePromise();
           } else {
-            reject(new Error(`
+            reject(new Error(`npm install failed with code ${code}`));
           }
         });
         child.on("error", reject);
@@ -5224,14 +5273,14 @@ var initCommand = new Command19("init").description("Create a new Remotion video
     info("Next steps:");
     info(` cd ${name}`);
     if (!options.install) {
-      info("
+      info(" npm install");
     }
-    info("
+    info(" npm run dev # Preview in Remotion Studio");
     info(" cc video create ... # Generate assets to public/");
     if (options.type === "tiktok") {
-      info("
+      info(" npx remotion render TikTokVideo out/tiktok.mp4 --props='{...}' # Render TikTok video");
     } else {
-      info("
+      info(" npx remotion render YouTubeVideo out/youtube.mp4 --props='{...}' # Render YouTube video");
     }
   } catch (err) {
     spinner?.stop();
@@ -5281,7 +5330,6 @@ var thumbnailCommand = new Command19("thumbnail").description("Embed a thumbnail
     if (options.composition) {
       if (spinner) spinner.text = `Extracting frame ${frameNum} from ${options.composition}...`;
       const args = [
-        "exec",
         "remotion",
         "still",
         options.composition,
@@ -5289,7 +5337,7 @@ var thumbnailCommand = new Command19("thumbnail").description("Embed a thumbnail
         `--frame=${frameNum}`
       ];
       try {
-        execSync(`
+        execSync(`npx ${args.join(" ")}`, {
           stdio: "pipe",
           cwd: process.cwd()
         });
@@ -5379,7 +5427,7 @@ var thumbnailCommand = new Command19("thumbnail").description("Embed a thumbnail
 var videoCommand = new Command19("video").description("Video asset generation commands").addCommand(initCommand).addCommand(createCommand3).addCommand(searchCommand2).addCommand(thumbnailCommand);
 
 // src/index.ts
-var VERSION = "0.1.
+var VERSION = "0.1.13";
 var program = new Command20();
 var cmdName = brand.commands[0];
 program.name(cmdName).description(brand.description).version(VERSION, "-v, --version", "Show version number").option("--debug", "Enable debug logging").option("--no-color", "Disable colored output").configureOutput({
|