@biggora/claude-plugins 1.0.0 → 1.1.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (80)
  1. package/.claude/settings.local.json +13 -0
  2. package/CLAUDE.md +55 -0
  3. package/LICENSE +1 -1
  4. package/README.md +208 -39
  5. package/bin/cli.js +39 -0
  6. package/package.json +30 -17
  7. package/registry/registry.json +166 -1
  8. package/registry/schema.json +10 -0
  9. package/src/commands/skills/add.js +194 -0
  10. package/src/commands/skills/list.js +52 -0
  11. package/src/commands/skills/remove.js +27 -0
  12. package/src/commands/skills/update.js +74 -0
  13. package/src/config.js +5 -0
  14. package/src/skills/codex-cli/SKILL.md +265 -0
  15. package/src/skills/commafeed-api/SKILL.md +1012 -0
  16. package/src/skills/gemini-cli/SKILL.md +379 -0
  17. package/src/skills/gemini-cli/references/commands.md +145 -0
  18. package/src/skills/gemini-cli/references/configuration.md +182 -0
  19. package/src/skills/gemini-cli/references/headless-and-scripting.md +181 -0
  20. package/src/skills/gemini-cli/references/mcp-and-extensions.md +254 -0
  21. package/src/skills/n8n-api/SKILL.md +623 -0
  22. package/src/skills/notebook-lm/SKILL.md +217 -0
  23. package/src/skills/notebook-lm/references/artifact-options.md +168 -0
  24. package/src/skills/notebook-lm/references/auth.md +58 -0
  25. package/src/skills/notebook-lm/references/workflows.md +144 -0
  26. package/src/skills/screen-recording/SKILL.md +309 -0
  27. package/src/skills/screen-recording/references/approach1-programmatic.md +311 -0
  28. package/src/skills/screen-recording/references/approach2-xvfb.md +232 -0
  29. package/src/skills/screen-recording/references/design-patterns.md +168 -0
  30. package/src/skills/test-mobile-app/SKILL.md +212 -0
  31. package/src/skills/test-mobile-app/references/report-template.md +95 -0
  32. package/src/skills/test-mobile-app/references/setup-appium.md +154 -0
  33. package/src/skills/test-mobile-app/scripts/analyze_apk.py +164 -0
  34. package/src/skills/test-mobile-app/scripts/check_environment.py +116 -0
  35. package/src/skills/test-mobile-app/scripts/generate_report.py +250 -0
  36. package/src/skills/test-mobile-app/scripts/run_tests.py +326 -0
  37. package/src/skills/test-web-ui/SKILL.md +232 -0
  38. package/src/skills/test-web-ui/references/test_case_schema.md +102 -0
  39. package/src/skills/test-web-ui/scripts/discover.py +176 -0
  40. package/src/skills/test-web-ui/scripts/generate_report.py +237 -0
  41. package/src/skills/test-web-ui/scripts/run_tests.py +296 -0
  42. package/src/skills/text-to-speech/SKILL.md +236 -0
  43. package/src/skills/text-to-speech/references/espeak-cli.md +277 -0
  44. package/src/skills/text-to-speech/references/kokoro-onnx.md +124 -0
  45. package/src/skills/text-to-speech/references/online-engines.md +128 -0
  46. package/src/skills/text-to-speech/references/pyttsx3-espeak.md +143 -0
  47. package/src/skills/tm-search/SKILL.md +240 -0
  48. package/src/skills/tm-search/references/field-guide.md +79 -0
  49. package/src/skills/tm-search/references/scraping-fallback.md +140 -0
  50. package/src/skills/tm-search/scripts/tm_search.py +375 -0
  51. package/src/skills/wp-rest-api/SKILL.md +114 -0
  52. package/src/skills/wp-rest-api/references/authentication.md +18 -0
  53. package/src/skills/wp-rest-api/references/custom-content-types.md +20 -0
  54. package/src/skills/wp-rest-api/references/discovery-and-params.md +20 -0
  55. package/src/skills/wp-rest-api/references/responses-and-fields.md +30 -0
  56. package/src/skills/wp-rest-api/references/routes-and-endpoints.md +36 -0
  57. package/src/skills/wp-rest-api/references/schema.md +22 -0
  58. package/src/skills/youtube-search/SKILL.md +412 -0
  59. package/src/skills/youtube-search/references/parsing-examples.md +159 -0
  60. package/src/skills/youtube-search/references/youtube-api-quota.md +85 -0
  61. package/src/skills/youtube-thumbnail/SKILL.md +1060 -0
  62. package/tests/commands/info.test.js +49 -0
  63. package/tests/commands/install.test.js +36 -0
  64. package/tests/commands/list.test.js +66 -0
  65. package/tests/commands/publish.test.js +182 -0
  66. package/tests/commands/search.test.js +45 -0
  67. package/tests/commands/uninstall.test.js +29 -0
  68. package/tests/commands/update.test.js +59 -0
  69. package/tests/functional/skills-lifecycle.test.js +293 -0
  70. package/tests/helpers/fixtures.js +63 -0
  71. package/tests/integration/cli.test.js +83 -0
  72. package/tests/skills/add.test.js +138 -0
  73. package/tests/skills/list.test.js +63 -0
  74. package/tests/skills/remove.test.js +38 -0
  75. package/tests/skills/update.test.js +60 -0
  76. package/tests/unit/config.test.js +31 -0
  77. package/tests/unit/registry.test.js +79 -0
  78. package/tests/unit/utils.test.js +150 -0
  79. package/tests/validation/registry-schema.test.js +112 -0
  80. package/tests/validation/skills-validation.test.js +96 -0
@@ -0,0 +1,309 @@
1
+ ---
2
+ name: screen-recording
3
+ description: >
4
+ Autonomous video creation skill for the Agent — creates product demos, presentation videos,
5
+ UI walkthroughs, and narrated screencasts entirely without user intervention.
6
+ Use this skill whenever a user asks to: "record a screen", "create a demo video", "make a product video",
7
+ "create a presentation video", "record a walkthrough", "make a screencast", "automate video creation",
8
+ "generate a narrated video", or anything involving producing an MP4/video file showing content, UI, or animations.
9
+ This skill covers the full pipeline: animated frames → video assembly → TTS narration → final MP4.
10
+ Always trigger this skill for any video generation or screen recording automation task.
11
+ ---
12
+
13
+ # Screen Recording Skill
14
+
15
+ Autonomous video creation pipeline for the Agent. No user interaction required after initial brief.
16
+
17
+ ## What this skill creates
18
+
19
+ - **Product demo videos** — animated walkthroughs showing features, UI flows, dashboards
20
+ - **Presentation videos** — slide-style videos with animated content and narration
21
+ - **Screen recordings** — capture of a virtual X11 display (Xvfb) with real browser/app content
22
+ - **Narrated screencasts** — video + TTS voiceover, fully automated
23
+
24
+ ---
25
+
26
+ ## Architecture: 3 Confirmed Approaches
27
+
28
+ ### Approach 1 — Programmatic Animation (RECOMMENDED)
29
+ **Best for**: product demos, feature showcases, presentation videos, marketing videos
30
+
31
+ Stack: `Pillow` → frame generation → `MoviePy` → video assembly → `pyttsx3+espeak` → narration
32
+
33
+ **Why preferred**: Fully offline, fast, no browser needed, complete creative control.
34
+
35
+ ### Approach 2 — Virtual Display Recording
36
+ **Best for**: capturing real browser/app interactions, UI walkthroughs with live content
37
+
38
+ Stack: `Xvfb` (virtual display :99) → `FFmpeg x11grab` → records actual screen content
39
+
40
+ **Why use**: When you need to show a real running application or website.
41
+
42
+ ### Approach 3 — Hybrid (Approach 1 + 2 combined)
43
+ **Best for**: complex demos mixing animated overlays with real UI screenshots
44
+
45
+ ---
46
+
47
+ ## Quick Start Workflow
48
+
49
+ ### Step 1 — Understand the request
50
+ Determine:
51
+ - What content to show (UI flow, feature list, data visualization, slides)
52
+ - Duration (default: 30–120 seconds)
53
+ - Include narration? (default: yes, using pyttsx3+espeak)
54
+ - Resolution (default: 1280×720 HD)
55
+ - Output format (default: MP4, H.264)
56
+
57
+ ### Step 2 — Choose approach (see decision tree below)
58
+
59
+ ### Step 3 — Generate video (see implementation guides)
60
+
61
+ ### Step 4 — Present the file
62
+ ```python
63
+ # Always copy to outputs and use present_files
64
+ import shutil
65
+ shutil.copy("/home/claude/output.mp4", "/mnt/user-data/outputs/demo.mp4")
66
+ ```
67
+
68
+ ---
69
+
70
+ ## Decision Tree
71
+
72
+ ```
73
+ User wants a video
74
+
75
+ ├── Need REAL browser/app on screen?
76
+ │ ├── YES → Approach 2 (Xvfb + x11grab)
77
+ │ └── NO → Continue
78
+
79
+ ├── Presentation / slides / feature demo / marketing?
80
+ │ └── YES → Approach 1 (Programmatic, FASTEST)
81
+
82
+ └── Mix of real UI + animated overlays?
83
+ └── YES → Approach 3 (Hybrid)
84
+ ```
85
+
86
+ ---
87
+
88
+ ## Implementation: Approach 1 (Programmatic)
89
+
90
+ Read `references/approach1-programmatic.md` for the full implementation guide.
91
+
92
+ **Key pattern:**
93
+ ```python
94
+ from moviepy import VideoClip, AudioFileClip
95
+ import numpy as np
96
+ from PIL import Image, ImageDraw
97
+ import pyttsx3, subprocess
98
+
99
+ # 1. Generate TTS
100
+ engine = pyttsx3.init()
101
+ engine.setProperty('rate', 140) # speaking speed
102
+ engine.save_to_file("Your narration text here", '/tmp/narration.wav')
103
+ engine.runAndWait()
104
+ subprocess.run(['ffmpeg', '-i', '/tmp/narration.wav', '-c:a', 'libmp3lame',
105
+ '/tmp/narration.mp3', '-y', '-loglevel', 'quiet'])
106
+
107
+ # 2. Generate frames
108
+ scenes = build_scene_list() # list of {duration, draw_fn}
109
+
110
+ def make_frame(t):
111
+ img = Image.new('RGB', (1280, 720), BACKGROUND_COLOR)
112
+ draw = ImageDraw.Draw(img)
113
+ current_scene(draw, t) # draw current scene content
114
+ return np.array(img)
115
+
116
+ # 3. Assemble
117
+ total_duration = sum(s['duration'] for s in scenes)
118
+ clip = VideoClip(make_frame, duration=total_duration)
119
+ audio = AudioFileClip('/tmp/narration.mp3').with_duration(total_duration)
120
+ final = clip.with_audio(audio)
121
+ final.write_videofile("/home/claude/output.mp4", fps=24, logger=None)
122
+ ```
123
+
124
+ ---
125
+
126
+ ## Implementation: Approach 2 (Xvfb + x11grab)
127
+
128
+ Read `references/approach2-xvfb.md` for the full implementation guide.
129
+
130
+ **Key pattern:**
131
+ ```bash
132
+ # 1. Start virtual display
133
+ Xvfb :99 -screen 0 1280x720x24 &
134
+ XVFB_PID=$!
135
+
136
+ # 2. Start recording
137
+ DISPLAY=:99 ffmpeg -f x11grab -video_size 1280x720 -i :99 \
138
+ -c:v libx264 -preset fast -r 24 /home/claude/recording.mp4 &
139
+ FFMPEG_PID=$!
140
+
141
+ # 3. Run your app/browser on DISPLAY=:99
142
+ DISPLAY=:99 chromium --no-sandbox --headless=new ...
143
+ # OR
144
+ DISPLAY=:99 python3 your_app.py
145
+
146
+ # 4. Stop recording — stop ffmpeg first and wait so it finalizes the MP4, then kill Xvfb
147
+ kill $FFMPEG_PID; wait $FFMPEG_PID; kill $XVFB_PID
148
+ ```
149
+
150
+ ---
151
+
152
+ ## Audio / TTS
153
+
154
+ ### pyttsx3 + espeak-ng (OFFLINE — always works)
155
+ ```python
156
+ import pyttsx3
157
+ engine = pyttsx3.init()
158
+ engine.setProperty('rate', 140) # 100-200, default ~200
159
+ engine.setProperty('volume', 0.9) # 0.0-1.0
160
+
161
+ # List voices:
162
+ for v in engine.getProperty('voices'):
163
+ print(v.id, v.name)
164
+
165
+ engine.save_to_file("Text to speak", '/tmp/out.wav')
166
+ engine.runAndWait()
167
+ ```
168
+ Convert WAV→MP3: `ffmpeg -i /tmp/out.wav -c:a libmp3lame /tmp/out.mp3 -y -loglevel quiet`
169
+
170
+ ### Silent video (no narration)
171
+ ```python
172
+ # Just skip the audio step, write video without audio
173
+ clip.write_videofile("/home/claude/output.mp4", fps=24, logger=None)
174
+ ```
175
+
176
+ ---
177
+
178
+ ## Design System
179
+
180
+ ### Color Palettes (use consistently per video)
181
+ ```python
182
+ # Tech/Product (dark)
183
+ BG = (12, 12, 32) # background
184
+ HEADER = (25, 25, 60) # header bar
185
+ ACCENT = (60, 120, 255) # primary accent
186
+ TEXT = (255, 255, 255) # main text
187
+ SUBTEXT = (150, 150, 200) # secondary text
188
+
189
+ # Presentation (light)
190
+ BG = (245, 248, 255)
191
+ HEADER = (30, 60, 180)
192
+ ACCENT = (255, 100, 50)
193
+ TEXT = (20, 20, 40)
194
+ SUBTEXT = (100, 100, 130)
195
+ ```
196
+
197
+ ### Standard Resolutions
198
+ - `1280x720` — HD (default, fast)
199
+ - `1920x1080` — Full HD (for high-quality output)
200
+ - `1080x1920` — Vertical (mobile/social)
201
+
202
+ ### Animation Patterns
203
+ ```python
204
+ # Smooth ease-in-out (0→1 over duration d, current time t from scene start)
205
+ def ease(t, d):
206
+ x = t/d
207
+ return x*x*(3-2*x)
208
+
209
+ # Fade in text
210
+ alpha = int(255 * ease(t, 0.5)) # fade over 0.5s
211
+
212
+ # Slide in from left
213
+ x = int(-500 + 600 * ease(t, 0.8))
214
+
215
+ # Progress bar
216
+ fill_width = int(max_width * ease(t, d))
217
+ ```
218
+
219
+ ---
220
+
221
+ ## Scene Structure Pattern
222
+
223
+ For multi-scene videos, use a scene list:
224
+
225
+ ```python
226
+ scenes = [
227
+ {
228
+ "title": "Intro",
229
+ "duration": 3,
230
+ "narration": "Welcome to our product demo.",
231
+ "draw": draw_intro_scene
232
+ },
233
+ {
234
+ "title": "Feature 1",
235
+ "duration": 5,
236
+ "narration": "Our AI detects issues automatically.",
237
+ "draw": draw_feature1_scene
238
+ },
239
+ # ...
240
+ ]
241
+
242
+ # Build timeline
243
+ def make_frame(t):
244
+ img = Image.new('RGB', (1280, 720), BG)
+ draw = ImageDraw.Draw(img)
+ elapsed = 0
245
+ for scene in scenes:
246
+ if t < elapsed + scene['duration']:
247
+ scene_t = t - elapsed
248
+ scene['draw'](img, draw, scene_t, scene['duration'])
249
+ return np.array(img)
250
+ elapsed += scene['duration']
+ return np.array(img)  # t past the last scene: return the blank background frame
251
+ ```
252
+
253
+ ---
254
+
255
+ ## FFmpeg Post-Processing
256
+
257
+ ```bash
258
+ # Add subtitles/captions
259
+ ffmpeg -i input.mp4 -vf "subtitles=subs.srt" output.mp4
260
+
261
+ # Compress for web
262
+ ffmpeg -i input.mp4 -c:v libx264 -crf 23 -preset medium -c:a aac -b:a 128k web.mp4
263
+
264
+ # GIF (for short demos)
265
+ ffmpeg -i input.mp4 -vf "fps=12,scale=960:-1:flags=lanczos" -loop 0 demo.gif
266
+
267
+ # Trim
268
+ ffmpeg -i input.mp4 -ss 00:00:05 -t 00:00:30 -c copy trimmed.mp4
269
+
270
+ # Concatenate multiple clips
271
+ # Create concat.txt: file 'clip1.mp4' \n file 'clip2.mp4'
272
+ ffmpeg -f concat -safe 0 -i concat.txt -c copy combined.mp4
273
+ ```
274
+
275
+ ---
276
+
277
+ ## Installation (run once if needed)
278
+
279
+ ```bash
280
+ # Core dependencies (usually pre-installed)
281
+ pip install moviepy pillow opencv-python pyttsx3 --break-system-packages
282
+
283
+ # Offline TTS engine
284
+ apt-get install -y espeak-ng
285
+
286
+ # Verify
287
+ python3 -c "from moviepy import VideoClip; import pyttsx3; print('OK')"
288
+ ```
289
+
290
+ ---
291
+
292
+ ## Common Pitfalls
293
+
294
+ | Problem | Solution |
295
+ |---|---|
296
+ | MoviePy `verbose` kwarg error | Use `logger=None` not `verbose=False` |
297
+ | pyttsx3 "no espeak" error | `apt-get install -y espeak-ng` |
298
+ | gTTS/edge-tts connection error | Use pyttsx3+espeak (offline, always works) |
299
+ | Black video output | Check `make_frame` returns `np.array(img)` not `img` |
300
+ | Audio/video length mismatch | Use `.with_duration(video.duration)` on audio clip |
301
+ | Xvfb display conflict | Use `DISPLAY=:99` and kill after recording |
302
+
303
+ ---
304
+
305
+ ## Reference Files
306
+
307
+ - `references/approach1-programmatic.md` — Full Approach 1 code templates
308
+ - `references/approach2-xvfb.md` — Full Approach 2 (Xvfb) code templates
309
+ - `references/design-patterns.md` — Advanced animations, transitions, UI components
@@ -0,0 +1,311 @@
1
+ # Approach 1: Programmatic Video Generation
2
+
3
+ Full offline pipeline. No browser, no display server needed.
4
+
5
+ ## Full Working Template
6
+
7
+ ```python
8
+ #!/usr/bin/env python3
9
+ """
10
+ Autonomous Product Demo Video Generator
11
+ Usage: python3 generate_demo.py
12
+ Output: /mnt/user-data/outputs/demo.mp4
13
+ """
14
+
15
+ from moviepy import VideoClip, AudioFileClip
16
+ import numpy as np
17
+ from PIL import Image, ImageDraw, ImageFont
18
+ import pyttsx3, subprocess, os, shutil
19
+
20
+ # ── CONFIG ─────────────────────────────────────────────────────────────────────
21
+ WIDTH, HEIGHT = 1280, 720
22
+ FPS = 24
23
+ OUTPUT_PATH = "/home/claude/demo.mp4"
24
+ FINAL_OUTPUT = "/mnt/user-data/outputs/demo.mp4"
25
+
26
+ # Color palette (dark tech theme)
27
+ C = {
28
+ "bg": (12, 12, 32),
29
+ "header": (20, 20, 55),
30
+ "accent": (60, 120, 255),
31
+ "accent2": (100, 220, 180),
32
+ "text": (240, 240, 255),
33
+ "subtext": (140, 140, 190),
34
+ "success": (60, 200, 100),
35
+ "warning": (255, 180, 40),
36
+ "card": (25, 28, 60),
37
+ }
38
+
39
+ # ── SCENES ─────────────────────────────────────────────────────────────────────
40
+ SCENES = [
41
+ {
42
+ "id": "intro",
43
+ "duration": 4,
44
+ "narration": "Welcome to our product. This demonstration will walk you through the key features.",
45
+ },
46
+ {
47
+ "id": "feature_1",
48
+ "duration": 5,
49
+ "narration": "First, our automated detection system identifies issues in real time.",
50
+ },
51
+ {
52
+ "id": "feature_2",
53
+ "duration": 5,
54
+ "narration": "Then, our correction engine applies fixes automatically, saving hours of manual work.",
55
+ },
56
+ {
57
+ "id": "outro",
58
+ "duration": 4,
59
+ "narration": "Get started today and transform your workflow. Thank you for watching.",
60
+ },
61
+ ]
62
+
63
+ TOTAL_DURATION = sum(s["duration"] for s in SCENES)
64
+
65
+ # ── HELPERS ────────────────────────────────────────────────────────────────────
66
+ def ease(t, d):
67
+ """Smooth ease-in-out, 0→1 over duration d"""
68
+ x = max(0, min(1, t / d))
69
+ return x * x * (3 - 2 * x)
70
+
71
+ def draw_header(draw, title, subtitle=""):
72
+ draw.rectangle([0, 0, WIDTH, 72], fill=C["header"])
73
+ draw.text((32, 16), title, fill=C["text"])
74
+ if subtitle:
75
+ draw.text((32, 46), subtitle, fill=C["subtext"])
76
+ # Header accent line
77
+ draw.rectangle([0, 72, WIDTH, 75], fill=C["accent"])
78
+
79
+ def draw_progress_bar(draw, t, duration, y=680, label=""):
80
+ p = ease(t, duration)
81
+ draw.rectangle([80, y, WIDTH-80, y+12], outline=C["subtext"], width=1)
82
+ if p > 0:
83
+ draw.rectangle([80, y, int(80 + (WIDTH-160) * p), y+12], fill=C["accent"])
84
+ if label:
85
+ draw.text((80, y - 22), label, fill=C["subtext"])
86
+
87
+ def draw_card(draw, x, y, w, h, title, content_lines, highlight=False):
88
+ color = C["accent"] if highlight else C["card"]
89
+ draw.rectangle([x, y, x+w, y+h], fill=color if highlight else C["card"],
90
+ outline=C["accent"] if highlight else C["subtext"], width=1)
91
+ draw.text((x+16, y+14), title, fill=C["text"])
92
+ for i, line in enumerate(content_lines):
93
+ draw.text((x+16, y+44+i*26), line, fill=C["subtext"])
94
+
95
+ # ── SCENE DRAW FUNCTIONS ───────────────────────────────────────────────────────
96
+ def draw_intro(draw, t, d):
97
+ # Animated title appearance
98
+ alpha_title = ease(t, 1.0)
99
+ alpha_sub = ease(max(0, t - 0.5), 1.0)
100
+
101
+ draw_header(draw, "Product Demo", "Automated Video — No Human Required")
102
+
103
+ # Big title
104
+ title = "Welcome to Our Product"
105
+ x = int(WIDTH/2 - len(title)*12)
106
+ y = int(200 + (1 - alpha_title) * 60)
107
+ draw.text((x, y), title, fill=(
108
+ int(255 * alpha_title), int(255 * alpha_title), int(255 * alpha_title)))
109
+
110
+ # Subtitle
111
+ sub = "Transforming workflows with AI automation"
112
+ x2 = int(WIDTH/2 - len(sub)*7)
113
+ draw.text((x2, 310), sub, fill=(
114
+ int(180 * alpha_sub), int(180 * alpha_sub), int(220 * alpha_sub)))
115
+
116
+ # Animated dots
117
+ for i in range(3):
118
+ dot_t = (t * 2 + i * 0.4) % 1.5
119
+ r = int(4 + 4 * ease(dot_t, 0.3))
120
+ cx = WIDTH//2 - 30 + i * 30
121
+ draw.ellipse([cx-r, 470-r, cx+r, 470+r], fill=C["accent"])
122
+
123
+ draw_progress_bar(draw, t, d, label="Loading demo...")
124
+
125
+
126
+ def draw_feature_1(draw, t, d):
127
+ draw_header(draw, "Feature 1: Automated Detection", "Real-time issue identification")
128
+
129
+ # Animated list items appearing one by one
130
+ items = [
131
+ ("✓", "Scans 10,000+ items per second"),
132
+ ("✓", "98.7% detection accuracy"),
133
+ ("✓", "Zero false positives guaranteed"),
134
+ ("✓", "Works with any input format"),
135
+ ]
136
+ for i, (icon, text) in enumerate(items):
137
+ appear_t = max(0, t - i * 0.8)
138
+ alpha = ease(appear_t, 0.5)
139
+ if alpha > 0:
140
+ y = 180 + i * 80
141
+ # Card background
142
+ draw.rectangle([80, y, 900, y+60],
143
+ fill=(int(25*alpha), int(28*alpha), int(60*alpha)),
144
+ outline=(int(60*alpha), int(120*alpha), int(255*alpha)), width=1)
145
+ draw.text((110, y+18), f"{icon} {text}",
146
+ fill=(int(240*alpha), int(240*alpha), int(255*alpha)))
147
+
148
+ # Animated counter
149
+ count = int(ease(t, d) * 10847)
150
+ draw.text((950, 250), f"{count:,}", fill=C["accent2"])
151
+ draw.text((950, 290), "items processed", fill=C["subtext"])
152
+
153
+ draw_progress_bar(draw, t, d, label="Detecting...")
154
+
155
+
156
+ def draw_feature_2(draw, t, d):
157
+ draw_header(draw, "Feature 2: Automated Correction", "One-click fix for all detected issues")
158
+
159
+ # Before/After comparison
160
+ mid = WIDTH // 2
161
+
162
+ # Before panel
163
+ before_alpha = max(0, 1 - ease(t, 1.5) * 0.3)
164
+ draw.rectangle([60, 150, mid-20, 580], fill=C["card"], outline=C["warning"], width=2)
165
+ draw.text((80, 165), "BEFORE", fill=C["warning"])
166
+ issues = ["❌ Extra fingers detected", "❌ Proportion mismatch", "❌ Anatomy anomaly", "❌ 3 more issues..."]
167
+ for i, issue in enumerate(issues):
168
+ draw.text((80, 210 + i * 50), issue, fill=(200, 180, 100))
169
+
170
+ # Animated arrow
171
+ arrow_p = ease(max(0, t - 0.5), 1.0)
172
+ ax = int(mid - 40 + 80 * arrow_p)
173
+ if arrow_p > 0.1:
174
+ draw.text((ax, 340), "→", fill=C["accent"])
175
+
176
+ # After panel (appears after animation)
177
+ after_alpha = ease(max(0, t - 1.5), 1.0)
178
+ if after_alpha > 0:
179
+ draw.rectangle([mid+20, 150, WIDTH-60, 580], fill=C["card"],
180
+ outline=(int(60*after_alpha), int(200*after_alpha), int(100*after_alpha)), width=2)
181
+ draw.text((mid+40, 165), "AFTER", fill=C["success"])
182
+ fixes = ["✅ Fingers corrected", "✅ Proportions normalized", "✅ Anatomy verified", "✅ All issues resolved"]
183
+ for i, fix in enumerate(fixes):
184
+ fix_alpha = ease(max(0, t - 1.5 - i*0.3), 0.4)
185
+ draw.text((mid+40, 210 + i * 50), fix,
186
+ fill=(int(60*fix_alpha), int(200*fix_alpha), int(100*fix_alpha)))
187
+
188
+ draw_progress_bar(draw, t, d, label="Correcting...")
189
+
190
+
191
+ def draw_outro(draw, t, d):
192
+ draw.rectangle([0, 0, WIDTH, HEIGHT], fill=C["bg"])
193
+ draw_header(draw, "Get Started Today")
194
+
195
+ p = ease(t, 2.0)
196
+
197
+ # CTA
198
+ draw.text((WIDTH//2 - 200, 250), "Ready to transform your workflow?", fill=C["text"])
199
+
200
+ # Stats cards
201
+ stats = [("10x", "Faster"), ("99%", "Accurate"), ("0", "Manual Work")]
202
+ for i, (val, label) in enumerate(stats):
203
+ card_p = ease(max(0, t - 0.5 - i * 0.3), 0.5)
204
+ if card_p > 0:
205
+ x = 100 + i * 380
206
+ draw.rectangle([x, 360, x+320, 500], fill=C["card"],
207
+ outline=C["accent"], width=int(1 + card_p))
208
+ draw.text((x + 120, 390), val, fill=C["accent2"])
209
+ draw.text((x + 120, 440), label, fill=C["subtext"])
210
+
211
+ # Final tagline
212
+ if t > 2:
213
+ final_p = ease(t - 2, 1.5)
214
+ draw.text((WIDTH//2 - 180, 560), "Start your free trial now →",
215
+ fill=(int(60*final_p), int(120*final_p), int(255*final_p)))
216
+
217
+ DRAW_FUNCTIONS = {
218
+ "intro": draw_intro,
219
+ "feature_1": draw_feature_1,
220
+ "feature_2": draw_feature_2,
221
+ "outro": draw_outro,
222
+ }
223
+
224
+ # ── MAIN PIPELINE ──────────────────────────────────────────────────────────────
225
+ def generate_video():
226
+ print("📝 Step 1: Generating TTS narration...")
227
+ narration_text = " ".join(s["narration"] for s in SCENES)
228
+ engine = pyttsx3.init()
229
+ engine.setProperty('rate', 140)
230
+ engine.save_to_file(narration_text, '/tmp/narration.wav')
231
+ engine.runAndWait()
232
+ subprocess.run(['ffmpeg', '-i', '/tmp/narration.wav', '-c:a', 'libmp3lame',
233
+ '-b:a', '128k', '/tmp/narration.mp3', '-y', '-loglevel', 'quiet'])
234
+ print(f" ✅ Narration: {os.path.getsize('/tmp/narration.mp3'):,} bytes")
235
+
236
+ print("🎬 Step 2: Rendering video frames...")
237
+ # Build timeline
238
+ timeline = []
239
+ elapsed = 0
240
+ for scene in SCENES:
241
+ timeline.append((elapsed, elapsed + scene["duration"], scene["id"]))
242
+ elapsed += scene["duration"]
243
+
244
+ def make_frame(t):
245
+ img = Image.new('RGB', (WIDTH, HEIGHT), C["bg"])
246
+ draw = ImageDraw.Draw(img)
247
+ for start, end, scene_id in timeline:
248
+ if start <= t < end:
249
+ DRAW_FUNCTIONS[scene_id](draw, t - start, end - start)
250
+ break
251
+ return np.array(img)
252
+
253
+ clip = VideoClip(make_frame, duration=TOTAL_DURATION)
254
+ print(f" ✅ Clip: {TOTAL_DURATION}s at {FPS}fps")
255
+
256
+ print("🎵 Step 3: Combining video + audio...")
257
+ audio = AudioFileClip('/tmp/narration.mp3').with_duration(TOTAL_DURATION)
258
+ final = clip.with_audio(audio)
259
+ final.write_videofile(OUTPUT_PATH, fps=FPS, logger=None)
260
+ size = os.path.getsize(OUTPUT_PATH)
261
+ print(f" ✅ Video: {size:,} bytes ({size//1024} KB)")
262
+
263
+ print("📦 Step 4: Copying to outputs...")
264
+ os.makedirs(os.path.dirname(FINAL_OUTPUT), exist_ok=True)
265
+ shutil.copy(OUTPUT_PATH, FINAL_OUTPUT)
266
+ print(f" ✅ Saved to: {FINAL_OUTPUT}")
267
+
268
+ return FINAL_OUTPUT
269
+
270
+ if __name__ == "__main__":
271
+ result = generate_video()
272
+ print(f"\n🎉 Done! Video ready at: {result}")
273
+ ```
274
+
275
+ ## Tips for Customization
276
+
277
+ ### Adding a logo/watermark
278
+ ```python
279
+ # Load image and paste onto frame
280
+ logo = Image.open("/path/to/logo.png").convert("RGBA")
281
+ logo = logo.resize((120, 60))
282
+ img.paste(logo, (WIDTH - 140, 20), logo)
283
+ ```
284
+
285
+ ### Screen mockup (fake browser window)
286
+ ```python
287
+ def draw_browser_mockup(draw, x, y, w, h, url="https://yourapp.com"):
288
+ # Chrome bar
289
+ draw.rectangle([x, y, x+w, y+40], fill=(50, 50, 60))
290
+ draw.ellipse([x+10, y+12, x+26, y+28], fill=(255, 80, 80))
291
+ draw.ellipse([x+32, y+12, x+48, y+28], fill=(255, 180, 40))
292
+ draw.ellipse([x+54, y+12, x+70, y+28], fill=(60, 200, 80))
293
+ # URL bar
294
+ draw.rectangle([x+90, y+8, x+w-20, y+32], fill=(35, 35, 45))
295
+ draw.text((x+100, y+14), url, fill=(180, 180, 200))
296
+ # Content area
297
+ draw.rectangle([x, y+40, x+w, y+h], fill=(240, 242, 250))
298
+ ```
299
+
300
+ ### Animated typing effect
301
+ ```python
302
+ def typing_effect(draw, text, x, y, t, speed=20, color=(240,240,255)):
303
+ """Shows text appearing character by character"""
304
+ chars_shown = int(t * speed)
305
+ visible = text[:chars_shown]
306
+ draw.text((x, y), visible, fill=color)
307
+ # Blinking cursor
308
+ if chars_shown < len(text) and int(t * 4) % 2 == 0:
309
+ cursor_x = x + chars_shown * 12
310
+ draw.text((cursor_x, y), "▌", fill=color)
311
+ ```