@opensassi/opencode 0.1.5 → 0.2.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (28)
  1. package/AGENTS.md +1 -0
  2. package/package.json +1 -1
  3. package/skills/demo-video/SKILL.md +264 -0
  4. package/skills/demo-video/scripts/assemble.cjs +152 -0
  5. package/skills/demo-video/scripts/capture-browser.sh +64 -0
  6. package/skills/demo-video/scripts/capture-html.sh +48 -0
  7. package/skills/demo-video/scripts/generate-subs.cjs +75 -0
  8. package/skills/demo-video/scripts/generate-tts.sh +28 -0
  9. package/skills/demo-video/scripts/render-slide.cjs +61 -0
  10. package/skills/demo-video/scripts/render-terminal.cjs +138 -0
  11. package/skills/demo-video/scripts/setup.sh +44 -0
  12. package/skills/demo-video/test/assemble.test.js +100 -0
  13. package/skills/demo-video/test/capture-browser.test.js +71 -0
  14. package/skills/demo-video/test/capture-html.test.js +72 -0
  15. package/skills/demo-video/test/e2e-test.sh +302 -0
  16. package/skills/demo-video/test/fixtures/demo-scenes.json +36 -0
  17. package/skills/demo-video/test/fixtures/hello.output +2 -0
  18. package/skills/demo-video/test/fixtures/hello.timing +13 -0
  19. package/skills/demo-video/test/generate-subs.test.js +67 -0
  20. package/skills/demo-video/test/generate-tts.test.js +58 -0
  21. package/skills/demo-video/test/helpers/run-script.js +33 -0
  22. package/skills/demo-video/test/integration.test.js +110 -0
  23. package/skills/demo-video/test/jest.config.cjs +6 -0
  24. package/skills/demo-video/test/render-slide.test.js +79 -0
  25. package/skills/demo-video/test/render-terminal.test.js +87 -0
  26. package/skills/demo-video/test/setup.test.js +55 -0
  27. package/skills/opensassi/SKILL.md +6 -1
  28. package/skills-index.json +5 -0
@@ -0,0 +1,302 @@
1
#!/usr/bin/env bash
# e2e-test.sh — Full pipeline E2E demo with master asset verification
#
# 1. Record raw terminal data + generate TTS
# 2. Render + capture each scene at TTS duration
# 3. assemble --mode video-only → video_master.mp4
# 4. assemble --mode audio-only → audio_master.m4a
# 5. ffprobe compare (fails if >0.5s mismatch)
# 6. assemble --mode final → demo-e2e.mp4
#
# Usage: bash test/e2e-test.sh [--open]
set -euo pipefail

# Unique scratch directory per run (epoch-stamped) so repeated runs
# never collide; holds per-scene clips and narration audio.
WORKDIR="/tmp/demo-e2e-$(date +%s)"
CLIPS="$WORKDIR/clips"
AUDIO="$WORKDIR/audio"
mkdir -p "$WORKDIR" "$CLIPS" "$AUDIO"

# SCRIPT_DIR = the skill directory (this file lives in its test/ subdir);
# PROJECT_ROOT = two levels above the skill directory.
SCRIPT_DIR="$(cd "$(dirname "$0")"/.. && pwd)"
PROJECT_ROOT="$(cd "$SCRIPT_DIR"/../.. && pwd)"
22
# get_duration FILE — print the media duration (seconds) of FILE.
# Falls back to 5 when ffprobe fails OR prints nothing: ffprobe can exit 0
# yet emit an empty duration field for some inputs, and the original
# `|| echo 5` fallback never fired in that case, letting an empty string
# leak into the manifest JSON and the bc arithmetic downstream.
get_duration() {
  local dur
  dur=$(ffprobe -v error -show_entries format=duration -of csv=p=0 "$1" 2>/dev/null) || dur=""
  if [ -n "$dur" ]; then
    echo "$dur"
  else
    echo 5
  fi
}
25
+
26
# record_terminal_raw NAME CMD — run CMD under util-linux `script`,
# writing the raw typescript to $WORKDIR/NAME.output and per-chunk delays
# to $WORKDIR/NAME.timing for later accelerated replay.
# NOTE(review): --timing=/--flush are util-linux flags; BSD/macOS `script`
# does not accept this form — confirm Linux-only execution is intended.
record_terminal_raw() {
  local name="$1" cmd="$2"
  echo " [record] $name"
  script --timing="$WORKDIR/${name}.timing" \
    --flush "$WORKDIR/${name}.output" \
    -c "$cmd" 2>/dev/null
}
33
+
34
# Render a recorded terminal session to HTML, then capture it as a video
# clip whose length matches the scene's narration audio.
#   $1 scene name   $2 TTS audio file
#   $3 replay speed (default 3)   $4 command line shown in the replay
render_and_capture_terminal() {
  local scene="$1"
  local narration="$2"
  local replay_speed="${3:-3}"
  local shown_cmd="${4:-}"
  local clip_len
  clip_len=$(get_duration "$narration")
  node "$SCRIPT_DIR/scripts/render-terminal.cjs" \
    --timing "$WORKDIR/${scene}.timing" \
    --output "$WORKDIR/${scene}.output" \
    --command "$shown_cmd" \
    --speed "$replay_speed" \
    --html "$WORKDIR/${scene}.html"
  bash "$SCRIPT_DIR/scripts/capture-html.sh" \
    --html "$WORKDIR/${scene}.html" \
    --duration "$clip_len" \
    --output "$CLIPS/${scene}.mp4"
  echo " [capture] $scene — ${clip_len}s"
}
49
+
50
# Render a title/bullet slide to HTML, then capture it as a video clip
# whose length matches the scene's narration audio.
#   $1 scene name   $2 slide title   $3 bullets (JSON array)   $4 TTS audio file
render_and_capture_slide() {
  local scene="$1"
  local slide_title="$2"
  local slide_bullets="$3"
  local narration="$4"
  local clip_len
  clip_len=$(get_duration "$narration")
  node "$SCRIPT_DIR/scripts/render-slide.cjs" \
    --title "$slide_title" \
    --bullets "$slide_bullets" \
    --output "$WORKDIR/${scene}.html"
  bash "$SCRIPT_DIR/scripts/capture-html.sh" \
    --html "$WORKDIR/${scene}.html" \
    --duration "$clip_len" \
    --output "$CLIPS/${scene}.mp4"
  echo " [capture] $scene — ${clip_len}s"
}
63
+
64
# Capture a live URL with Playwright for as long as the narration runs.
#   $1 scene name   $2 URL to capture   $3 TTS audio file
capture_browser_scene() {
  local scene="$1"
  local target_url="$2"
  local narration="$3"
  local clip_len
  clip_len=$(get_duration "$narration")
  bash "$SCRIPT_DIR/scripts/capture-browser.sh" \
    --url "$target_url" \
    --duration "$clip_len" \
    --output "$CLIPS/${scene}.mp4"
  echo " [capture] $scene — ${clip_len}s (browser)"
}
73
+
74
# start_dashboard [PORT] — launch the dashboard server in the background
# (default port 3000) and poll its health endpoint for up to ~7.5s.
# Sets DASHBOARD_PID for stop_dashboard. Returns 1 on failure.
# Fix: the original always polled the full 15 × 0.5s even when the server
# process had already exited; bail out early via kill -0.
start_dashboard() {
  local port="${1:-3000}"
  echo " [dashboard] starting on port $port..."
  cd "$PROJECT_ROOT" && node scripts/dashboard.js --port "$port" &
  DASHBOARD_PID=$!
  for _ in $(seq 1 15); do
    # If the server died (port in use, crash on boot), stop waiting now.
    if ! kill -0 "$DASHBOARD_PID" 2>/dev/null; then
      break
    fi
    if curl -sf "http://127.0.0.1:$port/api/health" > /dev/null 2>&1; then
      echo " [dashboard] ready"
      return 0
    fi
    sleep 0.5
  done
  echo " [dashboard] failed to start"
  return 1
}
89
+
90
# stop_dashboard — terminate the background dashboard started by
# start_dashboard; a no-op when no DASHBOARD_PID was recorded.
stop_dashboard() {
  local pid="${DASHBOARD_PID:-}"
  if [ -z "$pid" ]; then
    return 0
  fi
  kill "$pid" 2>/dev/null || true
  wait "$pid" 2>/dev/null || true
  echo " [dashboard] stopped"
}
97
+
98
# generate_tts NAME TEXT — synthesize narration audio for scene NAME into
# $AUDIO/NAME.mp3 using the fixed en-US-AriaNeural voice.
generate_tts() {
  local scene="$1"
  local narration="$2"
  bash "$SCRIPT_DIR/scripts/generate-tts.sh" \
    --text "$narration" \
    --voice en-US-AriaNeural \
    --output "$AUDIO/${scene}.mp3"
}
105
+
106
# Placeholder scenes file; replaced with real per-scene durations in Phase 7.
cat > "$WORKDIR/scenes.json" << 'SCHEMA'
{}
SCHEMA

# === Phase 1: Record raw + TTS ===
# Record each terminal scene's raw typescript/timing and synthesize all
# narration audio up front — scene clip lengths are derived from the TTS.
echo ""
echo "=== Phase 1: Record raw + TTS ==="
echo ""

echo "--- Scene 1: Intro slide ---"
generate_tts "intro" \
  "This is the demo-video skill for the opensassi opencode platform. It produces narrated, edited demonstration videos from project outlines, capturing terminal TUI sessions and browser interactions, and assembling them with multi-language subtitles."

echo "--- Scene 2: Skills index ---"
cmd_map_skills="cd '$PROJECT_ROOT' && npm run opencode -- opensassi --print-index 2>&1 | head -30"
record_terminal_raw "skills" "$cmd_map_skills"
generate_tts "skills" \
  "Here we see all available skills. The demo-video skill integrates as a standard skill, registered in the skills index."

echo "--- Scene 3: Environment check ---"
cmd_map_env="cd '$PROJECT_ROOT' && npm run opencode -- run --skill opensassi env-check.sh 2>&1"
record_terminal_raw "env" "$cmd_map_env"
generate_tts "env" \
  "The environment checker detects OS, distribution, package manager, Node.js, and git, outputting structured JSON."

echo "--- Scene 4: Pipeline design ---"
generate_tts "pipeline" \
  "The pipeline uses Playwright-first capture with no screen recording. Terminal output is captured via the script command and replayed at accelerated speed."

echo "--- Scene 5: Test results ---"
cmd_map_tests="cd '$PROJECT_ROOT' && npx jest --config skills/demo-video/test/jest.config.cjs --verbose 2>&1 | tail -10"
record_terminal_raw "tests" "$cmd_map_tests"
generate_tts "tests" \
  "All 36 tests pass across 9 test suites."

echo "--- Scene 6: Dashboard ---"
generate_tts "dashboard" \
  "The opencode dashboard displays session evaluations, daily reports, git activity, and search across all recorded sessions."

echo "--- Scene 7: Outro ---"
generate_tts "outro" \
  "The demo-video skill is complete. 22 files, approximately 950 lines of code, ready for use."

# === Phase 2: Render + capture (TTS-driven) ===
# Each clip is captured for exactly the duration of its narration audio.
echo ""
echo "=== Phase 2: Render + capture ==="
echo ""

render_and_capture_slide "intro" \
  "Building the demo-video Skill" \
  '["Session: May 16, 2026","22 files · 950 lines · 36 tests"]' \
  "$AUDIO/intro.mp3"
render_and_capture_terminal "skills" "$AUDIO/skills.mp3" 4 "$cmd_map_skills"
render_and_capture_terminal "env" "$AUDIO/env.mp3" 3 "$cmd_map_env"
render_and_capture_slide "pipeline" \
  "Pipeline Design" \
  '["Playwright-first HTML capture","No screen recording","Multi-language subtitles","2-4x accelerated replay"]' \
  "$AUDIO/pipeline.mp3"
render_and_capture_terminal "tests" "$AUDIO/tests.mp3" 5 "$cmd_map_tests"
render_and_capture_slide "outro" \
  "demo-video Skill Complete" \
  '["9 scripts · 1 SKILL.md · 9 test files","3-phase pipeline: Plan / Record / Produce"]' \
  "$AUDIO/outro.mp3"

# === Phase 2.5: Browser scene (dashboard) ===
echo ""
echo "=== Phase 2.5: Dashboard browser scene ==="
echo ""

# NOTE(review): if start_dashboard fails we only log a skip message, yet
# capture_browser_scene still runs against the dead URL — confirm
# capture-browser.sh tolerates an unreachable target.
start_dashboard 3210 || echo " [skip] dashboard not available"
capture_browser_scene "dashboard" "http://127.0.0.1:3210/#/daily/2026-05-16" "$AUDIO/dashboard.mp3"
stop_dashboard

# === Phase 3: Build manifest ===
# Unquoted heredoc on purpose: $(get_duration ...) and path variables are
# expanded while writing the JSON.
echo ""
echo "=== Phase 3: Manifest ==="
echo ""

cat > "$WORKDIR/manifest.json" << MANIFEST
{
  "scenes": [
    {"clip":"$CLIPS/intro.mp4", "duration":$(get_duration "$AUDIO/intro.mp3"), "audio":"$AUDIO/intro.mp3"},
    {"clip":"$CLIPS/skills.mp4", "duration":$(get_duration "$AUDIO/skills.mp3"), "audio":"$AUDIO/skills.mp3"},
    {"clip":"$CLIPS/env.mp4", "duration":$(get_duration "$AUDIO/env.mp3"), "audio":"$AUDIO/env.mp3"},
    {"clip":"$CLIPS/pipeline.mp4", "duration":$(get_duration "$AUDIO/pipeline.mp3"), "audio":"$AUDIO/pipeline.mp3"},
    {"clip":"$CLIPS/tests.mp4", "duration":$(get_duration "$AUDIO/tests.mp3"), "audio":"$AUDIO/tests.mp3"},
    {"clip":"$CLIPS/outro.mp4", "duration":$(get_duration "$AUDIO/outro.mp3"), "audio":"$AUDIO/outro.mp3"},
    {"clip":"$CLIPS/dashboard.mp4","duration":$(get_duration "$AUDIO/dashboard.mp3"),"audio":"$AUDIO/dashboard.mp3"}
  ]
}
MANIFEST

# === Phase 4: Build masters ===
# Video-only and audio-only masters are assembled separately so their
# durations can be cross-checked before the final mix.
echo ""
echo "=== Phase 4: Build masters ==="
echo ""

VIDEO_MASTER="$WORKDIR/video_master.mp4"
AUDIO_MASTER="$WORKDIR/audio_master.m4a"
FINAL_OUTPUT="$WORKDIR/demo-e2e.mp4"

node "$SCRIPT_DIR/scripts/assemble.cjs" \
  --mode video-only \
  --manifest "$WORKDIR/manifest.json" \
  --output "$VIDEO_MASTER" \
  --keep-raw true

node "$SCRIPT_DIR/scripts/assemble.cjs" \
  --mode audio-only \
  --manifest "$WORKDIR/manifest.json" \
  --output "$AUDIO_MASTER" \
  --keep-raw true

# === Phase 5: Verify masters ===
# Fail the run if the two masters drift more than 0.5s apart.
# NOTE(review): requires `bc`; not checked for in setup — confirm it is a
# declared dependency.
echo ""
echo "=== Phase 5: Verify masters ==="
echo ""

V_DUR=$(get_duration "$VIDEO_MASTER")
A_DUR=$(get_duration "$AUDIO_MASTER")
# Absolute difference: bc subtracts, sed strips any leading minus sign.
DIFF=$(echo "$V_DUR - $A_DUR" | bc | sed 's/^-//')

echo "Video master: ${V_DUR}s"
echo "Audio master: ${A_DUR}s"
echo "Mismatch:     ${DIFF}s"

if (( $(echo "$DIFF > 0.5" | bc -l) )); then
  echo "ERROR: Video/audio mismatch exceeds 0.5s threshold"
  exit 1
fi
echo "Masters verified — within 0.5s tolerance."

# === Phase 6: Mix final ===
echo ""
echo "=== Phase 6: Mix final ==="
echo ""

node "$SCRIPT_DIR/scripts/assemble.cjs" \
  --mode final \
  --video "$VIDEO_MASTER" \
  --audio "$AUDIO_MASTER" \
  --output "$FINAL_OUTPUT"

# === Phase 7: Subtitles ===
# Rewrite scenes.json (overwriting the Phase-0 placeholder) with measured
# durations, then emit SRT files for each requested language.
echo ""
echo "=== Phase 7: Subtitles ==="
echo ""

cat > "$WORKDIR/scenes.json" << SCENES
{
  "scenes": [
    {"id":1,"duration":$(get_duration "$AUDIO/intro.mp3"),"narration":{"en":"This is the demo-video skill."}},
    {"id":2,"duration":$(get_duration "$AUDIO/skills.mp3"),"narration":{"en":"Here we see all available skills."}},
    {"id":3,"duration":$(get_duration "$AUDIO/env.mp3"),"narration":{"en":"The environment checker detects OS."}},
    {"id":4,"duration":$(get_duration "$AUDIO/pipeline.mp3"),"narration":{"en":"The pipeline uses Playwright-first capture."}},
    {"id":5,"duration":$(get_duration "$AUDIO/tests.mp3"),"narration":{"en":"All 36 tests pass."}},
    {"id":6,"duration":$(get_duration "$AUDIO/dashboard.mp3"),"narration":{"en":"The opencode dashboard displays session data, reports, and git activity."}},
    {"id":7,"duration":$(get_duration "$AUDIO/outro.mp3"),"narration":{"en":"The demo-video skill is complete."}}
  ],
  "languages": {"en":{"voice":"en-US-AriaNeural"}}
}
SCENES

node "$SCRIPT_DIR/scripts/generate-subs.cjs" \
  --scenes "$WORKDIR/scenes.json" \
  --languages '["en"]' \
  --output-dir "$WORKDIR/subs/"

# === Phase 8: Summary ===
echo ""
echo "=== Phase 8: Summary ==="
echo ""

OUT_SIZE=$(du -h "$FINAL_OUTPUT" 2>/dev/null | cut -f1)
V_SIZE=$(du -h "$VIDEO_MASTER" 2>/dev/null | cut -f1)
A_SIZE=$(du -h "$AUDIO_MASTER" 2>/dev/null | cut -f1)

echo "Video master: $VIDEO_MASTER (${V_SIZE})"
echo "Audio master: $AUDIO_MASTER (${A_SIZE})"
echo "Final output: $FINAL_OUTPUT (${OUT_SIZE})"
echo "Duration:     ${V_DUR}s"
echo ""

# Publish the final render to the user's Videos directory.
VIDEOS_DIR="$HOME/Videos"
mkdir -p "$VIDEOS_DIR"
cp "$FINAL_OUTPUT" "$VIDEOS_DIR/demo-e2e.mp4"
echo "Copied to: $VIDEOS_DIR/demo-e2e.mp4"
echo ""

# With --open, open immediately; otherwise prompt interactively.
# NOTE(review): under `set -e`, `read -p` aborts the script on EOF
# (e.g. non-interactive stdin) — confirm interactive use is assumed.
if [ "${1:-}" = "--open" ]; then
  xdg-open "$FINAL_OUTPUT" 2>/dev/null || open "$FINAL_OUTPUT" 2>/dev/null || true
else
  read -p "Open video now? [y/N] " OPEN_VIDEO
  if [ "$OPEN_VIDEO" = "y" ] || [ "$OPEN_VIDEO" = "Y" ]; then
    xdg-open "$FINAL_OUTPUT" 2>/dev/null || open "$FINAL_OUTPUT" 2>/dev/null || echo "File at: $FINAL_OUTPUT"
  fi
fi
@@ -0,0 +1,36 @@
1
+ {
2
+ "metadata": {
3
+ "title": "Test Demo",
4
+ "output_file": "test-demo.mp4",
5
+ "resolution": "1920x1080",
6
+ "frame_rate": 30
7
+ },
8
+ "languages": {
9
+ "en": { "voice": "en-US-AriaNeural" },
10
+ "zh": { "voice": "zh-CN-XiaoxiaoNeural" }
11
+ },
12
+ "scenes": [
13
+ {
14
+ "id": 1,
15
+ "type": "terminal_command",
16
+ "command": "echo 'hello demo'",
17
+ "cwd": "/tmp",
18
+ "speed": 3,
19
+ "narration": {
20
+ "en": "Running the first command.",
21
+ "zh": "运行第一个命令。"
22
+ }
23
+ },
24
+ {
25
+ "id": 2,
26
+ "type": "narration_only",
27
+ "bullets": ["Feature one", "Feature two"],
28
+ "duration": 5,
29
+ "transition": "fade",
30
+ "narration": {
31
+ "en": "Key features include.",
32
+ "zh": "主要功能包括。"
33
+ }
34
+ }
35
+ ]
36
+ }
@@ -0,0 +1,2 @@
1
+ echo hello
2
+ hello
@@ -0,0 +1,13 @@
1
+ 0.000001 1
2
+ 0.050000 3
3
+ 0.120000 2
4
+ 0.200000 5
5
+ 0.300000 1
6
+ 0.450000 12
7
+ 0.550000 7
8
+ 0.620000 4
9
+ 0.700000 9
10
+ 0.800000 6
11
+ 0.900000 8
12
+ 1.000000 3
13
+ 1.100000 2
@@ -0,0 +1,67 @@
1
const path = require('path')
const fs = require('fs')
const { execSync } = require('child_process')
const { generateSrt } = require('../scripts/generate-subs.cjs')

describe('generate-subs.js — SRT subtitle generation', () => {
  const OUT_DIR = '/tmp/demo-test-subs'

  beforeEach(() => {
    fs.mkdirSync(OUT_DIR, { recursive: true })
  })

  afterEach(() => {
    fs.rmSync(OUT_DIR, { recursive: true, force: true })
  })

  // Two back-to-back scenes: cue 2 must start where cue 1 ends (5s).
  const SCENES = [
    { id: 1, duration: 5, narration: { en: 'Hello', zh: '你好' } },
    { id: 2, duration: 3, narration: { en: 'World', zh: '世界' } },
  ]

  test('GS01: generates English SRT with correct timing', () => {
    // NOTE(review): canonical SRT uses comma decimals (00:00:05,000); this
    // pins the dot format the generator currently emits — confirm intended.
    const srt = generateSrt(SCENES, 'en')
    expect(srt).toContain('Hello')
    expect(srt).toContain('World')
    expect(srt).toContain('00:00:05.00 --> 00:00:08.00')
  })

  test('GS02: Chinese SRT uses Chinese narration text', () => {
    const srt = generateSrt(SCENES, 'zh')
    expect(srt).toContain('你好')
    expect(srt).toContain('世界')
    expect(srt).not.toContain('Hello')
  })

  test('GS03: missing narration lang falls back to English', () => {
    const englishOnly = [{ id: 1, duration: 3, narration: { en: 'Fallback text' } }]
    expect(generateSrt(englishOnly, 'fr')).toContain('Fallback text')
  })

  test('GS04: empty scenes list produces empty SRT', () => {
    expect(generateSrt([], 'en')).toBe('')
  })

  test('GS05: correct cue numbering', () => {
    const lines = generateSrt(SCENES, 'en').split('\n')
    expect(lines[0]).toBe('1')
    expect(lines[4]).toBe('2')
  })

  test('GS06: CLI invocation writes SRT files', () => {
    // Exercise the script end-to-end via its command-line entry point.
    const scenesJson = path.join(OUT_DIR, 'scenes.json')
    fs.writeFileSync(scenesJson, JSON.stringify({ scenes: SCENES }))

    const scriptPath = path.join(__dirname, '..', 'scripts', 'generate-subs.cjs')
    const cmd = `node '${scriptPath}' --scenes '${scenesJson}' --languages '["en","zh"]' --output-dir '${OUT_DIR}'`
    execSync(cmd, { encoding: 'utf-8', timeout: 10000 })
    expect(fs.existsSync(path.join(OUT_DIR, 'demo.en.srt'))).toBe(true)
    expect(fs.existsSync(path.join(OUT_DIR, 'demo.zh.srt'))).toBe(true)
  })
})
@@ -0,0 +1,58 @@
1
const path = require('path')
const fs = require('fs')
const { runScript } = require('./helpers/run-script')

const TTS_SCRIPT = 'scripts/generate-tts.sh'

describe('generate-tts.sh — edge-tts audio generation', () => {
  const workDir = '/tmp/demo-test-tts'

  beforeAll(() => {
    fs.mkdirSync(workDir, { recursive: true })
  })

  afterAll(() => {
    fs.rmSync(workDir, { recursive: true, force: true })
  })

  // True when the edge-tts Python CLI is importable on this machine.
  const edgeTtsAvailable = () => {
    try {
      require('child_process').execSync('python3 -m edge_tts --version', { stdio: 'ignore' })
      return true
    } catch {
      return false
    }
  }

  test('GT01: generates MP3 from English text', () => {
    const outFile = path.join(workDir, 'test.mp3')

    // Outside CI, skip silently when edge-tts is not installed locally.
    if (!process.env.CI && !edgeTtsAvailable()) return

    const result = runScript(TTS_SCRIPT, [
      '--text', 'Hello world',
      '--voice', 'en-US-AriaNeural',
      '--output', outFile,
    ])
    expect(result.exitCode).toBe(0)
    expect(fs.existsSync(outFile)).toBe(true)
    expect(fs.statSync(outFile).size).toBeGreaterThan(500)
  })

  test('GT02: empty text still runs without crash', () => {
    const result = runScript(TTS_SCRIPT, [
      '--text', '',
      '--voice', 'en-US-AriaNeural',
      '--output', path.join(workDir, 'empty.mp3'),
    ])
    // edge-tts may produce a silent file or error — either is acceptable
    expect([0, 1]).toContain(result.exitCode)
  })

  test('GT03: missing --output exits with usage', () => {
    const result = runScript(TTS_SCRIPT, ['--text', 'Hello', '--voice', 'en-US-AriaNeural'])
    expect(result.exitCode).toBe(1)
  })
})
@@ -0,0 +1,33 @@
1
const { execSync } = require('child_process')
const path = require('path')

// Skill root: helpers/ lives under test/, so two levels up.
const SKILL_DIR = path.resolve(__dirname, '../..')

/**
 * Run a skill script synchronously and capture its outcome.
 *
 * @param {string} scriptRelative - Path relative to the skill directory.
 * @param {string[]} [args] - CLI arguments (single-quote escaped for bash).
 * @param {{timeout?: number, env?: object}} [opts]
 * @returns {{stdout: string, stderr: string, exitCode: number}}
 */
function runScript(scriptRelative, args = [], opts = {}) {
  const scriptPath = path.resolve(SKILL_DIR, scriptRelative)
  const timeout = opts.timeout || 30000
  const env = opts.env || process.env

  const isJs = scriptPath.endsWith('.js') || scriptPath.endsWith('.cjs')
  const isSh = scriptPath.endsWith('.sh')
  // POSIX shell single-quote escaping: close quote, escaped quote, reopen.
  const quoted = args.map(a => `'${String(a).replace(/'/g, "'\\''")}'`).join(' ')
  // Fix: run .sh files through bash explicitly (mirrors how e2e-test.sh
  // invokes them) so they work even without the executable bit, which
  // packaging/checkout frequently drops.
  let cmd
  if (isJs) {
    cmd = `node '${scriptPath}' ${quoted}`
  } else if (isSh) {
    cmd = `bash '${scriptPath}' ${quoted}`
  } else {
    cmd = `'${scriptPath}' ${quoted}`
  }

  try {
    const stdout = execSync(cmd, {
      encoding: 'utf-8',
      timeout,
      env,
      stdio: ['pipe', 'pipe', 'pipe'],
      shell: '/bin/bash',
    })
    return { stdout: stdout.trim(), stderr: '', exitCode: 0 }
  } catch (err) {
    return {
      stdout: (err.stdout || '').toString().trim(),
      stderr: (err.stderr || '').toString().trim(),
      // err.status is null (not undefined) when the child was killed by a
      // signal; the original `!== undefined` check leaked null to callers.
      exitCode: typeof err.status === 'number' ? err.status : 1,
    }
  }
}

module.exports = { runScript, SKILL_DIR }
@@ -0,0 +1,110 @@
1
const path = require('path')
const fs = require('fs')
const { execSync } = require('child_process')
const { assembleVideo, assembleAudio, assembleFinal, getDuration } = require('../scripts/assemble.cjs')
const { runScript } = require('./helpers/run-script')

const SKILL_DIR = path.resolve(__dirname, '..')
const FIXTURES = path.resolve(__dirname, 'fixtures')

// End-to-end checks that chain the render / subtitle / assemble scripts
// together against small fixtures and ffmpeg-synthesized media.
describe('integration — full pipeline', () => {
  const outDir = '/tmp/demo-test-integration'
  const clipDir = path.join(outDir, 'clips')
  const audioDir = path.join(outDir, 'audio')
  const subsDir = path.join(outDir, 'subs')
  const htmlDir = path.join(outDir, 'html')

  // Probe for ffmpeg on PATH; media-assembly tests are skipped without it.
  function haveFfmpeg() {
    try { execSync('ffmpeg -version', { stdio: 'ignore', timeout: 5000 }); return true }
    catch { return false }
  }

  // Probe for Playwright (currently unused by the tests below).
  function havePlaywright() {
    try { execSync('npx playwright --version', { stdio: 'ignore', timeout: 5000 }); return true }
    catch { return false }
  }

  beforeAll(() => {
    fs.mkdirSync(clipDir, { recursive: true })
    fs.mkdirSync(audioDir, { recursive: true })
    fs.mkdirSync(subsDir, { recursive: true })
    fs.mkdirSync(htmlDir, { recursive: true })
  })

  afterAll(() => {
    fs.rmSync(outDir, { recursive: true, force: true })
  })

  test('IT01: render terminal scene + slide scene + subs', () => {
    // Step 1 — Render terminal HTML from the recorded fixture session
    const termHtml = path.join(htmlDir, 'terminal.html')
    const termResult = runScript('scripts/render-terminal.cjs', [
      '--timing', path.join(FIXTURES, 'hello.timing'),
      '--output', path.join(FIXTURES, 'hello.output'),
      '--command', 'echo integration-test',
      '--speed', '3',
      '--html', termHtml,
    ])
    expect(termResult.exitCode).toBe(0)
    expect(fs.existsSync(termHtml)).toBe(true)

    // Step 2 — Render narration slide
    const slideHtml = path.join(htmlDir, 'slide.html')
    const slideResult = runScript('scripts/render-slide.cjs', [
      '--title', 'Integration Test',
      '--bullets', '["Passed","Verified"]',
      '--output', slideHtml,
    ])
    expect(slideResult.exitCode).toBe(0)
    expect(fs.existsSync(slideHtml)).toBe(true)

    // Step 3 — Generate subtitles from the two-scene fixture, both languages
    const scenesFixture = path.join(FIXTURES, 'demo-scenes.json')
    const subsResult = runScript('scripts/generate-subs.cjs', [
      '--scenes', scenesFixture,
      '--languages', '["en","zh"]',
      '--output-dir', subsDir,
    ])
    expect(subsResult.exitCode).toBe(0)
    expect(fs.existsSync(path.join(subsDir, 'demo.en.srt'))).toBe(true)
    const enSrt = fs.readFileSync(path.join(subsDir, 'demo.en.srt'), 'utf-8')
    expect(enSrt).toContain('Running the first command.')
    expect(enSrt).toContain('Key features include.')
  })

  test('IT02: assembly with empty manifest throws error', () => {
    expect(() => {
      assembleVideo({ scenes: [] }, path.join(outDir, 'empty.mp4'), true)
    }).toThrow(/No valid clips/)
  })

  test('IT03: assemble video + audio + final with synthetic clips', () => {
    if (!haveFfmpeg()) return

    const clip = path.join(clipDir, 'test_clip.mp4')
    const audio = path.join(audioDir, 'test_audio.wav')

    // Synthesize a 2s blue clip and a 2s 440Hz sine tone as stand-ins
    // for real scene media.
    execSync(`ffmpeg -y -f lavfi -i "color=c=blue:s=1920x1080:d=2" -c:v libx264 -preset ultrafast -crf 28 "${clip}"`, { stdio: 'ignore', timeout: 15000 })
    execSync(`ffmpeg -y -f lavfi -i "sine=frequency=440:duration=2" -c:a pcm_s16le "${audio}"`, { stdio: 'ignore', timeout: 15000 })

    const manifest = {
      scenes: [{ clip, duration: 2, audio }],
    }

    const videoMaster = path.join(outDir, 'it03_video.mp4')
    const audioMaster = path.join(outDir, 'it03_audio.m4a')
    const finalOut = path.join(outDir, 'it03_final.mp4')

    // Duration assertions use >1 rather than ==2 to tolerate codec padding.
    assembleVideo(manifest, videoMaster, true)
    expect(fs.existsSync(videoMaster)).toBe(true)
    expect(getDuration(videoMaster)).toBeGreaterThan(1)

    assembleAudio(manifest, audioMaster, true)
    expect(fs.existsSync(audioMaster)).toBe(true)
    expect(getDuration(audioMaster)).toBeGreaterThan(1)

    assembleFinal({ video: videoMaster, audio: audioMaster, output: finalOut, 'keep-raw': 'true' })
    expect(fs.existsSync(finalOut)).toBe(true)
    expect(getDuration(finalOut)).toBeGreaterThan(1)
  })
})
@@ -0,0 +1,6 @@
1
// Jest configuration for the demo-video skill test suite.
module.exports = {
  // rootDir is the skill directory (this file lives in test/), so the
  // testMatch glob below resolves against skills/demo-video.
  rootDir: '../',
  testMatch: ['<rootDir>/test/**/*.test.js'],
  // Tests shell out to external tools (ffmpeg, node scripts); allow 30s each.
  testTimeout: 30000,
  verbose: true,
}