@lightcone-ai/daemon 0.19.0 → 0.21.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -272,16 +272,15 @@ server.tool(
  // tails, and re-records (Task #25/#26 trial).
  server.tool(
  'compose_video_v2',
- 'Compose a video from a list of segments using ffmpeg. Each segment has a visual source (image / scroll / '
- + 'carousel / video / gif), optional audio, and optional subtitle text. Subtitles are burned in when '
- + 'subtitle_text is provided AND burn_subtitles is not false. Segments are concatenated in order; outro clips are appended after.\n\n'
+ 'Compose video(s) from a list of segments using ffmpeg. Each segment has a visual source (image / scroll / '
+ + 'carousel / video / gif), optional audio, and optional subtitle text. Segments are concatenated in order; '
+ + 'outro clips are appended after.\n\n'
  + 'When any segment has audio_path, MUST be preceded by plan_video_segments in the same session '
- + '(plan_video_segments fills duration/subtitle_text/audio_path mechanically; manual alignment is rejected). '
- + 'Returns a local mp4 path + size_bytes.\n\n'
- + 'Dual / multi-version delivery (e.g. subtitled+voiced + clean silent): pass the variants[] array one call '
- + 'runs the heavy per-segment ffmpeg work ONCE and only diverges at audio mux + concat + subtitle burn per '
- + 'variant. That is ~1.2-1.4× single-version time vs ~2× when calling this tool twice. Each variant chooses '
- + 'its own burn_subtitles and include_audio independently.',
+ + '(plan_video_segments fills duration/subtitle_text/audio_path mechanically; manual alignment is rejected).\n\n'
+ + 'Outputs are controlled by variants[], which is ALWAYS required. Single output is variants:[{output_path:"..."}]. '
+ + 'Multi-output (e.g. subtitled+voiced + clean silent with no subtitles) is variants:[{output_path:"sub.mp4"}, {output_path:"clean.mp4", burn_subtitles:false, include_audio:false}]. '
+ + 'The heavy per-segment ffmpeg work runs ONCE across all variants; only audio mux + concat + subtitle burn '
+ + 'repeat per variant. Two-variant delivery is ~1.2-1.4× single-variant time, not 2×.',
  {
  segments: z.array(z.object({
  visual_path: z.string().optional().describe('Absolute path to a single image / video / gif.'),
@@ -296,20 +296,15 @@ server.tool(
  subtitle_text: z.string().optional().describe('Narration text to burn as subtitle. Displayed for the full segment duration.'),
  transition: z.enum(['cut', 'fade', 'crossfade']).optional().describe('Transition to next segment. Default cut.'),
  })).describe('Ordered list of video segments.'),
- outro_paths: z.array(z.string()).optional().describe('Absolute paths to outro video clips appended at end.'),
+ outro_paths: z.array(z.string()).optional().describe('Absolute paths to outro video clips appended at end (shared across all variants).'),
  resolution: z.string().optional().describe('Output resolution WxH. Default "1080x1920".'),
- output_path: z.string().optional().describe('Absolute output path (single-output mode). Auto-generated if omitted. Ignored when variants[] is provided.'),
- burn_subtitles: z.boolean().optional().describe('Single-output mode only: whether to burn subtitle_text. Default true. '
- + 'For producing multiple variants in one call, use variants[] instead.'),
  variants: z.array(z.object({
  output_path: z.string().describe('Absolute output path for this variant. Each variant must use a unique path.'),
  burn_subtitles: z.boolean().optional().describe('Whether to burn subtitle_text into THIS variant. Default true.'),
  include_audio: z.boolean().optional().describe('Whether to mux segment.audio_path into THIS variant. Default true. '
- + 'Pass false for a fully silent copy (skips audio mux entirely; segment.audio_path is ignored for this variant).'),
- })).optional().describe('Multi-output mode: one call produces all variants. '
- + 'Visual segment processing (the heavy work) runs once; each variant only repeats audio mux + concat + optional subtitle burn. '
- + 'Typical use: [{output_path:"with-sub.mp4"}, {output_path:"clean.mp4", burn_subtitles:false, include_audio:false}] '
- + 'to deliver a subtitled+voiced version and a silent clean version together.'),
+ + 'Pass false for a fully silent copy (segment.audio_path ignored for this variant).'),
+ })).min(1).describe('Required: one entry per output file. Single output = 1-element array. '
+ + 'Multi-output dual delivery example: [{output_path:"with-sub-voice.mp4"}, {output_path:"clean.mp4", burn_subtitles:false, include_audio:false}].'),
  },
  async (args) => {
  const segments = Array.isArray(args?.segments) ? args.segments : [];
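
To make the single-shape contract concrete, a minimal sketch of a dual-variant call follows; callTool is a hypothetical MCP client helper, and every path and narration string is an invented placeholder rather than a value taken from the package:

    // Hypothetical invocation of compose_video_v2 under the 0.21.0 schema:
    // variants[] is always present, and a clean silent copy is just a second variant.
    await callTool('compose_video_v2', {
      segments: [
        { visual_path: '/abs/section-0.mp4', audio_path: '/abs/section-0.wav', subtitle_text: 'Narration for section 0.' },
        { visual_path: '/abs/section-1.mp4', audio_path: '/abs/section-1.wav', subtitle_text: 'Narration for section 1.', transition: 'fade' },
      ],
      outro_paths: ['/abs/outro.mp4'],
      resolution: '1080x1920',
      variants: [
        { output_path: '/abs/with-sub-voice.mp4' },                                   // subtitled + voiced
        { output_path: '/abs/clean.mp4', burn_subtitles: false, include_audio: false }, // silent, no subtitles
      ],
    });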
@@ -348,7 +342,7 @@ server.tool(
  // audio in production runs (Tasks #20/#25/#26), forcing re-records.
  server.tool(
  'record_url_narration',
- 'Record a silent video of a URL by driving Chromium on an Xvfb display and capturing it with Playwright recordVideo, driven by a video plan; ffmpeg then transcodes the recording to mp4. Outputs a silent mp4 that can be passed to compose_video_v2 as a video-kind segment with an audio_path for narration.\n\nUse this as the canonical recording step for URL-narration videos. Falls back: if the page needs interactions outside the visual_action vocabulary (clicks, waits, OCR loops), use Monitor (Bash) with custom Playwright instead.\n\nMUST be preceded by plan_video_segments in the same session — feed plan_video_segments\'s `segments` array as `plan.sections` so dwell_ms aligns mechanically with TTS audio_duration_ms (hand-written dwell_ms has drifted and forced re-records in production).\n\nRuntime requirements: this tool only works on a Linux daemon machine with Xvfb + Chromium + ffmpeg installed (ffmpeg is used to transcode the recording to mp4; no x11grab device support needed). macOS / Windows daemons will fail at startup.',
+ 'Record silent mp4s of a URL by driving Chromium on an Xvfb display and capturing it with Playwright recordVideo, which ffmpeg then transcodes to mp4. Each output mp4 can be passed to compose_video_v2 as a video-kind segment with an audio_path for narration.\n\nUse this as the canonical recording step for URL-narration videos. Fallback: if the page needs interactions outside the visual_action vocabulary (clicks, waits, OCR loops), use Monitor (Bash) with custom Playwright instead.\n\nMUST be preceded by plan_video_segments in the same session — feed plan_video_segments\'s `segments` array as `plan.sections` so dwell_ms aligns mechanically with TTS audio_duration_ms (hand-written dwell_ms has drifted and forced re-records in production).\n\nALWAYS pass output_paths as an array with one mp4 path per plan.sections entry (single-section recording is a 1-element array). The tool records the URL ONCE continuously (one browser session, one scrollTop, natural scroll flow through all sections), then slices the recording at section boundaries via ffmpeg. There is NO mode that records N sections in N separate calls — that pattern reopened the browser and re-scrolled-from-top for each segment, which looked visually disjointed. One URL = one call.\n\nRuntime requirements: this tool only works on a Linux daemon machine with Xvfb + Chromium + ffmpeg installed (ffmpeg is used to transcode the recording to mp4; no x11grab device support needed). macOS / Windows daemons will fail at startup.',
  {
  url: z.string().describe('Page URL to record'),
  plan: z.record(z.any()).describe(
@@ -367,8 +361,9 @@ server.tool(
  + 'frag.short.recruitment_url_mode_policy). Pick a different target_y in the title/job-posting '
  + 'information area and rewrite that section.'
  ),
- output_path: z.string().optional().describe('Workspace-relative output mp4 path. Default tmp/wx3_video/recorded-{ts}.mp4'),
- events_path: z.string().optional().describe('Workspace-relative events.json path. Default ${output_path}.events.json'),
+ output_paths: z.array(z.string()).min(1).describe('REQUIRED. Workspace-relative mp4 paths, one per plan.sections entry (single-section is a 1-element array). The tool records ONCE continuously and slices the result at section boundaries (derived from phase_start / phase_end events) — each section produces exactly one of these mp4s.'),
+ output_path: z.string().optional().describe('Optional debug-only path for the CONSOLIDATED master recording (the full continuous webm transcoded). Auto-generated under tmp/ if omitted. Agents normally do not need to set this — they consume output_paths.'),
+ events_path: z.string().optional().describe('Workspace-relative events.json path. Default ${master}.events.json'),
  viewport: z.object({
  width: z.number().optional(),
  height: z.number().optional(),
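
To make the output_paths contract concrete, a sketch of a two-section call follows; callTool, the URL, paths, and viewport values are hypothetical, and the plan shape is only what the descriptions above imply:

    // Hypothetical invocation: one URL, one continuous recording, two section slices.
    // planned.segments is assumed to be the segments array returned by
    // plan_video_segments earlier in the same session.
    await callTool('record_url_narration', {
      url: 'https://example.com/job-post',
      plan: { sections: planned.segments },
      output_paths: [
        'tmp/wx3_video/section-0.mp4',
        'tmp/wx3_video/section-1.mp4',  // length must equal plan.sections length
      ],
      viewport: { width: 390, height: 844 },
    });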
package/package.json CHANGED
@@ -1,6 +1,6 @@
  {
  "name": "@lightcone-ai/daemon",
- "version": "0.19.0",
+ "version": "0.21.0",
  "type": "module",
  "main": "src/index.js",
  "bin": {
@@ -249,54 +249,41 @@ async function applyFadeTransition({ clipA, clipB, tmpDir, style = 'fade' }) {
  return outPath;
  }

- // compose_video_v2 supports two modes:
+ // compose_video_v2 ONE shape: caller passes variants[]. Single-output is
+ // just variants of length 1. Multi-output (subtitled+voiced + clean silent)
+ // is the same call with more variants. There is no top-level output_path or
+ // burn_subtitles shortcut — it added a second pattern, and agents
+ // consistently defaulted to the simpler one even when multi-output was
+ // requested, so the dual-version optimization went unused.
  //
- // 1. Legacy single-output: pass output_path (+ optional burn_subtitles).
- // Returns { path, duration_ms, size_bytes, variants: [..1 entry..] }.
- //
- // 2. Multi-variant: pass variants=[{output_path, burn_subtitles?, include_audio?}, ...].
- // Visual segment processing runs ONCE (the heavy part — per-segment ffmpeg
- // transcode/scale/scroll). Each variant then diverges only at audio mux +
- // concat + subtitle burn — typically a few seconds per extra variant.
- // Returns { variants: [{path, duration_ms, size_bytes, burn_subtitles,
- // include_audio}, ...] }.
- //
- // Use the multi-variant mode when shipping the same content with different
- // subtitle/audio combinations (e.g. subtitled+voiced + clean silent). Calling
- // the legacy mode twice produces correct outputs but redoes per-segment work.
+ // Visual segment processing runs ONCE; each variant diverges only at audio
+ // mux + concat + subtitle burn (~seconds per extra variant).
  export async function composeVideoV2({
  segments = [],
  outro_paths = [],
  resolution = '1080x1920',
- output_path,
- burn_subtitles = true,
  variants,
  }) {
  if (!Array.isArray(segments) || segments.length === 0) {
  throw new Error('segments must be a non-empty array');
  }

- // Normalize variants. If caller did not pass an explicit variants array,
- // synthesize a single variant from the legacy output_path + burn_subtitles.
- // include_audio defaults to true (auto-include any segment.audio_path).
- const normalizedVariants = (Array.isArray(variants) && variants.length > 0)
- ? variants.map((v, idx) => {
- if (!v || typeof v !== 'object') {
- throw new Error(`variants[${idx}]: must be an object`);
- }
- const outPath = String(v.output_path ?? '').trim();
- if (!outPath) throw new Error(`variants[${idx}]: output_path is required`);
- return {
- output_path: outPath,
- burn_subtitles: v.burn_subtitles !== false,
- include_audio: v.include_audio !== false,
- };
- })
- : [{
- output_path: output_path ?? path.join(os.tmpdir(), `lightcone-video-${Date.now()}.mp4`),
- burn_subtitles: burn_subtitles !== false,
- include_audio: true,
- }];
+ if (!Array.isArray(variants) || variants.length === 0) {
+ throw new Error('variants must be a non-empty array. Single output is variants:[{output_path:"..."}].');
+ }
+
+ const normalizedVariants = variants.map((v, idx) => {
+ if (!v || typeof v !== 'object') {
+ throw new Error(`variants[${idx}]: must be an object`);
+ }
+ const outPath = String(v.output_path ?? '').trim();
+ if (!outPath) throw new Error(`variants[${idx}]: output_path is required`);
+ return {
+ output_path: outPath,
+ burn_subtitles: v.burn_subtitles !== false,
+ include_audio: v.include_audio !== false,
+ };
+ });

  // Disallow two variants writing to the same file — would race on disk.
  const seenOutputs = new Set();
@@ -485,15 +472,8 @@ export async function composeVideoV2({
  });
  }

- // Legacy single-output callers (didn't pass variants) get the same flat
- // shape they used to get, plus the variants array for forward-compat.
- const first = variantOutputs[0];
- return {
- path: first.path,
- duration_ms: first.duration_ms,
- size_bytes: first.size_bytes,
- variants: variantOutputs,
- };
+ // Always return variants[]. Single-output callers read variants[0].
+ return { variants: variantOutputs };
  } finally {
  await rm(tmpDir, { recursive: true, force: true });
  }
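
Since the flat single-output return shape is gone, a former single-output caller of composeVideoV2 migrates roughly like this sketch (segments and the output path are placeholders):

    // Hypothetical caller migration: the return value is now always { variants: [...] },
    // so code that read result.path reads result.variants[0] instead.
    const result = await composeVideoV2({
      segments,
      variants: [{ output_path: '/abs/out.mp4' }],
    });
    const { path: outPath, duration_ms, size_bytes } = result.variants[0];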
@@ -215,12 +215,128 @@ async function transcodeWebmToMp4({
  });
  }

+ // Frame-accurate slice of an mp4 — re-encodes to honour the exact start/end
+ // instead of snapping to the nearest keyframe (which `-c copy` would do, and
+ // can drift by several seconds with libx264's default ~250-frame GOP).
+ // Re-encoding short clips (≤30s) at preset=veryfast is fast (<1s typical),
+ // so we trade a bit of CPU for being able to align section cuts to the
+ // per-segment TTS the rest of the pipeline expects.
+ async function cutMp4Slice({
+ inputPath,
+ outputPath,
+ startMs,
+ durationMs,
+ fps = DEFAULT_FPS,
+ ffmpegBin = 'ffmpeg',
+ } = {}) {
+ const startSec = Math.max(0, Number(startMs) || 0) / 1000;
+ const durationSec = Math.max(0.05, Number(durationMs) || 0) / 1000;
+ const args = [
+ '-y',
+ '-i', inputPath,
+ '-ss', startSec.toFixed(3),
+ '-t', durationSec.toFixed(3),
+ '-an',
+ '-c:v', 'libx264',
+ '-preset', 'veryfast',
+ '-pix_fmt', 'yuv420p',
+ ...(Number.isFinite(Number(fps)) && Number(fps) > 0 ? ['-r', String(fps)] : []),
+ '-movflags', '+faststart',
+ outputPath,
+ ];
+ await new Promise((resolve, reject) => {
+ const proc = spawn(ffmpegBin, args, { stdio: ['ignore', 'pipe', 'pipe'] });
+ const errChunks = [];
+ proc.stderr?.on('data', (chunk) => errChunks.push(chunk));
+ proc.once('error', (err) => {
+ const wrapped = new Error(`ffmpeg_spawn_failed:${err.message}`);
+ wrapped.code = 'FFMPEG_SPAWN_FAILED';
+ reject(wrapped);
+ });
+ proc.on('close', (code) => {
+ if (code === 0) return resolve();
+ const wrapped = new Error(
+ `ffmpeg_cut_failed:code=${code}: ${Buffer.concat(errChunks).toString().slice(-2000)}`
+ );
+ wrapped.code = 'FFMPEG_CUT_FAILED';
+ reject(wrapped);
+ });
+ });
+ }
+
+ // Derive per-section cut points from eventsLog. phase_start.t_ms / phase_end.t_ms
+ // are recorded against the trimmed mp4 timeline (head trim already happened),
+ // so we can use them as-is.
+ function deriveSectionCutPoints(eventsLog, phaseCount) {
+ if (!Array.isArray(eventsLog) || eventsLog.length === 0) {
+ throw new Error('events_log_empty');
+ }
+ const starts = new Map();
+ const ends = new Map();
+ for (const ev of eventsLog) {
+ if (!ev || typeof ev !== 'object') continue;
+ const id = ev.phase_id;
+ const t = Number(ev.t_ms);
+ if (!id || !Number.isFinite(t)) continue;
+ if (ev.action === 'phase_start' && !starts.has(id)) starts.set(id, t);
+ if (ev.action === 'phase_end') ends.set(id, t);
+ }
+ // Walk phases in order to preserve plan ordering even if events arrived
+ // out-of-order (they shouldn't, but guard against it).
+ const orderedIds = [];
+ for (const ev of eventsLog) {
+ if (ev?.action === 'phase_start' && !orderedIds.includes(ev.phase_id)) {
+ orderedIds.push(ev.phase_id);
+ }
+ }
+ if (orderedIds.length !== phaseCount) {
+ throw new Error(`events_phase_count_mismatch:expected=${phaseCount}:got=${orderedIds.length}`);
+ }
+ return orderedIds.map((id) => {
+ const startMs = starts.get(id);
+ const endMs = ends.get(id);
+ if (!Number.isFinite(startMs) || !Number.isFinite(endMs)) {
+ throw new Error(`phase_timing_missing:${id}`);
+ }
+ if (endMs <= startMs) {
+ throw new Error(`phase_timing_invalid:${id}:start=${startMs}:end=${endMs}`);
+ }
+ return { phase_id: id, start_ms: startMs, end_ms: endMs, duration_ms: endMs - startMs };
+ });
+ }
+
+ function normalizeOutputPaths(rawList) {
+ if (rawList == null) return null;
+ if (!Array.isArray(rawList)) {
+ const error = new Error('output_paths_must_be_array');
+ error.code = 'OUTPUT_PATHS_MUST_BE_ARRAY';
+ throw error;
+ }
+ if (rawList.length === 0) return null;
+ return rawList.map((entry, idx) => {
+ const normalized = normalizeText(entry);
+ if (!normalized) {
+ const error = new Error(`output_paths[${idx}]_empty`);
+ error.code = 'OUTPUT_PATHS_ENTRY_EMPTY';
+ throw error;
+ }
+ return path.resolve(normalized);
+ });
+ }
+
327
  export async function recordUrlNarration({
219
328
  plan,
220
329
  output_path,
221
330
  outputPath = output_path,
222
331
  events_path,
223
332
  eventsPath = events_path,
333
+ // Multi-section output: pass an array of N paths matching plan.sections length
334
+ // to record once continuously and slice the result into N per-section mp4s.
335
+ // The browser stays open for the whole recording, so visuals flow naturally
336
+ // between sections (no scroll-back-to-top between each, no page reload). When
337
+ // omitted, behaves exactly like before — single mp4 at outputPath.
338
+ output_paths,
339
+ outputPaths = output_paths,
224
340
  url,
225
341
  viewport = DEFAULT_VIEWPORT,
226
342
  fps = DEFAULT_FPS,
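
To show how the helpers added above fit together, a hedged sketch: a hypothetical two-phase events log run through deriveSectionCutPoints, with the first cut fed to cutMp4Slice (all timings, phase ids, and paths are invented for illustration):

    // Hypothetical events recorded against the trimmed master mp4 timeline.
    const eventsLog = [
      { action: 'phase_start', phase_id: 'intro', t_ms: 0 },
      { action: 'phase_end', phase_id: 'intro', t_ms: 4200 },
      { action: 'phase_start', phase_id: 'detail', t_ms: 4200 },
      { action: 'phase_end', phase_id: 'detail', t_ms: 11650 },
    ];
    const cuts = deriveSectionCutPoints(eventsLog, 2);
    // -> [ { phase_id: 'intro', start_ms: 0, end_ms: 4200, duration_ms: 4200 },
    //      { phase_id: 'detail', start_ms: 4200, end_ms: 11650, duration_ms: 7450 } ]
    await cutMp4Slice({
      inputPath: 'master.mp4',
      outputPath: 'section-0.mp4',
      startMs: cuts[0].start_ms,
      durationMs: cuts[0].duration_ms,
    });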
@@ -234,6 +350,7 @@ export async function recordUrlNarration({
  launchChromiumFn = launchChromiumMobile,
  openPageFn = openPageAndSettle,
  transcodeFn = transcodeWebmToMp4,
+ cutFn = cutMp4Slice,
  nowMs = () => Date.now(),
  } = {}) {
  const zoom = Number.isFinite(Number(page_zoom)) && Number(page_zoom) > 0 ? Number(page_zoom) : 1.1;
@@ -249,6 +366,28 @@ export async function recordUrlNarration({
  const resolvedUrl = resolveUrl({ url, plan });
  const normalizedViewport = normalizeViewport(viewport);
  const normalizedFps = normalizeInteger(fps, DEFAULT_FPS);
+ const resolvedOutputPaths = normalizeOutputPaths(outputPaths);
+ // output_paths is REQUIRED. Single-section recordings just pass an array
+ // of one. Removing the optional path forces 1:1 alignment with plan.sections
+ // and eliminates the "default to single output_path master" pattern that
+ // led agents to call this tool once per section instead of once per URL.
+ if (!resolvedOutputPaths) {
+ const error = new Error(
+ 'output_paths is required — one entry per plan.sections (single section is a 1-element array).',
+ );
+ error.code = 'OUTPUT_PATHS_REQUIRED';
+ throw error;
+ }
+ if (resolvedOutputPaths.length !== phases.length) {
+ const error = new Error(
+ `output_paths_count_mismatch:expected=${phases.length}:got=${resolvedOutputPaths.length}`,
+ );
+ error.code = 'OUTPUT_PATHS_COUNT_MISMATCH';
+ throw error;
+ }
+ for (const p of resolvedOutputPaths) {
+ mkdirSync(path.dirname(p), { recursive: true });
+ }

  mkdirSync(path.dirname(resolvedOutputPath), { recursive: true });
  mkdirSync(path.dirname(resolvedEventsPath), { recursive: true });
@@ -367,12 +506,45 @@ export async function recordUrlNarration({
  ? eventsLog.reduce((max, ev) => Math.max(max, Number(ev?.t_ms) || 0), 0)
  : 0;

+ // Slice the consolidated mp4 at section boundaries (derived from
+ // phase_start / phase_end events). All slices come from the SAME
+ // continuous recording, so the visual flow between sections stays
+ // natural — no browser reload, no scroll-back-to-top per segment.
+ const cutPoints = deriveSectionCutPoints(eventsLog, phases.length);
+ const sectionOutputs = [];
+ for (let i = 0; i < cutPoints.length; i += 1) {
+ const cut = cutPoints[i];
+ const outPath = resolvedOutputPaths[i];
+ await cutFn({
+ inputPath: resolvedOutputPath,
+ outputPath: outPath,
+ startMs: cut.start_ms,
+ durationMs: cut.duration_ms,
+ fps: normalizedFps,
+ });
+ const sliceStat = await stat(outPath);
+ if (!sliceStat.isFile() || sliceStat.size <= 0) {
+ const error = new Error(`section_slice_empty:${outPath}`);
+ error.code = 'SECTION_SLICE_EMPTY';
+ throw error;
+ }
+ sectionOutputs.push({
+ phase_id: cut.phase_id,
+ video_path: outPath,
+ start_ms: cut.start_ms,
+ end_ms: cut.end_ms,
+ duration_ms: cut.duration_ms,
+ size_bytes: Number(sliceStat.size ?? 0),
+ });
+ }
+
  return {
- video_path: resolvedOutputPath,
+ master_video_path: resolvedOutputPath,
  events_path: resolvedEventsPath,
  events_log: eventsLog,
  duration_ms: lastTms > 0 ? lastTms : null,
  display,
+ sections: sectionOutputs,
  };
  } catch (error) {
  primaryError = error;
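
The sections array returned here is what feeds compose_video_v2: each per-section mp4 becomes a video segment with its planned narration attached. A sketch under that assumption (recording, plannedSegments, sectionPaths, and the output path are hypothetical names, not identifiers from the package):

    // Hypothetical glue between the two tools: one sliced section per planned segment.
    const recording = await recordUrlNarration({ url, plan, output_paths: sectionPaths });
    const segments = recording.sections.map((s, i) => ({
      visual_path: s.video_path,                    // silent per-section mp4 from the slicer
      audio_path: plannedSegments[i].audio_path,    // TTS narration from plan_video_segments
      subtitle_text: plannedSegments[i].subtitle_text,
    }));
    const { variants } = await composeVideoV2({ segments, variants: [{ output_path: '/abs/final.mp4' }] });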
@@ -27,14 +27,31 @@ export async function runComposeVideoV2Tool({
  outro_paths,
  format,
  resolution,
+ variants,
+ // Trapping legacy params: agents that still pass these from older prompts
+ // need an explicit error so they migrate, not a silent fallback.
  output_path,
  burn_subtitles,
- variants,
  workspaceDir,
  }) {
+ if (output_path != null || burn_subtitles != null) {
+ return toolError(
+ 'compose_video_v2: output_path and burn_subtitles are no longer accepted at the top level. '
+ + 'Pass variants:[{output_path, burn_subtitles?, include_audio?}] — single output is a '
+ + '1-element array. See frag.short.video_synthesis_tools.',
+ );
+ }
+
  if (!Array.isArray(segments) || segments.length === 0) {
  return toolError('segments must be a non-empty array.');
  }
+ if (!Array.isArray(variants) || variants.length === 0) {
+ return toolError(
+ 'compose_video_v2: variants[] is required. Single output is variants:[{output_path:"..."}]. '
+ + 'Multi-output dual delivery (subtitled version + subtitle-free version) is variants:[{output_path:"sub.mp4"}, '
+ + '{output_path:"clean.mp4", burn_subtitles:false, include_audio:false}].',
+ );
+ }

  const imagePaths = [];
  for (let i = 0; i < segments.length; i++) {
@@ -69,34 +86,24 @@ export async function runComposeVideoV2Tool({
  }
  }

- // Normalize variants. If caller passed a variants[] array, that takes
- // priority — multi-output mode. Otherwise build a single-element variants
- // array from the legacy output_path + burn_subtitles params.
+ // Normalize variants. Each entry needs an output_path; flags default to
+ // burn_subtitles=true, include_audio=true.
  const outDir = workspaceDir
  ? path.join(workspaceDir, 'artifacts', 'video')
  : path.join(os.tmpdir(), 'lightcone-video');

- let normalizedVariants;
- if (Array.isArray(variants) && variants.length > 0) {
- normalizedVariants = variants.map((v, idx) => {
- if (!v || typeof v !== 'object') {
- return null; // surfaced below
- }
- const outPath = String(v.output_path ?? '').trim()
- || path.join(outDir, `composed-${Date.now()}-${idx}-${randomUUID().slice(0, 8)}.mp4`);
- return {
- output_path: outPath,
- burn_subtitles: v.burn_subtitles !== false,
- include_audio: v.include_audio !== false,
- };
- });
- if (normalizedVariants.some(v => v === null)) {
- return toolError('variants must be an array of objects, each with { output_path, burn_subtitles?, include_audio? }.');
- }
- } else {
- const burnSubtitles = burn_subtitles !== false;
- const outPath = output_path ?? path.join(outDir, `composed-${Date.now()}-${randomUUID().slice(0, 8)}.mp4`);
- normalizedVariants = [{ output_path: outPath, burn_subtitles: burnSubtitles, include_audio: true }];
+ const normalizedVariants = variants.map((v, idx) => {
+ if (!v || typeof v !== 'object') return null;
+ const outPath = String(v.output_path ?? '').trim()
+ || path.join(outDir, `composed-${Date.now()}-${idx}-${randomUUID().slice(0, 8)}.mp4`);
+ return {
+ output_path: outPath,
+ burn_subtitles: v.burn_subtitles !== false,
+ include_audio: v.include_audio !== false,
+ };
+ });
+ if (normalizedVariants.some(v => v === null)) {
+ return toolError('variants must be an array of objects, each with { output_path, burn_subtitles?, include_audio? }.');
  }

  const warnings = [];
@@ -135,31 +142,16 @@ export async function runComposeVideoV2Tool({
  variants: normalizedVariants,
  });

- const outputs = Array.isArray(result?.variants) && result.variants.length > 0
- ? result.variants
- : [{ path: result.path, duration_ms: result.duration_ms, size_bytes: result.size_bytes,
- burn_subtitles: normalizedVariants[0].burn_subtitles,
- include_audio: normalizedVariants[0].include_audio }];
-
- const lines = ['compose_video_v2 completed.'];
- if (outputs.length === 1) {
- const v = outputs[0];
+ const outputs = Array.isArray(result?.variants) ? result.variants : [];
+ const lines = ['compose_video_v2 completed.', `variants=${outputs.length}`];
+ outputs.forEach((v, idx) => {
+ lines.push(`--- variant ${idx} ---`);
  lines.push(`path=${v.path}`);
  lines.push(`duration_ms=${v.duration_ms}`);
  lines.push(`size_bytes=${v.size_bytes ?? 'unknown'}`);
  lines.push(`burn_subtitles=${v.burn_subtitles}`);
  lines.push(`include_audio=${v.include_audio}`);
- } else {
- lines.push(`variants=${outputs.length}`);
- outputs.forEach((v, idx) => {
- lines.push(`--- variant ${idx} ---`);
- lines.push(`path=${v.path}`);
- lines.push(`duration_ms=${v.duration_ms}`);
- lines.push(`size_bytes=${v.size_bytes ?? 'unknown'}`);
- lines.push(`burn_subtitles=${v.burn_subtitles}`);
- lines.push(`include_audio=${v.include_audio}`);
- });
- }
+ });
  lines.push(`segments=${segments.length}`);
  lines.push(`outro_clips=${(outro_paths ?? []).length}`);
  for (const w of warnings) lines.push(w);
@@ -181,6 +181,21 @@ export function resolveRecordUrlNarrationPaths({
  };
  }

+ function resolveOutputPaths(rawList, { workspaceDir }) {
+ if (rawList == null) return null;
+ if (!Array.isArray(rawList)) {
+ throw new Error('output_paths must be an array of file paths (one per section).');
+ }
+ if (rawList.length === 0) return null;
+ return rawList.map((entry, idx) => {
+ const normalized = normalizeText(entry);
+ if (!normalized) {
+ throw new Error(`output_paths[${idx}] is empty — every entry must be a non-empty path.`);
+ }
+ return path.resolve(workspaceDir, normalized);
+ });
+ }
+
  export async function runRecordUrlNarrationTool({
  args = {},
  currentWorkspaceId = '',
@@ -242,33 +257,72 @@ export async function runRecordUrlNarrationTool({
  }

  try {
- const { resolvedOutputPath, resolvedEventsPath } = resolveRecordUrlNarrationPaths({
+ // output_paths is REQUIRED. The legacy "default output_path master file"
+ // mode is gone — agents kept defaulting to one-call-per-section because
+ // that was the lowest-friction path. Now every recording is sliced, even
+ // single-section ones (which are just a 1-element output_paths array).
+ let resolvedOutputPaths;
+ try {
+ resolvedOutputPaths = resolveOutputPaths(validatedInput.output_paths, { workspaceDir });
+ } catch (error) {
+ return toolError(`Error: ${error.message}`);
+ }
+ if (!resolvedOutputPaths) {
+ return toolError(
+ 'Error: output_paths is required — one workspace-relative mp4 path per plan.sections entry. '
+ + 'Single-section recording is a 1-element array. Multi-section recording records once '
+ + 'continuously (one browser session, one scrollTop) and slices the result at section '
+ + 'boundaries. See frag.short.video_synthesis_tools.',
+ );
+ }
+ const planSectionCount = (planSegments(validatedInput.plan) ?? []).length;
+ if (resolvedOutputPaths.length !== planSectionCount) {
+ return toolError(
+ `Error: output_paths length (${resolvedOutputPaths.length}) must match `
+ + `plan.sections length (${planSectionCount}). Each section produces exactly one mp4 — `
+ + `don't pad or truncate.`,
+ );
+ }
+
+ // The master / events JSON paths are agent-optional debug artifacts.
+ // Default master to a tmp path next to the first output; events default
+ // to <master>.events.json. Agent can override either if they care.
+ const { resolvedOutputPath: masterPath, resolvedEventsPath } = resolveRecordUrlNarrationPaths({
  workspaceDir,
  outputPath: validatedInput.output_path,
  eventsPath: validatedInput.events_path,
  nowMs,
  });
-
- mkdirSync(path.dirname(resolvedOutputPath), { recursive: true });
+ mkdirSync(path.dirname(masterPath), { recursive: true });
  mkdirSync(path.dirname(resolvedEventsPath), { recursive: true });

  const recorderOutput = await recordUrlNarrationFn({
  url: validatedInput.url,
  plan: validatedInput.plan,
- output_path: resolvedOutputPath,
+ output_path: masterPath,
  events_path: resolvedEventsPath,
+ output_paths: resolvedOutputPaths,
  viewport: validatedInput.viewport,
  fps: validatedInput.fps,
  settle_ms: validatedInput.settle_ms,
  });

- return toolText(
- `Recorded URL narration.\n`
- + `video_path=${resolvedOutputPath}\n`
- + `events_path=${resolvedEventsPath}\n`
- + `duration_ms=${deriveDurationMs(recorderOutput) ?? 'unknown'}\n`
- + `phases=${derivePhaseCount({ plan: validatedInput.plan, recorderOutput }) ?? 'n/a'}`
- );
+ const sections = Array.isArray(recorderOutput?.sections) ? recorderOutput.sections : [];
+ const lines = [
+ 'Recorded URL narration.',
+ `events_path=${resolvedEventsPath}`,
+ `master_video_path=${masterPath}`,
+ `total_duration_ms=${deriveDurationMs(recorderOutput) ?? 'unknown'}`,
+ `sections=${sections.length}`,
+ ];
+ sections.forEach((s, idx) => {
+ lines.push(`--- section ${idx} (${s.phase_id}) ---`);
+ lines.push(`video_path=${s.video_path}`);
+ lines.push(`start_ms=${s.start_ms}`);
+ lines.push(`duration_ms=${s.duration_ms}`);
+ lines.push(`size_bytes=${s.size_bytes ?? 'unknown'}`);
+ });
+ return toolText(lines.join('\n'));
  } catch (error) {
  return toolError(`Error: ${error.message}`);
  }
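
Rendered, the tool text assembled above comes out roughly like the following; every path, duration, and size is an invented placeholder shown only to illustrate the line format:

    Recorded URL narration.
    events_path=tmp/wx3_video/recorded-1700000000000.mp4.events.json
    master_video_path=tmp/wx3_video/recorded-1700000000000.mp4
    total_duration_ms=11650
    sections=2
    --- section 0 (intro) ---
    video_path=tmp/wx3_video/section-0.mp4
    start_ms=0
    duration_ms=4200
    size_bytes=812345
    --- section 1 (detail) ---
    video_path=tmp/wx3_video/section-1.mp4
    start_ms=4200
    duration_ms=7450
    size_bytes=1423001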