vargai 0.4.0-alpha36 → 0.4.0-alpha37

This diff shows the published contents of the two package versions as they appear in their public registries. It is provided for informational purposes only and reflects only the changes between the released versions.
@@ -0,0 +1,155 @@
+ /**
+  * Grok Imagine Video Test Example
+  *
+  * Run with: bun run examples/grok-imagine-test.tsx
+  *
+  * Tests all three Grok Imagine Video endpoints:
+  * 1. Text-to-Video
+  * 2. Image-to-Video
+  * 3. Edit Video
+  */
+
+ import { fal } from "@fal-ai/client";
+ import { falProvider } from "../src/providers/fal";
+
+ // Configure fal client
+ const apiKey = process.env.FAL_API_KEY ?? process.env.FAL_KEY;
+ if (!apiKey) {
+   console.error("Error: FAL_API_KEY or FAL_KEY environment variable required");
+   process.exit(1);
+ }
+ fal.config({ credentials: apiKey });
+
+ async function testTextToVideo() {
+   console.log("\n=== Testing Grok Text-to-Video ===\n");
+
+   const result = await falProvider.grokTextToVideo({
+     prompt:
+       "A majestic eagle soaring through clouds at sunset, cinematic lighting, slow motion",
+     duration: 6,
+     aspectRatio: "16:9",
+     resolution: "720p",
+   });
+
+   const data = result.data as { video?: { url?: string; duration?: number } };
+   console.log("Text-to-Video Result:");
+   console.log(" Video URL:", data?.video?.url);
+   console.log(" Duration:", data?.video?.duration);
+
+   return data?.video?.url;
+ }
+
+ async function testImageToVideo(imageUrl: string) {
+   console.log("\n=== Testing Grok Image-to-Video ===\n");
+
+   const result = await falProvider.grokImageToVideo({
+     prompt:
+       "The subject slowly turns their head and smiles, gentle wind blowing their hair",
+     imageUrl,
+     duration: 6,
+     aspectRatio: "auto",
+     resolution: "720p",
+   });
+
+   const data = result.data as { video?: { url?: string; duration?: number } };
+   console.log("Image-to-Video Result:");
+   console.log(" Video URL:", data?.video?.url);
+   console.log(" Duration:", data?.video?.duration);
+
+   return data?.video?.url;
+ }
+
+ async function testEditVideo(videoUrl: string) {
+   console.log("\n=== Testing Grok Edit Video ===\n");
+
+   const result = await falProvider.grokEditVideo({
+     prompt: "Add a vintage film grain effect and warm color grading",
+     videoUrl,
+     resolution: "auto",
+   });
+
+   const data = result.data as { video?: { url?: string; duration?: number } };
+   console.log("Edit Video Result:");
+   console.log(" Video URL:", data?.video?.url);
+   console.log(" Duration:", data?.video?.duration);
+
+   return data?.video?.url;
+ }
+
+ // Main execution
+ async function main() {
+   const args = process.argv.slice(2);
+   const mode = args[0] || "t2v";
+
+   console.log("Grok Imagine Video Test");
+   console.log("=======================");
+   console.log(`Mode: ${mode}`);
+
+   try {
+     switch (mode) {
+       case "t2v":
+       case "text-to-video": {
+         await testTextToVideo();
+         break;
+       }
+
+       case "i2v":
+       case "image-to-video": {
+         const imageUrl =
+           args[1] ||
+           "https://v3b.fal.media/files/b/0a8b90e0/BFLE9VDlZqsryU-UA3BoD_image_004.png";
+         await testImageToVideo(imageUrl);
+         break;
+       }
+
+       case "edit":
+       case "edit-video": {
+         const videoUrl =
+           args[1] ||
+           "https://v3b.fal.media/files/b/0a8b9112/V5Z_NIPE3ppMDWivNo6_q_video_019.mp4";
+         await testEditVideo(videoUrl);
+         break;
+       }
+
+       case "all": {
+         // Run all tests in sequence
+         const t2vUrl = await testTextToVideo();
+
+         // Use a sample image for i2v test
+         const sampleImage =
+           "https://v3b.fal.media/files/b/0a8b90e0/BFLE9VDlZqsryU-UA3BoD_image_004.png";
+         await testImageToVideo(sampleImage);
+
+         // Use the t2v result for edit test if available
+         if (t2vUrl) {
+           await testEditVideo(t2vUrl);
+         }
+         break;
+       }
+
+       default:
+         console.log(`
+ Usage: bun run examples/grok-imagine-test.tsx [mode] [url]
+
+ Modes:
+   t2v, text-to-video    Generate video from text prompt
+   i2v, image-to-video   Generate video from image (provide image URL)
+   edit, edit-video      Edit existing video (provide video URL)
+   all                   Run all tests
+
+ Examples:
+   bun run examples/grok-imagine-test.tsx t2v
+   bun run examples/grok-imagine-test.tsx i2v https://example.com/image.png
+   bun run examples/grok-imagine-test.tsx edit https://example.com/video.mp4
+   bun run examples/grok-imagine-test.tsx all
+ `);
+     }
+
+     console.log("\nTest completed successfully!");
+   } catch (error) {
+     console.error("\nTest failed:", error);
+     process.exit(1);
+   }
+ }
+
+ main();
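For reference, the provider methods exercised above wrap the fal queue API. A minimal sketch of calling the text-to-video endpoint directly with the fal client, using the same endpoint id and input shape the provider uses later in this diff (response fields beyond video.url are not guaranteed here):

    import { fal } from "@fal-ai/client";

    // Assumes credentials were already set via fal.config({ credentials: ... }).
    const { data } = await fal.subscribe("xai/grok-imagine-video/text-to-video", {
      input: {
        prompt: "A majestic eagle soaring through clouds at sunset",
        duration: 6, // seconds, 1-15 per the provider comments below
        aspect_ratio: "16:9",
        resolution: "720p",
      },
    });
    console.log((data as { video?: { url?: string } })?.video?.url);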
package/package.json CHANGED
@@ -68,7 +68,7 @@
      "sharp": "^0.34.5",
      "zod": "^4.2.1"
    },
-   "version": "0.4.0-alpha36",
+   "version": "0.4.0-alpha37",
    "exports": {
      ".": "./src/index.ts",
      "./ai": "./src/ai-sdk/index.ts",
@@ -48,6 +48,16 @@ const VIDEO_MODELS: Record<string, { t2v: string; i2v: string }> = {
      t2v: "fal-ai/ltx-2-19b/distilled/text-to-video",
      i2v: "fal-ai/ltx-2-19b/distilled/image-to-video",
    },
+   // Grok Imagine Video - xAI's video generation with audio
+   "grok-imagine": {
+     t2v: "xai/grok-imagine-video/text-to-video",
+     i2v: "xai/grok-imagine-video/image-to-video",
+   },
+ };
+
+ // Video edit models - video-to-video editing
+ const VIDEO_EDIT_MODELS: Record<string, string> = {
+   "grok-imagine-edit": "xai/grok-imagine-video/edit-video",
  };

  // Motion control models - video-to-video with motion transfer
@@ -72,6 +82,8 @@ const IMAGE_MODELS: Record<string, string> = {
    "nano-banana-pro": "fal-ai/nano-banana-pro",
    "nano-banana-pro/edit": "fal-ai/nano-banana-pro/edit",
    "seedream-v4.5/edit": "fal-ai/bytedance/seedream/v4.5/edit",
+   // Qwen Image Edit 2511 Multiple Angles - camera angle adjustment
+   "qwen-angles": "fal-ai/qwen-image-edit-2511-multiple-angles",
  };

  // Models that use image_size instead of aspect_ratio
@@ -82,6 +94,23 @@ const IMAGE_SIZE_MODELS = new Set([
    "seedream-v4.5/edit",
  ]);

+ // Qwen Angles model - image-to-image with camera angle adjustment
+ const QWEN_ANGLES_MODEL = "qwen-angles";
+
+ // Map aspect ratio to image_size for Qwen Angles (base dimension 1024)
+ const ASPECT_RATIO_TO_QWEN_SIZE: Record<
+   string,
+   { width: number; height: number }
+ > = {
+   "1:1": { width: 1024, height: 1024 },
+   "4:3": { width: 1024, height: 768 },
+   "3:4": { width: 768, height: 1024 },
+   "16:9": { width: 1024, height: 576 },
+   "9:16": { width: 576, height: 1024 },
+   "3:2": { width: 1024, height: 683 },
+   "2:3": { width: 683, height: 1024 },
+ };
+
  // Map aspect ratio strings to image_size enum values
  const ASPECT_RATIO_TO_IMAGE_SIZE: Record<string, string> = {
    "1:1": "square",
@@ -186,14 +215,18 @@ class FalVideoModel implements VideoModelV3 {

      const isLipsync = LIPSYNC_MODELS[this.modelId] !== undefined;
      const isMotionControl = MOTION_CONTROL_MODELS[this.modelId] !== undefined;
+     const isVideoEdit = VIDEO_EDIT_MODELS[this.modelId] !== undefined;
      const isKlingV26 = this.modelId === "kling-v2.6";
      const isLtx2 = this.modelId === "ltx-2-19b-distilled";
+     const isGrokImagine = this.modelId === "grok-imagine";

      const endpoint = isLipsync
        ? this.resolveLipsyncEndpoint()
        : isMotionControl
          ? this.resolveMotionControlEndpoint()
-         : this.resolveEndpoint(hasImageInput ?? false);
+         : isVideoEdit
+           ? this.resolveVideoEditEndpoint()
+           : this.resolveEndpoint(hasImageInput ?? false);

      const input: Record<string, unknown> = {
        ...(providerOptions?.fal ?? {}),
@@ -243,6 +276,22 @@ class FalVideoModel implements VideoModelV3 {
        if (input.keep_original_sound === undefined) {
          input.keep_original_sound = true;
        }
+     } else if (isVideoEdit) {
+       // Video edit: video input + prompt for editing instruction
+       input.prompt = prompt;
+
+       const videoFile = files?.find((f) =>
+         getMediaType(f)?.startsWith("video/"),
+       );
+
+       if (videoFile) {
+         input.video_url = await fileToUrl(videoFile);
+       }
+
+       // Grok Imagine Edit supports resolution: "auto", "480p", "720p"
+       if (!input.resolution) {
+         input.resolution = "auto";
+       }
      } else {
        // Standard video generation
        input.prompt = prompt;
@@ -263,6 +312,13 @@ class FalVideoModel implements VideoModelV3 {
      } else if (isKlingV26) {
        // Duration must be string "5" or "10" for Kling v2.6
        input.duration = String(duration ?? 5);
+     } else if (isGrokImagine) {
+       // Grok Imagine: duration 1-15 seconds (default 6)
+       input.duration = duration ?? 6;
+       // Grok Imagine supports resolution: "480p", "720p" (default "720p")
+       if (!input.resolution) {
+         input.resolution = "720p";
+       }
      } else {
        input.duration = duration ?? 5;
      }
@@ -400,6 +456,14 @@ class FalVideoModel implements VideoModelV3 {

      return MOTION_CONTROL_MODELS[this.modelId] ?? this.modelId;
    }
+
+   private resolveVideoEditEndpoint(): string {
+     if (this.modelId.startsWith("raw:")) {
+       return this.modelId.slice(4);
+     }
+
+     return VIDEO_EDIT_MODELS[this.modelId] ?? this.modelId;
+   }
  }

  class FalImageModel implements ImageModelV3 {
@@ -425,14 +489,28 @@ class FalImageModel implements ImageModelV3 {
      } = options;
      const warnings: SharedV3Warning[] = [];

+     const isQwenAngles = this.modelId === QWEN_ANGLES_MODEL;
+
      const input: Record<string, unknown> = {
-       prompt,
        num_images: n ?? 1,
-       // Use high acceleration for faster queue processing on supported models (flux-schnell)
-       acceleration: "high",
        ...(providerOptions?.fal ?? {}),
      };

+     // Qwen Angles uses additional_prompt instead of prompt
+     if (isQwenAngles) {
+       if (prompt) {
+         input.additional_prompt = prompt;
+       }
+       // Qwen Angles supports "regular" or "none" acceleration, not "high"
+       if (!input.acceleration) {
+         input.acceleration = "regular";
+       }
+     } else {
+       input.prompt = prompt;
+       // Use high acceleration for faster queue processing on supported models (flux-schnell)
+       input.acceleration = "high";
+     }
+
      const usesImageSize = IMAGE_SIZE_MODELS.has(this.modelId);

      if (size) {
@@ -446,7 +524,21 @@ class FalImageModel implements ImageModelV3 {
      }

      if (aspectRatio) {
-       if (usesImageSize) {
+       if (isQwenAngles) {
+         // Convert aspect ratio to image_size dimensions for Qwen Angles
+         if (!input.image_size) {
+           const qwenSize = ASPECT_RATIO_TO_QWEN_SIZE[aspectRatio];
+           if (qwenSize) {
+             input.image_size = qwenSize;
+           } else {
+             warnings.push({
+               type: "unsupported",
+               feature: "aspectRatio",
+               details: `Aspect ratio "${aspectRatio}" not supported for qwen-angles, use one of: ${Object.keys(ASPECT_RATIO_TO_QWEN_SIZE).join(", ")}`,
+             });
+           }
+         }
+       } else if (usesImageSize) {
          // Convert aspect ratio to image_size enum for models that require it
          // Only set if size wasn't already provided
          if (!input.image_size) {
@@ -475,11 +567,16 @@ class FalImageModel implements ImageModelV3 {
        input.image_urls = await Promise.all(files.map((f) => fileToUrl(f)));
      }

+     // Qwen Angles requires image_urls
+     if (isQwenAngles && !input.image_urls) {
+       throw new Error("qwen-angles requires at least one image file");
+     }
+
      const hasImageUrls =
        hasFiles ||
        !!(providerOptions?.fal as Record<string, unknown>)?.image_urls;
      if (hasImageUrls) {
-       if (!files) {
+       if (!files && !isQwenAngles) {
          throw new Error("No files provided");
        }
      }
@@ -407,6 +407,36 @@ function escapeAttr(str: string): string {
      .replace(/>/g, "&gt;");
  }

+ function isLocalFilePath(src: string): boolean {
+   if (src.startsWith("http://") || src.startsWith("https://")) return false;
+   if (src.startsWith("data:")) return false;
+   return true;
+ }
+
+ async function localFileToDataUrl(src: string): Promise<string | undefined> {
+   try {
+     const resolved = resolve(process.cwd(), src);
+     const file = Bun.file(resolved);
+     if (!(await file.exists())) return undefined;
+     const buffer = await file.arrayBuffer();
+     const ext = src.split(".").pop()?.toLowerCase();
+     const mimeType =
+       ext === "png"
+         ? "image/png"
+         : ext === "jpg" || ext === "jpeg"
+           ? "image/jpeg"
+           : ext === "gif"
+             ? "image/gif"
+             : ext === "webp"
+               ? "image/webp"
+               : "image/png";
+     const base64 = Buffer.from(buffer).toString("base64");
+     return `data:${mimeType};base64,${base64}`;
+   } catch {
+     return undefined;
+   }
+ }
+
  function generateHtml(storyboard: Storyboard, sourceFile: string): string {
    const escapedSourceFile = escapeHtml(sourceFile);

@@ -434,22 +464,30 @@ function generateHtml(storyboard: Storyboard, sourceFile: string): string {
        )
        .join("");

-     const isInputWithUrl =
-       el.type === "input" &&
-       el.src &&
-       (el.src.startsWith("http://") || el.src.startsWith("https://"));
+     const hasSrcWithPreview =
+       el.src && (el.type === "input" || (el.type === "image" && !el.prompt));
+     const previewSrc =
+       el.imageDataUrl ||
+       (el.src && !isLocalFilePath(el.src) ? el.src : undefined);

-     if (isInputWithUrl) {
-       const shortUrl =
+     if (hasSrcWithPreview) {
+       const shortPath =
          el.src!.length > 50 ? `${el.src!.slice(0, 50)}...` : el.src!;
+       const isUrl =
+         el.src!.startsWith("http://") || el.src!.startsWith("https://");
        const escapedSrc = escapeAttr(el.src!);
+       const previewImgSrc = previewSrc ? escapeAttr(previewSrc) : undefined;
        return `
      <div class="tree-node" style="--depth: ${depth}">
        <span class="tree-prefix">${parentPrefix}${connector}</span>
        <span class="type-tag" style="background: ${color}">${el.type}</span>
        <span class="input-preview-wrapper">
-         <a href="${escapedSrc}" target="_blank" rel="noopener noreferrer" class="tree-prompt input-url">${escapeHtml(shortUrl)}</a>
-         <span class="input-preview-tooltip"><img src="${escapedSrc}" alt="preview" /></span>
+         ${
+           isUrl
+             ? `<a href="${escapedSrc}" target="_blank" rel="noopener noreferrer" class="tree-prompt input-url">${escapeHtml(shortPath)}</a>`
+             : `<span class="tree-prompt input-url">${escapeHtml(shortPath)}</span>`
+         }
+         ${previewImgSrc ? `<span class="input-preview-tooltip"><img src="${previewImgSrc}" alt="preview" /></span>` : ""}
        </span>
      </div>${childrenHtml}`;
      }
@@ -522,24 +560,34 @@ function generateHtml(storyboard: Storyboard, sourceFile: string): string {
        const grandChildren =
          (child.details.children as StoryboardElement[]) || [];

-       const isInputWithUrl =
-         child.type === "input" &&
+       const hasSrcWithPreview =
          child.src &&
-         (child.src.startsWith("http://") || child.src.startsWith("https://"));
+         (child.type === "input" || (child.type === "image" && !child.prompt));
+       const previewSrc =
+         child.imageDataUrl ||
+         (child.src && !isLocalFilePath(child.src) ? child.src : undefined);

-       if (isInputWithUrl) {
-         const shortUrl =
+       if (hasSrcWithPreview) {
+         const shortPath =
            child.src!.length > 60
              ? `${child.src!.slice(0, 60)}...`
              : child.src!;
+         const isUrl =
+           child.src!.startsWith("http://") ||
+           child.src!.startsWith("https://");
          const escapedSrc = escapeAttr(child.src!);
+         const previewImgSrc = previewSrc ? escapeAttr(previewSrc) : undefined;
          return `
        <div class="timeline-nested">
          <span class="nested-connector">${connector}</span>
          <span class="nested-type" style="background: ${color}">${child.type}</span>
          <span class="input-preview-wrapper">
-           <a href="${escapedSrc}" target="_blank" rel="noopener noreferrer" class="nested-prompt input-url">${escapeHtml(shortUrl)}</a>
-           <span class="input-preview-tooltip"><img src="${escapedSrc}" alt="preview" /></span>
+           ${
+             isUrl
+               ? `<a href="${escapedSrc}" target="_blank" rel="noopener noreferrer" class="nested-prompt input-url">${escapeHtml(shortPath)}</a>`
+               : `<span class="nested-prompt input-url">${escapeHtml(shortPath)}</span>`
+           }
+           ${previewImgSrc ? `<span class="input-preview-tooltip"><img src="${previewImgSrc}" alt="preview" /></span>` : ""}
          </span>
        </div>
        ${grandChildren.length > 0 ? renderNestedTree(grandChildren, depth + 1) : ""}`;
@@ -1451,7 +1499,17 @@ async function populateCachedImages(
    let foundCount = 0;

    async function lookupImage(el: StoryboardElement): Promise<void> {
-     if (el.type === "image" && el._element) {
+     // Handle local file sources (for both "image" and "input" types)
+     if (el.src && isLocalFilePath(el.src) && !el.imageDataUrl) {
+       const dataUrl = await localFileToDataUrl(el.src);
+       if (dataUrl) {
+         el.imageDataUrl = dataUrl;
+         foundCount++;
+       }
+     }
+
+     // Handle cached generated images
+     if (el.type === "image" && el._element && !el.imageDataUrl) {
        const cacheKeyParts = computeCacheKey(el._element);
        const cacheKey = `generateImage:${cacheKeyParts.map((d) => String(d ?? "")).join(":")}`;
        const cached = (await cache.get(cacheKey)) as
@@ -0,0 +1,133 @@
+ /**
+  * Grok Imagine Video Edit action
+  * Edit videos using xAI's Grok Imagine Video model
+  */
+
+ import { z } from "zod";
+ import { filePathSchema } from "../../core/schema/shared";
+ import type { ActionDefinition, ZodSchema } from "../../core/schema/types";
+ import { falProvider } from "../../providers/fal";
+
+ // Resolution enum matching the API spec
+ const grokEditResolutionSchema = z
+   .enum(["auto", "480p", "720p"])
+   .default("auto")
+   .describe("Resolution of the output video");
+
+ // Input schema with Zod
+ const grokEditInputSchema = z.object({
+   prompt: z.string().describe("Text description of the desired edit"),
+   video: filePathSchema.describe(
+     "Input video to edit (will be resized to max 854x480 and truncated to 8 seconds)",
+   ),
+   resolution: grokEditResolutionSchema,
+ });
+
+ // Output schema with Zod
+ const grokEditOutputSchema = z.object({
+   videoUrl: z.string(),
+   width: z.number().optional(),
+   height: z.number().optional(),
+   duration: z.number().optional(),
+   fps: z.number().optional(),
+ });
+
+ // Schema object for the definition
+ const schema: ZodSchema<
+   typeof grokEditInputSchema,
+   typeof grokEditOutputSchema
+ > = {
+   input: grokEditInputSchema,
+   output: grokEditOutputSchema,
+ };
+
+ export const definition: ActionDefinition<typeof schema> = {
+   type: "action",
+   name: "grok-edit",
+   description: "Edit video using xAI's Grok Imagine Video",
+   schema,
+   routes: [
+     {
+       target: "xai/grok-imagine-video/edit-video",
+       priority: 10,
+     },
+   ],
+   execute: async (inputs) => {
+     const { prompt, video, resolution } = inputs;
+
+     console.log("[action/grok-edit] editing video with Grok Imagine");
+
+     const result = await falProvider.grokEditVideo({
+       prompt,
+       videoUrl: video,
+       resolution,
+     });
+
+     const data = result.data as {
+       video?: {
+         url?: string;
+         width?: number;
+         height?: number;
+         duration?: number;
+         fps?: number;
+       };
+     };
+
+     const videoUrl = data?.video?.url;
+     if (!videoUrl) {
+       throw new Error("No video URL in result");
+     }
+
+     return {
+       videoUrl,
+       width: data.video?.width,
+       height: data.video?.height,
+       duration: data.video?.duration,
+       fps: data.video?.fps,
+     };
+   },
+ };
+
+ // Re-export types for convenience
+ export type GrokEditInput = z.infer<typeof grokEditInputSchema>;
+ export type GrokEditOutput = z.infer<typeof grokEditOutputSchema>;
+
+ // Convenience function
+ export async function grokEditVideo(
+   prompt: string,
+   videoUrl: string,
+   options: { resolution?: "auto" | "480p" | "720p" } = {},
+ ): Promise<GrokEditOutput> {
+   console.log("[grok-edit] editing video");
+
+   const result = await falProvider.grokEditVideo({
+     prompt,
+     videoUrl,
+     resolution: options.resolution,
+   });
+
+   const data = result.data as {
+     video?: {
+       url?: string;
+       width?: number;
+       height?: number;
+       duration?: number;
+       fps?: number;
+     };
+   };
+
+   const url = data?.video?.url;
+   if (!url) {
+     throw new Error("No video URL in result");
+   }
+
+   return {
+     videoUrl: url,
+     width: data.video?.width,
+     height: data.video?.height,
+     duration: data.video?.duration,
+     fps: data.video?.fps,
+   };
+ }
+
+ export default definition;
@@ -38,6 +38,12 @@ export {
    trim,
    trimDefinition,
  } from "./edit";
+ export type { GrokEditInput, GrokEditOutput } from "./grok-edit";
+ // Grok Imagine Video Edit
+ export {
+   definition as grokEdit,
+   grokEditVideo,
+ } from "./grok-edit";
  export type { ImageGenerationResult } from "./image";
  // Image generation
  export {
@@ -48,6 +54,12 @@ export {
  export type { GenerateMusicOptions, MusicResult } from "./music";
  // Music generation
  export { definition as music, generateMusic } from "./music";
+ export type { QwenAnglesInput, QwenAnglesOutput } from "./qwen-angles";
+ // Qwen Image Edit 2511 Multiple Angles
+ export {
+   definition as qwenAngles,
+   qwenAngles as qwenAnglesImage,
+ } from "./qwen-angles";
  export type { LipsyncOptions, LipsyncResult, Wav2LipOptions } from "./sync";
  // Lip sync
  export {
@@ -87,8 +99,10 @@ import {
    transitionDefinition,
    trimDefinition,
  } from "./edit";
+ import { definition as grokEditDefinition } from "./grok-edit";
  import { definition as imageDefinition } from "./image";
  import { definition as musicDefinition } from "./music";
+ import { definition as qwenAnglesDefinition } from "./qwen-angles";
  import { definition as syncDefinition } from "./sync";
  import { definition as transcribeDefinition } from "./transcribe";
  import { definition as uploadDefinition } from "./upload";
@@ -103,6 +117,8 @@ export const allActions = [
    musicDefinition,
    syncDefinition,
    captionsDefinition,
+   grokEditDefinition,
+   qwenAnglesDefinition,
    trimDefinition,
    cutDefinition,
    mergeDefinition,
@@ -0,0 +1,218 @@
+ /**
+  * Qwen Image Edit 2511 Multiple Angles action
+  * Generates same scene from different camera angles (azimuth/elevation)
+  */
+
+ import { z } from "zod";
+ import { filePathSchema } from "../../core/schema/shared";
+ import type { ActionDefinition, ZodSchema } from "../../core/schema/types";
+ import { falProvider } from "../../providers/fal";
+
+ // Input schema with Zod
+ const qwenAnglesInputSchema = z.object({
+   image: filePathSchema.describe("Input image to adjust camera angle for"),
+   horizontalAngle: z
+     .number()
+     .min(0)
+     .max(360)
+     .default(0)
+     .describe(
+       "Horizontal rotation angle in degrees. 0=front, 90=right side, 180=back, 270=left side, 360=front",
+     ),
+   verticalAngle: z
+     .number()
+     .min(-30)
+     .max(90)
+     .default(0)
+     .describe(
+       "Vertical camera angle in degrees. -30=looking up, 0=eye-level, 30=elevated, 60=high-angle, 90=bird's-eye",
+     ),
+   zoom: z
+     .number()
+     .min(0)
+     .max(10)
+     .default(5)
+     .describe(
+       "Camera zoom/distance. 0=wide shot (far), 5=medium shot (normal), 10=close-up (very close)",
+     ),
+   prompt: z
+     .string()
+     .optional()
+     .describe(
+       "Additional text to append to the automatically generated prompt",
+     ),
+   loraScale: z
+     .number()
+     .min(0)
+     .max(4)
+     .default(1)
+     .describe("Strength of the camera control effect"),
+   guidanceScale: z
+     .number()
+     .min(1)
+     .max(20)
+     .default(4.5)
+     .describe("CFG (Classifier Free Guidance) scale"),
+   numInferenceSteps: z
+     .number()
+     .min(1)
+     .max(50)
+     .default(28)
+     .describe("Number of inference steps"),
+   negativePrompt: z
+     .string()
+     .default("")
+     .describe("Negative prompt for the generation"),
+   seed: z.number().optional().describe("Random seed for reproducibility"),
+   outputFormat: z
+     .enum(["png", "jpeg", "webp"])
+     .default("png")
+     .describe("Output image format"),
+   numImages: z
+     .number()
+     .min(1)
+     .max(4)
+     .default(1)
+     .describe("Number of images to generate"),
+ });
+
+ // Output schema with Zod
+ const qwenAnglesOutputSchema = z.object({
+   imageUrl: z.string(),
+   images: z.array(z.object({ url: z.string() })).optional(),
+   seed: z.number().optional(),
+   prompt: z.string().optional(),
+ });
+
+ // Schema object for the definition
+ const schema: ZodSchema<
+   typeof qwenAnglesInputSchema,
+   typeof qwenAnglesOutputSchema
+ > = {
+   input: qwenAnglesInputSchema,
+   output: qwenAnglesOutputSchema,
+ };
+
+ export const definition: ActionDefinition<typeof schema> = {
+   type: "action",
+   name: "qwen-angles",
+   description:
+     "Adjust camera angle of an image using Qwen Image Edit 2511 Multiple Angles",
+   schema,
+   routes: [
+     {
+       target: "fal-ai/qwen-image-edit-2511-multiple-angles",
+       priority: 10,
+     },
+   ],
+   execute: async (inputs) => {
+     const {
+       image,
+       horizontalAngle,
+       verticalAngle,
+       zoom,
+       prompt,
+       loraScale,
+       guidanceScale,
+       numInferenceSteps,
+       negativePrompt,
+       seed,
+       outputFormat,
+       numImages,
+     } = inputs;
+
+     console.log("[action/qwen-angles] adjusting camera angle");
+
+     const result = await falProvider.qwenMultipleAngles({
+       imageUrl: image,
+       horizontalAngle,
+       verticalAngle,
+       zoom,
+       additionalPrompt: prompt,
+       loraScale,
+       guidanceScale,
+       numInferenceSteps,
+       negativePrompt,
+       seed,
+       outputFormat,
+       numImages,
+     });
+
+     const data = result.data as {
+       images?: Array<{ url: string }>;
+       seed?: number;
+       prompt?: string;
+     };
+
+     const images = data?.images;
+     if (!images || images.length === 0) {
+       throw new Error("No images in result");
+     }
+
+     return {
+       imageUrl: images[0]!.url,
+       images,
+       seed: data?.seed,
+       prompt: data?.prompt,
+     };
+   },
+ };
+
+ // Re-export types for convenience
+ export type QwenAnglesInput = z.infer<typeof qwenAnglesInputSchema>;
+ export type QwenAnglesOutput = z.infer<typeof qwenAnglesOutputSchema>;
+
+ // Convenience function
+ export async function qwenAngles(
+   imageUrl: string,
+   options: {
+     horizontalAngle?: number;
+     verticalAngle?: number;
+     zoom?: number;
+     prompt?: string;
+     loraScale?: number;
+     guidanceScale?: number;
+     numInferenceSteps?: number;
+     negativePrompt?: string;
+     seed?: number;
+     outputFormat?: "png" | "jpeg" | "webp";
+     numImages?: number;
+   } = {},
+ ): Promise<QwenAnglesOutput> {
+   console.log("[qwen-angles] adjusting camera angle");
+
+   const result = await falProvider.qwenMultipleAngles({
+     imageUrl,
+     horizontalAngle: options.horizontalAngle,
+     verticalAngle: options.verticalAngle,
+     zoom: options.zoom,
+     additionalPrompt: options.prompt,
+     loraScale: options.loraScale,
+     guidanceScale: options.guidanceScale,
+     numInferenceSteps: options.numInferenceSteps,
+     negativePrompt: options.negativePrompt,
+     seed: options.seed,
+     outputFormat: options.outputFormat,
+     numImages: options.numImages,
+   });
+
+   const data = result.data as {
+     images?: Array<{ url: string }>;
+     seed?: number;
+     prompt?: string;
+   };
+
+   const images = data?.images;
+   if (!images || images.length === 0) {
+     throw new Error("No images in result");
+   }
+
+   return {
+     imageUrl: images[0]!.url,
+     images,
+     seed: data?.seed,
+     prompt: data?.prompt,
+   };
+ }
+
+ export default definition;
@@ -374,6 +374,197 @@ export class FalProvider extends BaseProvider {
      console.log("[fal] completed!");
      return result;
    }
+
+   // ============================================================================
+   // Grok Imagine Video methods (xAI)
+   // ============================================================================
+
+   /**
+    * Generate video from text using Grok Imagine Video
+    * Supports 1-15 second videos at 480p or 720p resolution
+    */
+   async grokTextToVideo(args: {
+     prompt: string;
+     duration?: number;
+     aspectRatio?: "16:9" | "4:3" | "3:2" | "1:1" | "2:3" | "3:4" | "9:16";
+     resolution?: "480p" | "720p";
+   }) {
+     const modelId = "xai/grok-imagine-video/text-to-video";
+
+     console.log(`[fal] starting grok text-to-video: ${modelId}`);
+     console.log(`[fal] prompt: ${args.prompt}`);
+
+     const result = await fal.subscribe(modelId, {
+       input: {
+         prompt: args.prompt,
+         duration: args.duration ?? 6,
+         aspect_ratio: args.aspectRatio ?? "16:9",
+         resolution: args.resolution ?? "720p",
+       },
+       logs: true,
+       onQueueUpdate: (update) => {
+         if (update.status === "IN_PROGRESS") {
+           console.log(
+             `[fal] ${update.logs?.map((l) => l.message).join(" ") || "processing..."}`,
+           );
+         }
+       },
+     });
+
+     console.log("[fal] completed!");
+     return result;
+   }
+
+   /**
+    * Generate video from image using Grok Imagine Video
+    * Supports 1-15 second videos at 480p or 720p resolution
+    */
+   async grokImageToVideo(args: {
+     prompt: string;
+     imageUrl: string;
+     duration?: number;
+     aspectRatio?:
+       | "auto"
+       | "16:9"
+       | "4:3"
+       | "3:2"
+       | "1:1"
+       | "2:3"
+       | "3:4"
+       | "9:16";
+     resolution?: "480p" | "720p";
+   }) {
+     const modelId = "xai/grok-imagine-video/image-to-video";
+
+     console.log(`[fal] starting grok image-to-video: ${modelId}`);
+     console.log(`[fal] prompt: ${args.prompt}`);
+
+     const imageUrl = await ensureUrl(args.imageUrl, (buffer) =>
+       this.uploadFile(buffer),
+     );
+
+     const result = await fal.subscribe(modelId, {
+       input: {
+         prompt: args.prompt,
+         image_url: imageUrl,
+         duration: args.duration ?? 6,
+         aspect_ratio: args.aspectRatio ?? "auto",
+         resolution: args.resolution ?? "720p",
+       },
+       logs: true,
+       onQueueUpdate: (update) => {
+         if (update.status === "IN_PROGRESS") {
+           console.log(
+             `[fal] ${update.logs?.map((l) => l.message).join(" ") || "processing..."}`,
+           );
+         }
+       },
+     });
+
+     console.log("[fal] completed!");
+     return result;
+   }
+
+   /**
+    * Edit video using Grok Imagine Video
+    * Video will be resized to max 854x480 and truncated to 8 seconds
+    */
+   async grokEditVideo(args: {
+     prompt: string;
+     videoUrl: string;
+     resolution?: "auto" | "480p" | "720p";
+   }) {
+     const modelId = "xai/grok-imagine-video/edit-video";
+
+     console.log(`[fal] starting grok edit-video: ${modelId}`);
+     console.log(`[fal] prompt: ${args.prompt}`);
+
+     const videoUrl = await ensureUrl(args.videoUrl, (buffer) =>
+       this.uploadFile(buffer),
+     );
+
+     const result = await fal.subscribe(modelId, {
+       input: {
+         prompt: args.prompt,
+         video_url: videoUrl,
+         resolution: args.resolution ?? "auto",
+       },
+       logs: true,
+       onQueueUpdate: (update) => {
+         if (update.status === "IN_PROGRESS") {
+           console.log(
+             `[fal] ${update.logs?.map((l) => l.message).join(" ") || "processing..."}`,
+           );
+         }
+       },
+     });
+
+     console.log("[fal] completed!");
+     return result;
+   }
+
+   // ============================================================================
+   // Qwen Image Edit 2511 Multiple Angles
+   // ============================================================================
+
+   /**
+    * Adjust camera angle of an image using Qwen Image Edit 2511 Multiple Angles
+    * Generates same scene from different angles (azimuth/elevation)
+    */
+   async qwenMultipleAngles(args: {
+     imageUrl: string;
+     horizontalAngle?: number;
+     verticalAngle?: number;
+     zoom?: number;
+     additionalPrompt?: string;
+     loraScale?: number;
+     imageSize?: string | { width: number; height: number };
+     guidanceScale?: number;
+     numInferenceSteps?: number;
+     acceleration?: "none" | "regular";
+     negativePrompt?: string;
+     seed?: number;
+     outputFormat?: "png" | "jpeg" | "webp";
+     numImages?: number;
+   }) {
+     const modelId = "fal-ai/qwen-image-edit-2511-multiple-angles";
+
+     console.log(`[fal] starting qwen multiple angles: ${modelId}`);
+
+     const imageUrl = await ensureUrl(args.imageUrl, (buffer) =>
+       this.uploadFile(buffer),
+     );
+
+     const result = await fal.subscribe(modelId, {
+       input: {
+         image_urls: [imageUrl],
+         horizontal_angle: args.horizontalAngle ?? 0,
+         vertical_angle: args.verticalAngle ?? 0,
+         zoom: args.zoom ?? 5,
+         additional_prompt: args.additionalPrompt,
+         lora_scale: args.loraScale ?? 1,
+         image_size: args.imageSize,
+         guidance_scale: args.guidanceScale ?? 4.5,
+         num_inference_steps: args.numInferenceSteps ?? 28,
+         acceleration: args.acceleration ?? "regular",
+         negative_prompt: args.negativePrompt ?? "",
+         seed: args.seed,
+         output_format: args.outputFormat ?? "png",
+         num_images: args.numImages ?? 1,
+       },
+       logs: true,
+       onQueueUpdate: (update) => {
+         if (update.status === "IN_PROGRESS") {
+           console.log(
+             `[fal] ${update.logs?.map((l) => l.message).join(" ") || "processing..."}`,
+           );
+         }
+       },
+     });
+
+     console.log("[fal] completed!");
+     return result;
+   }
  }

  // Export singleton instance
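A sketch of calling the new provider methods from application code, assuming the exported falProvider singleton shown above (the import path and URLs are placeholders; local paths are uploaded via ensureUrl as in the methods themselves):

    import { falProvider } from "./providers/fal";

    // Animate a still image with Grok Imagine, then reframe it with Qwen angles.
    const i2v = await falProvider.grokImageToVideo({
      prompt: "The subject slowly turns their head and smiles",
      imageUrl: "https://example.com/portrait.png",
      duration: 6,
      resolution: "720p",
    });

    const angles = await falProvider.qwenMultipleAngles({
      imageUrl: "https://example.com/portrait.png",
      horizontalAngle: 45,
      verticalAngle: 30,
      zoom: 8,
    });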
@@ -0,0 +1,72 @@
+ /**
+  * Qwen Image Edit 2511 Multiple Angles Test
+  *
+  * Demonstrates camera angle adjustment using Qwen model.
+  * Generates the same scene from different perspectives (azimuth/elevation).
+  *
+  * Run with: bun run src/cli/index.ts render src/react/examples/qwen-angles-test.tsx
+  */
+
+ import { fal } from "../../ai-sdk/providers/fal";
+ import { Clip, Image, Render } from "..";
+
+ // Source image - replace with your own image URL or local path
+ const sourceImage =
+   "https://v3b.fal.media/files/b/0a8973cb/qUbVwDCcMlvX4drBGYB1H.png";
+
+ export default (
+   <Render width={1024} height={1024}>
+     {/* Original image - front view */}
+     <Clip duration={2}>
+       <Image src={sourceImage} resize="cover" />
+     </Clip>
+
+     {/* Right side view (90 degrees horizontal) */}
+     <Clip duration={2}>
+       <Image
+         prompt={{ text: "", images: [sourceImage] }}
+         model={fal.imageModel("qwen-angles")}
+         aspectRatio="1:1"
+         providerOptions={{
+           fal: {
+             horizontal_angle: 90,
+             vertical_angle: 0,
+             zoom: 5,
+           },
+         }}
+       />
+     </Clip>
+
+     {/* Bird's eye view (60 degrees vertical) */}
+     <Clip duration={2}>
+       <Image
+         prompt={{ text: "", images: [sourceImage] }}
+         model={fal.imageModel("qwen-angles")}
+         aspectRatio="1:1"
+         providerOptions={{
+           fal: {
+             horizontal_angle: 0,
+             vertical_angle: 60,
+             zoom: 5,
+           },
+         }}
+       />
+     </Clip>
+
+     {/* Close-up from 45 degree angle */}
+     <Clip duration={2}>
+       <Image
+         prompt={{ text: "", images: [sourceImage] }}
+         model={fal.imageModel("qwen-angles")}
+         aspectRatio="1:1"
+         providerOptions={{
+           fal: {
+             horizontal_angle: 45,
+             vertical_angle: 30,
+             zoom: 8,
+           },
+         }}
+       />
+     </Clip>
+   </Render>
+ );
@@ -1,4 +1,4 @@
- import { describe, expect, test } from "bun:test";
+ import { describe, expect, mock, test } from "bun:test";
  import { existsSync, unlinkSync } from "node:fs";
  import { fal } from "../ai-sdk/providers/fal";
  import {
@@ -157,6 +157,55 @@ describe("varg-react render", () => {

    expect(render(root)).rejects.toThrow("model");
  });
+
+ test("parallel failures preserve successful results and report all errors", async () => {
+   let callCount = 0;
+   const mockModel = {
+     specificationVersion: "v3" as const,
+     provider: "mock",
+     modelId: "mock-model",
+     maxImagesPerCall: 1,
+     doGenerate: mock(async () => {
+       callCount++;
+       if (callCount === 2) {
+         throw new Error("Request Timeout");
+       }
+       return {
+         images: [new Uint8Array([0x89, 0x50, 0x4e, 0x47])],
+         warnings: [],
+         response: {
+           timestamp: new Date(),
+           modelId: "mock",
+           headers: undefined,
+         },
+       };
+     }),
+   };
+
+   const root = Render({
+     width: 720,
+     height: 720,
+     children: [
+       Clip({
+         duration: 1,
+         children: [Image({ prompt: "first", model: mockModel })],
+       }),
+       Clip({
+         duration: 1,
+         children: [Image({ prompt: "second", model: mockModel })],
+       }),
+       Clip({
+         duration: 1,
+         children: [Image({ prompt: "third", model: mockModel })],
+       }),
+     ],
+   });
+
+   const error = await render(root, { quiet: true }).catch((e) => e);
+   expect(error.message).toContain("1 of 3 clips failed");
+   expect(error.message).toContain("Request Timeout");
+   expect(callCount).toBe(3);
+ });
  });

  describe("layout renderers", () => {
@@ -226,11 +226,28 @@ async function renderClipLayers(
      }
    }

-   const layers = await Promise.all(
-     pending.map((p) => (p.type === "sync" ? p.layer : p.promise)),
+   const layerResults = await Promise.allSettled(
+     pending.map((p) =>
+       p.type === "sync" ? Promise.resolve(p.layer) : p.promise,
+     ),
    );

-   return layers;
+   const failures = layerResults
+     .map((r, i) =>
+       r.status === "rejected" ? { index: i, reason: r.reason } : null,
+     )
+     .filter(Boolean) as { index: number; reason: Error }[];
+
+   if (failures.length > 0) {
+     const errors = failures
+       .map((f) => f.reason?.message || "Unknown error")
+       .join("; ");
+     throw new Error(
+       `${failures.length} of ${layerResults.length} layers failed: ${errors}`,
+     );
+   }
+
+   return layerResults.map((r) => (r as PromiseFulfilledResult<Layer>).value);
  }

  export async function renderClip(
@@ -205,10 +205,37 @@ export async function renderRoot(
      }
    }

-   const renderedClips = await Promise.all(
+   const clipResults = await Promise.allSettled(
      clipElements.map((clipElement) => renderClip(clipElement, ctx)),
    );

+   const failures = clipResults
+     .map((r, i) =>
+       r.status === "rejected" ? { index: i, reason: r.reason } : null,
+     )
+     .filter(Boolean) as { index: number; reason: Error }[];
+
+   if (failures.length > 0) {
+     const successCount = clipResults.length - failures.length;
+     if (successCount > 0) {
+       console.log(
+         `\x1b[33mℹ ${successCount} clip(s) cached, ${failures.length} failed\x1b[0m`,
+       );
+     }
+     const errors = failures
+       .map((f) => f.reason?.message || "Unknown error")
+       .join("; ");
+     throw new Error(
+       `${failures.length} of ${clipResults.length} clips failed: ${errors}`,
+     );
+   }
+
+   const renderedClips = clipResults.map(
+     (r) =>
+       (r as PromiseFulfilledResult<Awaited<ReturnType<typeof renderClip>>>)
+         .value,
+   );
+
    const clips: Clip[] = [];
    let currentTime = 0;