vargai 0.3.2 → 0.4.0-alpha2

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (64)
  1. package/biome.json +6 -1
  2. package/docs/index.html +1130 -0
  3. package/docs/prompting.md +326 -0
  4. package/docs/react.md +834 -0
  5. package/package.json +10 -4
  6. package/src/cli/commands/index.ts +1 -4
  7. package/src/cli/commands/render.tsx +94 -0
  8. package/src/cli/index.ts +3 -2
  9. package/src/react/cli.ts +52 -0
  10. package/src/react/elements.ts +146 -0
  11. package/src/react/examples/branching.tsx +66 -0
  12. package/src/react/examples/captions-demo.tsx +37 -0
  13. package/src/react/examples/character-video.tsx +84 -0
  14. package/src/react/examples/grid.tsx +53 -0
  15. package/src/react/examples/layouts-demo.tsx +57 -0
  16. package/src/react/examples/madi.tsx +60 -0
  17. package/src/react/examples/music-test.tsx +35 -0
  18. package/src/react/examples/onlyfans-1m/workflow.tsx +88 -0
  19. package/src/react/examples/orange-portrait.tsx +41 -0
  20. package/src/react/examples/split-element-demo.tsx +60 -0
  21. package/src/react/examples/split-layout-demo.tsx +60 -0
  22. package/src/react/examples/split.tsx +41 -0
  23. package/src/react/examples/video-grid.tsx +46 -0
  24. package/src/react/index.ts +43 -0
  25. package/src/react/layouts/grid.tsx +28 -0
  26. package/src/react/layouts/index.ts +2 -0
  27. package/src/react/layouts/split.tsx +20 -0
  28. package/src/react/react.test.ts +309 -0
  29. package/src/react/render.ts +21 -0
  30. package/src/react/renderers/animate.ts +59 -0
  31. package/src/react/renderers/captions.ts +297 -0
  32. package/src/react/renderers/clip.ts +248 -0
  33. package/src/react/renderers/context.ts +17 -0
  34. package/src/react/renderers/image.ts +109 -0
  35. package/src/react/renderers/index.ts +22 -0
  36. package/src/react/renderers/music.ts +60 -0
  37. package/src/react/renderers/packshot.ts +84 -0
  38. package/src/react/renderers/progress.ts +173 -0
  39. package/src/react/renderers/render.ts +319 -0
  40. package/src/react/renderers/slider.ts +69 -0
  41. package/src/react/renderers/speech.ts +53 -0
  42. package/src/react/renderers/split.ts +91 -0
  43. package/src/react/renderers/subtitle.ts +16 -0
  44. package/src/react/renderers/swipe.ts +75 -0
  45. package/src/react/renderers/title.ts +17 -0
  46. package/src/react/renderers/utils.ts +124 -0
  47. package/src/react/renderers/video.ts +127 -0
  48. package/src/react/runtime/jsx-dev-runtime.ts +43 -0
  49. package/src/react/runtime/jsx-runtime.ts +35 -0
  50. package/src/react/types.ts +235 -0
  51. package/src/studio/index.ts +26 -0
  52. package/src/studio/scanner.ts +102 -0
  53. package/src/studio/server.ts +554 -0
  54. package/src/studio/stages.ts +251 -0
  55. package/src/studio/step-renderer.ts +279 -0
  56. package/src/studio/types.ts +60 -0
  57. package/src/studio/ui/cache.html +303 -0
  58. package/src/studio/ui/index.html +1820 -0
  59. package/tsconfig.cli.json +8 -0
  60. package/tsconfig.json +3 -1
  61. package/bun.lock +0 -1255
  62. package/docs/plan.md +0 -66
  63. package/docs/todo.md +0 -14
  64. /package/docs/{varg-sdk.md → sdk.md} +0 -0
@@ -0,0 +1,60 @@
/**
 * Madi character demo: four scenes of the same AI character, each
 * generated as a still image from a shared reference shot, animated
 * with a video model, and cut together over generated music in a
 * 9:16 vertical render.
 */
import { elevenlabs } from "../../ai-sdk/providers/elevenlabs";
import { fal } from "../../ai-sdk/providers/fal";
import { Animate, Clip, Image, Music, Render } from "..";

// Reference shot passed into every image prompt to keep the character
// consistent across scenes.
const MADI_REF =
  "https://s3.varg.ai/fellowers/madi/character_shots/madi_shot_03_closeup.png";

// Each scene pairs an image-generation prompt with a motion description
// consumed by the animation model.
const SCENES = [
  {
    prompt:
      "extreme close-up face shot, surprised expression with wide eyes, looking directly at camera, holding peach near lips",
    motion:
      "eyes widen in surprise, eyebrows raise slightly, subtle head tilt forward. Static shot, no camera movement.",
  },
  {
    prompt:
      "45-degree angle medium shot showing face and hands, biting into peach with exaggerated enjoyment, juice on lips, playful expression",
    motion:
      "turns head 45 degrees, bites into peach, juice drips down chin, hands move expressively. Slow push-in camera movement.",
  },
  {
    prompt:
      "low angle shot from below, looking down at camera with confident smirk, holding peach triumphantly, dramatic perspective",
    motion:
      "looks down at camera with growing smile, raises peach slightly, confident head tilt. Static camera, slight lens distortion.",
  },
  {
    prompt:
      "high angle shot from above, looking up at camera with playful smile, arms spread wide, peach in one hand, endearing expression",
    motion:
      "looks up at camera, expression shifts from neutral to excited smile, subtle wink, slight forward lean. Gentle camera tilt down.",
  },
];

export default (
  <Render width={1080} height={1920}>
    {/* Background music bed at reduced volume (0.6). */}
    <Music
      prompt="upbeat electronic pop, energetic female vocal chops, modern tiktok vibe, catchy melody"
      model={elevenlabs.musicModel()}
      duration={10}
      volume={0.6}
    />

    {/* One clip per scene: generate the still, then animate it.
        NOTE(review): Animate duration (5) exceeds Clip duration (2) —
        presumably the clip trims the generated video; confirm. */}
    {SCENES.map((scene) => (
      <Clip key={scene.prompt} duration={2}>
        <Animate
          image={Image({
            prompt: { text: scene.prompt, images: [MADI_REF] },
            model: fal.imageModel("nano-banana-pro/edit"),
            aspectRatio: "9:16",
            resize: "cover",
          })}
          motion={scene.motion}
          model={fal.videoModel("wan-2.5")}
          duration={5}
        />
      </Clip>
    ))}
  </Render>
);
@@ -0,0 +1,35 @@
/**
 * Manual test for music auto-trim: the Music element is given no
 * duration, so the generated track should be trimmed to the total
 * clip length (3 clips x 3s = 9s).
 */
import { elevenlabs } from "../../ai-sdk/providers/elevenlabs";
import { Clip, Image, Music, Render, render } from "..";

async function main() {
  const video = (
    <Render width={1080} height={1920}>
      {/* Intentionally no `duration` prop: exercises the auto-trim path. */}
      <Music
        prompt="chill lo-fi hip hop beats, relaxing piano melody"
        model={elevenlabs.musicModel()}
      />

      <Clip duration={3}>
        <Image src="media/cyberpunk-street.png" resize="cover" />
      </Clip>
      <Clip duration={3}>
        <Image src="media/madi-portrait.png" resize="cover" />
      </Clip>
      <Clip duration={3}>
        <Image src="media/replicate-forest.png" resize="cover" />
      </Clip>
    </Render>
  );

  console.log("testing music auto-trim (no duration specified)");
  console.log("3 clips x 3s = 9s video, music should auto-trim to 9s\n");

  // Cache AI generations so reruns don't re-hit the providers.
  await render(video, {
    output: "output/music-test.mp4",
    cache: ".cache/ai",
  });

  console.log("\ndone! check output/music-test.mp4");
}

main().catch(console.error);
@@ -0,0 +1,88 @@
/**
 * OnlyFans-style selfie video workflow (React DSL)
 *
 * 1. Generate base girl image using Higgsfield Soul
 * 2. Edit bra color using nano-banana-pro/edit
 * 3. Generate 5 photos with different angles using nano-banana
 * 4. Animate all with prompts (step back, turn left/right)
 * 5. Concatenate into ~10 second video
 *
 * Run: bun run src/ai-sdk/react/cli.ts src/ai-sdk/examples/onlyfans-1m/workflow.tsx -o output/onlyfans-1m.mp4
 *
 * Required env: HIGGSFIELD_API_KEY, HIGGSFIELD_SECRET, FAL_KEY
 *
 * NOTE(review): the step list above no longer matches the code below —
 * only 2 clips x 3s are rendered (not 5 photos / ~10s), and the bra
 * recolor uses seedream-v4.5/edit, not nano-banana-pro/edit. Update
 * the header or the code.
 */

import { fal, higgsfield } from "../../../ai-sdk";
import { Clip, Image, Render, Video } from "../..";

// ============================================================================
// CHARACTER DEFINITION
// ============================================================================

const NEW_BRA_COLOR = "deep purple";

// Base character prompt for Higgsfield
const baseCharacter = Image({
  prompt:
    "A beautiful Slavic woman in her late 20s with platinum blonde hair, icy blue eyes, and perfect skin. She bends very close to the phone lens, her chest framed by a white sports bra with a bold neckline, and she is wearing high-waisted athletic shorts in pale grey that accentuate her figure. Her expression is confident and slightly teasing. The background shows a modern apartment with soft daylight through large windows, reinforcing the natural homemade vibe",
  model: higgsfield.imageModel("soul", {
    quality: "1080p",
    styleId: higgsfield.styles.CAM_360,
  }),
  aspectRatio: "9:16",
});

// Background plate: remove the character so later edits can use the empty
// scene as a second reference. (Previous comment said "recolor the bra" —
// that step is newBraCharacter below.)
const background = Image({
  prompt: {
    text: `Remove character from the image`,
    images: [baseCharacter],
  },
  model: fal.imageModel("nano-banana-pro/edit"),
  aspectRatio: "9:16",
});

// Recolor the bra on the base character; the background plate is passed as
// a second reference to keep the scene otherwise unchanged.
const newBraCharacter = Image({
  prompt: {
    text: `Change the sports bra colour to ${NEW_BRA_COLOR}. Keep everything else exactly the same - same woman, same pose, same lighting, same background.`,
    images: [baseCharacter, background],
  },
  model: fal.imageModel("seedream-v4.5/edit"),
  aspectRatio: "9:16",
});

// Second pose/position variant used for the second clip.
// NOTE(review): prompt text appears truncated ("...two steps aways from
// the") — confirm the intended wording.
const newAngleCharacter = Image({
  prompt: {
    text: `Slightly change the pose of the character, keeping the same pose, lighting, and background. Put the character two steps aways from the`,
    images: [newBraCharacter, background],
  },
  model: fal.imageModel("nano-banana-pro/edit"),
  aspectRatio: "9:16",
});

// Motion prompt shared by both clips below.
const motionLeft = `A woman in stylish ${NEW_BRA_COLOR} sportswear takes a selfie. She starts very close to camera showing face and decolletage, then steps back to reveal more of her body. She turns slightly to the LEFT to show her figure in profile. Warm daylight, authentic homemade video feel. Camera static.`;

export default (
  <Render width={1080} height={1920}>
    {/* Clip 1: Left angle, turns left */}
    <Clip duration={3}>
      <Video
        prompt={{
          images: [newBraCharacter],
          text: motionLeft,
        }}
        model={fal.videoModel("kling-v2.5")}
      />
    </Clip>

    {/* Clip 2: same motion prompt, shifted-pose reference image. */}
    <Clip duration={3}>
      <Video
        prompt={{
          images: [newAngleCharacter],
          text: motionLeft,
        }}
        model={fal.videoModel("kling-v2.5")}
      />
    </Clip>
  </Render>
);
@@ -0,0 +1,41 @@
/**
 * Orange portrait demo: a nested generation — an editorial portrait is
 * generated from two reference images, then used as the first frame of
 * a 5-second video clip in a 9:16 vertical render.
 */
import { fal } from "../../ai-sdk/providers/fal";
import { Clip, Image, Render, Video } from "..";

// character: young woman, short dark brown bob with wispy bangs, oval face, fair skin,
// large dark brown eyes, full lips, silver hoop earrings
// style: deep black bg, dramatic orange rim lighting, noir/premium aesthetic

export default (
  <Render width={1080} height={1920}>
    <Clip duration={5}>
      {/* Video prompt carries both the motion text and the generated
          portrait (inner Image call) as its reference image. */}
      <Video
        prompt={{
          text: "She steps back, camera reveals more of her body until she appears fully in frame. Studio lighting, authentic confident slightly playful atmosphere. Camera static. She poses naturally, first looking straight at camera, then turning slightly to the side to show her figure in profile. Intense orange lighting.",
          images: [
            Image({
              prompt: {
                text: `Using the attached reference images, generate a photorealistic Three-quarter editorial portrait of the exact same character — maintain identical face, hairstyle, and proportions from Image 1.

Framing: Head and shoulders, cropped at upper chest. Direct eye contact with camera.

Natural confident expression, relaxed shoulders.
Preserve the outfit neckline and visible clothing details from reference.

Background: Deep black with two contrasting orange gradient accents matching Reference 2. Soft gradient bleed, no hard edges.

Shot on 85mm f/1.4 lens, shallow depth of field. Clean studio lighting — soft key light on face, subtle rim light on hair and shoulders for separation. High-end fashion editorial aesthetic.`,
                images: [
                  "https://s3.varg.ai/uploads/images/1_0475e227.png",
                  "https://s3.varg.ai/uploads/images/xyearp51qvve-zi3nrcve-zbno2hfgt5gergjrof_995f553d.png",
                ],
              },
              model: fal.imageModel("nano-banana-pro/edit"),
            }),
          ],
        }}
        model={fal.videoModel("kling-v2.5")}
        duration={5}
      />
    </Clip>
  </Render>
);
@@ -0,0 +1,60 @@
/**
 * Split Element Demo - uses the Split renderer (creates separate video composition)
 *
 * The Split element renders children into a side-by-side video using editly.
 * Good for: when you need a self-contained split-screen video layer
 *
 * Also demonstrates Slider, Swipe, and Packshot elements with fade
 * transitions between clips.
 */
import {
  Clip,
  Image,
  Packshot,
  Render,
  render,
  Slider,
  Split,
  Swipe,
  Title,
} from "..";

async function main() {
  console.log("Split Element Demo (uses renderer)\n");

  // Local media reused across the clips below.
  const img1 = Image({ src: "media/cyberpunk-street.png" });
  const img2 = Image({ src: "media/fal-coffee-shop.png" });
  const img3 = Image({ src: "media/kirill.png" });

  const video = (
    <Render width={1280} height={720}>
      {/* Clip 1: two images side by side via the Split renderer. */}
      <Clip duration={3}>
        <Split direction="horizontal">{[img1, img2]}</Split>
        <Title position="bottom">Split Element (renderer)</Title>
      </Clip>

      {/* Clip 2: sliding sequence of three images. */}
      <Clip duration={4} transition={{ name: "fade", duration: 0.5 }}>
        <Slider direction="horizontal">{[img1, img2, img3]}</Slider>
      </Clip>

      {/* Clip 3: swipe-left through the same images every 1.5s. */}
      <Clip duration={4} transition={{ name: "fade", duration: 0.5 }}>
        <Swipe direction="left" interval={1.5}>
          {[img1, img2, img3]}
        </Swipe>
      </Clip>

      {/* Clip 4: closing packshot with logo and call-to-action. */}
      <Clip duration={3} transition={{ name: "fade", duration: 0.5 }}>
        <Packshot
          background="#1a1a2e"
          logo="media/cyberpunk-street.png"
          logoPosition="center"
          logoSize="50%"
          cta="Subscribe for more!"
          ctaColor="#FFD700"
        />
      </Clip>
    </Render>
  );

  await render(video, { output: "output/split-element-demo.mp4" });
  console.log("\ndone! check output/split-element-demo.mp4");
}

main().catch(console.error);
@@ -0,0 +1,60 @@
/**
 * SplitLayout Demo - uses the layout helper (positions children in clip)
 *
 * SplitLayout just adds position props to children, letting the clip handle rendering.
 * Good for: when you want positioned images/videos within a clip alongside other layers
 *
 * Mirrors split-element-demo.tsx but swaps the Split renderer for the
 * SplitLayout helper in clip 1.
 */
import {
  Clip,
  Image,
  Packshot,
  Render,
  render,
  Slider,
  SplitLayout,
  Swipe,
  Title,
} from "..";

async function main() {
  console.log("SplitLayout Demo (uses layout helper)\n");

  // Local media reused across the clips below.
  const img1 = Image({ src: "media/cyberpunk-street.png" });
  const img2 = Image({ src: "media/fal-coffee-shop.png" });
  const img3 = Image({ src: "media/kirill.png" });

  const video = (
    <Render width={1280} height={720}>
      {/* Clip 1: SplitLayout positions two images; Title overlays them. */}
      <Clip duration={3}>
        <SplitLayout direction="horizontal" left={img1} right={img2} />
        <Title position="bottom">SplitLayout (layout helper)</Title>
      </Clip>

      {/* Clip 2: sliding sequence of three images. */}
      <Clip duration={4} transition={{ name: "fade", duration: 0.5 }}>
        <Slider direction="horizontal">{[img1, img2, img3]}</Slider>
      </Clip>

      {/* Clip 3: swipe-left through the same images every 1.5s. */}
      <Clip duration={4} transition={{ name: "fade", duration: 0.5 }}>
        <Swipe direction="left" interval={1.5}>
          {[img1, img2, img3]}
        </Swipe>
      </Clip>

      {/* Clip 4: closing packshot with logo and call-to-action. */}
      <Clip duration={3} transition={{ name: "fade", duration: 0.5 }}>
        <Packshot
          background="#1a1a2e"
          logo="media/cyberpunk-street.png"
          logoPosition="center"
          logoSize="50%"
          cta="Subscribe for more!"
          ctaColor="#FFD700"
        />
      </Clip>
    </Render>
  );

  await render(video, { output: "output/split-layout-demo.mp4" });
  console.log("\ndone! check output/split-layout-demo.mp4");
}

main().catch(console.error);
@@ -0,0 +1,41 @@
/**
 * Before/after split-screen demo: two generated images placed side by
 * side with SplitLayout, captioned, rendered to a 5-second clip.
 */
import { fal } from "../../ai-sdk/providers/fal";
import { Clip, Image, Render, render, SplitLayout as Split, Title } from "..";

async function main() {
  console.log("creating before/after split screen...\n");

  // "Before" panel image.
  const before = Image({
    prompt:
      "overweight man sitting on couch, tired expression, pale skin, messy hair, wearing stained t-shirt",
    model: fal.imageModel("flux-schnell"),
  });

  // "After" panel image.
  const after = Image({
    prompt:
      "fit muscular man standing confidently, tanned skin, bright smile, wearing fitted athletic shirt",
    model: fal.imageModel("flux-schnell"),
  });

  const video = (
    <Render width={1920} height={1080}>
      <Clip duration={5}>
        <Split left={before} right={after} />
        <Title position="bottom" color="#ffffff">
          30 Day Transformation
        </Title>
      </Clip>
    </Render>
  );

  // Dump the element tree for debugging before rendering.
  console.log("video tree:", JSON.stringify(video, null, 2));

  const buffer = await render(video, {
    output: "output/react-split.mp4",
    cache: ".cache/ai",
  });

  console.log(`\ndone! ${buffer.byteLength} bytes`);
  console.log("output: output/react-split.mp4");
}

main().catch(console.error);
@@ -0,0 +1,46 @@
/**
 * 2x2 video grid demo: four generated videos tiled with the Grid
 * layout helper, captioned, rendered to a single 5-second clip.
 */
import { fal } from "../../ai-sdk/providers/fal";
import { Clip, Grid, Render, render, Title, Video } from "..";

async function main() {
  console.log("creating 2x2 video grid...\n");

  const video = (
    <Render width={1920} height={1080}>
      <Clip duration={5}>
        {/* Four children with columns={2} -> 2x2 tiling. */}
        <Grid columns={2}>
          <Video
            prompt="ocean waves crashing on rocks, slow motion, cinematic"
            model={fal.videoModel("wan-2.5")}
          />
          <Video
            prompt="fire burning in fireplace, cozy, warm light"
            model={fal.videoModel("wan-2.5")}
          />
          <Video
            prompt="rain falling on window glass, close up, moody"
            model={fal.videoModel("wan-2.5")}
          />
          <Video
            prompt="clouds moving across blue sky, timelapse, peaceful"
            model={fal.videoModel("wan-2.5")}
          />
        </Grid>
        <Title position="bottom" color="#ffffff">
          Elements
        </Title>
      </Clip>
    </Render>
  );

  // Dump the element tree for debugging before rendering.
  console.log("video tree:", JSON.stringify(video, null, 2));

  const buffer = await render(video, {
    output: "output/react-video-grid.mp4",
    cache: ".cache/ai",
  });

  console.log(`\ndone! ${buffer.byteLength} bytes`);
  console.log("output: output/react-video-grid.mp4");
}

main().catch(console.error);
@@ -0,0 +1,43 @@
/**
 * Public entry point for the React video DSL: element constructors,
 * layout helpers, the render functions, and their prop types.
 */
export type { SizeValue } from "../ai-sdk/providers/editly/types";
// Element constructors.
export {
  Animate,
  Captions,
  Clip,
  Image,
  Music,
  Overlay,
  Packshot,
  Render,
  Slider,
  Speech,
  Split,
  Subtitle,
  Swipe,
  TalkingHead,
  Title,
  Video,
} from "./elements";
// Layout helpers (position children via props rather than rendering).
export { Grid, SplitLayout } from "./layouts";
// Render entry points.
export { render, renderStream } from "./render";
// Prop/node types for the elements above.
export type {
  AnimateProps,
  CaptionsProps,
  ClipProps,
  ImageProps,
  MusicProps,
  OverlayProps,
  PackshotProps,
  PositionProps,
  RenderOptions,
  RenderProps,
  SliderProps,
  SpeechProps,
  SplitProps,
  SubtitleProps,
  SwipeProps,
  TalkingHeadProps,
  TitleProps,
  VargElement,
  VargNode,
  VideoProps,
} from "./types";
@@ -0,0 +1,28 @@
import type { VargElement } from "../types";

/**
 * Grid layout helper: clones each child with percentage-based position
 * props (left/top/width/height) so the children tile into a grid inside
 * the enclosing clip.
 *
 * Dimension rules:
 * - `columns` given: used as-is; rows derived as ceil(n / columns).
 * - only `rows` given: columns derived as ceil(n / rows).
 * - neither given: all children on a single row.
 */
export const Grid = ({
  columns,
  rows,
  children,
  resize = "contain",
}: {
  columns?: number;
  rows?: number;
  children: VargElement[];
  resize?: "cover" | "contain" | "stretch";
}) => {
  // Fix: previously `columns` defaulted to children.length even when
  // `rows` was supplied, which placed every child in the first row and
  // left the remaining rows empty. Derive columns from rows instead.
  const cols =
    columns ?? (rows ? Math.ceil(children.length / rows) : children.length);
  const rowCount = rows ?? Math.ceil(children.length / cols);
  const positioned = children.map((el, i) => ({
    ...el,
    props: {
      ...el.props,
      // Row-major placement: child i goes to cell (i % cols, i / cols).
      left: `${((i % cols) / cols) * 100}%`,
      top: `${(Math.floor(i / cols) / rowCount) * 100}%`,
      width: `${(1 / cols) * 100}%`,
      height: `${(1 / rowCount) * 100}%`,
      resize,
    },
  }));
  return <>{positioned}</>;
};
@@ -0,0 +1,2 @@
// Layout helpers: wrap children with position props; the clip renders them.
export { Grid } from "./grid";
export { SplitLayout } from "./split";
@@ -0,0 +1,20 @@
import type { VargElement } from "../types";
import { Grid } from "./grid";

/**
 * Two-pane split layout: places `left` and `right` side by side
 * (horizontal) or stacked (vertical) by delegating to a 2-cell Grid.
 */
export const SplitLayout = (props: {
  left: VargElement;
  right: VargElement;
  direction?: "horizontal" | "vertical";
}) => {
  const { left, right, direction = "horizontal" } = props;
  const stacked = direction === "vertical";
  return (
    <Grid columns={stacked ? 1 : 2} rows={stacked ? 2 : 1}>
      {left}
      {right}
    </Grid>
  );
};