varg.ai-sdk 0.1.0 → 0.4.0-alpha.1

This diff shows the content of publicly available package versions as published to a supported registry. It is provided for informational purposes only and reflects the changes between package versions as they appear in the public registry.
Files changed (236)
  1. package/.claude/settings.local.json +1 -1
  2. package/.env.example +3 -0
  3. package/.github/workflows/ci.yml +23 -0
  4. package/.husky/README.md +102 -0
  5. package/.husky/commit-msg +6 -0
  6. package/.husky/pre-commit +9 -0
  7. package/.husky/pre-push +6 -0
  8. package/.size-limit.json +8 -0
  9. package/.test-hooks.ts +5 -0
  10. package/CLAUDE.md +10 -3
  11. package/CONTRIBUTING.md +150 -0
  12. package/LICENSE.md +53 -0
  13. package/README.md +56 -209
  14. package/SKILLS.md +26 -10
  15. package/biome.json +7 -1
  16. package/bun.lock +1286 -0
  17. package/commitlint.config.js +22 -0
  18. package/docs/index.html +1130 -0
  19. package/docs/prompting.md +326 -0
  20. package/docs/react.md +834 -0
  21. package/docs/sdk.md +812 -0
  22. package/ffmpeg/CLAUDE.md +68 -0
  23. package/package.json +48 -8
  24. package/pipeline/cookbooks/scripts/animate-frames-parallel.ts +84 -0
  25. package/pipeline/cookbooks/scripts/combine-scenes.sh +53 -0
  26. package/pipeline/cookbooks/scripts/generate-frames-parallel.ts +99 -0
  27. package/pipeline/cookbooks/scripts/still-to-video.sh +37 -0
  28. package/pipeline/cookbooks/text-to-tiktok.md +669 -0
  29. package/pipeline/cookbooks/trendwatching.md +156 -0
  30. package/plan.md +281 -0
  31. package/scripts/.gitkeep +0 -0
  32. package/src/ai-sdk/cache.ts +142 -0
  33. package/src/ai-sdk/examples/cached-generation.ts +53 -0
  34. package/src/ai-sdk/examples/duet-scene-4.ts +53 -0
  35. package/src/ai-sdk/examples/duet-scene-5-audio.ts +32 -0
  36. package/src/ai-sdk/examples/duet-video.ts +56 -0
  37. package/src/ai-sdk/examples/editly-composition.ts +63 -0
  38. package/src/ai-sdk/examples/editly-test.ts +57 -0
  39. package/src/ai-sdk/examples/editly-video-test.ts +52 -0
  40. package/src/ai-sdk/examples/fal-lipsync.ts +43 -0
  41. package/src/ai-sdk/examples/higgsfield-image.ts +61 -0
  42. package/src/ai-sdk/examples/music-generation.ts +19 -0
  43. package/src/ai-sdk/examples/openai-sora.ts +34 -0
  44. package/src/ai-sdk/examples/replicate-bg-removal.ts +52 -0
  45. package/src/ai-sdk/examples/simpsons-scene.ts +61 -0
  46. package/src/ai-sdk/examples/talking-lion.ts +55 -0
  47. package/src/ai-sdk/examples/video-generation.ts +39 -0
  48. package/src/ai-sdk/examples/workflow-animated-girl.ts +104 -0
  49. package/src/ai-sdk/examples/workflow-before-after.ts +114 -0
  50. package/src/ai-sdk/examples/workflow-character-grid.ts +112 -0
  51. package/src/ai-sdk/examples/workflow-slideshow.ts +161 -0
  52. package/src/ai-sdk/file-cache.ts +112 -0
  53. package/src/ai-sdk/file.ts +238 -0
  54. package/src/ai-sdk/generate-element.ts +92 -0
  55. package/src/ai-sdk/generate-music.ts +46 -0
  56. package/src/ai-sdk/generate-video.ts +165 -0
  57. package/src/ai-sdk/index.ts +72 -0
  58. package/src/ai-sdk/music-model.ts +110 -0
  59. package/src/ai-sdk/providers/editly/editly.test.ts +1108 -0
  60. package/src/ai-sdk/providers/editly/ffmpeg.ts +60 -0
  61. package/src/ai-sdk/providers/editly/index.ts +817 -0
  62. package/src/ai-sdk/providers/editly/layers.ts +776 -0
  63. package/src/ai-sdk/providers/editly/plan.md +144 -0
  64. package/src/ai-sdk/providers/editly/types.ts +328 -0
  65. package/src/ai-sdk/providers/elevenlabs-provider.ts +255 -0
  66. package/src/ai-sdk/providers/fal-provider.ts +512 -0
  67. package/src/ai-sdk/providers/higgsfield.ts +379 -0
  68. package/src/ai-sdk/providers/openai.ts +251 -0
  69. package/src/ai-sdk/providers/replicate.ts +16 -0
  70. package/src/ai-sdk/video-model.ts +185 -0
  71. package/src/cli/commands/find.tsx +137 -0
  72. package/src/cli/commands/help.tsx +85 -0
  73. package/src/cli/commands/index.ts +6 -0
  74. package/src/cli/commands/list.tsx +238 -0
  75. package/src/cli/commands/render.tsx +71 -0
  76. package/src/cli/commands/run.tsx +511 -0
  77. package/src/cli/commands/which.tsx +253 -0
  78. package/src/cli/index.ts +114 -0
  79. package/src/cli/quiet.ts +44 -0
  80. package/src/cli/types.ts +32 -0
  81. package/src/cli/ui/components/Badge.tsx +29 -0
  82. package/src/cli/ui/components/DataTable.tsx +51 -0
  83. package/src/cli/ui/components/Header.tsx +23 -0
  84. package/src/cli/ui/components/HelpBlock.tsx +44 -0
  85. package/src/cli/ui/components/KeyValue.tsx +33 -0
  86. package/src/cli/ui/components/OptionRow.tsx +81 -0
  87. package/src/cli/ui/components/Separator.tsx +23 -0
  88. package/src/cli/ui/components/StatusBox.tsx +108 -0
  89. package/src/cli/ui/components/VargBox.tsx +51 -0
  90. package/src/cli/ui/components/VargProgress.tsx +36 -0
  91. package/src/cli/ui/components/VargSpinner.tsx +34 -0
  92. package/src/cli/ui/components/VargText.tsx +56 -0
  93. package/src/cli/ui/components/index.ts +19 -0
  94. package/src/cli/ui/index.ts +12 -0
  95. package/src/cli/ui/render.ts +35 -0
  96. package/src/cli/ui/theme.ts +63 -0
  97. package/src/cli/utils.ts +78 -0
  98. package/src/core/executor/executor.ts +201 -0
  99. package/src/core/executor/index.ts +13 -0
  100. package/src/core/executor/job.ts +214 -0
  101. package/src/core/executor/pipeline.ts +222 -0
  102. package/src/core/index.ts +11 -0
  103. package/src/core/registry/index.ts +9 -0
  104. package/src/core/registry/loader.ts +149 -0
  105. package/src/core/registry/registry.ts +221 -0
  106. package/src/core/registry/resolver.ts +206 -0
  107. package/src/core/schema/helpers.ts +134 -0
  108. package/src/core/schema/index.ts +8 -0
  109. package/src/core/schema/shared.ts +102 -0
  110. package/src/core/schema/types.ts +279 -0
  111. package/src/core/schema/validator.ts +92 -0
  112. package/src/definitions/actions/captions.ts +261 -0
  113. package/src/definitions/actions/edit.ts +298 -0
  114. package/src/definitions/actions/image.ts +125 -0
  115. package/src/definitions/actions/index.ts +114 -0
  116. package/src/definitions/actions/music.ts +205 -0
  117. package/src/definitions/actions/sync.ts +128 -0
  118. package/{action/transcribe/index.ts → src/definitions/actions/transcribe.ts} +63 -90
  119. package/src/definitions/actions/upload.ts +111 -0
  120. package/src/definitions/actions/video.ts +163 -0
  121. package/src/definitions/actions/voice.ts +119 -0
  122. package/src/definitions/index.ts +23 -0
  123. package/src/definitions/models/elevenlabs.ts +50 -0
  124. package/src/definitions/models/flux.ts +56 -0
  125. package/src/definitions/models/index.ts +36 -0
  126. package/src/definitions/models/kling.ts +56 -0
  127. package/src/definitions/models/llama.ts +54 -0
  128. package/src/definitions/models/nano-banana-pro.ts +102 -0
  129. package/src/definitions/models/sonauto.ts +68 -0
  130. package/src/definitions/models/soul.ts +65 -0
  131. package/src/definitions/models/wan.ts +54 -0
  132. package/src/definitions/models/whisper.ts +44 -0
  133. package/src/definitions/skills/index.ts +12 -0
  134. package/src/definitions/skills/talking-character.ts +87 -0
  135. package/src/definitions/skills/text-to-tiktok.ts +97 -0
  136. package/src/index.ts +118 -0
  137. package/src/providers/apify.ts +269 -0
  138. package/src/providers/base.ts +264 -0
  139. package/src/providers/elevenlabs.ts +217 -0
  140. package/src/providers/fal.ts +392 -0
  141. package/src/providers/ffmpeg.ts +544 -0
  142. package/src/providers/fireworks.ts +193 -0
  143. package/src/providers/groq.ts +149 -0
  144. package/src/providers/higgsfield.ts +145 -0
  145. package/src/providers/index.ts +143 -0
  146. package/src/providers/replicate.ts +147 -0
  147. package/src/providers/storage.ts +206 -0
  148. package/src/react/cli.ts +52 -0
  149. package/src/react/elements.ts +146 -0
  150. package/src/react/examples/branching.tsx +66 -0
  151. package/src/react/examples/captions-demo.tsx +37 -0
  152. package/src/react/examples/character-video.tsx +84 -0
  153. package/src/react/examples/grid.tsx +53 -0
  154. package/src/react/examples/layouts-demo.tsx +57 -0
  155. package/src/react/examples/madi.tsx +60 -0
  156. package/src/react/examples/music-test.tsx +35 -0
  157. package/src/react/examples/onlyfans-1m/workflow.tsx +88 -0
  158. package/src/react/examples/orange-portrait.tsx +41 -0
  159. package/src/react/examples/split-element-demo.tsx +60 -0
  160. package/src/react/examples/split-layout-demo.tsx +60 -0
  161. package/src/react/examples/split.tsx +41 -0
  162. package/src/react/examples/video-grid.tsx +46 -0
  163. package/src/react/index.ts +43 -0
  164. package/src/react/layouts/grid.tsx +28 -0
  165. package/src/react/layouts/index.ts +2 -0
  166. package/src/react/layouts/split.tsx +20 -0
  167. package/src/react/react.test.ts +309 -0
  168. package/src/react/render.ts +21 -0
  169. package/src/react/renderers/animate.ts +59 -0
  170. package/src/react/renderers/captions.ts +297 -0
  171. package/src/react/renderers/clip.ts +248 -0
  172. package/src/react/renderers/context.ts +17 -0
  173. package/src/react/renderers/image.ts +109 -0
  174. package/src/react/renderers/index.ts +22 -0
  175. package/src/react/renderers/music.ts +60 -0
  176. package/src/react/renderers/packshot.ts +84 -0
  177. package/src/react/renderers/progress.ts +173 -0
  178. package/src/react/renderers/render.ts +243 -0
  179. package/src/react/renderers/slider.ts +69 -0
  180. package/src/react/renderers/speech.ts +53 -0
  181. package/src/react/renderers/split.ts +91 -0
  182. package/src/react/renderers/subtitle.ts +16 -0
  183. package/src/react/renderers/swipe.ts +75 -0
  184. package/src/react/renderers/title.ts +17 -0
  185. package/src/react/renderers/utils.ts +124 -0
  186. package/src/react/renderers/video.ts +127 -0
  187. package/src/react/runtime/jsx-dev-runtime.ts +43 -0
  188. package/src/react/runtime/jsx-runtime.ts +35 -0
  189. package/src/react/types.ts +232 -0
  190. package/src/studio/index.ts +26 -0
  191. package/src/studio/scanner.ts +102 -0
  192. package/src/studio/server.ts +554 -0
  193. package/src/studio/stages.ts +251 -0
  194. package/src/studio/step-renderer.ts +279 -0
  195. package/src/studio/types.ts +60 -0
  196. package/src/studio/ui/cache.html +303 -0
  197. package/src/studio/ui/index.html +1820 -0
  198. package/src/tests/all.test.ts +509 -0
  199. package/src/tests/index.ts +33 -0
  200. package/src/tests/unit.test.ts +403 -0
  201. package/tsconfig.cli.json +8 -0
  202. package/tsconfig.json +21 -3
  203. package/TEST_RESULTS.md +0 -122
  204. package/action/captions/SKILL.md +0 -170
  205. package/action/captions/index.ts +0 -227
  206. package/action/edit/SKILL.md +0 -235
  207. package/action/edit/index.ts +0 -493
  208. package/action/image/SKILL.md +0 -140
  209. package/action/image/index.ts +0 -112
  210. package/action/sync/SKILL.md +0 -136
  211. package/action/sync/index.ts +0 -187
  212. package/action/transcribe/SKILL.md +0 -179
  213. package/action/video/SKILL.md +0 -116
  214. package/action/video/index.ts +0 -135
  215. package/action/voice/SKILL.md +0 -125
  216. package/action/voice/index.ts +0 -201
  217. package/index.ts +0 -38
  218. package/lib/README.md +0 -144
  219. package/lib/ai-sdk/fal.ts +0 -106
  220. package/lib/ai-sdk/replicate.ts +0 -107
  221. package/lib/elevenlabs.ts +0 -382
  222. package/lib/fal.ts +0 -478
  223. package/lib/ffmpeg.ts +0 -467
  224. package/lib/fireworks.ts +0 -235
  225. package/lib/groq.ts +0 -246
  226. package/lib/higgsfield.ts +0 -176
  227. package/lib/remotion/SKILL.md +0 -823
  228. package/lib/remotion/cli.ts +0 -115
  229. package/lib/remotion/functions.ts +0 -283
  230. package/lib/remotion/index.ts +0 -19
  231. package/lib/remotion/templates.ts +0 -73
  232. package/lib/replicate.ts +0 -304
  233. package/output.txt +0 -1
  234. package/test-import.ts +0 -7
  235. package/test-services.ts +0 -97
  236. package/utilities/s3.ts +0 -147
package/src/react/examples/character-video.tsx
@@ -0,0 +1,84 @@
+ import { elevenlabs } from "../../ai-sdk/providers/elevenlabs-provider";
+ import { fal } from "../../ai-sdk/providers/fal-provider";
+ import { Animate, Clip, Image, Music, Render, render } from "..";
+
+ const MADI_REF =
+   "https://s3.varg.ai/fellowers/madi/character_shots/madi_shot_03_closeup.png";
+
+ // TikTok timeline structure:
+ // 0-2s: HOOK - frontal close-up, grab attention
+ // 2-4s: 45° medium shot + expression change
+ // 4-6s: low angle or extreme close-up
+ // 6-8s: high angle + new emotion
+ const SCENES = [
+   {
+     // 0-2s: HOOK - frontal extreme close-up, surprised/curious expression
+     prompt:
+       "extreme close-up face shot, surprised expression with wide eyes, looking directly at camera, holding peach near lips",
+     motion:
+       "eyes widen in surprise, eyebrows raise slightly, subtle head tilt forward. Static shot, no camera movement.",
+   },
+   {
+     // 2-4s: 45° medium shot, playful grimace while eating
+     prompt:
+       "45-degree angle medium shot showing face and hands, biting into peach with exaggerated enjoyment, juice on lips, playful expression",
+     motion:
+       "turns head 45 degrees, bites into peach, juice drips down chin, hands move expressively. Slow push-in camera movement.",
+   },
+   {
+     // 4-6s: low angle (up shot), confident/powerful vibe
+     prompt:
+       "low angle shot from below, looking down at camera with confident smirk, holding peach triumphantly, dramatic perspective",
+     motion:
+       "looks down at camera with growing smile, raises peach slightly, confident head tilt. Static camera, slight lens distortion.",
+   },
+   {
+     // 6-8s: high angle (down shot), playful vulnerability + CTA energy
+     prompt:
+       "high angle shot from above, looking up at camera with playful smile, arms spread wide, peach in one hand, endearing expression",
+     motion:
+       "looks up at camera, expression shifts from neutral to excited smile, subtle wink, slight forward lean. Gentle camera tilt down.",
+   },
+ ];
+
+ async function main() {
+   console.log("creating madi peach video (animated)...\n");
+
+   const video = (
+     <Render width={1080} height={1920}>
+       <Music
+         prompt="upbeat electronic pop, energetic female vocal chops, modern tiktok vibe, catchy melody"
+         model={elevenlabs.musicModel()}
+         duration={8}
+       />
+
+       {SCENES.map((scene) => (
+         <Clip key={scene.prompt} duration={2}>
+           <Animate
+             image={Image({
+               prompt: { text: scene.prompt, images: [MADI_REF] },
+               model: fal.imageModel("nano-banana-pro/edit"),
+               aspectRatio: "9:16",
+               resize: "cover",
+             })}
+             motion={scene.motion}
+             model={fal.videoModel("wan-2.5")}
+             duration={5}
+           />
+         </Clip>
+       ))}
+     </Render>
+   );
+
+   console.log("rendering", SCENES.length, "animated clips in parallel...");
+
+   const buffer = await render(video, {
+     output: "output/react-madi.mp4",
+     cache: ".cache/ai",
+   });
+
+   console.log(`\ndone! ${buffer.byteLength} bytes`);
+   console.log("output: output/react-madi.mp4");
+ }
+
+ main().catch(console.error);
package/src/react/examples/grid.tsx
@@ -0,0 +1,53 @@
+ import { fal } from "../../ai-sdk/providers/fal-provider";
+ import { Clip, Grid, Image, Render, render, Title } from "..";
+
+ const CHARACTER_PROMPTS = [
+   { name: "Warrior", prompt: "fierce warrior with sword, armor" },
+   { name: "Mage", prompt: "mystical mage with glowing staff, robes" },
+   { name: "Rogue", prompt: "stealthy rogue with daggers, hooded" },
+   { name: "Healer", prompt: "gentle healer with staff, white robes" },
+   { name: "Archer", prompt: "skilled archer with bow, leather armor" },
+   { name: "Knight", prompt: "noble knight with shield, heavy armor" },
+   { name: "Necro", prompt: "dark necromancer with skull staff" },
+   { name: "Paladin", prompt: "holy paladin with hammer, golden armor" },
+   { name: "Bard", prompt: "charismatic bard with lute, colorful" },
+   { name: "Druid", prompt: "nature druid with wooden staff, leaves" },
+   { name: "Monk", prompt: "disciplined monk with wrapped fists" },
+   { name: "Assassin", prompt: "deadly assassin with hidden blades" },
+ ];
+
+ async function main() {
+   console.log("creating 3x4 character grid...\n");
+
+   const baseStyle = "fantasy portrait, stylized art, vibrant colors";
+
+   const images = CHARACTER_PROMPTS.map(({ prompt }) =>
+     Image({
+       prompt: `${prompt}, ${baseStyle}`,
+       model: fal.imageModel("flux-schnell"),
+     }),
+   );
+
+   const video = (
+     <Render width={1080} height={1440}>
+       <Clip duration={5}>
+         <Grid columns={3}>{images}</Grid>
+         <Title position="bottom" color="#ffffff">
+           Fantasy Characters
+         </Title>
+       </Clip>
+     </Render>
+   );
+
+   console.log("video tree:", JSON.stringify(video, null, 2));
+
+   const buffer = await render(video, {
+     output: "output/react-grid.mp4",
+     cache: ".cache/ai",
+   });
+
+   console.log(`\ndone! ${buffer.byteLength} bytes`);
+   console.log("output: output/react-grid.mp4");
+ }
+
+ main().catch(console.error);
package/src/react/examples/layouts-demo.tsx
@@ -0,0 +1,57 @@
+ import {
+   Clip,
+   Image,
+   Packshot,
+   Render,
+   render,
+   Slider,
+   Split,
+   Swipe,
+   Title,
+ } from "..";
+
+ async function main() {
+   const img1 = Image({ src: "media/cyberpunk-street.png" });
+   const img2 = Image({ src: "media/fal-coffee-shop.png" });
+   const img3 = Image({ src: "media/kirill.png" });
+
+   const video = (
+     <Render width={1280} height={720}>
+       <Clip duration={3}>
+         <Split direction="horizontal">{[img1, img2]}</Split>
+         <Title position="bottom">Split Layout</Title>
+       </Clip>
+
+       <Clip duration={4} transition={{ name: "fade", duration: 0.5 }}>
+         <Slider direction="horizontal">{[img1, img2, img3]}</Slider>
+       </Clip>
+
+       <Clip duration={4} transition={{ name: "fade", duration: 0.5 }}>
+         <Swipe direction="left" interval={1.5}>
+           {[img1, img2, img3]}
+         </Swipe>
+       </Clip>
+
+       <Clip duration={3} transition={{ name: "fade", duration: 0.5 }}>
+         <Packshot
+           background="#1a1a2e"
+           logo="media/cyberpunk-street.png"
+           logoPosition="center"
+           logoSize="50%"
+           cta="Subscribe for more!"
+           ctaColor="#FFD700"
+         />
+       </Clip>
+     </Render>
+   );
+
+   console.log("rendering layouts demo...\n");
+
+   await render(video, {
+     output: "output/layouts-demo.mp4",
+   });
+
+   console.log("\ndone! check output/layouts-demo.mp4");
+ }
+
+ main().catch(console.error);
package/src/react/examples/madi.tsx
@@ -0,0 +1,60 @@
+ import { elevenlabs } from "../../ai-sdk/providers/elevenlabs-provider";
+ import { fal } from "../../ai-sdk/providers/fal-provider";
+ import { Animate, Clip, Image, Music, Render } from "..";
+
+ const MADI_REF =
+   "https://s3.varg.ai/fellowers/madi/character_shots/madi_shot_03_closeup.png";
+
+ const SCENES = [
+   {
+     prompt:
+       "extreme close-up face shot, surprised expression with wide eyes, looking directly at camera, holding peach near lips",
+     motion:
+       "eyes widen in surprise, eyebrows raise slightly, subtle head tilt forward. Static shot, no camera movement.",
+   },
+   {
+     prompt:
+       "45-degree angle medium shot showing face and hands, biting into peach with exaggerated enjoyment, juice on lips, playful expression",
+     motion:
+       "turns head 45 degrees, bites into peach, juice drips down chin, hands move expressively. Slow push-in camera movement.",
+   },
+   {
+     prompt:
+       "low angle shot from below, looking down at camera with confident smirk, holding peach triumphantly, dramatic perspective",
+     motion:
+       "looks down at camera with growing smile, raises peach slightly, confident head tilt. Static camera, slight lens distortion.",
+   },
+   {
+     prompt:
+       "high angle shot from above, looking up at camera with playful smile, arms spread wide, peach in one hand, endearing expression",
+     motion:
+       "looks up at camera, expression shifts from neutral to excited smile, subtle wink, slight forward lean. Gentle camera tilt down.",
+   },
+ ];
+
+ export default (
+   <Render width={1080} height={1920}>
+     <Music
+       prompt="upbeat electronic pop, energetic female vocal chops, modern tiktok vibe, catchy melody"
+       model={elevenlabs.musicModel()}
+       duration={10}
+       volume={0.6}
+     />
+
+     {SCENES.map((scene) => (
+       <Clip key={scene.prompt} duration={2}>
+         <Animate
+           image={Image({
+             prompt: { text: scene.prompt, images: [MADI_REF] },
+             model: fal.imageModel("nano-banana-pro/edit"),
+             aspectRatio: "9:16",
+             resize: "cover",
+           })}
+           motion={scene.motion}
+           model={fal.videoModel("wan-2.5")}
+           duration={5}
+         />
+       </Clip>
+     ))}
+   </Render>
+ );
package/src/react/examples/music-test.tsx
@@ -0,0 +1,35 @@
+ import { elevenlabs } from "../../ai-sdk/providers/elevenlabs-provider";
+ import { Clip, Image, Music, Render, render } from "..";
+
+ async function main() {
+   const video = (
+     <Render width={1080} height={1920}>
+       <Music
+         prompt="chill lo-fi hip hop beats, relaxing piano melody"
+         model={elevenlabs.musicModel()}
+       />
+
+       <Clip duration={3}>
+         <Image src="media/cyberpunk-street.png" resize="cover" />
+       </Clip>
+       <Clip duration={3}>
+         <Image src="media/madi-portrait.png" resize="cover" />
+       </Clip>
+       <Clip duration={3}>
+         <Image src="media/replicate-forest.png" resize="cover" />
+       </Clip>
+     </Render>
+   );
+
+   console.log("testing music auto-trim (no duration specified)");
+   console.log("3 clips x 3s = 9s video, music should auto-trim to 9s\n");
+
+   await render(video, {
+     output: "output/music-test.mp4",
+     cache: ".cache/ai",
+   });
+
+   console.log("\ndone! check output/music-test.mp4");
+ }
+
+ main().catch(console.error);
package/src/react/examples/onlyfans-1m/workflow.tsx
@@ -0,0 +1,88 @@
+ /**
+  * OnlyFans-style selfie video workflow (React DSL)
+  *
+  * 1. Generate base girl image using Higgsfield Soul
+  * 2. Edit bra color using nano-banana-pro/edit
+  * 3. Generate 5 photos with different angles using nano-banana
+  * 4. Animate all with prompts (step back, turn left/right)
+  * 5. Concatenate into ~10 second video
+  *
+  * Run: bun run src/ai-sdk/react/cli.ts src/ai-sdk/examples/onlyfans-1m/workflow.tsx -o output/onlyfans-1m.mp4
+  *
+  * Required env: HIGGSFIELD_API_KEY, HIGGSFIELD_SECRET, FAL_KEY
+  */
+
+ import { fal, higgsfield } from "../../../ai-sdk";
+ import { Clip, Image, Render, Video } from "../..";
+
+ // ============================================================================
+ // CHARACTER DEFINITION
+ // ============================================================================
+
+ const NEW_BRA_COLOR = "deep purple";
+
+ // Base character prompt for Higgsfield
+ const baseCharacter = Image({
+   prompt:
+     "A beautiful Slavic woman in her late 20s with platinum blonde hair, icy blue eyes, and perfect skin. She bends very close to the phone lens, her chest framed by a white sports bra with a bold neckline, and she is wearing high-waisted athletic shorts in pale grey that accentuate her figure. Her expression is confident and slightly teasing. The background shows a modern apartment with soft daylight through large windows, reinforcing the natural homemade vibe",
+   model: higgsfield.imageModel("soul", {
+     quality: "1080p",
+     styleId: higgsfield.styles.CAM_360,
+   }),
+   aspectRatio: "9:16",
+ });
+
+ // Recolor the bra using nano-banana
+ const background = Image({
+   prompt: {
+     text: `Remove character from the image`,
+     images: [baseCharacter],
+   },
+   model: fal.imageModel("nano-banana-pro/edit"),
+   aspectRatio: "9:16",
+ });
+
+ const newBraCharacter = Image({
+   prompt: {
+     text: `Change the sports bra colour to ${NEW_BRA_COLOR}. Keep everything else exactly the same - same woman, same pose, same lighting, same background.`,
+     images: [baseCharacter, background],
+   },
+   model: fal.imageModel("seedream-v4.5/edit"),
+   aspectRatio: "9:16",
+ });
+
+ const newAngleCharacter = Image({
+   prompt: {
+     text: `Slightly change the pose of the character, keeping the same pose, lighting, and background. Put the character two steps aways from the`,
+     images: [newBraCharacter, background],
+   },
+   model: fal.imageModel("nano-banana-pro/edit"),
+   aspectRatio: "9:16",
+ });
+
+ const motionLeft = `A woman in stylish ${NEW_BRA_COLOR} sportswear takes a selfie. She starts very close to camera showing face and decolletage, then steps back to reveal more of her body. She turns slightly to the LEFT to show her figure in profile. Warm daylight, authentic homemade video feel. Camera static.`;
+
+ export default (
+   <Render width={1080} height={1920}>
+     {/* Clip 1: Left angle, turns left */}
+     <Clip duration={3}>
+       <Video
+         prompt={{
+           images: [newBraCharacter],
+           text: motionLeft,
+         }}
+         model={fal.videoModel("kling-v2.5")}
+       />
+     </Clip>
+
+     <Clip duration={3}>
+       <Video
+         prompt={{
+           images: [newAngleCharacter],
+           text: motionLeft,
+         }}
+         model={fal.videoModel("kling-v2.5")}
+       />
+     </Clip>
+   </Render>
+ );
package/src/react/examples/orange-portrait.tsx
@@ -0,0 +1,41 @@
+ import { fal } from "../../ai-sdk/providers/fal-provider";
+ import { Clip, Image, Render, Video } from "..";
+
+ // character: young woman, short dark brown bob with wispy bangs, oval face, fair skin,
+ // large dark brown eyes, full lips, silver hoop earrings
+ // style: deep black bg, dramatic orange rim lighting, noir/premium aesthetic
+
+ export default (
+   <Render width={1080} height={1920}>
+     <Clip duration={5}>
+       <Video
+         prompt={{
+           text: "She steps back, camera reveals more of her body until she appears fully in frame. Studio lighting, authentic confident slightly playful atmosphere. Camera static. She poses naturally, first looking straight at camera, then turning slightly to the side to show her figure in profile. Intense orange lighting.",
+           images: [
+             Image({
+               prompt: {
+                 text: `Using the attached reference images, generate a photorealistic Three-quarter editorial portrait of the exact same character — maintain identical face, hairstyle, and proportions from Image 1.
+
+ Framing: Head and shoulders, cropped at upper chest. Direct eye contact with camera.
+
+ Natural confident expression, relaxed shoulders.
+ Preserve the outfit neckline and visible clothing details from reference.
+
+ Background: Deep black with two contrasting orange gradient accents matching Reference 2. Soft gradient bleed, no hard edges.
+
+ Shot on 85mm f/1.4 lens, shallow depth of field. Clean studio lighting — soft key light on face, subtle rim light on hair and shoulders for separation. High-end fashion editorial aesthetic.`,
+                 images: [
+                   "https://s3.varg.ai/uploads/images/1_0475e227.png",
+                   "https://s3.varg.ai/uploads/images/xyearp51qvve-zi3nrcve-zbno2hfgt5gergjrof_995f553d.png",
+                 ],
+               },
+               model: fal.imageModel("nano-banana-pro/edit"),
+             }),
+           ],
+         }}
+         model={fal.videoModel("kling-v2.5")}
+         duration={5}
+       />
+     </Clip>
+   </Render>
+ );
package/src/react/examples/split-element-demo.tsx
@@ -0,0 +1,60 @@
+ /**
+  * Split Element Demo - uses the Split renderer (creates separate video composition)
+  *
+  * The Split element renders children into a side-by-side video using editly.
+  * Good for: when you need a self-contained split-screen video layer
+  */
+ import {
+   Clip,
+   Image,
+   Packshot,
+   Render,
+   render,
+   Slider,
+   Split,
+   Swipe,
+   Title,
+ } from "..";
+
+ async function main() {
+   console.log("Split Element Demo (uses renderer)\n");
+
+   const img1 = Image({ src: "media/cyberpunk-street.png" });
+   const img2 = Image({ src: "media/fal-coffee-shop.png" });
+   const img3 = Image({ src: "media/kirill.png" });
+
+   const video = (
+     <Render width={1280} height={720}>
+       <Clip duration={3}>
+         <Split direction="horizontal">{[img1, img2]}</Split>
+         <Title position="bottom">Split Element (renderer)</Title>
+       </Clip>
+
+       <Clip duration={4} transition={{ name: "fade", duration: 0.5 }}>
+         <Slider direction="horizontal">{[img1, img2, img3]}</Slider>
+       </Clip>
+
+       <Clip duration={4} transition={{ name: "fade", duration: 0.5 }}>
+         <Swipe direction="left" interval={1.5}>
+           {[img1, img2, img3]}
+         </Swipe>
+       </Clip>
+
+       <Clip duration={3} transition={{ name: "fade", duration: 0.5 }}>
+         <Packshot
+           background="#1a1a2e"
+           logo="media/cyberpunk-street.png"
+           logoPosition="center"
+           logoSize="50%"
+           cta="Subscribe for more!"
+           ctaColor="#FFD700"
+         />
+       </Clip>
+     </Render>
+   );
+
+   await render(video, { output: "output/split-element-demo.mp4" });
+   console.log("\ndone! check output/split-element-demo.mp4");
+ }
+
+ main().catch(console.error);
package/src/react/examples/split-layout-demo.tsx
@@ -0,0 +1,60 @@
+ /**
+  * SplitLayout Demo - uses the layout helper (positions children in clip)
+  *
+  * SplitLayout just adds position props to children, letting the clip handle rendering.
+  * Good for: when you want positioned images/videos within a clip alongside other layers
+  */
+ import {
+   Clip,
+   Image,
+   Packshot,
+   Render,
+   render,
+   Slider,
+   SplitLayout,
+   Swipe,
+   Title,
+ } from "..";
+
+ async function main() {
+   console.log("SplitLayout Demo (uses layout helper)\n");
+
+   const img1 = Image({ src: "media/cyberpunk-street.png" });
+   const img2 = Image({ src: "media/fal-coffee-shop.png" });
+   const img3 = Image({ src: "media/kirill.png" });
+
+   const video = (
+     <Render width={1280} height={720}>
+       <Clip duration={3}>
+         <SplitLayout direction="horizontal" left={img1} right={img2} />
+         <Title position="bottom">SplitLayout (layout helper)</Title>
+       </Clip>
+
+       <Clip duration={4} transition={{ name: "fade", duration: 0.5 }}>
+         <Slider direction="horizontal">{[img1, img2, img3]}</Slider>
+       </Clip>
+
+       <Clip duration={4} transition={{ name: "fade", duration: 0.5 }}>
+         <Swipe direction="left" interval={1.5}>
+           {[img1, img2, img3]}
+         </Swipe>
+       </Clip>
+
+       <Clip duration={3} transition={{ name: "fade", duration: 0.5 }}>
+         <Packshot
+           background="#1a1a2e"
+           logo="media/cyberpunk-street.png"
+           logoPosition="center"
+           logoSize="50%"
+           cta="Subscribe for more!"
+           ctaColor="#FFD700"
+         />
+       </Clip>
+     </Render>
+   );
+
+   await render(video, { output: "output/split-layout-demo.mp4" });
+   console.log("\ndone! check output/split-layout-demo.mp4");
+ }
+
+ main().catch(console.error);
package/src/react/examples/split.tsx
@@ -0,0 +1,41 @@
+ import { fal } from "../../ai-sdk/providers/fal-provider";
+ import { Clip, Image, Render, render, SplitLayout as Split, Title } from "..";
+
+ async function main() {
+   console.log("creating before/after split screen...\n");
+
+   const before = Image({
+     prompt:
+       "overweight man sitting on couch, tired expression, pale skin, messy hair, wearing stained t-shirt",
+     model: fal.imageModel("flux-schnell"),
+   });
+
+   const after = Image({
+     prompt:
+       "fit muscular man standing confidently, tanned skin, bright smile, wearing fitted athletic shirt",
+     model: fal.imageModel("flux-schnell"),
+   });
+
+   const video = (
+     <Render width={1920} height={1080}>
+       <Clip duration={5}>
+         <Split left={before} right={after} />
+         <Title position="bottom" color="#ffffff">
+           30 Day Transformation
+         </Title>
+       </Clip>
+     </Render>
+   );
+
+   console.log("video tree:", JSON.stringify(video, null, 2));
+
+   const buffer = await render(video, {
+     output: "output/react-split.mp4",
+     cache: ".cache/ai",
+   });
+
+   console.log(`\ndone! ${buffer.byteLength} bytes`);
+   console.log("output: output/react-split.mp4");
+ }
+
+ main().catch(console.error);
package/src/react/examples/video-grid.tsx
@@ -0,0 +1,46 @@
+ import { fal } from "../../ai-sdk/providers/fal-provider";
+ import { Clip, Grid, Render, render, Title, Video } from "..";
+
+ async function main() {
+   console.log("creating 2x2 video grid...\n");
+
+   const video = (
+     <Render width={1920} height={1080}>
+       <Clip duration={5}>
+         <Grid columns={2}>
+           <Video
+             prompt="ocean waves crashing on rocks, slow motion, cinematic"
+             model={fal.videoModel("wan-2.5")}
+           />
+           <Video
+             prompt="fire burning in fireplace, cozy, warm light"
+             model={fal.videoModel("wan-2.5")}
+           />
+           <Video
+             prompt="rain falling on window glass, close up, moody"
+             model={fal.videoModel("wan-2.5")}
+           />
+           <Video
+             prompt="clouds moving across blue sky, timelapse, peaceful"
+             model={fal.videoModel("wan-2.5")}
+           />
+         </Grid>
+         <Title position="bottom" color="#ffffff">
+           Elements
+         </Title>
+       </Clip>
+     </Render>
+   );
+
+   console.log("video tree:", JSON.stringify(video, null, 2));
+
+   const buffer = await render(video, {
+     output: "output/react-video-grid.mp4",
+     cache: ".cache/ai",
+   });
+
+   console.log(`\ndone! ${buffer.byteLength} bytes`);
+   console.log("output: output/react-video-grid.mp4");
+ }
+
+ main().catch(console.error);
package/src/react/index.ts
@@ -0,0 +1,43 @@
+ export type { SizeValue } from "../ai-sdk/providers/editly/types";
+ export {
+   Animate,
+   Captions,
+   Clip,
+   Image,
+   Music,
+   Overlay,
+   Packshot,
+   Render,
+   Slider,
+   Speech,
+   Split,
+   Subtitle,
+   Swipe,
+   TalkingHead,
+   Title,
+   Video,
+ } from "./elements";
+ export { Grid, SplitLayout } from "./layouts";
+ export { render, renderStream } from "./render";
+ export type {
+   AnimateProps,
+   CaptionsProps,
+   ClipProps,
+   ImageProps,
+   MusicProps,
+   OverlayProps,
+   PackshotProps,
+   PositionProps,
+   RenderOptions,
+   RenderProps,
+   SliderProps,
+   SpeechProps,
+   SplitProps,
+   SubtitleProps,
+   SwipeProps,
+   TalkingHeadProps,
+   TitleProps,
+   VargElement,
+   VargNode,
+   VideoProps,
+ } from "./types";