@vargai/sdk 0.1.1

Files changed (67)
  1. package/.env.example +24 -0
  2. package/CLAUDE.md +118 -0
  3. package/HIGGSFIELD_REWRITE_SUMMARY.md +300 -0
  4. package/README.md +231 -0
  5. package/SKILLS.md +157 -0
  6. package/STRUCTURE.md +92 -0
  7. package/TEST_RESULTS.md +122 -0
  8. package/action/captions/SKILL.md +170 -0
  9. package/action/captions/index.ts +169 -0
  10. package/action/edit/SKILL.md +235 -0
  11. package/action/edit/index.ts +437 -0
  12. package/action/image/SKILL.md +140 -0
  13. package/action/image/index.ts +105 -0
  14. package/action/sync/SKILL.md +136 -0
  15. package/action/sync/index.ts +145 -0
  16. package/action/transcribe/SKILL.md +179 -0
  17. package/action/transcribe/index.ts +210 -0
  18. package/action/video/SKILL.md +116 -0
  19. package/action/video/index.ts +125 -0
  20. package/action/voice/SKILL.md +125 -0
  21. package/action/voice/index.ts +136 -0
  22. package/biome.json +33 -0
  23. package/bun.lock +842 -0
  24. package/cli/commands/find.ts +58 -0
  25. package/cli/commands/help.ts +70 -0
  26. package/cli/commands/list.ts +49 -0
  27. package/cli/commands/run.ts +237 -0
  28. package/cli/commands/which.ts +66 -0
  29. package/cli/discover.ts +66 -0
  30. package/cli/index.ts +33 -0
  31. package/cli/runner.ts +65 -0
  32. package/cli/types.ts +49 -0
  33. package/cli/ui.ts +185 -0
  34. package/index.ts +75 -0
  35. package/lib/README.md +144 -0
  36. package/lib/ai-sdk/fal.ts +106 -0
  37. package/lib/ai-sdk/replicate.ts +107 -0
  38. package/lib/elevenlabs.ts +382 -0
  39. package/lib/fal.ts +467 -0
  40. package/lib/ffmpeg.ts +467 -0
  41. package/lib/fireworks.ts +235 -0
  42. package/lib/groq.ts +246 -0
  43. package/lib/higgsfield/MIGRATION.md +308 -0
  44. package/lib/higgsfield/README.md +273 -0
  45. package/lib/higgsfield/example.ts +228 -0
  46. package/lib/higgsfield/index.ts +241 -0
  47. package/lib/higgsfield/soul.ts +262 -0
  48. package/lib/higgsfield.ts +176 -0
  49. package/lib/remotion/SKILL.md +823 -0
  50. package/lib/remotion/cli.ts +115 -0
  51. package/lib/remotion/functions.ts +283 -0
  52. package/lib/remotion/index.ts +19 -0
  53. package/lib/remotion/templates.ts +73 -0
  54. package/lib/replicate.ts +304 -0
  55. package/output.txt +1 -0
  56. package/package.json +42 -0
  57. package/pipeline/cookbooks/SKILL.md +285 -0
  58. package/pipeline/cookbooks/remotion-video.md +585 -0
  59. package/pipeline/cookbooks/round-video-character.md +337 -0
  60. package/pipeline/cookbooks/talking-character.md +59 -0
  61. package/scripts/produce-menopause-campaign.sh +202 -0
  62. package/service/music/SKILL.md +229 -0
  63. package/service/music/index.ts +296 -0
  64. package/test-import.ts +7 -0
  65. package/test-services.ts +97 -0
  66. package/tsconfig.json +29 -0
  67. package/utilities/s3.ts +147 -0
package/lib/replicate.ts ADDED
@@ -0,0 +1,304 @@
+ #!/usr/bin/env bun
+
+ /**
+  * replicate.com api wrapper for video/image generation
+  * supports models like minimax, kling, luma, stable diffusion
+  */
+
+ import Replicate from "replicate";
+
+ const replicate = new Replicate({
+   auth: process.env.REPLICATE_API_TOKEN || "",
+ });
+
+ // types
+ export interface RunVideoOptions {
+   model: string;
+   input: Record<string, unknown>;
+ }
+
+ export interface RunImageOptions {
+   model: string;
+   input: Record<string, unknown>;
+ }
+
+ // core functions
+ export async function runModel(model: string, input: Record<string, unknown>) {
+   console.log(`[replicate] running ${model}...`);
+
+   try {
+     const output = await replicate.run(model as `${string}/${string}`, {
+       input,
+     });
+     console.log(`[replicate] completed`);
+     return output;
+   } catch (error) {
+     console.error(`[replicate] error:`, error);
+     throw error;
+   }
+ }
+
+ export async function runVideo(options: RunVideoOptions) {
+   const { model, input } = options;
+
+   if (!model || !input) {
+     throw new Error("model and input are required");
+   }
+
+   return await runModel(model, input);
+ }
+
+ export async function runImage(options: RunImageOptions) {
+   const { model, input } = options;
+
+   if (!model || !input) {
+     throw new Error("model and input are required");
+   }
+
+   return await runModel(model, input);
+ }
+
+ // popular models
+ export const MODELS = {
+   // video generation
+   VIDEO: {
+     MINIMAX: "minimax/video-01",
+     KLING: "fofr/kling-v1.5",
+     LUMA: "fofr/ltx-video",
+     RUNWAY_GEN3: "replicate/runway-gen3-turbo",
+     WAN_2_5: "wan-video/wan-2.5-i2v",
+   },
+   // image generation
+   IMAGE: {
+     FLUX_PRO: "black-forest-labs/flux-1.1-pro",
+     FLUX_DEV: "black-forest-labs/flux-dev",
+     FLUX_SCHNELL: "black-forest-labs/flux-schnell",
+     STABLE_DIFFUSION: "stability-ai/sdxl",
+   },
+ };
+
+ // cli
+ async function cli() {
+   const args = process.argv.slice(2);
+   const command = args[0];
+
+   if (!command || command === "help") {
+     console.log(`
+ usage:
+   bun run lib/replicate.ts <command> [args]
+
+ commands:
+   video <model> <prompt> [imageUrl]    generate video
+   image <model> <prompt>               generate image
+   minimax <prompt> [imageUrl]          generate video with minimax-01
+   kling <prompt> [imageUrl]            generate video with kling-v1.5
+   wan <imageUrl> <audioUrl> <prompt>   generate talking video with wan 2.5
+   flux <prompt>                        generate image with flux-dev
+   list                                 list recent predictions
+   get <predictionId>                   get prediction by id
+   help                                 show this help
+
+ examples:
+   bun run lib/replicate.ts minimax "person walking on beach"
+   bun run lib/replicate.ts minimax "camera zoom in" https://example.com/img.jpg
+   bun run lib/replicate.ts kling "cinematic city scene"
+   bun run lib/replicate.ts wan https://image.jpg https://audio.mp3 "person talking to camera"
+   bun run lib/replicate.ts flux "cyberpunk cityscape"
+   bun run lib/replicate.ts video "minimax/video-01" "dancing robot"
+   bun run lib/replicate.ts image "black-forest-labs/flux-dev" "sunset landscape"
+
+ environment:
+   REPLICATE_API_TOKEN - your replicate api token
+ `);
+     process.exit(0);
+   }
+
+   try {
+     switch (command) {
+       case "minimax": {
+         const prompt = args[1];
+         const imageUrl = args[2];
+
+         if (!prompt) {
+           throw new Error("prompt is required");
+         }
+
+         const input: Record<string, unknown> = { prompt };
+         if (imageUrl) {
+           input.first_frame_image = imageUrl;
+         }
+
+         const output = await runVideo({
+           model: MODELS.VIDEO.MINIMAX,
+           input,
+         });
+
+         console.log(`[replicate] minimax output:`, output);
+         break;
+       }
+
+       case "kling": {
+         const prompt = args[1];
+         const imageUrl = args[2];
+
+         if (!prompt) {
+           throw new Error("prompt is required");
+         }
+
+         const input: Record<string, unknown> = { prompt };
+         if (imageUrl) {
+           input.image = imageUrl;
+         }
+
+         const output = await runVideo({
+           model: MODELS.VIDEO.KLING,
+           input,
+         });
+
+         console.log(`[replicate] kling output:`, output);
+         break;
+       }
+
+       case "wan": {
+         const imageUrl = args[1];
+         const audioUrl = args[2];
+         const prompt = args[3];
+         const duration = args[4] ? Number.parseInt(args[4], 10) : 10;
+         const resolution = args[5] || "480p";
+
+         if (!imageUrl || !audioUrl || !prompt) {
+           throw new Error("imageUrl, audioUrl, and prompt are required");
+         }
+
+         const input: Record<string, unknown> = {
+           image: imageUrl,
+           audio: audioUrl,
+           prompt,
+           duration,
+           resolution,
+           enable_prompt_expansion: true,
+         };
+
+         console.log(`[replicate] running wan 2.5...`);
+         console.log(`[replicate] this may take 3-5 minutes...`);
+
+         const output = await runVideo({
+           model: MODELS.VIDEO.WAN_2_5,
+           input,
+         });
+
+         console.log(`[replicate] wan 2.5 output:`, output);
+         break;
+       }
+
+       case "list": {
+         console.log(`[replicate] fetching recent predictions...`);
+         const predictions = await replicate.predictions.list();
+
+         console.log(`\nrecent predictions:\n`);
+         for (const pred of predictions.results.slice(0, 10)) {
+           console.log(`id: ${pred.id}`);
+           console.log(`status: ${pred.status}`);
+           console.log(`model: ${pred.version}`);
+           console.log(`created: ${pred.created_at}`);
+           if (pred.output) {
+             console.log(
+               `output: ${JSON.stringify(pred.output).substring(0, 100)}...`,
+             );
+           }
+           console.log(`---`);
+         }
+         break;
+       }
+
+       case "get": {
+         const predictionId = args[1];
+
+         if (!predictionId) {
+           throw new Error("predictionId is required");
+         }
+
+         console.log(`[replicate] fetching prediction ${predictionId}...`);
+         const prediction = await replicate.predictions.get(predictionId);
+
+         console.log(`\nstatus: ${prediction.status}`);
+         console.log(`model: ${prediction.version}`);
+         console.log(`created: ${prediction.created_at}`);
+
+         if (prediction.status === "succeeded") {
+           console.log(`\noutput:`);
+           console.log(JSON.stringify(prediction.output, null, 2));
+         } else if (prediction.status === "failed") {
+           console.log(`\nerror: ${prediction.error}`);
+         } else {
+           console.log(`\nstill processing...`);
+         }
+         break;
+       }
+
+       case "flux": {
+         const prompt = args[1];
+
+         if (!prompt) {
+           throw new Error("prompt is required");
+         }
+
+         const output = await runImage({
+           model: MODELS.IMAGE.FLUX_DEV,
+           input: { prompt },
+         });
+
+         console.log(`[replicate] flux output:`, output);
+         break;
+       }
+
+       case "video": {
+         const model = args[1];
+         const prompt = args[2];
+         const imageUrl = args[3];
+
+         if (!model || !prompt) {
+           throw new Error("model and prompt are required");
+         }
+
+         const input: Record<string, unknown> = { prompt };
+         if (imageUrl) {
+           input.image = imageUrl;
+         }
+
+         const output = await runVideo({ model, input });
+         console.log(`[replicate] video output:`, output);
+         break;
+       }
+
+       case "image": {
+         const model = args[1];
+         const prompt = args[2];
+
+         if (!model || !prompt) {
+           throw new Error("model and prompt are required");
+         }
+
+         const output = await runImage({
+           model,
+           input: { prompt },
+         });
+
+         console.log(`[replicate] image output:`, output);
+         break;
+       }
+
+       default:
+         console.error(`unknown command: ${command}`);
+         console.log(`run 'bun run lib/replicate.ts help' for usage`);
+         process.exit(1);
+     }
+   } catch (error) {
+     console.error(`[replicate] error:`, error);
+     process.exit(1);
+   }
+ }
+
+ if (import.meta.main) {
+   cli();
+ }
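
A minimal usage sketch of the exports above, assuming `REPLICATE_API_TOKEN` is set and the import path resolves from the package root; the shape of each output varies by model:

```typescript
// minimal sketch: call the wrapper programmatically instead of via the cli.
// assumes REPLICATE_API_TOKEN is set in the environment; output shape is
// model-dependent (a url string, an array of urls, or a file object).
import { MODELS, runImage, runVideo } from "./lib/replicate";

const image = await runImage({
  model: MODELS.IMAGE.FLUX_DEV,
  input: { prompt: "sunset landscape" },
});
console.log("image output:", image);

const video = await runVideo({
  model: MODELS.VIDEO.MINIMAX,
  input: { prompt: "dancing robot" },
});
console.log("video output:", video);
```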
package/output.txt ADDED
@@ -0,0 +1 @@
+ Let's say I've just joined the Roark team as a marketer and I want to add a new article to the website to boost our SEO. The devs won't get to this task anytime soon, but thanks to YoloCode AI, I can take care of it myself. Any changes I make show up instantly on the right so I can see exactly how it looked in production before opening a pull request. So now I'm ready to submit and push this feature and here we go!
package/package.json ADDED
@@ -0,0 +1,42 @@
+ {
+   "name": "@vargai/sdk",
+   "module": "index.ts",
+   "type": "module",
+   "bin": {
+     "varg": "./cli/index.ts"
+   },
+   "scripts": {
+     "lint": "biome check .",
+     "format": "biome format --write ."
+   },
+   "devDependencies": {
+     "@biomejs/biome": "^2.3.7",
+     "@types/bun": "latest"
+   },
+   "peerDependencies": {
+     "typescript": "^5"
+   },
+   "dependencies": {
+     "@ai-sdk/fal": "^1.0.23",
+     "@ai-sdk/replicate": "^1.0.18",
+     "@aws-sdk/client-s3": "^3.937.0",
+     "@aws-sdk/s3-request-presigner": "^3.937.0",
+     "@elevenlabs/elevenlabs-js": "^2.25.0",
+     "@fal-ai/client": "^1.7.2",
+     "@higgsfield/client": "^0.1.2",
+     "@remotion/cli": "^4.0.377",
+     "@types/fluent-ffmpeg": "^2.1.28",
+     "ai": "^5.0.98",
+     "citty": "^0.1.6",
+     "fluent-ffmpeg": "^2.1.3",
+     "groq-sdk": "^0.36.0",
+     "react": "^19.2.0",
+     "react-dom": "^19.2.0",
+     "remotion": "^4.0.377",
+     "replicate": "^1.4.0"
+   },
+   "version": "0.1.1",
+   "exports": {
+     ".": "./index.ts"
+   }
+ }
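
Given the `exports` map, consumers would import from the package root. A hedged sketch, assuming `index.ts` re-exports the replicate helpers (the re-export surface is not visible in this diff):

```typescript
// hypothetical consumer usage; assumes the package root re-exports
// runModel from lib/replicate, which this diff does not show
import { runModel } from "@vargai/sdk";

const output = await runModel("black-forest-labs/flux-schnell", {
  prompt: "studio portrait, soft light",
});
console.log(output);
```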
package/pipeline/cookbooks/SKILL.md ADDED
@@ -0,0 +1,285 @@
+ ---
+ name: talking-character-pipeline
+ description: complete workflow to create talking character videos with lipsync and captions. use when creating ai character videos, talking avatars, narrated content, or social media character content with voiceover.
+ allowed-tools: Read, Bash
+ ---
+
+ # talking character pipeline
+
+ create professional talking character videos from scratch using the complete varg.ai sdk workflow.
+
+ ## overview
+
+ this pipeline combines multiple services to create a fully produced talking character video:
+ 1. character headshot generation
+ 2. voiceover synthesis
+ 3. character animation
+ 4. lipsync
+ 5. auto-generated captions
+ 6. social media optimization
+
+ **total time**: ~4-5 minutes per video
+
+ ## step-by-step workflow
+
+ ### 1. create character headshot
+ ```bash
+ bun run service/image.ts soul "professional headshot of a friendly person, studio lighting" true
+ ```
+
+ **output**: character image url + s3 url
+ **time**: ~30 seconds
+
+ **tip**: be specific about character appearance, lighting, and style for best results
+
+ ### 2. generate voiceover
+ ```bash
+ bun run service/voice.ts elevenlabs "hello world, this is my character speaking" rachel true
+ ```
+
+ **output**: `media/voice-{timestamp}.mp3` + s3 url
+ **time**: ~10 seconds
+
+ **tip**: choose a voice that matches the character (rachel/bella for female, josh/antoni for male)
+
+ ### 3. animate character
+ ```bash
+ bun run service/video.ts from_image "person talking naturally, professional demeanor" <headshot_url> 5 true
+ ```
+
+ **output**: animated video url + s3 url
+ **time**: ~2-3 minutes
+
+ **tip**: use subtle motion prompts like "person talking naturally" or "slight head movement"
+
+ ### 4. add lipsync
+ ```bash
+ bun run service/sync.ts wav2lip <video_url> <audio_url>
+ ```
+
+ **output**: lipsynced video url
+ **time**: ~30 seconds
+
+ **tip**: wav2lip works best with close-up character shots and clear audio
+
+ ### 5. add captions
+ ```bash
+ bun run service/captions.ts <video_path> captioned.mp4 --provider fireworks
+ ```
+
+ **output**: `captioned.mp4` with subtitles
+ **time**: ~15 seconds (includes transcription)
+
+ **tip**: the fireworks provider gives word-level timing for professional captions
+
+ ### 6. prepare for social media
+ ```bash
+ bun run service/edit.ts social captioned.mp4 final-tiktok.mp4 tiktok
+ ```
+
+ **output**: `final-tiktok.mp4` optimized for the platform
+ **time**: ~5 seconds
+
+ **platforms**: tiktok, instagram, youtube-shorts, youtube, twitter
+
+ ## complete example
+
+ ```bash
+ # step 1: generate character
+ bun run service/image.ts soul \
+   "professional business woman, friendly smile, studio lighting" \
+   true
+
+ # step 2: create voiceover
+ bun run service/voice.ts elevenlabs \
+   "welcome to our company. we're excited to show you our new product" \
+   rachel \
+   true
+
+ # step 3: animate character
+ bun run service/video.ts from_image \
+   "person talking professionally" \
+   https://your-s3-url/character.jpg \
+   5 \
+   true
+
+ # step 4: sync lips
+ bun run service/sync.ts wav2lip \
+   https://your-s3-url/animated.mp4 \
+   https://your-s3-url/voice.mp3
+
+ # step 5: add captions
+ bun run service/captions.ts \
+   synced-video.mp4 \
+   captioned.mp4 \
+   --provider fireworks \
+   --font "Arial Black" \
+   --size 32
+
+ # step 6: optimize for tiktok
+ bun run service/edit.ts social \
+   captioned.mp4 \
+   final-tiktok.mp4 \
+   tiktok
+ ```
+
+ ## programmatic workflow
+
+ ```typescript
+ import { generateWithSoul } from "./service/image"
+ import { generateVoice } from "./service/voice"
+ import { generateVideoFromImage } from "./service/video"
+ import { lipsyncWav2Lip } from "./service/sync"
+ import { addCaptions } from "./service/captions"
+ import { prepareForSocial } from "./service/edit"
+
+ // 1. character
+ const character = await generateWithSoul(
+   "friendly business person, professional",
+   { upload: true }
+ )
+
+ // 2. voice
+ const voice = await generateVoice({
+   text: "hello, welcome to our video",
+   voice: "rachel",
+   upload: true,
+   outputPath: "media/voice.mp3"
+ })
+
+ // 3. animate
+ const video = await generateVideoFromImage(
+   "person talking naturally",
+   character.uploaded!,
+   { duration: 5, upload: true }
+ )
+
+ // 4. lipsync
+ const synced = await lipsyncWav2Lip({
+   videoUrl: video.uploaded!,
+   audioUrl: voice.uploadUrl!
+ })
+
+ // 5. captions
+ const captioned = await addCaptions({
+   videoPath: synced,
+   output: "captioned.mp4",
+   provider: "fireworks"
+ })
+
+ // 6. social media
+ const final = await prepareForSocial({
+   input: captioned,
+   output: "final.mp4",
+   platform: "tiktok"
+ })
+ ```
+
+ ## use cases
+
+ ### marketing content
+ - product announcements
+ - brand messaging
+ - explainer videos
+ - social media ads
+
+ ### educational content
+ - course introductions
+ - tutorial narration
+ - lesson summaries
+ - educational social media
+
+ ### social media
+ - tiktok character content
+ - instagram reels with narration
+ - youtube shorts
+ - twitter video posts
+
+ ## tips for best results
+
+ **character creation:**
+ - be specific about appearance, expression, lighting
+ - "professional", "friendly", "casual" work well
+ - mention "studio lighting" for clean backgrounds
+
+ **voiceover:**
+ - write natural, conversational scripts
+ - add punctuation for natural pauses
+ - keep sentences short and clear
+ - match voice gender to character
+
+ **animation:**
+ - use subtle motion prompts
+ - 5 seconds is perfect for character talking shots
+ - avoid complex camera movements
+
+ **lipsync:**
+ - wav2lip works best with frontal face views
+ - ensure audio is clear and well-paced
+ - close-up shots give better results
+
+ **captions:**
+ - use fireworks for word-level timing
+ - larger font sizes (28-32) work better on mobile
+ - white text with a black outline is most readable
+
+ **social media:**
+ - vertical (9:16) for tiktok/instagram/shorts
+ - landscape (16:9) for youtube/twitter
+ - keep the total video under 60 seconds for best engagement
+
+ ## estimated costs
+
+ per video (approximate):
+ - character image: $0.05 (higgsfield soul)
+ - voiceover: $0.10 (elevenlabs)
+ - animation: $0.20 (fal image-to-video)
+ - lipsync: $0.10 (replicate wav2lip)
+ - transcription: $0.02 (fireworks)
+
+ **total**: ~$0.47 per video
+
+ ## troubleshooting
+
+ **character doesn't look consistent:**
+ - use higgsfield soul instead of fal for characters
+ - save the character image and reuse it for consistency
+
+ **lipsync doesn't match well:**
+ - ensure the video shows the face clearly
+ - use close-up shots
+ - check audio quality and clarity
+
+ **animation looks unnatural:**
+ - simplify the motion prompt
+ - use "person talking naturally" or "slight movement"
+ - avoid dramatic camera movements
+
+ **captions are off-sync:**
+ - use the fireworks provider for better timing
+ - check audio quality
+ - verify the video fps is standard (24/30fps)
+
+ ## required environment variables
+
+ ```bash
+ HIGGSFIELD_API_KEY=hf_xxx
+ HIGGSFIELD_SECRET=secret_xxx
+ ELEVENLABS_API_KEY=el_xxx
+ FAL_API_KEY=fal_xxx
+ REPLICATE_API_TOKEN=r8_xxx
+ FIREWORKS_API_KEY=fw_xxx
+ CLOUDFLARE_R2_API_URL=https://xxx.r2.cloudflarestorage.com
+ CLOUDFLARE_ACCESS_KEY_ID=xxx
+ CLOUDFLARE_ACCESS_SECRET=xxx
+ CLOUDFLARE_R2_BUCKET=m
+ ```
+
+ ## next steps
+
+ after creating your talking character video:
+ - upload to social platforms
+ - analyze performance metrics
+ - iterate on character design and scripts
+ - create a series with a consistent character
+ - experiment with different voices and styles
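
A minimal preflight sketch for the environment variables the cookbook requires, with names copied from the list above; the check itself is illustrative, not part of the SDK:

```typescript
// fail fast if any key from the cookbook's environment list is missing
const required = [
  "HIGGSFIELD_API_KEY",
  "HIGGSFIELD_SECRET",
  "ELEVENLABS_API_KEY",
  "FAL_API_KEY",
  "REPLICATE_API_TOKEN",
  "FIREWORKS_API_KEY",
  "CLOUDFLARE_R2_API_URL",
  "CLOUDFLARE_ACCESS_KEY_ID",
  "CLOUDFLARE_ACCESS_SECRET",
  "CLOUDFLARE_R2_BUCKET",
];

const missing = required.filter((name) => !process.env[name]);
if (missing.length > 0) {
  throw new Error(`missing environment variables: ${missing.join(", ")}`);
}
```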