@genfeedai/types 0.1.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,1092 @@
1
+ /**
2
+ * Auto-generated Replicate model types
3
+ * DO NOT EDIT - Run `bun run sync:replicate` to regenerate
4
+ * Generated at: 2026-01-29T02:24:16.582Z
5
+ */
6
+ /**
7
+ * Input parameters for google/nano-banana
8
+ */
9
+ interface NanoBananaInput {
10
+ /**
11
+ * A text description of the image you want to generate
12
+ */
13
+ prompt: string;
14
+ /**
15
+ * Input images to transform or use as reference (supports multiple images)
16
+ * @default []
17
+ */
18
+ image_input?: string[];
19
+ /**
20
+ * Aspect ratio of the generated image
21
+ * @default "match_input_image"
22
+ */
23
+ aspect_ratio?: unknown;
24
+ /**
25
+ * Format of the output image
26
+ * @default "jpg"
27
+ */
28
+ output_format?: unknown;
29
+ }
30
+ /** Output type for google/nano-banana */
31
+ type NanoBananaOutput = string;
32
+ /**
33
+ * Input parameters for google/nano-banana-pro
34
+ */
35
+ interface NanoBananaProInput {
36
+ /**
37
+ * A text description of the image you want to generate
38
+ */
39
+ prompt: string;
40
+ /**
41
+ * Input images to transform or use as reference (supports up to 14 images)
42
+ * @default []
43
+ */
44
+ image_input?: string[];
45
+ /**
46
+ * Aspect ratio of the generated image
47
+ * @default "match_input_image"
48
+ */
49
+ aspect_ratio?: unknown;
50
+ /**
51
+ * Resolution of the generated image
52
+ * @default "2K"
53
+ */
54
+ resolution?: unknown;
55
+ /**
56
+ * Format of the output image
57
+ * @default "jpg"
58
+ */
59
+ output_format?: unknown;
60
+ /**
61
+ * block_low_and_above is strictest, block_medium_and_above blocks some prompts, block_only_high is most permissive but some prompts will still be blocked
62
+ * @default "block_only_high"
63
+ */
64
+ safety_filter_level?: unknown;
65
+ }
66
+ /** Output type for google/nano-banana-pro */
67
+ type NanoBananaProOutput = string;
68
+ /**
69
+ * Input parameters for prunaai/z-image-turbo
70
+ */
71
+ interface ZImageTurboInput {
72
+ /**
73
+ * Text prompt for image generation
74
+ */
75
+ prompt: string;
76
+ /**
77
+ * Height of the generated image
78
+ * @default 1024
79
+ * @range min: 64, max: 2048
80
+ */
81
+ height?: number;
82
+ /**
83
+ * Width of the generated image
84
+ * @default 1024
85
+ * @range min: 64, max: 2048
86
+ */
87
+ width?: number;
88
+ /**
89
+ * Number of inference steps.
90
+ * @default 8
91
+ * @range min: 1, max: 50
92
+ */
93
+ num_inference_steps?: number;
94
+ /**
95
+ * Guidance scale. Should be 0 for Turbo models
96
+ * @default 0
97
+ * @range min: 0, max: 20
98
+ */
99
+ guidance_scale?: number;
100
+ /**
101
+ * Random seed. Set for reproducible generation
102
+ */
103
+ seed?: number;
104
+ /**
105
+ * Apply additional optimizations for faster generation
106
+ * @default false
107
+ */
108
+ go_fast?: boolean;
109
+ /**
110
+ * Format of the output images
111
+ * @default "jpg"
112
+ */
113
+ output_format?: unknown;
114
+ /**
115
+ * Quality when saving the output images, from 0 to 100. 100 is best quality, 0 is lowest quality. Not relevant for .png outputs
116
+ * @default 80
117
+ * @range min: 0, max: 100
118
+ */
119
+ output_quality?: number;
120
+ }
121
+ /** Output type for prunaai/z-image-turbo */
122
+ type ZImageTurboOutput = string;
123
+ /**
124
+ * Input parameters for black-forest-labs/flux-schnell
125
+ */
126
+ interface FluxSchnellInput {
127
+ /**
128
+ * Prompt for generated image
129
+ */
130
+ prompt: string;
131
+ /**
132
+ * Aspect ratio for the generated image
133
+ * @default "1:1"
134
+ */
135
+ aspect_ratio?: unknown;
136
+ /**
137
+ * Number of outputs to generate
138
+ * @default 1
139
+ * @range min: 1, max: 4
140
+ */
141
+ num_outputs?: number;
142
+ /**
143
+ * Number of denoising steps. 4 is recommended, and lower number of steps produce lower quality outputs, faster.
144
+ * @default 4
145
+ * @range min: 1, max: 4
146
+ */
147
+ num_inference_steps?: number;
148
+ /**
149
+ * Random seed. Set for reproducible generation
150
+ */
151
+ seed?: number;
152
+ /**
153
+ * Format of the output images
154
+ * @default "webp"
155
+ */
156
+ output_format?: unknown;
157
+ /**
158
+ * Quality when saving the output images, from 0 to 100. 100 is best quality, 0 is lowest quality. Not relevant for .png outputs
159
+ * @default 80
160
+ * @range min: 0, max: 100
161
+ */
162
+ output_quality?: number;
163
+ /**
164
+ * Disable safety checker for generated images.
165
+ * @default false
166
+ */
167
+ disable_safety_checker?: boolean;
168
+ /**
169
+ * Run faster predictions with model optimized for speed (currently fp8 quantized); disable to run in original bf16. Note that outputs will not be deterministic when this is enabled, even if you set a seed.
170
+ * @default true
171
+ */
172
+ go_fast?: boolean;
173
+ /**
174
+ * Approximate number of megapixels for generated image
175
+ * @default "1"
176
+ */
177
+ megapixels?: unknown;
178
+ }
179
+ /** Output type for black-forest-labs/flux-schnell */
180
+ type FluxSchnellOutput = string[];
181
+ /**
182
+ * Input parameters for black-forest-labs/flux-dev
183
+ */
184
+ interface FluxDevInput {
185
+ /**
186
+ * Prompt for generated image
187
+ */
188
+ prompt: string;
189
+ /**
190
+ * Aspect ratio for the generated image
191
+ * @default "1:1"
192
+ */
193
+ aspect_ratio?: unknown;
194
+ /**
195
+ * Input image for image to image mode. The aspect ratio of your output will match this image
196
+ */
197
+ image?: string;
198
+ /**
199
+ * Prompt strength when using img2img. 1.0 corresponds to full destruction of information in image
200
+ * @default 0.8
201
+ * @range min: 0, max: 1
202
+ */
203
+ prompt_strength?: number;
204
+ /**
205
+ * Number of outputs to generate
206
+ * @default 1
207
+ * @range min: 1, max: 4
208
+ */
209
+ num_outputs?: number;
210
+ /**
211
+ * Number of denoising steps. Recommended range is 28-50, and lower number of steps produce lower quality outputs, faster.
212
+ * @default 28
213
+ * @range min: 1, max: 50
214
+ */
215
+ num_inference_steps?: number;
216
+ /**
217
+ * Guidance for generated image
218
+ * @default 3
219
+ * @range min: 0, max: 10
220
+ */
221
+ guidance?: number;
222
+ /**
223
+ * Random seed. Set for reproducible generation
224
+ */
225
+ seed?: number;
226
+ /**
227
+ * Format of the output images
228
+ * @default "webp"
229
+ */
230
+ output_format?: unknown;
231
+ /**
232
+ * Quality when saving the output images, from 0 to 100. 100 is best quality, 0 is lowest quality. Not relevant for .png outputs
233
+ * @default 80
234
+ * @range min: 0, max: 100
235
+ */
236
+ output_quality?: number;
237
+ /**
238
+ * Disable safety checker for generated images.
239
+ * @default false
240
+ */
241
+ disable_safety_checker?: boolean;
242
+ /**
243
+ * Run faster predictions with model optimized for speed (currently fp8 quantized); disable to run in original bf16. Note that outputs will not be deterministic when this is enabled, even if you set a seed.
244
+ * @default true
245
+ */
246
+ go_fast?: boolean;
247
+ /**
248
+ * Approximate number of megapixels for generated image
249
+ * @default "1"
250
+ */
251
+ megapixels?: unknown;
252
+ }
253
+ /** Output type for black-forest-labs/flux-dev */
254
+ type FluxDevOutput = string[];
255
+ /**
256
+ * Input parameters for black-forest-labs/flux-1.1-pro
257
+ */
258
+ interface Flux11ProInput {
259
+ /**
260
+ * Aspect ratio for the generated image
261
+ * @default "1:1"
262
+ */
263
+ aspect_ratio?: unknown;
264
+ /**
265
+ * Format of the output images.
266
+ * @default "webp"
267
+ */
268
+ output_format?: unknown;
269
+ /**
270
+ * Random seed. Set for reproducible generation
271
+ */
272
+ seed?: number;
273
+ /**
274
+ * Width of the generated image in text-to-image mode. Only used when aspect_ratio=custom. Must be a multiple of 32 (if it's not, it will be rounded to nearest multiple of 32). Note: Ignored in img2img and inpainting modes.
275
+ * @range min: 256, max: 1440
276
+ */
277
+ width?: number;
278
+ /**
279
+ * Height of the generated image in text-to-image mode. Only used when aspect_ratio=custom. Must be a multiple of 32 (if it's not, it will be rounded to nearest multiple of 32). Note: Ignored in img2img and inpainting modes.
280
+ * @range min: 256, max: 1440
281
+ */
282
+ height?: number;
283
+ /**
284
+ * Text prompt for image generation
285
+ */
286
+ prompt: string;
287
+ /**
288
+ * Image to use with Flux Redux. This is used together with the text prompt to guide the generation towards the composition of the image_prompt. Must be jpeg, png, gif, or webp.
289
+ */
290
+ image_prompt?: string;
291
+ /**
292
+ * Quality when saving the output images, from 0 to 100. 100 is best quality, 0 is lowest quality. Not relevant for .png outputs
293
+ * @default 80
294
+ * @range min: 0, max: 100
295
+ */
296
+ output_quality?: number;
297
+ /**
298
+ * Safety tolerance, 1 is most strict and 6 is most permissive
299
+ * @default 2
300
+ * @range min: 1, max: 6
301
+ */
302
+ safety_tolerance?: number;
303
+ /**
304
+ * Automatically modify the prompt for more creative generation
305
+ * @default false
306
+ */
307
+ prompt_upsampling?: boolean;
308
+ }
309
+ /** Output type for black-forest-labs/flux-1.1-pro */
310
+ type Flux11ProOutput = string;
311
+ /**
312
+ * Input parameters for stability-ai/sdxl
313
+ */
314
+ interface SDXLInput {
315
+ /**
316
+ * Input prompt
317
+ * @default "An astronaut riding a rainbow unicorn"
318
+ */
319
+ prompt?: string;
320
+ /**
321
+ * Input Negative Prompt
322
+ * @default ""
323
+ */
324
+ negative_prompt?: string;
325
+ /**
326
+ * Input image for img2img or inpaint mode
327
+ */
328
+ image?: string;
329
+ /**
330
+ * Input mask for inpaint mode. Black areas will be preserved, white areas will be inpainted.
331
+ */
332
+ mask?: string;
333
+ /**
334
+ * Width of output image
335
+ * @default 1024
336
+ */
337
+ width?: number;
338
+ /**
339
+ * Height of output image
340
+ * @default 1024
341
+ */
342
+ height?: number;
343
+ /**
344
+ * Number of images to output.
345
+ * @default 1
346
+ * @range min: 1, max: 4
347
+ */
348
+ num_outputs?: number;
349
+ /**
350
+ * scheduler
351
+ * @default "K_EULER"
352
+ */
353
+ scheduler?: unknown;
354
+ /**
355
+ * Number of denoising steps
356
+ * @default 50
357
+ * @range min: 1, max: 500
358
+ */
359
+ num_inference_steps?: number;
360
+ /**
361
+ * Scale for classifier-free guidance
362
+ * @default 7.5
363
+ * @range min: 1, max: 50
364
+ */
365
+ guidance_scale?: number;
366
+ /**
367
+ * Prompt strength when using img2img / inpaint. 1.0 corresponds to full destruction of information in image
368
+ * @default 0.8
369
+ * @range min: 0, max: 1
370
+ */
371
+ prompt_strength?: number;
372
+ /**
373
+ * Random seed. Leave blank to randomize the seed
374
+ */
375
+ seed?: number;
376
+ /**
377
+ * Which refine style to use
378
+ * @default "no_refiner"
379
+ */
380
+ refine?: unknown;
381
+ /**
382
+ * For expert_ensemble_refiner, the fraction of noise to use
383
+ * @default 0.8
384
+ * @range min: 0, max: 1
385
+ */
386
+ high_noise_frac?: number;
387
+ /**
388
+ * For base_image_refiner, the number of steps to refine, defaults to num_inference_steps
389
+ */
390
+ refine_steps?: number;
391
+ /**
392
+ * Applies a watermark to enable determining if an image is generated in downstream applications. If you have other provisions for generating or deploying images safely, you can use this to disable watermarking.
393
+ * @default true
394
+ */
395
+ apply_watermark?: boolean;
396
+ /**
397
+ * LoRA additive scale. Only applicable on trained models.
398
+ * @default 0.6
399
+ * @range min: 0, max: 1
400
+ */
401
+ lora_scale?: number;
402
+ /**
403
+ * Replicate LoRA weights to use. Leave blank to use the default weights.
404
+ */
405
+ replicate_weights?: string;
406
+ /**
407
+ * Disable safety checker for generated images. This feature is only available through the API. See [https://replicate.com/docs/how-does-replicate-work#safety](https://replicate.com/docs/how-does-replicate-work#safety)
408
+ * @default false
409
+ */
410
+ disable_safety_checker?: boolean;
411
+ }
412
+ /** Output type for stability-ai/sdxl */
413
+ type SDXLOutput = string[];
414
+ /**
415
+ * Input parameters for bytedance/sdxl-lightning-4step
416
+ */
417
+ interface SDXLLightningInput {
418
+ /**
419
+ * Input prompt
420
+ * @default "self-portrait of a woman, lightning in the background"
421
+ */
422
+ prompt?: string;
423
+ /**
424
+ * Negative Input prompt
425
+ * @default "worst quality, low quality"
426
+ */
427
+ negative_prompt?: string;
428
+ /**
429
+ * Width of output image. Recommended 1024 or 1280
430
+ * @default 1024
431
+ * @range min: 256, max: 1280
432
+ */
433
+ width?: number;
434
+ /**
435
+ * Height of output image. Recommended 1024 or 1280
436
+ * @default 1024
437
+ * @range min: 256, max: 1280
438
+ */
439
+ height?: number;
440
+ /**
441
+ * Number of images to output.
442
+ * @default 1
443
+ * @range min: 1, max: 4
444
+ */
445
+ num_outputs?: number;
446
+ /**
447
+ * scheduler
448
+ * @default "K_EULER"
449
+ */
450
+ scheduler?: unknown;
451
+ /**
452
+ * Number of denoising steps. 4 for best results
453
+ * @default 4
454
+ * @range min: 1, max: 10
455
+ */
456
+ num_inference_steps?: number;
457
+ /**
458
+ * Scale for classifier-free guidance
459
+ * @default 0
460
+ * @range min: 0, max: 50
461
+ */
462
+ guidance_scale?: number;
463
+ /**
464
+ * Random seed. Leave blank to randomize the seed
465
+ * @default 0
466
+ */
467
+ seed?: number;
468
+ /**
469
+ * Disable safety checker for generated images
470
+ * @default false
471
+ */
472
+ disable_safety_checker?: boolean;
473
+ }
474
+ /** Output type for bytedance/sdxl-lightning-4step */
475
+ type SDXLLightningOutput = string[];
476
+ /**
477
+ * Input parameters for black-forest-labs/flux-kontext-dev
478
+ */
479
+ interface FluxKontextDevInput {
480
+ /**
481
+ * Text description of what you want to generate, or the instruction on how to edit the given image.
482
+ */
483
+ prompt: string;
484
+ /**
485
+ * Image to use as reference. Must be jpeg, png, gif, or webp.
486
+ */
487
+ input_image: string;
488
+ /**
489
+ * Aspect ratio of the generated image. Use 'match_input_image' to match the aspect ratio of the input image.
490
+ * @default "match_input_image"
491
+ */
492
+ aspect_ratio?: unknown;
493
+ /**
494
+ * Number of inference steps
495
+ * @default 30
496
+ * @range min: 4, max: 50
497
+ */
498
+ num_inference_steps?: number;
499
+ /**
500
+ * Guidance scale for generation
501
+ * @default 2.5
502
+ * @range min: 0, max: 10
503
+ */
504
+ guidance?: number;
505
+ /**
506
+ * Random seed for reproducible generation. Leave blank for random.
507
+ */
508
+ seed?: number;
509
+ /**
510
+ * Output image format
511
+ * @default "webp"
512
+ */
513
+ output_format?: unknown;
514
+ /**
515
+ * Quality when saving the output images, from 0 to 100. 100 is best quality, 0 is lowest quality. Not relevant for .png outputs
516
+ * @default 80
517
+ * @range min: 0, max: 100
518
+ */
519
+ output_quality?: number;
520
+ /**
521
+ * Disable NSFW safety checker
522
+ * @default false
523
+ */
524
+ disable_safety_checker?: boolean;
525
+ }
526
+ /** Output type for black-forest-labs/flux-kontext-dev */
527
+ type FluxKontextDevOutput = string;
528
+ /**
529
+ * Input parameters for google/veo-3.1-fast
530
+ */
531
+ interface Veo31FastInput {
532
+ /**
533
+ * Text prompt for video generation
534
+ */
535
+ prompt: string;
536
+ /**
537
+ * Video aspect ratio
538
+ * @default "16:9"
539
+ */
540
+ aspect_ratio?: unknown;
541
+ /**
542
+ * Video duration in seconds
543
+ * @default 8
544
+ */
545
+ duration?: unknown;
546
+ /**
547
+ * Input image to start generating from. Ideal images are 16:9 or 9:16 and 1280x720 or 720x1280, depending on the aspect ratio you choose.
548
+ */
549
+ image?: string;
550
+ /**
551
+ * Ending image for interpolation. When provided with an input image, creates a transition between the two images.
552
+ */
553
+ last_frame?: string;
554
+ /**
555
+ * Description of what to exclude from the generated video
556
+ */
557
+ negative_prompt?: string;
558
+ /**
559
+ * Resolution of the generated video
560
+ * @default "1080p"
561
+ */
562
+ resolution?: unknown;
563
+ /**
564
+ * Generate audio with the video
565
+ * @default true
566
+ */
567
+ generate_audio?: boolean;
568
+ /**
569
+ * Random seed. Omit for random generations
570
+ */
571
+ seed?: number;
572
+ }
573
+ /** Output type for google/veo-3.1-fast */
574
+ type Veo31FastOutput = string;
575
+ /**
576
+ * Input parameters for google/veo-3.1
577
+ */
578
+ interface Veo31Input {
579
+ /**
580
+ * Text prompt for video generation
581
+ */
582
+ prompt: string;
583
+ /**
584
+ * Video aspect ratio
585
+ * @default "16:9"
586
+ */
587
+ aspect_ratio?: unknown;
588
+ /**
589
+ * Video duration in seconds
590
+ * @default 8
591
+ */
592
+ duration?: unknown;
593
+ /**
594
+ * Input image to start generating from. Ideal images are 16:9 or 9:16 and 1280x720 or 720x1280, depending on the aspect ratio you choose.
595
+ */
596
+ image?: string;
597
+ /**
598
+ * Ending image for interpolation. When provided with an input image, creates a transition between the two images.
599
+ */
600
+ last_frame?: string;
601
+ /**
602
+ * 1 to 3 reference images for subject-consistent generation (reference-to-video, or R2V). Reference images only work with 16:9 aspect ratio and 8-second duration. Last frame is ignored if reference images are provided.
603
+ * @default []
604
+ */
605
+ reference_images?: string[];
606
+ /**
607
+ * Description of what to exclude from the generated video
608
+ */
609
+ negative_prompt?: string;
610
+ /**
611
+ * Resolution of the generated video
612
+ * @default "1080p"
613
+ */
614
+ resolution?: unknown;
615
+ /**
616
+ * Generate audio with the video
617
+ * @default true
618
+ */
619
+ generate_audio?: boolean;
620
+ /**
621
+ * Random seed. Omit for random generations
622
+ */
623
+ seed?: number;
624
+ }
625
+ /** Output type for google/veo-3.1 */
626
+ type Veo31Output = string;
627
+ /**
628
+ * Input parameters for kwaivgi/kling-v2.5-turbo-pro
629
+ */
630
+ interface KlingV25TurboProInput {
631
+ /**
632
+ * Text prompt for video generation
633
+ */
634
+ prompt: string;
635
+ /**
636
+ * Things you do not want to see in the video
637
+ * @default ""
638
+ */
639
+ negative_prompt?: string;
640
+ /**
641
+ * First frame of the video
642
+ */
643
+ start_image?: string;
644
+ /**
645
+ * Last frame of the video
646
+ */
647
+ end_image?: string;
648
+ /**
649
+ * Aspect ratio of the video. Ignored if start_image is provided.
650
+ * @default "16:9"
651
+ */
652
+ aspect_ratio?: unknown;
653
+ /**
654
+ * Duration of the video in seconds
655
+ * @default 5
656
+ */
657
+ duration?: unknown;
658
+ /**
659
+ * Deprecated: Use start_image instead.
660
+ */
661
+ image?: string;
662
+ }
663
+ /** Output type for kwaivgi/kling-v2.5-turbo-pro */
664
+ type KlingV25TurboProOutput = string;
665
+ /**
666
+ * Input parameters for kwaivgi/kling-v2.6-motion-control
667
+ */
668
+ interface KlingV26MotionControlInput {
669
+ /**
670
+ * Text prompt for video generation. You can add elements to the screen and achieve motion effects through prompt words.
671
+ * @default ""
672
+ */
673
+ prompt?: string;
674
+ /**
675
+ * Reference image. The characters, backgrounds, and other elements in the generated video are based on the reference image. Supports .jpg/.jpeg/.png, max 10MB, dimensions 340px-3850px, aspect ratio 1:2.5 to 2.5:1.
676
+ */
677
+ image: string;
678
+ /**
679
+ * Reference video. The character actions in the generated video are consistent with the reference video. Supports .mp4/.mov, max 100MB, 3-30 seconds duration depending on character_orientation.
680
+ */
681
+ video: string;
682
+ /**
683
+ * Generate the orientation of the characters in the video. 'image': same orientation as the person in the picture (max 10s video). 'video': consistent with the orientation of the characters in the video (max 30s video).
684
+ * @default "image"
685
+ */
686
+ character_orientation?: unknown;
687
+ /**
688
+ * Video generation mode. 'std': Standard mode (cost-effective). 'pro': Professional mode (higher quality).
689
+ * @default "std"
690
+ */
691
+ mode?: unknown;
692
+ /**
693
+ * Whether to keep the original sound of the video
694
+ * @default true
695
+ */
696
+ keep_original_sound?: boolean;
697
+ }
698
+ /** Output type for kwaivgi/kling-v2.6-motion-control */
699
+ type KlingV26MotionControlOutput = string;
700
+ /**
701
+ * Input parameters for minimax/video-01
702
+ */
703
+ interface MinimaxVideo01Input {
704
+ /**
705
+ * Text prompt for generation
706
+ */
707
+ prompt: string;
708
+ /**
709
+ * Use prompt optimizer
710
+ * @default true
711
+ */
712
+ prompt_optimizer?: boolean;
713
+ /**
714
+ * First frame image for video generation. The output video will have the same aspect ratio as this image.
715
+ */
716
+ first_frame_image?: string;
717
+ /**
718
+ * An optional character reference image to use as the subject in the generated video (this will use the S2V-01 model)
719
+ */
720
+ subject_reference?: string;
721
+ }
722
+ /** Output type for minimax/video-01 */
723
+ type MinimaxVideo01Output = string;
724
+ /**
725
+ * Input parameters for luma/ray
726
+ */
727
+ interface LumaRayInput {
728
+ /**
729
+ * Aspect ratio of the video. Ignored if a start frame, end frame or video ID is given.
730
+ * @default "16:9"
731
+ */
732
+ aspect_ratio?: unknown;
733
+ /**
734
+ * Whether the video should loop, with the last frame matching the first frame for smooth, continuous playback. This input is ignored if end_image or end_video_id are set.
735
+ * @default false
736
+ */
737
+ loop?: boolean;
738
+ /**
739
+ * Text prompt for video generation
740
+ */
741
+ prompt: string;
742
+ /**
743
+ * An optional last frame of the video to use as the ending frame.
744
+ */
745
+ end_image?: string;
746
+ /**
747
+ * An optional first frame of the video to use as the starting frame.
748
+ */
749
+ start_image?: string;
750
+ /**
751
+ * Prepend a new video generation to the beginning of an existing one (Also called 'reverse extend'). You can combine this with start_image, or start_video_id.
752
+ */
753
+ end_video_id?: string;
754
+ /**
755
+ * Deprecated: Use end_image instead
756
+ */
757
+ end_image_url?: string;
758
+ /**
759
+ * Continue or extend a video generation with a new generation. You can combine this with end_image, or end_video_id.
760
+ */
761
+ start_video_id?: string;
762
+ /**
763
+ * Deprecated: Use start_image instead
764
+ */
765
+ start_image_url?: string;
766
+ }
767
+ /** Output type for luma/ray */
768
+ type LumaRayOutput = string;
769
+ /**
770
+ * Input parameters for meta/meta-llama-3.1-405b-instruct
771
+ */
772
+ interface MetaLlama31Input {
773
+ /**
774
+ * Prompt
775
+ * @default ""
776
+ */
777
+ prompt?: string;
778
+ /**
779
+ * System prompt to send to the model. This is prepended to the prompt and helps guide system behavior. Ignored for non-chat models.
780
+ * @default "You are a helpful assistant."
781
+ */
782
+ system_prompt?: string;
783
+ /**
784
+ * The minimum number of tokens the model should generate as output.
785
+ * @default 0
786
+ */
787
+ min_tokens?: number;
788
+ /**
789
+ * The maximum number of tokens the model should generate as output.
790
+ * @default 512
791
+ */
792
+ max_tokens?: number;
793
+ /**
794
+ * The value used to modulate the next token probabilities.
795
+ * @default 0.6
796
+ */
797
+ temperature?: number;
798
+ /**
799
+ * A probability threshold for generating the output. If < 1.0, only keep the top tokens with cumulative probability >= top_p (nucleus filtering). Nucleus filtering is described in Holtzman et al. (http://arxiv.org/abs/1904.09751).
800
+ * @default 0.9
801
+ */
802
+ top_p?: number;
803
+ /**
804
+ * The number of highest probability tokens to consider for generating the output. If > 0, only keep the top k tokens with highest probability (top-k filtering).
805
+ * @default 50
806
+ */
807
+ top_k?: number;
808
+ /**
809
+ * Presence penalty
810
+ * @default 0
811
+ */
812
+ presence_penalty?: number;
813
+ /**
814
+ * Frequency penalty
815
+ * @default 0
816
+ */
817
+ frequency_penalty?: number;
818
+ /**
819
+ * A comma-separated list of sequences to stop generation at. For example, '<end>,<stop>' will stop generation at the first instance of 'end' or '<stop>'.
820
+ * @default ""
821
+ */
822
+ stop_sequences?: string;
823
+ /**
824
+ * A template to format the prompt with. If not provided, the default prompt template will be used.
825
+ * @default ""
826
+ */
827
+ prompt_template?: string;
828
+ }
829
+ /** Output type for meta/meta-llama-3.1-405b-instruct */
830
+ type MetaLlama31Output = string[];
831
+ /**
832
+ * Input parameters for luma/reframe-image
833
+ */
834
+ interface LumaReframeImageInput {
835
+ /**
836
+ * Aspect ratio of the output
837
+ * @default "16:9"
838
+ */
839
+ aspect_ratio?: unknown;
840
+ /**
841
+ * The model to use for the reframe generation
842
+ * @default "photon-flash-1"
843
+ */
844
+ model?: unknown;
845
+ /**
846
+ * The image to reframe
847
+ */
848
+ image?: string;
849
+ /**
850
+ * The x end of the crop bounds, in pixels. Defines the right boundary where your source will be placed in the output frame. The distance between x_start and x_end determines the resized width of your content.
851
+ */
852
+ x_end?: number;
853
+ /**
854
+ * The y end of the crop bounds, in pixels. Defines the bottom boundary where your source will be placed in the output frame. The distance between y_start and y_end determines the resized height of your content.
855
+ */
856
+ y_end?: number;
857
+ /**
858
+ * A prompt to guide the reframing generation
859
+ */
860
+ prompt?: string;
861
+ /**
862
+ * The x start of the crop bounds, in pixels. Defines the left boundary where your source will be placed in the output frame. The distance between x_start and x_end determines the resized width of your content.
863
+ */
864
+ x_start?: number;
865
+ /**
866
+ * The y start of the crop bounds, in pixels. Defines the top boundary where your source will be placed in the output frame. The distance between y_start and y_end determines the resized height of your content.
867
+ */
868
+ y_start?: number;
869
+ /**
870
+ * URL of the image to reframe
871
+ */
872
+ image_url?: string;
873
+ /**
874
+ * The x position of the input in the grid, in pixels. Controls horizontal positioning of the source within the target output dimensions.
875
+ */
876
+ grid_position_x?: number;
877
+ /**
878
+ * The y position of the input in the grid, in pixels. Controls vertical positioning of the source within the target output dimensions.
879
+ */
880
+ grid_position_y?: number;
881
+ }
882
+ /** Output type for luma/reframe-image */
883
+ type LumaReframeImageOutput = string;
884
+ /**
885
+ * Input parameters for luma/reframe-video
886
+ */
887
+ interface LumaReframeVideoInput {
888
+ /**
889
+ * Aspect ratio of the output
890
+ * @default "16:9"
891
+ */
892
+ aspect_ratio?: unknown;
893
+ /**
894
+ * The video to reframe. Maximum video duration is 10 seconds.
895
+ */
896
+ video?: string;
897
+ /**
898
+ * The x end of the crop bounds, in pixels. Defines the right boundary where your source will be placed in the output frame. The distance between x_start and x_end determines the resized width of your content.
899
+ */
900
+ x_end?: number;
901
+ /**
902
+ * The y end of the crop bounds, in pixels. Defines the bottom boundary where your source will be placed in the output frame. The distance between y_start and y_end determines the resized height of your content.
903
+ */
904
+ y_end?: number;
905
+ /**
906
+ * A prompt to guide the reframing generation
907
+ */
908
+ prompt?: string;
909
+ /**
910
+ * The x start of the crop bounds, in pixels. Defines the left boundary where your source will be placed in the output frame. The distance between x_start and x_end determines the resized width of your content.
911
+ */
912
+ x_start?: number;
913
+ /**
914
+ * The y start of the crop bounds, in pixels. Defines the top boundary where your source will be placed in the output frame. The distance between y_start and y_end determines the resized height of your content.
915
+ */
916
+ y_start?: number;
917
+ /**
918
+ * URL of the video to reframe. Maximum video duration is 10 seconds.
919
+ */
920
+ video_url?: string;
921
+ /**
922
+ * The x position of the input in the grid, in pixels. Controls horizontal positioning of the source within the target output dimensions.
923
+ */
924
+ grid_position_x?: number;
925
+ /**
926
+ * The y position of the input in the grid, in pixels. Controls vertical positioning of the source within the target output dimensions.
927
+ */
928
+ grid_position_y?: number;
929
+ }
930
+ /** Output type for luma/reframe-video */
931
+ type LumaReframeVideoOutput = string;
932
+ /**
933
+ * Input parameters for sync/lipsync-2
934
+ */
935
+ interface Lipsync2Input {
936
+ /**
937
+ * Lipsync mode when audio and video durations are out of sync
938
+ * @default "loop"
939
+ */
940
+ sync_mode?: unknown;
941
+ /**
942
+ * Input audio file (.wav)
943
+ */
944
+ audio: string;
945
+ /**
946
+ * Input video file (.mp4)
947
+ */
948
+ video: string;
949
+ /**
950
+ * How expressive lipsync can be (0-1)
951
+ * @default 0.5
952
+ * @range min: 0, max: 1
953
+ */
954
+ temperature?: number;
955
+ /**
956
+ * Whether to detect active speaker (i.e. whoever is speaking in the clip will be used for lipsync)
957
+ * @default false
958
+ */
959
+ active_speaker?: boolean;
960
+ }
961
+ /** Output type for sync/lipsync-2 */
962
+ type Lipsync2Output = string;
963
+ /**
964
+ * Input parameters for sync/lipsync-2-pro
965
+ */
966
+ interface Lipsync2ProInput {
967
+ /**
968
+ * Lipsync mode when audio and video durations are out of sync
969
+ * @default "loop"
970
+ */
971
+ sync_mode?: unknown;
972
+ /**
973
+ * Input audio file (.wav)
974
+ */
975
+ audio: string;
976
+ /**
977
+ * Input video file (.mp4)
978
+ */
979
+ video: string;
980
+ /**
981
+ * How expressive lipsync can be (0-1)
982
+ * @default 0.5
983
+ * @range min: 0, max: 1
984
+ */
985
+ temperature?: number;
986
+ /**
987
+ * Whether to detect active speaker (i.e. whoever is speaking in the clip will be used for lipsync)
988
+ * @default false
989
+ */
990
+ active_speaker?: boolean;
991
+ }
992
+ /** Output type for sync/lipsync-2-pro */
993
+ type Lipsync2ProOutput = string;
994
+ /**
995
+ * Input parameters for bytedance/latentsync
996
+ */
997
+ interface LatentSyncInput {
998
+ /**
999
+ * Input video
1000
+ */
1001
+ video?: string;
1002
+ /**
1003
+ * Input audio to
1004
+ */
1005
+ audio?: string;
1006
+ /**
1007
+ * Guidance scale
1008
+ * @default 1
1009
+ * @range min: 0, max: 10
1010
+ */
1011
+ guidance_scale?: number;
1012
+ /**
1013
+ * Set to 0 for Random seed
1014
+ * @default 0
1015
+ */
1016
+ seed?: number;
1017
+ }
1018
+ /** Output type for bytedance/latentsync */
1019
+ type LatentSyncOutput = string;
1020
+ /**
1021
+ * Input parameters for pixverse/lipsync
1022
+ */
1023
+ interface PixverseLipsyncInput {
1024
+ /**
1025
+ * Video file to upload to PixVerse as media
1026
+ */
1027
+ video: string;
1028
+ /**
1029
+ * Audio file to upload to PixVerse as media
1030
+ */
1031
+ audio: string;
1032
+ }
1033
+ /** Output type for pixverse/lipsync */
1034
+ type PixverseLipsyncOutput = string;
1035
+ /** All supported Replicate model IDs */
1036
+ type ReplicateModelId = 'google/nano-banana' | 'google/nano-banana-pro' | 'prunaai/z-image-turbo' | 'black-forest-labs/flux-schnell' | 'black-forest-labs/flux-dev' | 'black-forest-labs/flux-1.1-pro' | 'stability-ai/sdxl' | 'bytedance/sdxl-lightning-4step' | 'black-forest-labs/flux-kontext-dev' | 'google/veo-3.1-fast' | 'google/veo-3.1' | 'kwaivgi/kling-v2.5-turbo-pro' | 'kwaivgi/kling-v2.6-motion-control' | 'minimax/video-01' | 'luma/ray' | 'meta/meta-llama-3.1-405b-instruct' | 'luma/reframe-image' | 'luma/reframe-video' | 'sync/lipsync-2' | 'sync/lipsync-2-pro' | 'bytedance/latentsync' | 'pixverse/lipsync';
1037
/**
 * Map from model ID to input type.
 * Keys must exactly match the members of {@link ReplicateModelId};
 * used by {@link ModelInput} to look up the input type for a model.
 */
interface ReplicateModelInputMap {
    'google/nano-banana': NanoBananaInput;
    'google/nano-banana-pro': NanoBananaProInput;
    'prunaai/z-image-turbo': ZImageTurboInput;
    'black-forest-labs/flux-schnell': FluxSchnellInput;
    'black-forest-labs/flux-dev': FluxDevInput;
    'black-forest-labs/flux-1.1-pro': Flux11ProInput;
    'stability-ai/sdxl': SDXLInput;
    'bytedance/sdxl-lightning-4step': SDXLLightningInput;
    'black-forest-labs/flux-kontext-dev': FluxKontextDevInput;
    'google/veo-3.1-fast': Veo31FastInput;
    'google/veo-3.1': Veo31Input;
    'kwaivgi/kling-v2.5-turbo-pro': KlingV25TurboProInput;
    'kwaivgi/kling-v2.6-motion-control': KlingV26MotionControlInput;
    'minimax/video-01': MinimaxVideo01Input;
    'luma/ray': LumaRayInput;
    'meta/meta-llama-3.1-405b-instruct': MetaLlama31Input;
    'luma/reframe-image': LumaReframeImageInput;
    'luma/reframe-video': LumaReframeVideoInput;
    'sync/lipsync-2': Lipsync2Input;
    'sync/lipsync-2-pro': Lipsync2ProInput;
    'bytedance/latentsync': LatentSyncInput;
    'pixverse/lipsync': PixverseLipsyncInput;
}
1062
/**
 * Map from model ID to output type.
 * Keys must exactly match the members of {@link ReplicateModelId};
 * used by {@link ModelOutput} to look up the output type for a model.
 */
interface ReplicateModelOutputMap {
    'google/nano-banana': NanoBananaOutput;
    'google/nano-banana-pro': NanoBananaProOutput;
    'prunaai/z-image-turbo': ZImageTurboOutput;
    'black-forest-labs/flux-schnell': FluxSchnellOutput;
    'black-forest-labs/flux-dev': FluxDevOutput;
    'black-forest-labs/flux-1.1-pro': Flux11ProOutput;
    'stability-ai/sdxl': SDXLOutput;
    'bytedance/sdxl-lightning-4step': SDXLLightningOutput;
    'black-forest-labs/flux-kontext-dev': FluxKontextDevOutput;
    'google/veo-3.1-fast': Veo31FastOutput;
    'google/veo-3.1': Veo31Output;
    'kwaivgi/kling-v2.5-turbo-pro': KlingV25TurboProOutput;
    'kwaivgi/kling-v2.6-motion-control': KlingV26MotionControlOutput;
    'minimax/video-01': MinimaxVideo01Output;
    'luma/ray': LumaRayOutput;
    'meta/meta-llama-3.1-405b-instruct': MetaLlama31Output;
    'luma/reframe-image': LumaReframeImageOutput;
    'luma/reframe-video': LumaReframeVideoOutput;
    'sync/lipsync-2': Lipsync2Output;
    'sync/lipsync-2-pro': Lipsync2ProOutput;
    'bytedance/latentsync': LatentSyncOutput;
    'pixverse/lipsync': PixverseLipsyncOutput;
}
1087
/**
 * Get input type for a model.
 * @example type In = ModelInput<'pixverse/lipsync'>; // PixverseLipsyncInput
 */
type ModelInput<T extends ReplicateModelId> = ReplicateModelInputMap[T];
/**
 * Get output type for a model.
 * @example type Out = ModelOutput<'pixverse/lipsync'>; // PixverseLipsyncOutput
 */
type ModelOutput<T extends ReplicateModelId> = ReplicateModelOutputMap[T];
1091
+
1092
+ export type { Flux11ProInput, Flux11ProOutput, FluxDevInput, FluxDevOutput, FluxKontextDevInput, FluxKontextDevOutput, FluxSchnellInput, FluxSchnellOutput, KlingV25TurboProInput, KlingV25TurboProOutput, KlingV26MotionControlInput, KlingV26MotionControlOutput, LatentSyncInput, LatentSyncOutput, Lipsync2Input, Lipsync2Output, Lipsync2ProInput, Lipsync2ProOutput, LumaRayInput, LumaRayOutput, LumaReframeImageInput, LumaReframeImageOutput, LumaReframeVideoInput, LumaReframeVideoOutput, MetaLlama31Input, MetaLlama31Output, MinimaxVideo01Input, MinimaxVideo01Output, ModelInput, ModelOutput, NanoBananaInput, NanoBananaOutput, NanoBananaProInput, NanoBananaProOutput, PixverseLipsyncInput, PixverseLipsyncOutput, ReplicateModelId, ReplicateModelInputMap, ReplicateModelOutputMap, SDXLInput, SDXLLightningInput, SDXLLightningOutput, SDXLOutput, Veo31FastInput, Veo31FastOutput, Veo31Input, Veo31Output, ZImageTurboInput, ZImageTurboOutput };