@civitai/client 0.2.0-beta.2 → 0.2.0-beta.21

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -1,3 +1,84 @@
+ /**
+ * Base input for AI Toolkit training across all ecosystems
+ */
+ export type AiToolkitTrainingInput = TrainingInput & {
+ engine: 'ai-toolkit';
+ } & {
+ ecosystem: string;
+ /**
+ * Number of training epochs. An epoch is one complete pass through the training dataset.
+ * A maximum of 20 epochs can be specified.
+ * Note: ai-toolkit uses steps internally, calculated based on epochs, image count, and internal parameters.
+ */
+ epochs?: number;
+ /**
+ * Specify the maximum resolution of training images. If the training images exceed the resolution specified here, they will be scaled down to this resolution.
+ */
+ resolution?: number | null;
+ /**
+ * Sets the learning rate for the model. This is the learning rate used when fine-tuning each attention block (and other blocks, depending on the settings).
+ */
+ lr?: number;
+ /**
+ * Sets the learning rate for the text encoder. Only used when TrainTextEncoder is true. For models with multiple text encoders, this applies to all of them.
+ */
+ textEncoderLr?: number | null;
+ /**
+ * Whether to train the text encoder(s) alongside the model. Enabling this can improve prompt understanding but increases training time and memory usage.
+ */
+ trainTextEncoder?: boolean | null;
+ /**
+ * The learning rate can be changed over the course of training; the scheduler determines how it changes.
+ */
+ lrScheduler?: 'constant' | 'constant_with_warmup' | 'cosine' | 'linear' | 'step';
+ /**
+ * The optimizer determines how the neural net weights are updated during training.
+ * Various methods have been proposed, but the most commonly used for LoRA training is "adamw8bit".
+ */
+ optimizerType?:
+ | 'adam'
+ | 'adamw'
+ | 'adamw8bit'
+ | 'adam8bit'
+ | 'lion'
+ | 'lion8bit'
+ | 'adafactor'
+ | 'adagrad'
+ | 'prodigy'
+ | 'prodigy8bit';
+ /**
+ * The larger the Dim setting, the more information the network can store, but the risk of also learning information unrelated to the training target increases. A larger Dim also increases LoRA file size.
+ */
+ networkDim?: number | null;
+ /**
+ * The smaller the Network alpha value, the larger the stored LoRA neural net weights.
+ * For example, with an Alpha of 16 and a Dim of 32, the strength of the weight used is 16/32 = 0.5,
+ * meaning that the learning rate is only half as powerful as the Learning Rate setting.
+ *
+ * If Alpha and Dim are the same number, the strength used will be 1 and will have no effect on the learning rate.
+ */
+ networkAlpha?: number | null;
+ /**
+ * Adds noise to training images. 0 adds no noise at all. A value of 1 adds strong noise.
+ */
+ noiseOffset?: number | null;
+ /**
+ * If this option is enabled, training images are randomly flipped horizontally. This lets the model learn both left- and right-facing angles, which is useful for symmetrical people and objects.
+ */
+ flipAugmentation?: boolean;
+ /**
+ * Randomly changes the order of your tags during training. Shuffling is intended to improve learning. If you are using captions (sentences), this option has no effect.
+ */
+ shuffleTokens?: boolean;
+ /**
+ * If your training images have tags, you can randomly shuffle them.
+ * However, if there are words you want to keep at the beginning, this option specifies how many leading tokens to keep in place.
+ * This option does nothing if the Shuffle Tokens option is off.
+ */
+ keepTokens?: number;
+ } & {
+ engine: 'ai-toolkit';
+ };
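
Taken together, AiToolkitTrainingInput is a discriminated intersection: engine and ecosystem select the trainer, and the remaining fields tune it. A minimal usage sketch — the field values are illustrative, and the 'zip' discriminator on trainingData is an assumption (TrainingData, declared later in this diff, only requires a string type):

import type { AiToolkitTrainingInput } from '@civitai/client';

const trainingInput: AiToolkitTrainingInput = {
  engine: 'ai-toolkit',
  ecosystem: 'sdxl',
  trainingData: { type: 'zip' }, // assumed value; TrainingData only declares a string discriminator
  epochs: 10, // one complete pass per epoch, capped at 20
  lr: 1e-4,
  networkDim: 32,
  networkAlpha: 16, // effective weight scale = 16/32 = 0.5, per the comment above
  lrScheduler: 'cosine',
  optimizerType: 'adamw8bit',
};
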
  export type AgeClassificationInput = {
  /**
  * An optional model to use for age classification. If not provided, the default model will be determined by the worker
@@ -13,14 +94,7 @@ export type AgeClassificationOutput = {
  [key: string]: Array<AgeClassifierLabel>;
  };
  hasMinor: boolean;
- prediction: AgeClassificationPrediction;
  };
- export declare const AgeClassificationPrediction: {
- readonly PASS: 'pass';
- readonly FAIL: 'fail';
- };
- export type AgeClassificationPrediction =
- (typeof AgeClassificationPrediction)[keyof typeof AgeClassificationPrediction];
  /**
  * Age classification
  */
@@ -47,6 +121,27 @@ export type AgeClassifierLabel = {
  isMinor: boolean;
  boundingBox: Array<number>;
  };
+ export declare const AnimalPoseBboxDetector: {
+ readonly YOLOX_L_TORCHSCRIPT_PT: 'yolox_l.torchscript.pt';
+ readonly YOLOX_L_ONNX: 'yolox_l.onnx';
+ readonly YOLO_NAS_L_FP16_ONNX: 'yolo_nas_l_fp16.onnx';
+ readonly YOLO_NAS_M_FP16_ONNX: 'yolo_nas_m_fp16.onnx';
+ readonly YOLO_NAS_S_FP16_ONNX: 'yolo_nas_s_fp16.onnx';
+ };
+ export type AnimalPoseBboxDetector =
+ (typeof AnimalPoseBboxDetector)[keyof typeof AnimalPoseBboxDetector];
+ export declare const AnimalPoseEstimator: {
+ readonly RTMPOSE_M_AP10K_256_BS5_TORCHSCRIPT_PT: 'rtmpose-m_ap10k_256_bs5.torchscript.pt';
+ readonly RTMPOSE_M_AP10K_256_ONNX: 'rtmpose-m_ap10k_256.onnx';
+ };
+ export type AnimalPoseEstimator = (typeof AnimalPoseEstimator)[keyof typeof AnimalPoseEstimator];
+ export declare const AnylineMergeWith: {
+ readonly LINEART_STANDARD: 'lineart_standard';
+ readonly LINEART_REALISTIC: 'lineart_realistic';
+ readonly LINEART_ANIME: 'lineart_anime';
+ readonly MANGA_LINE: 'manga_line';
+ };
+ export type AnylineMergeWith = (typeof AnylineMergeWith)[keyof typeof AnylineMergeWith];
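
These declarations follow the const-object-as-enum pattern used throughout this file: the declared const provides named values, and the companion type is the union of those values, so both forms below type-check:

import { AnimalPoseBboxDetector, type AnimalPoseEstimator } from '@civitai/client';

// Named access through the const object...
const detector: AnimalPoseBboxDetector = AnimalPoseBboxDetector.YOLOX_L_ONNX;
// ...or a raw literal, since the type is just the union of the declared values.
const estimator: AnimalPoseEstimator = 'rtmpose-m_ap10k_256.onnx';
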
  export type BatchOcrSafetyClassificationInput = {
  mediaUrls: Array<string>;
  };
@@ -96,6 +191,17 @@ export declare const BuzzClientAccount: {
  readonly FAKE_RED: 'fakeRed';
  };
  export type BuzzClientAccount = (typeof BuzzClientAccount)[keyof typeof BuzzClientAccount];
+ /**
+ * AI Toolkit training for Chroma models
+ */
+ export type ChromaAiToolkitTrainingInput = AiToolkitTrainingInput & {} & {
+ ecosystem: 'chroma';
+ };
+ export declare const CoarseMode: {
+ readonly DISABLE: 'disable';
+ readonly ENABLE: 'enable';
+ };
+ export type CoarseMode = (typeof CoarseMode)[keyof typeof CoarseMode];
  export type ComfyInput = {
  /**
  * Get the comfy workflow that needs to be executed
@@ -158,10 +264,91 @@ export declare const ContainerFormat: {
  readonly WEB_M: 'webM';
  };
  export type ContainerFormat = (typeof ContainerFormat)[keyof typeof ContainerFormat];
+ /**
+ * Input configuration for the ConvertImage workflow step.
+ */
+ export type ConvertImageInput = {
+ /**
+ * The source image to convert.
+ */
+ image: string;
+ /**
+ * Optional list of transforms to apply to the image before conversion.
+ * Transforms are applied in order.
+ */
+ transforms?: Array<ImageTransform>;
+ output: ImageOutputFormat;
+ };
+ /**
+ * Output from the ConvertImage workflow step.
+ */
+ export type ConvertImageOutput = {
+ blob: ImageBlob;
+ };
+ /**
+ * A workflow step that converts images to different formats and applies optional transforms.
+ */
+ export type ConvertImageStep = WorkflowStep & {
+ $type: 'convertImage';
+ } & {
+ input: ConvertImageInput;
+ output?: ConvertImageOutput;
+ } & {
+ $type: 'convertImage';
+ };
+ /**
+ * A workflow step that converts images to different formats and applies optional transforms.
+ */
+ export type ConvertImageStepTemplate = WorkflowStepTemplate & {
+ $type: 'convertImage';
+ } & {
+ input: ConvertImageInput;
+ } & {
+ $type: 'convertImage';
+ };
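
A sketch of a ConvertImageInput that downscales and re-encodes to JPEG, using the ResizeTransform and JpegOutputFormat types declared further down in this diff; the URL and values are illustrative:

import type { ConvertImageInput, JpegOutputFormat, ResizeTransform } from '@civitai/client';

const resize: ResizeTransform = { type: 'resize', targetWidth: 1024 }; // height follows aspect ratio
const jpeg: JpegOutputFormat = { format: 'jpeg', quality: 85 };        // 1-100; higher = better quality, larger files

const convert: ConvertImageInput = {
  image: 'https://example.com/source.png', // assumed to accept a URL, like other image inputs in this file
  transforms: [resize], // applied in order before conversion
  output: jpeg,
};
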
  export type CursedArrayOfTelemetryCursorAndWorkflow = {
  next: string;
  items: Array<Workflow>;
  };
+ export declare const DensePoseColormap: {
+ readonly 'VIRIDIS (_MAGIC_ANIMATE)': 'Viridis (MagicAnimate)';
+ readonly 'PARULA (_CIVIT_AI)': 'Parula (CivitAI)';
+ };
+ export type DensePoseColormap = (typeof DensePoseColormap)[keyof typeof DensePoseColormap];
+ export declare const DensePoseModel: {
+ readonly DENSEPOSE_R50_FPN_DL_TORCHSCRIPT: 'densepose_r50_fpn_dl.torchscript';
+ readonly DENSEPOSE_R101_FPN_DL_TORCHSCRIPT: 'densepose_r101_fpn_dl.torchscript';
+ };
+ export type DensePoseModel = (typeof DensePoseModel)[keyof typeof DensePoseModel];
+ export declare const DepthAnythingCheckpoint: {
+ readonly DEPTH_ANYTHING_VITL14_PTH: 'depth_anything_vitl14.pth';
+ readonly DEPTH_ANYTHING_VITB14_PTH: 'depth_anything_vitb14.pth';
+ readonly DEPTH_ANYTHING_VITS14_PTH: 'depth_anything_vits14.pth';
+ };
+ export type DepthAnythingCheckpoint =
+ (typeof DepthAnythingCheckpoint)[keyof typeof DepthAnythingCheckpoint];
+ export declare const DepthAnythingV2Checkpoint: {
+ readonly DEPTH_ANYTHING_V2_VITG_PTH: 'depth_anything_v2_vitg.pth';
+ readonly DEPTH_ANYTHING_V2_VITL_PTH: 'depth_anything_v2_vitl.pth';
+ readonly DEPTH_ANYTHING_V2_VITB_PTH: 'depth_anything_v2_vitb.pth';
+ readonly DEPTH_ANYTHING_V2_VITS_PTH: 'depth_anything_v2_vits.pth';
+ };
+ export type DepthAnythingV2Checkpoint =
+ (typeof DepthAnythingV2Checkpoint)[keyof typeof DepthAnythingV2Checkpoint];
+ export declare const DwPoseBboxDetector: {
+ readonly YOLOX_L_ONNX: 'yolox_l.onnx';
+ readonly YOLOX_L_TORCHSCRIPT_PT: 'yolox_l.torchscript.pt';
+ readonly YOLO_NAS_L_FP16_ONNX: 'yolo_nas_l_fp16.onnx';
+ readonly YOLO_NAS_M_FP16_ONNX: 'yolo_nas_m_fp16.onnx';
+ readonly YOLO_NAS_S_FP16_ONNX: 'yolo_nas_s_fp16.onnx';
+ };
+ export type DwPoseBboxDetector = (typeof DwPoseBboxDetector)[keyof typeof DwPoseBboxDetector];
+ export declare const DwPoseEstimator: {
+ readonly DW_LL_UCOCO_384_BS5_TORCHSCRIPT_PT: 'dw-ll_ucoco_384_bs5.torchscript.pt';
+ readonly DW_LL_UCOCO_384_ONNX: 'dw-ll_ucoco_384.onnx';
+ readonly DW_LL_UCOCO_ONNX: 'dw-ll_ucoco.onnx';
+ };
+ export type DwPoseEstimator = (typeof DwPoseEstimator)[keyof typeof DwPoseEstimator];
  /**
  * Represents the input information needed for the Echo workflow step.
  */
@@ -230,8 +417,23 @@ export declare const FileFormat: {
  readonly DIFFUSERS: 'diffusers';
  readonly CORE_ML: 'coreML';
  readonly ONNX: 'onnx';
+ readonly TAR: 'tar';
  };
  export type FileFormat = (typeof FileFormat)[keyof typeof FileFormat];
+ /**
+ * AI Toolkit training for Flux.1 models
+ */
+ export type Flux1AiToolkitTrainingInput = AiToolkitTrainingInput & {
+ modelVariant: string;
+ } & {
+ ecosystem: 'flux1';
+ };
+ /**
+ * AI Toolkit training for Flux.1 Dev models
+ */
+ export type Flux1DevAiToolkitTrainingInput = Flux1AiToolkitTrainingInput & {} & {
+ modelVariant: 'dev';
+ };
  export type Flux1KontextDevImageGenInput = Flux1KontextImageGenInput & {
  readonly model: string;
  } & {
@@ -261,11 +463,195 @@ export type Flux1KontextProImageGenInput = Flux1KontextImageGenInput & {
  } & {
  model: 'pro';
  };
+ /**
+ * AI Toolkit training for Flux.1 Schnell models
+ */
+ export type Flux1SchnellAiToolkitTrainingInput = Flux1AiToolkitTrainingInput & {} & {
+ modelVariant: 'schnell';
+ };
+ export type Flux2DevCreateImageInput = Flux2DevImageGenInput & {
+ readonly operation: string;
+ } & {
+ operation: 'createImage';
+ };
+ export type Flux2DevEditImageInput = Flux2DevImageGenInput & {
+ images?: Array<string>;
+ readonly operation: string;
+ } & {
+ operation: 'editImage';
+ };
+ /**
+ * Input for Flux 2 Dev image editing LoRA training via FAL.
+ */
+ export type Flux2DevEditImageResourceTrainingInput = ImageResourceTrainingInput & {
+ engine: 'flux2-dev-edit';
+ } & {
+ /**
+ * Number of training steps. Must be in increments of 100.
+ */
+ steps?: number;
+ /**
+ * Learning rate for training.
+ */
+ learningRate?: number;
+ /**
+ * Default caption to use if caption files are missing from training data.
+ */
+ defaultCaption?: string | null;
+ /**
+ * Number of reference images per training pair (1-4).
+ * Affects cost calculation via reference multiplier.
+ */
+ referenceImageCount?: number;
+ } & {
+ engine: 'flux2-dev-edit';
+ };
+ export type Flux2DevImageGenInput = Flux2ImageGenInput & {
+ operation: string;
+ guidanceScale?: number;
+ numInferenceSteps?: number;
+ loras?: Array<ImageGenInputLora>;
+ readonly modelVariant?: string;
+ } & {
+ model: 'dev';
+ };
+ /**
+ * Input for Flux 2 Dev text-to-image LoRA training via FAL.
+ */
+ export type Flux2DevImageResourceTrainingInput = ImageResourceTrainingInput & {
+ engine: 'flux2-dev';
+ } & {
+ /**
+ * Number of training steps. Must be in increments of 100.
+ */
+ steps?: number;
+ /**
+ * Learning rate for training.
+ */
+ learningRate?: number;
+ /**
+ * Default caption to use if caption files are missing from training data.
+ */
+ defaultCaption?: string | null;
+ } & {
+ engine: 'flux2-dev';
+ };
+ export type Flux2FlexCreateImageInput = Flux2FlexImageGenInput & {
+ readonly operation: string;
+ } & {
+ operation: 'createImage';
+ };
+ export type Flux2FlexEditImageInput = Flux2FlexImageGenInput & {
+ images?: Array<string>;
+ readonly operation: string;
+ } & {
+ operation: 'editImage';
+ };
+ export type Flux2FlexImageGenInput = Flux2ImageGenInput & {
+ operation: string;
+ guidanceScale?: number;
+ numInferenceSteps?: number;
+ readonly modelVariant?: string;
+ } & {
+ model: 'flex';
+ };
+ export type Flux2ImageGenInput = ImageGenInput & {
+ engine: 'flux2';
+ } & {
+ model: string;
+ prompt: string;
+ width?: number;
+ height?: number;
+ outputFormat?: 'jpeg' | 'png';
+ seed?: number | null;
+ quantity?: number;
+ enablePromptExpansion?: boolean;
+ /**
+ * The model variant (dev, flex, pro)
+ */
+ readonly modelVariant?: string;
+ /**
+ * The operation type (createImage, editImage)
+ */
+ readonly operation?: string;
+ } & {
+ engine: 'flux2';
+ };
+ export type Flux2MaxCreateImageInput = Flux2MaxImageGenInput & {
+ readonly operation: string;
+ } & {
+ operation: 'createImage';
+ };
+ export type Flux2MaxEditImageInput = Flux2MaxImageGenInput & {
+ images?: Array<string>;
+ readonly operation: string;
+ } & {
+ operation: 'editImage';
+ };
+ export type Flux2MaxImageGenInput = Flux2ImageGenInput & {
+ operation: string;
+ readonly modelVariant?: string;
+ } & {
+ model: 'max';
+ };
+ export type Flux2ProCreateImageInput = Flux2ProImageGenInput & {
+ readonly operation: string;
+ } & {
+ operation: 'createImage';
+ };
+ export type Flux2ProEditImageInput = Flux2ProImageGenInput & {
+ images?: Array<string>;
+ readonly operation: string;
+ } & {
+ operation: 'editImage';
+ };
+ export type Flux2ProImageGenInput = Flux2ImageGenInput & {
+ operation: string;
+ readonly modelVariant?: string;
+ } & {
+ model: 'pro';
+ };
  export type FluxDevFastImageResourceTrainingInput = ImageResourceTrainingInput & {
  engine: 'flux-dev-fast';
  } & {} & {
  engine: 'flux-dev-fast';
  };
+ export type Gemini25FlashCreateImageGenInput = Gemini25FlashImageGenInput & {} & {
+ operation: 'createImage';
+ };
+ export type Gemini25FlashEditImageGenInput = Gemini25FlashImageGenInput & {
+ images: Array<string>;
+ } & {
+ operation: 'editImage';
+ };
+ export type Gemini25FlashImageGenInput = GeminiImageGenInput & {
+ operation: string;
+ quantity?: number;
+ } & {
+ model: '2.5-flash';
+ };
+ export type GeminiImageGenInput = ImageGenInput & {
+ engine: 'gemini';
+ } & {
+ model: string;
+ prompt: string;
+ } & {
+ engine: 'gemini';
+ };
+ /**
+ * GIF output format configuration.
+ */
+ export type GifOutputFormat = ImageOutputFormat & {
+ format: 'gif';
+ } & {
+ /**
+ * Maximum number of frames to include in the output. Set to 1 to extract only the first frame from animated images.
+ * When null, all frames are preserved.
+ */
+ maxFrames?: number | null;
+ } & {
+ format: 'gif';
+ };
  export type GoogleImageGenInput = ImageGenInput & {
  engine: 'google';
  } & {
@@ -356,6 +742,14 @@ export type ImageBlob = Blob & {
  } & {
  width?: number | null;
  height?: number | null;
+ /**
+ * Gets a URL that can be used to preview a resized version of the image.
+ */
+ previewUrl?: string | null;
+ /**
+ * Gets when the preview URL is set to expire.
+ */
+ previewUrlExpiresAt?: string | null;
  } & {
  type: 'image';
  };
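
Since previewUrlExpiresAt is delivered as a string, a consumer presumably parses it before trusting previewUrl. A small sketch under the assumption that the expiry is an ISO-8601 timestamp:

import type { ImageBlob } from '@civitai/client';

function usablePreviewUrl(blob: ImageBlob): string | null {
  if (!blob.previewUrl) return null;
  // Assumed ISO-8601 expiry; a missing expiry is treated as still valid.
  if (blob.previewUrlExpiresAt && Date.parse(blob.previewUrlExpiresAt) <= Date.now()) {
    return null;
  }
  return blob.previewUrl;
}
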
@@ -371,6 +765,10 @@ export type ImageGenOutput = {
  * A collection of output images.
  */
  images: Array<ImageBlob>;
+ /**
+ * An optional list of errors related to generation failures
+ */
+ errors?: Array<string> | null;
  };
  /**
  * Image Generation
@@ -425,6 +823,12 @@ export type ImageJobNetworkParams = {
  */
  type?: string | null;
  };
+ /**
+ * Base class for image output formats. Uses "format" as the type discriminator.
+ */
+ export type ImageOutputFormat = {
+ format: string;
+ };
  export declare const ImageResouceTrainingModerationStatus: {
  readonly EVALUATING: 'evaluating';
  readonly UNDER_REVIEW: 'underReview';
@@ -458,6 +862,10 @@ export type ImageResourceTrainingInput = {
  * A selection of sample prompts.
  */
  samplePrompts?: Array<string>;
+ /**
+ * An optional negative prompt that will be applied when generating samples
+ */
+ negativePrompt?: string | null;
  };
  export type ImageResourceTrainingOutput = {
  moderationStatus: ImageResouceTrainingModerationStatus;
@@ -504,6 +912,12 @@ export type ImageResourceTrainingStepTemplate = WorkflowStepTemplate & {
  } & {
  $type: 'imageResourceTraining';
  };
+ /**
+ * Base class for image transforms that can be applied during image conversion.
+ */
+ export type ImageTransform = {
+ type: string;
+ };
  /**
  * Available image transformers.
  */
@@ -547,6 +961,31 @@ export type ImageUploadStepTemplate = WorkflowStepTemplate & {
  } & {
  $type: 'imageUpload';
  };
+ export type ImageUpscalerInput = {
+ /**
+ * Either a URL, a DataURL, or a Base64 string
+ */
+ image: string;
+ scaleFactor?: number;
+ };
+ export type ImageUpscalerOutput = {
+ blob: ImageBlob;
+ };
+ export type ImageUpscalerStep = WorkflowStep & {
+ $type: 'imageUpscaler';
+ } & {
+ input: ImageUpscalerInput;
+ output?: ImageUpscalerOutput;
+ } & {
+ $type: 'imageUpscaler';
+ };
+ export type ImageUpscalerStepTemplate = WorkflowStepTemplate & {
+ $type: 'imageUpscaler';
+ } & {
+ input: ImageUpscalerInput;
+ } & {
+ $type: 'imageUpscaler';
+ };
  export type Imagen4ImageGenInput = GoogleImageGenInput & {
  prompt: string;
  negativePrompt?: string;
@@ -568,6 +1007,19 @@ export declare const JobSupport: {
  * Available levels of job support.
  */
  export type JobSupport = (typeof JobSupport)[keyof typeof JobSupport];
+ /**
+ * JPEG output format configuration.
+ */
+ export type JpegOutputFormat = ImageOutputFormat & {
+ format: 'jpeg';
+ } & {
+ /**
+ * Quality setting for JPEG compression (1-100). Higher values produce better quality but larger files.
+ */
+ quality?: number;
+ } & {
+ format: 'jpeg';
+ };
  /**
  * Array of operations to perform
  */
@@ -637,9 +1089,10 @@ export declare const KlingMode: {
  export type KlingMode = (typeof KlingMode)[keyof typeof KlingMode];
  export declare const KlingModel: {
  readonly V1: 'v1';
- readonly V1_5: 'v1_5';
- readonly V1_6: 'v1_6';
+ readonly V1_5: 'v1.5';
+ readonly V1_6: 'v1.6';
  readonly V2: 'v2';
+ readonly V2_5_TURBO: 'v2.5-turbo';
  };
  export type KlingModel = (typeof KlingModel)[keyof typeof KlingModel];
  export declare const KlingVideoGenAspectRatio: {
@@ -765,6 +1218,11 @@ export type KohyaImageResourceTrainingInput = ImageResourceTrainingInput & {
  } & {
  engine: 'kohya';
  };
+ export declare const LeresBoost: {
+ readonly DISABLE: 'disable';
+ readonly ENABLE: 'enable';
+ };
+ export type LeresBoost = (typeof LeresBoost)[keyof typeof LeresBoost];
  export declare const LightricksAspectRatio: {
  readonly '1:1': '1:1';
  readonly '16:9': '16:9';
@@ -792,22 +1250,129 @@ export type LightricksVideoGenInput = VideoGenInput & {
  } & {
  engine: 'lightricks';
  };
- export type MiniMaxVideoGenInput = VideoGenInput & {
- engine: 'minimax';
- } & {
- model?: MiniMaxVideoGenModel;
- enablePromptEnhancer?: boolean;
+ /**
+ * Represents the input information needed for the MediaHash workflow step.
+ */
+ export type MediaHashInput = {
+ mediaUrl: string;
  /**
- * Either A URL, A DataURL or a Base64 string
+ * The types of hashes to generate.
  */
- sourceImage?: string | null;
- } & {
- engine: 'minimax';
+ hashTypes: Array<MediaHashType>;
  };
- export declare const MiniMaxVideoGenModel: {
- readonly HAILOU: 'hailou';
+ /**
+ * Represents the output information returned from the MediaHash workflow step.
+ */
+ export type MediaHashOutput = {
+ /**
+ * The generated hashes, keyed by hash type (e.g., "perceptual" -> "12345678").
+ */
+ hashes: {
+ [key: string]: string;
+ };
  };
- export type MiniMaxVideoGenModel = (typeof MiniMaxVideoGenModel)[keyof typeof MiniMaxVideoGenModel];
+ /**
+ * MediaHash
+ */
+ export type MediaHashStep = WorkflowStep & {
+ $type: 'mediaHash';
+ } & {
+ input: MediaHashInput;
+ output?: MediaHashOutput;
+ } & {
+ $type: 'mediaHash';
+ };
+ /**
+ * MediaHash
+ */
+ export type MediaHashStepTemplate = WorkflowStepTemplate & {
+ $type: 'mediaHash';
+ } & {
+ input: MediaHashInput;
+ } & {
+ $type: 'mediaHash';
+ };
+ /**
+ * Represents the type of hash algorithm to use for media content.
+ */
+ export declare const MediaHashType: {
+ readonly PERCEPTUAL: 'perceptual';
+ };
+ /**
+ * Represents the type of hash algorithm to use for media content.
+ */
+ export type MediaHashType = (typeof MediaHashType)[keyof typeof MediaHashType];
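
Because MediaHashOutput.hashes is keyed by the requested hash type, a request and its result line up directly; a minimal sketch using the one documented hash type:

import { MediaHashType, type MediaHashInput, type MediaHashOutput } from '@civitai/client';

const hashInput: MediaHashInput = {
  mediaUrl: 'https://example.com/image.png',
  hashTypes: [MediaHashType.PERCEPTUAL],
};

// Reading the result back, keyed by the same hash type (e.g. "perceptual" -> "12345678").
function perceptualHash(output: MediaHashOutput): string | undefined {
  return output.hashes[MediaHashType.PERCEPTUAL];
}
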
+ /**
+ * Represents the input information needed for the MediaRating workflow step.
+ */
+ export type MediaRatingInput = {
+ mediaUrl: string;
+ /**
+ * The engine to use for media rating. Valid values: "default" (HiveVLM) or "civitai".
+ */
+ engine?: string;
+ };
+ /**
+ * Represents the output information returned from the MediaRating workflow step.
+ */
+ export type MediaRatingOutput = {
+ nsfwLevel: NsfwLevel;
+ /**
+ * The reason the content was blocked, if any.
+ */
+ blockedReason?: string | null;
+ /**
+ * Whether the content is blocked.
+ */
+ isBlocked: boolean;
+ /**
+ * Detected content labels (e.g., "Animal", "Child", etc.).
+ */
+ labels?: Array<string> | null;
+ };
+ /**
+ * MediaRating
+ */
+ export type MediaRatingStep = WorkflowStep & {
+ $type: 'mediaRating';
+ } & {
+ input: MediaRatingInput;
+ output?: MediaRatingOutput;
+ } & {
+ $type: 'mediaRating';
+ };
+ /**
+ * MediaRating
+ */
+ export type MediaRatingStepTemplate = WorkflowStepTemplate & {
+ $type: 'mediaRating';
+ } & {
+ input: MediaRatingInput;
+ } & {
+ $type: 'mediaRating';
+ };
+ export declare const Metric3dBackbone: {
+ readonly VIT_SMALL: 'vit-small';
+ readonly VIT_LARGE: 'vit-large';
+ readonly VIT_GIANT2: 'vit-giant2';
+ };
+ export type Metric3dBackbone = (typeof Metric3dBackbone)[keyof typeof Metric3dBackbone];
+ export type MiniMaxVideoGenInput = VideoGenInput & {
+ engine: 'minimax';
+ } & {
+ model?: MiniMaxVideoGenModel;
+ enablePromptEnhancer?: boolean;
+ /**
+ * Either a URL, a DataURL, or a Base64 string
+ */
+ sourceImage?: string | null;
+ } & {
+ engine: 'minimax';
+ };
+ export declare const MiniMaxVideoGenModel: {
+ readonly HAILOU: 'hailou';
+ };
+ export type MiniMaxVideoGenModel = (typeof MiniMaxVideoGenModel)[keyof typeof MiniMaxVideoGenModel];
  export type MochiVideoGenInput = VideoGenInput & {
  engine: 'mochi';
  } & {
@@ -820,17 +1385,10 @@ export type MusubiImageResourceTrainingInput = ImageResourceTrainingInput & {
  engine: 'musubi';
  } & {
  /**
- * An epoch is one set of learning. By default, we will save a maximum of 20 epochs (evenly distributed), and they are all available for download.
- */
- maxTrainEpochs?: number;
- /**
- * Num Repeats defines how many times each individual image gets put into VRAM. As opposed to batch size, which is how many images are placed into VRAM at once.
+ * Number of training epochs. An epoch is one complete pass through the training dataset.
+ * A maximum of 20 epochs can be specified.
  */
- numRepeats?: number;
- /**
- * Batch size is the number of images that will be placed into VRAM at once. A batch size of 2 will train two images at a time, simultaneously.
- */
- trainBatchSize?: number | null;
+ epochs?: number;
  /**
  * Specify the maximum resolution of training images. If the training images exceed the resolution specified here, they will be scaled down to this resolution.
  */
@@ -875,13 +1433,23 @@ export type MusubiImageResourceTrainingInput = ImageResourceTrainingInput & {
  };
  export declare const NsfwLevel: {
  readonly PG: 'pg';
- readonly P_G13: 'pG13';
+ readonly PG13: 'pg13';
  readonly R: 'r';
  readonly X: 'x';
  readonly XXX: 'xxx';
  readonly NA: 'na';
  };
  export type NsfwLevel = (typeof NsfwLevel)[keyof typeof NsfwLevel];
+ export type NanoBananaProImageGenInput = GoogleImageGenInput & {
+ prompt: string;
+ aspectRatio?: '21:9' | '16:9' | '3:2' | '4:3' | '5:4' | '1:1' | '4:5' | '3:4' | '2:3' | '9:16';
+ numImages?: number;
+ resolution?: '1K' | '2K' | '4K';
+ outputFormat?: 'jpeg' | 'png' | 'webp';
+ images?: Array<string>;
+ } & {
+ model: 'nano-banana-pro';
+ };
  export type OpenAiDallE2CreateImageGenInput = OpenAiDallE2ImageGenInput & {
  background?: 'auto' | 'transparent' | 'opaque';
  } & {
@@ -921,6 +1489,33 @@ export type OpenAiDallE3ImageGenInput = OpenApiImageGenInput & {
  } & {
  model: 'dall-e-3';
  };
+ export type OpenAiGpt15CreateImageInput = OpenAiGpt15ImageGenInput & {} & {
+ operation: 'createImage';
+ };
+ export type OpenAiGpt15EditImageInput = OpenAiGpt15ImageGenInput & {
+ images: Array<string>;
+ /**
+ * Input fidelity: low or high
+ */
+ inputFidelity?: 'low' | 'high';
+ /**
+ * Image size for edit mode: auto, 1024x1024, 1536x1024, or 1024x1536
+ */
+ size?: 'auto' | '1024x1024' | '1536x1024' | '1024x1536';
+ } & {
+ operation: 'editImage';
+ };
+ export type OpenAiGpt15ImageGenInput = OpenApiImageGenInput & {
+ operation: string;
+ prompt: string;
+ size?: '1024x1024' | '1536x1024' | '1024x1536';
+ quantity?: number;
+ background?: 'auto' | 'transparent' | 'opaque';
+ quality?: 'low' | 'medium' | 'high';
+ outputFormat?: 'jpeg' | 'png' | 'webp';
+ } & {
+ model: 'gpt-image-1.5';
+ };
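
The gpt-image-1.5 inputs reuse the operation discriminator of the other OpenAI types. A sketch of an edit request — values are illustrative, and it assumes the OpenApiImageGenInput base (only partially shown in this diff) requires nothing beyond engine:

import type { OpenAiGpt15EditImageInput } from '@civitai/client';

const edit: OpenAiGpt15EditImageInput = {
  engine: 'openai', // from the OpenApiImageGenInput base
  model: 'gpt-image-1.5',
  operation: 'editImage',
  images: ['https://example.com/photo.png'],
  prompt: 'Replace the background with a beach at sunset',
  inputFidelity: 'high',
  size: '1024x1024',
  quality: 'high',
  outputFormat: 'png',
};
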
  export type OpenAiGpt1CreateImageInput = OpenAiGpt1ImageGenInput & {} & {
  operation: 'createImage';
  };
@@ -951,6 +1546,317 @@ export type OpenApiImageGenInput = ImageGenInput & {
  } & {
  engine: 'openai';
  };
+ export declare const OutputFormat: {
+ readonly PNG: 'png';
+ readonly JPEG: 'jpeg';
+ readonly WEB_P: 'webP';
+ };
+ export type OutputFormat = (typeof OutputFormat)[keyof typeof OutputFormat];
+ /**
+ * PNG output format configuration.
+ */
+ export type PngOutputFormat = ImageOutputFormat & {
+ format: 'png';
+ } & {} & {
+ format: 'png';
+ };
+ export type PreprocessImageAnimalPoseInput = PreprocessImageInput & {
+ kind: 'animal-pose';
+ } & {
+ bboxDetector?: AnimalPoseBboxDetector;
+ poseEstimator?: AnimalPoseEstimator;
+ } & {
+ kind: 'animal-pose';
+ };
+ export type PreprocessImageAnimeLineartInput = PreprocessImageInput & {
+ kind: 'lineart-anime';
+ } & {} & {
+ kind: 'lineart-anime';
+ };
+ export type PreprocessImageAnylineInput = PreprocessImageInput & {
+ kind: 'anyline';
+ } & {
+ mergeWithLineart?: AnylineMergeWith;
+ lineartLowerBound?: number;
+ lineartUpperBound?: number;
+ objectMinSize?: number;
+ objectConnectivity?: number;
+ } & {
+ kind: 'anyline';
+ };
+ export type PreprocessImageBaeNormalInput = PreprocessImageInput & {
+ kind: 'bae-normal';
+ } & {} & {
+ kind: 'bae-normal';
+ };
+ export type PreprocessImageBinaryInput = PreprocessImageInput & {
+ kind: 'binary';
+ } & {
+ binThreshold?: number;
+ } & {
+ kind: 'binary';
+ };
+ export type PreprocessImageCannyInput = PreprocessImageInput & {
+ kind: 'canny';
+ } & {
+ lowThreshold?: number;
+ highThreshold?: number;
+ } & {
+ kind: 'canny';
+ };
+ export type PreprocessImageColorInput = PreprocessImageInput & {
+ kind: 'color';
+ } & {} & {
+ kind: 'color';
+ };
+ export type PreprocessImageDensePoseInput = PreprocessImageInput & {
+ kind: 'densepose';
+ } & {
+ model?: DensePoseModel;
+ colormap?: DensePoseColormap;
+ } & {
+ kind: 'densepose';
+ };
+ export type PreprocessImageDepthAnythingInput = PreprocessImageInput & {
+ kind: 'depth-anything';
+ } & {
+ checkpoint?: DepthAnythingCheckpoint;
+ } & {
+ kind: 'depth-anything';
+ };
+ export type PreprocessImageDepthAnythingV2Input = PreprocessImageInput & {
+ kind: 'depth-anything-v2';
+ } & {
+ checkpoint?: DepthAnythingV2Checkpoint;
+ } & {
+ kind: 'depth-anything-v2';
+ };
+ export type PreprocessImageDsineNormalInput = PreprocessImageInput & {
+ kind: 'dsine-normal';
+ } & {
+ fov?: number;
+ iterations?: number;
+ } & {
+ kind: 'dsine-normal';
+ };
+ export type PreprocessImageDwPoseInput = PreprocessImageInput & {
+ kind: 'dwpose';
+ } & {
+ detectHand?: boolean;
+ detectBody?: boolean;
+ detectFace?: boolean;
+ bboxDetector?: DwPoseBboxDetector;
+ poseEstimator?: DwPoseEstimator;
+ } & {
+ kind: 'dwpose';
+ };
+ export type PreprocessImageFakeScribbleInput = PreprocessImageInput & {
+ kind: 'fake-scribble';
+ } & {
+ safe?: SafeMode;
+ } & {
+ kind: 'fake-scribble';
+ };
+ export type PreprocessImageHedInput = PreprocessImageInput & {
+ kind: 'hed';
+ } & {
+ safe?: SafeMode;
+ } & {
+ kind: 'hed';
+ };
+ export type PreprocessImageInput = {
+ kind: string;
+ /**
+ * Either a URL, a DataURL, or a Base64 string
+ */
+ image: string;
+ resolution?: number;
+ /**
+ * Gets the preprocessor type identifier used to map to ComfyUI nodes.
+ * This is derived from the JsonDerivedType discriminator.
+ */
+ preprocessorType?: string;
+ };
+ export type PreprocessImageLeresDepthInput = PreprocessImageInput & {
+ kind: 'leres-depth';
+ } & {
+ removeNearest?: number;
+ removeBackground?: number;
+ boost?: LeresBoost;
+ } & {
+ kind: 'leres-depth';
+ };
+ export type PreprocessImageMangaLineartInput = PreprocessImageInput & {
+ kind: 'lineart-manga';
+ } & {} & {
+ kind: 'lineart-manga';
+ };
+ export type PreprocessImageMediaPipeFaceInput = PreprocessImageInput & {
+ kind: 'mediapipe-face';
+ } & {
+ maxFaces?: number;
+ minConfidence?: number;
+ } & {
+ kind: 'mediapipe-face';
+ };
+ export type PreprocessImageMetric3dDepthInput = PreprocessImageInput & {
+ kind: 'metric3d-depth';
+ } & {
+ backbone?: Metric3dBackbone;
+ fx?: number;
+ fy?: number;
+ } & {
+ kind: 'metric3d-depth';
+ };
+ export type PreprocessImageMetric3dNormalInput = PreprocessImageInput & {
+ kind: 'metric3d-normal';
+ } & {
+ backbone?: Metric3dBackbone;
+ fx?: number;
+ fy?: number;
+ } & {
+ kind: 'metric3d-normal';
+ };
+ export type PreprocessImageMidasDepthInput = PreprocessImageInput & {
+ kind: 'midas-depth';
+ } & {
+ a?: number;
+ backgroundThreshold?: number;
+ } & {
+ kind: 'midas-depth';
+ };
+ export type PreprocessImageMidasNormalInput = PreprocessImageInput & {
+ kind: 'midas-normal';
+ } & {
+ a?: number;
+ backgroundThreshold?: number;
+ } & {
+ kind: 'midas-normal';
+ };
+ export type PreprocessImageMlsdInput = PreprocessImageInput & {
+ kind: 'mlsd';
+ } & {
+ scoreThreshold?: number;
+ distanceThreshold?: number;
+ } & {
+ kind: 'mlsd';
+ };
+ export type PreprocessImageOneFormerAde20kInput = PreprocessImageInput & {
+ kind: 'oneformer-ade20k';
+ } & {} & {
+ kind: 'oneformer-ade20k';
+ };
+ export type PreprocessImageOneFormerCocoInput = PreprocessImageInput & {
+ kind: 'oneformer-coco';
+ } & {} & {
+ kind: 'oneformer-coco';
+ };
+ export type PreprocessImageOpenPoseInput = PreprocessImageInput & {
+ kind: 'openpose';
+ } & {
+ detectHand?: boolean;
+ detectBody?: boolean;
+ detectFace?: boolean;
+ } & {
+ kind: 'openpose';
+ };
+ export type PreprocessImageOutput = {
+ blob: ImageBlob;
+ };
+ export type PreprocessImagePidinetInput = PreprocessImageInput & {
+ kind: 'pidinet';
+ } & {
+ safe?: SafeMode;
+ } & {
+ kind: 'pidinet';
+ };
+ export type PreprocessImageRealisticLineartInput = PreprocessImageInput & {
+ kind: 'lineart-realistic';
+ } & {
+ coarse?: CoarseMode;
+ } & {
+ kind: 'lineart-realistic';
+ };
+ export type PreprocessImageScribbleInput = PreprocessImageInput & {
+ kind: 'scribble';
+ } & {} & {
+ kind: 'scribble';
+ };
+ export type PreprocessImageScribblePidinetInput = PreprocessImageInput & {
+ kind: 'scribble-pidinet';
+ } & {
+ safe?: SafeMode;
+ } & {
+ kind: 'scribble-pidinet';
+ };
+ export type PreprocessImageScribbleXdogInput = PreprocessImageInput & {
+ kind: 'scribble-xdog';
+ } & {
+ threshold?: number;
+ } & {
+ kind: 'scribble-xdog';
+ };
+ export type PreprocessImageShuffleInput = PreprocessImageInput & {
+ kind: 'shuffle';
+ } & {
+ seed?: number;
+ } & {
+ kind: 'shuffle';
+ };
+ export type PreprocessImageStandardLineartInput = PreprocessImageInput & {
+ kind: 'lineart-standard';
+ } & {
+ gaussianSigma?: number;
+ intensityThreshold?: number;
+ } & {
+ kind: 'lineart-standard';
+ };
+ export type PreprocessImageStep = WorkflowStep & {
+ $type: 'preprocessImage';
+ } & {
+ input: PreprocessImageInput;
+ output?: PreprocessImageOutput;
+ } & {
+ $type: 'preprocessImage';
+ };
+ export type PreprocessImageStepTemplate = WorkflowStepTemplate & {
+ $type: 'preprocessImage';
+ } & {
+ input: PreprocessImageInput;
+ } & {
+ $type: 'preprocessImage';
+ };
+ export type PreprocessImageTeedInput = PreprocessImageInput & {
+ kind: 'teed';
+ } & {
+ safeSteps?: number;
+ } & {
+ kind: 'teed';
+ };
+ export type PreprocessImageTileInput = PreprocessImageInput & {
+ kind: 'tile';
+ } & {
+ pyrUpIterations?: number;
+ } & {
+ kind: 'tile';
+ };
+ export type PreprocessImageUniFormerInput = PreprocessImageInput & {
+ kind: 'uniformer';
+ } & {} & {
+ kind: 'uniformer';
+ };
+ export type PreprocessImageZoeDepthAnythingInput = PreprocessImageInput & {
+ kind: 'zoe-depth-anything';
+ } & {
+ environment?: ZoeDepthEnvironment;
+ } & {
+ kind: 'zoe-depth-anything';
+ };
+ export type PreprocessImageZoeDepthInput = PreprocessImageInput & {
+ kind: 'zoe-depth';
+ } & {} & {
+ kind: 'zoe-depth';
+ };
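
All of these inputs share the PreprocessImageInput base and are discriminated by kind. A sketch for the canny variant; the threshold values are illustrative:

import type { PreprocessImageCannyInput } from '@civitai/client';

const canny: PreprocessImageCannyInput = {
  kind: 'canny',
  image: 'https://example.com/input.png', // a URL, a DataURL, or a Base64 string
  resolution: 512,
  lowThreshold: 100,  // illustrative values
  highThreshold: 200,
};
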
  /**
  * Available options for priority.
  */
@@ -978,6 +1884,98 @@ export type ProblemDetails = {
  | (string | null)
  | undefined;
  };
+ export type Qwen20bCreateImageGenInput = Qwen20bImageGenInput & {
+ width?: number;
+ height?: number;
+ } & {
+ operation: 'createImage';
+ };
+ export type Qwen20bEditImageGenInput = Qwen20bImageGenInput & {
+ images: Array<string>;
+ readonly width?: number;
+ readonly height?: number;
+ } & {
+ operation: 'editImage';
+ };
+ export type Qwen20bImageGenInput = QwenImageGenInput & {
+ operation: string;
+ diffuserModel?: string;
+ prompt: string;
+ negativePrompt?: string | null;
+ sampleMethod?: SdCppSampleMethod;
+ schedule?: SdCppSchedule;
+ steps?: number;
+ cfgScale?: number;
+ seed?: number | null;
+ quantity?: number;
+ } & {
+ model: '20b';
+ };
+ export type Qwen20bVariantImageGenInput = Qwen20bImageGenInput & {
+ /**
+ * Either a URL, a DataURL, or a Base64 string
+ */
+ image: string;
+ strength?: number;
+ readonly width?: number;
+ readonly height?: number;
+ } & {
+ operation: 'createVariant';
+ };
+ /**
+ * AI Toolkit training for Qwen Image models
+ */
+ export type QwenAiToolkitTrainingInput = AiToolkitTrainingInput & {} & {
+ ecosystem: 'qwen';
+ };
+ export type QwenImageGenInput = SdCppImageGenInput & {
+ model: string;
+ } & {
+ ecosystem: 'qwen';
+ };
+ /**
+ * Represents the "for" clause in a repeat step that specifies what to iterate over.
+ */
+ export type RepeatForClause = {
+ /**
+ * The dynamic assignment reference to the array to iterate over (e.g., "$stepName").
+ */
+ $ref: string;
+ /**
+ * The path to the array property in the referenced step's output (e.g., "output.frames").
+ */
+ path: string;
+ /**
+ * The variable name to use when referencing the current iteration item in template steps (e.g., "frame").
+ */
+ as: string;
+ };
+ /**
+ * Represents the input information needed for the Repeat workflow step.
+ */
+ export type RepeatInput = {
+ for: RepeatForClause;
+ template: WorkflowStepTemplate;
+ };
+ /**
+ * Represents the output information returned from the Repeat workflow step.
+ */
+ export type RepeatOutput = {
+ steps: Array<WorkflowStep>;
+ };
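
Putting the pieces together, a RepeatInput iterates an array from an earlier step's output and stamps out one step per item. A hedged sketch: the '$extractFrames' reference follows the documented '$stepName' convention, but how a template refers to the current item ('$frame' below) is an assumption, and WorkflowStepTemplate's base shape is not shown in this diff:

import type { ConvertImageStepTemplate, RepeatInput } from '@civitai/client';

const template = {
  $type: 'convertImage',
  input: {
    image: '$frame', // assumed per-item reference derived from the `as` name below
    output: { format: 'png' },
  },
} as ConvertImageStepTemplate; // cast: WorkflowStepTemplate's base fields are not shown here

const repeat: RepeatInput = {
  for: {
    $ref: '$extractFrames', // dynamic reference to the step named "extractFrames"
    path: 'output.frames',  // the array on that step's output
    as: 'frame',            // iteration variable name used by the template
  },
  template,
};
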
1966
+ /**
1967
+ * Resizes an image to a target width while maintaining aspect ratio.
1968
+ */
1969
+ export type ResizeTransform = ImageTransform & {
1970
+ type: 'resize';
1971
+ } & {
1972
+ /**
1973
+ * Target width in pixels. Height is calculated to maintain aspect ratio.
1974
+ */
1975
+ targetWidth?: number | null;
1976
+ } & {
1977
+ type: 'resize';
1978
+ };
981
1979
  /**
982
1980
  * Details for a specific resource.
983
1981
  */
@@ -1056,6 +2054,31 @@ export type ResourceInfo = {
1056
2054
  */
1057
2055
  hasNSFWContentRestriction?: boolean;
1058
2056
  };
2057
+ /**
2058
+ * AI Toolkit training for Stable Diffusion 1.5 models
2059
+ */
2060
+ export type Sd1AiToolkitTrainingInput = AiToolkitTrainingInput & {
2061
+ /**
2062
+ * Learning is performed by putting noise of various strengths on the training image,
2063
+ * but depending on the difference in strength of the noise on which it is placed, learning will be
2064
+ * stable by moving closer to or farther from the learning target.
2065
+ *
2066
+ * Min SNR gamma was introduced to compensate for that. When learning images have little noise,
2067
+ * it may deviate greatly from the target, so try to suppress this jump.
2068
+ */
2069
+ minSnrGamma?: number | null;
2070
+ /**
2071
+ * The primary model to train upon.
2072
+ */
2073
+ model?: string;
2074
+ } & {
2075
+ ecosystem: 'sd1';
2076
+ };
2077
+ export declare const SafeMode: {
2078
+ readonly ENABLE: 'enable';
2079
+ readonly DISABLE: 'disable';
2080
+ };
2081
+ export type SafeMode = (typeof SafeMode)[keyof typeof SafeMode];
1059
2082
  /**
1060
2083
  * The available options for schedulers used in image generation.
1061
2084
  */
@@ -1090,6 +2113,113 @@ export declare const Scheduler: {
1090
2113
  * The available options for schedulers used in image generation.
1091
2114
  */
1092
2115
  export type Scheduler = (typeof Scheduler)[keyof typeof Scheduler];
2116
+ export type SdCppImageGenInput = ImageGenInput & {
2117
+ engine: 'sdcpp';
2118
+ } & {
2119
+ ecosystem: string;
2120
+ } & {
2121
+ engine: 'sdcpp';
2122
+ };
2123
+ export declare const SdCppSampleMethod: {
2124
+ readonly EULER: 'euler';
2125
+ readonly HEUN: 'heun';
2126
+ readonly DPM2: 'dpm2';
2127
+ readonly 'DPM++2S_A': 'dpm++2s_a';
2128
+ readonly 'DPM++2M': 'dpm++2m';
2129
+ readonly 'DPM++2MV2': 'dpm++2mv2';
2130
+ readonly IPNDM: 'ipndm';
2131
+ readonly IPNDM_V: 'ipndm_v';
2132
+ readonly DDIM_TRAILING: 'ddim_trailing';
2133
+ readonly EULER_A: 'euler_a';
2134
+ readonly LCM: 'lcm';
2135
+ };
2136
+ export type SdCppSampleMethod = (typeof SdCppSampleMethod)[keyof typeof SdCppSampleMethod];
2137
+ export declare const SdCppSchedule: {
2138
+ readonly SIMPLE: 'simple';
2139
+ readonly DISCRETE: 'discrete';
2140
+ readonly KARRAS: 'karras';
2141
+ readonly EXPONENTIAL: 'exponential';
2142
+ readonly AYS: 'ays';
2143
+ };
2144
+ export type SdCppSchedule = (typeof SdCppSchedule)[keyof typeof SdCppSchedule];
2145
+ /**
2146
+ * AI Toolkit training for Stable Diffusion XL models
2147
+ */
2148
+ export type SdxlAiToolkitTrainingInput = AiToolkitTrainingInput & {
2149
+ /**
2150
+ * Learning is performed by putting noise of various strengths on the training image,
2151
+ * but depending on the difference in strength of the noise on which it is placed, learning will be
2152
+ * stable by moving closer to or farther from the learning target.
2153
+ *
2154
+ * Min SNR gamma was introduced to compensate for that. When learning images have little noise,
2155
+ * it may deviate greatly from the target, so try to suppress this jump.
2156
+ */
2157
+ minSnrGamma?: number | null;
2158
+ /**
2159
+ * The primary model to train upon.
2160
+ */
2161
+ model?: string;
2162
+ } & {
2163
+ ecosystem: 'sdxl';
2164
+ };
2165
+ export type SeedreamImageGenInput = ImageGenInput & {
2166
+ engine: 'seedream';
2167
+ } & {
2168
+ prompt: string;
2169
+ quantity?: number;
2170
+ width?: number;
2171
+ height?: number;
2172
+ guidanceScale?: number;
2173
+ seed?: number | null;
2174
+ enableSafetyChecker?: boolean;
2175
+ version?: SeedreamVersion;
2176
+ images?: Array<string>;
2177
+ } & {
2178
+ engine: 'seedream';
2179
+ };
2180
+ export declare const SeedreamVersion: {
2181
+ readonly V3: 'v3';
2182
+ readonly V4: 'v4';
2183
+ readonly V4_5: 'v4.5';
2184
+ };
2185
+ export type SeedreamVersion = (typeof SeedreamVersion)[keyof typeof SeedreamVersion];
2186
+ /**
2187
+ * Sora 2 Image-to-Video
2188
+ * FAL Endpoints:
2189
+ * - Standard: https://fal.ai/api/openapi/queue/openapi.json?endpoint_id=fal-ai/sora-2/image-to-video
2190
+ * - Pro: https://fal.ai/api/openapi/queue/openapi.json?endpoint_id=fal-ai/sora-2/image-to-video/pro
2191
+ */
2192
+ export type Sora2ImageToVideoInput = SoraVideoGenInput & {
2193
+ images?: Array<string>;
2194
+ } & {
2195
+ operation: 'image-to-video';
2196
+ };
2197
+ /**
2198
+ * Sora 2 Text-to-Video
2199
+ * FAL Endpoints:
2200
+ * - Standard: https://fal.ai/api/openapi/queue/openapi.json?endpoint_id=fal-ai/sora-2/text-to-video
2201
+ * - Pro: https://fal.ai/api/openapi/queue/openapi.json?endpoint_id=fal-ai/sora-2/text-to-video/pro
2202
+ */
2203
+ export type Sora2TextToVideoInput = SoraVideoGenInput & {} & {
2204
+ operation: 'text-to-video';
2205
+ };
2206
+ /**
2207
+ * Base class for Sora 2 video generation (OpenAI's Sora-2 model via FAL)
2208
+ * Since FAL has a one-to-one mapping with OpenAI's Sora API, we don't need a provider layer.
2209
+ * Discriminator: operation (text-to-video or image-to-video)
2210
+ */
2211
+ export type SoraVideoGenInput = VideoGenInput & {
2212
+ engine: 'sora';
2213
+ } & {
2214
+ operation: string | null;
2215
+ duration?: number;
2216
+ seed?: number | null;
2217
+ resolution?: '720p' | '1080p';
2218
+ aspectRatio?: 'auto' | '16:9' | '9:16';
2219
+ usePro?: boolean;
2220
+ } & {
2221
+ engine: 'sora';
2222
+ };
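
With operation as the discriminator, a text-to-video request is just the base fields plus that literal. A sketch — the prompt field is assumed to come from the VideoGenInput base, which this diff shows only partially, and usePro presumably selects the Pro FAL endpoint listed above:

import type { Sora2TextToVideoInput } from '@civitai/client';

const sora: Sora2TextToVideoInput = {
  engine: 'sora',
  operation: 'text-to-video',
  prompt: 'A timelapse of clouds over a mountain lake', // assumed base VideoGenInput field
  duration: 8,
  resolution: '1080p',
  aspectRatio: '16:9',
  usePro: false, // presumably routes to the Pro endpoint when true
};
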
  /**
  * Input for a text to image step.
  */
@@ -1157,6 +2287,7 @@ export type TextToImageInput = {
  * An optional engine to use for generation.
  */
  engine?: string | null;
+ outputFormat?: OutputFormat;
  };
  /**
  * Represents the output of a TextToImage workflow step.
@@ -1188,6 +2319,92 @@ export type TextToImageStepTemplate = WorkflowStepTemplate & {
  } & {
  $type: 'textToImage';
  };
+ /**
+ * Represents training data in various formats
+ */
+ export type TrainingData = {
+ type: string;
+ };
+ /**
+ * Input for a training step.
+ */
+ export type TrainingInput = {
+ engine: string;
+ trainingData: TrainingData;
+ samples?: TrainingInputSamples;
+ };
+ /**
+ * Sample generation configuration for training workflows
+ */
+ export type TrainingInputSamples = {
+ /**
+ * A selection of sample prompts to generate preview outputs during training.
+ */
+ prompts?: Array<string>;
+ /**
+ * An optional negative prompt that will be applied when generating samples
+ */
+ negativePrompt?: string | null;
+ };
+ /**
+ * The moderation status of the training data
+ */
+ export declare const TrainingModerationStatus: {
+ readonly EVALUATING: 'evaluating';
+ readonly UNDER_REVIEW: 'underReview';
+ readonly APPROVED: 'approved';
+ readonly REJECTED: 'rejected';
+ };
+ /**
+ * The moderation status of the training data
+ */
+ export type TrainingModerationStatus =
+ (typeof TrainingModerationStatus)[keyof typeof TrainingModerationStatus];
+ /**
+ * Output from a training step.
+ */
+ export type TrainingOutput = {
+ moderationStatus?: TrainingModerationStatus;
+ /**
+ * The trained model artifacts for each epoch
+ */
+ epochs?: Array<TrainingOutputEpochResult>;
+ };
+ /**
+ * Represents the output of a single training epoch
+ */
+ export type TrainingOutputEpochResult = {
+ /**
+ * The epoch number (1-based)
+ */
+ epochNumber?: number;
+ model: Blob;
+ /**
+ * Sample outputs (images/videos/audio) generated with this epoch's model
+ */
+ samples?: Array<Blob>;
+ };
+ /**
+ * Training
+ */
+ export type TrainingStep = WorkflowStep & {
+ $type: 'training';
+ } & {
+ input: TrainingInput;
+ output?: TrainingOutput;
+ } & {
+ $type: 'training';
+ };
+ /**
+ * Training
+ */
+ export type TrainingStepTemplate = WorkflowStepTemplate & {
+ $type: 'training';
+ } & {
+ input: TrainingInput;
+ } & {
+ $type: 'training';
+ };
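
Since TrainingOutput returns one artifact per saved epoch, picking the newest model is a matter of ordering by epochNumber; a small sketch:

import type { TrainingOutput, TrainingOutputEpochResult } from '@civitai/client';

// Pick the latest saved epoch (epochNumber is 1-based when present).
function latestEpoch(output: TrainingOutput): TrainingOutputEpochResult | undefined {
  return output.epochs
    ?.slice()
    .sort((a, b) => (a.epochNumber ?? 0) - (b.epochNumber ?? 0))
    .at(-1);
}
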
  /**
  * Transaction information.
  */
@@ -1257,6 +2474,16 @@ export type TranscodeStep = WorkflowStep & {
  } & {
  $type: 'transcode';
  };
+ /**
+ * Transcoding
+ */
+ export type TranscodeStepTemplate = WorkflowStepTemplate & {
+ $type: 'transcode';
+ } & {
+ input: TranscodeInput;
+ } & {
+ $type: 'transcode';
+ };
  export type TryOnUInput = {
  subjectUrl: string;
  garmentUrl: string;
@@ -1339,6 +2566,11 @@ export declare const Veo3AspectRatio: {
  readonly '1:1': '1:1';
  };
  export type Veo3AspectRatio = (typeof Veo3AspectRatio)[keyof typeof Veo3AspectRatio];
+ export declare const Veo3Version: {
+ readonly '3_0': '3.0';
+ readonly '3_1': '3.1';
+ };
+ export type Veo3Version = (typeof Veo3Version)[keyof typeof Veo3Version];
  export type Veo3VideoGenInput = VideoGenInput & {
  engine: 'veo3';
  } & {
@@ -1350,6 +2582,7 @@ export type Veo3VideoGenInput = VideoGenInput & {
  seed?: number | null;
  fastMode?: boolean;
  images?: Array<string>;
+ version?: Veo3Version;
  } & {
  engine: 'veo3';
  };
@@ -1378,25 +2611,68 @@ export type VideoEnhancementOutput = {
  video: VideoBlob;
  };
  /**
- * Upscale videos and/or interpolate frames
+ * Upscale videos and/or interpolate frames
+ */
+ export type VideoEnhancementStep = WorkflowStep & {
+ $type: 'videoEnhancement';
+ } & {
+ input: VideoEnhancementInput;
+ output?: VideoEnhancementOutput;
+ } & {
+ $type: 'videoEnhancement';
+ };
+ /**
+ * Upscale videos and/or interpolate frames
+ */
+ export type VideoEnhancementStepTemplate = WorkflowStepTemplate & {
+ $type: 'videoEnhancement';
+ } & {
+ input: VideoEnhancementInput;
+ } & {
+ $type: 'videoEnhancement';
+ };
+ /**
+ * Represents the input information needed for the VideoFrameExtraction workflow step.
+ */
+ export type VideoFrameExtractionInput = {
+ videoUrl: string;
+ frameRate?: number;
+ uniqueThreshold?: number;
+ maxFrames?: number;
+ };
+ /**
+ * Represents the output from the VideoFrameExtraction workflow step.
+ */
+ export type VideoFrameExtractionOutput = {
+ /**
+ * A collection of extracted unique video frames as image blobs.
+ */
+ frames: Array<ImageBlob>;
+ /**
+ * The total number of unique frames extracted from the video.
+ */
+ totalFramesExtracted?: number;
+ };
+ /**
+ * Video Frame Extraction
  */
- export type VideoEnhancementStep = WorkflowStep & {
- $type: 'videoEnhancement';
+ export type VideoFrameExtractionStep = WorkflowStep & {
+ $type: 'videoFrameExtraction';
  } & {
- input: VideoEnhancementInput;
- output?: VideoEnhancementOutput;
+ input: VideoFrameExtractionInput;
+ output?: VideoFrameExtractionOutput;
  } & {
- $type: 'videoEnhancement';
+ $type: 'videoFrameExtraction';
  };
  /**
- * Upscale videos and/or interpolate frames
+ * Video Frame Extraction
  */
- export type VideoEnhancementStepTemplate = WorkflowStepTemplate & {
- $type: 'videoEnhancement';
+ export type VideoFrameExtractionStepTemplate = WorkflowStepTemplate & {
+ $type: 'videoFrameExtraction';
  } & {
- input: VideoEnhancementInput;
+ input: VideoFrameExtractionInput;
  } & {
- $type: 'videoEnhancement';
+ $type: 'videoFrameExtraction';
  };
1401
2677
  export type VideoGenInput = {
1402
2678
  engine: string;
@@ -1430,6 +2706,114 @@ export type VideoGenStepTemplate = WorkflowStepTemplate & {
1430
2706
  } & {
1431
2707
  $type: 'videoGen';
1432
2708
  };
2709
+ export type VideoInterpolationInput = {
2710
+ video: string;
2711
+ interpolationFactor?: number;
2712
+ model?: string;
2713
+ };
2714
+ export type VideoInterpolationOutput = {
2715
+ video: VideoBlob;
2716
+ };
2717
+ /**
2718
+ * Interpolate videos using VFI Mamba
2719
+ */
2720
+ export type VideoInterpolationStep = WorkflowStep & {
2721
+ $type: 'videoInterpolation';
2722
+ } & {
2723
+ input: VideoInterpolationInput;
2724
+ output?: VideoInterpolationOutput;
2725
+ } & {
2726
+ $type: 'videoInterpolation';
2727
+ };
2728
+ /**
2729
+ * Interpolate videos using VFI Mamba
2730
+ */
2731
+ export type VideoInterpolationStepTemplate = WorkflowStepTemplate & {
2732
+ $type: 'videoInterpolation';
2733
+ } & {
2734
+ input: VideoInterpolationInput;
2735
+ } & {
2736
+ $type: 'videoInterpolation';
2737
+ };
2738
+ /**
2739
+ * Represents the input information needed for the VideoMetadata workflow step.
2740
+ */
2741
+ export type VideoMetadataInput = {
2742
+ /**
2743
+ * The video file to extract metadata from.
2744
+ */
2745
+ video: string;
2746
+ };
2747
+ /**
2748
+ * Represents the output information returned from the VideoMetadata workflow step.
2749
+ */
2750
+ export type VideoMetadataOutput = {
2751
+ /**
2752
+ * The width of the video in pixels.
2753
+ */
2754
+ width: number;
2755
+ /**
2756
+ * The height of the video in pixels.
2757
+ */
2758
+ height: number;
2759
+ /**
2760
+ * The frame rate of the video in frames per second.
2761
+ */
2762
+ fps: number;
2763
+ /**
2764
+ * The duration of the video.
2765
+ */
2766
+ duration: string;
2767
+ };
2768
+ /**
2769
+ * Extract metadata from videos including width, height, FPS, and duration
2770
+ */
2771
+ export type VideoMetadataStep = WorkflowStep & {
2772
+ $type: 'videoMetadata';
2773
+ } & {
2774
+ input: VideoMetadataInput;
2775
+ output?: VideoMetadataOutput;
2776
+ } & {
2777
+ $type: 'videoMetadata';
2778
+ };
2779
+ /**
2780
+ * Extract metadata from videos including width, height, FPS, and duration
2781
+ */
2782
+ export type VideoMetadataStepTemplate = WorkflowStepTemplate & {
2783
+ $type: 'videoMetadata';
2784
+ } & {
2785
+ input: VideoMetadataInput;
2786
+ } & {
2787
+ $type: 'videoMetadata';
2788
+ };
+ export type VideoUpscalerInput = {
+ video: string;
+ scaleFactor?: number;
+ };
+ export type VideoUpscalerOutput = {
+ video: VideoBlob;
+ };
+ /**
+ * Upscale videos using FlashVSR
+ */
+ export type VideoUpscalerStep = WorkflowStep & {
+ $type: 'videoUpscaler';
+ } & {
+ input: VideoUpscalerInput;
+ output?: VideoUpscalerOutput;
+ } & {
+ $type: 'videoUpscaler';
+ };
+ /**
+ * Upscale videos using FlashVSR
+ */
+ export type VideoUpscalerStepTemplate = WorkflowStepTemplate & {
+ $type: 'videoUpscaler';
+ } & {
+ input: VideoUpscalerInput;
+ } & {
+ $type: 'videoUpscaler';
+ };
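
The upscaler input mirrors the interpolation input. A sketch, assuming `scaleFactor` is a resolution multiplier (the declaration leaves its semantics and default unstated):

import type { VideoUpscalerInput } from '@civitai/client';

// Hypothetical request: upscale a clip with FlashVSR.
const upscale: VideoUpscalerInput = {
  video: 'https://example.com/clip.mp4',
  scaleFactor: 2, // assumption: 2x width and height
};
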
  export type ViduVideoGenInput = VideoGenInput & {
  engine: 'vidu';
  } & {
@@ -1463,6 +2847,62 @@ export declare const ViduVideoGenStyle: {
  readonly ANIME: 'anime';
  };
  export type ViduVideoGenStyle = (typeof ViduVideoGenStyle)[keyof typeof ViduVideoGenStyle];
+ /**
+ * Represents the input information needed for the WDTagging workflow step.
+ */
+ export type WdTaggingInput = {
+ /**
+ * The model to use for tagging (e.g., "wd14-vit.v1").
+ */
+ model?: string | null;
+ mediaUrl: string;
+ /**
+ * Optional threshold for tag confidence filtering. Tags below this threshold will be excluded.
+ */
+ threshold?: number | null;
+ /**
+ * Optional prompt to guide the tagging process.
+ */
+ prompt?: string | null;
+ };
+ /**
+ * Represents the output information returned from the WDTagging workflow step.
+ */
+ export type WdTaggingOutput = {
+ /**
+ * The detected tags with their confidence scores.
+ */
+ tags: {
+ [key: string]: number;
+ };
+ /**
+ * The content rating scores (general, sensitive, questionable, explicit).
+ */
+ rating?: {
+ [key: string]: number;
+ } | null;
+ };
+ /**
+ * WDTagging
+ */
+ export type WdTaggingStep = WorkflowStep & {
+ $type: 'wdTagging';
+ } & {
+ input: WdTaggingInput;
+ output?: WdTaggingOutput;
+ } & {
+ $type: 'wdTagging';
+ };
+ /**
+ * WDTagging
+ */
+ export type WdTaggingStepTemplate = WorkflowStepTemplate & {
+ $type: 'wdTagging';
+ } & {
+ input: WdTaggingInput;
+ } & {
+ $type: 'wdTagging';
+ };
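
Because `tags` is an open string-to-number map, post-processing is ordinary object iteration. A sketch of requesting tags and picking the highest-confidence ones; the model name comes from the field's own doc comment, and the client-side sort is illustrative:

import type { WdTaggingInput, WdTaggingOutput } from '@civitai/client';

const input: WdTaggingInput = {
  mediaUrl: 'https://example.com/image.png',
  model: 'wd14-vit.v1',
  threshold: 0.35, // server-side filter: tags below this confidence are excluded
};

// Return the n highest-confidence tags from a tagging result.
function topTags(output: WdTaggingOutput, n = 10): Array<[string, number]> {
  return Object.entries(output.tags)
    .sort(([, a], [, b]) => b - a)
    .slice(0, n);
}
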
  export type Wan21CivitaiVideoGenInput = Wan21VideoGenInput & {
  width?: number;
  height?: number;
@@ -1499,6 +2939,7 @@ export type Wan225bFalVideoGenInput = Wan225bVideoGenInput & {
  aspectRatio?: '1:1' | '16:9' | '9:16' | 'auto';
  enablePromptExpansion?: boolean;
  useDistill?: boolean;
+ useFastWan?: boolean;
  interpolatorModel?: 'none' | 'film' | 'rife';
  negativePrompt?: string | null;
  enableSafetyChecker?: boolean;
@@ -1555,6 +2996,46 @@ export type Wan22VideoGenInput = WanVideoGenInput & {
  } & {
  version: 'v2.2';
  };
+ export type Wan25FalImageGenInput = Wan25ImageGenInput & {
+ operation: string | null;
+ } & {
+ provider: 'fal';
+ };
+ export type Wan25FalImageToImageInput = Wan25FalImageGenInput & {
+ images?: Array<string>;
+ } & {
+ operation: 'image-to-image';
+ };
+ export type Wan25FalImageToVideoInput = Wan25FalVideoGenInput & {
+ images?: Array<string>;
+ } & {
+ operation: 'image-to-video';
+ };
+ export type Wan25FalTextToImageInput = Wan25FalImageGenInput & {} & {
+ operation: 'text-to-image';
+ };
+ export type Wan25FalTextToVideoInput = Wan25FalVideoGenInput & {} & {
+ operation: 'text-to-video';
+ };
+ export type Wan25FalVideoGenInput = Wan25VideoGenInput & {
+ operation: string | null;
+ resolution?: '480p' | '720p' | '1080p';
+ aspectRatio?: '16:9' | '9:16' | '1:1';
+ enablePromptExpansion?: boolean;
+ negativePrompt?: string | null;
+ } & {
+ provider: 'fal';
+ };
+ export type Wan25ImageGenInput = WanImageGenInput & {
+ provider: string | null;
+ } & {
+ version: 'v2.5';
+ };
+ export type Wan25VideoGenInput = WanVideoGenInput & {
+ provider: string | null;
+ } & {
+ version: 'v2.5';
+ };
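
Flattening the intersections above, a Wan 2.5 request is identified by four discriminants: `engine`, `version`, `provider`, and `operation`. A sketch showing just those plus this hunk's optional fields; the prompt and other base `VideoGenInput` members are declared elsewhere in the diff and omitted here:

import type { Wan25FalTextToVideoInput } from '@civitai/client';

// Discriminants and options visible in this hunk for a Wan 2.5 fal text-to-video call.
const wan25: Pick<
  Wan25FalTextToVideoInput,
  'engine' | 'version' | 'provider' | 'operation' | 'resolution' | 'aspectRatio'
> = {
  engine: 'wan',
  version: 'v2.5',
  provider: 'fal',
  operation: 'text-to-video',
  resolution: '720p',
  aspectRatio: '16:9',
};
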
  export type WanImageGenInput = ImageGenInput & {
  engine: 'wan';
  } & {
@@ -1588,6 +3069,28 @@ export type WanVideoGenInput = VideoGenInput & {
  } & {
  engine: 'wan';
  };
+ /**
+ * WebP output format configuration.
+ */
+ export type WebpOutputFormat = ImageOutputFormat & {
+ format: 'webp';
+ } & {
+ /**
+ * Quality setting for WebP compression (1-100). Only applies when Lossless is false.
+ */
+ quality?: number;
+ /**
+ * When true, uses lossless compression. When false, uses lossy compression with the Quality setting.
+ */
+ lossless?: boolean;
+ /**
+ * Maximum number of frames to include in the output. Set to 1 to extract only the first frame from animated images.
+ * When null, all frames are preserved.
+ */
+ maxFrames?: number | null;
+ } & {
+ format: 'webp';
+ };
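
A sketch of a lossy animated-to-still conversion using only the fields shown in this hunk; whatever `ImageOutputFormat` contributes is declared elsewhere:

import type { WebpOutputFormat } from '@civitai/client';

// Lossy WebP at quality 80, keeping only the first frame of an animated source.
const webp: Pick<WebpOutputFormat, 'format' | 'quality' | 'lossless' | 'maxFrames'> = {
  format: 'webp',
  quality: 80, // ignored when lossless is true
  lossless: false,
  maxFrames: 1, // per the doc comment: extract only the first frame
};
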
  /**
  * Details of a workflow.
  */
@@ -1646,6 +3149,10 @@ export type Workflow = {
  */
  allowMatureContent?: boolean | null;
  upgradeMode?: WorkflowUpgradeMode;
+ /**
+ * An optional set of currencies to use for this workflow.
+ */
+ currencies?: Array<BuzzClientAccount>;
  };
  /**
  * Details of a callback setup for a workflow.
@@ -1681,6 +3188,10 @@ export type WorkflowCallback = {
  | 'job:expired'
  | 'job:canceled'
  >;
+ /**
+ * Whether to include detailed output (step/workflow results) in the callback payload.
+ */
+ detailed?: boolean | null;
  };
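
The new `detailed` flag is the opt-in switch for the richer payloads described by `WorkflowEventDetails` below. A minimal sketch; the callback's other fields (URL, event types) sit outside this hunk:

import type { WorkflowCallback } from '@civitai/client';

// Opt a callback into detailed payloads (step/workflow results included).
const callbackPatch: Pick<WorkflowCallback, 'detailed'> = {
  detailed: true,
};
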
1685
3196
  export type WorkflowCost = {
1686
3197
  /**
@@ -1732,6 +3243,76 @@ export type WorkflowEvent = {
  */
  timestamp?: string;
  $type?: string;
+ details?: WorkflowEventDetails;
+ };
+ /**
+ * Detailed information about a workflow included in webhook callbacks when detailed mode is enabled.
+ */
+ export type WorkflowEventDetails = {
+ /**
+ * Custom metadata associated with the workflow
+ */
+ metadata?: {
+ [key: string]: unknown;
+ } | null;
+ /**
+ * Arguments provided to the workflow
+ */
+ arguments?: {
+ [key: string]: unknown;
+ } | null;
+ /**
+ * When the workflow was created
+ */
+ createdAt?: string;
+ /**
+ * When the workflow completed execution
+ */
+ completedAt?: string | null;
+ /**
+ * When the workflow started execution
+ */
+ startedAt?: string | null;
+ /**
+ * Details about each step in the workflow
+ */
+ steps?: Array<WorkflowEventStepDetails>;
+ };
+ /**
+ * Detailed information about a workflow step within a workflow event.
+ */
+ export type WorkflowEventStepDetails = {
+ /**
+ * The name of the step
+ */
+ name: string;
+ status: WorkflowStatus;
+ /**
+ * When the step started execution
+ */
+ startedAt?: string | null;
+ /**
+ * When the step completed execution
+ */
+ completedAt?: string | null;
+ /**
+ * Custom metadata associated with the step
+ */
+ metadata?: {
+ [key: string]: unknown;
+ } | null;
+ /**
+ * The input configuration for the step
+ */
+ input?: unknown;
+ /**
+ * The output result from the step
+ */
+ output?: unknown;
+ /**
+ * The cost of executing the step
+ */
+ cost?: number | null;
  };
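
With `detailed` enabled, each workflow event carries a `details` object. A sketch of folding the per-step details into a summary; only fields declared above are used:

import type { WorkflowEventDetails } from '@civitai/client';

// Aggregate a detailed workflow event: total spend plus any steps that never finished.
function summarize(details: WorkflowEventDetails): { totalCost: number; unfinished: string[] } {
  const steps = details.steps ?? [];
  return {
    totalCost: steps.reduce((sum, step) => sum + (step.cost ?? 0), 0),
    // Steps with no completedAt timestamp have not finished executing.
    unfinished: steps.filter((step) => step.completedAt == null).map((step) => step.name),
  };
}
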
  /**
  * Values available to represent workflow status.
@@ -1803,9 +3384,45 @@ export type WorkflowStepEvent = {
  /**
  * The workflow step's name.
  */
- stepName: string;
+ name: string;
  status: WorkflowStatus;
  $type?: string;
+ details?: WorkflowStepEventDetails;
+ };
+ /**
+ * Detailed information about a workflow step included in webhook callbacks when detailed mode is enabled.
+ */
+ export type WorkflowStepEventDetails = {
+ /**
+ * When the step started execution
+ */
+ startedAt?: string | null;
+ /**
+ * When the step completed execution
+ */
+ completedAt?: string | null;
+ /**
+ * Estimated progress rate of the step (0.0 to 1.0)
+ */
+ estimatedProgressRate?: number | null;
+ /**
+ * Number of times the step has been retried
+ */
+ retries?: number;
+ /**
+ * Custom metadata associated with the step
+ */
+ metadata?: {
+ [key: string]: unknown;
+ } | null;
+ /**
+ * The input configuration for the step
+ */
+ input?: unknown;
+ /**
+ * The output result from the step
+ */
+ output?: unknown;
  };
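
Note the breaking rename in `WorkflowStepEvent`: the step's name now arrives as `name` rather than `stepName`. A sketch of rendering a progress line from a step event using the new `details` payload:

import type { WorkflowStepEvent } from '@civitai/client';

// Format "name [status]: NN%" from a step event; progress may be absent.
function progressLine(event: WorkflowStepEvent): string {
  const rate = event.details?.estimatedProgressRate;
  const pct = rate == null ? '?' : String(Math.round(rate * 100));
  return `${event.name} [${event.status}]: ${pct}%`;
}
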
  /**
  * Details of a job produced by a workflow step.
@@ -1943,6 +3560,10 @@ export type WorkflowTemplate = {
  */
  allowMatureContent?: boolean | null;
  upgradeMode?: WorkflowUpgradeMode;
+ /**
+ * Limit the currencies that can be used to pay for this workflow.
+ */
+ currencies?: Array<BuzzClientAccount>;
  };
  export type WorkflowTips = {
  /**
@@ -1965,6 +3586,58 @@ export declare const WorkflowUpgradeMode: {
  * Specifies how a workflow should be upgraded when mature content is detected and green or blue buzz was used for payment.
  */
  export type WorkflowUpgradeMode = (typeof WorkflowUpgradeMode)[keyof typeof WorkflowUpgradeMode];
+ export type ZImageImageGenInput = SdCppImageGenInput & {
+ model: string;
+ } & {
+ ecosystem: 'zImage';
+ };
+ /**
+ * AI Toolkit training for Z Image Turbo models
+ */
+ export type ZImageTurboAiToolkitTrainingInput = AiToolkitTrainingInput & {} & {
+ ecosystem: 'zimageturbo';
+ };
+ export type ZImageTurboCreateImageGenInput = ZImageTurboImageGenInput & {
+ width?: number;
+ height?: number;
+ } & {
+ operation: 'createImage';
+ };
+ export type ZImageTurboImageGenInput = ZImageImageGenInput & {
+ operation: string;
+ prompt: string;
+ negativePrompt?: string | null;
+ sampleMethod?: SdCppSampleMethod;
+ schedule?: SdCppSchedule;
+ steps?: number;
+ cfgScale?: number;
+ seed?: number | null;
+ quantity?: number;
+ } & {
+ model: 'turbo';
+ };
+ /**
+ * Training data packaged as a zip file
+ */
+ export type ZipTrainingData = TrainingData & {
+ type: 'zip';
+ } & {
+ /**
+ * AIR pointing to the zip file containing training data
+ */
+ sourceUrl: string;
+ /**
+ * The number of images/frames/items in this training data
+ */
+ count: number;
+ } & {
+ type: 'zip';
+ };
+ export declare const ZoeDepthEnvironment: {
+ readonly INDOOR: 'indoor';
+ readonly OUTDOOR: 'outdoor';
+ };
+ export type ZoeDepthEnvironment = (typeof ZoeDepthEnvironment)[keyof typeof ZoeDepthEnvironment];
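
A sketch of a Z Image Turbo generation request limited to the discriminants and options declared in this hunk; fields inherited from `SdCppImageGenInput` are defined elsewhere in the diff:

import type { ZImageTurboCreateImageGenInput } from '@civitai/client';

// Discriminants plus common sampling options for a Z Image Turbo createImage call.
const zImage: Pick<
  ZImageTurboCreateImageGenInput,
  'ecosystem' | 'model' | 'operation' | 'prompt' | 'width' | 'height' | 'steps' | 'cfgScale'
> = {
  ecosystem: 'zImage',
  model: 'turbo',
  operation: 'createImage',
  prompt: 'a lighthouse at dusk, oil painting',
  width: 1024,
  height: 1024,
  steps: 8, // illustrative values; defaults are not documented in the declarations
  cfgScale: 1,
};
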
  export type GetBlobData = {
  body?: never;
  path: {
@@ -1990,6 +3663,14 @@ export type GetBlobErrors = {
  * Unauthorized
  */
  401: ProblemDetails;
+ /**
+ * Forbidden
+ */
+ 403: ProblemDetails;
+ /**
+ * Not Found
+ */
+ 404: ProblemDetails;
  };
  export type GetBlobError = GetBlobErrors[keyof GetBlobErrors];
  export type HeadBlobData = {
@@ -2021,10 +3702,71 @@ export type HeadBlobResponses = {
  204: void;
  };
  export type HeadBlobResponse = HeadBlobResponses[keyof HeadBlobResponses];
+ export type GetBlobContentData = {
+ body?: never;
+ path: {
+ /**
+ * The encrypted token containing blob access parameters
+ */
+ encryptedToken: string;
+ };
+ query?: never;
+ url: '/v2/consumer/blobs/content/{encryptedToken}';
+ };
+ export type GetBlobContentErrors = {
+ /**
+ * Bad Request
+ */
+ 400: ProblemDetails;
+ /**
+ * Unauthorized
+ */
+ 401: ProblemDetails;
+ /**
+ * Not Found
+ */
+ 404: ProblemDetails;
+ /**
+ * Internal Server Error
+ */
+ 500: unknown;
+ };
+ export type GetBlobContentError = GetBlobContentErrors[keyof GetBlobContentErrors];
+ export type GetBlobContentResponses = {
+ /**
+ * OK
+ */
+ 200: unknown;
+ };
+ export type GetBlockedContentData = {
+ body?: never;
+ path: {
+ /**
+ * The encrypted token containing blocked content parameters
+ */
+ encryptedToken: string;
+ };
+ query?: never;
+ url: '/v2/consumer/blobs/blocked/{encryptedToken}';
+ };
+ export type GetBlockedContentErrors = {
+ /**
+ * Unauthorized
+ */
+ 401: ProblemDetails;
+ /**
+ * Forbidden
+ */
+ 403: ProblemDetails;
+ };
+ export type GetBlockedContentError = GetBlockedContentErrors[keyof GetBlockedContentErrors];
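
The two new blob routes are token-addressed. A sketch of resolving the content URL from the typed `Data` shape; the base URL is a placeholder, and the encrypted token itself is issued by the service:

import type { GetBlobContentData } from '@civitai/client';

// Build the concrete request URL for the token-addressed blob-content route.
function blobContentUrl(baseUrl: string, data: GetBlobContentData): string {
  const path = data.url.replace('{encryptedToken}', encodeURIComponent(data.path.encryptedToken));
  return baseUrl + path;
}
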
  export type InvokeAgeClassificationStepTemplateData = {
  body?: AgeClassificationInput;
  path?: never;
- query?: never;
+ query?: {
+ experimental?: boolean;
+ allowMatureContent?: boolean;
+ };
  url: '/v2/consumer/recipes/ageClassification';
  };
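
Every `/v2/consumer/recipes/*` route in this release swaps `query?: never` for the same pair of optional flags. A sketch of a request object carrying them (the declarations do not document either flag; `experimental` is presumably a server-side gate):

import type { InvokeAgeClassificationStepTemplateData } from '@civitai/client';

// The new optional query flags shared by all recipe invocation routes.
const request: Pick<InvokeAgeClassificationStepTemplateData, 'url' | 'query'> = {
  url: '/v2/consumer/recipes/ageClassification',
  query: {
    experimental: true,
    allowMatureContent: false,
  },
};
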
  export type InvokeAgeClassificationStepTemplateErrors = {
@@ -2050,7 +3792,10 @@ export type InvokeAgeClassificationStepTemplateResponse =
  export type InvokeComfyStepTemplateData = {
  body?: ComfyInput;
  path?: never;
- query?: never;
+ query?: {
+ experimental?: boolean;
+ allowMatureContent?: boolean;
+ };
  url: '/v2/consumer/recipes/comfy';
  };
  export type InvokeComfyStepTemplateErrors = {
@@ -2073,10 +3818,42 @@ export type InvokeComfyStepTemplateResponses = {
  };
  export type InvokeComfyStepTemplateResponse =
  InvokeComfyStepTemplateResponses[keyof InvokeComfyStepTemplateResponses];
+ export type InvokeConvertImageStepTemplateData = {
+ body?: ConvertImageInput;
+ path?: never;
+ query?: {
+ experimental?: boolean;
+ allowMatureContent?: boolean;
+ };
+ url: '/v2/consumer/recipes/convertImage';
+ };
+ export type InvokeConvertImageStepTemplateErrors = {
+ /**
+ * Bad Request
+ */
+ 400: ProblemDetails;
+ /**
+ * Unauthorized
+ */
+ 401: ProblemDetails;
+ };
+ export type InvokeConvertImageStepTemplateError =
+ InvokeConvertImageStepTemplateErrors[keyof InvokeConvertImageStepTemplateErrors];
+ export type InvokeConvertImageStepTemplateResponses = {
+ /**
+ * OK
+ */
+ 200: ConvertImageOutput;
+ };
+ export type InvokeConvertImageStepTemplateResponse =
+ InvokeConvertImageStepTemplateResponses[keyof InvokeConvertImageStepTemplateResponses];
  export type InvokeEchoStepTemplateData = {
  body?: EchoInput;
  path?: never;
- query?: never;
+ query?: {
+ experimental?: boolean;
+ allowMatureContent?: boolean;
+ };
  url: '/v2/consumer/recipes/echo';
  };
  export type InvokeEchoStepTemplateErrors = {
@@ -2102,7 +3879,10 @@ export type InvokeEchoStepTemplateResponse =
  export type InvokeImageGenStepTemplateData = {
  body?: ImageGenInput;
  path?: never;
- query?: never;
+ query?: {
+ experimental?: boolean;
+ allowMatureContent?: boolean;
+ };
  url: '/v2/consumer/recipes/imageGen';
  };
  export type InvokeImageGenStepTemplateErrors = {
@@ -2128,7 +3908,10 @@ export type InvokeImageGenStepTemplateResponse =
  export type InvokeImageResourceTrainingStepTemplateData = {
  body?: ImageResourceTrainingInput;
  path?: never;
- query?: never;
+ query?: {
+ experimental?: boolean;
+ allowMatureContent?: boolean;
+ };
  url: '/v2/consumer/recipes/imageResourceTraining';
  };
  export type InvokeImageResourceTrainingStepTemplateErrors = {
@@ -2154,7 +3937,10 @@ export type InvokeImageResourceTrainingStepTemplateResponse =
  export type InvokeImageUploadStepTemplateData = {
  body?: string;
  path?: never;
- query?: never;
+ query?: {
+ experimental?: boolean;
+ allowMatureContent?: boolean;
+ };
  url: '/v2/consumer/recipes/imageUpload';
  };
  export type InvokeImageUploadStepTemplateErrors = {
@@ -2177,10 +3963,129 @@ export type InvokeImageUploadStepTemplateResponses = {
  };
  export type InvokeImageUploadStepTemplateResponse =
  InvokeImageUploadStepTemplateResponses[keyof InvokeImageUploadStepTemplateResponses];
+ export type InvokeImageUpscalerStepTemplateData = {
+ body?: ImageUpscalerInput;
+ path?: never;
+ query?: {
+ experimental?: boolean;
+ allowMatureContent?: boolean;
+ };
+ url: '/v2/consumer/recipes/imageUpscaler';
+ };
+ export type InvokeImageUpscalerStepTemplateErrors = {
+ /**
+ * Bad Request
+ */
+ 400: ProblemDetails;
+ /**
+ * Unauthorized
+ */
+ 401: ProblemDetails;
+ };
+ export type InvokeImageUpscalerStepTemplateError =
+ InvokeImageUpscalerStepTemplateErrors[keyof InvokeImageUpscalerStepTemplateErrors];
+ export type InvokeImageUpscalerStepTemplateResponses = {
+ /**
+ * OK
+ */
+ 200: ImageUpscalerOutput;
+ };
+ export type InvokeImageUpscalerStepTemplateResponse =
+ InvokeImageUpscalerStepTemplateResponses[keyof InvokeImageUpscalerStepTemplateResponses];
+ export type InvokeMediaHashStepTemplateData = {
+ body?: MediaHashInput;
+ path?: never;
+ query?: {
+ experimental?: boolean;
+ allowMatureContent?: boolean;
+ };
+ url: '/v2/consumer/recipes/mediaHash';
+ };
+ export type InvokeMediaHashStepTemplateErrors = {
+ /**
+ * Bad Request
+ */
+ 400: ProblemDetails;
+ /**
+ * Unauthorized
+ */
+ 401: ProblemDetails;
+ };
+ export type InvokeMediaHashStepTemplateError =
+ InvokeMediaHashStepTemplateErrors[keyof InvokeMediaHashStepTemplateErrors];
+ export type InvokeMediaHashStepTemplateResponses = {
+ /**
+ * OK
+ */
+ 200: MediaHashOutput;
+ };
+ export type InvokeMediaHashStepTemplateResponse =
+ InvokeMediaHashStepTemplateResponses[keyof InvokeMediaHashStepTemplateResponses];
+ export type InvokeMediaRatingStepTemplateData = {
+ body?: MediaRatingInput;
+ path?: never;
+ query?: {
+ experimental?: boolean;
+ allowMatureContent?: boolean;
+ };
+ url: '/v2/consumer/recipes/mediaRating';
+ };
+ export type InvokeMediaRatingStepTemplateErrors = {
+ /**
+ * Bad Request
+ */
+ 400: ProblemDetails;
+ /**
+ * Unauthorized
+ */
+ 401: ProblemDetails;
+ };
+ export type InvokeMediaRatingStepTemplateError =
+ InvokeMediaRatingStepTemplateErrors[keyof InvokeMediaRatingStepTemplateErrors];
+ export type InvokeMediaRatingStepTemplateResponses = {
+ /**
+ * OK
+ */
+ 200: MediaRatingOutput;
+ };
+ export type InvokeMediaRatingStepTemplateResponse =
+ InvokeMediaRatingStepTemplateResponses[keyof InvokeMediaRatingStepTemplateResponses];
+ export type InvokePreprocessImageStepTemplateData = {
+ body?: PreprocessImageInput;
+ path?: never;
+ query?: {
+ experimental?: boolean;
+ allowMatureContent?: boolean;
+ };
+ url: '/v2/consumer/recipes/preprocessImage';
+ };
+ export type InvokePreprocessImageStepTemplateErrors = {
+ /**
+ * Bad Request
+ */
+ 400: ProblemDetails;
+ /**
+ * Unauthorized
+ */
+ 401: ProblemDetails;
+ };
+ export type InvokePreprocessImageStepTemplateError =
+ InvokePreprocessImageStepTemplateErrors[keyof InvokePreprocessImageStepTemplateErrors];
+ export type InvokePreprocessImageStepTemplateResponses = {
+ /**
+ * OK
+ */
+ 200: PreprocessImageOutput;
+ };
+ export type InvokePreprocessImageStepTemplateResponse =
+ InvokePreprocessImageStepTemplateResponses[keyof InvokePreprocessImageStepTemplateResponses];
  export type InvokeTextToImageStepTemplateData = {
  body?: TextToImageInput;
  path?: never;
- query?: never;
+ query?: {
+ experimental?: boolean;
+ allowMatureContent?: boolean;
+ };
  url: '/v2/consumer/recipes/textToImage';
  };
  export type InvokeTextToImageStepTemplateErrors = {
@@ -2203,10 +4108,42 @@ export type InvokeTextToImageStepTemplateResponses = {
  };
  export type InvokeTextToImageStepTemplateResponse =
  InvokeTextToImageStepTemplateResponses[keyof InvokeTextToImageStepTemplateResponses];
+ export type InvokeTrainingStepTemplateData = {
+ body?: TrainingInput;
+ path?: never;
+ query?: {
+ experimental?: boolean;
+ allowMatureContent?: boolean;
+ };
+ url: '/v2/consumer/recipes/training';
+ };
+ export type InvokeTrainingStepTemplateErrors = {
+ /**
+ * Bad Request
+ */
+ 400: ProblemDetails;
+ /**
+ * Unauthorized
+ */
+ 401: ProblemDetails;
+ };
+ export type InvokeTrainingStepTemplateError =
+ InvokeTrainingStepTemplateErrors[keyof InvokeTrainingStepTemplateErrors];
+ export type InvokeTrainingStepTemplateResponses = {
+ /**
+ * OK
+ */
+ 200: TrainingOutput;
+ };
+ export type InvokeTrainingStepTemplateResponse =
+ InvokeTrainingStepTemplateResponses[keyof InvokeTrainingStepTemplateResponses];
  export type InvokeVideoEnhancementStepTemplateData = {
  body?: VideoEnhancementInput;
  path?: never;
- query?: never;
+ query?: {
+ experimental?: boolean;
+ allowMatureContent?: boolean;
+ };
  url: '/v2/consumer/recipes/videoEnhancement';
  };
  export type InvokeVideoEnhancementStepTemplateErrors = {
@@ -2229,10 +4166,42 @@ export type InvokeVideoEnhancementStepTemplateResponses = {
  };
  export type InvokeVideoEnhancementStepTemplateResponse =
  InvokeVideoEnhancementStepTemplateResponses[keyof InvokeVideoEnhancementStepTemplateResponses];
+ export type InvokeVideoFrameExtractionStepTemplateData = {
+ body?: VideoFrameExtractionInput;
+ path?: never;
+ query?: {
+ experimental?: boolean;
+ allowMatureContent?: boolean;
+ };
+ url: '/v2/consumer/recipes/videoFrameExtraction';
+ };
+ export type InvokeVideoFrameExtractionStepTemplateErrors = {
+ /**
+ * Bad Request
+ */
+ 400: ProblemDetails;
+ /**
+ * Unauthorized
+ */
+ 401: ProblemDetails;
+ };
+ export type InvokeVideoFrameExtractionStepTemplateError =
+ InvokeVideoFrameExtractionStepTemplateErrors[keyof InvokeVideoFrameExtractionStepTemplateErrors];
+ export type InvokeVideoFrameExtractionStepTemplateResponses = {
+ /**
+ * OK
+ */
+ 200: VideoFrameExtractionOutput;
+ };
+ export type InvokeVideoFrameExtractionStepTemplateResponse =
+ InvokeVideoFrameExtractionStepTemplateResponses[keyof InvokeVideoFrameExtractionStepTemplateResponses];
  export type InvokeVideoGenStepTemplateData = {
  body?: VideoGenInput;
  path?: never;
- query?: never;
+ query?: {
+ experimental?: boolean;
+ allowMatureContent?: boolean;
+ };
  url: '/v2/consumer/recipes/videoGen';
  };
  export type InvokeVideoGenStepTemplateErrors = {
@@ -2255,6 +4224,122 @@ export type InvokeVideoGenStepTemplateResponses = {
  };
  export type InvokeVideoGenStepTemplateResponse =
  InvokeVideoGenStepTemplateResponses[keyof InvokeVideoGenStepTemplateResponses];
+ export type InvokeVideoInterpolationStepTemplateData = {
+ body?: VideoInterpolationInput;
+ path?: never;
+ query?: {
+ experimental?: boolean;
+ allowMatureContent?: boolean;
+ };
+ url: '/v2/consumer/recipes/videoInterpolation';
+ };
+ export type InvokeVideoInterpolationStepTemplateErrors = {
+ /**
+ * Bad Request
+ */
+ 400: ProblemDetails;
+ /**
+ * Unauthorized
+ */
+ 401: ProblemDetails;
+ };
+ export type InvokeVideoInterpolationStepTemplateError =
+ InvokeVideoInterpolationStepTemplateErrors[keyof InvokeVideoInterpolationStepTemplateErrors];
+ export type InvokeVideoInterpolationStepTemplateResponses = {
+ /**
+ * OK
+ */
+ 200: VideoInterpolationOutput;
+ };
+ export type InvokeVideoInterpolationStepTemplateResponse =
+ InvokeVideoInterpolationStepTemplateResponses[keyof InvokeVideoInterpolationStepTemplateResponses];
+ export type InvokeVideoMetadataStepTemplateData = {
+ body?: VideoMetadataInput;
+ path?: never;
+ query?: {
+ experimental?: boolean;
+ allowMatureContent?: boolean;
+ };
+ url: '/v2/consumer/recipes/videoMetadata';
+ };
+ export type InvokeVideoMetadataStepTemplateErrors = {
+ /**
+ * Bad Request
+ */
+ 400: ProblemDetails;
+ /**
+ * Unauthorized
+ */
+ 401: ProblemDetails;
+ };
+ export type InvokeVideoMetadataStepTemplateError =
+ InvokeVideoMetadataStepTemplateErrors[keyof InvokeVideoMetadataStepTemplateErrors];
+ export type InvokeVideoMetadataStepTemplateResponses = {
+ /**
+ * OK
+ */
+ 200: VideoMetadataOutput;
+ };
+ export type InvokeVideoMetadataStepTemplateResponse =
+ InvokeVideoMetadataStepTemplateResponses[keyof InvokeVideoMetadataStepTemplateResponses];
+ export type InvokeVideoUpscalerStepTemplateData = {
+ body?: VideoUpscalerInput;
+ path?: never;
+ query?: {
+ experimental?: boolean;
+ allowMatureContent?: boolean;
+ };
+ url: '/v2/consumer/recipes/videoUpscaler';
+ };
+ export type InvokeVideoUpscalerStepTemplateErrors = {
+ /**
+ * Bad Request
+ */
+ 400: ProblemDetails;
+ /**
+ * Unauthorized
+ */
+ 401: ProblemDetails;
+ };
+ export type InvokeVideoUpscalerStepTemplateError =
+ InvokeVideoUpscalerStepTemplateErrors[keyof InvokeVideoUpscalerStepTemplateErrors];
+ export type InvokeVideoUpscalerStepTemplateResponses = {
+ /**
+ * OK
+ */
+ 200: VideoUpscalerOutput;
+ };
+ export type InvokeVideoUpscalerStepTemplateResponse =
+ InvokeVideoUpscalerStepTemplateResponses[keyof InvokeVideoUpscalerStepTemplateResponses];
+ export type InvokeWdTaggingStepTemplateData = {
+ body?: WdTaggingInput;
+ path?: never;
+ query?: {
+ experimental?: boolean;
+ allowMatureContent?: boolean;
+ };
+ url: '/v2/consumer/recipes/wdTagging';
+ };
+ export type InvokeWdTaggingStepTemplateErrors = {
+ /**
+ * Bad Request
+ */
+ 400: ProblemDetails;
+ /**
+ * Unauthorized
+ */
+ 401: ProblemDetails;
+ };
+ export type InvokeWdTaggingStepTemplateError =
+ InvokeWdTaggingStepTemplateErrors[keyof InvokeWdTaggingStepTemplateErrors];
+ export type InvokeWdTaggingStepTemplateResponses = {
+ /**
+ * OK
+ */
+ 200: WdTaggingOutput;
+ };
+ export type InvokeWdTaggingStepTemplateResponse =
+ InvokeWdTaggingStepTemplateResponses[keyof InvokeWdTaggingStepTemplateResponses];
  export type InvalidateResourceData = {
  body?: never;
  path: {