@civitai/client 0.2.0-beta.37 → 0.2.0-beta.39

This diff shows the content of publicly available package versions released to one of the supported registries. It is provided for informational purposes only and reflects the changes between package versions as they appear in their respective public registries.
@@ -49,13 +49,13 @@ export type AiToolkitTrainingInput = TrainingInput & {
49
49
  * Various methods have been proposed for smart learning, but the most commonly used in LoRA learning is "adamw8bit".
50
50
  */
51
51
  optimizerType?:
52
- | 'adam'
53
52
  | 'adamw'
54
53
  | 'adamw8bit'
55
54
  | 'adam8bit'
56
55
  | 'lion'
57
56
  | 'lion8bit'
58
57
  | 'adafactor'
58
+ | 'adafactor '
59
59
  | 'adagrad'
60
60
  | 'prodigy'
61
61
  | 'prodigy8bit'
@@ -93,6 +93,16 @@ export type AiToolkitTrainingInput = TrainingInput & {
93
93
  } & {
94
94
  engine: 'ai-toolkit';
95
95
  };
96
+ /**
97
+ * Cover image configuration for ACE Step audio output.
98
+ * When present, the output is a WebM video with this image as the visual.
99
+ */
100
+ export type AceStepAudioCover = {
101
+ /**
102
+ * Either A URL, A DataURL or a Base64 string
103
+ */
104
+ imageUrl: string;
105
+ };
96
106
  /**
97
107
  * Input parameters for ACE Step 1.5 audio generation workflow step
98
108
  */
@@ -135,12 +145,14 @@ export type AceStepAudioInput = {
135
145
  * Optional model override (uses default ACE Step 1.5 turbo if not specified)
136
146
  */
137
147
  model?: string | null;
148
+ cover?: AceStepAudioCover;
138
149
  };
139
150
  /**
140
- * Output from ACE Step 1.5 audio generation workflow step
151
+ * Output from ACE Step 1.5 audio generation workflow step.
152
+ * Returns a VideoBlob (when a background image is provided) or an AudioBlob (audio only).
141
153
  */
142
154
  export type AceStepAudioOutput = {
143
- audioBlob: AudioBlob;
155
+ blob: Blob;
144
156
  };
145
157
  /**
146
158
  * Workflow step for generating music using ACE Step 1.5.
@@ -704,6 +716,25 @@ export type ComfyLtx2ExtendVideoInput = ComfyLtx2VideoGenInput & {
704
716
  } & {
705
717
  operation: 'extendVideo';
706
718
  };
719
+ /**
720
+ * Generate video guided by first and/or last frame images using LTXVAddGuide conditioning (ComfyUI backend)
721
+ */
722
+ export type ComfyLtx2FirstLastFrameToVideoInput = ComfyLtx2VideoGenInput & {
723
+ /**
724
+ * First frame guide image. At least one of FirstFrame or LastFrame must be provided.
725
+ */
726
+ firstFrame?: string | null;
727
+ /**
728
+ * Last frame guide image. At least one of FirstFrame or LastFrame must be provided.
729
+ */
730
+ lastFrame?: string | null;
731
+ /**
732
+ * Strength of the frame guide conditioning (0.0 to 1.0).
733
+ */
734
+ frameGuideStrength?: number;
735
+ } & {
736
+ operation: 'firstLastFrameToVideo';
737
+ };
707
738
  /**
708
739
  * LTX Video v2 generation via ComfyUI backend
709
740
  */
@@ -1315,6 +1346,85 @@ export type GoogleImageGenInput = ImageGenInput & {
1315
1346
  } & {
1316
1347
  engine: 'google';
1317
1348
  };
1349
+ export type GrokCreateImageGenInput = GrokImageGenInput & {
1350
+ /**
1351
+ * Aspect ratio: 2:1, 20:9, 19.5:9, 16:9, 4:3, 3:2, 1:1, 2:3, 3:4, 9:16, 9:19.5, 9:20, 1:2
1352
+ */
1353
+ aspectRatio?:
1354
+ | '2:1'
1355
+ | '20:9'
1356
+ | '19.5:9'
1357
+ | '16:9'
1358
+ | '4:3'
1359
+ | '3:2'
1360
+ | '1:1'
1361
+ | '2:3'
1362
+ | '3:4'
1363
+ | '9:16'
1364
+ | '9:19.5'
1365
+ | '9:20'
1366
+ | '1:2';
1367
+ } & {
1368
+ operation: 'createImage';
1369
+ };
1370
+ export type GrokEditImageGenInput = GrokImageGenInput & {
1371
+ images: Array<string>;
1372
+ } & {
1373
+ operation: 'editImage';
1374
+ };
1375
+ /**
1376
+ * Grok Edit-Video
1377
+ * FAL Endpoint: xai/grok-imagine-video/edit-video
1378
+ * Input video is resized to max 854x480 and truncated to 8 seconds.
1379
+ * Uses FFProbe to analyze input video duration for accurate costing.
1380
+ */
1381
+ export type GrokEditVideoInput = GrokVideoGenInput & {
1382
+ videoUrl: string;
1383
+ analyzedDuration?: number | null;
1384
+ } & {
1385
+ operation: 'edit-video';
1386
+ };
1387
+ export type GrokImageGenInput = ImageGenInput & {
1388
+ engine: 'grok';
1389
+ } & {
1390
+ operation: string;
1391
+ prompt: string;
1392
+ quantity?: number;
1393
+ } & {
1394
+ engine: 'grok';
1395
+ };
1396
+ /**
1397
+ * Grok Image-to-Video
1398
+ * FAL Endpoint: xai/grok-imagine-video/image-to-video
1399
+ */
1400
+ export type GrokImageToVideoInput = GrokVideoGenInput & {
1401
+ aspectRatio?: 'auto' | '16:9' | '4:3' | '3:2' | '1:1' | '2:3' | '3:4' | '9:16';
1402
+ images?: [string];
1403
+ } & {
1404
+ operation: 'image-to-video';
1405
+ };
1406
+ /**
1407
+ * Grok Text-to-Video
1408
+ * FAL Endpoint: xai/grok-imagine-video/text-to-video
1409
+ */
1410
+ export type GrokTextToVideoInput = GrokVideoGenInput & {
1411
+ aspectRatio?: '16:9' | '4:3' | '3:2' | '1:1' | '2:3' | '3:4' | '9:16';
1412
+ } & {
1413
+ operation: 'text-to-video';
1414
+ };
1415
+ /**
1416
+ * Base class for Grok video generation (xAI's Grok-Imagine-Video model via FAL).
1417
+ * Discriminator: operation (text-to-video, image-to-video, edit-video)
1418
+ */
1419
+ export type GrokVideoGenInput = VideoGenInput & {
1420
+ engine: 'grok';
1421
+ } & {
1422
+ operation: string | null;
1423
+ duration?: number;
1424
+ resolution?: '480p' | '720p';
1425
+ } & {
1426
+ engine: 'grok';
1427
+ };
1318
1428
  export declare const HaiperVideoGenAspectRatio: {
1319
1429
  readonly '16:9': '16:9';
1320
1430
  readonly '4:3': '4:3';
@@ -2880,6 +2990,33 @@ export declare const Scheduler: {
2880
2990
  * The available options for schedulers used in image generation.
2881
2991
  */
2882
2992
  export type Scheduler = (typeof Scheduler)[keyof typeof Scheduler];
2993
+ export type Sd1CreateImageGenInput = Sd1ImageGenInput & {
2994
+ width?: number;
2995
+ height?: number;
2996
+ } & {
2997
+ operation: 'createImage';
2998
+ };
2999
+ export type Sd1ImageGenInput = SdCppImageGenInput & {
3000
+ operation: string;
3001
+ prompt: string;
3002
+ negativePrompt?: string | null;
3003
+ sampleMethod?: SdCppSampleMethod;
3004
+ schedule?: SdCppSchedule;
3005
+ steps?: number;
3006
+ cfgScale?: number;
3007
+ seed?: number | null;
3008
+ quantity?: number;
3009
+ model: string;
3010
+ vaeModel?: string | null;
3011
+ loras?: {
3012
+ [key: string]: number;
3013
+ };
3014
+ embeddings?: Array<string>;
3015
+ clipSkip?: number;
3016
+ uCache?: SdCppUCacheMode;
3017
+ } & {
3018
+ ecosystem: 'sd1';
3019
+ };
2883
3020
  export type SdCppImageGenInput = ImageGenInput & {
2884
3021
  engine: 'sdcpp';
2885
3022
  } & {
@@ -2912,6 +3049,11 @@ export declare const SdCppSchedule: {
2912
3049
  readonly BONG_TANGENT: 'bong_tangent';
2913
3050
  };
2914
3051
  export type SdCppSchedule = (typeof SdCppSchedule)[keyof typeof SdCppSchedule];
3052
+ export declare const SdCppUCacheMode: {
3053
+ readonly OFF: 'off';
3054
+ readonly NORMAL: 'normal';
3055
+ };
3056
+ export type SdCppUCacheMode = (typeof SdCppUCacheMode)[keyof typeof SdCppUCacheMode];
2915
3057
  /**
2916
3058
  * AI Toolkit training for Stable Diffusion XL models
2917
3059
  */
@@ -2932,6 +3074,32 @@ export type SdxlAiToolkitTrainingInput = AiToolkitTrainingInput & {
2932
3074
  } & {
2933
3075
  ecosystem: 'sdxl';
2934
3076
  };
3077
+ export type SdxlCreateImageGenInput = SdxlImageGenInput & {
3078
+ width?: number;
3079
+ height?: number;
3080
+ } & {
3081
+ operation: 'createImage';
3082
+ };
3083
+ export type SdxlImageGenInput = SdCppImageGenInput & {
3084
+ operation: string;
3085
+ prompt: string;
3086
+ negativePrompt?: string | null;
3087
+ sampleMethod?: SdCppSampleMethod;
3088
+ schedule?: SdCppSchedule;
3089
+ steps?: number;
3090
+ cfgScale?: number;
3091
+ seed?: number | null;
3092
+ quantity?: number;
3093
+ model: string;
3094
+ vaeModel?: string | null;
3095
+ loras?: {
3096
+ [key: string]: number;
3097
+ };
3098
+ embeddings?: Array<string>;
3099
+ uCache?: SdCppUCacheMode;
3100
+ } & {
3101
+ ecosystem: 'sdxl';
3102
+ };
2935
3103
  export type SeedreamImageGenInput = ImageGenInput & {
2936
3104
  engine: 'seedream';
2937
3105
  } & {
@@ -3743,6 +3911,19 @@ export type VideoUpscalerStepTemplate = WorkflowStepTemplate & {
3743
3911
  } & {
3744
3912
  $type: 'videoUpscaler';
3745
3913
  };
3914
+ export type ViduQ3VideoGenInput = VideoGenInput & {
3915
+ engine: 'vidu-q3';
3916
+ } & {
3917
+ seed?: number | null;
3918
+ duration?: number;
3919
+ resolution?: '360p' | '540p' | '720p' | '1080p';
3920
+ turbo?: boolean;
3921
+ enableAudio?: boolean;
3922
+ aspectRatio?: '16:9' | '9:16' | '1:1' | '4:3' | '3:4';
3923
+ images?: Array<string>;
3924
+ } & {
3925
+ engine: 'vidu-q3';
3926
+ };
3746
3927
  export type ViduVideoGenInput = VideoGenInput & {
3747
3928
  engine: 'vidu';
3748
3929
  } & {
@@ -3769,6 +3950,7 @@ export type ViduVideoGenInput = VideoGenInput & {
3769
3950
  export declare const ViduVideoGenModel: {
3770
3951
  readonly DEFAULT: 'default';
3771
3952
  readonly Q1: 'q1';
3953
+ readonly Q3: 'q3';
3772
3954
  };
3773
3955
  export type ViduVideoGenModel = (typeof ViduVideoGenModel)[keyof typeof ViduVideoGenModel];
3774
3956
  export declare const ViduVideoGenStyle: {
@@ -260,6 +260,10 @@ export const SdCppSchedule = {
260
260
  AYS: 'ays',
261
261
  BONG_TANGENT: 'bong_tangent',
262
262
  };
263
+ export const SdCppUCacheMode = {
264
+ OFF: 'off',
265
+ NORMAL: 'normal',
266
+ };
263
267
  export const SeedreamVersion = {
264
268
  V3: 'v3',
265
269
  V4: 'v4',
@@ -297,6 +301,7 @@ export const Veo3Version = {
297
301
  export const ViduVideoGenModel = {
298
302
  DEFAULT: 'default',
299
303
  Q1: 'q1',
304
+ Q3: 'q3',
300
305
  };
301
306
  export const ViduVideoGenStyle = {
302
307
  GENERAL: 'general',
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "@civitai/client",
3
- "version": "0.2.0-beta.37",
3
+ "version": "0.2.0-beta.39",
4
4
  "description": "Civitai's javascript client for generating ai content",
5
5
  "main": "dist/index.js",
6
6
  "types": "dist/index.d.ts",