@civitai/client 0.2.0-beta.10 → 0.2.0-beta.12
@@ -5,6 +5,9 @@ import type {
   HeadBlobData,
   HeadBlobResponses,
   HeadBlobErrors,
+  GetBlobContentData,
+  GetBlobContentResponses,
+  GetBlobContentErrors,
   InvokeAgeClassificationStepTemplateData,
   InvokeAgeClassificationStepTemplateResponses,
   InvokeAgeClassificationStepTemplateErrors,
@@ -20,6 +23,9 @@ import type {
   InvokeImageResourceTrainingStepTemplateData,
   InvokeImageResourceTrainingStepTemplateResponses,
   InvokeImageResourceTrainingStepTemplateErrors,
+  InvokeTrainingStepTemplateData,
+  InvokeTrainingStepTemplateResponses,
+  InvokeTrainingStepTemplateErrors,
   InvokeImageUploadStepTemplateData,
   InvokeImageUploadStepTemplateResponses,
   InvokeImageUploadStepTemplateErrors,
@@ -92,7 +98,7 @@ export type Options<
   meta?: Record<string, unknown>;
 };
 /**
- * Get blob by ID. This will
+ * Get blob by ID. This will redirect to a cacheable content URL.
  */
 export declare const getBlob: <ThrowOnError extends boolean = false>(
   options: Options<GetBlobData, ThrowOnError>
@@ -103,6 +109,17 @@ export declare const getBlob: <ThrowOnError extends boolean = false>(
 export declare const headBlob: <ThrowOnError extends boolean = false>(
   options: Options<HeadBlobData, ThrowOnError>
 ) => import('./client').RequestResult<HeadBlobResponses, HeadBlobErrors, ThrowOnError, 'fields'>;
+/**
+ * Serves cacheable blob content using a deterministic encrypted token
+ */
+export declare const getBlobContent: <ThrowOnError extends boolean = false>(
+  options: Options<GetBlobContentData, ThrowOnError>
+) => import('./client').RequestResult<
+  GetBlobContentResponses,
+  GetBlobContentErrors,
+  ThrowOnError,
+  'fields'
+>;
 /**
  * Age classification
  * Detects minors in media content. Returns a boolean value indicating whether the content contains minors as well as details on where minors are detected.
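The new getBlobContent call is a plain GET keyed by the encrypted-token path parameter. A minimal usage sketch, assuming the package re-exports the generated SDK functions and that the token comes from elsewhere (e.g. the cacheable-content redirect issued by getBlob); the token value here is a placeholder:

import { getBlobContent } from '@civitai/client';

// With ThrowOnError left at its default (false), errors are returned, not thrown.
const { data, error } = await getBlobContent({
  path: { encryptedToken: '<token-from-getBlob-redirect>' }, // placeholder
});
if (error) {
  // error is typed as GetBlobContentError (400/401/404 ProblemDetails, 500 unknown)
  console.error(error);
}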
@@ -166,6 +183,18 @@ export declare const invokeImageResourceTrainingStepTemplate: <
   ThrowOnError,
   'fields'
 >;
+/**
+ * AI Toolkit Training
+ * Train models using AI Toolkit engine
+ */
+export declare const invokeTrainingStepTemplate: <ThrowOnError extends boolean = false>(
+  options?: Options<InvokeTrainingStepTemplateData, ThrowOnError>
+) => import('./client').RequestResult<
+  InvokeTrainingStepTemplateResponses,
+  InvokeTrainingStepTemplateErrors,
+  ThrowOnError,
+  'fields'
+>;
 /**
  * Image upload
  * Uploads an image to be used in a workflow
@@ -1,7 +1,7 @@
 // This file is auto-generated by @hey-api/openapi-ts
 import { client as _heyApiClient } from './client.gen';
 /**
- * Get blob by ID. This will
+ * Get blob by ID. This will redirect to a cacheable content URL.
  */
 export const getBlob = (options) => {
   var _a;
@@ -32,6 +32,22 @@ export const headBlob = (options) => {
     ...options,
   });
 };
+/**
+ * Serves cacheable blob content using a deterministic encrypted token
+ */
+export const getBlobContent = (options) => {
+  var _a;
+  return ((_a = options.client) !== null && _a !== void 0 ? _a : _heyApiClient).get({
+    security: [
+      {
+        scheme: 'bearer',
+        type: 'http',
+      },
+    ],
+    url: '/v2/consumer/blobs/content/{encryptedToken}',
+    ...options,
+  });
+};
 /**
  * Age classification
  * Detects minors in media content. Returns a boolean value indicating whether the content contains minors as well as details on where minors are detected.
@@ -138,6 +154,27 @@ export const invokeImageResourceTrainingStepTemplate = (options) => {
     },
   });
 };
+/**
+ * AI Toolkit Training
+ * Train models using AI Toolkit engine
+ */
+export const invokeTrainingStepTemplate = (options) => {
+  var _a;
+  return ((_a = options === null || options === void 0 ? void 0 : options.client) !== null && _a !== void 0 ? _a : _heyApiClient).post({
+    security: [
+      {
+        scheme: 'bearer',
+        type: 'http',
+      },
+    ],
+    url: '/v2/consumer/recipes/training',
+    ...options,
+    headers: {
+      'Content-Type': 'application/json',
+      ...options === null || options === void 0 ? void 0 : options.headers,
+    },
+  });
+};
 /**
  * Image upload
  * Uploads an image to be used in a workflow
@@ -628,6 +628,160 @@ export type ImageResourceTrainingStepTemplate = WorkflowStepTemplate & {
 } & {
   $type: 'imageResourceTraining';
 };
+/**
+ * Input for a training step.
+ */
+export type TrainingInput = {
+  /**
+   * The training engine to use
+   */
+  engine: 'ai-toolkit';
+  /**
+   * The model ecosystem (sd1, sdxl, flux1, sd3, wan)
+   */
+  ecosystem: string;
+  /**
+   * The model URN to train upon
+   */
+  model: string;
+  /**
+   * Model variant (e.g., 'dev', 'large', '2.1')
+   */
+  modelVariant?: string | null;
+  /**
+   * Training data specification
+   */
+  trainingData: {
+    /**
+     * Type of training data (e.g., 'zip')
+     */
+    type: string;
+    /**
+     * URN/URL to training data
+     */
+    sourceUrl: string;
+    /**
+     * Number of training items
+     */
+    count: number;
+  };
+  /**
+   * Sample generation configuration
+   */
+  samples: {
+    /**
+     * Sample generation prompts
+     */
+    prompts: Array<string>;
+  };
+  /**
+   * Number of training epochs
+   */
+  epochs?: number;
+  /**
+   * Number of repeats per image
+   */
+  numRepeats?: number;
+  /**
+   * Training batch size
+   */
+  trainBatchSize?: number | null;
+  /**
+   * Training resolution (512, 1024, etc.)
+   */
+  resolution?: number | null;
+  /**
+   * Learning rate
+   */
+  lr?: number;
+  /**
+   * Text encoder learning rate
+   */
+  textEncoderLr?: number | null;
+  /**
+   * Whether to train text encoder
+   */
+  trainTextEncoder?: boolean;
+  /**
+   * Learning rate scheduler type
+   */
+  lrScheduler?: string;
+  /**
+   * Optimizer type
+   */
+  optimizerType?: string;
+  /**
+   * LoRA network dimension
+   */
+  networkDim?: number;
+  /**
+   * LoRA network alpha
+   */
+  networkAlpha?: number;
+  /**
+   * Noise offset for training
+   */
+  noiseOffset?: number;
+  /**
+   * Min SNR gamma value
+   */
+  minSnrGamma?: number | null;
+  /**
+   * Enable horizontal flip augmentation
+   */
+  flipAugmentation?: boolean;
+  /**
+   * Shuffle tokens during training
+   */
+  shuffleTokens?: boolean;
+  /**
+   * Number of tokens to keep at start
+   */
+  keepTokens?: number;
+};
+export type TrainingOutput = {
+  /**
+   * Training status
+   */
+  status: string;
+  /**
+   * Training epoch results
+   */
+  epochs?: Array<EpochResult> | null;
+  /**
+   * Generated sample images
+   */
+  samples?: Array<{
+    prompt?: string;
+    blob?: Blob;
+  }> | null;
+  modelBlob?: Blob;
+  /**
+   * Estimated time remaining (minutes)
+   */
+  eta?: number | null;
+};
+/**
+ * AI Toolkit Training Step
+ */
+export type TrainingStep = WorkflowStep & {
+  $type: 'training';
+} & {
+  input: TrainingInput;
+  output?: TrainingOutput;
+} & {
+  $type: 'training';
+};
+/**
+ * AI Toolkit Training Step Template
+ */
+export type TrainingStepTemplate = WorkflowStepTemplate & {
+  $type: 'training';
+} & {
+  input: TrainingInput;
+} & {
+  $type: 'training';
+};
 /**
  * Available image transformers.
  */
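Of the fields above, only engine, ecosystem, model, trainingData, and samples are required; the rest are optional tuning knobs. A hedged sketch of building a TrainingInput and invoking the new recipe — the URN and dataset URL are placeholders, and a top-level re-export of invokeTrainingStepTemplate is assumed:

import { invokeTrainingStepTemplate, type TrainingInput } from '@civitai/client';

const body: TrainingInput = {
  engine: 'ai-toolkit',
  ecosystem: 'sdxl',
  model: 'urn:air:sdxl:checkpoint:civitai:0@0', // placeholder URN
  trainingData: {
    type: 'zip',
    sourceUrl: 'https://example.com/dataset.zip', // placeholder
    count: 40,
  },
  samples: { prompts: ['a photo of a red fox in snow'] },
  epochs: 10, // optional knobs below this line
  networkDim: 32,
  networkAlpha: 16,
};

const { data } = await invokeTrainingStepTemplate({ body });
// data is a TrainingOutput: status, per-epoch results, sample blobs, and an ETA in minutes.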
@@ -1308,7 +1462,7 @@ export type SoraVideoGenInput = VideoGenInput & {
   duration?: number;
   seed?: number | null;
   resolution?: '720p' | '1080p';
-  aspectRatio?: '16:9' | '9:16';
+  aspectRatio?: 'auto' | '16:9' | '9:16';
   usePro?: boolean;
 } & {
   engine: 'sora';
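The only Sora change is the widened aspectRatio union; 'auto' presumably defers the ratio choice to the service (an assumption — this diff carries no docs for it). The widened member can be checked at the type level:

import type { SoraVideoGenInput } from '@civitai/client';

// Resolves to 'auto' | '16:9' | '9:16' as of beta.12.
type SoraAspectRatio = NonNullable<SoraVideoGenInput['aspectRatio']>;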
@@ -1563,6 +1717,11 @@ export declare const Veo3AspectRatio: {
   readonly '1:1': '1:1';
 };
 export type Veo3AspectRatio = (typeof Veo3AspectRatio)[keyof typeof Veo3AspectRatio];
+export declare const Veo3Version: {
+  readonly '3_0': '3.0';
+  readonly '3_1': '3.1';
+};
+export type Veo3Version = (typeof Veo3Version)[keyof typeof Veo3Version];
 export type Veo3VideoGenInput = VideoGenInput & {
   engine: 'veo3';
 } & {
@@ -1574,6 +1733,7 @@ export type Veo3VideoGenInput = VideoGenInput & {
   seed?: number | null;
   fastMode?: boolean;
   images?: Array<string>;
+  version?: Veo3Version;
 } & {
   engine: 'veo3';
 };
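Veo3Version follows the same frozen-object-plus-type pattern as Veo3AspectRatio. A sketch of pinning a version on a Veo3 input; fields beyond engine and version are elided because the rest of VideoGenInput is outside this diff, and the top-level re-export is assumed:

import { Veo3Version } from '@civitai/client'; // assumed re-export

const veo3Input = {
  engine: 'veo3' as const,
  version: Veo3Version['3_1'], // the string '3.1'; omit to use the server default
  // ...remaining Veo3VideoGenInput fields (prompt, aspectRatio, etc.)
};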
@@ -2263,6 +2423,14 @@ export type GetBlobErrors = {
    * Unauthorized
    */
   401: ProblemDetails;
+  /**
+   * Forbidden
+   */
+  403: ProblemDetails;
+  /**
+   * Not Found
+   */
+  404: ProblemDetails;
 };
 export type GetBlobError = GetBlobErrors[keyof GetBlobErrors];
 export type HeadBlobData = {
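getBlob can now surface 403 and 404 as typed ProblemDetails. A sketch of narrowing on the raw status, assuming the standard hey-api result shape ({ data, error, request, response }) and with the options value left as a placeholder since GetBlobData's shape is not shown in this diff:

import { getBlob } from '@civitai/client';

declare const blobOptions: Parameters<typeof getBlob>[0]; // placeholder options

const { error, response } = await getBlob(blobOptions);
if (error && response.status === 403) {
  // Forbidden: the caller may not read this blob (new in beta.12)
} else if (error && response.status === 404) {
  // Not Found: the blob does not exist (new in beta.12)
}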
@@ -2294,6 +2462,42 @@ export type HeadBlobResponses = {
   204: void;
 };
 export type HeadBlobResponse = HeadBlobResponses[keyof HeadBlobResponses];
+export type GetBlobContentData = {
+  body?: never;
+  path: {
+    /**
+     * The encrypted token containing blob access parameters
+     */
+    encryptedToken: string;
+  };
+  query?: never;
+  url: '/v2/consumer/blobs/content/{encryptedToken}';
+};
+export type GetBlobContentErrors = {
+  /**
+   * Bad Request
+   */
+  400: ProblemDetails;
+  /**
+   * Unauthorized
+   */
+  401: ProblemDetails;
+  /**
+   * Not Found
+   */
+  404: ProblemDetails;
+  /**
+   * Internal Server Error
+   */
+  500: unknown;
+};
+export type GetBlobContentError = GetBlobContentErrors[keyof GetBlobContentErrors];
+export type GetBlobContentResponses = {
+  /**
+   * OK
+   */
+  200: unknown;
+};
 export type InvokeAgeClassificationStepTemplateData = {
   body?: AgeClassificationInput;
   path?: never;
@@ -2439,6 +2643,35 @@ export type InvokeImageResourceTrainingStepTemplateResponses = {
 };
 export type InvokeImageResourceTrainingStepTemplateResponse =
   InvokeImageResourceTrainingStepTemplateResponses[keyof InvokeImageResourceTrainingStepTemplateResponses];
+export type InvokeTrainingStepTemplateData = {
+  body?: TrainingInput;
+  path?: never;
+  query?: {
+    experimental?: boolean;
+    allowMatureContent?: boolean;
+  };
+  url: '/v2/consumer/recipes/training';
+};
+export type InvokeTrainingStepTemplateErrors = {
+  /**
+   * Bad Request
+   */
+  400: ProblemDetails;
+  /**
+   * Unauthorized
+   */
+  401: ProblemDetails;
+};
+export type InvokeTrainingStepTemplateError =
+  InvokeTrainingStepTemplateErrors[keyof InvokeTrainingStepTemplateErrors];
+export type InvokeTrainingStepTemplateResponses = {
+  /**
+   * OK
+   */
+  200: TrainingOutput;
+};
+export type InvokeTrainingStepTemplateResponse =
+  InvokeTrainingStepTemplateResponses[keyof InvokeTrainingStepTemplateResponses];
 export type InvokeImageUploadStepTemplateData = {
   body?: string;
   path?: never;
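The two query flags on the training recipe ride alongside the body. A short sketch; the flag semantics are assumptions, since the diff names them but does not document them:

await invokeTrainingStepTemplate({
  body, // the TrainingInput from the earlier sketch
  query: {
    experimental: true,        // assumption: opts into experimental engine behavior
    allowMatureContent: false, // assumption: gates mature training data
  },
});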
package/package.json CHANGED
@@ -1,6 +1,6 @@
 {
   "name": "@civitai/client",
-  "version": "0.2.0-beta.10",
+  "version": "0.2.0-beta.12",
   "description": "Civitai's javascript client for generating ai content",
   "main": "dist/index.js",
   "types": "dist/index.d.ts",
@@ -54,4 +54,4 @@
   "publishConfig": {
     "access": "public"
   }
-}
+}