@civitai/client 0.2.0-beta.18 → 0.2.0-beta.19
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/generated/sdk.gen.d.ts +11 -0
- package/dist/generated/sdk.gen.js +17 -0
- package/dist/generated/types.gen.d.ts +421 -37
- package/dist/generated/types.gen.js +71 -0
- package/package.json +1 -1
package/dist/generated/sdk.gen.d.ts

@@ -35,6 +35,9 @@ import type {
   InvokeMediaRatingStepTemplateData,
   InvokeMediaRatingStepTemplateResponses,
   InvokeMediaRatingStepTemplateErrors,
+  InvokePreprocessImageStepTemplateData,
+  InvokePreprocessImageStepTemplateResponses,
+  InvokePreprocessImageStepTemplateErrors,
   InvokeTextToImageStepTemplateData,
   InvokeTextToImageStepTemplateResponses,
   InvokeTextToImageStepTemplateErrors,
@@ -251,6 +254,14 @@ export declare const invokeMediaRatingStepTemplate: <ThrowOnError extends boolean
   ThrowOnError,
   'fields'
 >;
+export declare const invokePreprocessImageStepTemplate: <ThrowOnError extends boolean = false>(
+  options?: Options<InvokePreprocessImageStepTemplateData, ThrowOnError>
+) => import('./client').RequestResult<
+  InvokePreprocessImageStepTemplateResponses,
+  InvokePreprocessImageStepTemplateErrors,
+  ThrowOnError,
+  'fields'
+>;
 /**
  * TextToImage
  * Generate images using text as input
package/dist/generated/sdk.gen.js

@@ -234,6 +234,23 @@ export const invokeMediaRatingStepTemplate = (options) => {
     },
   });
 };
+export const invokePreprocessImageStepTemplate = (options) => {
+  var _a;
+  return ((_a = options === null || options === void 0 ? void 0 : options.client) !== null && _a !== void 0 ? _a : _heyApiClient).post({
+    security: [
+      {
+        scheme: 'bearer',
+        type: 'http',
+      },
+    ],
+    url: '/v2/consumer/recipes/preprocessImage',
+    ...options,
+    headers: {
+      'Content-Type': 'application/json',
+      ...options === null || options === void 0 ? void 0 : options.headers,
+    },
+  });
+};
 /**
  * TextToImage
  * Generate images using text as input
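The new wrapper above is a thin bearer-authenticated POST to /v2/consumer/recipes/preprocessImage. A minimal usage sketch, assuming the package re-exports the generated SDK from its root and that auth is already configured on the default hey-api client (the import path and client setup are assumptions, not shown in this diff):

```ts
import { invokePreprocessImageStepTemplate } from '@civitai/client';

// `kind` selects one of the new preprocessor variants; `image` accepts
// a URL, a data URL, or a base64 string (per PreprocessImageInput below).
const { data, error } = await invokePreprocessImageStepTemplate({
  body: { kind: 'canny', image: 'https://example.com/input.png' },
});

if (error) {
  console.error(error); // 400/401 are typed as ProblemDetails
} else {
  console.log(data?.blob); // PreprocessImageOutput carries an ImageBlob
}
```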
package/dist/generated/types.gen.d.ts

@@ -121,6 +121,27 @@ export type AgeClassifierLabel = {
   isMinor: boolean;
   boundingBox: Array<number>;
 };
+export declare const AnimalPoseBboxDetector: {
+  readonly YOLOX_L_TORCHSCRIPT_PT: 'yolox_l.torchscript.pt';
+  readonly YOLOX_L_ONNX: 'yolox_l.onnx';
+  readonly YOLO_NAS_L_FP16_ONNX: 'yolo_nas_l_fp16.onnx';
+  readonly YOLO_NAS_M_FP16_ONNX: 'yolo_nas_m_fp16.onnx';
+  readonly YOLO_NAS_S_FP16_ONNX: 'yolo_nas_s_fp16.onnx';
+};
+export type AnimalPoseBboxDetector =
+  (typeof AnimalPoseBboxDetector)[keyof typeof AnimalPoseBboxDetector];
+export declare const AnimalPoseEstimator: {
+  readonly RTMPOSE_M_AP10K_256_BS5_TORCHSCRIPT_PT: 'rtmpose-m_ap10k_256_bs5.torchscript.pt';
+  readonly RTMPOSE_M_AP10K_256_ONNX: 'rtmpose-m_ap10k_256.onnx';
+};
+export type AnimalPoseEstimator = (typeof AnimalPoseEstimator)[keyof typeof AnimalPoseEstimator];
+export declare const AnylineMergeWith: {
+  readonly LINEART_STANDARD: 'lineart_standard';
+  readonly LINEART_REALISTIC: 'lineart_realistic';
+  readonly LINEART_ANIME: 'lineart_anime';
+  readonly MANGA_LINE: 'manga_line';
+};
+export type AnylineMergeWith = (typeof AnylineMergeWith)[keyof typeof AnylineMergeWith];
 export type BatchOcrSafetyClassificationInput = {
   mediaUrls: Array<string>;
 };
@@ -176,6 +197,11 @@ export type BuzzClientAccount = (typeof BuzzClientAccount)[keyof typeof BuzzClientAccount];
 export type ChromaAiToolkitTrainingInput = AiToolkitTrainingInput & {} & {
   ecosystem: 'chroma';
 };
+export declare const CoarseMode: {
+  readonly DISABLE: 'disable';
+  readonly ENABLE: 'enable';
+};
+export type CoarseMode = (typeof CoarseMode)[keyof typeof CoarseMode];
 export type ComfyInput = {
   /**
    * Get the comfy workflow that needs to be executed
@@ -242,6 +268,45 @@ export type CursedArrayOfTelemetryCursorAndWorkflow = {
   next: string;
   items: Array<Workflow>;
 };
+export declare const DensePoseColormap: {
+  readonly 'VIRIDIS (_MAGIC_ANIMATE)': 'Viridis (MagicAnimate)';
+  readonly 'PARULA (_CIVIT_AI)': 'Parula (CivitAI)';
+};
+export type DensePoseColormap = (typeof DensePoseColormap)[keyof typeof DensePoseColormap];
+export declare const DensePoseModel: {
+  readonly DENSEPOSE_R50_FPN_DL_TORCHSCRIPT: 'densepose_r50_fpn_dl.torchscript';
+  readonly DENSEPOSE_R101_FPN_DL_TORCHSCRIPT: 'densepose_r101_fpn_dl.torchscript';
+};
+export type DensePoseModel = (typeof DensePoseModel)[keyof typeof DensePoseModel];
+export declare const DepthAnythingCheckpoint: {
+  readonly DEPTH_ANYTHING_VITL14_PTH: 'depth_anything_vitl14.pth';
+  readonly DEPTH_ANYTHING_VITB14_PTH: 'depth_anything_vitb14.pth';
+  readonly DEPTH_ANYTHING_VITS14_PTH: 'depth_anything_vits14.pth';
+};
+export type DepthAnythingCheckpoint =
+  (typeof DepthAnythingCheckpoint)[keyof typeof DepthAnythingCheckpoint];
+export declare const DepthAnythingV2Checkpoint: {
+  readonly DEPTH_ANYTHING_V2_VITG_PTH: 'depth_anything_v2_vitg.pth';
+  readonly DEPTH_ANYTHING_V2_VITL_PTH: 'depth_anything_v2_vitl.pth';
+  readonly DEPTH_ANYTHING_V2_VITB_PTH: 'depth_anything_v2_vitb.pth';
+  readonly DEPTH_ANYTHING_V2_VITS_PTH: 'depth_anything_v2_vits.pth';
+};
+export type DepthAnythingV2Checkpoint =
+  (typeof DepthAnythingV2Checkpoint)[keyof typeof DepthAnythingV2Checkpoint];
+export declare const DwPoseBboxDetector: {
+  readonly YOLOX_L_ONNX: 'yolox_l.onnx';
+  readonly YOLOX_L_TORCHSCRIPT_PT: 'yolox_l.torchscript.pt';
+  readonly YOLO_NAS_L_FP16_ONNX: 'yolo_nas_l_fp16.onnx';
+  readonly YOLO_NAS_M_FP16_ONNX: 'yolo_nas_m_fp16.onnx';
+  readonly YOLO_NAS_S_FP16_ONNX: 'yolo_nas_s_fp16.onnx';
+};
+export type DwPoseBboxDetector = (typeof DwPoseBboxDetector)[keyof typeof DwPoseBboxDetector];
+export declare const DwPoseEstimator: {
+  readonly DW_LL_UCOCO_384_BS5_TORCHSCRIPT_PT: 'dw-ll_ucoco_384_bs5.torchscript.pt';
+  readonly DW_LL_UCOCO_384_ONNX: 'dw-ll_ucoco_384.onnx';
+  readonly DW_LL_UCOCO_ONNX: 'dw-ll_ucoco.onnx';
+};
+export type DwPoseEstimator = (typeof DwPoseEstimator)[keyof typeof DwPoseEstimator];
 /**
  * Represents the input information needed for the Echo workflow step.
  */
@@ -310,6 +375,7 @@ export declare const FileFormat: {
   readonly DIFFUSERS: 'diffusers';
   readonly CORE_ML: 'coreML';
   readonly ONNX: 'onnx';
+  readonly TAR: 'tar';
 };
 export type FileFormat = (typeof FileFormat)[keyof typeof FileFormat];
 /**
@@ -1046,6 +1112,11 @@ export type KohyaImageResourceTrainingInput = ImageResourceTrainingInput & {
 } & {
   engine: 'kohya';
 };
+export declare const LeresBoost: {
+  readonly DISABLE: 'disable';
+  readonly ENABLE: 'enable';
+};
+export type LeresBoost = (typeof LeresBoost)[keyof typeof LeresBoost];
 export declare const LightricksAspectRatio: {
   readonly '1:1': '1:1';
   readonly '16:9': '16:9';
@@ -1174,6 +1245,12 @@ export type MediaRatingStepTemplate = WorkflowStepTemplate & {
 } & {
   $type: 'mediaRating';
 };
+export declare const Metric3dBackbone: {
+  readonly VIT_SMALL: 'vit-small';
+  readonly VIT_LARGE: 'vit-large';
+  readonly VIT_GIANT2: 'vit-giant2';
+};
+export type Metric3dBackbone = (typeof Metric3dBackbone)[keyof typeof Metric3dBackbone];
 export type MiniMaxVideoGenInput = VideoGenInput & {
   engine: 'minimax';
 } & {
@@ -1342,6 +1419,303 @@ export declare const OutputFormat: {
   readonly WEB_P: 'webP';
 };
 export type OutputFormat = (typeof OutputFormat)[keyof typeof OutputFormat];
+export type PreprocessImageAnimalPoseInput = PreprocessImageInput & {
+  kind: 'animal-pose';
+} & {
+  bboxDetector?: AnimalPoseBboxDetector;
+  poseEstimator?: AnimalPoseEstimator;
+} & {
+  kind: 'animal-pose';
+};
+export type PreprocessImageAnimeLineartInput = PreprocessImageInput & {
+  kind: 'lineart-anime';
+} & {} & {
+  kind: 'lineart-anime';
+};
+export type PreprocessImageAnylineInput = PreprocessImageInput & {
+  kind: 'anyline';
+} & {
+  mergeWithLineart?: AnylineMergeWith;
+  lineartLowerBound?: number;
+  lineartUpperBound?: number;
+  objectMinSize?: number;
+  objectConnectivity?: number;
+} & {
+  kind: 'anyline';
+};
+export type PreprocessImageBaeNormalInput = PreprocessImageInput & {
+  kind: 'bae-normal';
+} & {} & {
+  kind: 'bae-normal';
+};
+export type PreprocessImageBinaryInput = PreprocessImageInput & {
+  kind: 'binary';
+} & {
+  binThreshold?: number;
+} & {
+  kind: 'binary';
+};
+export type PreprocessImageCannyInput = PreprocessImageInput & {
+  kind: 'canny';
+} & {
+  lowThreshold?: number;
+  highThreshold?: number;
+} & {
+  kind: 'canny';
+};
+export type PreprocessImageColorInput = PreprocessImageInput & {
+  kind: 'color';
+} & {} & {
+  kind: 'color';
+};
+export type PreprocessImageDensePoseInput = PreprocessImageInput & {
+  kind: 'densepose';
+} & {
+  model?: DensePoseModel;
+  colormap?: DensePoseColormap;
+} & {
+  kind: 'densepose';
+};
+export type PreprocessImageDepthAnythingInput = PreprocessImageInput & {
+  kind: 'depth-anything';
+} & {
+  checkpoint?: DepthAnythingCheckpoint;
+} & {
+  kind: 'depth-anything';
+};
+export type PreprocessImageDepthAnythingV2Input = PreprocessImageInput & {
+  kind: 'depth-anything-v2';
+} & {
+  checkpoint?: DepthAnythingV2Checkpoint;
+} & {
+  kind: 'depth-anything-v2';
+};
+export type PreprocessImageDsineNormalInput = PreprocessImageInput & {
+  kind: 'dsine-normal';
+} & {
+  fov?: number;
+  iterations?: number;
+} & {
+  kind: 'dsine-normal';
+};
+export type PreprocessImageDwPoseInput = PreprocessImageInput & {
+  kind: 'dwpose';
+} & {
+  detectHand?: boolean;
+  detectBody?: boolean;
+  detectFace?: boolean;
+  bboxDetector?: DwPoseBboxDetector;
+  poseEstimator?: DwPoseEstimator;
+} & {
+  kind: 'dwpose';
+};
+export type PreprocessImageFakeScribbleInput = PreprocessImageInput & {
+  kind: 'fake-scribble';
+} & {
+  safe?: SafeMode;
+} & {
+  kind: 'fake-scribble';
+};
+export type PreprocessImageHedInput = PreprocessImageInput & {
+  kind: 'hed';
+} & {
+  safe?: SafeMode;
+} & {
+  kind: 'hed';
+};
+export type PreprocessImageInput = {
+  kind: string;
+  /**
+   * Either A URL, A DataURL or a Base64 string
+   */
+  image: string;
+  resolution?: number;
+  /**
+   * Gets the preprocessor type identifier used to map to ComfyUI nodes.
+   * This is derived from the JsonDerivedType discriminator.
+   */
+  preprocessorType?: string;
+};
+export type PreprocessImageLeresDepthInput = PreprocessImageInput & {
+  kind: 'leres-depth';
+} & {
+  removeNearest?: number;
+  removeBackground?: number;
+  boost?: LeresBoost;
+} & {
+  kind: 'leres-depth';
+};
+export type PreprocessImageMangaLineartInput = PreprocessImageInput & {
+  kind: 'lineart-manga';
+} & {} & {
+  kind: 'lineart-manga';
+};
+export type PreprocessImageMediaPipeFaceInput = PreprocessImageInput & {
+  kind: 'mediapipe-face';
+} & {
+  maxFaces?: number;
+  minConfidence?: number;
+} & {
+  kind: 'mediapipe-face';
+};
+export type PreprocessImageMetric3dDepthInput = PreprocessImageInput & {
+  kind: 'metric3d-depth';
+} & {
+  backbone?: Metric3dBackbone;
+  fx?: number;
+  fy?: number;
+} & {
+  kind: 'metric3d-depth';
+};
+export type PreprocessImageMetric3dNormalInput = PreprocessImageInput & {
+  kind: 'metric3d-normal';
+} & {
+  backbone?: Metric3dBackbone;
+  fx?: number;
+  fy?: number;
+} & {
+  kind: 'metric3d-normal';
+};
+export type PreprocessImageMidasDepthInput = PreprocessImageInput & {
+  kind: 'midas-depth';
+} & {
+  a?: number;
+  backgroundThreshold?: number;
+} & {
+  kind: 'midas-depth';
+};
+export type PreprocessImageMidasNormalInput = PreprocessImageInput & {
+  kind: 'midas-normal';
+} & {
+  a?: number;
+  backgroundThreshold?: number;
+} & {
+  kind: 'midas-normal';
+};
+export type PreprocessImageMlsdInput = PreprocessImageInput & {
+  kind: 'mlsd';
+} & {
+  scoreThreshold?: number;
+  distanceThreshold?: number;
+} & {
+  kind: 'mlsd';
+};
+export type PreprocessImageOneFormerAde20kInput = PreprocessImageInput & {
+  kind: 'oneformer-ade20k';
+} & {} & {
+  kind: 'oneformer-ade20k';
+};
+export type PreprocessImageOneFormerCocoInput = PreprocessImageInput & {
+  kind: 'oneformer-coco';
+} & {} & {
+  kind: 'oneformer-coco';
+};
+export type PreprocessImageOpenPoseInput = PreprocessImageInput & {
+  kind: 'openpose';
+} & {
+  detectHand?: boolean;
+  detectBody?: boolean;
+  detectFace?: boolean;
+} & {
+  kind: 'openpose';
+};
+export type PreprocessImageOutput = {
+  blob: ImageBlob;
+};
+export type PreprocessImagePidinetInput = PreprocessImageInput & {
+  kind: 'pidinet';
+} & {
+  safe?: SafeMode;
+} & {
+  kind: 'pidinet';
+};
+export type PreprocessImageRealisticLineartInput = PreprocessImageInput & {
+  kind: 'lineart-realistic';
+} & {
+  coarse?: CoarseMode;
+} & {
+  kind: 'lineart-realistic';
+};
+export type PreprocessImageScribbleInput = PreprocessImageInput & {
+  kind: 'scribble';
+} & {} & {
+  kind: 'scribble';
+};
+export type PreprocessImageScribblePidinetInput = PreprocessImageInput & {
+  kind: 'scribble-pidinet';
+} & {
+  safe?: SafeMode;
+} & {
+  kind: 'scribble-pidinet';
+};
+export type PreprocessImageScribbleXdogInput = PreprocessImageInput & {
+  kind: 'scribble-xdog';
+} & {
+  threshold?: number;
+} & {
+  kind: 'scribble-xdog';
+};
+export type PreprocessImageShuffleInput = PreprocessImageInput & {
+  kind: 'shuffle';
+} & {
+  seed?: number;
+} & {
+  kind: 'shuffle';
+};
+export type PreprocessImageStandardLineartInput = PreprocessImageInput & {
+  kind: 'lineart-standard';
+} & {
+  gaussianSigma?: number;
+  intensityThreshold?: number;
+} & {
+  kind: 'lineart-standard';
+};
+export type PreprocessImageStep = WorkflowStep & {
+  $type: 'preprocessImage';
+} & {
+  input: PreprocessImageInput;
+  output?: PreprocessImageOutput;
+} & {
+  $type: 'preprocessImage';
+};
+export type PreprocessImageStepTemplate = WorkflowStepTemplate & {
+  $type: 'preprocessImage';
+} & {
+  input: PreprocessImageInput;
+} & {
+  $type: 'preprocessImage';
+};
+export type PreprocessImageTeedInput = PreprocessImageInput & {
+  kind: 'teed';
+} & {
+  safeSteps?: number;
+} & {
+  kind: 'teed';
+};
+export type PreprocessImageTileInput = PreprocessImageInput & {
+  kind: 'tile';
+} & {
+  pyrUpIterations?: number;
+} & {
+  kind: 'tile';
+};
+export type PreprocessImageUniFormerInput = PreprocessImageInput & {
+  kind: 'uniformer';
+} & {} & {
+  kind: 'uniformer';
+};
+export type PreprocessImageZoeDepthAnythingInput = PreprocessImageInput & {
+  kind: 'zoe-depth-anything';
+} & {
+  environment?: ZoeDepthEnvironment;
+} & {
+  kind: 'zoe-depth-anything';
+};
+export type PreprocessImageZoeDepthInput = PreprocessImageInput & {
+  kind: 'zoe-depth';
+} & {} & {
+  kind: 'zoe-depth';
+};
 /**
  * Available options for priority.
  */
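Each variant above intersects the shared PreprocessImageInput base with its literal `kind` (twice, an artifact of the generator), which collapses to a discriminated union keyed on `kind`. A sketch of constructing two of these inputs, assuming the generated types are re-exported from the package root:

```ts
import type {
  PreprocessImageCannyInput,
  PreprocessImageDwPoseInput,
} from '@civitai/client';

const canny: PreprocessImageCannyInput = {
  kind: 'canny',
  image: 'https://example.com/input.png',
  resolution: 512,    // optional, from the shared PreprocessImageInput base
  lowThreshold: 100,  // variant-specific optional tuning
  highThreshold: 200,
};

const dwpose: PreprocessImageDwPoseInput = {
  kind: 'dwpose',
  image: 'https://example.com/pose-ref.png',
  detectBody: true,
  detectHand: true,
  detectFace: false,
  bboxDetector: 'yolox_l.onnx',          // DwPoseBboxDetector
  poseEstimator: 'dw-ll_ucoco_384.onnx', // DwPoseEstimator
};
```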
@@ -1539,23 +1913,18 @@ export type Sd1AiToolkitTrainingInput = AiToolkitTrainingInput & {
    * it may deviate greatly from the target, so try to suppress this jump.
    */
   minSnrGamma?: number | null;
+  /**
+   * The primary model to train upon.
+   */
+  model?: string;
 } & {
   ecosystem: 'sd1';
 };
-
-
-
-export type Sd35LargeAiToolkitTrainingInput = Sd3AiToolkitTrainingInput & {} & {
-  modelVariant: 'large';
-};
-/**
- * AI Toolkit training for Stable Diffusion 3 models
- */
-export type Sd3AiToolkitTrainingInput = AiToolkitTrainingInput & {
-  modelVariant: string;
-} & {
-  ecosystem: 'sd3';
+export declare const SafeMode: {
+  readonly ENABLE: 'enable';
+  readonly DISABLE: 'disable';
 };
+export type SafeMode = (typeof SafeMode)[keyof typeof SafeMode];
 /**
  * The available options for schedulers used in image generation.
  */
@@ -1632,6 +2001,10 @@ export type SdxlAiToolkitTrainingInput = AiToolkitTrainingInput & {
    * it may deviate greatly from the target, so try to suppress this jump.
    */
   minSnrGamma?: number | null;
+  /**
+   * The primary model to train upon.
+   */
+  model?: string;
 } & {
   ecosystem: 'sdxl';
 };
@@ -1653,6 +2026,7 @@ export type SeedreamImageGenInput = ImageGenInput & {
 export declare const SeedreamVersion: {
   readonly V3: 'v3';
   readonly V4: 'v4';
+  readonly V4_5: 'v4.5';
 };
 export type SeedreamVersion = (typeof SeedreamVersion)[keyof typeof SeedreamVersion];
 /**
@@ -1802,10 +2176,6 @@ export type TrainingData = {
  */
 export type TrainingInput = {
   engine: string;
-  /**
-   * The primary model to train upon.
-   */
-  model: string;
   trainingData: TrainingData;
   samples?: TrainingInputSamples;
 };
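Note the shape change this hunk completes: `model` is no longer a required member of the base TrainingInput; it reappears, optional, on the sd1/sdxl AI Toolkit inputs earlier in this file. A sketch of the consequence for consumers (only the slice this diff defines is shown; AiToolkitTrainingInput contributes further members not visible here, and the model value is a hypothetical placeholder):

```ts
import type { Sd1AiToolkitTrainingInput, TrainingInput } from '@civitai/client';

// beta.19 drops `model` from the base input entirely...
type BaseHasModel = 'model' extends keyof TrainingInput ? true : false; // now false

// ...and re-adds it, now optional, on the ecosystem-specific inputs.
const sd1Slice: Pick<Sd1AiToolkitTrainingInput, 'ecosystem' | 'model'> = {
  ecosystem: 'sd1',
  model: 'model-identifier-here', // hypothetical placeholder
};
```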
@@ -2369,12 +2739,6 @@ export type WdTaggingStepTemplate = WorkflowStepTemplate & {
 } & {
   $type: 'wdTagging';
 };
-/**
- * AI Toolkit training for Wan 2.1 models
- */
-export type Wan21AiToolkitTrainingInput = WanAiToolkitTrainingInput & {} & {
-  modelVariant: '2.1';
-};
 export type Wan21CivitaiVideoGenInput = Wan21VideoGenInput & {
   width?: number;
   height?: number;
@@ -2431,12 +2795,6 @@ export type Wan225bVideoGenInput = WanVideoGenInput & {
 } & {
   version: 'v2.2-5b';
 };
-/**
- * AI Toolkit training for Wan 2.2 models
- */
-export type Wan22AiToolkitTrainingInput = WanAiToolkitTrainingInput & {} & {
-  modelVariant: '2.2';
-};
 export type Wan22FalImageGenInput = Wan22ImageGenInput & {
   acceleration?: 'none' | 'fast' | 'faster';
 } & {
@@ -2514,14 +2872,6 @@ export type Wan25VideoGenInput = WanVideoGenInput & {
 } & {
   version: 'v2.5';
 };
-/**
- * AI Toolkit training for Wan (video) models
- */
-export type WanAiToolkitTrainingInput = AiToolkitTrainingInput & {
-  modelVariant: string;
-} & {
-  ecosystem: 'wan';
-};
 export type WanImageGenInput = ImageGenInput & {
   engine: 'wan';
 } & {
@@ -3097,6 +3447,11 @@ export type ZipTrainingData = TrainingData & {
 } & {
   type: 'zip';
 };
+export declare const ZoeDepthEnvironment: {
+  readonly INDOOR: 'indoor';
+  readonly OUTDOOR: 'outdoor';
+};
+export type ZoeDepthEnvironment = (typeof ZoeDepthEnvironment)[keyof typeof ZoeDepthEnvironment];
 export type GetBlobData = {
   body?: never;
   path: {
@@ -3458,6 +3813,35 @@ export type InvokeMediaRatingStepTemplateResponses = {
 };
 export type InvokeMediaRatingStepTemplateResponse =
   InvokeMediaRatingStepTemplateResponses[keyof InvokeMediaRatingStepTemplateResponses];
+export type InvokePreprocessImageStepTemplateData = {
+  body?: PreprocessImageInput;
+  path?: never;
+  query?: {
+    experimental?: boolean;
+    allowMatureContent?: boolean;
+  };
+  url: '/v2/consumer/recipes/preprocessImage';
+};
+export type InvokePreprocessImageStepTemplateErrors = {
+  /**
+   * Bad Request
+   */
+  400: ProblemDetails;
+  /**
+   * Unauthorized
+   */
+  401: ProblemDetails;
+};
+export type InvokePreprocessImageStepTemplateError =
+  InvokePreprocessImageStepTemplateErrors[keyof InvokePreprocessImageStepTemplateErrors];
+export type InvokePreprocessImageStepTemplateResponses = {
+  /**
+   * OK
+   */
+  200: PreprocessImageOutput;
+};
+export type InvokePreprocessImageStepTemplateResponse =
+  InvokePreprocessImageStepTemplateResponses[keyof InvokePreprocessImageStepTemplateResponses];
 export type InvokeTextToImageStepTemplateData = {
   body?: TextToImageInput;
   path?: never;
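Taken together, the Data/Errors/Responses triple pins down the whole request contract: an optional PreprocessImageInput body, two optional query flags, ProblemDetails on 400/401, and PreprocessImageOutput on 200. A sketch of a call exercising the query flags, under the same root re-export assumption as above:

```ts
import { invokePreprocessImageStepTemplate } from '@civitai/client';

const result = await invokePreprocessImageStepTemplate({
  body: { kind: 'openpose', image: 'https://example.com/ref.png' },
  // both flags are optional booleans per InvokePreprocessImageStepTemplateData
  query: { experimental: true, allowMatureContent: false },
});

// With ThrowOnError left at its default (false), failures come back as a
// typed field instead of a thrown exception.
if (result.error) {
  console.error(result.error); // InvokePreprocessImageStepTemplateError (ProblemDetails)
} else {
  console.log(result.data);    // InvokePreprocessImageStepTemplateResponse (PreprocessImageOutput)
}
```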
package/dist/generated/types.gen.js

@@ -1,14 +1,66 @@
 // This file is auto-generated by @hey-api/openapi-ts
+export const AnimalPoseBboxDetector = {
+  YOLOX_L_TORCHSCRIPT_PT: 'yolox_l.torchscript.pt',
+  YOLOX_L_ONNX: 'yolox_l.onnx',
+  YOLO_NAS_L_FP16_ONNX: 'yolo_nas_l_fp16.onnx',
+  YOLO_NAS_M_FP16_ONNX: 'yolo_nas_m_fp16.onnx',
+  YOLO_NAS_S_FP16_ONNX: 'yolo_nas_s_fp16.onnx',
+};
+export const AnimalPoseEstimator = {
+  RTMPOSE_M_AP10K_256_BS5_TORCHSCRIPT_PT: 'rtmpose-m_ap10k_256_bs5.torchscript.pt',
+  RTMPOSE_M_AP10K_256_ONNX: 'rtmpose-m_ap10k_256.onnx',
+};
+export const AnylineMergeWith = {
+  LINEART_STANDARD: 'lineart_standard',
+  LINEART_REALISTIC: 'lineart_realistic',
+  LINEART_ANIME: 'lineart_anime',
+  MANGA_LINE: 'manga_line',
+};
 export const BuzzClientAccount = {
   YELLOW: 'yellow',
   BLUE: 'blue',
   GREEN: 'green',
   FAKE_RED: 'fakeRed',
 };
+export const CoarseMode = {
+  DISABLE: 'disable',
+  ENABLE: 'enable',
+};
 export const ContainerFormat = {
   MP4: 'mp4',
   WEB_M: 'webM',
 };
+export const DensePoseColormap = {
+  'VIRIDIS (_MAGIC_ANIMATE)': 'Viridis (MagicAnimate)',
+  'PARULA (_CIVIT_AI)': 'Parula (CivitAI)',
+};
+export const DensePoseModel = {
+  DENSEPOSE_R50_FPN_DL_TORCHSCRIPT: 'densepose_r50_fpn_dl.torchscript',
+  DENSEPOSE_R101_FPN_DL_TORCHSCRIPT: 'densepose_r101_fpn_dl.torchscript',
+};
+export const DepthAnythingCheckpoint = {
+  DEPTH_ANYTHING_VITL14_PTH: 'depth_anything_vitl14.pth',
+  DEPTH_ANYTHING_VITB14_PTH: 'depth_anything_vitb14.pth',
+  DEPTH_ANYTHING_VITS14_PTH: 'depth_anything_vits14.pth',
+};
+export const DepthAnythingV2Checkpoint = {
+  DEPTH_ANYTHING_V2_VITG_PTH: 'depth_anything_v2_vitg.pth',
+  DEPTH_ANYTHING_V2_VITL_PTH: 'depth_anything_v2_vitl.pth',
+  DEPTH_ANYTHING_V2_VITB_PTH: 'depth_anything_v2_vitb.pth',
+  DEPTH_ANYTHING_V2_VITS_PTH: 'depth_anything_v2_vits.pth',
+};
+export const DwPoseBboxDetector = {
+  YOLOX_L_ONNX: 'yolox_l.onnx',
+  YOLOX_L_TORCHSCRIPT_PT: 'yolox_l.torchscript.pt',
+  YOLO_NAS_L_FP16_ONNX: 'yolo_nas_l_fp16.onnx',
+  YOLO_NAS_M_FP16_ONNX: 'yolo_nas_m_fp16.onnx',
+  YOLO_NAS_S_FP16_ONNX: 'yolo_nas_s_fp16.onnx',
+};
+export const DwPoseEstimator = {
+  DW_LL_UCOCO_384_BS5_TORCHSCRIPT_PT: 'dw-ll_ucoco_384_bs5.torchscript.pt',
+  DW_LL_UCOCO_384_ONNX: 'dw-ll_ucoco_384.onnx',
+  DW_LL_UCOCO_ONNX: 'dw-ll_ucoco.onnx',
+};
 export const FileFormat = {
   UNKNOWN: 'unknown',
   SAFE_TENSOR: 'safeTensor',

@@ -16,6 +68,7 @@ export const FileFormat = {
   DIFFUSERS: 'diffusers',
   CORE_ML: 'coreML',
   ONNX: 'onnx',
+  TAR: 'tar',
 };
 export const HaiperVideoGenAspectRatio = {
   '16:9': '16:9',

@@ -85,6 +138,10 @@ export const KlingVideoGenDuration = {
   5: '5',
   10: '10',
 };
+export const LeresBoost = {
+  DISABLE: 'disable',
+  ENABLE: 'enable',
+};
 export const LightricksAspectRatio = {
   '1:1': '1:1',
   '16:9': '16:9',

@@ -98,6 +155,11 @@ export const LightricksAspectRatio = {
 export const MediaHashType = {
   PERCEPTUAL: 'perceptual',
 };
+export const Metric3dBackbone = {
+  VIT_SMALL: 'vit-small',
+  VIT_LARGE: 'vit-large',
+  VIT_GIANT2: 'vit-giant2',
+};
 export const MiniMaxVideoGenModel = {
   HAILOU: 'hailou',
 };

@@ -122,6 +184,10 @@ export const Priority = {
   NORMAL: 'normal',
   LOW: 'low',
 };
+export const SafeMode = {
+  ENABLE: 'enable',
+  DISABLE: 'disable',
+};
 /**
  * The available options for schedulers used in image generation.
  */

@@ -175,6 +241,7 @@ export const SdCppSchedule = {
 export const SeedreamVersion = {
   V3: 'v3',
   V4: 'v4',
+  V4_5: 'v4.5',
 };
 /**
  * The moderation status of the training data
 */

@@ -232,3 +299,7 @@ export const WorkflowUpgradeMode = {
   MANUAL: 'manual',
   AUTOMATIC: 'automatic',
 };
+export const ZoeDepthEnvironment = {
+  INDOOR: 'indoor',
+  OUTDOOR: 'outdoor',
+};
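The .js side mirrors each `declare const` from types.gen.d.ts as a plain object, so the string-literal unions are also available as runtime lookup tables. A small sketch (import path assumed, as above):

```ts
import { DepthAnythingV2Checkpoint, SafeMode, ZoeDepthEnvironment } from '@civitai/client';

// The const objects double as runtime enums for the literal union types.
const input = {
  kind: 'zoe-depth-anything' as const,
  image: 'https://example.com/input.png',
  environment: ZoeDepthEnvironment.OUTDOOR, // 'outdoor'
};

const checkpoint = DepthAnythingV2Checkpoint.DEPTH_ANYTHING_V2_VITL_PTH; // 'depth_anything_v2_vitl.pth'
const safe = SafeMode.ENABLE; // 'enable'
```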