@civitai/client 0.2.0-beta.2 → 0.2.0-beta.21
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/client/CivitaiClient.js +3 -1
- package/dist/generated/schemas.gen.d.ts +1811 -1866
- package/dist/generated/schemas.gen.js +2020 -2082
- package/dist/generated/sdk.gen.d.ts +179 -1
- package/dist/generated/sdk.gen.js +255 -1
- package/dist/generated/services.gen.d.ts +90 -88
- package/dist/generated/services.gen.js +147 -100
- package/dist/generated/types.gen.d.ts +2138 -53
- package/dist/generated/types.gen.js +122 -6
- package/package.json +2 -2
- package/dist/utils/types.d.ts +0 -3
- package/dist/utils/types.js +0 -1

package/dist/generated/sdk.gen.d.ts

@@ -5,12 +5,20 @@ import type {
   HeadBlobData,
   HeadBlobResponses,
   HeadBlobErrors,
+  GetBlobContentData,
+  GetBlobContentResponses,
+  GetBlobContentErrors,
+  GetBlockedContentData,
+  GetBlockedContentErrors,
   InvokeAgeClassificationStepTemplateData,
   InvokeAgeClassificationStepTemplateResponses,
   InvokeAgeClassificationStepTemplateErrors,
   InvokeComfyStepTemplateData,
   InvokeComfyStepTemplateResponses,
   InvokeComfyStepTemplateErrors,
+  InvokeConvertImageStepTemplateData,
+  InvokeConvertImageStepTemplateResponses,
+  InvokeConvertImageStepTemplateErrors,
   InvokeEchoStepTemplateData,
   InvokeEchoStepTemplateResponses,
   InvokeEchoStepTemplateErrors,
@@ -23,15 +31,45 @@ import type {
   InvokeImageUploadStepTemplateData,
   InvokeImageUploadStepTemplateResponses,
   InvokeImageUploadStepTemplateErrors,
+  InvokeImageUpscalerStepTemplateData,
+  InvokeImageUpscalerStepTemplateResponses,
+  InvokeImageUpscalerStepTemplateErrors,
+  InvokeMediaHashStepTemplateData,
+  InvokeMediaHashStepTemplateResponses,
+  InvokeMediaHashStepTemplateErrors,
+  InvokeMediaRatingStepTemplateData,
+  InvokeMediaRatingStepTemplateResponses,
+  InvokeMediaRatingStepTemplateErrors,
+  InvokePreprocessImageStepTemplateData,
+  InvokePreprocessImageStepTemplateResponses,
+  InvokePreprocessImageStepTemplateErrors,
   InvokeTextToImageStepTemplateData,
   InvokeTextToImageStepTemplateResponses,
   InvokeTextToImageStepTemplateErrors,
+  InvokeTrainingStepTemplateData,
+  InvokeTrainingStepTemplateResponses,
+  InvokeTrainingStepTemplateErrors,
   InvokeVideoEnhancementStepTemplateData,
   InvokeVideoEnhancementStepTemplateResponses,
   InvokeVideoEnhancementStepTemplateErrors,
+  InvokeVideoFrameExtractionStepTemplateData,
+  InvokeVideoFrameExtractionStepTemplateResponses,
+  InvokeVideoFrameExtractionStepTemplateErrors,
   InvokeVideoGenStepTemplateData,
   InvokeVideoGenStepTemplateResponses,
   InvokeVideoGenStepTemplateErrors,
+  InvokeVideoInterpolationStepTemplateData,
+  InvokeVideoInterpolationStepTemplateResponses,
+  InvokeVideoInterpolationStepTemplateErrors,
+  InvokeVideoMetadataStepTemplateData,
+  InvokeVideoMetadataStepTemplateResponses,
+  InvokeVideoMetadataStepTemplateErrors,
+  InvokeVideoUpscalerStepTemplateData,
+  InvokeVideoUpscalerStepTemplateResponses,
+  InvokeVideoUpscalerStepTemplateErrors,
+  InvokeWdTaggingStepTemplateData,
+  InvokeWdTaggingStepTemplateResponses,
+  InvokeWdTaggingStepTemplateErrors,
   InvalidateResourceData,
   InvalidateResourceResponses,
   InvalidateResourceErrors,
@@ -92,7 +130,7 @@ export type Options<
   meta?: Record<string, unknown>;
 };
 /**
- * Get blob by ID. This will
+ * Get blob by ID. This will redirect to a cacheable content URL.
  */
 export declare const getBlob: <ThrowOnError extends boolean = false>(
   options: Options<GetBlobData, ThrowOnError>
@@ -103,6 +141,23 @@ export declare const getBlob: <ThrowOnError extends boolean = false>(
 export declare const headBlob: <ThrowOnError extends boolean = false>(
   options: Options<HeadBlobData, ThrowOnError>
 ) => import('./client').RequestResult<HeadBlobResponses, HeadBlobErrors, ThrowOnError, 'fields'>;
+/**
+ * Serves cacheable blob content using a deterministic encrypted token
+ */
+export declare const getBlobContent: <ThrowOnError extends boolean = false>(
+  options: Options<GetBlobContentData, ThrowOnError>
+) => import('./client').RequestResult<
+  GetBlobContentResponses,
+  GetBlobContentErrors,
+  ThrowOnError,
+  'fields'
+>;
+/**
+ * Serves a blocked placeholder image with the specified dimensions
+ */
+export declare const getBlockedContent: <ThrowOnError extends boolean = false>(
+  options: Options<GetBlockedContentData, ThrowOnError>
+) => import('./client').RequestResult<unknown, GetBlockedContentErrors, ThrowOnError, 'fields'>;
 /**
  * Age classification
  * Detects minors in media content. Returns a boolean value indicating whether the content contains minors as well as details on where minors are detected.
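The two blob declarations added above follow the same Options/RequestResult pattern as getBlob. A minimal usage sketch, assuming the SDK functions and generated types are re-exported from the package root and that GetBlobContentData exposes the route parameter as path.encryptedToken (inferred from the /v2/consumer/blobs/content/{encryptedToken} URL template in sdk.gen.js further down); the token value is hypothetical.

    import { getBlobContent } from '@civitai/client';

    // With the default ThrowOnError = false, the promise resolves to a result
    // object carrying either data or error (hey-api client convention).
    const result = await getBlobContent({
      path: { encryptedToken: 'example-token' }, // hypothetical value
    });
    if (result.error) {
      console.error('getBlobContent failed', result.error); // GetBlobContentErrors
    } else {
      console.log(result.data); // GetBlobContentResponses
    }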
@@ -127,6 +182,17 @@ export declare const invokeComfyStepTemplate: <ThrowOnError extends boolean = fa
   ThrowOnError,
   'fields'
 >;
+/**
+ * A workflow step that converts images to different formats and applies optional transforms.
+ */
+export declare const invokeConvertImageStepTemplate: <ThrowOnError extends boolean = false>(
+  options?: Options<InvokeConvertImageStepTemplateData, ThrowOnError>
+) => import('./client').RequestResult<
+  InvokeConvertImageStepTemplateResponses,
+  InvokeConvertImageStepTemplateErrors,
+  ThrowOnError,
+  'fields'
+>;
 /**
  * Echo
  * A workflow step that takes a message string and retuns it.
@@ -178,6 +244,46 @@ export declare const invokeImageUploadStepTemplate: <ThrowOnError extends boolea
   ThrowOnError,
   'fields'
 >;
+export declare const invokeImageUpscalerStepTemplate: <ThrowOnError extends boolean = false>(
+  options?: Options<InvokeImageUpscalerStepTemplateData, ThrowOnError>
+) => import('./client').RequestResult<
+  InvokeImageUpscalerStepTemplateResponses,
+  InvokeImageUpscalerStepTemplateErrors,
+  ThrowOnError,
+  'fields'
+>;
+/**
+ * MediaHash
+ * Generates perceptual hashes for media content to enable similarity detection and duplicate identification.
+ */
+export declare const invokeMediaHashStepTemplate: <ThrowOnError extends boolean = false>(
+  options?: Options<InvokeMediaHashStepTemplateData, ThrowOnError>
+) => import('./client').RequestResult<
+  InvokeMediaHashStepTemplateResponses,
+  InvokeMediaHashStepTemplateErrors,
+  ThrowOnError,
+  'fields'
+>;
+/**
+ * MediaRating
+ * Performs NSFW level detection and content safety classification on media content.
+ */
+export declare const invokeMediaRatingStepTemplate: <ThrowOnError extends boolean = false>(
+  options?: Options<InvokeMediaRatingStepTemplateData, ThrowOnError>
+) => import('./client').RequestResult<
+  InvokeMediaRatingStepTemplateResponses,
+  InvokeMediaRatingStepTemplateErrors,
+  ThrowOnError,
+  'fields'
+>;
+export declare const invokePreprocessImageStepTemplate: <ThrowOnError extends boolean = false>(
+  options?: Options<InvokePreprocessImageStepTemplateData, ThrowOnError>
+) => import('./client').RequestResult<
+  InvokePreprocessImageStepTemplateResponses,
+  InvokePreprocessImageStepTemplateErrors,
+  ThrowOnError,
+  'fields'
+>;
 /**
  * TextToImage
  * Generate images using text as input
@@ -190,6 +296,20 @@ export declare const invokeTextToImageStepTemplate: <ThrowOnError extends boolea
   ThrowOnError,
   'fields'
 >;
+/**
+ * Training
+ * A workflow step for training machine learning models (LoRAs, checkpoints, etc.)
+ * /// on various types of data (images, videos, audio). This replaces ImageResourceTraining
+ * /// with a cleaner architecture that creates one job per epoch instead of a single monolithic job.
+ */
+export declare const invokeTrainingStepTemplate: <ThrowOnError extends boolean = false>(
+  options?: Options<InvokeTrainingStepTemplateData, ThrowOnError>
+) => import('./client').RequestResult<
+  InvokeTrainingStepTemplateResponses,
+  InvokeTrainingStepTemplateErrors,
+  ThrowOnError,
+  'fields'
+>;
 /**
  * Upscale videos and/or interpolate frames
  */
@@ -201,6 +321,19 @@ export declare const invokeVideoEnhancementStepTemplate: <ThrowOnError extends b
   ThrowOnError,
   'fields'
 >;
+/**
+ * Video Frame Extraction
+ * Extracts unique frames from a video at a specified rate using perceptual hashing to filter out duplicate/similar frames.
+ * /// Useful for generating video previews, storyboards, or analyzing video content.
+ */
+export declare const invokeVideoFrameExtractionStepTemplate: <ThrowOnError extends boolean = false>(
+  options?: Options<InvokeVideoFrameExtractionStepTemplateData, ThrowOnError>
+) => import('./client').RequestResult<
+  InvokeVideoFrameExtractionStepTemplateResponses,
+  InvokeVideoFrameExtractionStepTemplateErrors,
+  ThrowOnError,
+  'fields'
+>;
 /**
  * Video generation
  * Generate videos through text/image inputs using any of our supported engines
@@ -213,6 +346,51 @@ export declare const invokeVideoGenStepTemplate: <ThrowOnError extends boolean =
   ThrowOnError,
   'fields'
 >;
+/**
+ * Interpolate videos using VFI Mamba
+ */
+export declare const invokeVideoInterpolationStepTemplate: <ThrowOnError extends boolean = false>(
+  options?: Options<InvokeVideoInterpolationStepTemplateData, ThrowOnError>
+) => import('./client').RequestResult<
+  InvokeVideoInterpolationStepTemplateResponses,
+  InvokeVideoInterpolationStepTemplateErrors,
+  ThrowOnError,
+  'fields'
+>;
+/**
+ * Extract metadata from videos including width, height, FPS, and duration
+ */
+export declare const invokeVideoMetadataStepTemplate: <ThrowOnError extends boolean = false>(
+  options?: Options<InvokeVideoMetadataStepTemplateData, ThrowOnError>
+) => import('./client').RequestResult<
+  InvokeVideoMetadataStepTemplateResponses,
+  InvokeVideoMetadataStepTemplateErrors,
+  ThrowOnError,
+  'fields'
+>;
+/**
+ * Upscale videos using FlashVSR
+ */
+export declare const invokeVideoUpscalerStepTemplate: <ThrowOnError extends boolean = false>(
+  options?: Options<InvokeVideoUpscalerStepTemplateData, ThrowOnError>
+) => import('./client').RequestResult<
+  InvokeVideoUpscalerStepTemplateResponses,
+  InvokeVideoUpscalerStepTemplateErrors,
+  ThrowOnError,
+  'fields'
+>;
+/**
+ * WDTagging
+ * Performs Waifu Diffusion tagging on media content to identify characteristics, objects, and themes.
+ */
+export declare const invokeWdTaggingStepTemplate: <ThrowOnError extends boolean = false>(
+  options?: Options<InvokeWdTaggingStepTemplateData, ThrowOnError>
+) => import('./client').RequestResult<
+  InvokeWdTaggingStepTemplateResponses,
+  InvokeWdTaggingStepTemplateErrors,
+  ThrowOnError,
+  'fields'
+>;
 /**
  * Invalidates the cache of a specific resource.
  */
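Every declaration added above shares the same ThrowOnError generic. A small sketch of the two calling modes, assuming the bundled ./client behaves like @hey-api/client-fetch (default ThrowOnError = false resolves to { data, error, request, response }; throwOnError: true rejects on error) and that the Data types are re-exported from the package root; the payload placeholder is hypothetical, with the real body shape defined in types.gen.d.ts.

    import { invokeVideoMetadataStepTemplate } from '@civitai/client';
    import type { InvokeVideoMetadataStepTemplateData } from '@civitai/client';

    // Hypothetical payload; the concrete fields are defined in types.gen.d.ts.
    declare const videoMetadataBody: InvokeVideoMetadataStepTemplateData['body'];

    // Default mode: inspect result.error yourself.
    const r1 = await invokeVideoMetadataStepTemplate({ body: videoMetadataBody });
    if (r1.error) {
      console.error(r1.error);
    }

    // Opt-in throwing mode: a failed call rejects instead of returning error.
    const r2 = await invokeVideoMetadataStepTemplate({
      body: videoMetadataBody,
      throwOnError: true,
    });
    console.log(r2.data);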

package/dist/generated/sdk.gen.js

@@ -1,7 +1,7 @@
 // This file is auto-generated by @hey-api/openapi-ts
 import { client as _heyApiClient } from './client.gen';
 /**
- * Get blob by ID. This will
+ * Get blob by ID. This will redirect to a cacheable content URL.
  */
 export const getBlob = (options) => {
     var _a;
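All of the compiled calls in this file attach security: [{ scheme: 'bearer', type: 'http' }]. A sketch of supplying the token once on the shared client, assuming the instance exported from client.gen is re-exported from the package root and follows @hey-api/client-fetch conventions (setConfig with an auth value or callback); the base URL shown is hypothetical.

    import { client } from '@civitai/client';

    client.setConfig({
      baseUrl: 'https://example-orchestrator.civitai.com', // hypothetical base URL
      // Used by calls that declare bearer security; may be a string or a callback.
      auth: () => process.env.CIVITAI_API_TOKEN ?? '',
    });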
@@ -32,6 +32,38 @@ export const headBlob = (options) => {
         ...options,
     });
 };
+/**
+ * Serves cacheable blob content using a deterministic encrypted token
+ */
+export const getBlobContent = (options) => {
+    var _a;
+    return ((_a = options.client) !== null && _a !== void 0 ? _a : _heyApiClient).get({
+        security: [
+            {
+                scheme: 'bearer',
+                type: 'http',
+            },
+        ],
+        url: '/v2/consumer/blobs/content/{encryptedToken}',
+        ...options,
+    });
+};
+/**
+ * Serves a blocked placeholder image with the specified dimensions
+ */
+export const getBlockedContent = (options) => {
+    var _a;
+    return ((_a = options.client) !== null && _a !== void 0 ? _a : _heyApiClient).get({
+        security: [
+            {
+                scheme: 'bearer',
+                type: 'http',
+            },
+        ],
+        url: '/v2/consumer/blobs/blocked/{encryptedToken}',
+        ...options,
+    });
+};
 /**
  * Age classification
  * Detects minors in media content. Returns a boolean value indicating whether the content contains minors as well as details on where minors are detected.
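getBlockedContent above is typed with unknown response data in sdk.gen.d.ts, so the raw fetch Response on the result is the practical thing to inspect, assuming the bundled client exposes it the way @hey-api/client-fetch does. The path-parameter shape is inferred from the {encryptedToken} URL template; the token value is hypothetical.

    import { getBlockedContent } from '@civitai/client';

    const blocked = await getBlockedContent({
      path: { encryptedToken: 'example-token' }, // hypothetical value
    });
    if (!blocked.error) {
      // data is typed as unknown, so inspect the raw Response metadata instead.
      console.log(blocked.response.status, blocked.response.headers.get('content-type'));
    }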
@@ -74,6 +106,26 @@ export const invokeComfyStepTemplate = (options) => {
         },
     });
 };
+/**
+ * A workflow step that converts images to different formats and applies optional transforms.
+ */
+export const invokeConvertImageStepTemplate = (options) => {
+    var _a;
+    return ((_a = options === null || options === void 0 ? void 0 : options.client) !== null && _a !== void 0 ? _a : _heyApiClient).post({
+        security: [
+            {
+                scheme: 'bearer',
+                type: 'http',
+            },
+        ],
+        url: '/v2/consumer/recipes/convertImage',
+        ...options,
+        headers: {
+            'Content-Type': 'application/json',
+            ...options === null || options === void 0 ? void 0 : options.headers,
+        },
+    });
+};
 /**
  * Echo
  * A workflow step that takes a message string and retuns it.
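The generated invokeConvertImageStepTemplate above already sets bearer security and a JSON Content-Type, and merges caller-supplied headers after it, so per-call extras can be layered on top. A sketch under the same root re-export assumption; the payload placeholder and the extra header are hypothetical, with the real body schema in types.gen.d.ts.

    import { invokeConvertImageStepTemplate } from '@civitai/client';
    import type { InvokeConvertImageStepTemplateData } from '@civitai/client';

    // Hypothetical payload; concrete fields are defined in types.gen.d.ts.
    declare const convertImageBody: InvokeConvertImageStepTemplateData['body'];

    const converted = await invokeConvertImageStepTemplate({
      body: convertImageBody,
      // Merged into the generated headers after 'Content-Type': 'application/json'.
      headers: { 'x-request-id': 'example-123' }, // hypothetical tracing header
    });
    if (converted.error) {
      console.error(converted.error);
    }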
@@ -159,6 +211,82 @@ export const invokeImageUploadStepTemplate = (options) => {
         },
     });
 };
+export const invokeImageUpscalerStepTemplate = (options) => {
+    var _a;
+    return ((_a = options === null || options === void 0 ? void 0 : options.client) !== null && _a !== void 0 ? _a : _heyApiClient).post({
+        security: [
+            {
+                scheme: 'bearer',
+                type: 'http',
+            },
+        ],
+        url: '/v2/consumer/recipes/imageUpscaler',
+        ...options,
+        headers: {
+            'Content-Type': 'application/json',
+            ...options === null || options === void 0 ? void 0 : options.headers,
+        },
+    });
+};
+/**
+ * MediaHash
+ * Generates perceptual hashes for media content to enable similarity detection and duplicate identification.
+ */
+export const invokeMediaHashStepTemplate = (options) => {
+    var _a;
+    return ((_a = options === null || options === void 0 ? void 0 : options.client) !== null && _a !== void 0 ? _a : _heyApiClient).post({
+        security: [
+            {
+                scheme: 'bearer',
+                type: 'http',
+            },
+        ],
+        url: '/v2/consumer/recipes/mediaHash',
+        ...options,
+        headers: {
+            'Content-Type': 'application/json',
+            ...options === null || options === void 0 ? void 0 : options.headers,
+        },
+    });
+};
+/**
+ * MediaRating
+ * Performs NSFW level detection and content safety classification on media content.
+ */
+export const invokeMediaRatingStepTemplate = (options) => {
+    var _a;
+    return ((_a = options === null || options === void 0 ? void 0 : options.client) !== null && _a !== void 0 ? _a : _heyApiClient).post({
+        security: [
+            {
+                scheme: 'bearer',
+                type: 'http',
+            },
+        ],
+        url: '/v2/consumer/recipes/mediaRating',
+        ...options,
+        headers: {
+            'Content-Type': 'application/json',
+            ...options === null || options === void 0 ? void 0 : options.headers,
+        },
+    });
+};
+export const invokePreprocessImageStepTemplate = (options) => {
+    var _a;
+    return ((_a = options === null || options === void 0 ? void 0 : options.client) !== null && _a !== void 0 ? _a : _heyApiClient).post({
+        security: [
+            {
+                scheme: 'bearer',
+                type: 'http',
+            },
+        ],
+        url: '/v2/consumer/recipes/preprocessImage',
+        ...options,
+        headers: {
+            'Content-Type': 'application/json',
+            ...options === null || options === void 0 ? void 0 : options.headers,
+        },
+    });
+};
 /**
  * TextToImage
  * Generate images using text as input
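The four recipe wrappers added above share an identical calling shape (optional options, JSON POST, bearer security), so related classification steps can be fanned out together. A sketch under the same assumptions as the earlier examples (root re-exports, body schemas in types.gen.d.ts); the payloads are placeholders.

    import { invokeMediaHashStepTemplate, invokeMediaRatingStepTemplate } from '@civitai/client';
    import type {
      InvokeMediaHashStepTemplateData,
      InvokeMediaRatingStepTemplateData,
    } from '@civitai/client';

    // Hypothetical payloads; concrete fields are defined in types.gen.d.ts.
    declare const mediaHashBody: InvokeMediaHashStepTemplateData['body'];
    declare const mediaRatingBody: InvokeMediaRatingStepTemplateData['body'];

    // Run hashing and NSFW rating for the same media in parallel, letting
    // throwOnError surface any failure as a rejection.
    const [hash, rating] = await Promise.all([
      invokeMediaHashStepTemplate({ body: mediaHashBody, throwOnError: true }),
      invokeMediaRatingStepTemplate({ body: mediaRatingBody, throwOnError: true }),
    ]);
    console.log(hash.data, rating.data);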
@@ -180,6 +308,29 @@ export const invokeTextToImageStepTemplate = (options) => {
         },
     });
 };
+/**
+ * Training
+ * A workflow step for training machine learning models (LoRAs, checkpoints, etc.)
+ * /// on various types of data (images, videos, audio). This replaces ImageResourceTraining
+ * /// with a cleaner architecture that creates one job per epoch instead of a single monolithic job.
+ */
+export const invokeTrainingStepTemplate = (options) => {
+    var _a;
+    return ((_a = options === null || options === void 0 ? void 0 : options.client) !== null && _a !== void 0 ? _a : _heyApiClient).post({
+        security: [
+            {
+                scheme: 'bearer',
+                type: 'http',
+            },
+        ],
+        url: '/v2/consumer/recipes/training',
+        ...options,
+        headers: {
+            'Content-Type': 'application/json',
+            ...options === null || options === void 0 ? void 0 : options.headers,
+        },
+    });
+};
 /**
  * Upscale videos and/or interpolate frames
  */
@@ -200,6 +351,28 @@ export const invokeVideoEnhancementStepTemplate = (options) => {
         },
     });
 };
+/**
+ * Video Frame Extraction
+ * Extracts unique frames from a video at a specified rate using perceptual hashing to filter out duplicate/similar frames.
+ * /// Useful for generating video previews, storyboards, or analyzing video content.
+ */
+export const invokeVideoFrameExtractionStepTemplate = (options) => {
+    var _a;
+    return ((_a = options === null || options === void 0 ? void 0 : options.client) !== null && _a !== void 0 ? _a : _heyApiClient).post({
+        security: [
+            {
+                scheme: 'bearer',
+                type: 'http',
+            },
+        ],
+        url: '/v2/consumer/recipes/videoFrameExtraction',
+        ...options,
+        headers: {
+            'Content-Type': 'application/json',
+            ...options === null || options === void 0 ? void 0 : options.headers,
+        },
+    });
+};
 /**
  * Video generation
  * Generate videos through text/image inputs using any of our supported engines
@@ -221,6 +394,87 @@ export const invokeVideoGenStepTemplate = (options) => {
         },
     });
 };
+/**
+ * Interpolate videos using VFI Mamba
+ */
+export const invokeVideoInterpolationStepTemplate = (options) => {
+    var _a;
+    return ((_a = options === null || options === void 0 ? void 0 : options.client) !== null && _a !== void 0 ? _a : _heyApiClient).post({
+        security: [
+            {
+                scheme: 'bearer',
+                type: 'http',
+            },
+        ],
+        url: '/v2/consumer/recipes/videoInterpolation',
+        ...options,
+        headers: {
+            'Content-Type': 'application/json',
+            ...options === null || options === void 0 ? void 0 : options.headers,
+        },
+    });
+};
+/**
+ * Extract metadata from videos including width, height, FPS, and duration
+ */
+export const invokeVideoMetadataStepTemplate = (options) => {
+    var _a;
+    return ((_a = options === null || options === void 0 ? void 0 : options.client) !== null && _a !== void 0 ? _a : _heyApiClient).post({
+        security: [
+            {
+                scheme: 'bearer',
+                type: 'http',
+            },
+        ],
+        url: '/v2/consumer/recipes/videoMetadata',
+        ...options,
+        headers: {
+            'Content-Type': 'application/json',
+            ...options === null || options === void 0 ? void 0 : options.headers,
+        },
+    });
+};
+/**
+ * Upscale videos using FlashVSR
+ */
+export const invokeVideoUpscalerStepTemplate = (options) => {
+    var _a;
+    return ((_a = options === null || options === void 0 ? void 0 : options.client) !== null && _a !== void 0 ? _a : _heyApiClient).post({
+        security: [
+            {
+                scheme: 'bearer',
+                type: 'http',
+            },
+        ],
+        url: '/v2/consumer/recipes/videoUpscaler',
+        ...options,
+        headers: {
+            'Content-Type': 'application/json',
+            ...options === null || options === void 0 ? void 0 : options.headers,
+        },
+    });
+};
+/**
+ * WDTagging
+ * Performs Waifu Diffusion tagging on media content to identify characteristics, objects, and themes.
+ */
+export const invokeWdTaggingStepTemplate = (options) => {
+    var _a;
+    return ((_a = options === null || options === void 0 ? void 0 : options.client) !== null && _a !== void 0 ? _a : _heyApiClient).post({
+        security: [
+            {
+                scheme: 'bearer',
+                type: 'http',
+            },
+        ],
+        url: '/v2/consumer/recipes/wdTagging',
+        ...options,
+        headers: {
+            'Content-Type': 'application/json',
+            ...options === null || options === void 0 ? void 0 : options.headers,
+        },
+    });
+};
 /**
  * Invalidates the cache of a specific resource.
  */