@civitai/client 0.2.0-beta.3 → 0.2.0-beta.30

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -13,7 +13,9 @@ export function createCivitaiClient(config) {
13
13
  client.interceptors.response.use(async (response) => {
14
14
  if (!response.ok) {
15
15
  const error = (response.status === 400
16
- ? await response.json()
16
+ ? await response
17
+ .json()
18
+ .catch(() => ({ status: response.status, detail: response.statusText }))
17
19
  : { status: response.status, detail: response.statusText });
18
20
  const newResponse = new Response(JSON.stringify(error), {
19
21
  status: response.status,
@@ -5,12 +5,29 @@ import type {
5
5
  HeadBlobData,
6
6
  HeadBlobResponses,
7
7
  HeadBlobErrors,
8
+ GetConsumerBlobUploadUrlData,
9
+ GetConsumerBlobUploadUrlResponses,
10
+ GetConsumerBlobUploadUrlErrors,
11
+ UploadConsumerBlobData,
12
+ UploadConsumerBlobResponses,
13
+ UploadConsumerBlobErrors,
14
+ GetBlobContentData,
15
+ GetBlobContentResponses,
16
+ GetBlobContentErrors,
17
+ GetBlockedContentData,
18
+ GetBlockedContentErrors,
8
19
  InvokeAgeClassificationStepTemplateData,
9
20
  InvokeAgeClassificationStepTemplateResponses,
10
21
  InvokeAgeClassificationStepTemplateErrors,
22
+ InvokeChatCompletionStepTemplateData,
23
+ InvokeChatCompletionStepTemplateResponses,
24
+ InvokeChatCompletionStepTemplateErrors,
11
25
  InvokeComfyStepTemplateData,
12
26
  InvokeComfyStepTemplateResponses,
13
27
  InvokeComfyStepTemplateErrors,
28
+ InvokeConvertImageStepTemplateData,
29
+ InvokeConvertImageStepTemplateResponses,
30
+ InvokeConvertImageStepTemplateErrors,
14
31
  InvokeEchoStepTemplateData,
15
32
  InvokeEchoStepTemplateResponses,
16
33
  InvokeEchoStepTemplateErrors,
@@ -23,15 +40,45 @@ import type {
23
40
  InvokeImageUploadStepTemplateData,
24
41
  InvokeImageUploadStepTemplateResponses,
25
42
  InvokeImageUploadStepTemplateErrors,
43
+ InvokeImageUpscalerStepTemplateData,
44
+ InvokeImageUpscalerStepTemplateResponses,
45
+ InvokeImageUpscalerStepTemplateErrors,
46
+ InvokeMediaHashStepTemplateData,
47
+ InvokeMediaHashStepTemplateResponses,
48
+ InvokeMediaHashStepTemplateErrors,
49
+ InvokeMediaRatingStepTemplateData,
50
+ InvokeMediaRatingStepTemplateResponses,
51
+ InvokeMediaRatingStepTemplateErrors,
52
+ InvokePreprocessImageStepTemplateData,
53
+ InvokePreprocessImageStepTemplateResponses,
54
+ InvokePreprocessImageStepTemplateErrors,
26
55
  InvokeTextToImageStepTemplateData,
27
56
  InvokeTextToImageStepTemplateResponses,
28
57
  InvokeTextToImageStepTemplateErrors,
58
+ InvokeTrainingStepTemplateData,
59
+ InvokeTrainingStepTemplateResponses,
60
+ InvokeTrainingStepTemplateErrors,
29
61
  InvokeVideoEnhancementStepTemplateData,
30
62
  InvokeVideoEnhancementStepTemplateResponses,
31
63
  InvokeVideoEnhancementStepTemplateErrors,
64
+ InvokeVideoFrameExtractionStepTemplateData,
65
+ InvokeVideoFrameExtractionStepTemplateResponses,
66
+ InvokeVideoFrameExtractionStepTemplateErrors,
32
67
  InvokeVideoGenStepTemplateData,
33
68
  InvokeVideoGenStepTemplateResponses,
34
69
  InvokeVideoGenStepTemplateErrors,
70
+ InvokeVideoInterpolationStepTemplateData,
71
+ InvokeVideoInterpolationStepTemplateResponses,
72
+ InvokeVideoInterpolationStepTemplateErrors,
73
+ InvokeVideoMetadataStepTemplateData,
74
+ InvokeVideoMetadataStepTemplateResponses,
75
+ InvokeVideoMetadataStepTemplateErrors,
76
+ InvokeVideoUpscalerStepTemplateData,
77
+ InvokeVideoUpscalerStepTemplateResponses,
78
+ InvokeVideoUpscalerStepTemplateErrors,
79
+ InvokeWdTaggingStepTemplateData,
80
+ InvokeWdTaggingStepTemplateResponses,
81
+ InvokeWdTaggingStepTemplateErrors,
35
82
  InvalidateResourceData,
36
83
  InvalidateResourceResponses,
37
84
  InvalidateResourceErrors,
@@ -92,7 +139,7 @@ export type Options<
92
139
  meta?: Record<string, unknown>;
93
140
  };
94
141
  /**
95
- * Get blob by ID. This will return the blob as a binary stream.
142
+ * Get blob by ID. This will redirect to a cacheable content URL.
96
143
  */
97
144
  export declare const getBlob: <ThrowOnError extends boolean = false>(
98
145
  options: Options<GetBlobData, ThrowOnError>
@@ -103,6 +150,47 @@ export declare const getBlob: <ThrowOnError extends boolean = false>(
103
150
  export declare const headBlob: <ThrowOnError extends boolean = false>(
104
151
  options: Options<HeadBlobData, ThrowOnError>
105
152
  ) => import('./client').RequestResult<HeadBlobResponses, HeadBlobErrors, ThrowOnError, 'fields'>;
153
+ /**
154
+ * Get a presigned URL for browser uploads.
155
+ * The returned URL points to POST /v2/consumer/blobs with a signature for authentication.
156
+ */
157
+ export declare const getConsumerBlobUploadUrl: <ThrowOnError extends boolean = false>(
158
+ options?: Options<GetConsumerBlobUploadUrlData, ThrowOnError>
159
+ ) => import('./client').RequestResult<
160
+ GetConsumerBlobUploadUrlResponses,
161
+ GetConsumerBlobUploadUrlErrors,
162
+ ThrowOnError,
163
+ 'fields'
164
+ >;
165
+ /**
166
+ * Upload a blob directly with on-the-fly moderation.
167
+ * Supports both authenticated requests (Bearer token) and presigned URLs (sig query param).
168
+ */
169
+ export declare const uploadConsumerBlob: <ThrowOnError extends boolean = false>(
170
+ options?: Options<UploadConsumerBlobData, ThrowOnError>
171
+ ) => import('./client').RequestResult<
172
+ UploadConsumerBlobResponses,
173
+ UploadConsumerBlobErrors,
174
+ ThrowOnError,
175
+ 'fields'
176
+ >;
177
+ /**
178
+ * Serves cacheable blob content using a deterministic encrypted token
179
+ */
180
+ export declare const getBlobContent: <ThrowOnError extends boolean = false>(
181
+ options: Options<GetBlobContentData, ThrowOnError>
182
+ ) => import('./client').RequestResult<
183
+ GetBlobContentResponses,
184
+ GetBlobContentErrors,
185
+ ThrowOnError,
186
+ 'fields'
187
+ >;
188
+ /**
189
+ * Serves a blocked placeholder image with the specified dimensions
190
+ */
191
+ export declare const getBlockedContent: <ThrowOnError extends boolean = false>(
192
+ options: Options<GetBlockedContentData, ThrowOnError>
193
+ ) => import('./client').RequestResult<unknown, GetBlockedContentErrors, ThrowOnError, 'fields'>;
106
194
  /**
107
195
  * Age classification
108
196
  * Detects minors in media content. Returns a boolean value indicating whether the content contains minors as well as details on where minors are detected.
@@ -115,6 +203,19 @@ export declare const invokeAgeClassificationStepTemplate: <ThrowOnError extends
115
203
  ThrowOnError,
116
204
  'fields'
117
205
  >;
206
+ /**
207
+ * ChatCompletion
208
+ * Generate chat completions using language models with support for text and image inputs.
209
+ * /// Compatible with OpenAI Chat Completions API format.
210
+ */
211
+ export declare const invokeChatCompletionStepTemplate: <ThrowOnError extends boolean = false>(
212
+ options?: Options<InvokeChatCompletionStepTemplateData, ThrowOnError>
213
+ ) => import('./client').RequestResult<
214
+ InvokeChatCompletionStepTemplateResponses,
215
+ InvokeChatCompletionStepTemplateErrors,
216
+ ThrowOnError,
217
+ 'fields'
218
+ >;
118
219
  /**
119
220
  * Comfy workflows
120
221
  * Runs a comfy workflow. Currently there are limited nodes available. Contact support for more information.
@@ -127,6 +228,17 @@ export declare const invokeComfyStepTemplate: <ThrowOnError extends boolean = fa
127
228
  ThrowOnError,
128
229
  'fields'
129
230
  >;
231
+ /**
232
+ * A workflow step that converts images to different formats and applies optional transforms.
233
+ */
234
+ export declare const invokeConvertImageStepTemplate: <ThrowOnError extends boolean = false>(
235
+ options?: Options<InvokeConvertImageStepTemplateData, ThrowOnError>
236
+ ) => import('./client').RequestResult<
237
+ InvokeConvertImageStepTemplateResponses,
238
+ InvokeConvertImageStepTemplateErrors,
239
+ ThrowOnError,
240
+ 'fields'
241
+ >;
130
242
  /**
131
243
  * Echo
132
244
  * A workflow step that takes a message string and returns it.
@@ -178,6 +290,46 @@ export declare const invokeImageUploadStepTemplate: <ThrowOnError extends boolea
178
290
  ThrowOnError,
179
291
  'fields'
180
292
  >;
293
+ export declare const invokeImageUpscalerStepTemplate: <ThrowOnError extends boolean = false>(
294
+ options?: Options<InvokeImageUpscalerStepTemplateData, ThrowOnError>
295
+ ) => import('./client').RequestResult<
296
+ InvokeImageUpscalerStepTemplateResponses,
297
+ InvokeImageUpscalerStepTemplateErrors,
298
+ ThrowOnError,
299
+ 'fields'
300
+ >;
301
+ /**
302
+ * MediaHash
303
+ * Generates perceptual hashes for media content to enable similarity detection and duplicate identification.
304
+ */
305
+ export declare const invokeMediaHashStepTemplate: <ThrowOnError extends boolean = false>(
306
+ options?: Options<InvokeMediaHashStepTemplateData, ThrowOnError>
307
+ ) => import('./client').RequestResult<
308
+ InvokeMediaHashStepTemplateResponses,
309
+ InvokeMediaHashStepTemplateErrors,
310
+ ThrowOnError,
311
+ 'fields'
312
+ >;
313
+ /**
314
+ * MediaRating
315
+ * Performs NSFW level detection and content safety classification on media content.
316
+ */
317
+ export declare const invokeMediaRatingStepTemplate: <ThrowOnError extends boolean = false>(
318
+ options?: Options<InvokeMediaRatingStepTemplateData, ThrowOnError>
319
+ ) => import('./client').RequestResult<
320
+ InvokeMediaRatingStepTemplateResponses,
321
+ InvokeMediaRatingStepTemplateErrors,
322
+ ThrowOnError,
323
+ 'fields'
324
+ >;
325
+ export declare const invokePreprocessImageStepTemplate: <ThrowOnError extends boolean = false>(
326
+ options?: Options<InvokePreprocessImageStepTemplateData, ThrowOnError>
327
+ ) => import('./client').RequestResult<
328
+ InvokePreprocessImageStepTemplateResponses,
329
+ InvokePreprocessImageStepTemplateErrors,
330
+ ThrowOnError,
331
+ 'fields'
332
+ >;
181
333
  /**
182
334
  * TextToImage
183
335
  * Generate images using text as input
@@ -190,6 +342,20 @@ export declare const invokeTextToImageStepTemplate: <ThrowOnError extends boolea
190
342
  ThrowOnError,
191
343
  'fields'
192
344
  >;
345
+ /**
346
+ * Training
347
+ * A workflow step for training machine learning models (LoRAs, checkpoints, etc.)
348
+ * /// on various types of data (images, videos, audio). This replaces ImageResourceTraining
349
+ * /// with a cleaner architecture that creates one job per epoch instead of a single monolithic job.
350
+ */
351
+ export declare const invokeTrainingStepTemplate: <ThrowOnError extends boolean = false>(
352
+ options?: Options<InvokeTrainingStepTemplateData, ThrowOnError>
353
+ ) => import('./client').RequestResult<
354
+ InvokeTrainingStepTemplateResponses,
355
+ InvokeTrainingStepTemplateErrors,
356
+ ThrowOnError,
357
+ 'fields'
358
+ >;
193
359
  /**
194
360
  * Upscale videos and/or interpolate frames
195
361
  */
@@ -201,6 +367,19 @@ export declare const invokeVideoEnhancementStepTemplate: <ThrowOnError extends b
201
367
  ThrowOnError,
202
368
  'fields'
203
369
  >;
370
+ /**
371
+ * Video Frame Extraction
372
+ * Extracts unique frames from a video at a specified rate using perceptual hashing to filter out duplicate/similar frames.
373
+ * /// Useful for generating video previews, storyboards, or analyzing video content.
374
+ */
375
+ export declare const invokeVideoFrameExtractionStepTemplate: <ThrowOnError extends boolean = false>(
376
+ options?: Options<InvokeVideoFrameExtractionStepTemplateData, ThrowOnError>
377
+ ) => import('./client').RequestResult<
378
+ InvokeVideoFrameExtractionStepTemplateResponses,
379
+ InvokeVideoFrameExtractionStepTemplateErrors,
380
+ ThrowOnError,
381
+ 'fields'
382
+ >;
204
383
  /**
205
384
  * Video generation
206
385
  * Generate videos through text/image inputs using any of our supported engines
@@ -213,6 +392,51 @@ export declare const invokeVideoGenStepTemplate: <ThrowOnError extends boolean =
213
392
  ThrowOnError,
214
393
  'fields'
215
394
  >;
395
+ /**
396
+ * Interpolate videos using VFI Mamba
397
+ */
398
+ export declare const invokeVideoInterpolationStepTemplate: <ThrowOnError extends boolean = false>(
399
+ options?: Options<InvokeVideoInterpolationStepTemplateData, ThrowOnError>
400
+ ) => import('./client').RequestResult<
401
+ InvokeVideoInterpolationStepTemplateResponses,
402
+ InvokeVideoInterpolationStepTemplateErrors,
403
+ ThrowOnError,
404
+ 'fields'
405
+ >;
406
+ /**
407
+ * Extract metadata from videos including width, height, FPS, and duration
408
+ */
409
+ export declare const invokeVideoMetadataStepTemplate: <ThrowOnError extends boolean = false>(
410
+ options?: Options<InvokeVideoMetadataStepTemplateData, ThrowOnError>
411
+ ) => import('./client').RequestResult<
412
+ InvokeVideoMetadataStepTemplateResponses,
413
+ InvokeVideoMetadataStepTemplateErrors,
414
+ ThrowOnError,
415
+ 'fields'
416
+ >;
417
+ /**
418
+ * Upscale videos using FlashVSR
419
+ */
420
+ export declare const invokeVideoUpscalerStepTemplate: <ThrowOnError extends boolean = false>(
421
+ options?: Options<InvokeVideoUpscalerStepTemplateData, ThrowOnError>
422
+ ) => import('./client').RequestResult<
423
+ InvokeVideoUpscalerStepTemplateResponses,
424
+ InvokeVideoUpscalerStepTemplateErrors,
425
+ ThrowOnError,
426
+ 'fields'
427
+ >;
428
+ /**
429
+ * WDTagging
430
+ * Performs Waifu Diffusion tagging on media content to identify characteristics, objects, and themes.
431
+ */
432
+ export declare const invokeWdTaggingStepTemplate: <ThrowOnError extends boolean = false>(
433
+ options?: Options<InvokeWdTaggingStepTemplateData, ThrowOnError>
434
+ ) => import('./client').RequestResult<
435
+ InvokeWdTaggingStepTemplateResponses,
436
+ InvokeWdTaggingStepTemplateErrors,
437
+ ThrowOnError,
438
+ 'fields'
439
+ >;
216
440
  /**
217
441
  * Invalidates the cache of a specific resource.
218
442
  */
@@ -1,7 +1,7 @@
1
1
  // This file is auto-generated by @hey-api/openapi-ts
2
2
  import { client as _heyApiClient } from './client.gen';
3
3
  /**
4
- * Get blob by ID. This will return the blob as a binary stream.
4
+ * Get blob by ID. This will redirect to a cacheable content URL.
5
5
  */
6
6
  export const getBlob = (options) => {
7
7
  var _a;
@@ -32,6 +32,72 @@ export const headBlob = (options) => {
32
32
  ...options,
33
33
  });
34
34
  };
35
+ /**
36
+ * Get a presigned URL for browser uploads.
37
+ * The returned URL points to POST /v2/consumer/blobs with a signature for authentication.
38
+ */
39
+ export const getConsumerBlobUploadUrl = (options) => {
40
+ var _a;
41
+ return ((_a = options === null || options === void 0 ? void 0 : options.client) !== null && _a !== void 0 ? _a : _heyApiClient).get({
42
+ security: [
43
+ {
44
+ scheme: 'bearer',
45
+ type: 'http',
46
+ },
47
+ ],
48
+ url: '/v2/consumer/blobs/upload',
49
+ ...options,
50
+ });
51
+ };
52
+ /**
53
+ * Upload a blob directly with on-the-fly moderation.
54
+ * Supports both authenticated requests (Bearer token) and presigned URLs (sig query param).
55
+ */
56
+ export const uploadConsumerBlob = (options) => {
57
+ var _a;
58
+ return ((_a = options === null || options === void 0 ? void 0 : options.client) !== null && _a !== void 0 ? _a : _heyApiClient).post({
59
+ security: [
60
+ {
61
+ scheme: 'bearer',
62
+ type: 'http',
63
+ },
64
+ ],
65
+ url: '/v2/consumer/blobs',
66
+ ...options,
67
+ });
68
+ };
69
+ /**
70
+ * Serves cacheable blob content using a deterministic encrypted token
71
+ */
72
+ export const getBlobContent = (options) => {
73
+ var _a;
74
+ return ((_a = options.client) !== null && _a !== void 0 ? _a : _heyApiClient).get({
75
+ security: [
76
+ {
77
+ scheme: 'bearer',
78
+ type: 'http',
79
+ },
80
+ ],
81
+ url: '/v2/consumer/blobs/content/{encryptedToken}',
82
+ ...options,
83
+ });
84
+ };
85
+ /**
86
+ * Serves a blocked placeholder image with the specified dimensions
87
+ */
88
+ export const getBlockedContent = (options) => {
89
+ var _a;
90
+ return ((_a = options.client) !== null && _a !== void 0 ? _a : _heyApiClient).get({
91
+ security: [
92
+ {
93
+ scheme: 'bearer',
94
+ type: 'http',
95
+ },
96
+ ],
97
+ url: '/v2/consumer/blobs/blocked/{encryptedToken}',
98
+ ...options,
99
+ });
100
+ };
35
101
  /**
36
102
  * Age classification
37
103
  * Detects minors in media content. Returns a boolean value indicating whether the content contains minors as well as details on where minors are detected.
@@ -53,6 +119,28 @@ export const invokeAgeClassificationStepTemplate = (options) => {
53
119
  },
54
120
  });
55
121
  };
122
+ /**
123
+ * ChatCompletion
124
+ * Generate chat completions using language models with support for text and image inputs.
125
+ * /// Compatible with OpenAI Chat Completions API format.
126
+ */
127
+ export const invokeChatCompletionStepTemplate = (options) => {
128
+ var _a;
129
+ return ((_a = options === null || options === void 0 ? void 0 : options.client) !== null && _a !== void 0 ? _a : _heyApiClient).post({
130
+ security: [
131
+ {
132
+ scheme: 'bearer',
133
+ type: 'http',
134
+ },
135
+ ],
136
+ url: '/v2/consumer/recipes/chatCompletion',
137
+ ...options,
138
+ headers: {
139
+ 'Content-Type': 'application/json',
140
+ ...options === null || options === void 0 ? void 0 : options.headers,
141
+ },
142
+ });
143
+ };
56
144
  /**
57
145
  * Comfy workflows
58
146
  * Runs a comfy workflow. Currently there are limited nodes available. Contact support for more information.
@@ -74,6 +162,26 @@ export const invokeComfyStepTemplate = (options) => {
74
162
  },
75
163
  });
76
164
  };
165
+ /**
166
+ * A workflow step that converts images to different formats and applies optional transforms.
167
+ */
168
+ export const invokeConvertImageStepTemplate = (options) => {
169
+ var _a;
170
+ return ((_a = options === null || options === void 0 ? void 0 : options.client) !== null && _a !== void 0 ? _a : _heyApiClient).post({
171
+ security: [
172
+ {
173
+ scheme: 'bearer',
174
+ type: 'http',
175
+ },
176
+ ],
177
+ url: '/v2/consumer/recipes/convertImage',
178
+ ...options,
179
+ headers: {
180
+ 'Content-Type': 'application/json',
181
+ ...options === null || options === void 0 ? void 0 : options.headers,
182
+ },
183
+ });
184
+ };
77
185
  /**
78
186
  * Echo
79
187
  * A workflow step that takes a message string and returns it.
@@ -159,6 +267,82 @@ export const invokeImageUploadStepTemplate = (options) => {
159
267
  },
160
268
  });
161
269
  };
270
+ export const invokeImageUpscalerStepTemplate = (options) => {
271
+ var _a;
272
+ return ((_a = options === null || options === void 0 ? void 0 : options.client) !== null && _a !== void 0 ? _a : _heyApiClient).post({
273
+ security: [
274
+ {
275
+ scheme: 'bearer',
276
+ type: 'http',
277
+ },
278
+ ],
279
+ url: '/v2/consumer/recipes/imageUpscaler',
280
+ ...options,
281
+ headers: {
282
+ 'Content-Type': 'application/json',
283
+ ...options === null || options === void 0 ? void 0 : options.headers,
284
+ },
285
+ });
286
+ };
287
+ /**
288
+ * MediaHash
289
+ * Generates perceptual hashes for media content to enable similarity detection and duplicate identification.
290
+ */
291
+ export const invokeMediaHashStepTemplate = (options) => {
292
+ var _a;
293
+ return ((_a = options === null || options === void 0 ? void 0 : options.client) !== null && _a !== void 0 ? _a : _heyApiClient).post({
294
+ security: [
295
+ {
296
+ scheme: 'bearer',
297
+ type: 'http',
298
+ },
299
+ ],
300
+ url: '/v2/consumer/recipes/mediaHash',
301
+ ...options,
302
+ headers: {
303
+ 'Content-Type': 'application/json',
304
+ ...options === null || options === void 0 ? void 0 : options.headers,
305
+ },
306
+ });
307
+ };
308
+ /**
309
+ * MediaRating
310
+ * Performs NSFW level detection and content safety classification on media content.
311
+ */
312
+ export const invokeMediaRatingStepTemplate = (options) => {
313
+ var _a;
314
+ return ((_a = options === null || options === void 0 ? void 0 : options.client) !== null && _a !== void 0 ? _a : _heyApiClient).post({
315
+ security: [
316
+ {
317
+ scheme: 'bearer',
318
+ type: 'http',
319
+ },
320
+ ],
321
+ url: '/v2/consumer/recipes/mediaRating',
322
+ ...options,
323
+ headers: {
324
+ 'Content-Type': 'application/json',
325
+ ...options === null || options === void 0 ? void 0 : options.headers,
326
+ },
327
+ });
328
+ };
329
+ export const invokePreprocessImageStepTemplate = (options) => {
330
+ var _a;
331
+ return ((_a = options === null || options === void 0 ? void 0 : options.client) !== null && _a !== void 0 ? _a : _heyApiClient).post({
332
+ security: [
333
+ {
334
+ scheme: 'bearer',
335
+ type: 'http',
336
+ },
337
+ ],
338
+ url: '/v2/consumer/recipes/preprocessImage',
339
+ ...options,
340
+ headers: {
341
+ 'Content-Type': 'application/json',
342
+ ...options === null || options === void 0 ? void 0 : options.headers,
343
+ },
344
+ });
345
+ };
162
346
  /**
163
347
  * TextToImage
164
348
  * Generate images using text as input
@@ -180,6 +364,29 @@ export const invokeTextToImageStepTemplate = (options) => {
180
364
  },
181
365
  });
182
366
  };
367
+ /**
368
+ * Training
369
+ * A workflow step for training machine learning models (LoRAs, checkpoints, etc.)
370
+ * /// on various types of data (images, videos, audio). This replaces ImageResourceTraining
371
+ * /// with a cleaner architecture that creates one job per epoch instead of a single monolithic job.
372
+ */
373
+ export const invokeTrainingStepTemplate = (options) => {
374
+ var _a;
375
+ return ((_a = options === null || options === void 0 ? void 0 : options.client) !== null && _a !== void 0 ? _a : _heyApiClient).post({
376
+ security: [
377
+ {
378
+ scheme: 'bearer',
379
+ type: 'http',
380
+ },
381
+ ],
382
+ url: '/v2/consumer/recipes/training',
383
+ ...options,
384
+ headers: {
385
+ 'Content-Type': 'application/json',
386
+ ...options === null || options === void 0 ? void 0 : options.headers,
387
+ },
388
+ });
389
+ };
183
390
  /**
184
391
  * Upscale videos and/or interpolate frames
185
392
  */
@@ -200,6 +407,28 @@ export const invokeVideoEnhancementStepTemplate = (options) => {
200
407
  },
201
408
  });
202
409
  };
410
+ /**
411
+ * Video Frame Extraction
412
+ * Extracts unique frames from a video at a specified rate using perceptual hashing to filter out duplicate/similar frames.
413
+ * /// Useful for generating video previews, storyboards, or analyzing video content.
414
+ */
415
+ export const invokeVideoFrameExtractionStepTemplate = (options) => {
416
+ var _a;
417
+ return ((_a = options === null || options === void 0 ? void 0 : options.client) !== null && _a !== void 0 ? _a : _heyApiClient).post({
418
+ security: [
419
+ {
420
+ scheme: 'bearer',
421
+ type: 'http',
422
+ },
423
+ ],
424
+ url: '/v2/consumer/recipes/videoFrameExtraction',
425
+ ...options,
426
+ headers: {
427
+ 'Content-Type': 'application/json',
428
+ ...options === null || options === void 0 ? void 0 : options.headers,
429
+ },
430
+ });
431
+ };
203
432
  /**
204
433
  * Video generation
205
434
  * Generate videos through text/image inputs using any of our supported engines
@@ -221,6 +450,87 @@ export const invokeVideoGenStepTemplate = (options) => {
221
450
  },
222
451
  });
223
452
  };
453
+ /**
454
+ * Interpolate videos using VFI Mamba
455
+ */
456
+ export const invokeVideoInterpolationStepTemplate = (options) => {
457
+ var _a;
458
+ return ((_a = options === null || options === void 0 ? void 0 : options.client) !== null && _a !== void 0 ? _a : _heyApiClient).post({
459
+ security: [
460
+ {
461
+ scheme: 'bearer',
462
+ type: 'http',
463
+ },
464
+ ],
465
+ url: '/v2/consumer/recipes/videoInterpolation',
466
+ ...options,
467
+ headers: {
468
+ 'Content-Type': 'application/json',
469
+ ...options === null || options === void 0 ? void 0 : options.headers,
470
+ },
471
+ });
472
+ };
473
+ /**
474
+ * Extract metadata from videos including width, height, FPS, and duration
475
+ */
476
+ export const invokeVideoMetadataStepTemplate = (options) => {
477
+ var _a;
478
+ return ((_a = options === null || options === void 0 ? void 0 : options.client) !== null && _a !== void 0 ? _a : _heyApiClient).post({
479
+ security: [
480
+ {
481
+ scheme: 'bearer',
482
+ type: 'http',
483
+ },
484
+ ],
485
+ url: '/v2/consumer/recipes/videoMetadata',
486
+ ...options,
487
+ headers: {
488
+ 'Content-Type': 'application/json',
489
+ ...options === null || options === void 0 ? void 0 : options.headers,
490
+ },
491
+ });
492
+ };
493
+ /**
494
+ * Upscale videos using FlashVSR
495
+ */
496
+ export const invokeVideoUpscalerStepTemplate = (options) => {
497
+ var _a;
498
+ return ((_a = options === null || options === void 0 ? void 0 : options.client) !== null && _a !== void 0 ? _a : _heyApiClient).post({
499
+ security: [
500
+ {
501
+ scheme: 'bearer',
502
+ type: 'http',
503
+ },
504
+ ],
505
+ url: '/v2/consumer/recipes/videoUpscaler',
506
+ ...options,
507
+ headers: {
508
+ 'Content-Type': 'application/json',
509
+ ...options === null || options === void 0 ? void 0 : options.headers,
510
+ },
511
+ });
512
+ };
513
+ /**
514
+ * WDTagging
515
+ * Performs Waifu Diffusion tagging on media content to identify characteristics, objects, and themes.
516
+ */
517
+ export const invokeWdTaggingStepTemplate = (options) => {
518
+ var _a;
519
+ return ((_a = options === null || options === void 0 ? void 0 : options.client) !== null && _a !== void 0 ? _a : _heyApiClient).post({
520
+ security: [
521
+ {
522
+ scheme: 'bearer',
523
+ type: 'http',
524
+ },
525
+ ],
526
+ url: '/v2/consumer/recipes/wdTagging',
527
+ ...options,
528
+ headers: {
529
+ 'Content-Type': 'application/json',
530
+ ...options === null || options === void 0 ? void 0 : options.headers,
531
+ },
532
+ });
533
+ };
224
534
  /**
225
535
  * Invalidates the cache of a specific resource.
226
536
  */