@genfeedai/types 0.1.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/chunk-7NJUD2WZ.mjs +1 -0
- package/dist/chunk-KXAKQO3U.js +11 -0
- package/dist/chunk-OGJX4J7H.mjs +808 -0
- package/dist/chunk-P7K5LM6V.js +825 -0
- package/dist/chunk-RNGYPX4W.js +2 -0
- package/dist/chunk-WT2F5CAF.mjs +9 -0
- package/dist/comfyui.d.mts +91 -0
- package/dist/comfyui.d.ts +91 -0
- package/dist/comfyui.js +4 -0
- package/dist/comfyui.mjs +1 -0
- package/dist/index.d.mts +268 -0
- package/dist/index.d.ts +268 -0
- package/dist/index.js +185 -0
- package/dist/index.mjs +109 -0
- package/dist/nodes.d.mts +19 -0
- package/dist/nodes.d.ts +19 -0
- package/dist/nodes.js +54 -0
- package/dist/nodes.mjs +1 -0
- package/dist/replicate.d.mts +1092 -0
- package/dist/replicate.d.ts +1092 -0
- package/dist/replicate.js +2 -0
- package/dist/replicate.mjs +1 -0
- package/dist/union-CtYAY8Mv.d.mts +541 -0
- package/dist/union-CtYAY8Mv.d.ts +541 -0
- package/dist/workflow-CGpVuRPg.d.mts +99 -0
- package/dist/workflow-Di5vTrXa.d.ts +99 -0
- package/dist/workflow.d.mts +3 -0
- package/dist/workflow.d.ts +3 -0
- package/dist/workflow.js +10 -0
- package/dist/workflow.mjs +1 -0
- package/package.json +60 -0
- package/src/replicate/schemas.json +6636 -0
|
@@ -0,0 +1 @@
|
|
|
1
|
+
|
|
@@ -0,0 +1,541 @@
|
|
|
1
|
+
import { Node, Edge } from '@xyflow/react';
|
|
2
|
+
|
|
3
|
+
/** Top-level categories used to classify workflow templates. */
declare enum TemplateCategory {
    IMAGE = "image",
    VIDEO = "video",
    AUDIO = "audio",
    FULL_PIPELINE = "full-pipeline"
}
/**
 * Reframe node types (Luma)
 */
declare enum ReframeNodeType {
    REFRAME = "reframe",
    LUMA_REFRAME_IMAGE = "lumaReframeImage",
    LUMA_REFRAME_VIDEO = "lumaReframeVideo"
}
/**
 * Upscale node types (Topaz)
 */
declare enum UpscaleNodeType {
    UPSCALE = "upscale",
    TOPAZ_IMAGE_UPSCALE = "topazImageUpscale",
    TOPAZ_VIDEO_UPSCALE = "topazVideoUpscale"
}
/** Kling quality tiers; wire values are "std" and "pro". */
declare enum KlingQuality {
    STANDARD = "std",
    PRO = "pro"
}
/**
 * Processing node types - all nodes handled by the processing processor.
 * Superset of the ReframeNodeType and UpscaleNodeType values plus the
 * remaining media-processing node kinds.
 */
declare enum ProcessingNodeType {
    REFRAME = "reframe",
    LUMA_REFRAME_IMAGE = "lumaReframeImage",
    LUMA_REFRAME_VIDEO = "lumaReframeVideo",
    UPSCALE = "upscale",
    TOPAZ_IMAGE_UPSCALE = "topazImageUpscale",
    TOPAZ_VIDEO_UPSCALE = "topazVideoUpscale",
    VIDEO_FRAME_EXTRACT = "videoFrameExtract",
    LIP_SYNC = "lipSync",
    TEXT_TO_SPEECH = "textToSpeech",
    VOICE_CHANGE = "voiceChange",
    SUBTITLE = "subtitle",
    VIDEO_STITCH = "videoStitch",
    WORKFLOW_REF = "workflowRef"
}
|
|
47
|
+
|
|
48
|
+
/** Data kinds that a node handle (port) can carry. */
declare enum HandleTypeEnum {
    IMAGE = "image",
    TEXT = "text",
    VIDEO = "video",
    NUMBER = "number",
    AUDIO = "audio"
}
/** String-literal union derived from HandleTypeEnum via a template-literal type. */
type HandleType = `${HandleTypeEnum}`;
/** Describes a single input/output handle on a workflow node. */
interface HandleDefinition {
    id: string;
    type: HandleType;
    label: string;
    /** Whether the handle accepts multiple connections. */
    multiple?: boolean;
    required?: boolean;
    /** True if handle was dynamically generated from model schema */
    fromSchema?: boolean;
}
/**
 * Maps a handle type to the handle types it may be connected with.
 * NOTE(review): direction (source vs. target keying) is not visible here —
 * confirm against the implementation that declares this constant.
 */
declare const CONNECTION_RULES: Record<HandleType, HandleType[]>;
|
|
66
|
+
|
|
67
|
+
/** Model-hosting providers supported by the platform. */
declare enum ProviderTypeEnum {
    REPLICATE = "replicate",
    FAL = "fal",
    HUGGINGFACE = "huggingface",
    GENFEED_AI = "genfeed-ai"
}
/** String-literal union derived from ProviderTypeEnum. */
type ProviderType = `${ProviderTypeEnum}`;
/** Generation modalities a model can perform. */
declare enum ModelCapabilityEnum {
    TEXT_TO_IMAGE = "text-to-image",
    IMAGE_TO_IMAGE = "image-to-image",
    TEXT_TO_VIDEO = "text-to-video",
    IMAGE_TO_VIDEO = "image-to-video",
    TEXT_GENERATION = "text-generation"
}
/** String-literal union derived from ModelCapabilityEnum. */
type ModelCapability = `${ModelCapabilityEnum}`;
/** Higher-level use cases a model is suited for. */
declare enum ModelUseCaseEnum {
    STYLE_TRANSFER = "style-transfer",
    CHARACTER_CONSISTENT = "character-consistent",
    IMAGE_VARIATION = "image-variation",
    INPAINTING = "inpainting",
    UPSCALE = "upscale",
    GENERAL = "general"
}
/** String-literal union derived from ModelUseCaseEnum. */
type ModelUseCase = `${ModelUseCaseEnum}`;
/** Catalog entry describing a model offered by a provider. */
interface ProviderModel {
    id: string;
    displayName: string;
    provider: ProviderType;
    capabilities: ModelCapability[];
    description?: string;
    thumbnail?: string;
    /** Pricing info as display text — format not specified here. */
    pricing?: string;
    /** JSON-schema-like description of the model's inputs (untyped here). */
    inputSchema?: Record<string, unknown>;
    /** Component schemas containing enum definitions (aspect_ratio, duration, etc.) */
    componentSchemas?: Record<string, unknown>;
    /** Use cases this model supports (style transfer, upscale, etc.) */
    useCases?: ModelUseCase[];
}
/** A model the user has selected on a node, with enough info to run it. */
interface SelectedModel {
    /** Alias for modelId - used by hooks that expect id */
    id?: string;
    provider: ProviderType;
    modelId: string;
    displayName: string;
    inputSchema?: Record<string, unknown>;
    /** Component schemas containing enum definitions (aspect_ratio, duration, etc.) */
    componentSchemas?: Record<string, unknown>;
}
|
|
115
|
+
|
|
116
|
+
/** Every node kind that can appear on the workflow canvas. */
declare enum NodeTypeEnum {
    IMAGE_INPUT = "imageInput",
    AUDIO_INPUT = "audioInput",
    VIDEO_INPUT = "videoInput",
    PROMPT = "prompt",
    PROMPT_CONSTRUCTOR = "promptConstructor",
    IMAGE_GEN = "imageGen",
    VIDEO_GEN = "videoGen",
    LLM = "llm",
    LIP_SYNC = "lipSync",
    VOICE_CHANGE = "voiceChange",
    TEXT_TO_SPEECH = "textToSpeech",
    TRANSCRIBE = "transcribe",
    MOTION_CONTROL = "motionControl",
    RESIZE = "resize",
    ANIMATION = "animation",
    VIDEO_STITCH = "videoStitch",
    VIDEO_TRIM = "videoTrim",
    VIDEO_FRAME_EXTRACT = "videoFrameExtract",
    REFRAME = "reframe",
    UPSCALE = "upscale",
    IMAGE_GRID_SPLIT = "imageGridSplit",
    ANNOTATION = "annotation",
    SUBTITLE = "subtitle",
    OUTPUT_GALLERY = "outputGallery",
    IMAGE_COMPARE = "imageCompare",
    DOWNLOAD = "download",
    WORKFLOW_INPUT = "workflowInput",
    WORKFLOW_OUTPUT = "workflowOutput",
    WORKFLOW_REF = "workflowRef"
}
/** String-literal union derived from NodeTypeEnum. */
type NodeType = `${NodeTypeEnum}`;
/** Palette/grouping category for a node. */
declare enum NodeCategoryEnum {
    INPUT = "input",
    AI = "ai",
    PROCESSING = "processing",
    OUTPUT = "output",
    COMPOSITION = "composition"
}
/** String-literal union derived from NodeCategoryEnum. */
type NodeCategory = `${NodeCategoryEnum}`;
/** Execution lifecycle states of a node. */
declare enum NodeStatusEnum {
    IDLE = "idle",
    PENDING = "pending",
    PROCESSING = "processing",
    COMPLETE = "complete",
    ERROR = "error"
}
/** String-literal union derived from NodeStatusEnum. */
type NodeStatus = `${NodeStatusEnum}`;
|
|
164
|
+
/**
 * Fields shared by every node's data payload.
 * Extends Record<string, unknown> for React Flow node-data compatibility.
 */
interface BaseNodeData extends Record<string, unknown> {
    /** Display label shown on the node. */
    label: string;
    /** Current execution state (idle/pending/processing/complete/error). */
    status: NodeStatus;
    /** Error message when status is "error". */
    error?: string;
    /** Progress value while processing — scale (0–1 vs 0–100) not specified here. */
    progress?: number;
    /** When locked, cachedOutput is presumably reused instead of re-running — confirm with executor. */
    isLocked?: boolean;
    cachedOutput?: unknown;
    lockTimestamp?: number;
    /** Free-form user note attached to the node. */
    comment?: string;
    /** Custom display color for the node. */
    color?: string;
}
|
|
175
|
+
|
|
176
|
+
/** Data for an image input node; media comes from an upload or a URL. */
interface ImageInputNodeData extends BaseNodeData {
    /** Image reference (string form — URL or data URI; exact encoding not visible here). */
    image: string | null;
    filename: string | null;
    dimensions: {
        width: number;
        height: number;
    } | null;
    source: 'upload' | 'url';
    /** Original URL when source is 'url'. */
    url?: string;
}
/** Data for a plain text prompt node. */
interface PromptNodeData extends BaseNodeData {
    prompt: string;
    /** Named substitution values available to the prompt. */
    variables: Record<string, string>;
}
/** Data for an audio input node. */
interface AudioInputNodeData extends BaseNodeData {
    audio: string | null;
    filename: string | null;
    /** Duration in seconds — presumed unit; confirm with uploader. */
    duration: number | null;
    source: 'upload' | 'url';
    url?: string;
}
/** Data for a video input node. */
interface VideoInputNodeData extends BaseNodeData {
    video: string | null;
    filename: string | null;
    duration: number | null;
    dimensions: {
        width: number;
        height: number;
    } | null;
    source: 'upload' | 'url';
    url?: string;
}
/** A variable exposed by an upstream node that a template can reference. */
interface AvailableVariable {
    name: string;
    value: string;
    /** ID of the node that provides this variable. */
    nodeId: string;
}
/** Data for a prompt-constructor node that fills a template with variables. */
interface PromptConstructorNodeData extends BaseNodeData {
    template: string;
    /** Rendered result, or null until constructed. */
    outputText: string | null;
    /** Variable names referenced by the template that had no value. */
    unresolvedVars: string[];
}
|
|
218
|
+
|
|
219
|
+
/** Built-in image generation model identifiers. */
type ImageModel = 'nano-banana' | 'nano-banana-pro';
/** Built-in video generation model identifiers (Veo). */
type VideoModel = 'veo-3.1-fast' | 'veo-3.1';
/** Aspect ratios accepted by generation nodes. */
type AspectRatio = '1:1' | '16:9' | '9:16' | '4:3' | '3:4' | '4:5' | '5:4' | '2:3' | '3:2' | '21:9';
/** Image output resolution tiers. */
type Resolution = '1K' | '2K' | '4K';
/** Video output resolutions. */
type VideoResolution = '720p' | '1080p';
/** Video clip length in seconds. */
type VideoDuration = 4 | 6 | 8;
/** Image file formats for generated output. */
type OutputFormat = 'jpg' | 'png' | 'webp';
/** Aspect ratios supported by the Luma reframe models. */
type LumaAspectRatio = '1:1' | '3:4' | '4:3' | '9:16' | '16:9' | '9:21' | '21:9';
/** Luma models used for reframing. */
type LumaReframeModel = 'photon-flash-1' | 'photon-1';
/** A 2D position — used below for reframe/resize grid placement. */
interface GridPosition {
    x: number;
    y: number;
}
/** Topaz enhancement model names (display-style identifiers). */
type TopazEnhanceModel = 'Standard V2' | 'Low Resolution V2' | 'CGI' | 'High Fidelity V2' | 'Text Refine';
/** Topaz upscale multiplier options. */
type TopazUpscaleFactor = 'None' | '2x' | '4x' | '6x';
/** Target resolutions for Topaz video upscaling. */
type TopazVideoResolution = '720p' | '1080p' | '4k';
/** Target frame rates for Topaz video upscaling. */
type TopazVideoFPS = 15 | 24 | 30 | 60;
|
|
236
|
+
/** Data for an AI image-generation node. */
interface ImageGenNodeData extends BaseNodeData {
    /** Reference/source images fed into the model. */
    inputImages: string[];
    inputPrompt: string | null;
    /** Primary generated image; outputImages holds the full result set. */
    outputImage: string | null;
    outputImages: string[];
    model: ImageModel;
    aspectRatio: AspectRatio;
    resolution: Resolution;
    outputFormat: OutputFormat;
    /** Optional provider/model override selected from the catalog. */
    provider?: ProviderType;
    selectedModel?: SelectedModel;
    /** Parameter values keyed by the selected model's input schema. */
    schemaParams?: Record<string, unknown>;
    /** ID of the in-flight or last generation job. */
    jobId: string | null;
}
/** Data for an AI video-generation node. */
interface VideoGenNodeData extends BaseNodeData {
    /** First-frame conditioning image. */
    inputImage: string | null;
    /** Last-frame conditioning image. */
    lastFrame: string | null;
    referenceImages: string[];
    inputPrompt: string | null;
    negativePrompt: string;
    outputVideo: string | null;
    model: VideoModel;
    duration: VideoDuration;
    aspectRatio: AspectRatio;
    resolution: VideoResolution;
    generateAudio: boolean;
    provider?: ProviderType;
    selectedModel?: SelectedModel;
    schemaParams?: Record<string, unknown>;
    jobId: string | null;
}
/** Built-in text (LLM) model identifiers. */
type TextModel = 'meta-llama-3.1-405b-instruct' | 'claude-4.5-sonnet';
/** Data for an LLM text-generation node. */
interface LLMNodeData extends BaseNodeData {
    inputPrompt: string | null;
    outputText: string | null;
    model: TextModel;
    systemPrompt: string;
    /** Standard sampling parameters passed to the model. */
    temperature: number;
    maxTokens: number;
    topP: number;
    provider?: ProviderType;
    selectedModel?: SelectedModel;
    schemaParams?: Record<string, unknown>;
    jobId: string | null;
}
|
|
281
|
+
/** Lip-sync model identifiers (provider/model slugs). */
type LipSyncModel = 'sync/lipsync-2-pro' | 'sync/lipsync-2' | 'pixverse/lipsync' | 'bytedance/omni-human' | 'veed/fabric-1.0';
/** How audio/video length mismatches are reconciled during lip sync. */
type LipSyncMode = 'loop' | 'bounce' | 'cut_off' | 'silence' | 'remap';
/** Data for a lip-sync node (image or video driven by an audio track). */
interface LipSyncNodeData extends BaseNodeData {
    inputImage: string | null;
    inputVideo: string | null;
    inputAudio: string | null;
    outputVideo: string | null;
    model: LipSyncModel;
    syncMode: LipSyncMode;
    temperature: number;
    activeSpeaker: boolean;
    jobId: string | null;
}
/** Data for a voice-change node that replaces a video's audio track. */
interface VoiceChangeNodeData extends BaseNodeData {
    inputVideo: string | null;
    inputAudio: string | null;
    outputVideo: string | null;
    /** Keep the original track and mix it at audioMixLevel — confirm mix scale with processor. */
    preserveOriginalAudio: boolean;
    audioMixLevel: number;
    jobId: string | null;
}
/** Text-to-speech backends. */
type TTSProvider = 'elevenlabs' | 'openai';
/** Named TTS voices (ElevenLabs-style voice names). */
type TTSVoice = 'rachel' | 'drew' | 'clyde' | 'paul' | 'domi' | 'dave' | 'fin' | 'sarah' | 'antoni' | 'thomas' | 'charlie' | 'george' | 'emily' | 'elli' | 'callum' | 'patrick' | 'harry' | 'liam' | 'dorothy' | 'josh' | 'arnold' | 'charlotte' | 'matilda' | 'matthew' | 'james' | 'joseph' | 'jeremy' | 'michael' | 'ethan' | 'gigi' | 'freya' | 'grace' | 'daniel' | 'lily' | 'serena' | 'adam' | 'nicole' | 'jessie' | 'ryan' | 'sam' | 'glinda' | 'giovanni' | 'mimi';
/** Data for a text-to-speech node. */
interface TextToSpeechNodeData extends BaseNodeData {
    inputText: string | null;
    outputAudio: string | null;
    provider: TTSProvider;
    voice: TTSVoice;
    /** Voice tuning parameters (ElevenLabs-style stability/similarity). */
    stability: number;
    similarityBoost: number;
    speed: number;
    jobId: string | null;
}
/** Languages supported for transcription; 'auto' lets the service detect. */
type TranscribeLanguage = 'auto' | 'en' | 'es' | 'fr' | 'de' | 'ja' | 'zh' | 'ko' | 'pt';
/** Data for a transcription node (video or audio to text). */
interface TranscribeNodeData extends BaseNodeData {
    inputVideo: string | null;
    inputAudio: string | null;
    outputText: string | null;
    language: TranscribeLanguage;
    /** Whether to include timestamps in the transcript. */
    timestamps: boolean;
    jobId: string | null;
}
|
|
323
|
+
/** Motion-control strategies. */
type MotionControlMode = 'trajectory' | 'camera' | 'combined' | 'video_transfer';
/** Preset camera movements for camera-mode motion control. */
type CameraMovement = 'static' | 'pan_left' | 'pan_right' | 'pan_up' | 'pan_down' | 'zoom_in' | 'zoom_out' | 'rotate_cw' | 'rotate_ccw' | 'dolly_in' | 'dolly_out';
/** String-literal union ("std" | "pro") derived from KlingQuality. */
type KlingQualityMode = `${KlingQuality}`;
/** Whether character orientation follows the image or the driving video. */
type CharacterOrientation = 'image' | 'video';
/** A point on a motion trajectory, pinned to a specific frame. */
interface TrajectoryPoint {
    x: number;
    y: number;
    frame: number;
}
/** Data for a motion-control node (Kling-style trajectory/camera control). */
interface MotionControlNodeData extends BaseNodeData {
    inputImage: string | null;
    inputVideo: string | null;
    inputPrompt: string | null;
    outputVideo: string | null;
    mode: MotionControlMode;
    /** Clip length in seconds. */
    duration: 5 | 10;
    aspectRatio: '16:9' | '9:16' | '1:1';
    /** Used in 'trajectory'/'combined' modes. */
    trajectoryPoints: TrajectoryPoint[];
    /** Used in 'camera'/'combined' modes. */
    cameraMovement: CameraMovement;
    cameraIntensity: number;
    qualityMode: KlingQualityMode;
    /** Used in 'video_transfer' mode. */
    characterOrientation: CharacterOrientation;
    keepOriginalSound: boolean;
    motionStrength: number;
    negativePrompt: string;
    /** Fixed RNG seed, or null for random. */
    seed: number | null;
    jobId: string | null;
}
|
|
352
|
+
|
|
353
|
+
/** Named easing curves for speed remapping. */
type EasingPreset = 'linear' | 'easeIn' | 'easeOut' | 'easeInOut' | 'easeInQuad' | 'easeOutQuad' | 'easeInOutQuad' | 'easeInCubic' | 'easeOutCubic' | 'easeInOutCubic' | 'easeInExpo' | 'easeOutExpo' | 'easeInOutExpo';
/** Cubic-bezier control points [x1, y1, x2, y2]. */
type CubicBezier = [number, number, number, number];
/** Data for an animation (speed-curve) node applied to a video. */
interface AnimationNodeData extends BaseNodeData {
    inputVideo: string | null;
    outputVideo: string | null;
    /** Choose a named preset or a custom bezier curve. */
    curveType: 'preset' | 'custom';
    preset: EasingPreset;
    customCurve: CubicBezier;
    speedMultiplier: number;
}
/** Transition effects between stitched clips. */
type TransitionType = 'cut' | 'crossfade' | 'wipe' | 'fade';
/** Audio codecs for the stitched output. */
type AudioCodec = 'aac' | 'mp3';
/** Render quality of the stitched output. */
type OutputQuality = 'full' | 'draft';
/** Data for a video-stitch node that concatenates clips with transitions. */
interface VideoStitchNodeData extends BaseNodeData {
    /** Clips in stitch order. */
    inputVideos: string[];
    outputVideo: string | null;
    transitionType: TransitionType;
    /** Transition length — presumably seconds; confirm with processor. */
    transitionDuration: number;
    /** Make the result loop seamlessly end-to-start. */
    seamlessLoop: boolean;
    audioCodec: AudioCodec;
    outputQuality: OutputQuality;
}
|
|
375
|
+
/** Data for a resize node (Luma-based aspect-ratio change for image or video). */
interface ResizeNodeData extends BaseNodeData {
    inputMedia: string | null;
    /** Detected kind of the connected media, or null when nothing is connected. */
    inputType: 'image' | 'video' | null;
    outputMedia: string | null;
    targetAspectRatio: LumaAspectRatio;
    /** Guidance prompt for outpainted regions. */
    prompt: string;
    /** Placement of the original content within the new frame. */
    gridPosition: GridPosition;
    jobId: string | null;
}
/** Data for a video-trim node. */
interface VideoTrimNodeData extends BaseNodeData {
    inputVideo: string | null;
    outputVideo: string | null;
    /** Trim window — presumably seconds; confirm with processor. */
    startTime: number;
    endTime: number;
    /** Duration of the source video, when known. */
    duration: number | null;
    jobId: string | null;
}
/** How the frame to extract is chosen. */
type FrameSelectionMode = 'first' | 'last' | 'timestamp' | 'percentage';
/** Data for a node that extracts a single frame from a video as an image. */
interface VideoFrameExtractNodeData extends BaseNodeData {
    inputVideo: string | null;
    outputImage: string | null;
    selectionMode: FrameSelectionMode;
    /** Used when selectionMode is 'timestamp'. */
    timestampSeconds: number;
    /** Used when selectionMode is 'percentage'. */
    percentagePosition: number;
    videoDuration: number | null;
    jobId: string | null;
}
|
|
402
|
+
/** Kind of media connected to a reframe node, or null when unconnected. */
type ReframeInputType = 'image' | 'video' | null;
/** Data for a reframe node (Luma Photon image/video reframing). */
interface ReframeNodeData extends BaseNodeData {
    inputImage: string | null;
    inputVideo: string | null;
    inputType: ReframeInputType;
    /** Exactly one of outputImage/outputVideo is produced, matching inputType. */
    outputImage: string | null;
    outputVideo: string | null;
    model: LumaReframeModel;
    aspectRatio: LumaAspectRatio;
    prompt: string;
    /** Placement of the original content within the reframed canvas. */
    gridPosition: GridPosition;
    jobId: string | null;
}
/** Kind of media connected to an upscale node, or null when unconnected. */
type UpscaleInputType = 'image' | 'video' | null;
/** Topaz upscale model slugs (image variants plus 'topaz-video'). */
type UpscaleModel = 'topaz-standard-v2' | 'topaz-low-res-v2' | 'topaz-cgi' | 'topaz-high-fidelity-v2' | 'topaz-text-refine' | 'topaz-video';
/** Data for an upscale node (Topaz image/video enhancement). */
interface UpscaleNodeData extends BaseNodeData {
    inputImage: string | null;
    inputVideo: string | null;
    inputType: UpscaleInputType;
    outputImage: string | null;
    outputVideo: string | null;
    /** Preview crops for the before/after comparison UI. */
    originalPreview: string | null;
    outputPreview: string | null;
    model: UpscaleModel;
    upscaleFactor: TopazUpscaleFactor;
    outputFormat: 'jpg' | 'png';
    /** Image-only face enhancement controls. */
    faceEnhancement: boolean;
    faceEnhancementStrength: number;
    faceEnhancementCreativity: number;
    /** Video-only targets. */
    targetResolution: TopazVideoResolution;
    targetFps: TopazVideoFPS;
    /** Comparison slider state (position presumably 0–100 — confirm in UI code). */
    showComparison: boolean;
    comparisonPosition: number;
    jobId: string | null;
}
|
|
437
|
+
/** File formats for the tiles produced by a grid split. */
type GridOutputFormat = 'jpg' | 'png' | 'webp';
/** Data for a node that slices one image into a rows×cols grid of images. */
interface ImageGridSplitNodeData extends BaseNodeData {
    inputImage: string | null;
    /** One entry per tile. */
    outputImages: string[];
    gridRows: number;
    gridCols: number;
    /** Pixels trimmed from each tile's border — presumed unit; confirm with implementation. */
    borderInset: number;
    outputFormat: GridOutputFormat;
    /** Encoding quality for lossy formats. */
    quality: number;
}
/** A single drawn annotation shape. */
interface AnnotationShapeData {
    id: string;
    type: 'rectangle' | 'circle' | 'arrow' | 'freehand' | 'text';
    strokeColor: string;
    strokeWidth: number;
    /** null means unfilled. */
    fillColor: string | null;
    /** Shape-specific geometry/content, keyed per shape type. */
    props: Record<string, unknown>;
}
/** Data for an annotation node that draws shapes onto an image. */
interface AnnotationNodeData extends BaseNodeData {
    inputImage: string | null;
    /** The input image with annotations rendered in. */
    outputImage: string | null;
    annotations: AnnotationShapeData[];
    hasAnnotations: boolean;
}
|
|
461
|
+
/** Visual presets for burned-in subtitles. */
type SubtitleStyle = 'default' | 'modern' | 'minimal' | 'bold';
/** Vertical placement of subtitles in the frame. */
type SubtitlePosition = 'top' | 'center' | 'bottom';
/** Data for a subtitle node that renders text onto a video. */
interface SubtitleNodeData extends BaseNodeData {
    inputVideo: string | null;
    /** Subtitle text source (e.g. from a transcribe node). */
    inputText: string | null;
    outputVideo: string | null;
    style: SubtitleStyle;
    position: SubtitlePosition;
    fontSize: number;
    fontColor: string;
    /** null means no background box behind the text. */
    backgroundColor: string | null;
    fontFamily: string;
    jobId: string | null;
}
|
|
475
|
+
/** Data for a gallery node that collects images for display. */
interface OutputGalleryNodeData extends BaseNodeData {
    images: string[];
}
/** Data for a side-by-side image comparison node. */
interface ImageCompareNodeData extends BaseNodeData {
    imageA: string | null;
    imageB: string | null;
}
/** Kind of media connected to a download node, or null when unconnected. */
type OutputInputType = 'image' | 'video' | null;
/** Data for a download node that exports the connected media. */
interface DownloadNodeData extends BaseNodeData {
    inputImage: string | null;
    inputVideo: string | null;
    inputType: OutputInputType;
    /** File name used for the exported asset. */
    outputName: string;
}
/** @deprecated Use DownloadNodeData instead */
type OutputNodeData = DownloadNodeData;
|
|
491
|
+
|
|
492
|
+
/** Data for a node that declares one external input of a (sub-)workflow. */
interface WorkflowInputNodeData extends BaseNodeData {
    inputName: string;
    inputType: HandleType;
    required: boolean;
    description?: string;
}
/** Data for a node that declares one external output of a (sub-)workflow. */
interface WorkflowOutputNodeData extends BaseNodeData {
    outputName: string;
    outputType: HandleType;
    description?: string;
    /** Value received from the connected upstream node at run time. */
    inputValue: string | null;
}
/** One declared input in a workflow's external interface. */
interface WorkflowInterfaceInput {
    /** ID of the workflowInput node that declares it. */
    nodeId: string;
    name: string;
    type: HandleType;
    required: boolean;
}
/** One declared output in a workflow's external interface. */
interface WorkflowInterfaceOutput {
    /** ID of the workflowOutput node that declares it. */
    nodeId: string;
    name: string;
    type: HandleType;
}
/** The external inputs/outputs a workflow exposes to callers. */
interface WorkflowInterface {
    inputs: WorkflowInterfaceInput[];
    outputs: WorkflowInterfaceOutput[];
}
/** Data for a node that embeds another workflow by reference. */
interface WorkflowRefNodeData extends BaseNodeData {
    referencedWorkflowId: string | null;
    referencedWorkflowName: string | null;
    /** Snapshot of the referenced workflow's interface (may go stale if it changes). */
    cachedInterface: WorkflowInterface | null;
    /** Maps the referenced workflow's input/output names to values/targets; null = unmapped. */
    inputMappings: Record<string, string | null>;
    outputMappings: Record<string, string | null>;
    /** Execution ID of the nested run, when one is in flight or finished. */
    childExecutionId: string | null;
}
|
|
527
|
+
|
|
528
|
+
/** Discriminated-by-node-type union of every node data payload. */
type WorkflowNodeData = ImageInputNodeData | AudioInputNodeData | VideoInputNodeData | PromptNodeData | PromptConstructorNodeData | ImageGenNodeData | VideoGenNodeData | LLMNodeData | LipSyncNodeData | VoiceChangeNodeData | TextToSpeechNodeData | TranscribeNodeData | MotionControlNodeData | AnimationNodeData | VideoStitchNodeData | ResizeNodeData | VideoTrimNodeData | VideoFrameExtractNodeData | ReframeNodeData | UpscaleNodeData | ImageGridSplitNodeData | AnnotationNodeData | SubtitleNodeData | OutputGalleryNodeData | ImageCompareNodeData | OutputNodeData | WorkflowInputNodeData | WorkflowOutputNodeData | WorkflowRefNodeData;
/** React Flow node specialized to this package's data and node-type unions. */
type WorkflowNode = Node<WorkflowNodeData, NodeType>;
/**
 * Edge data for workflow edges
 */
interface WorkflowEdgeData {
    /** Whether execution should pause before the target node */
    hasPause?: boolean;
    /** Index signature for React Flow Edge compatibility */
    [key: string]: unknown;
}
/** React Flow edge carrying WorkflowEdgeData. */
type WorkflowEdge = Edge<WorkflowEdgeData>;
|
|
540
|
+
|
|
541
|
+
// Bundler-generated re-export map: every declaration above is exported under a
// short alias that the package's public entry points re-export by real name.
// Do not edit the alias assignments by hand — they are regenerated on build.
export { type TTSVoice as $, type AudioInputNodeData as A, type BaseNodeData as B, CONNECTION_RULES as C, type TopazVideoFPS as D, type ImageGenNodeData as E, type VideoGenNodeData as F, type GridPosition as G, HandleTypeEnum as H, type ImageInputNodeData as I, type TextModel as J, KlingQuality as K, type LumaAspectRatio as L, ModelCapabilityEnum as M, NodeTypeEnum as N, type OutputFormat as O, ProcessingNodeType as P, type LLMNodeData as Q, ReframeNodeType as R, type SelectedModel as S, TemplateCategory as T, UpscaleNodeType as U, type VideoInputNodeData as V, type LipSyncModel as W, type LipSyncMode as X, type LipSyncNodeData as Y, type VoiceChangeNodeData as Z, type TTSProvider as _, type HandleType as a, type TextToSpeechNodeData as a0, type TranscribeLanguage as a1, type TranscribeNodeData as a2, type MotionControlMode as a3, type CameraMovement as a4, type KlingQualityMode as a5, type CharacterOrientation as a6, type TrajectoryPoint as a7, type MotionControlNodeData as a8, type EasingPreset as a9, type OutputNodeData as aA, type WorkflowInputNodeData as aB, type WorkflowOutputNodeData as aC, type WorkflowInterfaceInput as aD, type WorkflowInterfaceOutput as aE, type WorkflowInterface as aF, type WorkflowRefNodeData as aG, type WorkflowNodeData as aH, type WorkflowNode as aI, type WorkflowEdgeData as aJ, type WorkflowEdge as aK, type CubicBezier as aa, type AnimationNodeData as ab, type TransitionType as ac, type AudioCodec as ad, type OutputQuality as ae, type VideoStitchNodeData as af, type ResizeNodeData as ag, type VideoTrimNodeData as ah, type FrameSelectionMode as ai, type VideoFrameExtractNodeData as aj, type ReframeInputType as ak, type ReframeNodeData as al, type UpscaleInputType as am, type UpscaleModel as an, type UpscaleNodeData as ao, type GridOutputFormat as ap, type ImageGridSplitNodeData as aq, type AnnotationShapeData as ar, type AnnotationNodeData as as, type SubtitleStyle as at, type SubtitlePosition as au, type SubtitleNodeData as av, type OutputGalleryNodeData as aw, type ImageCompareNodeData as ax, type OutputInputType as ay, type DownloadNodeData as az, type HandleDefinition as b, ProviderTypeEnum as c, type ProviderType as d, type ModelCapability as e, ModelUseCaseEnum as f, type ModelUseCase as g, type ProviderModel as h, type NodeType as i, NodeCategoryEnum as j, type NodeCategory as k, NodeStatusEnum as l, type NodeStatus as m, type PromptNodeData as n, type AvailableVariable as o, type PromptConstructorNodeData as p, type ImageModel as q, type VideoModel as r, type AspectRatio as s, type Resolution as t, type VideoResolution as u, type VideoDuration as v, type LumaReframeModel as w, type TopazEnhanceModel as x, type TopazUpscaleFactor as y, type TopazVideoResolution as z };
|