@genfeedai/types 0.1.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/chunk-7NJUD2WZ.mjs +1 -0
- package/dist/chunk-KXAKQO3U.js +11 -0
- package/dist/chunk-OGJX4J7H.mjs +808 -0
- package/dist/chunk-P7K5LM6V.js +825 -0
- package/dist/chunk-RNGYPX4W.js +2 -0
- package/dist/chunk-WT2F5CAF.mjs +9 -0
- package/dist/comfyui.d.mts +91 -0
- package/dist/comfyui.d.ts +91 -0
- package/dist/comfyui.js +4 -0
- package/dist/comfyui.mjs +1 -0
- package/dist/index.d.mts +268 -0
- package/dist/index.d.ts +268 -0
- package/dist/index.js +185 -0
- package/dist/index.mjs +109 -0
- package/dist/nodes.d.mts +19 -0
- package/dist/nodes.d.ts +19 -0
- package/dist/nodes.js +54 -0
- package/dist/nodes.mjs +1 -0
- package/dist/replicate.d.mts +1092 -0
- package/dist/replicate.d.ts +1092 -0
- package/dist/replicate.js +2 -0
- package/dist/replicate.mjs +1 -0
- package/dist/union-CtYAY8Mv.d.mts +541 -0
- package/dist/union-CtYAY8Mv.d.ts +541 -0
- package/dist/workflow-CGpVuRPg.d.mts +99 -0
- package/dist/workflow-Di5vTrXa.d.ts +99 -0
- package/dist/workflow.d.mts +3 -0
- package/dist/workflow.d.ts +3 -0
- package/dist/workflow.js +10 -0
- package/dist/workflow.mjs +1 -0
- package/package.json +60 -0
- package/src/replicate/schemas.json +6636 -0
|
@@ -0,0 +1,825 @@
|
|
|
1
|
+
'use strict';
|
|
2
|
+
|
|
3
|
+
// src/nodes/handles.ts
/**
 * Handle (port) data types a node connection can carry.
 * Equivalent to the compiled TypeScript string enum: member name -> wire string.
 * String enums produce no reverse mapping, so a plain object is identical.
 */
const HandleTypeEnum = {
  IMAGE: "image",
  TEXT: "text",
  VIDEO: "video",
  NUMBER: "number",
  AUDIO: "audio",
};
|
|
12
|
+
/**
 * Allowed target handle types per source handle type.
 * Every handle type may only connect to a handle of the same type,
 * so the table is built mechanically: type -> [type].
 */
const CONNECTION_RULES = Object.fromEntries(
  ["image", "text", "video", "number", "audio"].map((type) => [type, [type]])
);
|
|
19
|
+
|
|
20
|
+
// src/nodes/providers.ts
/**
 * AI model hosting providers recognized by the platform.
 * Plain-object form of the compiled TypeScript string enum.
 */
const ProviderTypeEnum = {
  REPLICATE: "replicate",
  FAL: "fal",
  HUGGINGFACE: "huggingface",
  GENFEED_AI: "genfeed-ai",
};
|
|
28
|
+
/**
 * High-level generation capabilities a model can offer.
 * Plain-object form of the compiled TypeScript string enum.
 */
const ModelCapabilityEnum = {
  TEXT_TO_IMAGE: "text-to-image",
  IMAGE_TO_IMAGE: "image-to-image",
  TEXT_TO_VIDEO: "text-to-video",
  IMAGE_TO_VIDEO: "image-to-video",
  TEXT_GENERATION: "text-generation",
};
|
|
36
|
+
/**
 * Intended use cases used to tag/filter models.
 * Plain-object form of the compiled TypeScript string enum.
 */
const ModelUseCaseEnum = {
  STYLE_TRANSFER: "style-transfer",
  CHARACTER_CONSISTENT: "character-consistent",
  IMAGE_VARIATION: "image-variation",
  INPAINTING: "inpainting",
  UPSCALE: "upscale",
  GENERAL: "general",
};
|
|
45
|
+
|
|
46
|
+
// src/nodes/base.ts
/**
 * Every node type usable on the workflow canvas.
 * Values are the camelCase type strings stored in workflow documents and used
 * as keys into NODE_DEFINITIONS. Plain-object form of the compiled TS string enum.
 */
const NodeTypeEnum = {
  // inputs
  IMAGE_INPUT: "imageInput",
  AUDIO_INPUT: "audioInput",
  VIDEO_INPUT: "videoInput",
  PROMPT: "prompt",
  PROMPT_CONSTRUCTOR: "promptConstructor",
  // AI generation
  IMAGE_GEN: "imageGen",
  VIDEO_GEN: "videoGen",
  LLM: "llm",
  LIP_SYNC: "lipSync",
  VOICE_CHANGE: "voiceChange",
  TEXT_TO_SPEECH: "textToSpeech",
  TRANSCRIBE: "transcribe",
  MOTION_CONTROL: "motionControl",
  // processing
  RESIZE: "resize",
  ANIMATION: "animation",
  VIDEO_STITCH: "videoStitch",
  VIDEO_TRIM: "videoTrim",
  VIDEO_FRAME_EXTRACT: "videoFrameExtract",
  REFRAME: "reframe",
  UPSCALE: "upscale",
  IMAGE_GRID_SPLIT: "imageGridSplit",
  ANNOTATION: "annotation",
  SUBTITLE: "subtitle",
  // outputs
  OUTPUT_GALLERY: "outputGallery",
  IMAGE_COMPARE: "imageCompare",
  DOWNLOAD: "download",
  // composition (workflow-as-node)
  WORKFLOW_INPUT: "workflowInput",
  WORKFLOW_OUTPUT: "workflowOutput",
  WORKFLOW_REF: "workflowRef",
};
|
|
79
|
+
/**
 * Palette categories a node type can belong to.
 * Plain-object form of the compiled TypeScript string enum.
 */
const NodeCategoryEnum = {
  INPUT: "input",
  AI: "ai",
  PROCESSING: "processing",
  OUTPUT: "output",
  COMPOSITION: "composition",
};
|
|
87
|
+
/**
 * Execution lifecycle states of a node.
 * Plain-object form of the compiled TypeScript string enum.
 */
const NodeStatusEnum = {
  IDLE: "idle",
  PENDING: "pending",
  PROCESSING: "processing",
  COMPLETE: "complete",
  ERROR: "error",
};
|
|
95
|
+
|
|
96
|
+
// src/enums.ts
/**
 * Categories for workflow templates.
 * Plain-object form of the compiled TypeScript string enum.
 */
const TemplateCategory = {
  IMAGE: "image",
  VIDEO: "video",
  AUDIO: "audio",
  FULL_PIPELINE: "full-pipeline",
};
|
|
104
|
+
/**
 * Node-type strings that perform reframing.
 * Plain-object form of the compiled TypeScript string enum.
 */
const ReframeNodeType = {
  REFRAME: "reframe",
  LUMA_REFRAME_IMAGE: "lumaReframeImage",
  LUMA_REFRAME_VIDEO: "lumaReframeVideo",
};
|
|
110
|
+
/**
 * Node-type strings that perform upscaling.
 * Plain-object form of the compiled TypeScript string enum.
 */
const UpscaleNodeType = {
  UPSCALE: "upscale",
  TOPAZ_IMAGE_UPSCALE: "topazImageUpscale",
  TOPAZ_VIDEO_UPSCALE: "topazVideoUpscale",
};
|
|
116
|
+
/**
 * Kling AI quality tiers ("std" / "pro" wire values).
 * Plain-object form of the compiled TypeScript string enum.
 */
const KlingQuality = {
  STANDARD: "std",
  PRO: "pro",
};
|
|
121
|
+
/**
 * Node-type strings grouped as "processing" operations.
 * Plain-object form of the compiled TypeScript string enum.
 */
const ProcessingNodeType = {
  REFRAME: "reframe",
  LUMA_REFRAME_IMAGE: "lumaReframeImage",
  LUMA_REFRAME_VIDEO: "lumaReframeVideo",
  UPSCALE: "upscale",
  TOPAZ_IMAGE_UPSCALE: "topazImageUpscale",
  TOPAZ_VIDEO_UPSCALE: "topazVideoUpscale",
  VIDEO_FRAME_EXTRACT: "videoFrameExtract",
  LIP_SYNC: "lipSync",
  TEXT_TO_SPEECH: "textToSpeech",
  VOICE_CHANGE: "voiceChange",
  SUBTITLE: "subtitle",
  VIDEO_STITCH: "videoStitch",
  WORKFLOW_REF: "workflowRef",
};
|
|
137
|
+
|
|
138
|
+
// src/nodes/registry.ts
/**
 * Static registry of every canvas node type.
 *
 * Each entry is keyed by its node-type string and describes:
 *   - type:        the node-type string (same as the key)
 *   - label:       human-readable display name
 *   - description: one-line palette tooltip text
 *   - category:    one of "input" | "ai" | "processing" | "output" | "composition"
 *   - icon:        icon name string (presumably a lucide icon id — TODO confirm)
 *   - inputs:      input handles ({ id, type, label, required?, multiple? })
 *   - outputs:     output handles (same shape)
 *   - defaultData: initial node data when an instance is created on the canvas
 *
 * NOTE(review): the `// Processing nodes` / `// Output nodes` section markers
 * do not match every entry's `category` field — e.g. `outputGallery` and
 * `imageCompare` declare category "output" but are listed before the
 * "Output nodes" marker. The `category` field is authoritative.
 */
var NODE_DEFINITIONS = {
  // Input nodes
  imageInput: {
    type: "imageInput",
    label: "Image",
    description: "Upload or reference an image",
    category: "input",
    icon: "Image",
    inputs: [],
    outputs: [{ id: "image", type: "image", label: "Image" }],
    defaultData: {
      label: "Image",
      status: "idle",
      image: null,
      filename: null,
      dimensions: null,
      source: "upload"
    }
  },
  prompt: {
    type: "prompt",
    label: "Prompt",
    description: "Text prompt for AI generation",
    category: "input",
    icon: "MessageSquare",
    inputs: [],
    outputs: [{ id: "text", type: "text", label: "Prompt" }],
    defaultData: {
      label: "Prompt",
      status: "idle",
      prompt: "",
      variables: {}
    }
  },
  audioInput: {
    type: "audioInput",
    label: "Audio",
    description: "Upload an audio file (MP3, WAV)",
    category: "input",
    icon: "Volume2",
    inputs: [],
    outputs: [{ id: "audio", type: "audio", label: "Audio" }],
    defaultData: {
      label: "Audio",
      status: "idle",
      audio: null,
      filename: null,
      duration: null,
      source: "upload"
    }
  },
  videoInput: {
    type: "videoInput",
    label: "Video",
    description: "Upload or reference a video file",
    category: "input",
    icon: "FileVideo",
    inputs: [],
    outputs: [{ id: "video", type: "video", label: "Video" }],
    defaultData: {
      label: "Video",
      status: "idle",
      video: null,
      filename: null,
      duration: null,
      dimensions: null,
      source: "upload"
    }
  },
  promptConstructor: {
    type: "promptConstructor",
    label: "Prompt Constructor",
    description: "Template-based prompt with @variable interpolation from connected Prompt nodes",
    category: "input",
    icon: "Puzzle",
    inputs: [{ id: "text", type: "text", label: "Variables", multiple: true }],
    outputs: [{ id: "text", type: "text", label: "Prompt" }],
    defaultData: {
      label: "Prompt Constructor",
      status: "idle",
      template: "",
      outputText: null,
      unresolvedVars: []
    }
  },
  // AI nodes
  imageGen: {
    type: "imageGen",
    label: "Image Generator",
    description: "Generate images with nano-banana models",
    category: "ai",
    icon: "Sparkles",
    inputs: [
      { id: "prompt", type: "text", label: "Prompt", required: true },
      { id: "images", type: "image", label: "Reference Images", multiple: true }
    ],
    outputs: [{ id: "image", type: "image", label: "Generated Image" }],
    defaultData: {
      label: "Image Generator",
      status: "idle",
      inputImages: [],
      inputPrompt: null,
      outputImage: null,
      outputImages: [],
      model: "nano-banana-pro",
      aspectRatio: "1:1",
      resolution: "2K",
      outputFormat: "jpg",
      jobId: null
    }
  },
  videoGen: {
    type: "videoGen",
    label: "Video Generator",
    description: "Generate videos with veo-3.1 models",
    category: "ai",
    icon: "Video",
    inputs: [
      { id: "prompt", type: "text", label: "Prompt", required: true },
      { id: "image", type: "image", label: "Starting Frame" },
      { id: "lastFrame", type: "image", label: "Last Frame (interpolation)" }
    ],
    outputs: [{ id: "video", type: "video", label: "Generated Video" }],
    defaultData: {
      label: "Video Generator",
      status: "idle",
      inputImage: null,
      lastFrame: null,
      referenceImages: [],
      inputPrompt: null,
      negativePrompt: "",
      outputVideo: null,
      model: "veo-3.1-fast",
      duration: 8,
      aspectRatio: "16:9",
      resolution: "1080p",
      generateAudio: true,
      jobId: null
    }
  },
  llm: {
    type: "llm",
    label: "LLM",
    description: "Generate text with meta-llama",
    category: "ai",
    icon: "Brain",
    inputs: [{ id: "prompt", type: "text", label: "Prompt", required: true }],
    outputs: [{ id: "text", type: "text", label: "Generated Text" }],
    defaultData: {
      label: "LLM",
      status: "idle",
      inputPrompt: null,
      outputText: null,
      model: "meta-llama-3.1-405b-instruct",
      systemPrompt: "You are a creative assistant helping generate content prompts.",
      temperature: 0.7,
      maxTokens: 1024,
      topP: 0.9,
      jobId: null
    }
  },
  lipSync: {
    type: "lipSync",
    label: "Lip Sync",
    description: "Generate talking-head video from image/video and audio using Replicate",
    category: "ai",
    icon: "Mic",
    // Audio is mandatory; exactly one of image/video is expected as the visual source
    // (presumably — the schema marks only audio required; TODO confirm in executor).
    inputs: [
      { id: "image", type: "image", label: "Face Image" },
      { id: "video", type: "video", label: "Source Video" },
      { id: "audio", type: "audio", label: "Audio", required: true }
    ],
    outputs: [{ id: "video", type: "video", label: "Generated Video" }],
    defaultData: {
      label: "Lip Sync",
      status: "idle",
      inputImage: null,
      inputVideo: null,
      inputAudio: null,
      outputVideo: null,
      model: "sync/lipsync-2",
      syncMode: "loop",
      temperature: 0.5,
      activeSpeaker: false,
      jobId: null
    }
  },
  voiceChange: {
    type: "voiceChange",
    label: "Voice Change",
    description: "Replace or mix audio track in a video",
    category: "ai",
    icon: "AudioLines",
    inputs: [
      { id: "video", type: "video", label: "Video", required: true },
      { id: "audio", type: "audio", label: "New Audio", required: true }
    ],
    outputs: [{ id: "video", type: "video", label: "Output Video" }],
    defaultData: {
      label: "Voice Change",
      status: "idle",
      inputVideo: null,
      inputAudio: null,
      outputVideo: null,
      preserveOriginalAudio: false,
      audioMixLevel: 0.5,
      jobId: null
    }
  },
  textToSpeech: {
    type: "textToSpeech",
    label: "Text to Speech",
    description: "Convert text to natural-sounding speech using ElevenLabs",
    category: "ai",
    icon: "AudioLines",
    inputs: [{ id: "text", type: "text", label: "Text", required: true }],
    outputs: [{ id: "audio", type: "audio", label: "Audio" }],
    defaultData: {
      label: "Text to Speech",
      status: "idle",
      inputText: null,
      outputAudio: null,
      provider: "elevenlabs",
      voice: "rachel",
      stability: 0.5,
      similarityBoost: 0.75,
      speed: 1,
      jobId: null
    }
  },
  transcribe: {
    type: "transcribe",
    label: "Transcribe",
    description: "Convert video or audio to text transcript",
    category: "ai",
    icon: "FileText",
    // Neither input is marked required; either a video or an audio source may feed it.
    inputs: [
      { id: "video", type: "video", label: "Video" },
      { id: "audio", type: "audio", label: "Audio" }
    ],
    outputs: [{ id: "text", type: "text", label: "Transcript" }],
    defaultData: {
      label: "Transcribe",
      status: "idle",
      inputVideo: null,
      inputAudio: null,
      outputText: null,
      language: "auto",
      timestamps: false,
      jobId: null
    }
  },
  motionControl: {
    type: "motionControl",
    label: "Motion Control",
    description: "Generate video with precise motion control using Kling AI",
    category: "ai",
    icon: "Navigation",
    inputs: [
      { id: "image", type: "image", label: "Image", required: true },
      { id: "video", type: "video", label: "Motion Video" },
      { id: "prompt", type: "text", label: "Prompt" }
    ],
    outputs: [{ id: "video", type: "video", label: "Video" }],
    defaultData: {
      label: "Motion Control",
      status: "idle",
      inputImage: null,
      inputVideo: null,
      inputPrompt: null,
      outputVideo: null,
      mode: "video_transfer",
      duration: 5,
      aspectRatio: "16:9",
      trajectoryPoints: [],
      cameraMovement: "static",
      cameraIntensity: 50,
      qualityMode: "pro",
      characterOrientation: "image",
      keepOriginalSound: true,
      motionStrength: 50,
      negativePrompt: "",
      seed: null,
      jobId: null
    }
  },
  // Processing nodes
  resize: {
    type: "resize",
    label: "Resize",
    description: "Resize images or videos to different aspect ratios using Luma AI",
    category: "processing",
    icon: "Maximize2",
    // Handle is typed "image" although the description mentions videos too;
    // presumably the engine coerces media types here — TODO confirm.
    inputs: [{ id: "media", type: "image", label: "Media", required: true }],
    outputs: [{ id: "media", type: "image", label: "Resized Media" }],
    defaultData: {
      label: "Resize",
      status: "idle",
      inputMedia: null,
      inputType: null,
      outputMedia: null,
      targetAspectRatio: "16:9",
      prompt: "",
      gridPosition: { x: 0.5, y: 0.5 },
      jobId: null
    }
  },
  animation: {
    type: "animation",
    label: "Animation",
    description: "Apply easing curve to video",
    category: "processing",
    icon: "Wand2",
    inputs: [{ id: "video", type: "video", label: "Video", required: true }],
    outputs: [{ id: "video", type: "video", label: "Animated Video" }],
    defaultData: {
      label: "Animation",
      status: "idle",
      inputVideo: null,
      outputVideo: null,
      curveType: "preset",
      preset: "easeInOutCubic",
      // Cubic-bezier control points used when curveType is "custom".
      customCurve: [0.645, 0.045, 0.355, 1],
      speedMultiplier: 1
    }
  },
  videoStitch: {
    type: "videoStitch",
    label: "Video Stitch",
    description: "Concatenate multiple videos",
    category: "processing",
    icon: "Layers",
    inputs: [{ id: "videos", type: "video", label: "Videos", multiple: true, required: true }],
    outputs: [{ id: "video", type: "video", label: "Stitched Video" }],
    defaultData: {
      label: "Video Stitch",
      status: "idle",
      inputVideos: [],
      outputVideo: null,
      transitionType: "crossfade",
      transitionDuration: 0.5,
      seamlessLoop: false
    }
  },
  videoTrim: {
    type: "videoTrim",
    label: "Video Trim",
    description: "Trim video to a specific time range",
    category: "processing",
    icon: "Scissors",
    inputs: [{ id: "video", type: "video", label: "Video", required: true }],
    outputs: [{ id: "video", type: "video", label: "Trimmed Video" }],
    defaultData: {
      label: "Video Trim",
      status: "idle",
      inputVideo: null,
      outputVideo: null,
      startTime: 0,
      endTime: 60,
      duration: null,
      jobId: null
    }
  },
  videoFrameExtract: {
    type: "videoFrameExtract",
    label: "Frame Extract",
    description: "Extract a specific frame from video as image",
    category: "processing",
    icon: "Film",
    inputs: [{ id: "video", type: "video", label: "Video", required: true }],
    outputs: [{ id: "image", type: "image", label: "Extracted Frame" }],
    defaultData: {
      label: "Frame Extract",
      status: "idle",
      inputVideo: null,
      outputImage: null,
      selectionMode: "last",
      timestampSeconds: 0,
      percentagePosition: 100,
      videoDuration: null,
      jobId: null
    }
  },
  reframe: {
    type: "reframe",
    label: "Reframe",
    description: "Reframe images or videos to different aspect ratios with AI outpainting",
    category: "processing",
    icon: "Crop",
    inputs: [
      { id: "image", type: "image", label: "Image" },
      { id: "video", type: "video", label: "Video" }
    ],
    outputs: [
      { id: "image", type: "image", label: "Reframed Image" },
      { id: "video", type: "video", label: "Reframed Video" }
    ],
    defaultData: {
      label: "Reframe",
      status: "idle",
      inputImage: null,
      inputVideo: null,
      inputType: null,
      outputImage: null,
      outputVideo: null,
      model: "photon-flash-1",
      aspectRatio: "16:9",
      prompt: "",
      gridPosition: { x: 0.5, y: 0.5 },
      jobId: null
    }
  },
  upscale: {
    type: "upscale",
    label: "Upscale",
    description: "AI-powered upscaling for images and videos",
    category: "processing",
    icon: "Maximize",
    inputs: [
      { id: "image", type: "image", label: "Image" },
      { id: "video", type: "video", label: "Video" }
    ],
    outputs: [
      { id: "image", type: "image", label: "Upscaled Image" },
      { id: "video", type: "video", label: "Upscaled Video" }
    ],
    defaultData: {
      label: "Upscale",
      status: "idle",
      inputImage: null,
      inputVideo: null,
      inputType: null,
      outputImage: null,
      outputVideo: null,
      originalPreview: null,
      outputPreview: null,
      model: "topaz-standard-v2",
      upscaleFactor: "2x",
      outputFormat: "png",
      faceEnhancement: false,
      faceEnhancementStrength: 80,
      faceEnhancementCreativity: 0,
      targetResolution: "1080p",
      targetFps: 30,
      showComparison: true,
      comparisonPosition: 50,
      jobId: null
    }
  },
  imageGridSplit: {
    type: "imageGridSplit",
    label: "Grid Split",
    description: "Split image into grid cells",
    category: "processing",
    icon: "Grid3X3",
    inputs: [{ id: "image", type: "image", label: "Image", required: true }],
    outputs: [{ id: "images", type: "image", label: "Split Images", multiple: true }],
    defaultData: {
      label: "Grid Split",
      status: "idle",
      inputImage: null,
      outputImages: [],
      gridRows: 2,
      gridCols: 3,
      borderInset: 10,
      outputFormat: "jpg",
      quality: 95
    }
  },
  annotation: {
    type: "annotation",
    label: "Annotation",
    description: "Add shapes, arrows, and text to images",
    category: "processing",
    icon: "Pencil",
    inputs: [{ id: "image", type: "image", label: "Image", required: true }],
    outputs: [{ id: "image", type: "image", label: "Annotated Image" }],
    defaultData: {
      label: "Annotation",
      status: "idle",
      inputImage: null,
      outputImage: null,
      annotations: [],
      hasAnnotations: false
    }
  },
  subtitle: {
    type: "subtitle",
    label: "Subtitle",
    description: "Burn subtitles into video using FFmpeg",
    category: "processing",
    icon: "Subtitles",
    inputs: [
      { id: "video", type: "video", label: "Video", required: true },
      { id: "text", type: "text", label: "Subtitle Text", required: true }
    ],
    outputs: [{ id: "video", type: "video", label: "Video with Subtitles" }],
    defaultData: {
      label: "Subtitle",
      status: "idle",
      inputVideo: null,
      inputText: null,
      outputVideo: null,
      style: "modern",
      position: "bottom",
      fontSize: 24,
      fontColor: "#FFFFFF",
      backgroundColor: "rgba(0,0,0,0.7)",
      fontFamily: "Arial",
      jobId: null
    }
  },
  // Display nodes — category is "output" although they precede the
  // "Output nodes" marker below; NODE_ORDER lists them under "output".
  outputGallery: {
    type: "outputGallery",
    label: "Output Gallery",
    description: "Thumbnail grid with lightbox for multi-image outputs",
    category: "output",
    icon: "LayoutGrid",
    inputs: [{ id: "image", type: "image", label: "Images", multiple: true }],
    outputs: [],
    defaultData: {
      label: "Output Gallery",
      status: "idle",
      images: []
    }
  },
  imageCompare: {
    type: "imageCompare",
    label: "Image Compare",
    description: "Side-by-side A/B comparison with draggable slider",
    category: "output",
    icon: "Columns2",
    inputs: [
      { id: "image", type: "image", label: "Image A" },
      { id: "image-1", type: "image", label: "Image B" }
    ],
    outputs: [],
    defaultData: {
      label: "Image Compare",
      status: "idle",
      imageA: null,
      imageB: null
    }
  },
  // Output nodes
  download: {
    type: "download",
    label: "Download",
    description: "Download workflow output with custom filename",
    category: "output",
    icon: "Download",
    inputs: [
      { id: "image", type: "image", label: "Image" },
      { id: "video", type: "video", label: "Video" }
    ],
    outputs: [],
    defaultData: {
      label: "Download",
      status: "idle",
      inputImage: null,
      inputVideo: null,
      inputType: null,
      outputName: "output"
    }
  },
  // Composition nodes (workflow-as-node)
  workflowInput: {
    type: "workflowInput",
    label: "Workflow Input",
    description: "Define an input port for when this workflow is used as a subworkflow",
    category: "composition",
    icon: "ArrowRightToLine",
    inputs: [],
    outputs: [{ id: "value", type: "image", label: "Value" }],
    // Type is dynamic based on inputType
    defaultData: {
      label: "Workflow Input",
      status: "idle",
      inputName: "input",
      inputType: "image",
      required: true,
      description: ""
    }
  },
  workflowOutput: {
    type: "workflowOutput",
    label: "Workflow Output",
    description: "Define an output port for when this workflow is used as a subworkflow",
    category: "composition",
    icon: "ArrowLeftFromLine",
    inputs: [{ id: "value", type: "image", label: "Value", required: true }],
    // Type is dynamic based on outputType
    outputs: [],
    defaultData: {
      label: "Workflow Output",
      status: "idle",
      outputName: "output",
      outputType: "image",
      description: "",
      inputValue: null
    }
  },
  workflowRef: {
    type: "workflowRef",
    label: "Subworkflow",
    description: "Reference another workflow as a subworkflow",
    category: "composition",
    icon: "GitBranch",
    inputs: [],
    // Dynamic based on referenced workflow interface
    outputs: [],
    // Dynamic based on referenced workflow interface
    defaultData: {
      label: "Subworkflow",
      status: "idle",
      referencedWorkflowId: null,
      referencedWorkflowName: null,
      cachedInterface: null,
      inputMappings: {},
      outputMappings: {},
      childExecutionId: null
    }
  }
  // Multi-format nodes removed - format conversion now handled by schema-driven engine
};
|
|
764
|
+
/**
 * Display ordering of node types within each palette category.
 * Keys are the category names; values list node-type strings in the order
 * they should appear. getNodesByCategory() resolves these against
 * NODE_DEFINITIONS.
 */
const NODE_ORDER = {
  input: ["imageInput", "videoInput", "audioInput", "prompt", "promptConstructor"],
  ai: ["imageGen", "videoGen", "llm", "lipSync", "textToSpeech", "transcribe", "voiceChange", "motionControl"],
  processing: ["reframe", "upscale", "resize", "videoStitch", "videoTrim", "videoFrameExtract", "imageGridSplit", "annotation", "subtitle", "animation"],
  output: ["download", "outputGallery", "imageCompare"],
  composition: ["workflowRef", "workflowInput", "workflowOutput"],
};
|
|
791
|
+
/**
 * Group node definitions by palette category, in display order.
 *
 * The category keys are derived from NODE_ORDER instead of being hard-coded a
 * second time (as the original did), so a category added to NODE_ORDER cannot
 * silently drift out of sync with this function. Behavior is otherwise
 * unchanged: node types listed in NODE_ORDER but absent from NODE_DEFINITIONS
 * are skipped.
 *
 * @returns {Record<string, Array<object>>} map of category name to the
 *   ordered NODE_DEFINITIONS entries for that category.
 */
function getNodesByCategory() {
  const categories = {};
  for (const category of Object.keys(NODE_ORDER)) {
    // Preserve NODE_ORDER's ordering; drop any type without a definition.
    categories[category] = NODE_ORDER[category]
      .map((nodeType) => NODE_DEFINITIONS[nodeType])
      .filter((def) => def !== undefined);
  }
  return categories;
}
|
|
809
|
+
|
|
810
|
+
// Public CommonJS surface of this bundle chunk.
Object.assign(exports, {
  CONNECTION_RULES,
  HandleTypeEnum,
  KlingQuality,
  ModelCapabilityEnum,
  ModelUseCaseEnum,
  NODE_DEFINITIONS,
  NODE_ORDER,
  NodeCategoryEnum,
  NodeStatusEnum,
  NodeTypeEnum,
  ProcessingNodeType,
  ProviderTypeEnum,
  ReframeNodeType,
  TemplateCategory,
  UpscaleNodeType,
  getNodesByCategory,
});