@genfeedai/types 0.1.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,808 @@
1
// src/nodes/handles.ts
// String-valued handle-type enum (tsup-style runtime shim): names the kind
// of data a node handle carries.
var HandleTypeEnum = /* @__PURE__ */ Object.assign(HandleTypeEnum || {}, {
  IMAGE: "image",
  TEXT: "text",
  VIDEO: "video",
  NUMBER: "number",
  AUDIO: "audio"
});
// Allowed target handle types per source handle type. Derived from the enum:
// currently strict — every handle type connects only to itself.
var CONNECTION_RULES = /* @__PURE__ */ (() => {
  const rules = {};
  for (const kind of Object.values(HandleTypeEnum)) {
    rules[kind] = [kind];
  }
  return rules;
})();
17
+
18
// src/nodes/providers.ts
// Model-hosting providers supported by the platform.
var ProviderTypeEnum = /* @__PURE__ */ Object.assign(ProviderTypeEnum || {}, {
  REPLICATE: "replicate",
  FAL: "fal",
  HUGGINGFACE: "huggingface",
  GENFEED_AI: "genfeed-ai"
});
// Modality-to-modality capabilities a model can advertise.
var ModelCapabilityEnum = /* @__PURE__ */ Object.assign(ModelCapabilityEnum || {}, {
  TEXT_TO_IMAGE: "text-to-image",
  IMAGE_TO_IMAGE: "image-to-image",
  TEXT_TO_VIDEO: "text-to-video",
  IMAGE_TO_VIDEO: "image-to-video",
  TEXT_GENERATION: "text-generation"
});
// Use-case tags for model selection/filtering.
var ModelUseCaseEnum = /* @__PURE__ */ Object.assign(ModelUseCaseEnum || {}, {
  STYLE_TRANSFER: "style-transfer",
  CHARACTER_CONSISTENT: "character-consistent",
  IMAGE_VARIATION: "image-variation",
  INPAINTING: "inpainting",
  UPSCALE: "upscale",
  GENERAL: "general"
});
43
+
44
// src/nodes/base.ts
// Every node type the workflow editor knows about. Values are the camelCase
// keys used in NODE_DEFINITIONS / NODE_ORDER.
var NodeTypeEnum = /* @__PURE__ */ Object.assign(NodeTypeEnum || {}, {
  IMAGE_INPUT: "imageInput",
  AUDIO_INPUT: "audioInput",
  VIDEO_INPUT: "videoInput",
  PROMPT: "prompt",
  PROMPT_CONSTRUCTOR: "promptConstructor",
  IMAGE_GEN: "imageGen",
  VIDEO_GEN: "videoGen",
  LLM: "llm",
  LIP_SYNC: "lipSync",
  VOICE_CHANGE: "voiceChange",
  TEXT_TO_SPEECH: "textToSpeech",
  TRANSCRIBE: "transcribe",
  MOTION_CONTROL: "motionControl",
  RESIZE: "resize",
  ANIMATION: "animation",
  VIDEO_STITCH: "videoStitch",
  VIDEO_TRIM: "videoTrim",
  VIDEO_FRAME_EXTRACT: "videoFrameExtract",
  REFRAME: "reframe",
  UPSCALE: "upscale",
  IMAGE_GRID_SPLIT: "imageGridSplit",
  ANNOTATION: "annotation",
  SUBTITLE: "subtitle",
  OUTPUT_GALLERY: "outputGallery",
  IMAGE_COMPARE: "imageCompare",
  DOWNLOAD: "download",
  WORKFLOW_INPUT: "workflowInput",
  WORKFLOW_OUTPUT: "workflowOutput",
  WORKFLOW_REF: "workflowRef"
});
// Palette grouping for node types.
var NodeCategoryEnum = /* @__PURE__ */ Object.assign(NodeCategoryEnum || {}, {
  INPUT: "input",
  AI: "ai",
  PROCESSING: "processing",
  OUTPUT: "output",
  COMPOSITION: "composition"
});
// Execution lifecycle states for a node.
var NodeStatusEnum = /* @__PURE__ */ Object.assign(NodeStatusEnum || {}, {
  IDLE: "idle",
  PENDING: "pending",
  PROCESSING: "processing",
  COMPLETE: "complete",
  ERROR: "error"
});
93
+
94
// src/enums.ts
// Top-level template categories.
var TemplateCategory = /* @__PURE__ */ Object.assign(TemplateCategory || {}, {
  IMAGE: "image",
  VIDEO: "video",
  AUDIO: "audio",
  FULL_PIPELINE: "full-pipeline"
});
// Reframe node variants (generic plus Luma-specific image/video).
var ReframeNodeType = /* @__PURE__ */ Object.assign(ReframeNodeType || {}, {
  REFRAME: "reframe",
  LUMA_REFRAME_IMAGE: "lumaReframeImage",
  LUMA_REFRAME_VIDEO: "lumaReframeVideo"
});
// Upscale node variants (generic plus Topaz-specific image/video).
var UpscaleNodeType = /* @__PURE__ */ Object.assign(UpscaleNodeType || {}, {
  UPSCALE: "upscale",
  TOPAZ_IMAGE_UPSCALE: "topazImageUpscale",
  TOPAZ_VIDEO_UPSCALE: "topazVideoUpscale"
});
// Kling quality tiers; note STANDARD maps to the provider string "std".
var KlingQuality = /* @__PURE__ */ Object.assign(KlingQuality || {}, {
  STANDARD: "std",
  PRO: "pro"
});
// Union of node types handled by the processing pipeline.
var ProcessingNodeType = /* @__PURE__ */ Object.assign(ProcessingNodeType || {}, {
  REFRAME: "reframe",
  LUMA_REFRAME_IMAGE: "lumaReframeImage",
  LUMA_REFRAME_VIDEO: "lumaReframeVideo",
  UPSCALE: "upscale",
  TOPAZ_IMAGE_UPSCALE: "topazImageUpscale",
  TOPAZ_VIDEO_UPSCALE: "topazVideoUpscale",
  VIDEO_FRAME_EXTRACT: "videoFrameExtract",
  LIP_SYNC: "lipSync",
  TEXT_TO_SPEECH: "textToSpeech",
  VOICE_CHANGE: "voiceChange",
  SUBTITLE: "subtitle",
  VIDEO_STITCH: "videoStitch",
  WORKFLOW_REF: "workflowRef"
});
135
+
136
// src/nodes/registry.ts
/**
 * Static registry of every node type the editor can instantiate.
 *
 * Each entry records palette metadata (label, description, category, icon),
 * the node's typed input/output handles, and the `defaultData` payload a
 * freshly created node starts with (all nodes start in status "idle").
 * Handle `type` values mirror HandleTypeEnum; `category` values mirror
 * NodeCategoryEnum. `jobId: null` marks nodes whose work runs as an async
 * backend job.
 */
var NODE_DEFINITIONS = {
  // Input nodes
  imageInput: {
    type: "imageInput",
    label: "Image",
    description: "Upload or reference an image",
    category: "input",
    icon: "Image",
    inputs: [],
    outputs: [{ id: "image", type: "image", label: "Image" }],
    defaultData: {
      label: "Image",
      status: "idle",
      image: null,
      filename: null,
      dimensions: null,
      source: "upload"
    }
  },
  prompt: {
    type: "prompt",
    label: "Prompt",
    description: "Text prompt for AI generation",
    category: "input",
    icon: "MessageSquare",
    inputs: [],
    outputs: [{ id: "text", type: "text", label: "Prompt" }],
    defaultData: {
      label: "Prompt",
      status: "idle",
      prompt: "",
      variables: {}
    }
  },
  audioInput: {
    type: "audioInput",
    label: "Audio",
    description: "Upload an audio file (MP3, WAV)",
    category: "input",
    icon: "Volume2",
    inputs: [],
    outputs: [{ id: "audio", type: "audio", label: "Audio" }],
    defaultData: {
      label: "Audio",
      status: "idle",
      audio: null,
      filename: null,
      duration: null,
      source: "upload"
    }
  },
  videoInput: {
    type: "videoInput",
    label: "Video",
    description: "Upload or reference a video file",
    category: "input",
    icon: "FileVideo",
    inputs: [],
    outputs: [{ id: "video", type: "video", label: "Video" }],
    defaultData: {
      label: "Video",
      status: "idle",
      video: null,
      filename: null,
      duration: null,
      dimensions: null,
      source: "upload"
    }
  },
  promptConstructor: {
    type: "promptConstructor",
    label: "Prompt Constructor",
    description: "Template-based prompt with @variable interpolation from connected Prompt nodes",
    category: "input",
    icon: "Puzzle",
    // Accepts any number of text connections; each becomes a template variable.
    inputs: [{ id: "text", type: "text", label: "Variables", multiple: true }],
    outputs: [{ id: "text", type: "text", label: "Prompt" }],
    defaultData: {
      label: "Prompt Constructor",
      status: "idle",
      template: "",
      outputText: null,
      unresolvedVars: []
    }
  },
  // AI nodes
  imageGen: {
    type: "imageGen",
    label: "Image Generator",
    description: "Generate images with nano-banana models",
    category: "ai",
    icon: "Sparkles",
    inputs: [
      { id: "prompt", type: "text", label: "Prompt", required: true },
      { id: "images", type: "image", label: "Reference Images", multiple: true }
    ],
    outputs: [{ id: "image", type: "image", label: "Generated Image" }],
    defaultData: {
      label: "Image Generator",
      status: "idle",
      inputImages: [],
      inputPrompt: null,
      outputImage: null,
      outputImages: [],
      model: "nano-banana-pro",
      aspectRatio: "1:1",
      resolution: "2K",
      outputFormat: "jpg",
      jobId: null
    }
  },
  videoGen: {
    type: "videoGen",
    label: "Video Generator",
    description: "Generate videos with veo-3.1 models",
    category: "ai",
    icon: "Video",
    inputs: [
      { id: "prompt", type: "text", label: "Prompt", required: true },
      { id: "image", type: "image", label: "Starting Frame" },
      { id: "lastFrame", type: "image", label: "Last Frame (interpolation)" }
    ],
    outputs: [{ id: "video", type: "video", label: "Generated Video" }],
    defaultData: {
      label: "Video Generator",
      status: "idle",
      inputImage: null,
      lastFrame: null,
      referenceImages: [],
      inputPrompt: null,
      negativePrompt: "",
      outputVideo: null,
      model: "veo-3.1-fast",
      duration: 8,
      aspectRatio: "16:9",
      resolution: "1080p",
      generateAudio: true,
      jobId: null
    }
  },
  llm: {
    type: "llm",
    label: "LLM",
    description: "Generate text with meta-llama",
    category: "ai",
    icon: "Brain",
    inputs: [{ id: "prompt", type: "text", label: "Prompt", required: true }],
    outputs: [{ id: "text", type: "text", label: "Generated Text" }],
    defaultData: {
      label: "LLM",
      status: "idle",
      inputPrompt: null,
      outputText: null,
      model: "meta-llama-3.1-405b-instruct",
      systemPrompt: "You are a creative assistant helping generate content prompts.",
      temperature: 0.7,
      maxTokens: 1024,
      topP: 0.9,
      jobId: null
    }
  },
  lipSync: {
    type: "lipSync",
    label: "Lip Sync",
    description: "Generate talking-head video from image/video and audio using Replicate",
    category: "ai",
    icon: "Mic",
    // Image and video sources are alternatives; only audio is required.
    inputs: [
      { id: "image", type: "image", label: "Face Image" },
      { id: "video", type: "video", label: "Source Video" },
      { id: "audio", type: "audio", label: "Audio", required: true }
    ],
    outputs: [{ id: "video", type: "video", label: "Generated Video" }],
    defaultData: {
      label: "Lip Sync",
      status: "idle",
      inputImage: null,
      inputVideo: null,
      inputAudio: null,
      outputVideo: null,
      model: "sync/lipsync-2",
      syncMode: "loop",
      temperature: 0.5,
      activeSpeaker: false,
      jobId: null
    }
  },
  voiceChange: {
    type: "voiceChange",
    label: "Voice Change",
    description: "Replace or mix audio track in a video",
    category: "ai",
    icon: "AudioLines",
    inputs: [
      { id: "video", type: "video", label: "Video", required: true },
      { id: "audio", type: "audio", label: "New Audio", required: true }
    ],
    outputs: [{ id: "video", type: "video", label: "Output Video" }],
    defaultData: {
      label: "Voice Change",
      status: "idle",
      inputVideo: null,
      inputAudio: null,
      outputVideo: null,
      preserveOriginalAudio: false,
      audioMixLevel: 0.5,
      jobId: null
    }
  },
  textToSpeech: {
    type: "textToSpeech",
    label: "Text to Speech",
    description: "Convert text to natural-sounding speech using ElevenLabs",
    category: "ai",
    icon: "AudioLines",
    inputs: [{ id: "text", type: "text", label: "Text", required: true }],
    outputs: [{ id: "audio", type: "audio", label: "Audio" }],
    defaultData: {
      label: "Text to Speech",
      status: "idle",
      inputText: null,
      outputAudio: null,
      provider: "elevenlabs",
      voice: "rachel",
      stability: 0.5,
      similarityBoost: 0.75,
      speed: 1,
      jobId: null
    }
  },
  transcribe: {
    type: "transcribe",
    label: "Transcribe",
    description: "Convert video or audio to text transcript",
    category: "ai",
    icon: "FileText",
    inputs: [
      { id: "video", type: "video", label: "Video" },
      { id: "audio", type: "audio", label: "Audio" }
    ],
    outputs: [{ id: "text", type: "text", label: "Transcript" }],
    defaultData: {
      label: "Transcribe",
      status: "idle",
      inputVideo: null,
      inputAudio: null,
      outputText: null,
      language: "auto",
      timestamps: false,
      jobId: null
    }
  },
  motionControl: {
    type: "motionControl",
    label: "Motion Control",
    description: "Generate video with precise motion control using Kling AI",
    category: "ai",
    icon: "Navigation",
    inputs: [
      { id: "image", type: "image", label: "Image", required: true },
      { id: "video", type: "video", label: "Motion Video" },
      { id: "prompt", type: "text", label: "Prompt" }
    ],
    outputs: [{ id: "video", type: "video", label: "Video" }],
    defaultData: {
      label: "Motion Control",
      status: "idle",
      inputImage: null,
      inputVideo: null,
      inputPrompt: null,
      outputVideo: null,
      mode: "video_transfer",
      duration: 5,
      aspectRatio: "16:9",
      trajectoryPoints: [],
      cameraMovement: "static",
      cameraIntensity: 50,
      qualityMode: "pro",
      characterOrientation: "image",
      keepOriginalSound: true,
      motionStrength: 50,
      negativePrompt: "",
      seed: null,
      jobId: null
    }
  },
  // Processing nodes
  resize: {
    type: "resize",
    label: "Resize",
    description: "Resize images or videos to different aspect ratios using Luma AI",
    category: "processing",
    icon: "Maximize2",
    // NOTE(review): the media handle is typed "image" although the description
    // mentions videos too — confirm whether handle typing is intentionally narrow.
    inputs: [{ id: "media", type: "image", label: "Media", required: true }],
    outputs: [{ id: "media", type: "image", label: "Resized Media" }],
    defaultData: {
      label: "Resize",
      status: "idle",
      inputMedia: null,
      inputType: null,
      outputMedia: null,
      targetAspectRatio: "16:9",
      prompt: "",
      gridPosition: { x: 0.5, y: 0.5 },
      jobId: null
    }
  },
  animation: {
    type: "animation",
    label: "Animation",
    description: "Apply easing curve to video",
    category: "processing",
    icon: "Wand2",
    inputs: [{ id: "video", type: "video", label: "Video", required: true }],
    outputs: [{ id: "video", type: "video", label: "Animated Video" }],
    defaultData: {
      label: "Animation",
      status: "idle",
      inputVideo: null,
      outputVideo: null,
      curveType: "preset",
      preset: "easeInOutCubic",
      // Cubic-bezier control points used when curveType is not "preset".
      customCurve: [0.645, 0.045, 0.355, 1],
      speedMultiplier: 1
    }
  },
  videoStitch: {
    type: "videoStitch",
    label: "Video Stitch",
    description: "Concatenate multiple videos",
    category: "processing",
    icon: "Layers",
    inputs: [{ id: "videos", type: "video", label: "Videos", multiple: true, required: true }],
    outputs: [{ id: "video", type: "video", label: "Stitched Video" }],
    defaultData: {
      label: "Video Stitch",
      status: "idle",
      inputVideos: [],
      outputVideo: null,
      transitionType: "crossfade",
      transitionDuration: 0.5,
      seamlessLoop: false
    }
  },
  videoTrim: {
    type: "videoTrim",
    label: "Video Trim",
    description: "Trim video to a specific time range",
    category: "processing",
    icon: "Scissors",
    inputs: [{ id: "video", type: "video", label: "Video", required: true }],
    outputs: [{ id: "video", type: "video", label: "Trimmed Video" }],
    defaultData: {
      label: "Video Trim",
      status: "idle",
      inputVideo: null,
      outputVideo: null,
      startTime: 0,
      endTime: 60,
      duration: null,
      jobId: null
    }
  },
  videoFrameExtract: {
    type: "videoFrameExtract",
    label: "Frame Extract",
    description: "Extract a specific frame from video as image",
    category: "processing",
    icon: "Film",
    inputs: [{ id: "video", type: "video", label: "Video", required: true }],
    outputs: [{ id: "image", type: "image", label: "Extracted Frame" }],
    defaultData: {
      label: "Frame Extract",
      status: "idle",
      inputVideo: null,
      outputImage: null,
      // Default grabs the final frame ("last" + 100% position).
      selectionMode: "last",
      timestampSeconds: 0,
      percentagePosition: 100,
      videoDuration: null,
      jobId: null
    }
  },
  reframe: {
    type: "reframe",
    label: "Reframe",
    description: "Reframe images or videos to different aspect ratios with AI outpainting",
    category: "processing",
    icon: "Crop",
    inputs: [
      { id: "image", type: "image", label: "Image" },
      { id: "video", type: "video", label: "Video" }
    ],
    outputs: [
      { id: "image", type: "image", label: "Reframed Image" },
      { id: "video", type: "video", label: "Reframed Video" }
    ],
    defaultData: {
      label: "Reframe",
      status: "idle",
      inputImage: null,
      inputVideo: null,
      inputType: null,
      outputImage: null,
      outputVideo: null,
      model: "photon-flash-1",
      aspectRatio: "16:9",
      prompt: "",
      gridPosition: { x: 0.5, y: 0.5 },
      jobId: null
    }
  },
  upscale: {
    type: "upscale",
    label: "Upscale",
    description: "AI-powered upscaling for images and videos",
    category: "processing",
    icon: "Maximize",
    inputs: [
      { id: "image", type: "image", label: "Image" },
      { id: "video", type: "video", label: "Video" }
    ],
    outputs: [
      { id: "image", type: "image", label: "Upscaled Image" },
      { id: "video", type: "video", label: "Upscaled Video" }
    ],
    defaultData: {
      label: "Upscale",
      status: "idle",
      inputImage: null,
      inputVideo: null,
      inputType: null,
      outputImage: null,
      outputVideo: null,
      originalPreview: null,
      outputPreview: null,
      model: "topaz-standard-v2",
      upscaleFactor: "2x",
      outputFormat: "png",
      faceEnhancement: false,
      faceEnhancementStrength: 80,
      faceEnhancementCreativity: 0,
      // Video-only targets; ignored for image inputs? TODO confirm.
      targetResolution: "1080p",
      targetFps: 30,
      showComparison: true,
      comparisonPosition: 50,
      jobId: null
    }
  },
  imageGridSplit: {
    type: "imageGridSplit",
    label: "Grid Split",
    description: "Split image into grid cells",
    category: "processing",
    icon: "Grid3X3",
    inputs: [{ id: "image", type: "image", label: "Image", required: true }],
    outputs: [{ id: "images", type: "image", label: "Split Images", multiple: true }],
    defaultData: {
      label: "Grid Split",
      status: "idle",
      inputImage: null,
      outputImages: [],
      gridRows: 2,
      gridCols: 3,
      borderInset: 10,
      outputFormat: "jpg",
      quality: 95
    }
  },
  annotation: {
    type: "annotation",
    label: "Annotation",
    description: "Add shapes, arrows, and text to images",
    category: "processing",
    icon: "Pencil",
    inputs: [{ id: "image", type: "image", label: "Image", required: true }],
    outputs: [{ id: "image", type: "image", label: "Annotated Image" }],
    defaultData: {
      label: "Annotation",
      status: "idle",
      inputImage: null,
      outputImage: null,
      annotations: [],
      hasAnnotations: false
    }
  },
  subtitle: {
    type: "subtitle",
    label: "Subtitle",
    description: "Burn subtitles into video using FFmpeg",
    category: "processing",
    icon: "Subtitles",
    inputs: [
      { id: "video", type: "video", label: "Video", required: true },
      { id: "text", type: "text", label: "Subtitle Text", required: true }
    ],
    outputs: [{ id: "video", type: "video", label: "Video with Subtitles" }],
    defaultData: {
      label: "Subtitle",
      status: "idle",
      inputVideo: null,
      inputText: null,
      outputVideo: null,
      style: "modern",
      position: "bottom",
      fontSize: 24,
      fontColor: "#FFFFFF",
      backgroundColor: "rgba(0,0,0,0.7)",
      fontFamily: "Arial",
      jobId: null
    }
  },
  // NOTE(review): outputGallery and imageCompare carry category "output"
  // although they appear before the "Output nodes" marker below.
  outputGallery: {
    type: "outputGallery",
    label: "Output Gallery",
    description: "Thumbnail grid with lightbox for multi-image outputs",
    category: "output",
    icon: "LayoutGrid",
    inputs: [{ id: "image", type: "image", label: "Images", multiple: true }],
    outputs: [],
    defaultData: {
      label: "Output Gallery",
      status: "idle",
      images: []
    }
  },
  imageCompare: {
    type: "imageCompare",
    label: "Image Compare",
    description: "Side-by-side A/B comparison with draggable slider",
    category: "output",
    icon: "Columns2",
    inputs: [
      { id: "image", type: "image", label: "Image A" },
      { id: "image-1", type: "image", label: "Image B" }
    ],
    outputs: [],
    defaultData: {
      label: "Image Compare",
      status: "idle",
      imageA: null,
      imageB: null
    }
  },
  // Output nodes
  download: {
    type: "download",
    label: "Download",
    description: "Download workflow output with custom filename",
    category: "output",
    icon: "Download",
    inputs: [
      { id: "image", type: "image", label: "Image" },
      { id: "video", type: "video", label: "Video" }
    ],
    outputs: [],
    defaultData: {
      label: "Download",
      status: "idle",
      inputImage: null,
      inputVideo: null,
      inputType: null,
      outputName: "output"
    }
  },
  // Composition nodes (workflow-as-node)
  workflowInput: {
    type: "workflowInput",
    label: "Workflow Input",
    description: "Define an input port for when this workflow is used as a subworkflow",
    category: "composition",
    icon: "ArrowRightToLine",
    inputs: [],
    outputs: [{ id: "value", type: "image", label: "Value" }],
    // Type is dynamic based on inputType
    defaultData: {
      label: "Workflow Input",
      status: "idle",
      inputName: "input",
      inputType: "image",
      required: true,
      description: ""
    }
  },
  workflowOutput: {
    type: "workflowOutput",
    label: "Workflow Output",
    description: "Define an output port for when this workflow is used as a subworkflow",
    category: "composition",
    icon: "ArrowLeftFromLine",
    inputs: [{ id: "value", type: "image", label: "Value", required: true }],
    // Type is dynamic based on outputType
    outputs: [],
    defaultData: {
      label: "Workflow Output",
      status: "idle",
      outputName: "output",
      outputType: "image",
      description: "",
      inputValue: null
    }
  },
  workflowRef: {
    type: "workflowRef",
    label: "Subworkflow",
    description: "Reference another workflow as a subworkflow",
    category: "composition",
    icon: "GitBranch",
    inputs: [],
    // Dynamic based on referenced workflow interface
    outputs: [],
    // Dynamic based on referenced workflow interface
    defaultData: {
      label: "Subworkflow",
      status: "idle",
      referencedWorkflowId: null,
      referencedWorkflowName: null,
      cachedInterface: null,
      inputMappings: {},
      outputMappings: {},
      childExecutionId: null
    }
  }
  // Multi-format nodes removed - format conversion now handled by schema-driven engine
};
762
/**
 * Display order of node types within each category, used by
 * getNodesByCategory() to assemble the editor palette. Entries without a
 * matching NODE_DEFINITIONS record are silently skipped by that function.
 */
var NODE_ORDER = {
  input: ["imageInput", "videoInput", "audioInput", "prompt", "promptConstructor"],
  ai: [
    "imageGen",
    "videoGen",
    "llm",
    "lipSync",
    "textToSpeech",
    "transcribe",
    "voiceChange",
    "motionControl"
  ],
  processing: [
    "reframe",
    "upscale",
    "resize",
    "videoStitch",
    "videoTrim",
    "videoFrameExtract",
    "imageGridSplit",
    "annotation",
    "subtitle",
    "animation"
  ],
  output: ["download", "outputGallery", "imageCompare"],
  composition: ["workflowRef", "workflowInput", "workflowOutput"]
};
789
/**
 * Builds the palette: node definitions grouped by category, in NODE_ORDER
 * sequence. Node types listed in NODE_ORDER but absent from
 * NODE_DEFINITIONS are skipped.
 */
function getNodesByCategory() {
  const grouped = {
    input: [],
    ai: [],
    processing: [],
    output: [],
    composition: []
  };
  for (const [category, orderedTypes] of Object.entries(NODE_ORDER)) {
    for (const nodeType of orderedTypes) {
      const definition = NODE_DEFINITIONS[nodeType];
      if (definition) {
        grouped[category].push(definition);
      }
    }
  }
  return grouped;
}
807
+
808
+ export { CONNECTION_RULES, HandleTypeEnum, KlingQuality, ModelCapabilityEnum, ModelUseCaseEnum, NODE_DEFINITIONS, NODE_ORDER, NodeCategoryEnum, NodeStatusEnum, NodeTypeEnum, ProcessingNodeType, ProviderTypeEnum, ReframeNodeType, TemplateCategory, UpscaleNodeType, getNodesByCategory };