@genfeedai/workflows 0.1.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +96 -0
- package/dist/index.d.mts +185 -0
- package/dist/index.d.ts +185 -0
- package/dist/index.js +918 -0
- package/dist/index.mjs +865 -0
- package/metadata/catalog.json +76 -0
- package/package.json +44 -0
- package/workflows/full-pipeline.json +295 -0
- package/workflows/image-series.json +151 -0
- package/workflows/image-to-video.json +132 -0
- package/workflows/single-image.json +86 -0
- package/workflows/single-video.json +90 -0
- package/workflows/ugc-factory.json +143 -0
package/dist/index.mjs
ADDED
|
@@ -0,0 +1,865 @@
|
|
|
1
|
+
// src/comfyui/client.ts
var DEFAULT_POLL_MS = 2e3;
var DEFAULT_TIMEOUT_MS = 3e5;
var ComfyUIClient = class {
  /**
   * @param {string} baseUrl - Root URL of the ComfyUI server, e.g. "http://localhost:8188".
   */
  constructor(baseUrl) {
    this.baseUrl = baseUrl;
  }
  /**
   * Queue a prompt for execution on ComfyUI.
   * POSTs the node graph to /prompt and returns the parsed JSON response
   * (which contains the assigned prompt_id).
   * @throws {Error} when the server responds with a non-2xx status.
   */
  async queuePrompt(prompt) {
    const response = await fetch(`${this.baseUrl}/prompt`, {
      method: "POST",
      headers: { "Content-Type": "application/json" },
      body: JSON.stringify({ prompt })
    });
    if (response.ok) {
      return response.json();
    }
    const text = await response.text();
    throw new Error(`ComfyUI /prompt failed (${response.status}): ${text}`);
  }
  /**
   * Get history for a specific prompt execution.
   * Returns the history entry keyed by promptId, or undefined if the
   * server has no entry for it yet.
   * @throws {Error} when the server responds with a non-2xx status.
   */
  async getHistory(promptId) {
    const response = await fetch(`${this.baseUrl}/history/${promptId}`);
    if (!response.ok) {
      const text = await response.text();
      throw new Error(`ComfyUI /history failed (${response.status}): ${text}`);
    }
    const allEntries = await response.json();
    return allEntries[promptId];
  }
  /**
   * Download an output file (image/video) from ComfyUI.
   * @returns {Promise<Buffer>} the raw file bytes.
   * @throws {Error} when the server responds with a non-2xx status.
   */
  async getOutput(filename, subfolder) {
    const query = new URLSearchParams({ filename, subfolder, type: "output" });
    const response = await fetch(`${this.baseUrl}/view?${query}`);
    if (!response.ok) {
      throw new Error(`ComfyUI /view failed (${response.status})`);
    }
    return Buffer.from(await response.arrayBuffer());
  }
  /**
   * Poll ComfyUI until the prompt completes or times out.
   * Returns the history entry with outputs.
   * @throws {Error} when the prompt reports an error status, or when the
   *   deadline passes before completion.
   */
  async waitForCompletion(promptId, opts) {
    const pollMs = opts?.pollMs ?? DEFAULT_POLL_MS;
    const timeoutMs = opts?.timeoutMs ?? DEFAULT_TIMEOUT_MS;
    const deadline = Date.now() + timeoutMs;
    for (;;) {
      if (Date.now() >= deadline) {
        throw new Error(`ComfyUI prompt ${promptId} timed out after ${timeoutMs}ms`);
      }
      const entry = await this.getHistory(promptId);
      if (entry?.status?.completed) {
        return entry;
      }
      if (entry?.status?.status_str === "error") {
        throw new Error(
          `ComfyUI prompt ${promptId} failed: ${JSON.stringify(entry.status.messages)}`
        );
      }
      await this.sleep(pollMs);
    }
  }
  /**
   * Health check — pings the ComfyUI instance.
   * @returns {Promise<boolean>} true when /system_stats answers with 2xx;
   *   false on any HTTP error or network failure.
   */
  async ping() {
    try {
      const response = await fetch(`${this.baseUrl}/system_stats`);
      return response.ok;
    } catch {
      return false;
    }
  }
  // Promise-based delay helper used by the polling loop.
  sleep(ms) {
    return new Promise((resolve) => setTimeout(resolve, ms));
  }
};
|
84
|
+
|
|
85
|
+
// src/comfyui/template-runner.ts
var ComfyUITemplateRunner = class {
  /**
   * @param client - A ComfyUIClient used to queue prompts and await results.
   */
  constructor(client) {
    this.client = client;
  }
  /**
   * Resolve template inputs into a concrete prompt.
   * Deep-copies the template's node graph, then writes each supplied value
   * (or the input's default) into the node/field it targets. The template
   * itself is never mutated.
   * @throws {Error} when a required input is missing, or an input targets a
   *   node id that does not exist in the graph.
   */
  resolvePrompt(template, values) {
    const graph = JSON.parse(JSON.stringify(template.prompt));
    for (const spec of template.inputs) {
      const value = values[spec.key] ?? spec.default;
      if (value === void 0) {
        if (spec.required) {
          throw new Error(`Missing required input: ${spec.key}`);
        }
        continue;
      }
      const node = graph[spec.nodeId];
      if (!node) {
        throw new Error(`Template references unknown node: ${spec.nodeId}`);
      }
      node.inputs[spec.field] = value;
    }
    return graph;
  }
  /**
   * Run a template with the given inputs and wait for completion.
   * @returns the history entry produced by the client once the prompt finishes.
   */
  async run(template, values) {
    const resolved = this.resolvePrompt(template, values);
    const queued = await this.client.queuePrompt(resolved);
    return this.client.waitForCompletion(queued.prompt_id);
  }
};
|
|
120
|
+
|
|
121
|
+
// src/comfyui/prompt-builder.ts
/**
 * Build a ComfyUI node graph for a basic Flux.1-dev text-to-image run.
 * Only `prompt` is required; everything else falls back to defaults
 * (random 32-bit seed, 1024x1024, 20 steps, cfg 1, empty negative prompt).
 * @returns a plain prompt graph object keyed by node id.
 */
function buildFluxDevPrompt(params) {
  const {
    prompt,
    seed = Math.floor(Math.random() * 2 ** 32),
    width = 1024,
    height = 1024,
    steps = 20,
    cfg = 1,
    negativePrompt = ""
  } = params;
  // Encode a text string against the checkpoint's CLIP output (node 1, slot 1).
  const clipEncode = (text) => ({
    class_type: "CLIPTextEncode",
    inputs: { text, clip: ["1", 1] }
  });
  return {
    "1": { class_type: "CheckpointLoaderSimple", inputs: { ckpt_name: "flux1-dev.safetensors" } },
    "2": clipEncode(prompt),
    "3": clipEncode(negativePrompt),
    "4": { class_type: "EmptyLatentImage", inputs: { width, height, batch_size: 1 } },
    "5": {
      class_type: "KSampler",
      inputs: {
        model: ["1", 0],
        positive: ["2", 0],
        negative: ["3", 0],
        latent_image: ["4", 0],
        seed,
        steps,
        cfg,
        sampler_name: "euler",
        scheduler: "normal",
        denoise: 1
      }
    },
    "6": { class_type: "VAEDecode", inputs: { samples: ["5", 0], vae: ["1", 2] } },
    "7": { class_type: "SaveImage", inputs: { images: ["6", 0], filename_prefix: "genfeed-flux-dev" } }
  };
}
|
|
192
|
+
/**
 * Build a ComfyUI node graph for Flux.1-dev with PuLID identity transfer:
 * the face found in `faceImage` is applied to the generation with the given
 * `pulidStrength` (default 0.8). Negative prompt is always empty.
 * @returns a plain prompt graph object keyed by node id.
 */
function buildPulidFluxPrompt(params) {
  const {
    prompt,
    faceImage,
    seed = Math.floor(Math.random() * 2 ** 32),
    width = 1024,
    height = 1024,
    steps = 20,
    cfg = 1,
    pulidStrength = 0.8
  } = params;
  return {
    "1": { class_type: "CheckpointLoaderSimple", inputs: { ckpt_name: "flux1-dev.safetensors" } },
    // NOTE(review): this PuLID weight file is named for SDXL while the
    // checkpoint is Flux.1-dev — confirm the intended weight file.
    "2": { class_type: "PulidModelLoader", inputs: { pulid_file: "ip-adapter_pulid_sdxl_fp16.safetensors" } },
    "3": { class_type: "LoadImage", inputs: { image: faceImage } },
    "4": { class_type: "PulidInsightFaceLoader", inputs: { provider: "CPU" } },
    "5": {
      class_type: "ApplyPulid",
      inputs: {
        model: ["1", 0],
        pulid: ["2", 0],
        image: ["3", 0],
        insightface: ["4", 0],
        weight: pulidStrength
      }
    },
    "6": { class_type: "CLIPTextEncode", inputs: { text: prompt, clip: ["1", 1] } },
    "7": { class_type: "CLIPTextEncode", inputs: { text: "", clip: ["1", 1] } },
    "8": { class_type: "EmptyLatentImage", inputs: { width, height, batch_size: 1 } },
    "9": {
      class_type: "KSampler",
      inputs: {
        model: ["5", 0],
        positive: ["6", 0],
        negative: ["7", 0],
        latent_image: ["8", 0],
        seed,
        steps,
        cfg,
        sampler_name: "euler",
        scheduler: "normal",
        denoise: 1
      }
    },
    "10": { class_type: "VAEDecode", inputs: { samples: ["9", 0], vae: ["1", 2] } },
    "11": { class_type: "SaveImage", inputs: { images: ["10", 0], filename_prefix: "genfeed-pulid-flux" } }
  };
}
|
|
291
|
+
/**
 * Build a ComfyUI node graph for the Z-Image Turbo checkpoint.
 * Turbo defaults: 4 steps and a fixed cfg of 1 with euler_ancestral;
 * negative prompt is always empty.
 * @returns a plain prompt graph object keyed by node id.
 */
function buildZImageTurboPrompt(params) {
  const {
    prompt,
    seed = Math.floor(Math.random() * 2 ** 32),
    width = 1024,
    height = 1024,
    steps = 4
  } = params;
  // Encode a text string against the checkpoint's CLIP output (node 1, slot 1).
  const clipEncode = (text) => ({
    class_type: "CLIPTextEncode",
    inputs: { text, clip: ["1", 1] }
  });
  return {
    "1": { class_type: "CheckpointLoaderSimple", inputs: { ckpt_name: "z-image-turbo.safetensors" } },
    "2": clipEncode(prompt),
    "3": clipEncode(""),
    "4": { class_type: "EmptyLatentImage", inputs: { width, height, batch_size: 1 } },
    "5": {
      class_type: "KSampler",
      inputs: {
        model: ["1", 0],
        positive: ["2", 0],
        negative: ["3", 0],
        latent_image: ["4", 0],
        seed,
        steps,
        cfg: 1,
        sampler_name: "euler_ancestral",
        scheduler: "normal",
        denoise: 1
      }
    },
    "6": { class_type: "VAEDecode", inputs: { samples: ["5", 0], vae: ["1", 2] } },
    "7": { class_type: "SaveImage", inputs: { images: ["6", 0], filename_prefix: "genfeed-z-turbo" } }
  };
}
|
|
359
|
+
/**
 * Build a ComfyUI node graph for Flux.2-dev text-to-image.
 * Uses split UNET/CLIP/VAE loaders and Flux2TextEncode, whose second output
 * slot feeds the KSampler's negative conditioning. `guidance` maps to cfg.
 * @returns a plain prompt graph object keyed by node id.
 */
function buildFlux2DevPrompt(params) {
  const {
    prompt,
    seed = Math.floor(Math.random() * 2 ** 32),
    width = 1024,
    height = 1024,
    steps = 20,
    guidance = 3.5
  } = params;
  return {
    "1": {
      class_type: "UNETLoader",
      inputs: { unet_name: "flux2_dev_fp8mixed.safetensors", weight_dtype: "fp8_e4m3fn" }
    },
    "2": {
      class_type: "CLIPLoader",
      inputs: { clip_name: "mistral_3_small_flux2_fp4_mixed.safetensors", type: "flux" }
    },
    "3": { class_type: "Flux2TextEncode", inputs: { text: prompt, clip: ["2", 0] } },
    "4": { class_type: "VAELoader", inputs: { vae_name: "flux2-vae.safetensors" } },
    "5": { class_type: "EmptyLatentImage", inputs: { width, height, batch_size: 1 } },
    "6": {
      class_type: "KSampler",
      inputs: {
        model: ["1", 0],
        positive: ["3", 0],
        negative: ["3", 1],
        latent_image: ["5", 0],
        seed,
        steps,
        cfg: guidance,
        sampler_name: "euler",
        scheduler: "normal",
        denoise: 1
      }
    },
    "7": { class_type: "VAEDecode", inputs: { samples: ["6", 0], vae: ["4", 0] } },
    "8": { class_type: "SaveImage", inputs: { images: ["7", 0], filename_prefix: "genfeed-flux2-dev" } }
  };
}
|
|
435
|
+
/**
 * Build a ComfyUI node graph for Flux.2-dev with PuLID identity transfer.
 * Same layout as buildFlux2DevPrompt plus the PuLID loader/apply chain; the
 * KSampler samples from the PuLID-patched model. `guidance` maps to cfg.
 * @returns a plain prompt graph object keyed by node id.
 */
function buildFlux2DevPulidPrompt(params) {
  const {
    prompt,
    faceImage,
    seed = Math.floor(Math.random() * 2 ** 32),
    width = 1024,
    height = 1024,
    steps = 20,
    guidance = 3.5,
    pulidStrength = 0.8
  } = params;
  return {
    "1": {
      class_type: "UNETLoader",
      inputs: { unet_name: "flux2_dev_fp8mixed.safetensors", weight_dtype: "fp8_e4m3fn" }
    },
    "2": {
      class_type: "CLIPLoader",
      inputs: { clip_name: "mistral_3_small_flux2_fp4_mixed.safetensors", type: "flux" }
    },
    // NOTE(review): this PuLID weight file is named for SDXL while the model
    // is Flux.2-dev — confirm the intended weight file.
    "3": { class_type: "PulidModelLoader", inputs: { pulid_file: "ip-adapter_pulid_sdxl_fp16.safetensors" } },
    "4": { class_type: "LoadImage", inputs: { image: faceImage } },
    "5": { class_type: "PulidInsightFaceLoader", inputs: { provider: "CPU" } },
    "6": {
      class_type: "ApplyPulid",
      inputs: {
        model: ["1", 0],
        pulid: ["3", 0],
        image: ["4", 0],
        insightface: ["5", 0],
        weight: pulidStrength
      }
    },
    "7": { class_type: "Flux2TextEncode", inputs: { text: prompt, clip: ["2", 0] } },
    "8": { class_type: "VAELoader", inputs: { vae_name: "flux2-vae.safetensors" } },
    "9": { class_type: "EmptyLatentImage", inputs: { width, height, batch_size: 1 } },
    "10": {
      class_type: "KSampler",
      inputs: {
        model: ["6", 0],
        positive: ["7", 0],
        negative: ["7", 1],
        latent_image: ["9", 0],
        seed,
        steps,
        cfg: guidance,
        sampler_name: "euler",
        scheduler: "normal",
        denoise: 1
      }
    },
    "11": { class_type: "VAEDecode", inputs: { samples: ["10", 0], vae: ["8", 0] } },
    "12": { class_type: "SaveImage", inputs: { images: ["11", 0], filename_prefix: "genfeed-flux2-pulid" } }
  };
}
|
|
541
|
+
/**
 * Build a ComfyUI node graph for Flux.2-dev with both a LoRA and PuLID
 * identity transfer. The LoRA patches model and CLIP first; PuLID is then
 * applied on top of the LoRA-patched model, and the text encoder uses the
 * LoRA-patched CLIP. `guidance` maps to cfg.
 * @returns a plain prompt graph object keyed by node id.
 */
function buildFlux2DevPulidLoraPrompt(params) {
  const {
    prompt,
    faceImage,
    loraPath,
    seed = Math.floor(Math.random() * 2 ** 32),
    width = 1024,
    height = 1024,
    steps = 20,
    guidance = 3.5,
    pulidStrength = 0.8,
    loraStrength = 0.8
  } = params;
  return {
    "1": {
      class_type: "UNETLoader",
      inputs: { unet_name: "flux2_dev_fp8mixed.safetensors", weight_dtype: "fp8_e4m3fn" }
    },
    "2": {
      class_type: "CLIPLoader",
      inputs: { clip_name: "mistral_3_small_flux2_fp4_mixed.safetensors", type: "flux" }
    },
    "3": {
      class_type: "LoraLoader",
      inputs: {
        model: ["1", 0],
        clip: ["2", 0],
        lora_name: loraPath,
        strength_model: loraStrength,
        strength_clip: loraStrength
      }
    },
    // NOTE(review): this PuLID weight file is named for SDXL while the model
    // is Flux.2-dev — confirm the intended weight file.
    "4": { class_type: "PulidModelLoader", inputs: { pulid_file: "ip-adapter_pulid_sdxl_fp16.safetensors" } },
    "5": { class_type: "LoadImage", inputs: { image: faceImage } },
    "6": { class_type: "PulidInsightFaceLoader", inputs: { provider: "CPU" } },
    "7": {
      class_type: "ApplyPulid",
      inputs: {
        model: ["3", 0],
        pulid: ["4", 0],
        image: ["5", 0],
        insightface: ["6", 0],
        weight: pulidStrength
      }
    },
    "8": { class_type: "Flux2TextEncode", inputs: { text: prompt, clip: ["3", 1] } },
    "9": { class_type: "VAELoader", inputs: { vae_name: "flux2-vae.safetensors" } },
    "10": { class_type: "EmptyLatentImage", inputs: { width, height, batch_size: 1 } },
    "11": {
      class_type: "KSampler",
      inputs: {
        model: ["7", 0],
        positive: ["8", 0],
        negative: ["8", 1],
        latent_image: ["10", 0],
        seed,
        steps,
        cfg: guidance,
        sampler_name: "euler",
        scheduler: "normal",
        denoise: 1
      }
    },
    "12": { class_type: "VAEDecode", inputs: { samples: ["11", 0], vae: ["9", 0] } },
    "13": { class_type: "SaveImage", inputs: { images: ["12", 0], filename_prefix: "genfeed-flux2-pulid-lora" } }
  };
}
|
|
659
|
+
/**
 * Build a ComfyUI node graph for the distilled Flux.2 Klein (9B) model.
 * Klein defaults: 6 steps with a fixed cfg of 3.5. The second output slot of
 * Flux2TextEncode feeds the KSampler's negative conditioning.
 * @returns a plain prompt graph object keyed by node id.
 */
function buildFlux2KleinPrompt(params) {
  const {
    prompt,
    seed = Math.floor(Math.random() * 2 ** 32),
    width = 1024,
    height = 1024,
    steps = 6
  } = params;
  return {
    "1": {
      class_type: "UNETLoader",
      inputs: { unet_name: "flux-2-klein-9b-fp8.safetensors", weight_dtype: "fp8_e4m3fn" }
    },
    "2": {
      class_type: "CLIPLoader",
      inputs: { clip_name: "mistral_3_small_flux2_fp4_mixed.safetensors", type: "flux" }
    },
    "3": { class_type: "Flux2TextEncode", inputs: { text: prompt, clip: ["2", 0] } },
    "4": { class_type: "VAELoader", inputs: { vae_name: "flux2-vae.safetensors" } },
    "5": { class_type: "EmptyLatentImage", inputs: { width, height, batch_size: 1 } },
    "6": {
      class_type: "KSampler",
      inputs: {
        model: ["1", 0],
        positive: ["3", 0],
        negative: ["3", 1],
        latent_image: ["5", 0],
        seed,
        steps,
        cfg: 3.5,
        sampler_name: "euler",
        scheduler: "normal",
        denoise: 1
      }
    },
    "7": { class_type: "VAEDecode", inputs: { samples: ["6", 0], vae: ["4", 0] } },
    "8": { class_type: "SaveImage", inputs: { images: ["7", 0], filename_prefix: "genfeed-flux2-klein" } }
  };
}
|
|
734
|
+
|
|
735
|
+
// src/index.ts
|
|
736
|
+
import * as fs from "node:fs";
|
|
737
|
+
import * as path from "node:path";
|
|
738
|
+
// Static catalog of the workflows shipped with this package, keyed by slug.
// Each entry is metadata only; the full node-graph JSON lives under
// workflows/<slug>.json and is loaded on demand by getWorkflowJson().
var WORKFLOW_REGISTRY = {
  "single-image": {
    slug: "single-image",
    title: "Single Image Generation",
    description: "Generate an AI image from a source image (img2img) with enhanced styling",
    category: "image-generation",
    tags: ["image", "simple", "img2img", "enhancement"],
    tier: "free",
    icon: "\u{1F5BC}\uFE0F",
    defaultModel: "nano-banana-pro",
    inputTypes: ["image", "text"],
    outputTypes: ["image"],
    version: 1
  },
  "single-video": {
    slug: "single-video",
    title: "Single Video Generation",
    description: "Generate an AI video from a source image (img2video) with motion effects",
    category: "video-generation",
    tags: ["video", "simple", "img2video", "animation"],
    tier: "free",
    icon: "\u{1F3AC}",
    defaultModel: "nano-banana-pro",
    inputTypes: ["image", "text"],
    outputTypes: ["video"],
    version: 1
  },
  "image-series": {
    slug: "image-series",
    title: "Image Series Generation",
    description: "Generate a series of related images from a concept using LLM-powered prompt expansion",
    category: "image-generation",
    tags: ["image", "series", "llm", "batch", "automation"],
    tier: "free",
    icon: "\u{1F4F8}",
    defaultModel: "nano-banana-pro",
    inputTypes: ["text"],
    outputTypes: ["image"],
    version: 1
  },
  "image-to-video": {
    slug: "image-to-video",
    title: "Image to Video Transition",
    description: "Create smooth interpolated video transitions between two images with easing effects",
    category: "video-generation",
    tags: ["video", "interpolation", "animation", "transition"],
    tier: "free",
    icon: "\u{1F39E}\uFE0F",
    defaultModel: "nano-banana-pro",
    inputTypes: ["image"],
    outputTypes: ["video"],
    version: 1
  },
  "full-pipeline": {
    slug: "full-pipeline",
    title: "Full Content Pipeline",
    description: "Complete end-to-end workflow: concept development to final video production",
    category: "full-pipeline",
    tags: ["pipeline", "advanced", "complete", "automation", "production"],
    tier: "free",
    icon: "\u26A1",
    defaultModel: "nano-banana-pro",
    inputTypes: ["text"],
    outputTypes: ["video"],
    version: 1
  },
  "ugc-factory": {
    slug: "ugc-factory",
    title: "UGC Content Factory",
    description: "UGC pipeline: script \u2192 voice \u2192 motion \u2192 lip sync \u2192 download",
    category: "full-pipeline",
    tags: ["ugc", "social", "automation", "marketing", "content"],
    tier: "free",
    icon: "\u{1F3ED}",
    defaultModel: "nano-banana-pro",
    inputTypes: ["text"],
    outputTypes: ["video"],
    version: 1
  }
};
/** List metadata for every registered workflow. */
function getAllWorkflows() {
  return Object.values(WORKFLOW_REGISTRY);
}
/** Look up a workflow's metadata by slug; undefined when unknown. */
function getWorkflow(slug) {
  return WORKFLOW_REGISTRY[slug];
}
/**
 * Load the full node-graph JSON for a workflow from workflows/<slug>.json.
 * Returns undefined when the file is missing or unparseable.
 *
 * Fix: this module is ESM (.mjs), where `__dirname` is not defined — the
 * previous `path.join(__dirname, ...)` threw a ReferenceError that the catch
 * silently swallowed, so this function always returned undefined. Resolve the
 * file relative to this module via import.meta.url instead; fs.readFileSync
 * accepts a file: URL directly.
 */
function getWorkflowJson(slug) {
  try {
    const workflowUrl = new URL(`../workflows/${slug}.json`, import.meta.url);
    const jsonContent = fs.readFileSync(workflowUrl, "utf-8");
    return JSON.parse(jsonContent);
  } catch (_error) {
    // Missing or malformed workflow files are reported as "not found".
    return void 0;
  }
}
/** List all registered workflow slugs. */
function getWorkflowIds() {
  return Object.keys(WORKFLOW_REGISTRY);
}
/** Alias of getWorkflow: metadata by slug, undefined when unknown. */
function getWorkflowMetadata(id) {
  return WORKFLOW_REGISTRY[id];
}
/** All workflows whose category exactly matches the given one. */
function getWorkflowsByCategory(category) {
  return Object.values(WORKFLOW_REGISTRY).filter((w) => w.category === category);
}
/** Case-insensitive substring match against each workflow's tags. */
function searchWorkflowsByTag(tag) {
  return Object.values(WORKFLOW_REGISTRY).filter(
    (w) => w.tags.some((t) => t.toLowerCase().includes(tag.toLowerCase()))
  );
}
|
|
847
|
+
export {
|
|
848
|
+
ComfyUIClient,
|
|
849
|
+
ComfyUITemplateRunner,
|
|
850
|
+
WORKFLOW_REGISTRY,
|
|
851
|
+
buildFlux2DevPrompt,
|
|
852
|
+
buildFlux2DevPulidLoraPrompt,
|
|
853
|
+
buildFlux2DevPulidPrompt,
|
|
854
|
+
buildFlux2KleinPrompt,
|
|
855
|
+
buildFluxDevPrompt,
|
|
856
|
+
buildPulidFluxPrompt,
|
|
857
|
+
buildZImageTurboPrompt,
|
|
858
|
+
getAllWorkflows,
|
|
859
|
+
getWorkflow,
|
|
860
|
+
getWorkflowIds,
|
|
861
|
+
getWorkflowJson,
|
|
862
|
+
getWorkflowMetadata,
|
|
863
|
+
getWorkflowsByCategory,
|
|
864
|
+
searchWorkflowsByTag
|
|
865
|
+
};
|