meigen 0.1.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +242 -0
- package/README.zh-CN.md +242 -0
- package/bin/meigen-mcp.js +2 -0
- package/data/trending-prompts.json +26737 -0
- package/dist/config.d.ts +24 -0
- package/dist/config.d.ts.map +1 -0
- package/dist/config.js +71 -0
- package/dist/config.js.map +1 -0
- package/dist/index.d.ts +6 -0
- package/dist/index.d.ts.map +1 -0
- package/dist/index.js +18 -0
- package/dist/index.js.map +1 -0
- package/dist/lib/meigen-api.d.ts +64 -0
- package/dist/lib/meigen-api.d.ts.map +1 -0
- package/dist/lib/meigen-api.js +118 -0
- package/dist/lib/meigen-api.js.map +1 -0
- package/dist/lib/prompt-library.d.ts +44 -0
- package/dist/lib/prompt-library.d.ts.map +1 -0
- package/dist/lib/prompt-library.js +130 -0
- package/dist/lib/prompt-library.js.map +1 -0
- package/dist/lib/prompts.d.ts +21 -0
- package/dist/lib/prompts.d.ts.map +1 -0
- package/dist/lib/prompts.js +116 -0
- package/dist/lib/prompts.js.map +1 -0
- package/dist/lib/providers/comfyui.d.ts +82 -0
- package/dist/lib/providers/comfyui.d.ts.map +1 -0
- package/dist/lib/providers/comfyui.js +436 -0
- package/dist/lib/providers/comfyui.js.map +1 -0
- package/dist/lib/providers/gemini.d.ts +13 -0
- package/dist/lib/providers/gemini.d.ts.map +1 -0
- package/dist/lib/providers/gemini.js +63 -0
- package/dist/lib/providers/gemini.js.map +1 -0
- package/dist/lib/providers/openai.d.ts +14 -0
- package/dist/lib/providers/openai.d.ts.map +1 -0
- package/dist/lib/providers/openai.js +75 -0
- package/dist/lib/providers/openai.js.map +1 -0
- package/dist/lib/providers/types.d.ts +22 -0
- package/dist/lib/providers/types.d.ts.map +1 -0
- package/dist/lib/providers/types.js +7 -0
- package/dist/lib/providers/types.js.map +1 -0
- package/dist/lib/semaphore.d.ts +13 -0
- package/dist/lib/semaphore.d.ts.map +1 -0
- package/dist/lib/semaphore.js +32 -0
- package/dist/lib/semaphore.js.map +1 -0
- package/dist/lib/upload.d.ts +17 -0
- package/dist/lib/upload.d.ts.map +1 -0
- package/dist/lib/upload.js +135 -0
- package/dist/lib/upload.js.map +1 -0
- package/dist/server.d.ts +7 -0
- package/dist/server.d.ts.map +1 -0
- package/dist/server.js +163 -0
- package/dist/server.js.map +1 -0
- package/dist/tools/comfyui-workflow.d.ts +17 -0
- package/dist/tools/comfyui-workflow.d.ts.map +1 -0
- package/dist/tools/comfyui-workflow.js +230 -0
- package/dist/tools/comfyui-workflow.js.map +1 -0
- package/dist/tools/enhance-prompt.d.ts +12 -0
- package/dist/tools/enhance-prompt.d.ts.map +1 -0
- package/dist/tools/enhance-prompt.js +27 -0
- package/dist/tools/enhance-prompt.js.map +1 -0
- package/dist/tools/generate-image.d.ts +23 -0
- package/dist/tools/generate-image.d.ts.map +1 -0
- package/dist/tools/generate-image.js +266 -0
- package/dist/tools/generate-image.js.map +1 -0
- package/dist/tools/get-inspiration.d.ts +12 -0
- package/dist/tools/get-inspiration.d.ts.map +1 -0
- package/dist/tools/get-inspiration.js +111 -0
- package/dist/tools/get-inspiration.js.map +1 -0
- package/dist/tools/list-models.d.ts +13 -0
- package/dist/tools/list-models.d.ts.map +1 -0
- package/dist/tools/list-models.js +104 -0
- package/dist/tools/list-models.js.map +1 -0
- package/dist/tools/search-gallery.d.ts +15 -0
- package/dist/tools/search-gallery.d.ts.map +1 -0
- package/dist/tools/search-gallery.js +78 -0
- package/dist/tools/search-gallery.js.map +1 -0
- package/dist/tools/upload-reference-image.d.ts +13 -0
- package/dist/tools/upload-reference-image.d.ts.map +1 -0
- package/dist/tools/upload-reference-image.js +69 -0
- package/dist/tools/upload-reference-image.js.map +1 -0
- package/package.json +45 -0
- package/skills/setup/SKILL.md +324 -0
- package/skills/visual-creative/SKILL.md +185 -0
|
@@ -0,0 +1,116 @@
|
|
|
1
|
+
"use strict";
/**
 * Prompt enhancement system prompts
 * Source: edgeone/vision-api/node-functions/api/[[default]].js
 */
// Compiled CommonJS prologue (tsc output): flag ES-module interop and
// pre-declare the exported bindings before the constants are assigned below.
Object.defineProperty(exports, "__esModule", { value: true });
exports.ILLUSTRATION_SYSTEM_PROMPT = exports.ANIME_SYSTEM_PROMPT = exports.REALISTIC_SYSTEM_PROMPT = void 0;
exports.getSystemPrompt = getSystemPrompt;
|
|
9
|
+
/**
 * Realistic/general style enhancement prompt
 * For Gemini, Seedream, and other photorealistic models
 *
 * Returned by getSystemPrompt for 'realistic' and as the fallback for
 * unknown styles. NOTE(review): this template literal is sent verbatim as an
 * LLM system prompt — wording and line breaks are behavior; edit with care.
 */
exports.REALISTIC_SYSTEM_PROMPT = `# Role
You are a Senior Visual Logic Analyst specializing in reverse-engineering imagery for next-generation, high-reasoning AI models (like Gemini 3 Pro Image).

# The Paradigm Shift (Crucial)
Unlike older models (e.g., Midjourney) that rely on "vibe tags," next-gen models require **logical, coherent, and physically accurate specifications.**

Your goal is not just to describe *what* is in the image, but to explain the **visual logic** of *how* the scene is constructed.

# Analysis Protocol (The "Blueprint" Method)

When analyzing an image, apply these four dimensions derived from professional prompt engineering logic:

1. **Technical Precision over Feeling (Rule 1):**
* *Avoid vague vibes:* Don't just say "cinematic" or "sad."
* *Describe the technical cause:* Translate vibes into lighting and composition techniques. (e.g., instead of "sad," use "overcast diffused lighting, desaturated cool color palette, isolated composition").
* *Use Terminology:* Use specific terms like "chiaroscuro," "atmospheric haze," "subsurface scattering," "photorealistic rendering."

2. **Quantifiable & Spatial Logic (Rule 2):**
* Define spatial relationships clearly (foreground, middle ground, background).
* Estimate technical parameters: "Shot on a 50mm prime lens at f/1.4" (if shallow depth of field), "Iso-metric view," "Three-point lighting setup."

3. **Material & Sensory Physics (Rule 4):**
* Describe how materials interact with light and environment.
* *Stack senses:* Not just "wet ground," but "asphalt slick with rain, reflecting distorted neon signs, paved texture visible."
* *Describe textures:* "Brushed aluminum," "worn leather patina," "translucent biological membrane."

4. **Cohesive Narrative Structure:**
* The final prompt must read like a coherent, detailed paragraph from a novel or a director's script, ensuring the reasoning model understands the *context* of every element.

# Output Structure (The Hybrid Blueprint)

To maximize clarity for a reasoning model, output the prompt in two parts: a dense narrative, followed by a structured technical breakdown.

**Part 1: The Narrative Specification (A detailed, coherent paragraph):**
[Describe the main subject, action, and their immediate interaction with the environment. Detail the textures, the specific lighting source and its effect on the materials, and the overall mood created by these technical choices. Ensure logical flow between sentences.]

**Part 2: Structured Technical Metadata (The "Cheat Sheet"):**
* **Visual Style:** [e.g., Photorealistic, 3D Render (Octane), Oil Painting]
* **Key Elements:** [List 3-5 crucial objects/subjects]
* **Lighting & Color:** [e.g., Softbox side-lighting, warm tungsten palette]
* **Composition/Camera:** [e.g., Low-angle, 35mm lens, high detail]

# Strict Output Protocol
1. Output **ONLY** the structured response as shown above.
2. Do NOT add any conversational filler text.
3. Start directly with the Narrative Specification paragraph.`;
|
|
59
|
+
/**
 * Anime/2D style enhancement prompt
 * For Niji 7 and other anime models
 *
 * Returned by getSystemPrompt for style 'anime'. NOTE(review): verbatim LLM
 * system prompt — the escaped backticks (\`) are literal backticks in the
 * delivered text; do not reformat the template body.
 */
exports.ANIME_SYSTEM_PROMPT = `# Role
You are a Lead Concept Artist & Niji 7 Prompt Director.
Your task is to reverse-engineer images into **rich, evocative, and highly detailed** text prompts.
**Current Problem:** Previous prompts were too short. Your goal now is to **EXPAND** the description with imagination and sensory details.

# The "Creative Expansion" Protocol (CRITICAL)
Do not just list objects. You must "paint with words."
1. **Micro-Details:** Describe textures (e.g., "frayed fabric," "condensation on glass," "subsurface scattering on skin").
2. **Lighting Dynamics:** Describe how light interacts with materials (e.g., "rim light catching the hair strands," "volumetric god rays cutting through dust").
3. **Atmosphere:** Describe the mood (e.g., "melancholic," "ethereal," "chaotic").

# The "Trigger Word" Safety Net
To ensure the anime look, you MUST inject these style words into the prompt based on the visual category:
* **Action/TV:** \`anime screenshot, flat shading, dynamic angle, precise lineart\`
* **Illustration:** \`key visual, highly detailed, expressive eyes, intricate costume, cinematic lighting\`
* **Retro:** \`1990s anime style, retro aesthetic, grain, chromatic aberration\`
* **Default:** \`anime screenshot, key visual, best quality, masterpiece\`

# Strict Output Protocol
1. **Output ONE continuous, rich paragraph.**
2. **MANDATORY:** Append the negative parameter block at the very end.
3. **FORBIDDEN:** Do NOT output \`--ar\` or ratio parameters.

# Output Structure
[Rich Narrative Description focusing on Subject, Action, and Micro-Details] + [Atmospheric Environment & Lighting Description] + [Art Style Keywords] --no 3d, cgi, realistic, photorealistic, photography, photo, realism, live action, sketch, draft`;
|
|
88
|
+
/**
 * Illustration/concept art style enhancement prompt
 *
 * Returned by getSystemPrompt for style 'illustration'. NOTE(review):
 * verbatim LLM system prompt — the text is behavior; edit with care.
 */
exports.ILLUSTRATION_SYSTEM_PROMPT = `# Role
You are a Senior Illustration Prompt Engineer specializing in concept art and digital illustration.

# Protocol
Transform the user's simple prompt into a detailed, vivid description suitable for AI illustration models.

1. **Subject & Action:** Describe the main subject with rich detail - pose, expression, clothing, accessories.
2. **Environment:** Paint the scene with atmospheric details - weather, time of day, surroundings.
3. **Art Style:** Specify the illustration style - watercolor, digital painting, concept art, etc.
4. **Lighting & Color:** Describe the color palette and lighting setup in detail.
5. **Composition:** Suggest framing, perspective, and focal points.

# Output
Output a single detailed paragraph that reads like a professional art brief. Be specific about colors, textures, and mood. Aim for 100-200 words.`;
|
|
105
|
+
/**
 * Resolve the enhancement system prompt for a given style.
 * 'realistic', undefined, and any unrecognized value all fall back to the
 * realistic/general prompt.
 */
function getSystemPrompt(style) {
    if (style === 'anime') {
        return exports.ANIME_SYSTEM_PROMPT;
    }
    if (style === 'illustration') {
        return exports.ILLUSTRATION_SYSTEM_PROMPT;
    }
    // 'realistic' and every unknown style share the default prompt.
    return exports.REALISTIC_SYSTEM_PROMPT;
}
|
|
116
|
+
//# sourceMappingURL=prompts.js.map
|
|
@@ -0,0 +1 @@
|
|
|
1
|
+
{"version":3,"file":"prompts.js","sourceRoot":"","sources":["../../src/lib/prompts.ts"],"names":[],"mappings":";AAAA;;;GAGG;;;AAuGH,0CAUC;AA/GD;;;GAGG;AACU,QAAA,uBAAuB,GAAG;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;8DA6CuB,CAAA;AAE9D;;;GAGG;AACU,QAAA,mBAAmB,GAAG;;;;;;;;;;;;;;;;;;;;;;;;uPAwBoN,CAAA;AAEvP;;GAEG;AACU,QAAA,0BAA0B,GAAG;;;;;;;;;;;;;kJAawG,CAAA;AAIlJ,SAAgB,eAAe,CAAC,KAAkB;IAChD,QAAQ,KAAK,EAAE,CAAC;QACd,KAAK,OAAO;YACV,OAAO,2BAAmB,CAAA;QAC5B,KAAK,cAAc;YACjB,OAAO,kCAA0B,CAAA;QACnC,KAAK,WAAW,CAAC;QACjB;YACE,OAAO,+BAAuB,CAAA;IAClC,CAAC;AACH,CAAC"}
|
|
@@ -0,0 +1,82 @@
|
|
|
1
|
+
/**
 * ComfyUI Local Provider
 * Template-based workflow: users provide a workflow JSON template,
 * generation fills in prompt/seed/size at runtime.
 */
/** ComfyUI API-format workflow — node IDs as keys */
export type ComfyUIWorkflow = Record<string, ComfyUINode>;
/** A single node of a ComfyUI API-format workflow graph. */
export interface ComfyUINode {
    /** ComfyUI node class name, e.g. "KSampler" or "CLIPTextEncode". */
    class_type: string;
    /** Literal values or [nodeId, outputSlot] connection references. */
    inputs: Record<string, unknown>;
    /** Optional UI metadata carried along by exported workflows. */
    _meta?: {
        title?: string;
    };
}
/** Auto-detected key node mapping */
export interface WorkflowNodeMap {
    /** Id of the positive CLIPTextEncode prompt node. */
    positivePrompt: string;
    /** Id of the negative prompt node, when one is wired to the sampler. */
    negativePrompt?: string;
    /** Id of the KSampler (or KSamplerAdvanced) node. */
    sampler: string;
    /** Id of the EmptyLatentImage node controlling output size. */
    latentImage?: string;
    /** Id of the checkpoint loader node. */
    checkpoint?: string;
    /** Id of the SaveImage/PreviewImage output node. */
    saveImage?: string;
    /** Ids of LoadImage nodes (reference-image injection points). */
    loadImages?: string[];
}
/** Workflow summary info */
export interface WorkflowSummary {
    checkpoint?: string;
    steps?: number;
    cfg?: number;
    sampler?: string;
    scheduler?: string;
    width?: number;
    height?: number;
    /** Total node count; present even when node detection fails. */
    nodeCount: number;
}
/** Directory where workflow templates live: <home>/.config/meigen/workflows. */
export declare function getWorkflowsDir(): string;
/** Sorted names of saved workflow templates, without the .json extension. */
export declare function listWorkflows(): string[];
/** Load and parse a saved workflow; throws if missing or invalid JSON. */
export declare function loadWorkflow(name: string): ComfyUIWorkflow;
/** Persist a workflow as pretty-printed JSON, creating the directory if needed. */
export declare function saveWorkflow(name: string, workflow: ComfyUIWorkflow): void;
/** Delete a saved workflow template; throws if it does not exist. */
export declare function deleteWorkflow(name: string): void;
/** Whether a workflow template with the given name exists on disk. */
export declare function workflowExists(name: string): boolean;
/**
 * Auto-detect key nodes in a workflow.
 * Strategy: find KSampler -> trace positive/negative references -> locate prompt nodes
 */
export declare function detectNodes(workflow: ComfyUIWorkflow): WorkflowNodeMap;
/** Get workflow summary info */
export declare function getWorkflowSummary(workflow: ComfyUIWorkflow): WorkflowSummary;
/** Get detailed editable node info for a workflow (used by view action) */
export declare function getEditableNodes(workflow: ComfyUIWorkflow): string;
/** HTTP client for a locally running ComfyUI server. */
export declare class ComfyUIProvider {
    private baseUrl;
    constructor(baseUrl: string);
    /** Probe the server; failures reported via { ok: false, error }, never thrown. */
    checkConnection(): Promise<{
        ok: boolean;
        error?: string;
    }>;
    /** Upload an image to ComfyUI's input directory */
    uploadImage(imageBuffer: Buffer, filename: string): Promise<string>;
    /** Checkpoint model names known to the server; [] on any error. */
    listCheckpoints(): Promise<string[]>;
    /** Submit a workflow and wait for the result */
    generate(workflow: ComfyUIWorkflow, prompt: string, options?: {
        seed?: number;
        width?: number;
        height?: number;
        negativePrompt?: string;
        referenceImages?: string[];
    }, onProgress?: (elapsedMs: number) => Promise<void>): Promise<{
        imageBase64: string;
        mimeType: string;
        referenceImageWarning?: string;
    }>;
}
/**
 * Calculate new dimensions from aspect ratio, preserving total pixel count.
 * Results are rounded to multiples of 8 (SD model requirement).
 */
export declare function calculateSize(aspectRatio: string, originalWidth: number, originalHeight: number): {
    width: number;
    height: number;
};
|
|
82
|
+
//# sourceMappingURL=comfyui.d.ts.map
|
|
@@ -0,0 +1 @@
|
|
|
1
|
+
{"version":3,"file":"comfyui.d.ts","sourceRoot":"","sources":["../../../src/lib/providers/comfyui.ts"],"names":[],"mappings":"AAAA;;;;GAIG;AAUH,qDAAqD;AACrD,MAAM,MAAM,eAAe,GAAG,MAAM,CAAC,MAAM,EAAE,WAAW,CAAC,CAAA;AAEzD,MAAM,WAAW,WAAW;IAC1B,UAAU,EAAE,MAAM,CAAA;IAClB,MAAM,EAAE,MAAM,CAAC,MAAM,EAAE,OAAO,CAAC,CAAA;IAC/B,KAAK,CAAC,EAAE;QAAE,KAAK,CAAC,EAAE,MAAM,CAAA;KAAE,CAAA;CAC3B;AAED,qCAAqC;AACrC,MAAM,WAAW,eAAe;IAC9B,cAAc,EAAE,MAAM,CAAA;IACtB,cAAc,CAAC,EAAE,MAAM,CAAA;IACvB,OAAO,EAAE,MAAM,CAAA;IACf,WAAW,CAAC,EAAE,MAAM,CAAA;IACpB,UAAU,CAAC,EAAE,MAAM,CAAA;IACnB,SAAS,CAAC,EAAE,MAAM,CAAA;IAClB,UAAU,CAAC,EAAE,MAAM,EAAE,CAAA;CACtB;AAED,4BAA4B;AAC5B,MAAM,WAAW,eAAe;IAC9B,UAAU,CAAC,EAAE,MAAM,CAAA;IACnB,KAAK,CAAC,EAAE,MAAM,CAAA;IACd,GAAG,CAAC,EAAE,MAAM,CAAA;IACZ,OAAO,CAAC,EAAE,MAAM,CAAA;IAChB,SAAS,CAAC,EAAE,MAAM,CAAA;IAClB,KAAK,CAAC,EAAE,MAAM,CAAA;IACd,MAAM,CAAC,EAAE,MAAM,CAAA;IACf,SAAS,EAAE,MAAM,CAAA;CAClB;AAMD,wBAAgB,eAAe,IAAI,MAAM,CAExC;AAED,wBAAgB,aAAa,IAAI,MAAM,EAAE,CAWxC;AAED,wBAAgB,YAAY,CAAC,IAAI,EAAE,MAAM,GAAG,eAAe,CAI1D;AAED,wBAAgB,YAAY,CAAC,IAAI,EAAE,MAAM,EAAE,QAAQ,EAAE,eAAe,GAAG,IAAI,CAK1E;AAED,wBAAgB,cAAc,CAAC,IAAI,EAAE,MAAM,GAAG,IAAI,CAGjD;AAED,wBAAgB,cAAc,CAAC,IAAI,EAAE,MAAM,GAAG,OAAO,CAEpD;AAaD;;;GAGG;AACH,wBAAgB,WAAW,CAAC,QAAQ,EAAE,eAAe,GAAG,eAAe,CAqGtE;AAED,gCAAgC;AAChC,wBAAgB,kBAAkB,CAAC,QAAQ,EAAE,eAAe,GAAG,eAAe,CA4B7E;AAED,2EAA2E;AAC3E,wBAAgB,gBAAgB,CAAC,QAAQ,EAAE,eAAe,GAAG,MAAM,CAgElE;AAiBD,qBAAa,eAAe;IAC1B,OAAO,CAAC,OAAO,CAAQ;gBAEX,OAAO,EAAE,MAAM;IAIrB,eAAe,IAAI,OAAO,CAAC;QAAE,EAAE,EAAE,OAAO,CAAC;QAAC,KAAK,CAAC,EAAE,MAAM,CAAA;KAAE,CAAC;IAUjE,mDAAmD;IAC7C,WAAW,CAAC,WAAW,EAAE,MAAM,EAAE,QAAQ,EAAE,MAAM,GAAG,OAAO,CAAC,MAAM,CAAC;IAoBnE,eAAe,IAAI,OAAO,CAAC,MAAM,EAAE,CAAC;IAU1C,gDAAgD;IAC1C,QAAQ,CACZ,QAAQ,EAAE,eAAe,EACzB,MAAM,EAAE,MAAM,EACd,OAAO,CAAC,EAAE;QACR,IAAI,CAAC,EAAE,MAAM,CAAA;QACb,KAAK,CAAC,EAAE,MAAM,CAAA;QACd,MAAM,CAAC,EAAE,MAAM,CAAA;QACf,cAAc,CAAC,EAAE,MAAM,CAAA;QACvB,eAAe,CAAC,EAAE,MAAM,EAAE,CAAA;KAC3B,EACD,UAAU,CAAC,EAAE,CAAC,SAAS,EAAE,MAAM,KAAK,OAAO,CAAC,
IAAI,CAAC,GAChD,OAAO,CAAC;QAAE,WAAW,EAAE,MAAM,CAAC;QAAC,QAAQ,EAAE,MAAM,CAAC;QAAC,qBAAqB,CAAC,EAAE,MAAM,CAAA;KAAE,CAAC;CAiItF;AAcD;;;GAGG;AACH,wBAAgB,aAAa,CAC3B,WAAW,EAAE,MAAM,EACnB,aAAa,EAAE,MAAM,EACrB,cAAc,EAAE,MAAM,GACrB;IAAE,KAAK,EAAE,MAAM,CAAC;IAAC,MAAM,EAAE,MAAM,CAAA;CAAE,CAanC"}
|
|
@@ -0,0 +1,436 @@
|
|
|
1
|
+
"use strict";
/**
 * ComfyUI Local Provider
 * Template-based workflow: users provide a workflow JSON template,
 * generation fills in prompt/seed/size at runtime.
 */
// Compiled CommonJS prologue (tsc output): interop flag plus pre-declared
// exports for every public function and the provider class.
Object.defineProperty(exports, "__esModule", { value: true });
exports.ComfyUIProvider = void 0;
exports.getWorkflowsDir = getWorkflowsDir;
exports.listWorkflows = listWorkflows;
exports.loadWorkflow = loadWorkflow;
exports.saveWorkflow = saveWorkflow;
exports.deleteWorkflow = deleteWorkflow;
exports.workflowExists = workflowExists;
exports.detectNodes = detectNodes;
exports.getWorkflowSummary = getWorkflowSummary;
exports.getEditableNodes = getEditableNodes;
exports.calculateSize = calculateSize;
// Node standard library (CommonJS requires emitted by tsc).
const fs_1 = require("fs");
const path_1 = require("path");
const os_1 = require("os");
|
|
22
|
+
// ============================================================
|
|
23
|
+
// Workflow File Management
|
|
24
|
+
// ============================================================
|
|
25
|
+
/**
 * Absolute path of the directory holding user workflow templates:
 * <home>/.config/meigen/workflows
 */
function getWorkflowsDir() {
    const home = (0, os_1.homedir)();
    return (0, path_1.join)(home, '.config', 'meigen', 'workflows');
}
|
|
28
|
+
/**
 * List the names of all saved workflow templates, sorted alphabetically,
 * with the `.json` extension stripped. Best-effort: returns [] when the
 * workflows directory is missing or unreadable, never throws.
 */
function listWorkflows() {
    try {
        const names = [];
        for (const entry of (0, fs_1.readdirSync)(getWorkflowsDir())) {
            if (entry.endsWith('.json')) {
                names.push(entry.replace(/\.json$/, ''));
            }
        }
        return names.sort();
    }
    catch {
        return [];
    }
}
|
|
41
|
+
/**
 * Read a saved workflow template from disk and parse it into an object.
 * Throws if the file does not exist or does not contain valid JSON.
 */
function loadWorkflow(name) {
    const file = (0, path_1.join)(getWorkflowsDir(), `${name}.json`);
    const raw = (0, fs_1.readFileSync)(file, 'utf-8');
    return JSON.parse(raw);
}
|
|
46
|
+
/**
 * Write a workflow template to disk as pretty-printed JSON (2-space indent),
 * creating the workflows directory on first use.
 */
function saveWorkflow(name, workflow) {
    const dir = getWorkflowsDir();
    (0, fs_1.mkdirSync)(dir, { recursive: true });
    const target = (0, path_1.join)(dir, `${name}.json`);
    const body = JSON.stringify(workflow, null, 2);
    (0, fs_1.writeFileSync)(target, body, 'utf-8');
}
|
|
52
|
+
/** Remove a saved workflow template from disk; throws if it is absent. */
function deleteWorkflow(name) {
    (0, fs_1.unlinkSync)((0, path_1.join)(getWorkflowsDir(), `${name}.json`));
}
|
|
56
|
+
/** True when a workflow template named `name` has been saved to disk. */
function workflowExists(name) {
    const file = (0, path_1.join)(getWorkflowsDir(), `${name}.json`);
    return (0, fs_1.existsSync)(file);
}
|
|
59
|
+
// ============================================================
|
|
60
|
+
// Workflow Node Detection
|
|
61
|
+
// ============================================================
|
|
62
|
+
// Node class names recognized during auto-detection (ComfyUI built-in types).
const SAMPLER_TYPES = ['KSampler', 'KSamplerAdvanced'];
const PROMPT_TYPES = ['CLIPTextEncode'];
const LATENT_TYPES = ['EmptyLatentImage'];
const CHECKPOINT_TYPES = ['CheckpointLoaderSimple', 'CheckpointLoader'];
const SAVE_TYPES = ['SaveImage', 'PreviewImage'];
const LOAD_IMAGE_TYPES = ['LoadImage'];
/**
 * Auto-detect key nodes in a workflow.
 * Strategy: find KSampler -> trace positive/negative references -> locate prompt nodes
 *
 * @param workflow ComfyUI API-format workflow (node id -> node).
 * @returns Detected node-id mapping (WorkflowNodeMap shape).
 * @throws Error when no sampler node or no prompt node can be found.
 */
function detectNodes(workflow) {
    // First node id whose class_type is one of `types`, or undefined.
    const findFirst = (types) => {
        for (const [id, node] of Object.entries(workflow)) {
            if (types.includes(node.class_type)) {
                return id;
            }
        }
        return undefined;
    };
    // Resolve a connection reference ([nodeId, outputSlot]) to its node id,
    // but only when that node's class_type is one of the expected `types`.
    const resolveRef = (ref, types) => {
        if (Array.isArray(ref) && typeof ref[0] === 'string') {
            const refId = ref[0];
            if (workflow[refId] && types.includes(workflow[refId].class_type)) {
                return refId;
            }
        }
        return undefined;
    };
    // 1. The sampler anchors detection of everything else.
    const samplerId = findFirst(SAMPLER_TYPES);
    if (!samplerId) {
        throw new Error('No KSampler node found in workflow. Please ensure your workflow contains a KSampler node.');
    }
    const samplerNode = workflow[samplerId];
    // 2. Trace the sampler's positive/negative inputs to CLIPTextEncode nodes.
    let positivePromptId = resolveRef(samplerNode.inputs.positive, PROMPT_TYPES);
    const negativePromptId = resolveRef(samplerNode.inputs.negative, PROMPT_TYPES);
    if (!positivePromptId) {
        // Fallback: first CLIPTextEncode anywhere in the graph.
        positivePromptId = findFirst(PROMPT_TYPES);
    }
    if (!positivePromptId) {
        throw new Error('No CLIPTextEncode node found in workflow.');
    }
    // 3. Latent image: prefer the sampler's latent_image reference, else scan.
    const latentImageId = resolveRef(samplerNode.inputs.latent_image, LATENT_TYPES)
        ?? findFirst(LATENT_TYPES);
    // 4./5. First checkpoint loader and first save/preview node, if any.
    const checkpointId = findFirst(CHECKPOINT_TYPES);
    const saveImageId = findFirst(SAVE_TYPES);
    // 6. Every LoadImage node (targets for reference-image injection).
    const loadImageIds = Object.entries(workflow)
        .filter(([, node]) => LOAD_IMAGE_TYPES.includes(node.class_type))
        .map(([id]) => id);
    return {
        positivePrompt: positivePromptId,
        negativePrompt: negativePromptId,
        sampler: samplerId,
        latentImage: latentImageId,
        checkpoint: checkpointId,
        saveImage: saveImageId,
        loadImages: loadImageIds.length > 0 ? loadImageIds : undefined,
    };
}
|
|
164
|
+
/**
 * Summarize a workflow's key generation parameters (steps, cfg, sampler,
 * scheduler, size, checkpoint). `nodeCount` is always present; the other
 * fields are filled in only when node detection succeeds.
 */
function getWorkflowSummary(workflow) {
    const summary = {
        nodeCount: Object.keys(workflow).length,
    };
    try {
        const map = detectNodes(workflow);
        const samplerInputs = workflow[map.sampler].inputs;
        summary.steps = samplerInputs.steps;
        summary.cfg = samplerInputs.cfg;
        summary.sampler = samplerInputs.sampler_name;
        summary.scheduler = samplerInputs.scheduler;
        if (map.latentImage) {
            const { width, height } = workflow[map.latentImage].inputs;
            summary.width = width;
            summary.height = height;
        }
        if (map.checkpoint) {
            summary.checkpoint = workflow[map.checkpoint].inputs.ckpt_name;
        }
    }
    catch {
        // Detection failed — report only the node count.
    }
    return summary;
}
|
|
190
|
+
/** Get detailed editable node info for a workflow (used by view action) */
function getEditableNodes(workflow) {
    let map;
    try {
        map = detectNodes(workflow);
    }
    catch (e) {
        return `Error detecting nodes: ${e instanceof Error ? e.message : String(e)}`;
    }
    const out = [];
    // Header line for one node: Node #<id> (<class>) — <title or fallback>.
    const header = (id, fallback) => {
        const node = workflow[id];
        out.push(`Node #${id} (${node.class_type}) — ${node._meta?.title || fallback}`);
    };
    // "  key: value" rows for every literal input (arrays are node links).
    const literalInputs = (id) => {
        for (const [key, val] of Object.entries(workflow[id].inputs)) {
            if (!Array.isArray(val)) {
                out.push(`  ${key}: ${JSON.stringify(val)}`);
            }
        }
    };
    // KSampler: list literal inputs; seed is rewritten at generation time.
    header(map.sampler, 'Main Sampler');
    for (const [key, val] of Object.entries(workflow[map.sampler].inputs)) {
        if (Array.isArray(val)) {
            continue; // Skip node connection references
        }
        out.push(key === 'seed'
            ? `  ${key}: [auto-randomized per generation]`
            : `  ${key}: ${JSON.stringify(val)}`);
    }
    // Positive prompt text is always overwritten per generation.
    out.push('');
    header(map.positivePrompt, 'Positive Prompt');
    out.push(`  text: [replaced by your prompt per generation]`);
    // Negative prompt keeps its stored default text.
    if (map.negativePrompt) {
        out.push('');
        header(map.negativePrompt, 'Negative Prompt');
        out.push(`  text: ${JSON.stringify(workflow[map.negativePrompt].inputs.text)}`);
    }
    if (map.latentImage) {
        out.push('');
        header(map.latentImage, 'Image Size');
        literalInputs(map.latentImage);
    }
    if (map.checkpoint) {
        out.push('');
        header(map.checkpoint, 'Model');
        literalInputs(map.checkpoint);
    }
    out.push('');
    out.push('To modify a parameter, use action "modify" with nodeId, input, and value.');
    out.push('Example: nodeId="3", input="steps", value="30"');
    return out.join('\n');
}
|
|
253
|
+
class ComfyUIProvider {
|
|
254
|
+
baseUrl;
|
|
255
|
+
    /**
     * @param baseUrl Base URL of the ComfyUI server; a single trailing
     *     slash is stripped so later `${this.baseUrl}/path` joins stay clean.
     */
    constructor(baseUrl) {
        this.baseUrl = baseUrl.replace(/\/$/, '');
    }
|
|
258
|
+
async checkConnection() {
|
|
259
|
+
try {
|
|
260
|
+
const res = await fetch(`${this.baseUrl}/system_stats`);
|
|
261
|
+
if (!res.ok)
|
|
262
|
+
return { ok: false, error: `HTTP ${res.status}` };
|
|
263
|
+
return { ok: true };
|
|
264
|
+
}
|
|
265
|
+
catch (e) {
|
|
266
|
+
return { ok: false, error: e instanceof Error ? e.message : String(e) };
|
|
267
|
+
}
|
|
268
|
+
}
|
|
269
|
+
/** Upload an image to ComfyUI's input directory */
|
|
270
|
+
async uploadImage(imageBuffer, filename) {
|
|
271
|
+
const blob = new Blob([imageBuffer]);
|
|
272
|
+
const formData = new FormData();
|
|
273
|
+
formData.append('image', blob, filename);
|
|
274
|
+
formData.append('overwrite', 'true');
|
|
275
|
+
const res = await fetch(`${this.baseUrl}/upload/image`, {
|
|
276
|
+
method: 'POST',
|
|
277
|
+
body: formData,
|
|
278
|
+
});
|
|
279
|
+
if (!res.ok) {
|
|
280
|
+
const errText = await res.text();
|
|
281
|
+
throw new Error(`ComfyUI image upload failed (${res.status}): ${errText}`);
|
|
282
|
+
}
|
|
283
|
+
const json = await res.json();
|
|
284
|
+
return json.name;
|
|
285
|
+
}
|
|
286
|
+
async listCheckpoints() {
|
|
287
|
+
try {
|
|
288
|
+
const res = await fetch(`${this.baseUrl}/models/checkpoints`);
|
|
289
|
+
if (!res.ok)
|
|
290
|
+
return [];
|
|
291
|
+
return await res.json();
|
|
292
|
+
}
|
|
293
|
+
catch {
|
|
294
|
+
return [];
|
|
295
|
+
}
|
|
296
|
+
}
|
|
297
|
+
    /**
     * Submit a workflow to ComfyUI and wait for the resulting image.
     *
     * @param {object} workflow - Workflow template in ComfyUI API ("prompt")
     *   format; treated as read-only (a deep copy is mutated, never the original).
     * @param {string} prompt - Positive prompt text injected into the workflow.
     * @param {object} [options] - Optional overrides: negativePrompt, seed,
     *   width, height, referenceImages (array of image URLs).
     * @param {function} [onProgress] - Optional async callback invoked with the
     *   elapsed milliseconds, at most once every 15 s while polling.
     * @returns {Promise<{imageBase64: string, mimeType: string, referenceImageWarning?: string}>}
     * @throws {Error} on reference-image download/upload failure, prompt
     *   submission rejection, per-node validation errors, a failed generation,
     *   missing output images, or the 5-minute timeout.
     */
    async generate(workflow, prompt, options, onProgress) {
        // 1. Deep-copy the template so repeated calls never mutate the caller's
        //    workflow object (JSON round-trip is fine: workflows are plain JSON).
        const wf = JSON.parse(JSON.stringify(workflow));
        // 2. Detect key nodes. Judging from the usage below, detectNodes()
        //    returns ids for positivePrompt, negativePrompt, sampler,
        //    latentImage, and a loadImages array — see its definition elsewhere
        //    in this file for the exact heuristics.
        const nodes = detectNodes(wf);
        // 3. Fill in the positive prompt text.
        wf[nodes.positivePrompt].inputs.text = prompt;
        // 4. Fill in negative prompt only when both the option and a matching
        //    node are present.
        if (options?.negativePrompt && nodes.negativePrompt) {
            wf[nodes.negativePrompt].inputs.text = options.negativePrompt;
        }
        // 5. Fill in seed; default to a random int in [0, 2^31 - 1) when the
        //    caller does not supply one.
        const seed = options?.seed ?? Math.floor(Math.random() * 2147483647);
        wf[nodes.sampler].inputs.seed = seed;
        // 6. Fill in dimensions, only if provided and the workflow actually has
        //    a latent-image node to receive them.
        if (nodes.latentImage && (options?.width || options?.height)) {
            if (options?.width)
                wf[nodes.latentImage].inputs.width = options.width;
            if (options?.height)
                wf[nodes.latentImage].inputs.height = options.height;
        }
        // 6.5. Handle reference images: download each URL, upload it into
        //      ComfyUI's input directory, then point a LoadImage node at it.
        let referenceImageWarning;
        if (options?.referenceImages?.length) {
            if (nodes.loadImages?.length) {
                // Extra reference images beyond the number of available
                // LoadImage nodes are silently ignored.
                const count = Math.min(options.referenceImages.length, nodes.loadImages.length);
                for (let i = 0; i < count; i++) {
                    const url = options.referenceImages[i];
                    const nodeId = nodes.loadImages[i];
                    // Download the image from URL
                    const imgRes = await fetch(url);
                    if (!imgRes.ok) {
                        throw new Error(`Failed to download reference image from ${url}: ${imgRes.status}`);
                    }
                    const imgBuffer = Buffer.from(await imgRes.arrayBuffer());
                    // Upload to ComfyUI's input directory. The extension is
                    // guessed from the URL; falls back to png when unrecognized.
                    const ext = url.match(/\.(jpe?g|png|webp|gif)(\?|$)/i)?.[1] || 'png';
                    const filename = `ref_${Date.now()}_${i}.${ext}`;
                    const uploadedName = await this.uploadImage(imgBuffer, filename);
                    // Inject the server-side filename into the LoadImage node.
                    wf[nodeId].inputs.image = uploadedName;
                }
            }
            else {
                // No LoadImage nodes in this workflow: surface a warning in the
                // result rather than failing the whole generation.
                referenceImageWarning = 'The current workflow has no LoadImage nodes, so reference images were not applied. To use reference images with ComfyUI, import a workflow that includes LoadImage nodes (e.g., an img2img workflow).';
            }
        }
        // 7. Submit the workflow to the /prompt endpoint.
        const submitRes = await fetch(`${this.baseUrl}/prompt`, {
            method: 'POST',
            headers: { 'Content-Type': 'application/json' },
            body: JSON.stringify({ prompt: wf }),
        });
        if (!submitRes.ok) {
            const errText = await submitRes.text();
            throw new Error(`ComfyUI prompt submission failed (${submitRes.status}): ${errText}`);
        }
        const { prompt_id, node_errors } = await submitRes.json();
        // The server can accept the request yet still report per-node
        // validation errors; treat those as fatal.
        if (node_errors && Object.keys(node_errors).length > 0) {
            throw new Error(`ComfyUI node errors: ${JSON.stringify(node_errors)}`);
        }
        // 8. Poll the /history endpoint until completed (max 5 minutes).
        const timeoutMs = 300_000;
        const pollInterval = 2_000;
        const startTime = Date.now();
        let lastProgress = 0; // elapsed ms at the last onProgress notification
        while (Date.now() - startTime < timeoutMs) {
            await new Promise(r => setTimeout(r, pollInterval));
            const elapsed = Date.now() - startTime;
            // Throttle progress callbacks to at most one every 15 seconds.
            if (onProgress && elapsed - lastProgress >= 15_000) {
                await onProgress(elapsed);
                lastProgress = elapsed;
            }
            const histRes = await fetch(`${this.baseUrl}/history/${prompt_id}`);
            // A transient HTTP failure or a prompt not yet present in history
            // both simply retry on the next tick.
            if (!histRes.ok)
                continue;
            const history = await histRes.json();
            const entry = history[prompt_id];
            if (!entry)
                continue;
            if (entry.status.status_str === 'error') {
                throw new Error('ComfyUI generation failed');
            }
            if (!entry.status.completed)
                continue;
            // 9. Find the first node output that contains images.
            for (const output of Object.values(entry.outputs)) {
                if (output.images && output.images.length > 0) {
                    const img = output.images[0];
                    const params = new URLSearchParams({
                        filename: img.filename,
                        subfolder: img.subfolder,
                        type: img.type,
                    });
                    // 10. Download the image via the /view endpoint.
                    const imgRes = await fetch(`${this.baseUrl}/view?${params}`);
                    if (!imgRes.ok) {
                        throw new Error(`Failed to download image from ComfyUI: ${imgRes.status}`);
                    }
                    const buffer = await imgRes.arrayBuffer();
                    const base64 = Buffer.from(buffer).toString('base64');
                    const mimeType = imgRes.headers.get('content-type') || 'image/png';
                    return { imageBase64: base64, mimeType, referenceImageWarning };
                }
            }
            throw new Error('ComfyUI generation completed but no output images found');
        }
        throw new Error(`ComfyUI generation timed out after ${timeoutMs / 1000}s`);
    }
|
|
407
|
+
}
|
|
408
|
+
// Re-export the provider class for CommonJS consumers.
exports.ComfyUIProvider = ComfyUIProvider;
|
|
409
|
+
// ============================================================
|
|
410
|
+
// Aspect Ratio -> Size Conversion
|
|
411
|
+
// ============================================================
|
|
412
|
+
// Supported aspect-ratio presets, mapped to [widthRatio, heightRatio] pairs.
// Consumed by calculateSize() below; unknown keys are treated as "no change".
const ASPECT_RATIOS = {
    '1:1': [1, 1],   // square
    '3:4': [3, 4],   // portrait
    '4:3': [4, 3],   // landscape
    '16:9': [16, 9], // widescreen landscape
    '9:16': [9, 16], // widescreen portrait
};
|
|
419
|
+
/**
 * Calculate new dimensions from an aspect ratio, preserving the total pixel
 * count of the original dimensions. Results are rounded to multiples of
 * `step` (Stable Diffusion models require dimensions divisible by 8) and
 * clamped to at least one step so degenerate inputs never yield a 0-sized
 * dimension.
 *
 * @param {string} aspectRatio - A key of ASPECT_RATIOS (e.g. '16:9'). Any
 *   unknown value returns the original dimensions unchanged.
 * @param {number} originalWidth - Current width in pixels.
 * @param {number} originalHeight - Current height in pixels.
 * @param {number} [step=8] - Rounding granularity for the resulting sides.
 * @returns {{width: number, height: number}} The resized dimensions.
 */
function calculateSize(aspectRatio, originalWidth, originalHeight, step = 8) {
    const ratio = ASPECT_RATIOS[aspectRatio];
    if (!ratio)
        return { width: originalWidth, height: originalHeight };
    const [rw, rh] = ratio;
    // Solve w * h = totalPixels subject to w / h = rw / rh:
    //   h = sqrt(totalPixels * rh / rw),  w = h * rw / rh
    const totalPixels = originalWidth * originalHeight;
    const newHeight = Math.sqrt(totalPixels * rh / rw);
    const newWidth = newHeight * rw / rh;
    return {
        width: Math.max(step, Math.round(newWidth / step) * step),
        height: Math.max(step, Math.round(newHeight / step) * step),
    };
}
|
|
436
|
+
//# sourceMappingURL=comfyui.js.map
|