hypercli-sdk 0.4.2__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- c3/__init__.py +57 -0
- c3/billing.py +72 -0
- c3/client.py +60 -0
- c3/config.py +70 -0
- c3/files.py +386 -0
- c3/http.py +217 -0
- c3/instances.py +211 -0
- c3/job/__init__.py +24 -0
- c3/job/base.py +249 -0
- c3/job/comfyui.py +1469 -0
- c3/jobs.py +285 -0
- c3/logs.py +273 -0
- c3/renders.py +339 -0
- c3/user.py +37 -0
- hypercli_sdk-0.4.2.dist-info/METADATA +141 -0
- hypercli_sdk-0.4.2.dist-info/RECORD +17 -0
- hypercli_sdk-0.4.2.dist-info/WHEEL +4 -0
c3/job/comfyui.py
ADDED
|
@@ -0,0 +1,1469 @@
|
|
|
1
|
+
"""ComfyUI job helpers"""
|
|
2
|
+
import json
|
|
3
|
+
import time
|
|
4
|
+
import random
|
|
5
|
+
from pathlib import Path
|
|
6
|
+
from typing import TYPE_CHECKING, Any
|
|
7
|
+
|
|
8
|
+
import httpx
|
|
9
|
+
|
|
10
|
+
from .base import BaseJob
|
|
11
|
+
from ..config import COMFYUI_IMAGE
|
|
12
|
+
|
|
13
|
+
|
|
14
|
+
if TYPE_CHECKING:
|
|
15
|
+
from ..client import C3
|
|
16
|
+
|
|
17
|
+
|
|
18
|
+
# Default object_info for offline workflow conversion (no running instance needed)
# This covers common node types - extend as needed for new workflows
#
# The schema mirrors ComfyUI's /object_info response, trimmed to the fields
# graph_to_api() reads. Each entry maps a node class_type to:
#   "input":       {"required": {name: spec}, "optional": {name: spec}}
#   "input_order": {"required": [names...],   "optional": [names...]}
# where spec is [type, config]: type is either a type string ("INT", "STRING",
# "MODEL", ...) or a list of allowed enum values (older combo format). Enum
# value lists here are placeholders — _value_matches_type() accepts any string
# for enums, so they only need to mark the slot as string-like.
DEFAULT_OBJECT_INFO = {
    # Text encoders
    "CLIPTextEncode": {
        "input": {"required": {"clip": ["CLIP"], "text": ["STRING", {"multiline": True}]}, "optional": {}},
        "input_order": {"required": ["clip", "text"], "optional": []},
    },
    "CLIPLoader": {
        "input": {"required": {"clip_name": [["model.safetensors"], {}], "type": [["stable_diffusion", "wan"], {}], "device": [["default", "cpu"], {}]}, "optional": {}},
        "input_order": {"required": ["clip_name", "type", "device"], "optional": []},
    },
    "QuadrupleCLIPLoader": {
        "input": {"required": {"clip_name1": [["clip.safetensors"], {}], "clip_name2": [["clip.safetensors"], {}], "clip_name3": [["clip.safetensors"], {}], "clip_name4": [["clip.safetensors"], {}]}, "optional": {}},
        "input_order": {"required": ["clip_name1", "clip_name2", "clip_name3", "clip_name4"], "optional": []},
    },
    "AudioEncoderLoader": {
        "input": {"required": {"audio_encoder_name": [["whisper_large_v3_fp16.safetensors"], {}]}, "optional": {}},
        "input_order": {"required": ["audio_encoder_name"], "optional": []},
    },
    "WanHuMoImageToVideo": {
        "input": {"required": {"positive": ["CONDITIONING"], "negative": ["CONDITIONING"], "vae": ["VAE"], "width": ["INT", {"default": 640}], "height": ["INT", {"default": 640}], "length": ["INT", {"default": 97}], "batch_size": ["INT", {"default": 1}]}, "optional": {"audio_encoder_output": ["AUDIO_ENCODER_OUTPUT"], "ref_image": ["IMAGE"]}},
        "input_order": {"required": ["positive", "negative", "vae", "width", "height", "length", "batch_size"], "optional": ["audio_encoder_output", "ref_image"]},
    },
    "TextEncodeQwenImageEditPlus": {
        "input": {
            "required": {"clip": ["CLIP"], "vae": ["VAE"], "prompt": ["STRING", {"multiline": True}]},
            "optional": {"image1": ["IMAGE"], "image2": ["IMAGE"], "image3": ["IMAGE"]},
        },
        "input_order": {"required": ["clip", "vae", "prompt"], "optional": ["image1", "image2", "image3"]},
    },
    "ModelSamplingAuraFlow": {
        "input": {"required": {"model": ["MODEL"], "shift": ["FLOAT", {"default": 1.73}]}, "optional": {}},
        "input_order": {"required": ["model", "shift"], "optional": []},
    },
    "CFGNorm": {
        "input": {"required": {"model": ["MODEL"], "strength": ["FLOAT", {"default": 1.0}]}, "optional": {}},
        "input_order": {"required": ["model", "strength"], "optional": []},
    },
    "FluxKontextImageScale": {
        "input": {"required": {"image": ["IMAGE"], "max_pixels": ["INT", {"default": 1048576}]}, "optional": {}},
        "input_order": {"required": ["image", "max_pixels"], "optional": []},
    },
    "ReferenceLatent": {
        "input": {"required": {"conditioning": ["CONDITIONING"], "latent": ["LATENT"]}, "optional": {}},
        "input_order": {"required": ["conditioning", "latent"], "optional": []},
    },
    # Samplers
    "KSampler": {
        "input": {
            "required": {
                "model": ["MODEL"], "positive": ["CONDITIONING"], "negative": ["CONDITIONING"], "latent_image": ["LATENT"],
                "seed": ["INT", {"default": 0}], "steps": ["INT", {"default": 20}], "cfg": ["FLOAT", {"default": 8.0}],
                "sampler_name": [["euler", "euler_ancestral", "dpm_2"], {}], "scheduler": [["normal", "karras", "simple"], {}],
                "denoise": ["FLOAT", {"default": 1.0}],
            },
            "optional": {},
        },
        "input_order": {"required": ["model", "positive", "negative", "latent_image", "seed", "steps", "cfg", "sampler_name", "scheduler", "denoise"], "optional": []},
    },
    "KSamplerAdvanced": {
        "input": {
            "required": {
                "model": ["MODEL"], "positive": ["CONDITIONING"], "negative": ["CONDITIONING"], "latent_image": ["LATENT"],
                "add_noise": [["enable", "disable"], {}], "noise_seed": ["INT", {"default": 0}],
                "steps": ["INT", {"default": 20}], "cfg": ["FLOAT", {"default": 8.0}],
                "sampler_name": [["euler", "euler_ancestral"], {}], "scheduler": [["normal", "simple"], {}],
                "start_at_step": ["INT", {"default": 0}], "end_at_step": ["INT", {"default": 10000}],
                "return_with_leftover_noise": [["disable", "enable"], {}],
            },
            "optional": {},
        },
        "input_order": {
            "required": ["model", "positive", "negative", "latent_image", "add_noise", "noise_seed", "steps", "cfg", "sampler_name", "scheduler", "start_at_step", "end_at_step", "return_with_leftover_noise"],
            "optional": [],
        },
    },
    # Latent generators
    "EmptyLatentImage": {
        "input": {"required": {"width": ["INT", {}], "height": ["INT", {}], "batch_size": ["INT", {}]}, "optional": {}},
        "input_order": {"required": ["width", "height", "batch_size"], "optional": []},
    },
    "EmptySD3LatentImage": {
        "input": {"required": {"width": ["INT", {}], "height": ["INT", {}], "batch_size": ["INT", {}]}, "optional": {}},
        "input_order": {"required": ["width", "height", "batch_size"], "optional": []},
    },
    "EmptyHunyuanLatentVideo": {
        "input": {"required": {"width": ["INT", {}], "height": ["INT", {}], "length": ["INT", {}], "batch_size": ["INT", {}]}, "optional": {}},
        "input_order": {"required": ["width", "height", "length", "batch_size"], "optional": []},
    },
    "WanImageToVideo": {
        "input": {"required": {"width": ["INT", {}], "height": ["INT", {}], "length": ["INT", {}], "batch_size": ["INT", {}]}, "optional": {}},
        "input_order": {"required": ["width", "height", "length", "batch_size"], "optional": []},
    },
    "WanStartEndFrames": {
        "input": {"required": {"width": ["INT", {}], "height": ["INT", {}], "length": ["INT", {}], "batch_size": ["INT", {}]}, "optional": {}},
        "input_order": {"required": ["width", "height", "length", "batch_size"], "optional": []},
    },
    # Model loaders
    "UNETLoader": {
        "input": {"required": {"unet_name": [["model.safetensors"], {}], "weight_dtype": [["default", "fp8_e4m3fn"], {}]}, "optional": {}},
        "input_order": {"required": ["unet_name", "weight_dtype"], "optional": []},
    },
    "VAELoader": {
        "input": {"required": {"vae_name": [["vae.safetensors"], {}]}, "optional": {}},
        "input_order": {"required": ["vae_name"], "optional": []},
    },
    "CheckpointLoaderSimple": {
        "input": {"required": {"ckpt_name": [["model.safetensors"], {}]}, "optional": {}},
        "input_order": {"required": ["ckpt_name"], "optional": []},
    },
    "LoraLoaderModelOnly": {
        "input": {"required": {"model": ["MODEL"], "lora_name": [["lora.safetensors"], {}], "strength_model": ["FLOAT", {}]}, "optional": {}},
        "input_order": {"required": ["model", "lora_name", "strength_model"], "optional": []},
    },
    "ModelSamplingSD3": {
        "input": {"required": {"model": ["MODEL"], "shift": ["FLOAT", {}]}, "optional": {}},
        "input_order": {"required": ["model", "shift"], "optional": []},
    },
    # Video/Image processing
    "VAEDecode": {
        "input": {"required": {"samples": ["LATENT"], "vae": ["VAE"]}, "optional": {}},
        "input_order": {"required": ["samples", "vae"], "optional": []},
    },
    "VAEEncode": {
        "input": {"required": {"pixels": ["IMAGE"], "vae": ["VAE"]}, "optional": {}},
        "input_order": {"required": ["pixels", "vae"], "optional": []},
    },
    "CreateVideo": {
        "input": {"required": {"images": ["IMAGE"], "fps": ["FLOAT", {"default": 16}]}, "optional": {"audio": ["AUDIO"]}},
        "input_order": {"required": ["images", "fps"], "optional": ["audio"]},
    },
    # Save nodes
    "SaveVideo": {
        "input": {
            "required": {
                "video": ["VIDEO"],
                "filename_prefix": ["STRING", {"default": "video/ComfyUI"}],
                # COMBO with config["options"] is the newer /object_info format.
                "format": ["COMBO", {"default": "auto", "options": ["auto", "mp4"]}],
                "codec": ["COMBO", {"default": "auto", "options": ["auto", "h264"]}],
            },
            "optional": {},
        },
        "input_order": {"required": ["video", "filename_prefix", "format", "codec"], "optional": []},
    },
    "SaveImage": {
        "input": {"required": {"images": ["IMAGE"], "filename_prefix": ["STRING", {}]}, "optional": {}},
        "input_order": {"required": ["images", "filename_prefix"], "optional": []},
    },
    "SaveAnimatedWEBP": {
        "input": {"required": {"images": ["IMAGE"], "filename_prefix": ["STRING", {}], "fps": ["FLOAT", {}], "lossless": ["BOOLEAN", {}], "quality": ["INT", {}], "method": [["default"], {}]}, "optional": {}},
        "input_order": {"required": ["images", "filename_prefix", "fps", "lossless", "quality", "method"], "optional": []},
    },
    # Input loaders
    "LoadImage": {
        "input": {"required": {"image": ["STRING", {}]}, "optional": {}},
        "input_order": {"required": ["image"], "optional": []},
    },
    "LoadAudio": {
        "input": {"required": {"audio": ["STRING", {}]}, "optional": {}},
        "input_order": {"required": ["audio"], "optional": []},
    },
}
|
|
181
|
+
|
|
182
|
+
|
|
183
|
+
def load_template(template_id: str) -> dict:
    """
    Load workflow template from comfyui-workflow-templates package.

    Args:
        template_id: Template name (e.g., "video_wan2_2_14B_t2v")

    Returns:
        Workflow in graph format (nodes array, links array)

    Raises:
        ImportError: If comfyui-workflow-templates is not installed.

    Requires: pip install comfyui-workflow-templates comfyui-workflow-templates-media-image
    """
    try:
        from comfyui_workflow_templates import get_asset_path
    except ImportError as err:
        # Chain the original error so the underlying import failure
        # (e.g. a broken partial install) stays visible in the traceback.
        raise ImportError(
            "comfyui-workflow-templates not installed. "
            "Run: pip install comfyui-workflow-templates comfyui-workflow-templates-media-image"
        ) from err

    workflow_path = get_asset_path(template_id, f"{template_id}.json")
    # Templates are JSON text; read with explicit UTF-8 rather than the
    # platform default encoding.
    with open(workflow_path, encoding="utf-8") as f:
        return json.load(f)
|
|
206
|
+
|
|
207
|
+
|
|
208
|
+
def _value_matches_type(value, input_spec) -> bool:
|
|
209
|
+
"""Check if a widget value matches the expected input type."""
|
|
210
|
+
if input_spec is None:
|
|
211
|
+
return True
|
|
212
|
+
|
|
213
|
+
# input_spec is typically [type, config] or just [type]
|
|
214
|
+
if isinstance(input_spec, list) and len(input_spec) > 0:
|
|
215
|
+
type_info = input_spec[0]
|
|
216
|
+
config = input_spec[1] if len(input_spec) > 1 else {}
|
|
217
|
+
|
|
218
|
+
# List of allowed values (enum/combo) - older format
|
|
219
|
+
if isinstance(type_info, list):
|
|
220
|
+
# Accept if value is in list, or if value is a string (enum values may differ between versions)
|
|
221
|
+
if value in type_info:
|
|
222
|
+
return True
|
|
223
|
+
# Accept any string for enum - versions may have different allowed values
|
|
224
|
+
if isinstance(value, str) and any(isinstance(v, str) for v in type_info):
|
|
225
|
+
return True
|
|
226
|
+
return False
|
|
227
|
+
|
|
228
|
+
# Type string
|
|
229
|
+
if isinstance(type_info, str):
|
|
230
|
+
# COMBO type - options are in config["options"] (newer ComfyUI format)
|
|
231
|
+
if type_info == "COMBO":
|
|
232
|
+
options = config.get("options", []) if isinstance(config, dict) else []
|
|
233
|
+
if value in options:
|
|
234
|
+
return True
|
|
235
|
+
# Accept any string for combo - versions may have different allowed values
|
|
236
|
+
if isinstance(value, str) and options and any(isinstance(v, str) for v in options):
|
|
237
|
+
return True
|
|
238
|
+
return False
|
|
239
|
+
elif type_info == "INT":
|
|
240
|
+
return isinstance(value, (int, float)) and not isinstance(value, bool)
|
|
241
|
+
elif type_info == "FLOAT":
|
|
242
|
+
return isinstance(value, (int, float)) and not isinstance(value, bool)
|
|
243
|
+
elif type_info == "STRING":
|
|
244
|
+
return isinstance(value, str)
|
|
245
|
+
elif type_info == "BOOLEAN":
|
|
246
|
+
return isinstance(value, bool)
|
|
247
|
+
# Connection types (MODEL, CLIP, VIDEO, etc.) are handled via links, not widgets
|
|
248
|
+
elif type_info.isupper():
|
|
249
|
+
return False
|
|
250
|
+
|
|
251
|
+
return True
|
|
252
|
+
|
|
253
|
+
|
|
254
|
+
def find_nodes(workflow: dict, class_type: str, title_contains: str = None) -> list[tuple[str, dict]]:
|
|
255
|
+
"""
|
|
256
|
+
Find nodes in API-format workflow by class_type and optional title pattern.
|
|
257
|
+
|
|
258
|
+
Args:
|
|
259
|
+
workflow: Workflow in API format (node IDs as keys)
|
|
260
|
+
class_type: Node class type to match (e.g., "CLIPTextEncode", "KSampler")
|
|
261
|
+
title_contains: Optional substring to match in node title (case-insensitive)
|
|
262
|
+
|
|
263
|
+
Returns:
|
|
264
|
+
List of (node_id, node) tuples matching the criteria
|
|
265
|
+
"""
|
|
266
|
+
results = []
|
|
267
|
+
for node_id, node in workflow.items():
|
|
268
|
+
if node.get("class_type") == class_type:
|
|
269
|
+
if title_contains is None:
|
|
270
|
+
results.append((node_id, node))
|
|
271
|
+
else:
|
|
272
|
+
title = node.get("_meta", {}).get("title", "")
|
|
273
|
+
if title_contains.lower() in title.lower():
|
|
274
|
+
results.append((node_id, node))
|
|
275
|
+
return results
|
|
276
|
+
|
|
277
|
+
|
|
278
|
+
def find_node(workflow: dict, class_type: str, title_contains: str | None = None) -> tuple[str, dict] | tuple[None, None]:
    """Find first node matching class_type and optional title pattern.

    Args:
        workflow: Workflow in API format (node IDs as keys)
        class_type: Node class type to match
        title_contains: Optional substring to match in node title (case-insensitive)

    Returns:
        (node_id, node) for the first match in workflow iteration order,
        or (None, None) when nothing matches.
    """
    nodes = find_nodes(workflow, class_type, title_contains)
    return nodes[0] if nodes else (None, None)
|
|
282
|
+
|
|
283
|
+
|
|
284
|
+
def apply_graph_modes(graph: dict, nodes_config: dict) -> dict:
    """
    Enable/disable nodes in graph before conversion to API format.

    Args:
        graph: ComfyUI graph (UI format with 'nodes' list)
        nodes_config: Dict mapping node IDs to config. Supported keys:
            - mode: 0 (active), 2 (muted), 4 (bypassed)
            - enabled: True (mode=0) or False (mode=4) - convenience alias

    Example:
        apply_graph_modes(graph, {
            "87": {"enabled": True},   # Enable bypassed node
            "42": {"mode": 4},         # Bypass node
        })

    Returns:
        Modified graph (mutated in place)
    """
    # Index nodes by stringified id so int and str keys both resolve.
    index = {str(entry.get("id")): entry for entry in graph.get("nodes", [])}

    for raw_id, cfg in nodes_config.items():
        target = index.get(str(raw_id))
        if not target:
            # Unknown node ids are silently ignored.
            continue
        # An explicit "mode" wins over the "enabled" convenience alias.
        if "mode" in cfg:
            target["mode"] = cfg["mode"]
        elif "enabled" in cfg:
            target["mode"] = 0 if cfg["enabled"] else 4

    return graph
|
|
316
|
+
|
|
317
|
+
|
|
318
|
+
def apply_params(workflow: dict, **params) -> dict:
    """
    Apply parameters to workflow nodes by finding them by type/title.

    This works across different workflow types (Qwen, Flux, SDXL, etc.)
    by searching for nodes by their class_type rather than hardcoded IDs.

    Supported params:
        prompt: Text for positive prompt (CLIPTextEncode with "Positive" in title)
        negative: Text for negative prompt (CLIPTextEncode with "Negative" in title)
        width: Image/video width (EmptySD3LatentImage, EmptyHunyuanLatentVideo, etc.)
        height: Image/video height (same as width)
        length: Video length in frames (EmptyHunyuanLatentVideo, etc.)
        seed: Random seed (KSampler uses "seed", KSamplerAdvanced uses "noise_seed")
        steps: Sampling steps (KSampler or Flux2Scheduler)
        cfg: CFG scale (KSampler or FluxGuidance)
        filename_prefix: Output filename prefix (SaveImage, SaveVideo)
        nodes: Dict mapping node IDs to input values for direct node control.
            Format: {"node_id": {"image": "file.png", "text": "prompt", ...}}
            Supports: LoadImage (image), LoadAudio (audio), *TextEncode* (text),
            or any input key as generic fallback.

    Returns:
        Modified workflow (mutated in place)
    """
    # Helper to find first matching node from a list of types.
    # Iterates `types` in order, so earlier entries take priority.
    def find_first(types: list[str], title: str | None = None) -> tuple[str, dict] | tuple[None, None]:
        for t in types:
            node_id, node = find_node(workflow, t, title)
            if node:
                return node_id, node
        return None, None

    # CLIP text encode types (standard first, then variants)
    clip_types = ["CLIPTextEncode", "CLIPTextEncodeFlux", "CLIPTextEncodeSD3", "TextEncodeQwenImageEditPlus"]

    # Positive prompt - find by "Positive" in title
    if "prompt" in params:
        # Try TextEncodeQwenImageEditPlus first (Qwen workflows use "prompt" field)
        node_id, node = find_node(workflow, "TextEncodeQwenImageEditPlus", "Positive")
        if node:
            node["inputs"]["prompt"] = params["prompt"]
        else:
            # Standard CLIP encoders (use "text" field)
            node_id, node = find_first(clip_types, "Positive")
            if not node:
                # Fallback: find any CLIP encoder
                # NOTE(review): this fallback can also pick an untitled
                # TextEncodeQwenImageEditPlus and then set "text" on it,
                # although that node type reads "prompt" — confirm intended.
                for t in clip_types:
                    nodes = find_nodes(workflow, t)
                    if nodes:
                        node_id, node = nodes[0]
                        break
            if node:
                node["inputs"]["text"] = params["prompt"]

    # Negative prompt - find by "Negative" in title
    if "negative" in params:
        # Try TextEncodeQwenImageEditPlus first (Qwen workflows use "prompt" field)
        node_id, node = find_node(workflow, "TextEncodeQwenImageEditPlus", "Negative")
        if node:
            node["inputs"]["prompt"] = params["negative"]
        else:
            # Standard CLIP encoders (use "text" field).
            # Unlike "prompt", there is deliberately no any-encoder fallback here:
            # writing a negative into an unlabeled encoder could clobber the positive.
            node_id, node = find_first(clip_types, "Negative")
            if node:
                node["inputs"]["text"] = params["negative"]

    # Width/Height/Length - try various latent image/video nodes
    if "width" in params or "height" in params or "length" in params:
        latent_types = [
            # Image
            "EmptySD3LatentImage", "EmptyFlux2LatentImage", "EmptyLatentImage",
            # Video
            "EmptyHunyuanLatentVideo", "EmptyMochiLatentVideo", "EmptyLTXVLatentVideo",
            "WanImageToVideo", "WanStartEndFrames",
        ]
        node_id, node = find_first(latent_types)
        if node:
            if "width" in params:
                node["inputs"]["width"] = params["width"]
            if "height" in params:
                node["inputs"]["height"] = params["height"]
            # "length" only applies to video nodes; image nodes ignore the key.
            if "length" in params:
                node["inputs"]["length"] = params["length"]
        else:
            # Try PrimitiveNode with "width"/"height" title (Flux2 style)
            if "width" in params:
                node_id, node = find_node(workflow, "PrimitiveNode", "width")
                if node and "value" in node["inputs"]:
                    node["inputs"]["value"] = params["width"]
            if "height" in params:
                node_id, node = find_node(workflow, "PrimitiveNode", "height")
                if node and "value" in node["inputs"]:
                    node["inputs"]["value"] = params["height"]

    # Sampler types (standard first, then advanced variants)
    sampler_types = ["KSampler", "KSamplerAdvanced", "SamplerCustom", "SamplerCustomAdvanced"]

    # Seed - KSampler variants or RandomNoise
    # Note: KSampler uses "seed", KSamplerAdvanced uses "noise_seed"
    if "seed" in params:
        # Try KSampler first (uses "seed")
        node_id, node = find_node(workflow, "KSampler")
        if node:
            node["inputs"]["seed"] = params["seed"]
        else:
            # Try KSamplerAdvanced (uses "noise_seed")
            # For multi-stage workflows, find the one with add_noise="enable" (the one that actually uses seed)
            advanced_nodes = find_nodes(workflow, "KSamplerAdvanced")
            target_node = None
            for nid, n in advanced_nodes:
                if n["inputs"].get("add_noise") == "enable":
                    target_node = n
                    break
            # Fallback to first KSamplerAdvanced if none have add_noise="enable"
            if not target_node and advanced_nodes:
                target_node = advanced_nodes[0][1]
            if target_node:
                target_node["inputs"]["noise_seed"] = params["seed"]
            else:
                # Try other sampler types (custom-sampler graphs keep the seed
                # in a separate RandomNoise node)
                node_id, node = find_node(workflow, "RandomNoise")
                if node:
                    node["inputs"]["noise_seed"] = params["seed"]

    # Steps - KSampler variants or Flux2Scheduler
    if "steps" in params:
        node_id, node = find_first(sampler_types)
        if node:
            node["inputs"]["steps"] = params["steps"]
        else:
            node_id, node = find_node(workflow, "Flux2Scheduler")
            if node:
                node["inputs"]["steps"] = params["steps"]

    # CFG - KSampler variants or FluxGuidance
    if "cfg" in params:
        node_id, node = find_first(sampler_types)
        if node:
            node["inputs"]["cfg"] = params["cfg"]
        else:
            # Flux workflows call the equivalent knob "guidance".
            node_id, node = find_node(workflow, "FluxGuidance")
            if node:
                node["inputs"]["guidance"] = params["cfg"]

    # Filename prefix - SaveImage, SaveVideo, or SaveAnimatedWEBP
    if "filename_prefix" in params:
        save_types = ["SaveImage", "SaveVideo", "SaveAnimatedWEBP", "SaveAnimatedPNG"]
        node_id, node = find_first(save_types)
        if node:
            node["inputs"]["filename_prefix"] = params["filename_prefix"]

    # Node-specific params by ID - for direct control over specific nodes
    # Format: nodes={"node_id": {"image": "file.png", "text": "prompt", ...}}
    if "nodes" in params:
        nodes_dict = params["nodes"]
        for node_id, values in nodes_dict.items():
            node_id = str(node_id)  # Ensure string key
            if node_id not in workflow:
                continue  # Skip unknown nodes

            node = workflow[node_id]
            node_type = node.get("class_type", "")

            # Map values to appropriate input fields based on node type
            for key, value in values.items():
                if key == "image" and node_type == "LoadImage":
                    node["inputs"]["image"] = value
                elif key == "audio" and node_type == "LoadAudio":
                    node["inputs"]["audio"] = value
                elif key == "text" and "Text" in node_type:
                    # CLIPTextEncode, CLIPTextEncodeFlux, TextEncodeQwenImageEditPlus, etc.
                    node["inputs"]["text"] = value
                else:
                    # Generic fallback: set input directly
                    node["inputs"][key] = value

    return workflow
|
|
496
|
+
|
|
497
|
+
|
|
498
|
+
def graph_to_api(graph: dict, object_info: dict | None = None, debug: bool = False) -> dict:
    """
    Convert ComfyUI graph format (from UI) to API format (for /prompt endpoint).

    Connected inputs become [source_node_id, output_slot] references; remaining
    inputs are filled positionally from the node's widgets_values, using the
    object_info schema to decide which widget value belongs to which input
    (UI-only widgets such as the seed "randomize" control are skipped by type
    mismatch).

    Args:
        graph: Workflow in graph format (nodes array, links array)
        object_info: Node schemas from /object_info endpoint (uses DEFAULT_OBJECT_INFO if None)
        debug: If True, print debug info about conversion

    Returns:
        Workflow in API format (node IDs as keys)
    """
    if object_info is None:
        object_info = DEFAULT_OBJECT_INFO
    api = {}

    # Build node lookup
    nodes_by_id = {node["id"]: node for node in graph.get("nodes", [])}

    # Build link lookup: link_id -> (from_node_id, from_slot)
    links = {}
    for link in graph.get("links", []):
        # link format: [link_id, from_node, from_slot, to_node, to_slot, type]
        link_id = link[0]
        from_node = link[1]
        from_slot = link[2]
        links[link_id] = (from_node, from_slot)

    def is_skipped_node(node):
        """Check if node should be skipped in API output."""
        if not node:
            return True
        class_type = node.get("type")
        if not class_type or class_type in ("Note", "Reroute", "MarkdownNote"):
            return True
        # mode 2 = muted, mode 4 = bypassed
        if node.get("mode", 0) in (2, 4):
            return True
        return False

    def resolve_link(link_id, visited=None):
        """Follow link through skipped nodes (reroutes, bypassed) to find real source."""
        if visited is None:
            visited = set()
        if link_id in visited:
            return None  # Cycle detection
        visited.add(link_id)

        if link_id not in links:
            return None

        from_node_id, from_slot = links[link_id]
        from_node = nodes_by_id.get(from_node_id)

        if not is_skipped_node(from_node):
            return (from_node_id, from_slot)

        # Node is skipped - follow through its input
        # For bypassed/reroute nodes, output slot 0 passes through input slot 0
        # NOTE(review): only slot 0 is traced through; a bypassed node tapped on
        # a higher output slot resolves to None — confirm this matches ComfyUI.
        if from_slot == 0 and from_node:
            node_inputs = from_node.get("inputs", [])
            if node_inputs:
                upstream_link = node_inputs[0].get("link")
                if upstream_link is not None:
                    return resolve_link(upstream_link, visited)

        return None

    for node in graph.get("nodes", []):
        node_id = str(node["id"])
        class_type = node.get("type")

        # Skip UI-only nodes (notes, reroutes, etc.)
        if not class_type or class_type in ("Note", "Reroute", "MarkdownNote"):
            continue

        # Skip muted/bypassed nodes (mode 2 = muted, mode 4 = bypassed)
        if node.get("mode", 0) in (2, 4):
            continue

        # Unknown node types fall through with an empty schema: connections
        # still map, but widget values cannot be assigned to named inputs.
        info = object_info.get(class_type, {})
        inputs = {}

        # Get input specs from schema
        input_specs = {}
        for section in ["required", "optional"]:
            for name, spec in info.get("input", {}).get(section, {}).items():
                input_specs[name] = spec

        # Get input order from schema
        # Note: input_order may be missing in some ComfyUI versions - fall back to input specs keys
        input_order = info.get("input_order", {})
        required_inputs = input_order.get("required", [])
        optional_inputs = input_order.get("optional", [])
        all_input_names = required_inputs + optional_inputs

        # Fallback: if input_order is empty, use keys from input specs
        # This handles older ComfyUI versions that don't provide input_order
        if not all_input_names and input_specs:
            # Use input specs keys, putting required first
            required_spec_keys = list(info.get("input", {}).get("required", {}).keys())
            optional_spec_keys = list(info.get("input", {}).get("optional", {}).keys())
            all_input_names = required_spec_keys + optional_spec_keys

        # Debug: show what we're working with for SaveVideo
        if debug and class_type == "SaveVideo":
            print(f"DEBUG SaveVideo node {node_id}:")
            print(f" input_order: {input_order}")
            print(f" all_input_names: {all_input_names}")
            print(f" input_specs keys: {list(input_specs.keys())}")
            print(f" widgets_values: {node.get('widgets_values', [])}")

        # Map connections from links (node inputs that are connected)
        connected_inputs = set()
        for inp in node.get("inputs", []):
            link_id = inp.get("link")
            if link_id is not None:
                resolved = resolve_link(link_id)
                if resolved:
                    from_node, from_slot = resolved
                    inputs[inp["name"]] = [str(from_node), from_slot]
                    connected_inputs.add(inp["name"])

        if debug and class_type == "SaveVideo":
            print(f" connected_inputs: {connected_inputs}")

        # Map widget values to unconnected inputs with type validation
        widgets = node.get("widgets_values", [])
        if isinstance(widgets, dict):
            # Some nodes use dict format for widgets
            for name, value in widgets.items():
                if name not in connected_inputs:
                    inputs[name] = value
        else:
            # List format - map positionally to input names, skipping UI-only widgets
            w_idx = 0
            for name in all_input_names:
                if name in connected_inputs:
                    continue

                # Find next widget value that matches the expected type
                input_spec = input_specs.get(name)
                if debug and class_type == "SaveVideo":
                    print(f" mapping '{name}': spec={input_spec}, w_idx={w_idx}, widgets[{w_idx}:]={widgets[w_idx:] if w_idx < len(widgets) else '[]'}")
                while w_idx < len(widgets):
                    value = widgets[w_idx]
                    w_idx += 1
                    if _value_matches_type(value, input_spec):
                        inputs[name] = value
                        break
                # Skip UI-only widgets (e.g., 'randomize', 'fixed', etc.)

        if debug and class_type == "SaveVideo":
            print(f" final inputs: {inputs}")

        api[node_id] = {
            "class_type": class_type,
            "inputs": inputs,
            "_meta": {"title": node.get("title", class_type)},
        }

    return api
|
|
662
|
+
|
|
663
|
+
|
|
664
|
+
class ComfyUIJob(BaseJob):
    """ComfyUI-specific job with workflow execution helpers"""

    # Container image used when launching new ComfyUI jobs.
    DEFAULT_IMAGE = COMFYUI_IMAGE
    # GPU type requested when the caller does not specify one.
    DEFAULT_GPU_TYPE = "l40s"
    # NOTE(review): presumably polled by BaseJob readiness checks
    # (e.g. wait_ready) — confirm against BaseJob.
    HEALTH_ENDPOINT = "/system_stats"

    # Port ComfyUI listens on for direct (non-load-balanced) HTTP access.
    COMFYUI_PORT = 8188
|
|
672
|
+
|
|
673
|
+
def __init__(self, c3: "C3", job, template: str | None = None, use_lb: bool = False, use_auth: bool = False):
    """Wrap an existing job as a ComfyUI job.

    Args:
        c3: C3 client used for all API calls.
        job: Underlying job object, forwarded to BaseJob.
        template: Template used to launch this job, if known.
        use_lb: True when the job is reached via the HTTPS load balancer.
        use_auth: True when requests must carry a job-scoped Bearer token.
    """
    super().__init__(c3, job)
    self._object_info: dict | None = None  # cached /object_info response
    self._auth_headers: dict | None = None  # reserved cache slot (not read in this class)
    self._job_token: str | None = None  # lazily fetched LB auth token
    self.template = template  # Template used to launch this job
    self._use_lb = use_lb  # Using HTTPS load balancer
    self.use_auth = use_auth  # Using token auth
|
|
681
|
+
|
|
682
|
+
@property
def use_lb(self) -> bool:
    """Whether this job is reached through the HTTPS load balancer."""
    return self._use_lb
|
|
685
|
+
|
|
686
|
+
@use_lb.setter
def use_lb(self, value: bool):
    """Set use_lb and clear cached base_url"""
    self._use_lb = value
    # The URL scheme/port depends on use_lb, so any cached value is stale.
    self._base_url = None  # Clear cache so base_url recomputes
|
|
691
|
+
|
|
692
|
+
@property
def base_url(self) -> str:
    """ComfyUI base URL - HTTPS if using lb, HTTP otherwise"""
    if self.hostname and not self._base_url:
        if self.use_lb:
            # Load balancer terminates TLS on the standard port (443).
            url = f"https://{self.hostname}"
        else:
            # Talk plain HTTP straight to the exposed ComfyUI port.
            url = f"http://{self.hostname}:{self.COMFYUI_PORT}"
        self._base_url = url
    return self._base_url or ""
|
|
703
|
+
|
|
704
|
+
@property
def auth_headers(self) -> dict:
    """Headers for authenticated requests to ComfyUI"""
    if not self.use_auth:
        # Direct connections authenticate with the account API key.
        return {"Authorization": f"Bearer {self.c3._api_key}"}
    # Load-balanced connections use a job-scoped token, fetched once
    # and cached for the lifetime of this object.
    if not self._job_token:
        self._job_token = self.c3.jobs.token(self.job_id)
    return {"Authorization": f"Bearer {self._job_token}"}
|
|
715
|
+
|
|
716
|
+
@classmethod
def create_for_template(
    cls,
    c3: "C3",
    template: str,
    gpu_type: str | None = None,
    gpu_count: int = 1,
    runtime: int = 3600,
    lb: int | None = None,
    auth: bool = False,
    **kwargs,
) -> "ComfyUIJob":
    """Create a new ComfyUI job configured for a specific template.

    Args:
        c3: C3 client
        template: Template name (passed as COMFYUI_TEMPLATES env var)
        gpu_type: GPU type
        gpu_count: Number of GPUs
        runtime: Max runtime in seconds
        lb: Port for HTTPS load balancer (e.g., 8188). If set, uses HTTPS.
        auth: Enable Bearer token auth on load balancer

    Returns:
        A ComfyUIJob wrapping the newly created job.
    """
    # Inject the template into the container env without clobbering
    # any env the caller supplied (popped so it isn't passed twice).
    env = kwargs.pop("env", {}) or {}
    env["COMFYUI_TEMPLATES"] = template

    ports = kwargs.pop("ports", {}) or {}
    if lb:
        # Use HTTPS load balancer
        ports["lb"] = lb
    else:
        # Direct port exposure (HTTP)
        ports[str(cls.COMFYUI_PORT)] = cls.COMFYUI_PORT

    job = c3.jobs.create(
        image=cls.DEFAULT_IMAGE,
        gpu_type=gpu_type or cls.DEFAULT_GPU_TYPE,
        gpu_count=gpu_count,
        runtime=runtime,
        env=env,
        ports=ports,
        auth=auth,
        **kwargs,
    )
    return cls(c3, job, template=template, use_lb=bool(lb), use_auth=auth)
|
|
761
|
+
|
|
762
|
+
@classmethod
def get_instance(
    cls,
    c3: "C3",
    instance: str,
    use_lb: bool = False,
    use_auth: bool = False,
) -> "ComfyUIJob":
    """Connect to a specific ComfyUI instance by job ID or hostname.

    Args:
        c3: C3 client
        instance: Job ID (UUID) or hostname
        use_lb: Whether instance uses HTTPS load balancer
        use_auth: Whether instance uses token auth
    """
    # Heuristic: UUIDs contain dashes and are longer than 30 chars.
    looks_like_job_id = "-" in instance and len(instance) > 30
    if looks_like_job_id:
        job = c3.jobs.get(instance)
    else:
        # Treat the argument as a (possibly truncated) hostname and
        # scan currently running jobs for the first match.
        running = c3.jobs.list(state="running")
        job = next(
            (
                j
                for j in running
                if j.hostname
                and (j.hostname == instance or j.hostname.startswith(instance))
            ),
            None,
        )
        if job is None:
            raise ValueError(f"No running job found with hostname: {instance}")

    return cls(c3, job, use_lb=use_lb, use_auth=use_auth)
|
|
793
|
+
|
|
794
|
+
@classmethod
def get_or_create_for_template(
    cls,
    c3: "C3",
    template: str,
    gpu_type: str | None = None,
    gpu_count: int = 1,
    runtime: int = 3600,
    reuse: bool = True,
    lb: int | None = None,
    auth: bool = False,
    **kwargs,
) -> "ComfyUIJob":
    """Get existing running job or create new one for a template.

    If reuse=True and a ComfyUI job is already running, it will be reused
    (note: the existing job may have different models loaded).

    Args:
        c3: C3 client
        template: Template name for COMFYUI_TEMPLATES
        gpu_type: GPU type for a newly created job
        gpu_count: Number of GPUs for a newly created job
        runtime: Max runtime in seconds for a newly created job
        reuse: Reuse an already-running ComfyUI job if one exists
        lb: Port for HTTPS load balancer; if set, HTTPS is used
        auth: Enable Bearer token auth on load balancer
    """
    if reuse:
        # NOTE(review): get_running is presumably provided by BaseJob
        # and filters by container image — confirm.
        existing = cls.get_running(c3, image_filter=cls.DEFAULT_IMAGE)
        if existing:
            existing.template = template
            # TODO: detect lb/auth from existing job's config
            existing.use_lb = bool(lb)
            existing.use_auth = auth
            return existing

    return cls.create_for_template(
        c3,
        template=template,
        gpu_type=gpu_type,
        gpu_count=gpu_count,
        runtime=runtime,
        lb=lb,
        auth=auth,
        **kwargs,
    )
|
|
831
|
+
|
|
832
|
+
def get_object_info(self, refresh: bool = False) -> dict:
    """Fetch node schemas from ComfyUI (cached)"""
    if refresh or self._object_info is None:
        with httpx.Client(timeout=30) as http:
            response = http.get(
                f"{self.base_url}/object_info",
                headers=self.auth_headers,
            )
            response.raise_for_status()
            self._object_info = response.json()
    return self._object_info
|
|
843
|
+
|
|
844
|
+
def convert_workflow(self, graph: dict, debug: bool = False) -> dict:
    """Convert graph format workflow to API format"""
    # Node schemas come from the running server so widget mapping is exact.
    return graph_to_api(graph, self.get_object_info(), debug=debug)
|
|
848
|
+
|
|
849
|
+
def load_template(self, template_id: str) -> dict:
    """
    Load workflow from comfyui-workflow-templates package.

    Requires: pip install comfyui-workflow-templates comfyui-workflow-templates-media-image
    """
    try:
        from comfyui_workflow_templates import get_asset_path
    except ImportError:
        raise ImportError(
            "comfyui-workflow-templates not installed. "
            "Run: pip install comfyui-workflow-templates comfyui-workflow-templates-media-image"
        )

    # Templates ship as <template_id>/<template_id>.json inside the package.
    workflow_path = get_asset_path(template_id, f"{template_id}.json")
    return json.loads(Path(workflow_path).read_text())
|
|
866
|
+
|
|
867
|
+
def upload_image(self, file_path: str | Path, filename: str = None) -> str:
    """Upload image to ComfyUI server, returns server filename"""
    source = Path(file_path)
    server_name = filename or source.name

    with httpx.Client(timeout=60) as http, open(source, "rb") as handle:
        response = http.post(
            f"{self.base_url}/upload/image",
            files={"image": (server_name, handle, "image/png")},
            headers=self.auth_headers,
        )
        response.raise_for_status()
        # The server may rename on collision; prefer its reported name.
        return response.json().get("name", server_name)
|
|
882
|
+
|
|
883
|
+
def upload_audio(self, file_path: str | Path, filename: str = None) -> str:
    """Upload audio to ComfyUI server, returns server filename.

    Note: ComfyUI doesn't have a dedicated /upload/audio endpoint,
    so we use /upload/image which accepts any file type.
    """
    source = Path(file_path)
    server_name = filename or source.name

    with httpx.Client(timeout=60) as http, open(source, "rb") as handle:
        # Use /upload/image - it accepts any file type despite the name
        response = http.post(
            f"{self.base_url}/upload/image",
            files={"image": (server_name, handle, "audio/mpeg")},
            headers=self.auth_headers,
        )
        response.raise_for_status()
        return response.json().get("name", server_name)
|
|
903
|
+
|
|
904
|
+
# =========================================================================
|
|
905
|
+
# ComfyUI Manager - Custom Node Installation
|
|
906
|
+
# =========================================================================
|
|
907
|
+
|
|
908
|
+
def _comfy_request(self, method: str, path: str, **kwargs) -> httpx.Response:
    """Make a request to ComfyUI with retry logic for transient errors."""
    # NOTE(review): local import — presumably avoids an import cycle
    # between this module and c3.http; confirm before hoisting.
    from c3.http import request_with_retry
    return request_with_retry(
        method,
        f"{self.base_url}{path}",
        headers=self.auth_headers,
        **kwargs,
    )
|
|
917
|
+
|
|
918
|
+
def get_available_node_types(self) -> set[str]:
    """Get set of available node class_types from ComfyUI's /object_info.

    Returns set of class_type strings that ComfyUI can currently execute.
    """
    response = self._comfy_request("get", "/object_info", timeout=60)
    response.raise_for_status()
    # The /object_info payload is keyed by class_type.
    return set(response.json())
|
|
926
|
+
|
|
927
|
+
def get_workflow_node_types(self, workflow: dict) -> set[str]:
    """Extract all class_type values from a workflow.

    Args:
        workflow: ComfyUI workflow dict (API format or UI format)

    Returns:
        Set of class_type strings used in the workflow
    """
    # UI format: {"nodes": [{"type": "..."}, ...]}
    if "nodes" in workflow:
        return {
            node["type"]
            for node in workflow.get("nodes", [])
            if "type" in node
        }

    # API format: {"1": {"class_type": "..."}, ...}
    return {
        node_data["class_type"]
        for node_data in workflow.values()
        if isinstance(node_data, dict) and "class_type" in node_data
    }
|
|
951
|
+
|
|
952
|
+
def get_missing_node_types(self, workflow: dict) -> set[str]:
    """Find node types in workflow that aren't available in ComfyUI.

    Args:
        workflow: ComfyUI workflow dict

    Returns:
        Set of class_type strings that are missing
    """
    required = self.get_workflow_node_types(workflow)
    available = self.get_available_node_types()
    return required.difference(available)
|
|
964
|
+
|
|
965
|
+
def get_node_mappings(self) -> dict[str, list]:
    """Get node class_type to package mappings from ComfyUI Manager.

    Returns:
        Dict mapping package URL to [node_list, metadata]
    """
    response = self._comfy_request("get", "/customnode/getmappings", timeout=60)
    response.raise_for_status()
    return response.json()
|
|
974
|
+
|
|
975
|
+
def lookup_packages_for_nodes(self, node_types: set[str]) -> dict[str, list[str]]:
    """Look up which packages provide given node types.

    Args:
        node_types: Set of class_type strings to look up

    Returns:
        Dict mapping package URL to list of node types it provides.
        Node types no package claims are simply absent from the result.
    """
    mappings = self.get_node_mappings()

    # Invert the mapping: node_type -> package_url. If multiple packages
    # claim the same node, the last one seen wins (unchanged behavior).
    node_to_package: dict[str, str] = {}
    for url, data in mappings.items():
        # Each entry is [node_list, metadata]; guard against malformed rows.
        if isinstance(data, list) and data:
            for node in data[0]:
                node_to_package[node] = url

    # Group requested nodes by the package that provides them.
    packages: dict[str, list[str]] = {}
    for node_type in node_types:
        pkg = node_to_package.get(node_type)
        if pkg:
            packages.setdefault(pkg, []).append(node_type)

    return packages
|
|
1004
|
+
|
|
1005
|
+
def get_custom_node_list(self) -> dict:
    """Get full list of available custom node packages.

    Returns:
        Dict with 'channel' and 'node_packs' containing package metadata
    """
    response = self._comfy_request(
        "get",
        "/customnode/getlist",
        params={"skip_update": "true"},
        timeout=60,
    )
    response.raise_for_status()
    return response.json()
|
|
1014
|
+
|
|
1015
|
+
def install_packages_by_url(self, package_urls: list[str]) -> dict:
    """Install custom node packages by their repository URLs.

    Args:
        package_urls: List of package URLs (e.g., github repo URLs)

    Returns:
        Dict with 'queued', 'failed' and 'not_found' lists
        ('not_found' holds URLs with no matching package metadata).
    """
    results = {"queued": [], "failed": [], "not_found": []}

    # Get full package list to find metadata
    pkg_list = self.get_custom_node_list()
    node_packs = pkg_list.get("node_packs", {})

    # Build URL -> package metadata mapping. Only the file URLs matter
    # here; the pack ids themselves are unused.
    url_to_metadata = {}
    for metadata in node_packs.values():
        for url in metadata.get("files", []):
            url_to_metadata[url] = metadata

    with httpx.Client(timeout=60) as client:
        for url in package_urls:
            metadata = url_to_metadata.get(url)
            if not metadata:
                results["not_found"].append(url)
                continue

            # The manager's install queue takes the full metadata blob.
            resp = client.post(
                f"{self.base_url}/manager/queue/install",
                json=metadata,
                headers=self.auth_headers,
            )
            if resp.status_code in (200, 201):
                results["queued"].append(metadata.get("title", url))
            else:
                results["failed"].append(metadata.get("title", url))

        # Start processing if we queued anything
        if results["queued"]:
            client.get(
                f"{self.base_url}/manager/queue/start",
                headers=self.auth_headers,
            )

    return results
|
|
1062
|
+
|
|
1063
|
+
def auto_install_workflow_nodes(
    self, workflow: dict, wait: bool = True, reboot: bool = True
) -> dict:
    """Automatically install missing custom nodes for a workflow.

    Args:
        workflow: ComfyUI workflow dict
        wait: Wait for installation to complete
        reboot: Reboot ComfyUI after installation

    Returns:
        Dict with results: {
            'missing_nodes': [...],
            'packages_to_install': {...},
            'installed': [...],
            'failed': [...],
            'not_found_nodes': [...]
        }
    """
    results = {
        "missing_nodes": [],
        "packages_to_install": {},
        "installed": [],
        "failed": [],
        "not_found_nodes": [],
    }

    # Find missing node types
    missing = self.get_missing_node_types(workflow)
    if not missing:
        # Nothing to do — all node types already available.
        return results
    results["missing_nodes"] = list(missing)

    # Look up packages that provide these nodes
    packages = self.lookup_packages_for_nodes(missing)
    results["packages_to_install"] = packages

    # Track nodes we couldn't find packages for
    found_nodes = set()
    for nodes in packages.values():
        found_nodes.update(nodes)
    results["not_found_nodes"] = list(missing - found_nodes)

    if not packages:
        return results

    # Install the packages
    install_result = self.install_packages_by_url(list(packages.keys()))
    results["installed"] = install_result.get("queued", [])
    results["failed"] = install_result.get("failed", [])
    # URLs with no metadata are reported as failures too.
    results["failed"].extend(install_result.get("not_found", []))

    # Wait for installation
    if wait and results["installed"]:
        with httpx.Client(timeout=60) as client:
            for _ in range(180):  # Max 3 minutes
                time.sleep(1)
                resp = client.get(
                    f"{self.base_url}/manager/queue/status",
                    headers=self.auth_headers,
                )
                if resp.status_code == 200:
                    status = resp.json()
                    # Manager reports is_processing=False once the queue drains.
                    if not status.get("is_processing", False):
                        break

    # Reboot if we installed anything
    if reboot and results["installed"]:
        self.reboot(wait_ready=True)

    return results
|
|
1134
|
+
|
|
1135
|
+
def get_installed_nodes(self) -> dict:
    """Get list of installed custom nodes from ComfyUI Manager.

    Returns dict mapping node package name to metadata.
    """
    with httpx.Client(timeout=30) as http:
        response = http.get(
            f"{self.base_url}/customnode/installed",
            headers=self.auth_headers,
        )
        response.raise_for_status()
        return response.json()
|
|
1147
|
+
|
|
1148
|
+
def install_node(self, node_name: str) -> bool:
    """Install a custom node package via ComfyUI Manager.

    Args:
        node_name: Package name (e.g., "comfyui-videohelpersuite")

    Returns:
        True if installation was queued successfully
    """
    with httpx.Client(timeout=60) as http:
        # Queue the installation
        queued = http.post(
            f"{self.base_url}/manager/queue/install",
            json={"name": node_name},
            headers=self.auth_headers,
        )
        if queued.status_code not in (200, 201):
            return False

        # Start processing the queue
        started = http.get(
            f"{self.base_url}/manager/queue/start",
            headers=self.auth_headers,
        )
        return started.status_code in (200, 201)
|
|
1173
|
+
|
|
1174
|
+
def install_nodes(self, node_names: list[str], wait: bool = True) -> dict:
    """Install multiple custom node packages.

    Args:
        node_names: List of package names to install
        wait: If True, wait for installation to complete

    Returns:
        Dict with installation results ('queued' and 'failed' name lists)
    """
    results = {"queued": [], "failed": []}

    with httpx.Client(timeout=60) as client:
        # Queue all installations
        for name in node_names:
            resp = client.post(
                f"{self.base_url}/manager/queue/install",
                json={"name": name},
                headers=self.auth_headers,
            )
            if resp.status_code in (200, 201):
                results["queued"].append(name)
            else:
                results["failed"].append(name)

        if not results["queued"]:
            # Nothing queued — no point starting the queue or waiting.
            return results

        # Start processing
        client.get(
            f"{self.base_url}/manager/queue/start",
            headers=self.auth_headers,
        )

        # Wait for completion if requested
        if wait:
            for _ in range(120):  # Max 2 minutes
                time.sleep(1)
                resp = client.get(
                    f"{self.base_url}/manager/queue/status",
                    headers=self.auth_headers,
                )
                if resp.status_code == 200:
                    status = resp.json()
                    # Queue drained — installation work is done.
                    if not status.get("is_processing", False):
                        break

    return results
|
|
1222
|
+
|
|
1223
|
+
def reboot(self, wait_ready: bool = True, timeout: int = 120) -> bool:
    """Reboot ComfyUI server (required after installing nodes).

    Args:
        wait_ready: If True, wait for server to come back up
        timeout: Max seconds to wait for server

    Returns:
        True if reboot was successful (always True when wait_ready=False,
        since the reboot request is fire-and-forget)
    """
    with httpx.Client(timeout=10) as client:
        try:
            # Fire-and-forget: the server typically drops the connection
            # while restarting, so any response is ignored.
            client.get(
                f"{self.base_url}/manager/reboot",
                headers=self.auth_headers,
            )
        except (httpx.ConnectError, httpx.ReadTimeout):
            pass  # Expected - server is rebooting

    if wait_ready:
        # Wait a moment for server to start shutting down
        time.sleep(3)
        return self.wait_ready(timeout=timeout)

    return True
|
|
1249
|
+
|
|
1250
|
+
def ensure_nodes_installed(self, node_names: list[str]) -> dict:
    """Ensure custom nodes are installed, installing missing ones.

    Args:
        node_names: List of required node package names

    Returns:
        Dict with results: {"already_installed": [...], "installed": [...], "failed": [...]}
    """
    results = {"already_installed": [], "installed": [], "failed": []}

    # Get currently installed nodes; on any failure assume none are
    # installed (best-effort — worst case we re-install).
    try:
        installed = self.get_installed_nodes()
        installed_names = set(installed.keys()) if isinstance(installed, dict) else set()
    except Exception:
        installed_names = set()

    # Find missing nodes
    missing = [n for n in node_names if n not in installed_names]
    results["already_installed"] = [n for n in node_names if n in installed_names]

    if not missing:
        return results

    # Install missing nodes
    install_result = self.install_nodes(missing, wait=True)
    results["installed"] = install_result.get("queued", [])
    results["failed"] = install_result.get("failed", [])

    # Reboot if we installed anything
    if results["installed"]:
        self.reboot(wait_ready=True)

    return results
|
|
1285
|
+
|
|
1286
|
+
def queue_prompt(self, workflow: dict, retries: int = 5) -> str:
    """Submit workflow to ComfyUI, returns prompt_id.

    Retries on connection errors including DNS failures.

    Args:
        workflow: API-format workflow dict.
        retries: Max connection attempts (exponential backoff between tries).

    Raises:
        RuntimeError: if ComfyUI rejects the prompt (non-200 response).
        httpx.ConnectError / httpx.ReadTimeout: if every attempt fails to connect.
    """
    for attempt in range(retries):
        try:
            with httpx.Client(timeout=30) as client:
                resp = client.post(
                    f"{self.base_url}/prompt",
                    json={"prompt": workflow},
                    headers=self.auth_headers,
                )
                if resp.status_code != 200:
                    # Include response body in error for debugging
                    try:
                        error_detail = resp.json()
                    except Exception:
                        error_detail = resp.text
                    raise RuntimeError(f"ComfyUI prompt failed ({resp.status_code}): {error_detail}")
                return resp.json()["prompt_id"]
        except (httpx.ConnectError, httpx.ReadTimeout):
            # On the last attempt re-raise; otherwise back off and retry.
            if attempt < retries - 1:
                time.sleep(2 ** attempt)  # Exponential backoff
                continue
            raise
|
|
1314
|
+
|
|
1315
|
+
def get_history(self, prompt_id: str, retries: int = 5) -> dict | None:
    """Get execution history for a prompt.

    Retries on connection errors including DNS failures.

    Args:
        prompt_id: Prompt id as returned by queue_prompt().
        retries: Max connection attempts (exponential backoff between tries).

    Returns:
        History entry dict, or None if the prompt is not in history yet.
    """
    for attempt in range(retries):
        try:
            with httpx.Client(timeout=30) as client:
                resp = client.get(
                    f"{self.base_url}/history/{prompt_id}",
                    headers=self.auth_headers,
                )
                resp.raise_for_status()
                data = resp.json()
                # The history payload is keyed by prompt id.
                return data.get(prompt_id)
        except (httpx.ConnectError, httpx.ReadTimeout, httpx.ProxyError):
            # On the last attempt re-raise; otherwise back off and retry.
            if attempt < retries - 1:
                time.sleep(2 ** attempt)  # Exponential backoff: 1s, 2s, 4s, 8s
                continue
            raise
|
|
1337
|
+
|
|
1338
|
+
def wait_for_completion(
    self, prompt_id: str, timeout: float = 300, poll_interval: float = 2
) -> dict:
    """Wait for prompt execution to complete, returns history entry

    Args:
        prompt_id: Prompt id as returned by queue_prompt().
        timeout: Max seconds to wait overall.
        poll_interval: Seconds between history polls (doubled after a
            connection error).

    Raises:
        RuntimeError: if execution errored, or the connection was lost
            for too many consecutive polls.
        TimeoutError: if the prompt did not complete within `timeout`.
    """
    start = time.time()
    consecutive_errors = 0
    max_consecutive_errors = 10  # Handle DNS propagation delays

    while time.time() - start < timeout:
        try:
            history = self.get_history(prompt_id)
            consecutive_errors = 0  # Reset on success
            if history:
                status = history.get("status", {})
                if status.get("completed"):
                    return history
                if status.get("status_str") == "error":
                    raise RuntimeError(f"Workflow execution failed: {status}")
        except (httpx.ConnectError, httpx.ReadTimeout, httpx.ProxyError) as e:
            consecutive_errors += 1
            if consecutive_errors >= max_consecutive_errors:
                raise RuntimeError(
                    f"Lost connection to ComfyUI after {consecutive_errors} retries: {e}"
                )
            # Wait longer on connection errors
            time.sleep(poll_interval * 2)
            continue

        time.sleep(poll_interval)
    raise TimeoutError(f"Workflow did not complete within {timeout}s")
|
|
1368
|
+
|
|
1369
|
+
def download_output(
    self, filename: str, output_dir: str | Path = ".", subfolder: str = "", retries: int = 5
) -> Path:
    """Download output file from ComfyUI server.

    If the file already exists, increments the name (file_1.png, file_2.png, etc.)

    Args:
        filename: Name of the output file on the server.
        output_dir: Local directory to save into (created if missing).
        subfolder: Server-side subfolder, if any.
        retries: Max connection attempts (exponential backoff between tries).

    Returns:
        Path to the locally written file (possibly renamed to avoid collisions).
    """
    output_dir = Path(output_dir)
    if not output_dir.exists():
        output_dir.mkdir(parents=True, exist_ok=True)

    url = f"{self.base_url}/view"
    params = {"filename": filename, "type": "output"}
    if subfolder:
        params["subfolder"] = subfolder

    for attempt in range(retries):
        try:
            with httpx.Client(timeout=120) as client:
                resp = client.get(url, params=params, headers=self.auth_headers)
                resp.raise_for_status()

                # Auto-increment filename if exists
                output_path = output_dir / filename
                if output_path.exists():
                    stem = output_path.stem
                    suffix = output_path.suffix
                    i = 1
                    while output_path.exists():
                        output_path = output_dir / f"{stem}_{i}{suffix}"
                        i += 1

                with open(output_path, "wb") as f:
                    f.write(resp.content)
                return output_path
        except (httpx.ConnectError, httpx.ReadTimeout, httpx.ProxyError) as e:
            # On the last attempt re-raise; otherwise back off and retry.
            if attempt < retries - 1:
                time.sleep(2 ** attempt)
                continue
            raise
|
|
1409
|
+
|
|
1410
|
+
def run(
    self,
    workflow: dict,
    timeout: float = 300,
    convert: bool = True,
) -> dict:
    """
    Run a workflow and wait for completion.

    Args:
        workflow: Workflow dict (graph or API format)
        timeout: Max seconds to wait for completion
        convert: If True, convert graph format to API format

    Returns:
        History entry with outputs
    """
    # UI/graph exports carry a top-level "nodes" key; API format does not.
    needs_conversion = convert and "nodes" in workflow
    payload = self.convert_workflow(workflow) if needs_conversion else workflow

    prompt_id = self.queue_prompt(payload)
    return self.wait_for_completion(prompt_id, timeout=timeout)
|
|
1433
|
+
|
|
1434
|
+
def run_template(
    self,
    template_id: str,
    timeout: float = 300,
    **params,
) -> dict:
    """
    Run a template from comfyui-workflow-templates with parameter overrides.

    Args:
        template_id: Template name (e.g., "image_qwen_image")
        timeout: Max seconds to wait
        **params: Parameters to override (prompt, negative, width, height, seed, etc.)
            See apply_params() for full list of supported parameters.

    Returns:
        History entry with outputs
    """
    workflow = self.convert_workflow(self.load_template(template_id))

    # Apply parameter overrides using type-based node lookup
    apply_params(workflow, **params)

    # Already converted to API format above, so skip re-conversion.
    return self.run(workflow, timeout=timeout, convert=False)
|
|
1459
|
+
|
|
1460
|
+
def get_output_images(self, history: dict) -> list[dict]:
|
|
1461
|
+
"""Extract output file info from history entry (images, videos, gifs)"""
|
|
1462
|
+
outputs = []
|
|
1463
|
+
for node_id, node_output in history.get("outputs", {}).items():
|
|
1464
|
+
# Check for images, videos, gifs
|
|
1465
|
+
for key in ["images", "videos", "gifs"]:
|
|
1466
|
+
if key in node_output:
|
|
1467
|
+
for item in node_output[key]:
|
|
1468
|
+
outputs.append(item)
|
|
1469
|
+
return outputs
|