bizyengine 0.4.2__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- bizyengine/__init__.py +35 -0
- bizyengine/bizy_server/__init__.py +7 -0
- bizyengine/bizy_server/api_client.py +763 -0
- bizyengine/bizy_server/errno.py +122 -0
- bizyengine/bizy_server/error_handler.py +3 -0
- bizyengine/bizy_server/execution.py +55 -0
- bizyengine/bizy_server/resp.py +24 -0
- bizyengine/bizy_server/server.py +898 -0
- bizyengine/bizy_server/utils.py +93 -0
- bizyengine/bizyair_extras/__init__.py +24 -0
- bizyengine/bizyair_extras/nodes_advanced_refluxcontrol.py +62 -0
- bizyengine/bizyair_extras/nodes_cogview4.py +31 -0
- bizyengine/bizyair_extras/nodes_comfyui_detail_daemon.py +180 -0
- bizyengine/bizyair_extras/nodes_comfyui_instantid.py +164 -0
- bizyengine/bizyair_extras/nodes_comfyui_layerstyle_advance.py +141 -0
- bizyengine/bizyair_extras/nodes_comfyui_pulid_flux.py +88 -0
- bizyengine/bizyair_extras/nodes_controlnet.py +50 -0
- bizyengine/bizyair_extras/nodes_custom_sampler.py +130 -0
- bizyengine/bizyair_extras/nodes_dataset.py +99 -0
- bizyengine/bizyair_extras/nodes_differential_diffusion.py +16 -0
- bizyengine/bizyair_extras/nodes_flux.py +69 -0
- bizyengine/bizyair_extras/nodes_image_utils.py +93 -0
- bizyengine/bizyair_extras/nodes_ip2p.py +20 -0
- bizyengine/bizyair_extras/nodes_ipadapter_plus/__init__.py +1 -0
- bizyengine/bizyair_extras/nodes_ipadapter_plus/nodes_ipadapter_plus.py +1598 -0
- bizyengine/bizyair_extras/nodes_janus_pro.py +81 -0
- bizyengine/bizyair_extras/nodes_kolors_mz/__init__.py +86 -0
- bizyengine/bizyair_extras/nodes_model_advanced.py +62 -0
- bizyengine/bizyair_extras/nodes_sd3.py +52 -0
- bizyengine/bizyair_extras/nodes_segment_anything.py +256 -0
- bizyengine/bizyair_extras/nodes_segment_anything_utils.py +134 -0
- bizyengine/bizyair_extras/nodes_testing_utils.py +139 -0
- bizyengine/bizyair_extras/nodes_trellis.py +199 -0
- bizyengine/bizyair_extras/nodes_ultimatesdupscale.py +137 -0
- bizyengine/bizyair_extras/nodes_upscale_model.py +32 -0
- bizyengine/bizyair_extras/nodes_wan_video.py +49 -0
- bizyengine/bizyair_extras/oauth_callback/main.py +118 -0
- bizyengine/core/__init__.py +8 -0
- bizyengine/core/commands/__init__.py +1 -0
- bizyengine/core/commands/base.py +27 -0
- bizyengine/core/commands/invoker.py +4 -0
- bizyengine/core/commands/processors/model_hosting_processor.py +0 -0
- bizyengine/core/commands/processors/prompt_processor.py +123 -0
- bizyengine/core/commands/servers/model_server.py +0 -0
- bizyengine/core/commands/servers/prompt_server.py +234 -0
- bizyengine/core/common/__init__.py +8 -0
- bizyengine/core/common/caching.py +198 -0
- bizyengine/core/common/client.py +262 -0
- bizyengine/core/common/env_var.py +101 -0
- bizyengine/core/common/utils.py +93 -0
- bizyengine/core/configs/conf.py +112 -0
- bizyengine/core/configs/models.json +101 -0
- bizyengine/core/configs/models.yaml +329 -0
- bizyengine/core/data_types.py +20 -0
- bizyengine/core/image_utils.py +288 -0
- bizyengine/core/nodes_base.py +159 -0
- bizyengine/core/nodes_io.py +97 -0
- bizyengine/core/path_utils/__init__.py +9 -0
- bizyengine/core/path_utils/path_manager.py +276 -0
- bizyengine/core/path_utils/utils.py +34 -0
- bizyengine/misc/__init__.py +0 -0
- bizyengine/misc/auth.py +83 -0
- bizyengine/misc/llm.py +431 -0
- bizyengine/misc/mzkolors.py +93 -0
- bizyengine/misc/nodes.py +1208 -0
- bizyengine/misc/nodes_controlnet_aux.py +491 -0
- bizyengine/misc/nodes_controlnet_union_sdxl.py +171 -0
- bizyengine/misc/route_sam.py +60 -0
- bizyengine/misc/segment_anything.py +276 -0
- bizyengine/misc/supernode.py +182 -0
- bizyengine/misc/utils.py +218 -0
- bizyengine/version.txt +1 -0
- bizyengine-0.4.2.dist-info/METADATA +12 -0
- bizyengine-0.4.2.dist-info/RECORD +76 -0
- bizyengine-0.4.2.dist-info/WHEEL +5 -0
- bizyengine-0.4.2.dist-info/top_level.txt +1 -0
|
@@ -0,0 +1,491 @@
|
|
|
1
|
+
import os
|
|
2
|
+
|
|
3
|
+
import numpy as np
|
|
4
|
+
import torch
|
|
5
|
+
from bizyengine.core.common.env_var import BIZYAIR_SERVER_ADDRESS
|
|
6
|
+
|
|
7
|
+
from .utils import (
|
|
8
|
+
decode_and_deserialize,
|
|
9
|
+
get_api_key,
|
|
10
|
+
send_post_request,
|
|
11
|
+
serialize_and_encode,
|
|
12
|
+
)
|
|
13
|
+
|
|
14
|
+
# Sync with theoretical limit from Comfy base
# https://github.com/comfyanonymous/ComfyUI/blob/eecd69b53a896343775bcb02a4f8349e7442ffd1/nodes.py#L45
# NOTE(review): ComfyUI's own MAX_RESOLUTION at that ref is 16384, not 1024.
# 1024 here looks like a deliberate server-side cap for the remote
# preprocessors -- confirm before raising it.
MAX_RESOLUTION = 1024
|
|
17
|
+
|
|
18
|
+
|
|
19
|
+
class BasePreprocessor:
    """Common machinery for remote ControlNet-aux preprocessor nodes.

    Subclasses only declare ``model_name`` (the supernode route) and a
    ``CATEGORY``; the image round-trip to the BizyAir server lives here.
    """

    RETURN_TYPES = ("IMAGE",)
    FUNCTION = "execute"

    def __init_subclass__(cls, **kwargs):
        super().__init_subclass__(**kwargs)
        if not hasattr(cls, "model_name"):
            raise TypeError("Subclass must define 'model_name'")
        # Derive the endpoint and prefix the ComfyUI menu category once,
        # at class-creation time.
        cls.API_URL = f"{BIZYAIR_SERVER_ADDRESS}{cls.model_name}"
        cls.CATEGORY = f"☁️BizyAir/{cls.CATEGORY}"

    @staticmethod
    def get_headers():
        """Return JSON request headers carrying the user's BizyAir API key."""
        return {
            "accept": "application/json",
            "content-type": "application/json",
            "authorization": f"Bearer {get_api_key()}",
        }

    def execute(self, **kwargs):
        """Serialize the input image, run the remote preprocessor, and return
        the decoded result on the image's original device."""
        use_compression = True
        source_image: torch.Tensor = kwargs.pop("image")
        original_device = source_image.device
        kwargs["image"] = serialize_and_encode(source_image, use_compression)[0]
        kwargs["is_compress"] = use_compression
        raw_response: str = send_post_request(
            self.API_URL, payload=kwargs, headers=self.get_headers()
        )
        result_array = decode_and_deserialize(raw_response)
        return (torch.from_numpy(result_array).to(original_device),)
|
|
50
|
+
|
|
51
|
+
|
|
52
|
+
def create_node_input_types(**extra_kwargs):
    """Build a ComfyUI INPUT_TYPES dict.

    Always requires an ``image``; optional inputs are the caller-supplied
    ``extra_kwargs`` plus a shared ``resolution`` control.
    """
    resolution_spec = (
        "INT",
        {
            "default": 512,
            "min": 64,
            "max": MAX_RESOLUTION,
            "step": 64,
            "display": "number",  # cosmetic only: display as "number" or "slider"
        },
    )
    optional_inputs = dict(extra_kwargs)
    optional_inputs["resolution"] = resolution_spec
    return {
        "required": {"image": ("IMAGE",)},
        "optional": optional_inputs,
    }
|
|
69
|
+
|
|
70
|
+
|
|
71
|
+
class PiDiNetPreprocessor(BasePreprocessor):
    """PiDiNet soft-edge line extraction, run remotely via the base class."""

    model_name = "/supernode/controlnetauxpidinetpreprocessor"
    RETURN_TYPES = ("IMAGE",)
    FUNCTION = "execute"
    CATEGORY = "ControlNet Preprocessors/Line Extractors"

    @classmethod
    def INPUT_TYPES(cls):
        # "safe" toggles the detector's safe mode on the server side.
        return create_node_input_types(
            safe=(["enable", "disable"], {"default": "enable"})
        )
|
|
84
|
+
|
|
85
|
+
|
|
86
|
+
class ColorPreprocessor(BasePreprocessor):
    """Color-palette extraction for T2I-Adapter, run remotely via the base class."""

    model_name = "/supernode/controlnetauxcolorpreprocessor"
    RETURN_TYPES = ("IMAGE",)
    FUNCTION = "execute"
    CATEGORY = "ControlNet Preprocessors/T2IAdapter-only"

    @classmethod
    def INPUT_TYPES(cls):
        return create_node_input_types()
|
|
97
|
+
|
|
98
|
+
|
|
99
|
+
class CannyEdgePreprocessor(BasePreprocessor):
    """Canny edge detection, run remotely via the base class."""

    model_name = "/supernode/controlnetauxcannyedgepreprocessor"
    CATEGORY = "ControlNet Preprocessors/Line Extractors"

    @classmethod
    def INPUT_TYPES(cls):
        # Standard Canny hysteresis thresholds (0-255 pixel scale).
        threshold_spec = {"min": 0, "max": 255, "step": 1}
        return create_node_input_types(
            low_threshold=("INT", {"default": 100, **threshold_spec}),
            high_threshold=("INT", {"default": 200, **threshold_spec}),
        )
|
|
110
|
+
|
|
111
|
+
|
|
112
|
+
class SAMPreprocessor(BasePreprocessor):
    """Segment Anything (SAM) segmentation map, run remotely via the base class."""

    model_name = "/supernode/controlnetauxsampreprocessor"
    CATEGORY = "ControlNet Preprocessors/Semantic Segmentation"

    @classmethod
    def INPUT_TYPES(cls):
        return create_node_input_types()
|
|
120
|
+
|
|
121
|
+
|
|
122
|
+
class BinaryPreprocessor(BasePreprocessor):
    """Binary-threshold line extraction, run remotely via the base class."""

    model_name = "/supernode/controlnetauxbinarypreprocessor"
    RETURN_TYPES = ("IMAGE",)
    FUNCTION = "execute"
    CATEGORY = "ControlNet Preprocessors/Line Extractors"

    @classmethod
    def INPUT_TYPES(cls):
        return create_node_input_types(
            bin_threshold=("INT", {"default": 100, "min": 0, "max": 255, "step": 1})
        )
|
|
135
|
+
|
|
136
|
+
|
|
137
|
+
class ScribblePreprocessor(BasePreprocessor):
    """Scribble line extraction, run remotely via the base class."""

    model_name = "/supernode/controlnetauxscribblepreprocessor"
    RETURN_TYPES = ("IMAGE",)
    FUNCTION = "execute"
    CATEGORY = "ControlNet Preprocessors/Line Extractors"

    @classmethod
    def INPUT_TYPES(cls):
        return create_node_input_types(
            safe=(["enable", "disable"], {"default": "enable"})
        )
|
|
150
|
+
|
|
151
|
+
|
|
152
|
+
class M_LSDPreprocessor(BasePreprocessor):
    """M-LSD straight-line detection, run remotely via the base class."""

    model_name = "/supernode/controlnetauxm-lsdpreprocessor"
    RETURN_TYPES = ("IMAGE",)
    FUNCTION = "execute"
    CATEGORY = "ControlNet Preprocessors/Line Extractors"

    @classmethod
    def INPUT_TYPES(cls):
        return create_node_input_types(
            score_threshold=(
                "FLOAT",
                {"default": 0.1, "min": 0.01, "max": 2.0, "step": 0.01},
            ),
            dist_threshold=(
                "FLOAT",
                {"default": 0.1, "min": 0.01, "max": 20.0, "step": 0.01},
            ),
        )
|
|
172
|
+
|
|
173
|
+
|
|
174
|
+
class UniFormer_SemSegPreprocessor(BasePreprocessor):
    """UniFormer semantic segmentation, run remotely via the base class."""

    model_name = "/supernode/controlnetauxuniformer-semsegpreprocessor"
    RETURN_TYPES = ("IMAGE",)
    CATEGORY = "ControlNet Preprocessors/Semantic Segmentation"

    @classmethod
    def INPUT_TYPES(cls):
        return create_node_input_types()
|
|
184
|
+
|
|
185
|
+
|
|
186
|
+
class Zoe_DepthMapPreprocessor(BasePreprocessor):
    """ZoeDepth depth-map estimation, run remotely via the base class."""

    model_name = "/supernode/controlnetauxzoe-depthmappreprocessor"
    RETURN_TYPES = ("IMAGE",)
    FUNCTION = "execute"
    CATEGORY = "ControlNet Preprocessors/Normal and Depth Estimators"

    @classmethod
    def INPUT_TYPES(cls):
        return create_node_input_types()
|
|
197
|
+
|
|
198
|
+
|
|
199
|
+
class MiDaS_NormalMapPreprocessor(BasePreprocessor):
    """MiDaS normal-map estimation, run remotely via the base class."""

    model_name = "/supernode/controlnetauxmidas-normalmappreprocessor"
    RETURN_TYPES = ("IMAGE",)
    CATEGORY = "ControlNet Preprocessors/Normal and Depth Estimators"

    @classmethod
    def INPUT_TYPES(cls):
        return create_node_input_types(
            a=(
                "FLOAT",
                {"default": np.pi * 2.0, "min": 0.0, "max": np.pi * 5.0, "step": 0.05},
            ),
            bg_threshold=("FLOAT", {"default": 0.1, "min": 0, "max": 1, "step": 0.05}),
        )
|
|
215
|
+
|
|
216
|
+
|
|
217
|
+
class MiDaS_DepthMapPreprocessor(BasePreprocessor):
    """MiDaS depth-map estimation, run remotely via the base class."""

    model_name = "/supernode/controlnetauxmidas-depthmappreprocessor"
    RETURN_TYPES = ("IMAGE",)
    FUNCTION = "execute"
    CATEGORY = "ControlNet Preprocessors/Normal and Depth Estimators"

    @classmethod
    def INPUT_TYPES(cls):
        return create_node_input_types(
            a=(
                "FLOAT",
                {"default": np.pi * 2.0, "min": 0.0, "max": np.pi * 5.0, "step": 0.05},
            ),
            bg_threshold=("FLOAT", {"default": 0.1, "min": 0, "max": 1, "step": 0.05}),
        )
|
|
234
|
+
|
|
235
|
+
|
|
236
|
+
class OpenposePreprocessor(BasePreprocessor):
    """OpenPose body/hand/face pose estimation, run remotely via the base class."""

    model_name = "/supernode/controlnetauxopenposepreprocessor"
    # Also returns the raw keypoints alongside the rendered pose image.
    RETURN_TYPES = ("IMAGE", "POSE_KEYPOINT")
    CATEGORY = "ControlNet Preprocessors/Faces and Poses Estimators"

    @classmethod
    def INPUT_TYPES(cls):
        toggle = (["enable", "disable"], {"default": "enable"})
        return create_node_input_types(
            detect_hand=toggle,
            detect_body=toggle,
            detect_face=toggle,
        )
|
|
249
|
+
|
|
250
|
+
|
|
251
|
+
class LineArtPreprocessor(BasePreprocessor):
    """Realistic line-art extraction, run remotely via the base class."""

    model_name = "/supernode/controlnetauxlineartpreprocessor"
    CATEGORY = "ControlNet Preprocessors/Line Extractors"

    @classmethod
    def INPUT_TYPES(cls):
        return create_node_input_types(
            coarse=(["disable", "enable"], {"default": "disable"})
        )
|
|
261
|
+
|
|
262
|
+
|
|
263
|
+
class LeReS_DepthMapPreprocessor(BasePreprocessor):
    """LeReS depth-map estimation, run remotely via the base class."""

    model_name = "/supernode/controlnetauxleres-depthmappreprocessor"
    CATEGORY = "ControlNet Preprocessors/Normal and Depth Estimators"

    @classmethod
    def INPUT_TYPES(cls):
        percent_spec = {"default": 0.0, "min": 0.0, "max": 100, "step": 0.1}
        return create_node_input_types(
            rm_nearest=("FLOAT", dict(percent_spec)),
            rm_background=(
                "FLOAT",
                dict(percent_spec),
            ),
            # "boost" selects the leres++ variant on the server side.
            boost=(["enable", "disable"], {"default": "disable"}),
        )
|
|
278
|
+
|
|
279
|
+
|
|
280
|
+
class BAE_NormalMapPreprocessor(BasePreprocessor):
    """BAE normal-map estimation, run remotely via the base class."""

    model_name = "/supernode/controlnetauxbae-normalmappreprocessor"
    CATEGORY = "ControlNet Preprocessors/Normal and Depth Estimators"

    @classmethod
    def INPUT_TYPES(cls):
        return create_node_input_types()
|
|
289
|
+
|
|
290
|
+
|
|
291
|
+
class OneFormer_COCO_SemSegPreprocessor(BasePreprocessor):
    """OneFormer (COCO-trained) semantic segmentation, run remotely via the base class."""

    model_name = "/supernode/controlnetauxoneformer-coco-semsegpreprocessor"
    RETURN_TYPES = ("IMAGE",)
    CATEGORY = "ControlNet Preprocessors/Semantic Segmentation"

    @classmethod
    def INPUT_TYPES(cls):
        return create_node_input_types()
|
|
301
|
+
|
|
302
|
+
|
|
303
|
+
class OneFormer_ADE20K_SemSegPreprocessor(BasePreprocessor):
    """OneFormer (ADE20K-trained) semantic segmentation, run remotely via the base class."""

    model_name = "/supernode/controlnetauxoneformer-ade20k-semsegpreprocessor"
    CATEGORY = "ControlNet Preprocessors/Semantic Segmentation"

    @classmethod
    def INPUT_TYPES(cls):
        return create_node_input_types()
|
|
311
|
+
|
|
312
|
+
|
|
313
|
+
class HEDPreprocessor(BasePreprocessor):
    """HED soft-edge line extraction, run remotely via the base class."""

    model_name = "/supernode/controlnetauxhedpreprocessor"
    CATEGORY = "ControlNet Preprocessors/Line Extractors"

    @classmethod
    def INPUT_TYPES(cls):
        return create_node_input_types(
            safe=(["enable", "disable"], {"default": "enable"})
        )
|
|
323
|
+
|
|
324
|
+
|
|
325
|
+
class FakeScribblePreprocessor(BasePreprocessor):
    """Fake scribble lines (a.k.a. scribble_hed), run remotely via the base class."""

    model_name = "/supernode/controlnetauxfakescribblepreprocessor"
    CATEGORY = "ControlNet Preprocessors/Line Extractors"

    @classmethod
    def INPUT_TYPES(cls):
        return create_node_input_types(
            safe=(["enable", "disable"], {"default": "enable"})
        )
|
|
335
|
+
|
|
336
|
+
|
|
337
|
+
class TilePreprocessor(BasePreprocessor):
    """Tile preprocessing (pyramid upsampling), run remotely via the base class."""

    model_name = "/supernode/controlnetauxtilepreprocessor"
    CATEGORY = "ControlNet Preprocessors/tile"

    @classmethod
    def INPUT_TYPES(cls):
        return create_node_input_types(
            pyrUp_iters=("INT", {"default": 3, "min": 1, "max": 10, "step": 1})
        )
|
|
347
|
+
|
|
348
|
+
|
|
349
|
+
class DepthAnythingV2Preprocessor(BasePreprocessor):
    """Depth Anything V2 relative depth estimation, run remotely via the base class."""

    model_name = "/supernode/controlnetauxdepthanythingv2preprocessor"
    CATEGORY = "ControlNet Preprocessors/Normal and Depth Estimators"

    # Checkpoint variants ordered giant -> small; default is the large model.
    _CKPTS = [
        "depth_anything_v2_vitg.pth",
        "depth_anything_v2_vitl.pth",
        "depth_anything_v2_vitb.pth",
        "depth_anything_v2_vits.pth",
    ]

    @classmethod
    def INPUT_TYPES(cls):
        return create_node_input_types(
            ckpt_name=(
                list(cls._CKPTS),
                {"default": "depth_anything_v2_vitl.pth"},
            )
        )
|
|
367
|
+
|
|
368
|
+
|
|
369
|
+
class Metric3D_DepthMapPreprocessor(BasePreprocessor):
    """Metric3D depth-map estimation, run remotely via the base class."""

    model_name = "/supernode/controlnetauxmetric3d-depthmappreprocessor"
    CATEGORY = "ControlNet Preprocessors/Normal and Depth Estimators"

    @classmethod
    def INPUT_TYPES(cls):
        # fx/fy are camera focal lengths in pixels.
        focal_spec = ("INT", {"default": 1000, "min": 1, "max": MAX_RESOLUTION})
        return create_node_input_types(
            backbone=(
                ["vit-small", "vit-large", "vit-giant2"],
                {"default": "vit-small"},
            ),
            fx=focal_spec,
            fy=focal_spec,
        )
|
|
384
|
+
|
|
385
|
+
|
|
386
|
+
class Metric3D_NormalMapPreprocessor(BasePreprocessor):
    """Metric3D normal-map estimation, run remotely via the base class."""

    model_name = "/supernode/controlnetauxmetric3d-normalmappreprocessor"
    CATEGORY = "ControlNet Preprocessors/Normal and Depth Estimators"

    @classmethod
    def INPUT_TYPES(cls):
        # fx/fy are camera focal lengths in pixels.
        focal_spec = ("INT", {"default": 1000, "min": 1, "max": MAX_RESOLUTION})
        return create_node_input_types(
            backbone=(
                ["vit-small", "vit-large", "vit-giant2"],
                {"default": "vit-small"},
            ),
            fx=focal_spec,
            fy=focal_spec,
        )
|
|
401
|
+
|
|
402
|
+
|
|
403
|
+
class DWPreprocessor(BasePreprocessor):
    """DWPose pose estimation, run remotely via the base class."""

    model_name = "/supernode/controlnetauxdwpreprocessor"
    CATEGORY = "ControlNet Preprocessors/Faces and Poses Estimators"

    @classmethod
    def INPUT_TYPES(cls):
        toggle = (["enable", "disable"], {"default": "enable"})
        input_types = create_node_input_types(
            detect_hand=toggle,
            detect_body=toggle,
            detect_face=toggle,
        )
        # Extend the optional inputs with the detector/estimator model choices.
        input_types["optional"] = {
            **input_types["optional"],
            "bbox_detector": (
                [
                    "yolox_l.torchscript.pt",
                    "yolox_l.onnx",
                    "yolo_nas_l_fp16.onnx",
                    "yolo_nas_m_fp16.onnx",
                    "yolo_nas_s_fp16.onnx",
                ],
                {"default": "yolox_l.onnx"},
            ),
            "pose_estimator": (
                [
                    "dw-ll_ucoco_384_bs5.torchscript.pt",
                    "dw-ll_ucoco_384.onnx",
                    "dw-ll_ucoco.onnx",
                ],
                {"default": "dw-ll_ucoco_384_bs5.torchscript.pt"},
            ),
        }
        return input_types
|
|
437
|
+
|
|
438
|
+
|
|
439
|
+
# Registry consumed by ComfyUI: maps node type identifiers to the node
# classes defined above.
NODE_CLASS_MAPPINGS = {
    "BizyAirPiDiNetPreprocessor": PiDiNetPreprocessor,
    "BizyAirColorPreprocessor": ColorPreprocessor,
    "BizyAirCannyEdgePreprocessor": CannyEdgePreprocessor,
    "BizyAirSAMPreprocessor": SAMPreprocessor,
    "BizyAirBinaryPreprocessor": BinaryPreprocessor,
    "BizyAirScribblePreprocessor": ScribblePreprocessor,
    "BizyAirM_LSDPreprocessor": M_LSDPreprocessor,
    "BizyAirUniFormer_SemSegPreprocessor": UniFormer_SemSegPreprocessor,
    "BizyAirZoe_DepthMapPreprocessor": Zoe_DepthMapPreprocessor,
    "BizyAirMiDaS_NormalMapPreprocessor": MiDaS_NormalMapPreprocessor,
    "BizyAirMiDaS_DepthMapPreprocessor": MiDaS_DepthMapPreprocessor,
    "BizyAirOpenposePreprocessor": OpenposePreprocessor,
    "BizyAirLineArtPreprocessor": LineArtPreprocessor,
    "BizyAirLeReS_DepthMapPreprocessor": LeReS_DepthMapPreprocessor,
    "BizyAirBAE_NormalMapPreprocessor": BAE_NormalMapPreprocessor,
    "BizyAirOneFormer_COCO_SemSegPreprocessor": OneFormer_COCO_SemSegPreprocessor,
    "BizyAirOneFormer_ADE20K_SemSegPreprocessor": OneFormer_ADE20K_SemSegPreprocessor,
    "BizyAirHEDPreprocessor": HEDPreprocessor,
    "BizyAirFakeScribblePreprocessor": FakeScribblePreprocessor,
    "BizyAirTilePreprocessor": TilePreprocessor,
    "BizyAirDepthAnythingV2Preprocessor": DepthAnythingV2Preprocessor,
    "BizyAirMetric3D_DepthMapPreprocessor": Metric3D_DepthMapPreprocessor,
    "BizyAirMetric3D_NormalMapPreprocessor": Metric3D_NormalMapPreprocessor,
    "BizyAirDWPreprocessor": DWPreprocessor,
}
|
|
465
|
+
|
|
466
|
+
# Human-readable menu names shown in the ComfyUI node picker.
# BUGFIX: "Color Pallete" -> "Color Palette" (user-facing typo).
NODE_DISPLAY_NAME_MAPPINGS = {
    "BizyAirPiDiNetPreprocessor": "☁️BizyAir PiDiNet Soft-Edge Lines",
    "BizyAirColorPreprocessor": "☁️BizyAir Color Palette",
    "BizyAirCannyEdgePreprocessor": "☁️BizyAir Canny Edge",
    "BizyAirSAMPreprocessor": "☁️BizyAir SAM Segmentor",
    "BizyAirBinaryPreprocessor": "☁️BizyAir Binary Lines",
    "BizyAirScribblePreprocessor": "☁️BizyAir Scribble Lines",
    "BizyAirM_LSDPreprocessor": "☁️BizyAir M-LSD Lines",
    "BizyAirUniFormer_SemSegPreprocessor": "☁️BizyAir UniFormer Segmentor",
    "BizyAirZoe_DepthMapPreprocessor": "☁️BizyAir Zoe Depth Map",
    "BizyAirMiDaS_NormalMapPreprocessor": "☁️BizyAir MiDaS Normal Map",
    "BizyAirMiDaS_DepthMapPreprocessor": "☁️BizyAir MiDaS Depth Map",
    "BizyAirOpenposePreprocessor": "☁️BizyAir OpenPose Pose",
    "BizyAirLineArtPreprocessor": "☁️BizyAir Realistic Lineart",
    "BizyAirLeReS_DepthMapPreprocessor": "☁️BizyAir LeReS Depth Map (enable boost for leres++)",
    "BizyAirBAE_NormalMapPreprocessor": "☁️BizyAir BAE Normal Map",
    "BizyAirOneFormer_COCO_SemSegPreprocessor": "☁️BizyAir OneFormer COCO Segmentor",
    "BizyAirOneFormer_ADE20K_SemSegPreprocessor": "☁️BizyAir OneFormer ADE20K Segmentor",
    "BizyAirHEDPreprocessor": "☁️BizyAir HED Soft-Edge Lines",
    "BizyAirFakeScribblePreprocessor": "☁️BizyAir Fake Scribble Lines (aka scribble_hed)",
    "BizyAirTilePreprocessor": "☁️BizyAir Tile",
    "BizyAirDepthAnythingV2Preprocessor": "☁️BizyAir Depth Anything V2 - Relative",
    "BizyAirMetric3D_DepthMapPreprocessor": "☁️BizyAir Metric3D Depth Map",
    "BizyAirMetric3D_NormalMapPreprocessor": "☁️BizyAir Metric3D Normal Map",
    "BizyAirDWPreprocessor": "☁️BizyAir DWPose Estimator",
}
|
|
@@ -0,0 +1,171 @@
|
|
|
1
|
+
"""
|
|
2
|
+
huggingface: https://huggingface.co/xinsir/controlnet-union-sdxl-1.0
|
|
3
|
+
github: https://github.com/xinsir6/ControlNetPlus/tree/main
|
|
4
|
+
"""
|
|
5
|
+
|
|
6
|
+
import json
|
|
7
|
+
import os
|
|
8
|
+
|
|
9
|
+
import numpy as np
|
|
10
|
+
import requests
|
|
11
|
+
from bizyengine.core.common.env_var import BIZYAIR_SERVER_ADDRESS
|
|
12
|
+
from bizyengine.core.image_utils import decode_comfy_image, encode_comfy_image
|
|
13
|
+
|
|
14
|
+
from .utils import get_api_key
|
|
15
|
+
|
|
16
|
+
|
|
17
|
+
class StableDiffusionXLControlNetUnionPipeline:
    """ComfyUI node running xinsir's ControlNet-Union SDXL pipeline on a
    remote BizyAir supernode.

    huggingface: https://huggingface.co/xinsir/controlnet-union-sdxl-1.0
    github: https://github.com/xinsir6/ControlNetPlus/tree/main
    """

    API_URL = f"{BIZYAIR_SERVER_ADDRESS}/supernode/diffusers-v1-stablediffusionxlcontrolnetunionpipeline"

    @classmethod
    def INPUT_TYPES(s):
        return {
            "required": {
                "seed": ("INT", {"default": 0, "min": 0, "max": 0xFFFFFFFFFFFFFFFF}),
                "num_inference_steps": ("INT", {"default": 20, "min": 1, "max": 50}),
                "num_images_per_prompt": ("INT", {"default": 1, "min": 1, "max": 4}),
                "guidance_scale": (
                    "FLOAT",
                    {
                        "default": 5.0,
                        "min": 0.0,
                        "max": 100.0,
                        "step": 0.1,
                        "round": 0.01,
                    },
                ),
            },
            "optional": {
                "openpose_image": ("IMAGE",),
                "depth_image": ("IMAGE",),
                "hed_pidi_scribble_ted_image": ("IMAGE",),
                "canny_lineart_anime_lineart_mlsd_image": ("IMAGE",),
                "normal_image": ("IMAGE",),
                "segment_image": ("IMAGE",),
                "prompt": (
                    "STRING",
                    {
                        "default": "a car",
                        "multiline": True,
                        "dynamicPrompts": True,
                    },
                ),
                "negative_prompt": (
                    "STRING",
                    {
                        "default": "watermark, text",
                        "multiline": True,
                        "dynamicPrompts": True,
                    },
                ),
                "control_guidance_start": (
                    "FLOAT",
                    {
                        "default": 0,
                        "min": 0.0,
                        "max": 1,
                        "step": 0.01,
                    },
                ),
                "control_guidance_end": (
                    "FLOAT",
                    {
                        "default": 1.0,
                        "min": 0.0,
                        "max": 1,
                        "step": 0.01,
                    },
                ),
            },
        }

    RETURN_TYPES = ("IMAGE",)
    FUNCTION = "process"
    CATEGORY = "☁️BizyAir/ControlNet"

    @staticmethod
    def get_headers():
        """Return JSON request headers carrying the user's BizyAir API key."""
        return {
            "accept": "application/json",
            "content-type": "application/json",
            "authorization": f"Bearer {get_api_key()}",
        }

    def process(
        self,
        openpose_image=None,
        depth_image=None,
        hed_pidi_scribble_ted_image=None,
        canny_lineart_anime_lineart_mlsd_image=None,
        normal_image=None,
        segment_image=None,
        **kwargs,
    ):
        """Encode the supplied control images, post the job to the supernode,
        and return the decoded result image batch.

        The index keys 0..5 are the control-type slots expected by the
        ControlNet-Union server endpoint. Raises ValueError when no control
        image is given and RuntimeError on server-side failure.
        """
        controlnet_img = {
            0: openpose_image,
            1: depth_image,
            2: hed_pidi_scribble_ted_image,
            3: canny_lineart_anime_lineart_mlsd_image,
            4: normal_image,
            5: segment_image,
        }

        new_width = new_height = None
        for k, v in controlnet_img.items():
            if v is not None:
                # need to resize the image resolution to 1024 * 1024 or same bucket resolution to get the best performance
                # https://github.com/xinsir6/ControlNetPlus/blob/ba6c35b62e9df4c8f3b6429c4844ecc92685c8ec/controlnet_union_test_depth.py#L54-L56
                height, width = v.shape[1:3]
                ratio = np.sqrt(1024.0 * 1024.0 / (width * height))
                new_width, new_height = int(width * ratio), int(height * ratio)
                controlnet_img[k] = encode_comfy_image(v, old_version=True)

                if new_width > 1536 or new_height > 1536:
                    error_message = (
                        f"Error: Adjusted image dimensions exceed the limit. "
                        f"Height: {new_height}, Width: {new_width}. "
                        f"Please resize the original image with dimensions "
                        f"Height: {height}, Width: {width} to ensure "
                        f"Adjusted image dimensions are within 1536 pixels. "
                        f"Recommended dimensions: Height: {1024}, Width: {1024}."
                    )
                    raise RuntimeError(error_message)

        # BUGFIX: previously, invoking the node with no control image crashed
        # with a NameError on new_width; fail fast with a clear message.
        if new_width is None:
            raise ValueError(
                "At least one control image must be provided to "
                "StableDiffusionXLControlNetUnionPipeline."
            )

        print(
            f"Utilizing a height of {new_height} and width of {new_width} for processing."
        )
        payload = {
            "width": new_width,
            "height": new_height,
            "controlnet_img": controlnet_img,
        }
        payload.update(**kwargs)

        response = requests.post(
            self.API_URL,
            json=payload,
            headers=self.get_headers(),
        )

        # BUGFIX: parse the body only after the status check, and defensively --
        # an error response may not be JSON and may lack an "error" key.
        if response.status_code != 200:
            try:
                detail = response.json().get("error", response.text)
            except ValueError:
                detail = response.text
            raise RuntimeError(f"Failed to create task: {detail}")

        result = response.json()
        if "result" in result:  # cloud
            msg = json.loads(result["result"])
            if "error" in msg:
                raise RuntimeError(f"{msg['error']}")
            img_data = msg["data"]["payload"]
        else:  # local
            img_data = result["data"]["payload"]

        output = decode_comfy_image(img_data, old_version=True)
        return (output,)
|
|
163
|
+
|
|
164
|
+
|
|
165
|
+
# Registry consumed by ComfyUI: maps the node type identifier to its class.
NODE_CLASS_MAPPINGS = {
    "StableDiffusionXLControlNetUnionPipeline": StableDiffusionXLControlNetUnionPipeline,
}
|
|
168
|
+
|
|
169
|
+
# Human-readable menu name shown in the ComfyUI node picker.
NODE_DISPLAY_NAME_MAPPINGS = {
    "StableDiffusionXLControlNetUnionPipeline": "☁️BizyAir Controlnet Union SDXL 1.0",
}
|