bizyengine-0.4.2-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (76)
  1. bizyengine/__init__.py +35 -0
  2. bizyengine/bizy_server/__init__.py +7 -0
  3. bizyengine/bizy_server/api_client.py +763 -0
  4. bizyengine/bizy_server/errno.py +122 -0
  5. bizyengine/bizy_server/error_handler.py +3 -0
  6. bizyengine/bizy_server/execution.py +55 -0
  7. bizyengine/bizy_server/resp.py +24 -0
  8. bizyengine/bizy_server/server.py +898 -0
  9. bizyengine/bizy_server/utils.py +93 -0
  10. bizyengine/bizyair_extras/__init__.py +24 -0
  11. bizyengine/bizyair_extras/nodes_advanced_refluxcontrol.py +62 -0
  12. bizyengine/bizyair_extras/nodes_cogview4.py +31 -0
  13. bizyengine/bizyair_extras/nodes_comfyui_detail_daemon.py +180 -0
  14. bizyengine/bizyair_extras/nodes_comfyui_instantid.py +164 -0
  15. bizyengine/bizyair_extras/nodes_comfyui_layerstyle_advance.py +141 -0
  16. bizyengine/bizyair_extras/nodes_comfyui_pulid_flux.py +88 -0
  17. bizyengine/bizyair_extras/nodes_controlnet.py +50 -0
  18. bizyengine/bizyair_extras/nodes_custom_sampler.py +130 -0
  19. bizyengine/bizyair_extras/nodes_dataset.py +99 -0
  20. bizyengine/bizyair_extras/nodes_differential_diffusion.py +16 -0
  21. bizyengine/bizyair_extras/nodes_flux.py +69 -0
  22. bizyengine/bizyair_extras/nodes_image_utils.py +93 -0
  23. bizyengine/bizyair_extras/nodes_ip2p.py +20 -0
  24. bizyengine/bizyair_extras/nodes_ipadapter_plus/__init__.py +1 -0
  25. bizyengine/bizyair_extras/nodes_ipadapter_plus/nodes_ipadapter_plus.py +1598 -0
  26. bizyengine/bizyair_extras/nodes_janus_pro.py +81 -0
  27. bizyengine/bizyair_extras/nodes_kolors_mz/__init__.py +86 -0
  28. bizyengine/bizyair_extras/nodes_model_advanced.py +62 -0
  29. bizyengine/bizyair_extras/nodes_sd3.py +52 -0
  30. bizyengine/bizyair_extras/nodes_segment_anything.py +256 -0
  31. bizyengine/bizyair_extras/nodes_segment_anything_utils.py +134 -0
  32. bizyengine/bizyair_extras/nodes_testing_utils.py +139 -0
  33. bizyengine/bizyair_extras/nodes_trellis.py +199 -0
  34. bizyengine/bizyair_extras/nodes_ultimatesdupscale.py +137 -0
  35. bizyengine/bizyair_extras/nodes_upscale_model.py +32 -0
  36. bizyengine/bizyair_extras/nodes_wan_video.py +49 -0
  37. bizyengine/bizyair_extras/oauth_callback/main.py +118 -0
  38. bizyengine/core/__init__.py +8 -0
  39. bizyengine/core/commands/__init__.py +1 -0
  40. bizyengine/core/commands/base.py +27 -0
  41. bizyengine/core/commands/invoker.py +4 -0
  42. bizyengine/core/commands/processors/model_hosting_processor.py +0 -0
  43. bizyengine/core/commands/processors/prompt_processor.py +123 -0
  44. bizyengine/core/commands/servers/model_server.py +0 -0
  45. bizyengine/core/commands/servers/prompt_server.py +234 -0
  46. bizyengine/core/common/__init__.py +8 -0
  47. bizyengine/core/common/caching.py +198 -0
  48. bizyengine/core/common/client.py +262 -0
  49. bizyengine/core/common/env_var.py +101 -0
  50. bizyengine/core/common/utils.py +93 -0
  51. bizyengine/core/configs/conf.py +112 -0
  52. bizyengine/core/configs/models.json +101 -0
  53. bizyengine/core/configs/models.yaml +329 -0
  54. bizyengine/core/data_types.py +20 -0
  55. bizyengine/core/image_utils.py +288 -0
  56. bizyengine/core/nodes_base.py +159 -0
  57. bizyengine/core/nodes_io.py +97 -0
  58. bizyengine/core/path_utils/__init__.py +9 -0
  59. bizyengine/core/path_utils/path_manager.py +276 -0
  60. bizyengine/core/path_utils/utils.py +34 -0
  61. bizyengine/misc/__init__.py +0 -0
  62. bizyengine/misc/auth.py +83 -0
  63. bizyengine/misc/llm.py +431 -0
  64. bizyengine/misc/mzkolors.py +93 -0
  65. bizyengine/misc/nodes.py +1208 -0
  66. bizyengine/misc/nodes_controlnet_aux.py +491 -0
  67. bizyengine/misc/nodes_controlnet_union_sdxl.py +171 -0
  68. bizyengine/misc/route_sam.py +60 -0
  69. bizyengine/misc/segment_anything.py +276 -0
  70. bizyengine/misc/supernode.py +182 -0
  71. bizyengine/misc/utils.py +218 -0
  72. bizyengine/version.txt +1 -0
  73. bizyengine-0.4.2.dist-info/METADATA +12 -0
  74. bizyengine-0.4.2.dist-info/RECORD +76 -0
  75. bizyengine-0.4.2.dist-info/WHEEL +5 -0
  76. bizyengine-0.4.2.dist-info/top_level.txt +1 -0
bizyengine/bizyair_extras/nodes_ipadapter_plus/nodes_ipadapter_plus.py
@@ -0,0 +1,1598 @@
+ import math
+ import os
+
+ import folder_paths
+ import torch
+ from bizyengine.core import BizyAirBaseNode, BizyAirNodeIO, create_node_data
+ from bizyengine.core.data_types import CLIP, CONDITIONING, MODEL
+
+ # set the models directory
+ if "ipadapter" not in folder_paths.folder_names_and_paths:
+     current_paths = [os.path.join(folder_paths.models_dir, "ipadapter")]
+ else:
+     current_paths, _ = folder_paths.folder_names_and_paths["ipadapter"]
+ folder_paths.folder_names_and_paths["ipadapter"] = (
+     current_paths,
+     folder_paths.supported_pt_extensions,
+ )
+
+ WEIGHT_TYPES = [
+     "linear",
+     "ease in",
+     "ease out",
+     "ease in-out",
+     "reverse in-out",
+     "weak input",
+     "weak output",
+     "weak middle",
+     "strong middle",
+     "style transfer",
+     "composition",
+     "strong style transfer",
+     "style and composition",
+     "style transfer precise",
+     "composition precise",
+ ]
+
+
+ # """
+ # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ # Loaders
+ # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ # """
+
+
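The block above only registers a model search path; nothing is downloaded. A minimal sketch of how that registration is typically consumed, using ComfyUI's standard folder_paths helpers (the helper names are ComfyUI API, not part of this package):

import folder_paths

# "ipadapter" now resolves to models/ipadapter (plus any previously
# registered paths), filtered to ComfyUI's supported checkpoint extensions.
print(folder_paths.get_folder_paths("ipadapter"))
print(folder_paths.get_filename_list("ipadapter"))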
+ class IPAdapterUnifiedLoader(BizyAirBaseNode):
+     def __init__(self):
+         super().__init__()
+         self.lora = None
+         self.clipvision = {"file": None, "model": None}
+         self.ipadapter = {"file": None, "model": None}
+         self.insightface = {"provider": None, "model": None}
+
+     @classmethod
+     def INPUT_TYPES(s):
+         return {
+             "required": {
+                 "model": (MODEL,),
+                 "preset": (
+                     [
+                         # "LIGHT - SD1.5 only (low strength)",
+                         # "STANDARD (medium strength)",
+                         # "VIT-G (medium strength)",
+                         "PLUS (high strength)",
+                         "PLUS FACE (portraits)",
+                         # "FULL FACE - SD1.5 only (portraits stronger)",
+                     ],
+                 ),
+             },
+             "optional": {
+                 "ipadapter": ("IPADAPTER",),
+             },
+         }
+
+     NODE_DISPLAY_NAME = "IPAdapter Unified Loader"
+     RETURN_TYPES = (
+         MODEL,
+         "IPADAPTER",
+     )
+     RETURN_NAMES = (
+         "model",
+         "ipadapter",
+     )
+     FUNCTION = "load_models"
+     CATEGORY = "ipadapter"
+
+     def load_models(self, **kwargs):
+         assert kwargs.get("ipadapter", None) is None, "TODO"
+
+         new_model: BizyAirNodeIO = kwargs["model"].copy(self.assigned_id)
+         new_model.add_node_data(
+             class_type="IPAdapterUnifiedLoader",
+             inputs=kwargs,
+             outputs={"slot_index": 0},
+         )
+         ipadapter: BizyAirNodeIO = kwargs["model"].copy(self.assigned_id)
+         ipadapter.add_node_data(
+             class_type="IPAdapterUnifiedLoader",
+             inputs=kwargs,
+             outputs={"slot_index": 1},
+         )
+         return (
+             new_model,
+             ipadapter,
+         )
+
+
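Note that load_models never loads weights locally: it clones the incoming BizyAirNodeIO graph twice and appends the same IPAdapterUnifiedLoader node to each clone, with the two copies differing only in slot_index. A rough sketch of the fragment each clone carries, assuming node_data serializes to a ComfyUI-style prompt entry (field names inferred from create_node_data as used later in this file; the node id is hypothetical):

# Hypothetical illustration only — not package API.
recorded_fragment = {
    "17": {  # self.assigned_id for this loader node
        "class_type": "IPAdapterUnifiedLoader",
        "inputs": {"model": ..., "preset": "PLUS (high strength)"},
        "outputs": {"slot_index": 0},  # the second clone records slot_index 1
    }
}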
+ # class IPAdapterUnifiedLoaderFaceID(IPAdapterUnifiedLoader):
+ #     @classmethod
+ #     def INPUT_TYPES(s):
+ #         return {
+ #             "required": {
+ #                 "model": ("MODEL",),
+ #                 "preset": (
+ #                     [
+ #                         "FACEID",
+ #                         "FACEID PLUS - SD1.5 only",
+ #                         "FACEID PLUS V2",
+ #                         "FACEID PORTRAIT (style transfer)",
+ #                         "FACEID PORTRAIT UNNORM - SDXL only (strong)",
+ #                     ],
+ #                 ),
+ #                 "lora_strength": (
+ #                     "FLOAT",
+ #                     {"default": 0.6, "min": 0, "max": 1, "step": 0.01},
+ #                 ),
+ #                 "provider": (
+ #                     ["CPU", "CUDA", "ROCM", "DirectML", "OpenVINO", "CoreML"],
+ #                 ),
+ #             },
+ #             "optional": {
+ #                 "ipadapter": ("IPADAPTER",),
+ #             },
+ #         }
+
+ #     RETURN_NAMES = (
+ #         "MODEL",
+ #         "ipadapter",
+ #     )
+ #     CATEGORY = "ipadapter/faceid"
+
+
+ # class IPAdapterUnifiedLoaderCommunity(IPAdapterUnifiedLoader):
+ #     @classmethod
+ #     def INPUT_TYPES(s):
+ #         return {
+ #             "required": {
+ #                 "model": ("MODEL",),
+ #                 "preset": (["Composition", "Kolors"],),
+ #             },
+ #             "optional": {
+ #                 "ipadapter": ("IPADAPTER",),
+ #             },
+ #         }
+
+ #     CATEGORY = "ipadapter/loaders"
+
+
+ class IPAdapterModelLoader(BizyAirBaseNode):
+     @classmethod
+     def INPUT_TYPES(s):
+         return {
+             "required": {"ipadapter_file": (["kolors/ip_adapter_plus_general.bin"],)}
+         }
+
+     RETURN_TYPES = ("IPADAPTER",)
+     FUNCTION = "load_ipadapter_model"
+     CATEGORY = "ipadapter/loaders"
+
+     def load_ipadapter_model(self, **kwargs):
+         node_data = create_node_data(
+             class_type="IPAdapterModelLoader",
+             inputs=kwargs,
+             outputs={"slot_index": 0},
+         )
+         return (BizyAirNodeIO(self.assigned_id, nodes={self.assigned_id: node_data}),)
+
+
+ # class IPAdapterInsightFaceLoader:
+ #     @classmethod
+ #     def INPUT_TYPES(s):
+ #         return {
+ #             "required": {
+ #                 "provider": (["CPU", "CUDA", "ROCM"],),
+ #             },
+ #         }
+
+ #     RETURN_TYPES = ("INSIGHTFACE",)
+ #     FUNCTION = "load_insightface"
+ #     CATEGORY = "ipadapter/loaders"
+
+
+ # """
+ # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ # Main Apply Nodes
+ # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ # """
+
+
+ class IPAdapterSimple(BizyAirBaseNode):
+     @classmethod
+     def INPUT_TYPES(s):
+         return {
+             "required": {
+                 "model": (MODEL,),
+                 "ipadapter": ("IPADAPTER",),
+                 "image": ("IMAGE",),
+                 "weight": (
+                     "FLOAT",
+                     {"default": 1.0, "min": -1, "max": 3, "step": 0.05},
+                 ),
+                 "start_at": (
+                     "FLOAT",
+                     {"default": 0.0, "min": 0.0, "max": 1.0, "step": 0.001},
+                 ),
+                 "end_at": (
+                     "FLOAT",
+                     {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.001},
+                 ),
+                 "weight_type": (
+                     ["standard", "prompt is more important", "style transfer"],
+                 ),
+             },
+             "optional": {
+                 "attn_mask": ("MASK",),
+             },
+         }
+
+     NODE_DISPLAY_NAME = "IPAdapter"
+     RETURN_TYPES = (MODEL,)
+     RETURN_NAMES = ("model",)  # a bare string here would be indexed per character
+     FUNCTION = "apply_ipadapter"
+     CATEGORY = "ipadapter"
+
+     def apply_ipadapter(self, **kwargs):
+         new_model: BizyAirNodeIO = kwargs["model"].copy(self.assigned_id)
+         new_model.add_node_data(
+             class_type="IPAdapter",
+             inputs=kwargs,
+             outputs={"slot_index": 0},
+         )
+         return (new_model,)
+
+
+ class IPAdapterAdvanced(BizyAirBaseNode):
+     def __init__(self):
+         super().__init__()
+         self.unfold_batch = False
+
+     @classmethod
+     def INPUT_TYPES(s):
+         return {
+             "required": {
+                 "model": (MODEL,),
+                 "ipadapter": ("IPADAPTER",),
+                 "image": ("IMAGE",),
+                 "weight": (
+                     "FLOAT",
+                     {"default": 1.0, "min": -1, "max": 5, "step": 0.05},
+                 ),
+                 "weight_type": (WEIGHT_TYPES,),
+                 "combine_embeds": (
+                     ["concat", "add", "subtract", "average", "norm average"],
+                 ),
+                 "start_at": (
+                     "FLOAT",
+                     {"default": 0.0, "min": 0.0, "max": 1.0, "step": 0.001},
+                 ),
+                 "end_at": (
+                     "FLOAT",
+                     {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.001},
+                 ),
+                 "embeds_scaling": (
+                     ["V only", "K+V", "K+V w/ C penalty", "K+mean(V) w/ C penalty"],
+                 ),
+             },
+             "optional": {
+                 "image_negative": ("IMAGE",),
+                 "attn_mask": ("MASK",),
+                 "clip_vision": ("CLIP_VISION",),
+             },
+         }
+
+     RETURN_TYPES = (MODEL,)
+     RETURN_NAMES = ("model",)
+     FUNCTION = "apply_ipadapter"
+     CATEGORY = "ipadapter"
+
+     def apply_ipadapter(self, **kwargs):
+         new_model: BizyAirNodeIO = kwargs["model"].copy(self.assigned_id)
+         new_model.add_node_data(
+             class_type=self.__class__.__name__, inputs=kwargs, outputs={"slot_index": 0}
+         )
+         return (new_model,)
+
+
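Both active apply nodes follow the same pattern: apply_ipadapter records a node in the serialized graph rather than patching a model in memory, and IPAdapterAdvanced reuses its own class name as the remote class_type. A hedged sketch of wiring the loader into the advanced node outside the ComfyUI UI (checkpoint_model and reference_image are assumed inputs; in practice ComfyUI instantiates these classes and assigns the node ids):

# Sketch under the stated assumptions — normally the graph editor does this wiring.
loader = IPAdapterUnifiedLoader()
adapter = IPAdapterAdvanced()

model_out, ipadapter_out = loader.load_models(
    model=checkpoint_model, preset="PLUS (high strength)"
)
(patched_model,) = adapter.apply_ipadapter(
    model=model_out,
    ipadapter=ipadapter_out,
    image=reference_image,
    weight=0.8,
    weight_type="style transfer",
    combine_embeds="concat",
    start_at=0.0,
    end_at=1.0,
    embeds_scaling="V only",
)
# patched_model carries the accumulated node history into the next node.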
+ # class IPAdapterBatch(IPAdapterAdvanced):
+ #     def __init__(self):
+ #         super().__init__()
+ #         self.unfold_batch = True
+
+ #     NODE_DISPLAY_NAME = "IPAdapter Batch (Adv.)"
+
+ #     @classmethod
+ #     def INPUT_TYPES(s):
+ #         return {
+ #             "required": {
+ #                 "model": ("MODEL",),
+ #                 "ipadapter": ("IPADAPTER",),
+ #                 "image": ("IMAGE",),
+ #                 "weight": (
+ #                     "FLOAT",
+ #                     {"default": 1.0, "min": -1, "max": 5, "step": 0.05},
+ #                 ),
+ #                 "weight_type": (WEIGHT_TYPES,),
+ #                 "start_at": (
+ #                     "FLOAT",
+ #                     {"default": 0.0, "min": 0.0, "max": 1.0, "step": 0.001},
+ #                 ),
+ #                 "end_at": (
+ #                     "FLOAT",
+ #                     {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.001},
+ #                 ),
+ #                 "embeds_scaling": (
+ #                     ["V only", "K+V", "K+V w/ C penalty", "K+mean(V) w/ C penalty"],
+ #                 ),
+ #                 "encode_batch_size": ("INT", {"default": 0, "min": 0, "max": 4096}),
+ #             },
+ #             "optional": {
+ #                 "image_negative": ("IMAGE",),
+ #                 "attn_mask": ("MASK",),
+ #                 "clip_vision": ("CLIP_VISION",),
+ #             },
+ #         }
+
+
+ class IPAdapterStyleComposition(IPAdapterAdvanced):
+     def __init__(self):
+         super().__init__()
+
+     @classmethod
+     def INPUT_TYPES(s):
+         return {
+             "required": {
+                 "model": (MODEL,),
+                 "ipadapter": ("IPADAPTER",),
+                 "image_style": ("IMAGE",),
+                 "image_composition": ("IMAGE",),
+                 "weight_style": (
+                     "FLOAT",
+                     {"default": 1.0, "min": -1, "max": 5, "step": 0.05},
+                 ),
+                 "weight_composition": (
+                     "FLOAT",
+                     {"default": 1.0, "min": -1, "max": 5, "step": 0.05},
+                 ),
+                 "expand_style": ("BOOLEAN", {"default": False}),
+                 "combine_embeds": (
+                     ["concat", "add", "subtract", "average", "norm average"],
+                     {"default": "average"},
+                 ),
+                 "start_at": (
+                     "FLOAT",
+                     {"default": 0.0, "min": 0.0, "max": 1.0, "step": 0.001},
+                 ),
+                 "end_at": (
+                     "FLOAT",
+                     {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.001},
+                 ),
+                 "embeds_scaling": (
+                     ["V only", "K+V", "K+V w/ C penalty", "K+mean(V) w/ C penalty"],
+                 ),
+             },
+             "optional": {
+                 "image_negative": ("IMAGE",),
+                 "attn_mask": ("MASK",),
+                 "clip_vision": ("CLIP_VISION",),
+             },
+         }
+
+     CATEGORY = "ipadapter/style_composition"
+
+
+ # class IPAdapterStyleCompositionBatch(IPAdapterStyleComposition):
+ #     def __init__(self):
+ #         super().__init__()
+ #         self.unfold_batch = True
+
+ #     @classmethod
+ #     def INPUT_TYPES(s):
+ #         return {
+ #             "required": {
+ #                 "model": ("MODEL",),
+ #                 "ipadapter": ("IPADAPTER",),
+ #                 "image_style": ("IMAGE",),
+ #                 "image_composition": ("IMAGE",),
+ #                 "weight_style": (
+ #                     "FLOAT",
+ #                     {"default": 1.0, "min": -1, "max": 5, "step": 0.05},
+ #                 ),
+ #                 "weight_composition": (
+ #                     "FLOAT",
+ #                     {"default": 1.0, "min": -1, "max": 5, "step": 0.05},
+ #                 ),
+ #                 "expand_style": ("BOOLEAN", {"default": False}),
+ #                 "start_at": (
+ #                     "FLOAT",
+ #                     {"default": 0.0, "min": 0.0, "max": 1.0, "step": 0.001},
+ #                 ),
+ #                 "end_at": (
+ #                     "FLOAT",
+ #                     {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.001},
+ #                 ),
+ #                 "embeds_scaling": (
+ #                     ["V only", "K+V", "K+V w/ C penalty", "K+mean(V) w/ C penalty"],
+ #                 ),
+ #             },
+ #             "optional": {
+ #                 "image_negative": ("IMAGE",),
+ #                 "attn_mask": ("MASK",),
+ #                 "clip_vision": ("CLIP_VISION",),
+ #             },
+ #         }
+
+ #     NODE_DISPLAY_NAME = "IPAdapter Style & Composition Batch SDXL"
+
+
+ # class IPAdapterFaceID(IPAdapterAdvanced):
+ #     @classmethod
+ #     def INPUT_TYPES(s):
+ #         return {
+ #             "required": {
+ #                 "model": ("MODEL",),
+ #                 "ipadapter": ("IPADAPTER",),
+ #                 "image": ("IMAGE",),
+ #                 "weight": (
+ #                     "FLOAT",
+ #                     {"default": 1.0, "min": -1, "max": 3, "step": 0.05},
+ #                 ),
+ #                 "weight_faceidv2": (
+ #                     "FLOAT",
+ #                     {"default": 1.0, "min": -1, "max": 5.0, "step": 0.05},
+ #                 ),
+ #                 "weight_type": (WEIGHT_TYPES,),
+ #                 "combine_embeds": (
+ #                     ["concat", "add", "subtract", "average", "norm average"],
+ #                 ),
+ #                 "start_at": (
+ #                     "FLOAT",
+ #                     {"default": 0.0, "min": 0.0, "max": 1.0, "step": 0.001},
+ #                 ),
+ #                 "end_at": (
+ #                     "FLOAT",
+ #                     {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.001},
+ #                 ),
+ #                 "embeds_scaling": (
+ #                     ["V only", "K+V", "K+V w/ C penalty", "K+mean(V) w/ C penalty"],
+ #                 ),
+ #             },
+ #             "optional": {
+ #                 "image_negative": ("IMAGE",),
+ #                 "attn_mask": ("MASK",),
+ #                 "clip_vision": ("CLIP_VISION",),
+ #                 "insightface": ("INSIGHTFACE",),
+ #             },
+ #         }
+
+ #     CATEGORY = "ipadapter/faceid"
+ #     RETURN_TYPES = (
+ #         "MODEL",
+ #         "IMAGE",
+ #     )
+ #     RETURN_NAMES = (
+ #         "MODEL",
+ #         "face_image",
+ #     )
+ #     NODE_DISPLAY_NAME = "IPAdapter FaceID"
+
+
+ # class IPAAdapterFaceIDBatch(IPAdapterFaceID):
+ #     def __init__(self):
+ #         super().__init__()
+ #         self.unfold_batch = True
+
+ #     NODE_DISPLAY_NAME = "IPAdapter FaceID Batch"
+
+
+ # class IPAdapterTiled:
+ #     def __init__(self):
+ #         self.unfold_batch = False
+
+ #     @classmethod
+ #     def INPUT_TYPES(s):
+ #         return {
+ #             "required": {
+ #                 "model": ("MODEL",),
+ #                 "ipadapter": ("IPADAPTER",),
+ #                 "image": ("IMAGE",),
+ #                 "weight": (
+ #                     "FLOAT",
+ #                     {"default": 1.0, "min": -1, "max": 3, "step": 0.05},
+ #                 ),
+ #                 "weight_type": (WEIGHT_TYPES,),
+ #                 "combine_embeds": (
+ #                     ["concat", "add", "subtract", "average", "norm average"],
+ #                 ),
+ #                 "start_at": (
+ #                     "FLOAT",
+ #                     {"default": 0.0, "min": 0.0, "max": 1.0, "step": 0.001},
+ #                 ),
+ #                 "end_at": (
+ #                     "FLOAT",
+ #                     {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.001},
+ #                 ),
+ #                 "sharpening": (
+ #                     "FLOAT",
+ #                     {"default": 0.0, "min": 0.0, "max": 1.0, "step": 0.05},
+ #                 ),
+ #                 "embeds_scaling": (
+ #                     ["V only", "K+V", "K+V w/ C penalty", "K+mean(V) w/ C penalty"],
+ #                 ),
+ #             },
+ #             "optional": {
+ #                 "image_negative": ("IMAGE",),
+ #                 "attn_mask": ("MASK",),
+ #                 "clip_vision": ("CLIP_VISION",),
+ #             },
+ #         }
+
+ #     RETURN_TYPES = (
+ #         "MODEL",
+ #         "IMAGE",
+ #         "MASK",
+ #     )
+ #     RETURN_NAMES = (
+ #         "MODEL",
+ #         "tiles",
+ #         "masks",
+ #     )
+ #     FUNCTION = "apply_tiled"
+ #     CATEGORY = "ipadapter/tiled"
+
+ #     def apply_tiled(
+ #         self,
+ #         model,
+ #         ipadapter,
+ #         image,
+ #         weight,
+ #         weight_type,
+ #         start_at,
+ #         end_at,
+ #         sharpening,
+ #         combine_embeds="concat",
+ #         image_negative=None,
+ #         attn_mask=None,
+ #         clip_vision=None,
+ #         embeds_scaling="V only",
+ #         encode_batch_size=0,
+ #     ):
+ #         # 1. Select the models
+ #         if "ipadapter" in ipadapter:
+ #             ipadapter_model = ipadapter["ipadapter"]["model"]
+ #             clip_vision = (
+ #                 clip_vision
+ #                 if clip_vision is not None
+ #                 else ipadapter["clipvision"]["model"]
+ #             )
+ #         else:
+ #             ipadapter_model = ipadapter
+ #             clip_vision = clip_vision
+
+ #         if clip_vision is None:
+ #             raise Exception("Missing CLIPVision model.")
+
+ #         del ipadapter
+
+ #         # 2. Extract the tiles
+ #         tile_size = 256  # I'm using 256 instead of 224 as it is more likely divisible by the latent size, it will be downscaled to 224 by the clip vision encoder
+ #         _, oh, ow, _ = image.shape
+ #         if attn_mask is None:
+ #             attn_mask = torch.ones([1, oh, ow], dtype=image.dtype, device=image.device)
+
+ #         image = image.permute([0, 3, 1, 2])
+ #         attn_mask = attn_mask.unsqueeze(1)
+ #         # the mask should have the same proportions as the reference image and the latent
+ #         attn_mask = T.Resize(
+ #             (oh, ow), interpolation=T.InterpolationMode.BICUBIC, antialias=True
+ #         )(attn_mask)
+
+ #         # if the image is almost a square, we crop it to a square
+ #         if oh / ow > 0.75 and oh / ow < 1.33:
+ #             # crop the image to a square
+ #             image = T.CenterCrop(min(oh, ow))(image)
+ #             resize = (tile_size * 2, tile_size * 2)
+
+ #             attn_mask = T.CenterCrop(min(oh, ow))(attn_mask)
+ #         # otherwise resize the smallest side and the other proportionally
+ #         else:
+ #             resize = (
+ #                 (int(tile_size * ow / oh), tile_size)
+ #                 if oh < ow
+ #                 else (tile_size, int(tile_size * oh / ow))
+ #             )
+
+ #         # using PIL for better results
+ #         imgs = []
+ #         for img in image:
+ #             img = T.ToPILImage()(img)
+ #             img = img.resize(resize, resample=Image.Resampling["LANCZOS"])
+ #             imgs.append(T.ToTensor()(img))
+ #         image = torch.stack(imgs)
+ #         del imgs, img
+
+ #         # we don't need a high quality resize for the mask
+ #         attn_mask = T.Resize(
+ #             resize[::-1], interpolation=T.InterpolationMode.BICUBIC, antialias=True
+ #         )(attn_mask)
+
+ #         # we allow a maximum of 4 tiles
+ #         if oh / ow > 4 or oh / ow < 0.25:
+ #             crop = (tile_size, tile_size * 4) if oh < ow else (tile_size * 4, tile_size)
+ #             image = T.CenterCrop(crop)(image)
+ #             attn_mask = T.CenterCrop(crop)(attn_mask)
+
+ #         attn_mask = attn_mask.squeeze(1)
+
+ #         if sharpening > 0:
+ #             image = contrast_adaptive_sharpening(image, sharpening)
+
+ #         image = image.permute([0, 2, 3, 1])
+
+ #         _, oh, ow, _ = image.shape
+
+ #         # find the number of tiles for each side
+ #         tiles_x = math.ceil(ow / tile_size)
+ #         tiles_y = math.ceil(oh / tile_size)
+ #         overlap_x = max(
+ #             0, (tiles_x * tile_size - ow) / (tiles_x - 1 if tiles_x > 1 else 1)
+ #         )
+ #         overlap_y = max(
+ #             0, (tiles_y * tile_size - oh) / (tiles_y - 1 if tiles_y > 1 else 1)
+ #         )
+
+ #         base_mask = torch.zeros(
+ #             [attn_mask.shape[0], oh, ow], dtype=image.dtype, device=image.device
+ #         )
+
+ #         # extract all the tiles from the image and create the masks
+ #         tiles = []
+ #         masks = []
+ #         for y in range(tiles_y):
+ #             for x in range(tiles_x):
+ #                 start_x = int(x * (tile_size - overlap_x))
+ #                 start_y = int(y * (tile_size - overlap_y))
+ #                 tiles.append(
+ #                     image[
+ #                         :,
+ #                         start_y : start_y + tile_size,
+ #                         start_x : start_x + tile_size,
+ #                         :,
+ #                     ]
+ #                 )
+ #                 mask = base_mask.clone()
+ #                 mask[
+ #                     :, start_y : start_y + tile_size, start_x : start_x + tile_size
+ #                 ] = attn_mask[
+ #                     :, start_y : start_y + tile_size, start_x : start_x + tile_size
+ #                 ]
+ #                 masks.append(mask)
+ #                 del mask
+
+ #         # 3. Apply the ipadapter to each group of tiles
+ #         model = model.clone()
+ #         for i in range(len(tiles)):
+ #             ipa_args = {
+ #                 "image": tiles[i],
+ #                 "image_negative": image_negative,
+ #                 "weight": weight,
+ #                 "weight_type": weight_type,
+ #                 "combine_embeds": combine_embeds,
+ #                 "start_at": start_at,
+ #                 "end_at": end_at,
+ #                 "attn_mask": masks[i],
+ #                 "unfold_batch": self.unfold_batch,
+ #                 "embeds_scaling": embeds_scaling,
+ #                 "encode_batch_size": encode_batch_size,
+ #             }
+ #             # apply the ipadapter to the model without cloning it
+ #             model, _ = ipadapter_execute(
+ #                 model, ipadapter_model, clip_vision, **ipa_args
+ #             )
+
+ #         return (
+ #             model,
+ #             torch.cat(tiles),
+ #             torch.cat(masks),
+ #         )
+
+
+ # class IPAdapterTiledBatch(IPAdapterTiled):
+ #     def __init__(self):
+ #         self.unfold_batch = True
+
+ #     @classmethod
+ #     def INPUT_TYPES(s):
+ #         return {
+ #             "required": {
+ #                 "model": ("MODEL",),
+ #                 "ipadapter": ("IPADAPTER",),
+ #                 "image": ("IMAGE",),
+ #                 "weight": (
+ #                     "FLOAT",
+ #                     {"default": 1.0, "min": -1, "max": 3, "step": 0.05},
+ #                 ),
+ #                 "weight_type": (WEIGHT_TYPES,),
+ #                 "start_at": (
+ #                     "FLOAT",
+ #                     {"default": 0.0, "min": 0.0, "max": 1.0, "step": 0.001},
+ #                 ),
+ #                 "end_at": (
+ #                     "FLOAT",
+ #                     {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.001},
+ #                 ),
+ #                 "sharpening": (
+ #                     "FLOAT",
+ #                     {"default": 0.0, "min": 0.0, "max": 1.0, "step": 0.05},
+ #                 ),
+ #                 "embeds_scaling": (
+ #                     ["V only", "K+V", "K+V w/ C penalty", "K+mean(V) w/ C penalty"],
+ #                 ),
+ #                 "encode_batch_size": ("INT", {"default": 0, "min": 0, "max": 4096}),
+ #             },
+ #             "optional": {
+ #                 "image_negative": ("IMAGE",),
+ #                 "attn_mask": ("MASK",),
+ #                 "clip_vision": ("CLIP_VISION",),
+ #             },
+ #         }
+
+
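Even though the tiled nodes are disabled here, the overlap formula in apply_tiled is easy to check by hand; a self-contained example with an assumed 600-pixel-wide image:

import math

tile_size, ow = 256, 600
tiles_x = math.ceil(ow / tile_size)  # 3 tiles
overlap_x = max(0, (tiles_x * tile_size - ow) / (tiles_x - 1 if tiles_x > 1 else 1))
assert tiles_x == 3 and overlap_x == 84.0  # (3 * 256 - 600) / 2
# Tile origins are int(i * (tile_size - overlap_x)) = 0, 172, 344,
# so the last tile ends exactly at 344 + 256 = 600.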
+ # class IPAdapterEmbeds:
+ #     def __init__(self):
+ #         self.unfold_batch = False
+
+ #     @classmethod
+ #     def INPUT_TYPES(s):
+ #         return {
+ #             "required": {
+ #                 "model": ("MODEL",),
+ #                 "ipadapter": ("IPADAPTER",),
+ #                 "pos_embed": ("EMBEDS",),
+ #                 "weight": (
+ #                     "FLOAT",
+ #                     {"default": 1.0, "min": -1, "max": 3, "step": 0.05},
+ #                 ),
+ #                 "weight_type": (WEIGHT_TYPES,),
+ #                 "start_at": (
+ #                     "FLOAT",
+ #                     {"default": 0.0, "min": 0.0, "max": 1.0, "step": 0.001},
+ #                 ),
+ #                 "end_at": (
+ #                     "FLOAT",
+ #                     {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.001},
+ #                 ),
+ #                 "embeds_scaling": (
+ #                     ["V only", "K+V", "K+V w/ C penalty", "K+mean(V) w/ C penalty"],
+ #                 ),
+ #             },
+ #             "optional": {
+ #                 "neg_embed": ("EMBEDS",),
+ #                 "attn_mask": ("MASK",),
+ #                 "clip_vision": ("CLIP_VISION",),
+ #             },
+ #         }
+
+ #     RETURN_TYPES = ("MODEL",)
+ #     FUNCTION = "apply_ipadapter"
+ #     CATEGORY = "ipadapter/embeds"
+
+ #     def apply_ipadapter(
+ #         self,
+ #         model,
+ #         ipadapter,
+ #         pos_embed,
+ #         weight,
+ #         weight_type,
+ #         start_at,
+ #         end_at,
+ #         neg_embed=None,
+ #         attn_mask=None,
+ #         clip_vision=None,
+ #         embeds_scaling="V only",
+ #     ):
+ #         ipa_args = {
+ #             "pos_embed": pos_embed,
+ #             "neg_embed": neg_embed,
+ #             "weight": weight,
+ #             "weight_type": weight_type,
+ #             "start_at": start_at,
+ #             "end_at": end_at,
+ #             "attn_mask": attn_mask,
+ #             "embeds_scaling": embeds_scaling,
+ #             "unfold_batch": self.unfold_batch,
+ #         }
+
+ #         if "ipadapter" in ipadapter:
+ #             ipadapter_model = ipadapter["ipadapter"]["model"]
+ #             clip_vision = (
+ #                 clip_vision
+ #                 if clip_vision is not None
+ #                 else ipadapter["clipvision"]["model"]
+ #             )
+ #         else:
+ #             ipadapter_model = ipadapter
+ #             clip_vision = clip_vision
+
+ #         if clip_vision is None and neg_embed is None:
+ #             raise Exception("Missing CLIPVision model.")
+
+ #         del ipadapter
+
+ #         return ipadapter_execute(
+ #             model.clone(), ipadapter_model, clip_vision, **ipa_args
+ #         )
+
+
+ # class IPAdapterEmbedsBatch(IPAdapterEmbeds):
+ #     def __init__(self):
+ #         self.unfold_batch = True
+
+
+ # class IPAdapterMS(IPAdapterAdvanced):
+ #     @classmethod
+ #     def INPUT_TYPES(s):
+ #         return {
+ #             "required": {
+ #                 "model": ("MODEL",),
+ #                 "ipadapter": ("IPADAPTER",),
+ #                 "image": ("IMAGE",),
+ #                 "weight": (
+ #                     "FLOAT",
+ #                     {"default": 1.0, "min": -1, "max": 5, "step": 0.05},
+ #                 ),
+ #                 "weight_faceidv2": (
+ #                     "FLOAT",
+ #                     {"default": 1.0, "min": -1, "max": 5.0, "step": 0.05},
+ #                 ),
+ #                 "weight_type": (WEIGHT_TYPES,),
+ #                 "combine_embeds": (
+ #                     ["concat", "add", "subtract", "average", "norm average"],
+ #                 ),
+ #                 "start_at": (
+ #                     "FLOAT",
+ #                     {"default": 0.0, "min": 0.0, "max": 1.0, "step": 0.001},
+ #                 ),
+ #                 "end_at": (
+ #                     "FLOAT",
+ #                     {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.001},
+ #                 ),
+ #                 "embeds_scaling": (
+ #                     ["V only", "K+V", "K+V w/ C penalty", "K+mean(V) w/ C penalty"],
+ #                 ),
+ #                 "layer_weights": ("STRING", {"default": "", "multiline": True}),
+ #             },
+ #             "optional": {
+ #                 "image_negative": ("IMAGE",),
+ #                 "attn_mask": ("MASK",),
+ #                 "clip_vision": ("CLIP_VISION",),
+ #                 "insightface": ("INSIGHTFACE",),
+ #             },
+ #         }
+
+ #     CATEGORY = "ipadapter/dev"
+
+
+ # class IPAdapterClipVisionEnhancer(IPAdapterAdvanced):
+ #     @classmethod
+ #     def INPUT_TYPES(s):
+ #         return {
+ #             "required": {
+ #                 "model": ("MODEL",),
+ #                 "ipadapter": ("IPADAPTER",),
+ #                 "image": ("IMAGE",),
+ #                 "weight": (
+ #                     "FLOAT",
+ #                     {"default": 1.0, "min": -1, "max": 5, "step": 0.05},
+ #                 ),
+ #                 "weight_type": (WEIGHT_TYPES,),
+ #                 "combine_embeds": (
+ #                     ["concat", "add", "subtract", "average", "norm average"],
+ #                 ),
+ #                 "start_at": (
+ #                     "FLOAT",
+ #                     {"default": 0.0, "min": 0.0, "max": 1.0, "step": 0.001},
+ #                 ),
+ #                 "end_at": (
+ #                     "FLOAT",
+ #                     {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.001},
+ #                 ),
+ #                 "embeds_scaling": (
+ #                     ["V only", "K+V", "K+V w/ C penalty", "K+mean(V) w/ C penalty"],
+ #                 ),
+ #                 "enhance_tiles": ("INT", {"default": 2, "min": 1, "max": 16}),
+ #                 "enhance_ratio": (
+ #                     "FLOAT",
+ #                     {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.05},
+ #                 ),
+ #             },
+ #             "optional": {
+ #                 "image_negative": ("IMAGE",),
+ #                 "attn_mask": ("MASK",),
+ #                 "clip_vision": ("CLIP_VISION",),
+ #             },
+ #         }
+
+ #     CATEGORY = "ipadapter/dev"
+
+
+ # class IPAdapterFromParams(IPAdapterAdvanced):
+ #     @classmethod
+ #     def INPUT_TYPES(s):
+ #         return {
+ #             "required": {
+ #                 "model": ("MODEL",),
+ #                 "ipadapter": ("IPADAPTER",),
+ #                 "ipadapter_params": ("IPADAPTER_PARAMS",),
+ #                 "combine_embeds": (
+ #                     ["concat", "add", "subtract", "average", "norm average"],
+ #                 ),
+ #                 "embeds_scaling": (
+ #                     ["V only", "K+V", "K+V w/ C penalty", "K+mean(V) w/ C penalty"],
+ #                 ),
+ #             },
+ #             "optional": {
+ #                 "image_negative": ("IMAGE",),
+ #                 "clip_vision": ("CLIP_VISION",),
+ #             },
+ #         }
+
+ #     CATEGORY = "ipadapter/params"
+
+
+ # class IPAdapterPreciseStyleTransfer(IPAdapterAdvanced):
+ #     @classmethod
+ #     def INPUT_TYPES(s):
+ #         return {
+ #             "required": {
+ #                 "model": ("MODEL",),
+ #                 "ipadapter": ("IPADAPTER",),
+ #                 "image": ("IMAGE",),
+ #                 "weight": (
+ #                     "FLOAT",
+ #                     {"default": 1.0, "min": -1, "max": 5, "step": 0.05},
+ #                 ),
+ #                 "style_boost": (
+ #                     "FLOAT",
+ #                     {"default": 1.0, "min": -5, "max": 5, "step": 0.05},
+ #                 ),
+ #                 "combine_embeds": (
+ #                     ["concat", "add", "subtract", "average", "norm average"],
+ #                 ),
+ #                 "start_at": (
+ #                     "FLOAT",
+ #                     {"default": 0.0, "min": 0.0, "max": 1.0, "step": 0.001},
+ #                 ),
+ #                 "end_at": (
+ #                     "FLOAT",
+ #                     {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.001},
+ #                 ),
+ #                 "embeds_scaling": (
+ #                     ["V only", "K+V", "K+V w/ C penalty", "K+mean(V) w/ C penalty"],
+ #                 ),
+ #             },
+ #             "optional": {
+ #                 "image_negative": ("IMAGE",),
+ #                 "attn_mask": ("MASK",),
+ #                 "clip_vision": ("CLIP_VISION",),
+ #             },
+ #         }
+
+
+ # class IPAdapterPreciseStyleTransferBatch(IPAdapterPreciseStyleTransfer):
+ #     def __init__(self):
+ #         self.unfold_batch = True
+
+
+ # class IPAdapterPreciseComposition(IPAdapterAdvanced):
+ #     @classmethod
+ #     def INPUT_TYPES(s):
+ #         return {
+ #             "required": {
+ #                 "model": ("MODEL",),
+ #                 "ipadapter": ("IPADAPTER",),
+ #                 "image": ("IMAGE",),
+ #                 "weight": (
+ #                     "FLOAT",
+ #                     {"default": 1.0, "min": -1, "max": 5, "step": 0.05},
+ #                 ),
+ #                 "composition_boost": (
+ #                     "FLOAT",
+ #                     {"default": 0.0, "min": -5, "max": 5, "step": 0.05},
+ #                 ),
+ #                 "combine_embeds": (
+ #                     ["concat", "add", "subtract", "average", "norm average"],
+ #                 ),
+ #                 "start_at": (
+ #                     "FLOAT",
+ #                     {"default": 0.0, "min": 0.0, "max": 1.0, "step": 0.001},
+ #                 ),
+ #                 "end_at": (
+ #                     "FLOAT",
+ #                     {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.001},
+ #                 ),
+ #                 "embeds_scaling": (
+ #                     ["V only", "K+V", "K+V w/ C penalty", "K+mean(V) w/ C penalty"],
+ #                 ),
+ #             },
+ #             "optional": {
+ #                 "image_negative": ("IMAGE",),
+ #                 "attn_mask": ("MASK",),
+ #                 "clip_vision": ("CLIP_VISION",),
+ #             },
+ #         }
+
+
+ # class IPAdapterPreciseCompositionBatch(IPAdapterPreciseComposition):
+ #     def __init__(self):
+ #         self.unfold_batch = True
+
+
+ # """
+ # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ # Helpers
+ # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ # """
+
+
+ # class IPAdapterEncoder:
+ #     @classmethod
+ #     def INPUT_TYPES(s):
+ #         return {
+ #             "required": {
+ #                 "ipadapter": ("IPADAPTER",),
+ #                 "image": ("IMAGE",),
+ #                 "weight": (
+ #                     "FLOAT",
+ #                     {"default": 1.0, "min": -1.0, "max": 3.0, "step": 0.01},
+ #                 ),
+ #             },
+ #             "optional": {
+ #                 "mask": ("MASK",),
+ #                 "clip_vision": ("CLIP_VISION",),
+ #             },
+ #         }
+
+ #     RETURN_TYPES = (
+ #         "EMBEDS",
+ #         "EMBEDS",
+ #     )
+ #     RETURN_NAMES = (
+ #         "pos_embed",
+ #         "neg_embed",
+ #     )
+ #     FUNCTION = "encode"
+ #     CATEGORY = "ipadapter/embeds"
+
+
+ # class IPAdapterCombineEmbeds:
+ #     @classmethod
+ #     def INPUT_TYPES(s):
+ #         return {
+ #             "required": {
+ #                 "embed1": ("EMBEDS",),
+ #                 "method": (
+ #                     [
+ #                         "concat",
+ #                         "add",
+ #                         "subtract",
+ #                         "average",
+ #                         "norm average",
+ #                         "max",
+ #                         "min",
+ #                     ],
+ #                 ),
+ #             },
+ #             "optional": {
+ #                 "embed2": ("EMBEDS",),
+ #                 "embed3": ("EMBEDS",),
+ #                 "embed4": ("EMBEDS",),
+ #                 "embed5": ("EMBEDS",),
+ #             },
+ #         }
+
+ #     RETURN_TYPES = ("EMBEDS",)
+ #     FUNCTION = "batch"
+ #     CATEGORY = "ipadapter/embeds"
+
+ #     def batch(self, embed1, method, embed2=None, embed3=None, embed4=None, embed5=None):
+ #         if (
+ #             method == "concat"
+ #             and embed2 is None
+ #             and embed3 is None
+ #             and embed4 is None
+ #             and embed5 is None
+ #         ):
+ #             return (embed1,)
+
+ #         embeds = [embed1, embed2, embed3, embed4, embed5]
+ #         embeds = [embed for embed in embeds if embed is not None]
+ #         embeds = torch.cat(embeds, dim=0)
+
+ #         if method == "add":
+ #             embeds = torch.sum(embeds, dim=0).unsqueeze(0)
+ #         elif method == "subtract":
+ #             embeds = embeds[0] - torch.mean(embeds[1:], dim=0)
+ #             embeds = embeds.unsqueeze(0)
+ #         elif method == "average":
+ #             embeds = torch.mean(embeds, dim=0).unsqueeze(0)
+ #         elif method == "norm average":
+ #             embeds = torch.mean(
+ #                 embeds / torch.norm(embeds, dim=0, keepdim=True), dim=0
+ #             ).unsqueeze(0)
+ #         elif method == "max":
+ #             embeds = torch.max(embeds, dim=0).values.unsqueeze(0)
+ #         elif method == "min":
+ #             embeds = torch.min(embeds, dim=0).values.unsqueeze(0)
+
+ #         return (embeds,)
+
+
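The commented-out batch method above reduces up to five embedding tensors with plain torch ops. A minimal, self-contained sketch of the "average" and "norm average" branches (dummy shapes, not the real CLIP Vision embedding size):

import torch

e1, e2 = torch.randn(1, 4, 8), torch.randn(1, 4, 8)
embeds = torch.cat([e1, e2], dim=0)  # (2, 4, 8)

avg = torch.mean(embeds, dim=0).unsqueeze(0)  # "average"
norm_avg = torch.mean(
    embeds / torch.norm(embeds, dim=0, keepdim=True), dim=0
).unsqueeze(0)  # "norm average"
assert avg.shape == norm_avg.shape == (1, 4, 8)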
+ # class IPAdapterNoise:
+ #     @classmethod
+ #     def INPUT_TYPES(s):
+ #         return {
+ #             "required": {
+ #                 "type": (["fade", "dissolve", "gaussian", "shuffle"],),
+ #                 "strength": (
+ #                     "FLOAT",
+ #                     {"default": 1.0, "min": 0, "max": 1, "step": 0.05},
+ #                 ),
+ #                 "blur": ("INT", {"default": 0, "min": 0, "max": 32, "step": 1}),
+ #             },
+ #             "optional": {
+ #                 "image_optional": ("IMAGE",),
+ #             },
+ #         }
+
+ #     RETURN_TYPES = ("IMAGE",)
+ #     FUNCTION = "make_noise"
+ #     CATEGORY = "ipadapter/utils"
+
+ #     def make_noise(self, type, strength, blur, image_optional=None):
+ #         if image_optional is None:
+ #             image = torch.zeros([1, 224, 224, 3])
+ #         else:
+ #             transforms = T.Compose(
+ #                 [
+ #                     T.CenterCrop(min(image_optional.shape[1], image_optional.shape[2])),
+ #                     T.Resize(
+ #                         (224, 224),
+ #                         interpolation=T.InterpolationMode.BICUBIC,
+ #                         antialias=True,
+ #                     ),
+ #                 ]
+ #             )
+ #             image = transforms(image_optional.permute([0, 3, 1, 2])).permute(
+ #                 [0, 2, 3, 1]
+ #             )
+
+ #         seed = (
+ #             int(torch.sum(image).item()) % 1000000007
+ #         )  # hash the image to get a seed, grants predictability
+ #         torch.manual_seed(seed)
+
+ #         if type == "fade":
+ #             noise = torch.rand_like(image)
+ #             noise = image * (1 - strength) + noise * strength
+ #         elif type == "dissolve":
+ #             mask = (torch.rand_like(image) < strength).float()
+ #             noise = torch.rand_like(image)
+ #             noise = image * (1 - mask) + noise * mask
+ #         elif type == "gaussian":
+ #             noise = torch.randn_like(image) * strength
+ #             noise = image + noise
+ #         elif type == "shuffle":
+ #             transforms = T.Compose(
+ #                 [
+ #                     T.ElasticTransform(alpha=75.0, sigma=(1 - strength) * 3.5),
+ #                     T.RandomVerticalFlip(p=1.0),
+ #                     T.RandomHorizontalFlip(p=1.0),
+ #                 ]
+ #             )
+ #             image = transforms(image.permute([0, 3, 1, 2])).permute([0, 2, 3, 1])
+ #             noise = torch.randn_like(image) * (strength * 0.75)
+ #             noise = image * (1 - noise) + noise
+
+ #         del image
+ #         noise = torch.clamp(noise, 0, 1)
+
+ #         if blur > 0:
+ #             if blur % 2 == 0:
+ #                 blur += 1
+ #             noise = T.functional.gaussian_blur(
+ #                 noise.permute([0, 3, 1, 2]), blur
+ #             ).permute([0, 2, 3, 1])
+
+ #         return (noise,)
+
+
+ # class PrepImageForClipVision:
+ #     @classmethod
+ #     def INPUT_TYPES(s):
+ #         return {
+ #             "required": {
+ #                 "image": ("IMAGE",),
+ #                 "interpolation": (
+ #                     ["LANCZOS", "BICUBIC", "HAMMING", "BILINEAR", "BOX", "NEAREST"],
+ #                 ),
+ #                 "crop_position": (["top", "bottom", "left", "right", "center", "pad"],),
+ #                 "sharpening": (
+ #                     "FLOAT",
+ #                     {"default": 0.0, "min": 0, "max": 1, "step": 0.05},
+ #                 ),
+ #             },
+ #         }
+
+ #     RETURN_TYPES = ("IMAGE",)
+ #     FUNCTION = "prep_image"
+
+ #     CATEGORY = "ipadapter/utils"
+
+ #     def prep_image(
+ #         self, image, interpolation="LANCZOS", crop_position="center", sharpening=0.0
+ #     ):
+ #         size = (224, 224)
+ #         _, oh, ow, _ = image.shape
+ #         output = image.permute([0, 3, 1, 2])
+
+ #         if crop_position == "pad":
+ #             if oh != ow:
+ #                 if oh > ow:
+ #                     pad = (oh - ow) // 2
+ #                     pad = (pad, 0, pad, 0)
+ #                 elif ow > oh:
+ #                     pad = (ow - oh) // 2
+ #                     pad = (0, pad, 0, pad)
+ #                 output = T.functional.pad(output, pad, fill=0)
+ #         else:
+ #             crop_size = min(oh, ow)
+ #             x = (ow - crop_size) // 2
+ #             y = (oh - crop_size) // 2
+ #             if "top" in crop_position:
+ #                 y = 0
+ #             elif "bottom" in crop_position:
+ #                 y = oh - crop_size
+ #             elif "left" in crop_position:
+ #                 x = 0
+ #             elif "right" in crop_position:
+ #                 x = ow - crop_size
+
+ #             x2 = x + crop_size
+ #             y2 = y + crop_size
+
+ #             output = output[:, :, y:y2, x:x2]
+
+ #         imgs = []
+ #         for img in output:
+ #             img = T.ToPILImage()(img)  # using PIL for better results
+ #             img = img.resize(size, resample=Image.Resampling[interpolation])
+ #             imgs.append(T.ToTensor()(img))
+ #         output = torch.stack(imgs, dim=0)
+ #         del imgs, img
+
+ #         if sharpening > 0:
+ #             output = contrast_adaptive_sharpening(output, sharpening)
+
+ #         output = output.permute([0, 2, 3, 1])
+
+ #         return (output,)
+
+
+ # class IPAdapterSaveEmbeds:
+ #     def __init__(self):
+ #         self.output_dir = folder_paths.get_output_directory()
+
+ #     @classmethod
+ #     def INPUT_TYPES(s):
+ #         return {
+ #             "required": {
+ #                 "embeds": ("EMBEDS",),
+ #                 "filename_prefix": ("STRING", {"default": "IP_embeds"}),
+ #             },
+ #         }
+
+ #     RETURN_TYPES = ()
+ #     FUNCTION = "save"
+ #     OUTPUT_NODE = True
+ #     CATEGORY = "ipadapter/embeds"
+
+ #     def save(self, embeds, filename_prefix):
+ #         (
+ #             full_output_folder,
+ #             filename,
+ #             counter,
+ #             subfolder,
+ #             filename_prefix,
+ #         ) = folder_paths.get_save_image_path(filename_prefix, self.output_dir)
+ #         file = f"{filename}_{counter:05}.ipadpt"
+ #         file = os.path.join(full_output_folder, file)
+
+ #         torch.save(embeds, file)
+ #         return (None,)
+
+
+ # class IPAdapterLoadEmbeds:
+ #     @classmethod
+ #     def INPUT_TYPES(s):
+ #         input_dir = folder_paths.get_input_directory()
+ #         files = [
+ #             os.path.relpath(os.path.join(root, file), input_dir)
+ #             for root, dirs, files in os.walk(input_dir)
+ #             for file in files
+ #             if file.endswith(".ipadpt")
+ #         ]
+ #         return {
+ #             "required": {
+ #                 "embeds": [
+ #                     sorted(files),
+ #                 ]
+ #             },
+ #         }
+
+ #     RETURN_TYPES = ("EMBEDS",)
+ #     FUNCTION = "load"
+ #     CATEGORY = "ipadapter/embeds"
+
+ #     def load(self, embeds):
+ #         path = folder_paths.get_annotated_filepath(embeds)
+ #         return (torch.load(path).cpu(),)
+
+
+ # class IPAdapterWeights:
+ #     @classmethod
+ #     def INPUT_TYPES(s):
+ #         return {
+ #             "required": {
+ #                 "weights": ("STRING", {"default": "1.0, 0.0", "multiline": True}),
+ #                 "timing": (
+ #                     [
+ #                         "custom",
+ #                         "linear",
+ #                         "ease_in_out",
+ #                         "ease_in",
+ #                         "ease_out",
+ #                         "random",
+ #                     ],
+ #                     {"default": "linear"},
+ #                 ),
+ #                 "frames": ("INT", {"default": 0, "min": 0, "max": 9999, "step": 1}),
+ #                 "start_frame": (
+ #                     "INT",
+ #                     {"default": 0, "min": 0, "max": 9999, "step": 1},
+ #                 ),
+ #                 "end_frame": (
+ #                     "INT",
+ #                     {"default": 9999, "min": 0, "max": 9999, "step": 1},
+ #                 ),
+ #                 "add_starting_frames": (
+ #                     "INT",
+ #                     {"default": 0, "min": 0, "max": 9999, "step": 1},
+ #                 ),
+ #                 "add_ending_frames": (
+ #                     "INT",
+ #                     {"default": 0, "min": 0, "max": 9999, "step": 1},
+ #                 ),
+ #                 "method": (
+ #                     ["full batch", "shift batches", "alternate batches"],
+ #                     {"default": "full batch"},
+ #                 ),
+ #             },
+ #             "optional": {
+ #                 "image": ("IMAGE",),
+ #             },
+ #         }
+
+ #     RETURN_TYPES = ("FLOAT", "FLOAT", "INT", "IMAGE", "IMAGE", "WEIGHTS_STRATEGY")
+ #     RETURN_NAMES = (
+ #         "weights",
+ #         "weights_invert",
+ #         "total_frames",
+ #         "image_1",
+ #         "image_2",
+ #         "weights_strategy",
+ #     )
+ #     FUNCTION = "weights"
+ #     CATEGORY = "ipadapter/weights"
+
+
+ # class IPAdapterWeightsFromStrategy(IPAdapterWeights):
+ #     @classmethod
+ #     def INPUT_TYPES(s):
+ #         return {
+ #             "required": {
+ #                 "weights_strategy": ("WEIGHTS_STRATEGY",),
+ #             },
+ #             "optional": {
+ #                 "image": ("IMAGE",),
+ #             },
+ #         }
+
+
+ # class IPAdapterPromptScheduleFromWeightsStrategy:
+ #     @classmethod
+ #     def INPUT_TYPES(s):
+ #         return {
+ #             "required": {
+ #                 "weights_strategy": ("WEIGHTS_STRATEGY",),
+ #                 "prompt": ("STRING", {"default": "", "multiline": True}),
+ #             }
+ #         }
+
+ #     RETURN_TYPES = ("STRING",)
+ #     RETURN_NAMES = ("prompt_schedule",)
+ #     FUNCTION = "prompt_schedule"
+ #     CATEGORY = "ipadapter/weights"
+
+
+ # class IPAdapterCombineWeights:
+ #     @classmethod
+ #     def INPUT_TYPES(s):
+ #         return {
+ #             "required": {
+ #                 "weights_1": (
+ #                     "FLOAT",
+ #                     {"default": 0.0, "min": 0.0, "max": 1.0, "step": 0.05},
+ #                 ),
+ #                 "weights_2": (
+ #                     "FLOAT",
+ #                     {"default": 0.0, "min": 0.0, "max": 1.0, "step": 0.05},
+ #                 ),
+ #             }
+ #         }
+
+ #     RETURN_TYPES = ("FLOAT", "INT")
+ #     RETURN_NAMES = ("weights", "count")
+ #     FUNCTION = "combine"
+ #     CATEGORY = "ipadapter/utils"
+
+
+ # class IPAdapterRegionalConditioning:
+ #     @classmethod
+ #     def INPUT_TYPES(s):
+ #         return {
+ #             "required": {
+ #                 # "set_cond_area": (["default", "mask bounds"],),
+ #                 "image": ("IMAGE",),
+ #                 "image_weight": (
+ #                     "FLOAT",
+ #                     {"default": 1.0, "min": -1.0, "max": 3.0, "step": 0.05},
+ #                 ),
+ #                 "prompt_weight": (
+ #                     "FLOAT",
+ #                     {"default": 1.0, "min": 0.0, "max": 10.0, "step": 0.05},
+ #                 ),
+ #                 "weight_type": (WEIGHT_TYPES,),
+ #                 "start_at": (
+ #                     "FLOAT",
+ #                     {"default": 0.0, "min": 0.0, "max": 1.0, "step": 0.001},
+ #                 ),
+ #                 "end_at": (
+ #                     "FLOAT",
+ #                     {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.001},
+ #                 ),
+ #             },
+ #             "optional": {
+ #                 "mask": ("MASK",),
+ #                 "positive": ("CONDITIONING",),
+ #                 "negative": ("CONDITIONING",),
+ #             },
+ #         }
+
+ #     RETURN_TYPES = (
+ #         "IPADAPTER_PARAMS",
+ #         "CONDITIONING",
+ #         "CONDITIONING",
+ #     )
+ #     RETURN_NAMES = ("IPADAPTER_PARAMS", "POSITIVE", "NEGATIVE")
+ #     FUNCTION = "conditioning"
+
+ #     CATEGORY = "ipadapter/params"
+
+
+ # class IPAdapterCombineParams:
+ #     @classmethod
+ #     def INPUT_TYPES(s):
+ #         return {
+ #             "required": {
+ #                 "params_1": ("IPADAPTER_PARAMS",),
+ #                 "params_2": ("IPADAPTER_PARAMS",),
+ #             },
+ #             "optional": {
+ #                 "params_3": ("IPADAPTER_PARAMS",),
+ #                 "params_4": ("IPADAPTER_PARAMS",),
+ #                 "params_5": ("IPADAPTER_PARAMS",),
+ #             },
+ #         }
+
+ #     RETURN_TYPES = ("IPADAPTER_PARAMS",)
+ #     FUNCTION = "combine"
+ #     CATEGORY = "ipadapter/params"
+
+
+ # """
+ # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ # Register
+ # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ # """
+
+ # NODE_CLASS_MAPPINGS = {
+ #     # Main Apply Nodes
+ #     "IPAdapter": IPAdapterSimple,
+ #     "IPAdapterAdvanced": IPAdapterAdvanced,
+ #     "IPAdapterBatch": IPAdapterBatch,
+ #     "IPAdapterFaceID": IPAdapterFaceID,
+ #     "IPAAdapterFaceIDBatch": IPAAdapterFaceIDBatch,
+ #     "IPAdapterTiled": IPAdapterTiled,
+ #     "IPAdapterTiledBatch": IPAdapterTiledBatch,
+ #     "IPAdapterEmbeds": IPAdapterEmbeds,
+ #     "IPAdapterEmbedsBatch": IPAdapterEmbedsBatch,
+ #     "IPAdapterStyleComposition": IPAdapterStyleComposition,
+ #     "IPAdapterStyleCompositionBatch": IPAdapterStyleCompositionBatch,
+ #     "IPAdapterMS": IPAdapterMS,
+ #     "IPAdapterClipVisionEnhancer": IPAdapterClipVisionEnhancer,
+ #     "IPAdapterFromParams": IPAdapterFromParams,
+ #     "IPAdapterPreciseStyleTransfer": IPAdapterPreciseStyleTransfer,
+ #     "IPAdapterPreciseStyleTransferBatch": IPAdapterPreciseStyleTransferBatch,
+ #     "IPAdapterPreciseComposition": IPAdapterPreciseComposition,
+ #     "IPAdapterPreciseCompositionBatch": IPAdapterPreciseCompositionBatch,
+ #     # Loaders
+ #     "IPAdapterUnifiedLoader": IPAdapterUnifiedLoader,
+ #     "IPAdapterUnifiedLoaderFaceID": IPAdapterUnifiedLoaderFaceID,
+ #     "IPAdapterModelLoader": IPAdapterModelLoader,
+ #     "IPAdapterInsightFaceLoader": IPAdapterInsightFaceLoader,
+ #     "IPAdapterUnifiedLoaderCommunity": IPAdapterUnifiedLoaderCommunity,
+ #     # Helpers
+ #     "IPAdapterEncoder": IPAdapterEncoder,
+ #     "IPAdapterCombineEmbeds": IPAdapterCombineEmbeds,
+ #     "IPAdapterNoise": IPAdapterNoise,
+ #     "PrepImageForClipVision": PrepImageForClipVision,
+ #     "IPAdapterSaveEmbeds": IPAdapterSaveEmbeds,
+ #     "IPAdapterLoadEmbeds": IPAdapterLoadEmbeds,
+ #     "IPAdapterWeights": IPAdapterWeights,
+ #     "IPAdapterCombineWeights": IPAdapterCombineWeights,
+ #     "IPAdapterWeightsFromStrategy": IPAdapterWeightsFromStrategy,
+ #     "IPAdapterPromptScheduleFromWeightsStrategy": IPAdapterPromptScheduleFromWeightsStrategy,
+ #     "IPAdapterRegionalConditioning": IPAdapterRegionalConditioning,
+ #     "IPAdapterCombineParams": IPAdapterCombineParams,
+ # }
+
+ # NODE_DISPLAY_NAME_MAPPINGS = {
+ #     # Main Apply Nodes
+ #     "IPAdapter": "IPAdapter",
+ #     "IPAdapterAdvanced": "IPAdapter Advanced",
+ #     "IPAdapterBatch": "IPAdapter Batch (Adv.)",
+ #     "IPAdapterFaceID": "IPAdapter FaceID",
+ #     "IPAAdapterFaceIDBatch": "IPAdapter FaceID Batch",
+ #     "IPAdapterTiled": "IPAdapter Tiled",
+ #     "IPAdapterTiledBatch": "IPAdapter Tiled Batch",
+ #     "IPAdapterEmbeds": "IPAdapter Embeds",
+ #     "IPAdapterEmbedsBatch": "IPAdapter Embeds Batch",
+ #     "IPAdapterStyleComposition": "IPAdapter Style & Composition SDXL",
+ #     "IPAdapterStyleCompositionBatch": "IPAdapter Style & Composition Batch SDXL",
+ #     "IPAdapterMS": "IPAdapter Mad Scientist",
+ #     "IPAdapterClipVisionEnhancer": "IPAdapter ClipVision Enhancer",
+ #     "IPAdapterFromParams": "IPAdapter from Params",
+ #     "IPAdapterPreciseStyleTransfer": "IPAdapter Precise Style Transfer",
+ #     "IPAdapterPreciseStyleTransferBatch": "IPAdapter Precise Style Transfer Batch",
+ #     "IPAdapterPreciseComposition": "IPAdapter Precise Composition",
+ #     "IPAdapterPreciseCompositionBatch": "IPAdapter Precise Composition Batch",
+ #     # Loaders
+ #     "IPAdapterUnifiedLoader": "IPAdapter Unified Loader",
+ #     "IPAdapterUnifiedLoaderFaceID": "IPAdapter Unified Loader FaceID",
+ #     "IPAdapterModelLoader": "IPAdapter Model Loader",
+ #     "IPAdapterInsightFaceLoader": "IPAdapter InsightFace Loader",
+ #     "IPAdapterUnifiedLoaderCommunity": "IPAdapter Unified Loader Community",
+ #     # Helpers
+ #     "IPAdapterEncoder": "IPAdapter Encoder",
+ #     "IPAdapterCombineEmbeds": "IPAdapter Combine Embeds",
+ #     "IPAdapterNoise": "IPAdapter Noise",
+ #     "PrepImageForClipVision": "Prep Image For ClipVision",
+ #     "IPAdapterSaveEmbeds": "IPAdapter Save Embeds",
+ #     "IPAdapterLoadEmbeds": "IPAdapter Load Embeds",
+ #     "IPAdapterWeights": "IPAdapter Weights",
+ #     "IPAdapterWeightsFromStrategy": "IPAdapter Weights From Strategy",
+ #     "IPAdapterPromptScheduleFromWeightsStrategy": "Prompt Schedule From Weights Strategy",
+ #     "IPAdapterCombineWeights": "IPAdapter Combine Weights",
+ #     "IPAdapterRegionalConditioning": "IPAdapter Regional Conditioning",
+ #     "IPAdapterCombineParams": "IPAdapter Combine Params",
+ # }