hcpdiff 2.1__tar.gz → 2.2__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (120)
  1. {hcpdiff-2.1 → hcpdiff-2.2}/PKG-INFO +15 -1
  2. {hcpdiff-2.1 → hcpdiff-2.2}/README.md +14 -0
  3. {hcpdiff-2.1 → hcpdiff-2.2}/hcpdiff/ckpt_manager/__init__.py +1 -1
  4. {hcpdiff-2.1 → hcpdiff-2.2}/hcpdiff/ckpt_manager/format/lora_webui.py +13 -5
  5. {hcpdiff-2.1 → hcpdiff-2.2}/hcpdiff/data/__init__.py +2 -2
  6. {hcpdiff-2.1 → hcpdiff-2.2}/hcpdiff/data/handler/__init__.py +1 -1
  7. {hcpdiff-2.1 → hcpdiff-2.2}/hcpdiff/data/handler/diffusion.py +17 -7
  8. {hcpdiff-2.1 → hcpdiff-2.2}/hcpdiff/data/source/__init__.py +2 -1
  9. hcpdiff-2.2/hcpdiff/data/source/text.py +40 -0
  10. {hcpdiff-2.1 → hcpdiff-2.2}/hcpdiff/data/source/text2img.py +1 -1
  11. {hcpdiff-2.1 → hcpdiff-2.2}/hcpdiff/easy/cfg/__init__.py +1 -1
  12. {hcpdiff-2.1 → hcpdiff-2.2}/hcpdiff/easy/cfg/sd15_train.py +10 -4
  13. {hcpdiff-2.1 → hcpdiff-2.2}/hcpdiff/easy/cfg/sdxl_train.py +11 -4
  14. {hcpdiff-2.1 → hcpdiff-2.2}/hcpdiff/easy/cfg/t2i.py +64 -13
  15. {hcpdiff-2.1 → hcpdiff-2.2}/hcpdiff/workflow/diffusion.py +6 -5
  16. {hcpdiff-2.1 → hcpdiff-2.2}/hcpdiff/workflow/text.py +6 -25
  17. {hcpdiff-2.1 → hcpdiff-2.2}/hcpdiff.egg-info/PKG-INFO +15 -1
  18. {hcpdiff-2.1 → hcpdiff-2.2}/hcpdiff.egg-info/SOURCES.txt +1 -0
  19. {hcpdiff-2.1 → hcpdiff-2.2}/setup.py +1 -1
  20. {hcpdiff-2.1 → hcpdiff-2.2}/LICENSE +0 -0
  21. {hcpdiff-2.1 → hcpdiff-2.2}/hcpdiff/__init__.py +0 -0
  22. {hcpdiff-2.1 → hcpdiff-2.2}/hcpdiff/ckpt_manager/ckpt.py +0 -0
  23. {hcpdiff-2.1 → hcpdiff-2.2}/hcpdiff/ckpt_manager/format/__init__.py +0 -0
  24. {hcpdiff-2.1 → hcpdiff-2.2}/hcpdiff/ckpt_manager/format/diffusers.py +0 -0
  25. {hcpdiff-2.1 → hcpdiff-2.2}/hcpdiff/ckpt_manager/format/emb.py +0 -0
  26. {hcpdiff-2.1 → hcpdiff-2.2}/hcpdiff/ckpt_manager/format/sd_single.py +0 -0
  27. {hcpdiff-2.1 → hcpdiff-2.2}/hcpdiff/ckpt_manager/loader.py +0 -0
  28. {hcpdiff-2.1 → hcpdiff-2.2}/hcpdiff/data/cache/__init__.py +0 -0
  29. {hcpdiff-2.1 → hcpdiff-2.2}/hcpdiff/data/cache/vae.py +0 -0
  30. {hcpdiff-2.1 → hcpdiff-2.2}/hcpdiff/data/dataset.py +0 -0
  31. {hcpdiff-2.1 → hcpdiff-2.2}/hcpdiff/data/handler/controlnet.py +0 -0
  32. {hcpdiff-2.1 → hcpdiff-2.2}/hcpdiff/data/handler/text.py +0 -0
  33. {hcpdiff-2.1 → hcpdiff-2.2}/hcpdiff/data/source/folder_class.py +0 -0
  34. {hcpdiff-2.1 → hcpdiff-2.2}/hcpdiff/data/source/text2img_cond.py +0 -0
  35. {hcpdiff-2.1 → hcpdiff-2.2}/hcpdiff/diffusion/__init__.py +0 -0
  36. {hcpdiff-2.1 → hcpdiff-2.2}/hcpdiff/diffusion/noise/__init__.py +0 -0
  37. {hcpdiff-2.1 → hcpdiff-2.2}/hcpdiff/diffusion/noise/pyramid_noise.py +0 -0
  38. {hcpdiff-2.1 → hcpdiff-2.2}/hcpdiff/diffusion/noise/zero_terminal.py +0 -0
  39. {hcpdiff-2.1 → hcpdiff-2.2}/hcpdiff/diffusion/sampler/__init__.py +0 -0
  40. {hcpdiff-2.1 → hcpdiff-2.2}/hcpdiff/diffusion/sampler/base.py +0 -0
  41. {hcpdiff-2.1 → hcpdiff-2.2}/hcpdiff/diffusion/sampler/ddpm.py +0 -0
  42. {hcpdiff-2.1 → hcpdiff-2.2}/hcpdiff/diffusion/sampler/diffusers.py +0 -0
  43. {hcpdiff-2.1 → hcpdiff-2.2}/hcpdiff/diffusion/sampler/edm.py +0 -0
  44. {hcpdiff-2.1 → hcpdiff-2.2}/hcpdiff/diffusion/sampler/sigma_scheduler/__init__.py +0 -0
  45. {hcpdiff-2.1 → hcpdiff-2.2}/hcpdiff/diffusion/sampler/sigma_scheduler/base.py +0 -0
  46. {hcpdiff-2.1 → hcpdiff-2.2}/hcpdiff/diffusion/sampler/sigma_scheduler/ddpm.py +0 -0
  47. {hcpdiff-2.1 → hcpdiff-2.2}/hcpdiff/diffusion/sampler/sigma_scheduler/edm.py +0 -0
  48. {hcpdiff-2.1 → hcpdiff-2.2}/hcpdiff/easy/__init__.py +0 -0
  49. {hcpdiff-2.1 → hcpdiff-2.2}/hcpdiff/easy/model/__init__.py +0 -0
  50. {hcpdiff-2.1 → hcpdiff-2.2}/hcpdiff/easy/model/cnet.py +0 -0
  51. {hcpdiff-2.1 → hcpdiff-2.2}/hcpdiff/easy/model/loader.py +0 -0
  52. {hcpdiff-2.1 → hcpdiff-2.2}/hcpdiff/easy/sampler.py +0 -0
  53. {hcpdiff-2.1 → hcpdiff-2.2}/hcpdiff/evaluate/__init__.py +0 -0
  54. {hcpdiff-2.1 → hcpdiff-2.2}/hcpdiff/evaluate/previewer.py +0 -0
  55. {hcpdiff-2.1 → hcpdiff-2.2}/hcpdiff/loss/__init__.py +0 -0
  56. {hcpdiff-2.1 → hcpdiff-2.2}/hcpdiff/loss/base.py +0 -0
  57. {hcpdiff-2.1 → hcpdiff-2.2}/hcpdiff/loss/gw.py +0 -0
  58. {hcpdiff-2.1 → hcpdiff-2.2}/hcpdiff/loss/ssim.py +0 -0
  59. {hcpdiff-2.1 → hcpdiff-2.2}/hcpdiff/loss/vlb.py +0 -0
  60. {hcpdiff-2.1 → hcpdiff-2.2}/hcpdiff/loss/weighting.py +0 -0
  61. {hcpdiff-2.1 → hcpdiff-2.2}/hcpdiff/models/__init__.py +0 -0
  62. {hcpdiff-2.1 → hcpdiff-2.2}/hcpdiff/models/cfg_context.py +0 -0
  63. {hcpdiff-2.1 → hcpdiff-2.2}/hcpdiff/models/compose/__init__.py +0 -0
  64. {hcpdiff-2.1 → hcpdiff-2.2}/hcpdiff/models/compose/compose_hook.py +0 -0
  65. {hcpdiff-2.1 → hcpdiff-2.2}/hcpdiff/models/compose/compose_textencoder.py +0 -0
  66. {hcpdiff-2.1 → hcpdiff-2.2}/hcpdiff/models/compose/compose_tokenizer.py +0 -0
  67. {hcpdiff-2.1 → hcpdiff-2.2}/hcpdiff/models/compose/sdxl_composer.py +0 -0
  68. {hcpdiff-2.1 → hcpdiff-2.2}/hcpdiff/models/container.py +0 -0
  69. {hcpdiff-2.1 → hcpdiff-2.2}/hcpdiff/models/controlnet.py +0 -0
  70. {hcpdiff-2.1 → hcpdiff-2.2}/hcpdiff/models/lora_base.py +0 -0
  71. {hcpdiff-2.1 → hcpdiff-2.2}/hcpdiff/models/lora_base_patch.py +0 -0
  72. {hcpdiff-2.1 → hcpdiff-2.2}/hcpdiff/models/lora_layers.py +0 -0
  73. {hcpdiff-2.1 → hcpdiff-2.2}/hcpdiff/models/lora_layers_patch.py +0 -0
  74. {hcpdiff-2.1 → hcpdiff-2.2}/hcpdiff/models/text_emb_ex.py +0 -0
  75. {hcpdiff-2.1 → hcpdiff-2.2}/hcpdiff/models/textencoder_ex.py +0 -0
  76. {hcpdiff-2.1 → hcpdiff-2.2}/hcpdiff/models/tokenizer_ex.py +0 -0
  77. {hcpdiff-2.1 → hcpdiff-2.2}/hcpdiff/models/wrapper/__init__.py +0 -0
  78. {hcpdiff-2.1 → hcpdiff-2.2}/hcpdiff/models/wrapper/pixart.py +0 -0
  79. {hcpdiff-2.1 → hcpdiff-2.2}/hcpdiff/models/wrapper/sd.py +0 -0
  80. {hcpdiff-2.1 → hcpdiff-2.2}/hcpdiff/models/wrapper/utils.py +0 -0
  81. {hcpdiff-2.1 → hcpdiff-2.2}/hcpdiff/parser/__init__.py +0 -0
  82. {hcpdiff-2.1 → hcpdiff-2.2}/hcpdiff/parser/embpt.py +0 -0
  83. {hcpdiff-2.1 → hcpdiff-2.2}/hcpdiff/tools/__init__.py +0 -0
  84. {hcpdiff-2.1 → hcpdiff-2.2}/hcpdiff/tools/convert_caption_txt2json.py +0 -0
  85. {hcpdiff-2.1 → hcpdiff-2.2}/hcpdiff/tools/convert_old_lora.py +0 -0
  86. {hcpdiff-2.1 → hcpdiff-2.2}/hcpdiff/tools/create_embedding.py +0 -0
  87. {hcpdiff-2.1 → hcpdiff-2.2}/hcpdiff/tools/dataset_generator.py +0 -0
  88. {hcpdiff-2.1 → hcpdiff-2.2}/hcpdiff/tools/diffusers2sd.py +0 -0
  89. {hcpdiff-2.1 → hcpdiff-2.2}/hcpdiff/tools/download_hf_model.py +0 -0
  90. {hcpdiff-2.1 → hcpdiff-2.2}/hcpdiff/tools/embedding_convert.py +0 -0
  91. {hcpdiff-2.1 → hcpdiff-2.2}/hcpdiff/tools/gen_from_ptlist.py +0 -0
  92. {hcpdiff-2.1 → hcpdiff-2.2}/hcpdiff/tools/init_proj.py +0 -0
  93. {hcpdiff-2.1 → hcpdiff-2.2}/hcpdiff/tools/lora_convert.py +0 -0
  94. {hcpdiff-2.1 → hcpdiff-2.2}/hcpdiff/tools/save_model.py +0 -0
  95. {hcpdiff-2.1 → hcpdiff-2.2}/hcpdiff/tools/sd2diffusers.py +0 -0
  96. {hcpdiff-2.1 → hcpdiff-2.2}/hcpdiff/train_colo.py +0 -0
  97. {hcpdiff-2.1 → hcpdiff-2.2}/hcpdiff/train_deepspeed.py +0 -0
  98. {hcpdiff-2.1 → hcpdiff-2.2}/hcpdiff/trainer_ac.py +0 -0
  99. {hcpdiff-2.1 → hcpdiff-2.2}/hcpdiff/trainer_ac_single.py +0 -0
  100. {hcpdiff-2.1 → hcpdiff-2.2}/hcpdiff/utils/__init__.py +0 -0
  101. {hcpdiff-2.1 → hcpdiff-2.2}/hcpdiff/utils/colo_utils.py +0 -0
  102. {hcpdiff-2.1 → hcpdiff-2.2}/hcpdiff/utils/inpaint_pipe.py +0 -0
  103. {hcpdiff-2.1 → hcpdiff-2.2}/hcpdiff/utils/net_utils.py +0 -0
  104. {hcpdiff-2.1 → hcpdiff-2.2}/hcpdiff/utils/pipe_hook.py +0 -0
  105. {hcpdiff-2.1 → hcpdiff-2.2}/hcpdiff/utils/utils.py +0 -0
  106. {hcpdiff-2.1 → hcpdiff-2.2}/hcpdiff/workflow/__init__.py +0 -0
  107. {hcpdiff-2.1 → hcpdiff-2.2}/hcpdiff/workflow/daam/__init__.py +0 -0
  108. {hcpdiff-2.1 → hcpdiff-2.2}/hcpdiff/workflow/daam/act.py +0 -0
  109. {hcpdiff-2.1 → hcpdiff-2.2}/hcpdiff/workflow/daam/hook.py +0 -0
  110. {hcpdiff-2.1 → hcpdiff-2.2}/hcpdiff/workflow/fast.py +0 -0
  111. {hcpdiff-2.1 → hcpdiff-2.2}/hcpdiff/workflow/flow.py +0 -0
  112. {hcpdiff-2.1 → hcpdiff-2.2}/hcpdiff/workflow/io.py +0 -0
  113. {hcpdiff-2.1 → hcpdiff-2.2}/hcpdiff/workflow/model.py +0 -0
  114. {hcpdiff-2.1 → hcpdiff-2.2}/hcpdiff/workflow/utils.py +0 -0
  115. {hcpdiff-2.1 → hcpdiff-2.2}/hcpdiff/workflow/vae.py +0 -0
  116. {hcpdiff-2.1 → hcpdiff-2.2}/hcpdiff.egg-info/dependency_links.txt +0 -0
  117. {hcpdiff-2.1 → hcpdiff-2.2}/hcpdiff.egg-info/entry_points.txt +0 -0
  118. {hcpdiff-2.1 → hcpdiff-2.2}/hcpdiff.egg-info/requires.txt +0 -0
  119. {hcpdiff-2.1 → hcpdiff-2.2}/hcpdiff.egg-info/top_level.txt +0 -0
  120. {hcpdiff-2.1 → hcpdiff-2.2}/setup.cfg +0 -0

{hcpdiff-2.1 → hcpdiff-2.2}/PKG-INFO

@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: hcpdiff
-Version: 2.1
+Version: 2.2
 Summary: A universal Diffusion toolbox
 Home-page: https://github.com/IrisRainbowNeko/HCP-Diffusion
 Author: Ziyi Dong
@@ -65,6 +65,8 @@ Compared to the original DreamArtist, it offers better stability, image quality,
 
 ## Installation
 
+Install [pytorch](https://pytorch.org/)
+
 Install via pip:
 
 ```bash
@@ -205,6 +207,18 @@ After parsing, the framework will instantiate the components accordingly. This m
 | CCIP Score | 🚧 In Development |
 | Corrupt Score | 🚧 In Development |
 
+---
+
+### ⚡️ Image Generation
+
+| Feature | Description / Support |
+|------------------------------|------------------------------------|
+| Batch Generation | ✅ Supported |
+| Generate from Prompt Dataset | ✅ Supported |
+| Image to Image | ✅ Supported |
+| Inpaint | ✅ Supported |
+| Token Weight | ✅ Supported |
+
 </details>
 
 ---

{hcpdiff-2.1 → hcpdiff-2.2}/README.md

@@ -28,6 +28,8 @@ Compared to the original DreamArtist, it offers better stability, image quality,
 
 ## Installation
 
+Install [pytorch](https://pytorch.org/)
+
 Install via pip:
 
 ```bash
@@ -168,6 +170,18 @@ After parsing, the framework will instantiate the components accordingly. This m
 | CCIP Score | 🚧 In Development |
 | Corrupt Score | 🚧 In Development |
 
+---
+
+### ⚡️ Image Generation
+
+| Feature | Description / Support |
+|------------------------------|------------------------------------|
+| Batch Generation | ✅ Supported |
+| Generate from Prompt Dataset | ✅ Supported |
+| Image to Image | ✅ Supported |
+| Inpaint | ✅ Supported |
+| Token Weight | ✅ Supported |
+
 </details>
 
 ---

{hcpdiff-2.1 → hcpdiff-2.2}/hcpdiff/ckpt_manager/__init__.py

@@ -1,4 +1,4 @@
 from .format import EmbFormat, DiffusersSD15Format, DiffusersModelFormat, DiffusersSDXLFormat, DiffusersPixArtFormat, OfficialSDXLFormat, \
-    OfficialSD15Format
+    OfficialSD15Format, LoraWebuiFormat
 from .ckpt import EmbSaver, easy_emb_saver
 from .loader import HCPLoraLoader

{hcpdiff-2.1 → hcpdiff-2.2}/hcpdiff/ckpt_manager/format/lora_webui.py

@@ -2,7 +2,7 @@ import math
 import re
 from typing import List, Dict, Any
 
-from rainbowneko.ckpt_manager.format import CkptFormat
+from rainbowneko.ckpt_manager.format import CkptFormat, SafeTensorFormat
 from torch.serialization import FILE_LIKE
 
 class LoraConverter:
@@ -36,7 +36,12 @@ class LoraConverter:
         if auto_scale_alpha:
             sd_unet = self.alpha_scale_from_webui(sd_unet)
             sd_TE = self.alpha_scale_from_webui(sd_TE)
-        return {'plugin':sd_TE}, {'plugin':sd_unet}
+
+        sd = {
+            **{f'denoiser.{k}':v for k,v in sd_unet.items()},
+            **{f'TE.{k}':v for k,v in sd_TE.items()},
+        }
+        return {'base': sd}
 
     def convert_to_webui(self, sd_unet, sd_TE, auto_scale_alpha=False, sdxl=False):
         sd_unet = self.convert_to_webui_(sd_unet, prefix=self.prefix_unet)
@@ -207,9 +212,12 @@ class LoraConverter:
         return state
 
 class LoraWebuiFormat(CkptFormat):
-    def __init__(self, format, auto_scale_alpha=False):
+    def __init__(self, format=None, auto_scale_alpha=False):
         self.converter = LoraConverter()
         self.auto_scale_alpha = auto_scale_alpha
+
+        if format is None:
+            format = SafeTensorFormat()
         self.format = format
 
     def save_ckpt(self, sd_model: Dict[str, Any], save_f: FILE_LIKE):
@@ -240,5 +248,5 @@ class LoraWebuiFormat(CkptFormat):
                 sdxl = True
                 break
 
-        sd_TE, sd_unet = self.converter.convert_from_webui(sd_webui, auto_scale_alpha=self.auto_scale_alpha, sdxl=sdxl)
-        return sd_TE, sd_unet
+        sd_all = self.converter.convert_from_webui(sd_webui, auto_scale_alpha=self.auto_scale_alpha, sdxl=sdxl)
+        return sd_all
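
Note on the change above: `convert_from_webui` now merges the UNet and text-encoder weights into a single state dict keyed `base`, with `denoiser.` / `TE.` key prefixes, instead of returning two separate `plugin` dicts. A minimal sketch of what this looks like from the caller's side (the webui-style state dict itself is loaded elsewhere and is not part of this diff):

```python
# Sketch only: LoraWebuiFormat is now exported from hcpdiff.ckpt_manager,
# and its `format` argument defaults to SafeTensorFormat() in 2.2.
from hcpdiff.ckpt_manager import LoraWebuiFormat

fmt = LoraWebuiFormat()  # same as LoraWebuiFormat(format=SafeTensorFormat())

# Assuming sd_webui is a webui-style LoRA state dict loaded separately:
# sd_all = fmt.converter.convert_from_webui(sd_webui, auto_scale_alpha=fmt.auto_scale_alpha, sdxl=False)
# sd_all == {'base': {'denoiser.<unet key>': tensor, ..., 'TE.<text-encoder key>': tensor, ...}}
```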

{hcpdiff-2.1 → hcpdiff-2.2}/hcpdiff/data/__init__.py

@@ -1,4 +1,4 @@
 from .dataset import TextImagePairDataset
-from .source import Text2ImageSource, Text2ImageLossMapSource, Text2ImageCondSource, T2IFolderClassSource
-from .handler import StableDiffusionHandler, LossMapHandler, DiffusionImageHandler
+from .source import Text2ImageSource, Text2ImageLossMapSource, Text2ImageCondSource, T2IFolderClassSource, TextSource
+from .handler import StableDiffusionHandler, LossMapHandler, DiffusionImageHandler, DiffusionTextHandler
 from .cache import VaeCache

{hcpdiff-2.1 → hcpdiff-2.2}/hcpdiff/data/handler/__init__.py

@@ -1,3 +1,3 @@
-from .diffusion import StableDiffusionHandler, DiffusionImageHandler, LossMapHandler
+from .diffusion import StableDiffusionHandler, DiffusionImageHandler, LossMapHandler, DiffusionTextHandler
 from .text import TokenizeHandler, TagEraseHandler, TagDropoutHandler, TagShuffleHandler, TemplateFillHandler
 from .controlnet import ControlNetHandler

{hcpdiff-2.1 → hcpdiff-2.2}/hcpdiff/data/handler/diffusion.py

@@ -49,14 +49,11 @@ class DiffusionImageHandler(DataHandler):
         else:
             return self.handlers(dict(image=image, image_size=image_size))
 
-class StableDiffusionHandler(DataHandler):
-    def __init__(self, bucket, encoder_attention_mask=False, key_map_in=('image -> image', 'image_size -> image_size', 'prompt -> prompt'),
-                 key_map_out=('image -> image', 'coord -> coord', 'prompt -> prompt'),
-                 erase=0.15, dropout=0.0, shuffle=0.0, word_names={}, tokenize=True):
+class DiffusionTextHandler(DataHandler):
+    def __init__(self, encoder_attention_mask=False, erase=0.0, dropout=0.0, shuffle=0.0, word_names={}, tokenize=True,
+                 key_map_in=('prompt -> prompt', ), key_map_out=('prompt -> prompt', )):
         super().__init__(key_map_in, key_map_out)
 
-        self.image_handlers = DiffusionImageHandler(bucket)
-
         text_handlers = {}
         if dropout>0:
             text_handlers['dropout'] = TagDropoutHandler(p=dropout)
@@ -67,7 +64,20 @@ class StableDiffusionHandler(DataHandler):
             text_handlers['fill'] = TemplateFillHandler(word_names)
         if tokenize:
             text_handlers['tokenize'] = TokenizeHandler(encoder_attention_mask)
-        self.text_handlers = HandlerChain(**text_handlers)
+        self.handlers = HandlerChain(**text_handlers)
+
+    def handle(self, prompt: Union[str, Dict[str, str]]):
+        return self.handlers(dict(prompt=prompt))
+
+class StableDiffusionHandler(DataHandler):
+    def __init__(self, bucket, encoder_attention_mask=False, key_map_in=('image -> image', 'image_size -> image_size', 'prompt -> prompt'),
+                 key_map_out=('image -> image', 'coord -> coord', 'prompt -> prompt'),
+                 erase=0.0, dropout=0.0, shuffle=0.0, word_names={}, tokenize=True):
+        super().__init__(key_map_in, key_map_out)
+
+        self.image_handlers = DiffusionImageHandler(bucket)
+        self.text_handlers = DiffusionTextHandler(encoder_attention_mask=encoder_attention_mask, erase=erase, dropout=dropout, shuffle=shuffle,
+                                                  word_names=word_names, tokenize=tokenize)
 
     def handle(self, image: Image.Image, image_size: np.ndarray[int], prompt: str):
         return dict(**self.image_handlers(dict(image=image, image_size=image_size)), **self.text_handlers(dict(prompt=prompt)))
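
The text-processing half of `StableDiffusionHandler` is now split out as `DiffusionTextHandler`, so prompt-only pipelines can reuse it without a bucket or image handler. A minimal sketch of standalone use (not taken from the package docs; `tokenize=False` keeps the example free of a tokenizer dependency):

```python
# Hedged sketch: run the new DiffusionTextHandler on a bare prompt string.
from hcpdiff.data import DiffusionTextHandler

handler = DiffusionTextHandler(erase=0.1, shuffle=0.3, tokenize=False)
out = handler.handle('1girl, solo, long hair, looking at viewer')
print(out)  # expected: a dict carrying the augmented prompt under 'prompt'
```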

{hcpdiff-2.1 → hcpdiff-2.2}/hcpdiff/data/source/__init__.py

@@ -1,3 +1,4 @@
 from .text2img import Text2ImageSource, Text2ImageLossMapSource
 from .text2img_cond import Text2ImageCondSource
-from .folder_class import T2IFolderClassSource
+from .folder_class import T2IFolderClassSource
+from .text import TextSource

hcpdiff-2.2/hcpdiff/data/source/text.py (new file)

@@ -0,0 +1,40 @@
+from rainbowneko.data import UnLabelSource, DataSource
+from rainbowneko.data.label_loader import BaseLabelLoader, auto_label_loader
+from typing import Union, Dict, Any
+import random
+
+class TextSource(DataSource):
+    def __init__(self, label_file, prompt_template=None, repeat=1, **kwargs):
+        super().__init__(repeat=repeat)
+        self.label_file = label_file
+        self.label_dict = self._load_label_data(label_file)
+        self.img_ids = self._load_img_ids(self.label_dict)
+        self.prompt_template = self.load_template(prompt_template)
+
+    def _load_img_ids(self, label_dict):
+        return list(label_dict.keys()) * self.repeat
+
+    def _load_label_data(self, label_file: Union[str, BaseLabelLoader]):
+        if label_file is None:
+            return {}
+        elif isinstance(label_file, str):
+            return auto_label_loader(label_file).load()
+        else:
+            return label_file.load()
+
+    def load_template(self, template_file):
+        if template_file is None:
+            return ['{caption}']
+        else:
+            with open(template_file, 'r', encoding='utf-8') as f:
+                return f.read().strip().split('\n')
+
+    def __getitem__(self, index) -> Dict[str, Any]:
+        img_name = self.img_ids[index]
+        return {
+            'id':img_name,
+            'prompt':{
+                'template':random.choice(self.prompt_template),
+                'caption':self.label_dict[img_name],
+            }
+        }
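
`TextSource` is the data source behind the new "Generate from Prompt Dataset" entry in the README table: a label-only source with no images. A hypothetical usage sketch (`prompts.json` is a placeholder for any `{id: caption}` file that `auto_label_loader` accepts):

```python
# Hypothetical usage of the new prompt-only data source.
from hcpdiff.data import TextSource

source = TextSource(label_file='prompts.json', repeat=2)  # placeholder file name
item = source[0]
# item -> {'id': <key>, 'prompt': {'template': '{caption}', 'caption': <caption text>}}
# With prompt_template=None the template list defaults to ['{caption}'].
```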

{hcpdiff-2.1 → hcpdiff-2.2}/hcpdiff/data/source/text2img.py

@@ -25,7 +25,7 @@ class Text2ImageSource(ImageLabelSource):
 
     def __getitem__(self, index) -> Dict[str, Any]:
         img_name = self.img_ids[index]
-        path = os.path.join(self.img_root, img_name)
+        path = self.img_root / img_name
 
         return {
             'id':img_name,

{hcpdiff-2.1 → hcpdiff-2.2}/hcpdiff/easy/cfg/__init__.py

@@ -1,3 +1,3 @@
 from .sd15_train import SD15_lora_train, cfg_data_SD_ARB, cfg_data_SD_resize_crop, SD15_finetuning
 from .sdxl_train import SDXL_lora_train, SDXL_finetuning
-from .t2i import SD15_t2i, SDXL_t2i, SDXL_t2i_lora, SD15_t2i_lora
+from .t2i import SD15_t2i, SDXL_t2i, SDXL_t2i_lora, SD15_t2i_lora, SDXL_t2i_parts, SD15_t2i_parts

{hcpdiff-2.1 → hcpdiff-2.2}/hcpdiff/easy/cfg/sd15_train.py

@@ -1,9 +1,10 @@
 import torch
-from rainbowneko.ckpt_manager import ckpt_saver, LAYERS_TRAINABLE, plugin_saver
+from rainbowneko.ckpt_manager import ckpt_saver, LAYERS_TRAINABLE, NekoPluginSaver, SafeTensorFormat
 from rainbowneko.data import RatioBucket, FixedBucket
 from rainbowneko.parser import CfgWDPluginParser, neko_cfg, CfgWDModelParser, disable_neko_cfg
 from rainbowneko.utils import ConstantLR, Path_Like
 
+from hcpdiff.ckpt_manager import LoraWebuiFormat
 from hcpdiff.data import TextImagePairDataset, Text2ImageSource, StableDiffusionHandler
 from hcpdiff.data import VaeCache
 from hcpdiff.easy import SD15_auto_loader
@@ -69,7 +70,7 @@ def SD15_finetuning(base_model: str, train_steps: int, dataset, save_step: int =
 @neko_cfg
 def SD15_lora_train(base_model: str, train_steps: int, dataset, save_step: int = 200, lr: float = 1e-4, rank: int = 4, alpha: float = None,
                     clip_skip: int = 0, with_conv: bool = False, dtype: str = 'fp16', low_vram: bool = False, warmup_steps: int = 0,
-                    name: str = 'SD15'):
+                    name: str = 'SD15', save_webui_format=False):
     with disable_neko_cfg:
         if alpha is None:
             alpha = rank
@@ -95,6 +96,11 @@ def SD15_lora_train(base_model: str, train_steps: int, dataset, save_step: int =
     else:
         optimizer = torch.optim.AdamW(_partial_=True, betas=(0.9, 0.99))
 
+    if save_webui_format:
+        lora_format = LoraWebuiFormat()
+    else:
+        lora_format = SafeTensorFormat()
+
     from cfgs.train.py.examples import SD_FT
 
     return dict(
@@ -114,8 +120,8 @@ def SD15_lora_train(base_model: str, train_steps: int, dataset, save_step: int =
 
         ckpt_saver=dict(
             _replace_ = True,
-            lora_unet=plugin_saver(
-                ckpt_type='safetensors',
+            lora_unet=NekoPluginSaver(
+                format=lora_format,
                 target_plugin='lora1',
             )
         ),
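
The practical effect of the new `save_webui_format` switch is that the trained LoRA plugin is written through `LoraWebuiFormat` instead of the plain `SafeTensorFormat`. A config sketch under assumed conventions (paths are placeholders and the dataset config is built elsewhere, e.g. with `cfg_data_SD_ARB`):

```python
# Sketch of a train cfg using the new switch; base_model and dataset are placeholders.
from hcpdiff.easy.cfg import SD15_lora_train

dataset = None  # stand-in: build this with cfg_data_SD_ARB / cfg_data_SD_resize_crop

config = SD15_lora_train(
    base_model='ckpts/sd15',   # placeholder path
    train_steps=1000,
    dataset=dataset,
    rank=8,
    save_webui_format=True,    # save the LoRA as a webui-compatible file
)
```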

{hcpdiff-2.1 → hcpdiff-2.2}/hcpdiff/easy/cfg/sdxl_train.py

@@ -1,11 +1,12 @@
 import torch
-from rainbowneko.ckpt_manager import ckpt_saver, plugin_saver, LAYERS_TRAINABLE
+from rainbowneko.ckpt_manager import ckpt_saver, NekoPluginSaver, LAYERS_TRAINABLE, SafeTensorFormat
 from rainbowneko.parser import CfgWDPluginParser, neko_cfg, CfgWDModelParser, disable_neko_cfg
 from rainbowneko.utils import ConstantLR
 
 from hcpdiff.easy import SDXL_auto_loader
 from hcpdiff.models import SDXLWrapper
 from hcpdiff.models.lora_layers_patch import LoraLayer
+from hcpdiff.ckpt_manager import LoraWebuiFormat
 
 @neko_cfg
 def SDXL_finetuning(base_model: str, train_steps: int, dataset, save_step: int = 500, lr: float = 1e-5,
@@ -64,7 +65,8 @@ def SDXL_finetuning(base_model: str, train_steps: int, dataset, save_step: int =
 
 @neko_cfg
 def SDXL_lora_train(base_model: str, train_steps: int, dataset, save_step: int = 200, lr: float = 1e-4, rank: int = 4, alpha: float = None,
-                    with_conv: bool = False, dtype: str = 'fp16', low_vram: bool = False, warmup_steps: int = 0, name: str = 'SD15'):
+                    with_conv: bool = False, dtype: str = 'fp16', low_vram: bool = False, warmup_steps: int = 0, name: str = 'SDXL',
+                    save_webui_format=False):
     with disable_neko_cfg:
         if alpha is None:
             alpha = rank
@@ -90,6 +92,11 @@ def SDXL_lora_train(base_model: str, train_steps: int, dataset, save_step: int =
     else:
         optimizer = torch.optim.AdamW(_partial_=True, betas=(0.9, 0.99))
 
+    if save_webui_format:
+        lora_format = LoraWebuiFormat()
+    else:
+        lora_format = SafeTensorFormat()
+
     from cfgs.train.py.examples import SD_FT
 
     return dict(
@@ -109,8 +116,8 @@ def SDXL_lora_train(base_model: str, train_steps: int, dataset, save_step: int =
 
         ckpt_saver=dict(
             _replace_ = True,
-            lora_unet=plugin_saver(
-                ckpt_type='safetensors',
+            lora_unet=NekoPluginSaver(
+                format=lora_format,
                 target_plugin='lora1',
             )
         ),

{hcpdiff-2.1 → hcpdiff-2.2}/hcpdiff/easy/cfg/t2i.py

@@ -1,5 +1,6 @@
 import torch
 from rainbowneko.infer.workflow import (Actions, PrepareAction, LoopAction, LoadModelAction)
+from rainbowneko.ckpt_manager import NekoModelLoader
 from rainbowneko.parser import neko_cfg, disable_neko_cfg
 from typing import Union, List
 
@@ -25,6 +26,29 @@ def build_model(pretrained_model='ckpts/any5', noise_sampler=Diffusers_SD.dpmpp_
         ),
     ])
 
+@neko_cfg
+def load_parts(info: List[str]) -> Actions:
+    acts = []
+    for i, path in enumerate(info):
+        part_unet = LoadModelAction(cfg={
+            f'part_unet_{i}':NekoModelLoader(
+                path=path,
+                state_prefix='denoiser.'
+            )
+        }, key_map_in=('denoiser -> model', 'in_preview -> in_preview'))
+        part_TE = LoadModelAction(cfg={
+            f'part_TE_{i}':NekoModelLoader(
+                path=path,
+                state_prefix='TE.',
+            )
+        }, key_map_in=('TE -> model', 'in_preview -> in_preview'))
+
+        with disable_neko_cfg:
+            acts.append(part_unet)
+            acts.append(part_TE)
+
+    return Actions(acts)
+
 @neko_cfg
 def load_lora(info: List[List]) -> Actions:
     lora_acts = []
@@ -37,7 +61,7 @@ def load_lora(info: List[List]) -> Actions:
             )
         }, key_map_in=('denoiser -> model', 'in_preview -> in_preview'))
         lora_TE = LoadModelAction(cfg={
-            f'lora_unet_{i}':HCPLoraLoader(
+            f'lora_TE_{i}':HCPLoraLoader(
                 path=item[0],
                 state_prefix='TE.',
                 alpha=item[1],
@@ -59,9 +83,9 @@ def optimize_model() -> Actions:
     ])
 
 @neko_cfg
-def text(prompt, negative_prompt=negative_prompt, bs=4) -> Actions:
+def text(prompt, negative_prompt=negative_prompt, bs=4, N_repeats=1, layer_skip=1) -> Actions:
     return Actions([
-        TextHookAction(N_repeats=1, layer_skip=1),
+        TextHookAction(N_repeats=N_repeats, layer_skip=layer_skip),
         AttnMultTextEncodeAction(
             prompt=prompt,
             negative_prompt=negative_prompt,
@@ -84,9 +108,9 @@ def build_model_SDXL(pretrained_model='ckpts/any5', noise_sampler=Diffusers_SD.d
     ])
 
 @neko_cfg
-def text_SDXL(prompt, negative_prompt=negative_prompt, bs=4) -> Actions:
+def text_SDXL(prompt, negative_prompt=negative_prompt, bs=4, N_repeats=1, layer_skip=1) -> Actions:
     return Actions([
-        TextHookAction(N_repeats=1, layer_skip=1, TE_final_norm=False),
+        TextHookAction(N_repeats=N_repeats, layer_skip=layer_skip, TE_final_norm=False),
         AttnMultTextEncodeAction(
             prompt=prompt,
             negative_prompt=negative_prompt,
@@ -128,11 +152,24 @@ def resize(width=1024, height=1024):
 
 @neko_cfg
 def SD15_t2i(pretrained_model, prompt, negative_prompt=negative_prompt, noise_sampler=Diffusers_SD.dpmpp_2m_karras, bs=4, width=512, height=512,
-             seed=None, N_steps=20, guidance_scale=7.0, save_root='output_pipe/'):
+             seed=None, N_steps=20, guidance_scale=7.0, save_root='output_pipe/', N_repeats=1, layer_skip=1):
+    return dict(workflow=Actions(actions=[
+        build_model(pretrained_model=pretrained_model, noise_sampler=noise_sampler),
+        optimize_model(),
+        text(prompt=prompt, negative_prompt=negative_prompt, bs=bs, N_repeats=N_repeats, layer_skip=layer_skip),
+        config_diffusion(width=width, height=height, seed=seed, N_steps=N_steps),
+        diffusion(guidance_scale=guidance_scale),
+        decode(save_root=save_root)
+    ]))
+
+@neko_cfg
+def SD15_t2i_parts(pretrained_model, parts, prompt, negative_prompt=negative_prompt, noise_sampler=Diffusers_SD.dpmpp_2m_karras, bs=4, width=512, height=512,
+                   seed=None, N_steps=20, guidance_scale=7.0, save_root='output_pipe/', N_repeats=1, layer_skip=1):
     return dict(workflow=Actions(actions=[
         build_model(pretrained_model=pretrained_model, noise_sampler=noise_sampler),
+        load_parts(parts),
         optimize_model(),
-        text(prompt=prompt, negative_prompt=negative_prompt, bs=bs),
+        text(prompt=prompt, negative_prompt=negative_prompt, bs=bs, N_repeats=N_repeats, layer_skip=layer_skip),
         config_diffusion(width=width, height=height, seed=seed, N_steps=N_steps),
         diffusion(guidance_scale=guidance_scale),
         decode(save_root=save_root)
@@ -140,12 +177,12 @@ def SD15_t2i(pretrained_model, prompt, negative_prompt=negative_prompt, noise_sa
 
 @neko_cfg
 def SD15_t2i_lora(pretrained_model, lora_info, prompt, negative_prompt=negative_prompt, noise_sampler=Diffusers_SD.dpmpp_2m_karras, bs=4,
-                  width=512, height=512, seed=None, N_steps=20, guidance_scale=7.0, save_root='output_pipe/'):
+                  width=512, height=512, seed=None, N_steps=20, guidance_scale=7.0, save_root='output_pipe/', N_repeats=1, layer_skip=1):
     return dict(workflow=Actions(actions=[
         build_model(pretrained_model=pretrained_model, noise_sampler=noise_sampler),
         load_lora(info=lora_info),
         optimize_model(),
-        text(prompt=prompt, negative_prompt=negative_prompt, bs=bs),
+        text(prompt=prompt, negative_prompt=negative_prompt, bs=bs, N_repeats=N_repeats, layer_skip=layer_skip),
         config_diffusion(width=width, height=height, seed=seed, N_steps=N_steps),
         diffusion(guidance_scale=guidance_scale),
         decode(save_root=save_root)
@@ -153,24 +190,38 @@ def SD15_t2i_lora(pretrained_model, lora_info, prompt, negative_prompt=negative_
 
 @neko_cfg
 def SDXL_t2i(pretrained_model, prompt, negative_prompt=negative_prompt, noise_sampler=Diffusers_SD.dpmpp_2m_karras, bs=4, width=1024, height=1024,
-             seed=None, N_steps=20, guidance_scale=7.0, save_root='output_pipe/'):
+             seed=None, N_steps=20, guidance_scale=7.0, save_root='output_pipe/', N_repeats=1, layer_skip=1):
     return dict(workflow=Actions(actions=[
         build_model_SDXL(pretrained_model=pretrained_model, noise_sampler=noise_sampler),
         optimize_model(),
-        text_SDXL(prompt=prompt, negative_prompt=negative_prompt, bs=bs),
+        text_SDXL(prompt=prompt, negative_prompt=negative_prompt, bs=bs, N_repeats=N_repeats, layer_skip=layer_skip),
         config_diffusion(width=width, height=height, seed=seed, N_steps=N_steps),
         diffusion(guidance_scale=guidance_scale),
         decode(save_root=save_root)
     ]))
 
+@neko_cfg
+def SDXL_t2i_parts(pretrained_model, parts, prompt, negative_prompt=negative_prompt, noise_sampler=Diffusers_SD.dpmpp_2m_karras, bs=4, width=1024, height=1024,
+                   seed=None, N_steps=20, guidance_scale=7.0, save_root='output_pipe/', N_repeats=1, layer_skip=1):
+    return dict(workflow=Actions(actions=[
+        build_model_SDXL(pretrained_model=pretrained_model, noise_sampler=noise_sampler),
+        load_parts(parts),
+        optimize_model(),
+        text_SDXL(prompt=prompt, negative_prompt=negative_prompt, bs=bs, N_repeats=N_repeats, layer_skip=layer_skip),
+        config_diffusion(width=width, height=height, seed=seed, N_steps=N_steps),
+        diffusion(guidance_scale=guidance_scale),
+        decode(save_root=save_root)
+    ]))
+
+
 @neko_cfg
 def SDXL_t2i_lora(pretrained_model, lora_info, prompt, negative_prompt=negative_prompt, noise_sampler=Diffusers_SD.dpmpp_2m_karras, bs=4,
-                  width=1024, height=1024, seed=None, N_steps=20, guidance_scale=7.0, save_root='output_pipe/'):
+                  width=1024, height=1024, seed=None, N_steps=20, guidance_scale=7.0, save_root='output_pipe/', N_repeats=1, layer_skip=1):
     return dict(workflow=Actions(actions=[
         build_model_SDXL(pretrained_model=pretrained_model, noise_sampler=noise_sampler),
         load_lora(info=lora_info),
         optimize_model(),
-        text_SDXL(prompt=prompt, negative_prompt=negative_prompt, bs=bs),
+        text_SDXL(prompt=prompt, negative_prompt=negative_prompt, bs=bs, N_repeats=N_repeats, layer_skip=layer_skip),
         config_diffusion(width=width, height=height, seed=seed, N_steps=N_steps),
         diffusion(guidance_scale=guidance_scale),
         decode(save_root=save_root)
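
`SD15_t2i_parts` / `SDXL_t2i_parts` mirror the existing `*_t2i_lora` configs but load whole finetuned parts (checkpoints whose keys carry the `denoiser.` / `TE.` prefixes) through the new `load_parts` action. A sketch with placeholder paths:

```python
# Sketch of a text-to-image workflow cfg that loads extra finetuned parts (2.2).
from hcpdiff.easy.cfg import SD15_t2i_parts

config = SD15_t2i_parts(
    pretrained_model='ckpts/sd15',         # placeholder
    parts=['exps/sd15-part.safetensors'],  # placeholder checkpoint path(s)
    prompt='a photo of a cat',
    bs=1,
    seed=42,
)
```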

{hcpdiff-2.1 → hcpdiff-2.2}/hcpdiff/workflow/diffusion.py

@@ -32,14 +32,15 @@ class SeedAction(BasicAction):
         self.seed = seed
         self.bs = bs
 
-    def forward(self, device, gen_step=0, **states):
+    def forward(self, device, seed=None, **states):
         bs = states['prompt_embeds'].shape[0]//2 if 'prompt_embeds' in states else self.bs
-        if self.seed is None:
+        seed = seed or self.seed
+        if seed is None:
             seeds = [None]*bs
-        elif isinstance(self.seed, int):
-            seeds = list(range(self.seed+gen_step*bs, self.seed+(gen_step+1)*bs))
+        elif isinstance(seed, int):
+            seeds = list(range(seed, seed+bs))
         else:
-            seeds = self.seed
+            seeds = seed
         seeds = [s or random.randint(0, 1 << 30) for s in seeds]
 
         G = prepare_seed(seeds, device=device)
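
`SeedAction.forward` now reads the seed from the incoming workflow states (so, for example, a prompt-dataset row can carry its own seed) and expands an int seed into `bs` consecutive seeds, dropping the old `gen_step` offset. The expansion logic, reproduced standalone for illustration:

```python
# Standalone reproduction of the 2.2 seed expansion (illustration only).
import random

def expand_seeds(seed, bs):
    if seed is None:
        seeds = [None] * bs
    elif isinstance(seed, int):
        seeds = list(range(seed, seed + bs))
    else:
        seeds = seed
    return [s or random.randint(0, 1 << 30) for s in seeds]

print(expand_seeds(42, 4))  # [42, 43, 44, 45]
```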

{hcpdiff-2.1 → hcpdiff-2.2}/hcpdiff/workflow/text.py

@@ -48,18 +48,9 @@ class TextEncodeAction(BasicAction):
         self.negative_prompt = negative_prompt
         self.bs = bs
 
-    def forward(self, te_hook, TE, dtype: str, device, amp=None, gen_step=None, prompt_all=None, negative_prompt_all=None, model_offload=False,
-                **states):
-        prompt_all = prompt_all or self.prompt
-        negative_prompt_all = negative_prompt_all or self.negative_prompt
-
-        if gen_step is not None:
-            idx = (gen_step*self.bs)%len(prompt_all)
-            prompt = prompt_all[idx:idx+self.bs]
-            negative_prompt = negative_prompt_all[idx:idx+self.bs]
-        else:
-            prompt = prompt_all
-            negative_prompt = negative_prompt_all
+    def forward(self, te_hook, TE, dtype: str, device, amp=None, prompt=None, negative_prompt=None, model_offload=False, **states):
+        prompt = prompt or self.prompt
+        negative_prompt = negative_prompt or self.negative_prompt
 
         if model_offload:
             to_cuda(TE)
@@ -78,19 +69,9 @@ class TextEncodeAction(BasicAction):
                 'pooled_output':pooled_output}
 
 class AttnMultTextEncodeAction(TextEncodeAction):
-
-    def forward(self, te_hook, token_ex, TE, dtype: str, device, amp=None, gen_step=None, prompt_all=None, negative_prompt_all=None,
-                model_offload=False, **states):
-        prompt_all = prompt_all if prompt_all is not None else self.prompt
-        negative_prompt_all = negative_prompt_all if negative_prompt_all is not None else self.negative_prompt
-
-        if gen_step is not None:
-            idx = (gen_step*self.bs)%len(prompt_all)
-            prompt = prompt_all[idx:idx+self.bs]
-            negative_prompt = negative_prompt_all[idx:idx+self.bs]
-        else:
-            prompt = prompt_all
-            negative_prompt = negative_prompt_all
+    def forward(self, te_hook, token_ex, TE, dtype: str, device, amp=None, prompt=None, negative_prompt=None, model_offload=False, **states):
+        prompt = prompt or self.prompt
+        negative_prompt = negative_prompt or self.negative_prompt
 
         if model_offload:
             to_cuda(TE)

{hcpdiff-2.1 → hcpdiff-2.2}/hcpdiff.egg-info/PKG-INFO

@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: hcpdiff
-Version: 2.1
+Version: 2.2
 Summary: A universal Diffusion toolbox
 Home-page: https://github.com/IrisRainbowNeko/HCP-Diffusion
 Author: Ziyi Dong
@@ -65,6 +65,8 @@ Compared to the original DreamArtist, it offers better stability, image quality,
 
 ## Installation
 
+Install [pytorch](https://pytorch.org/)
+
 Install via pip:
 
 ```bash
@@ -205,6 +207,18 @@ After parsing, the framework will instantiate the components accordingly. This m
 | CCIP Score | 🚧 In Development |
 | Corrupt Score | 🚧 In Development |
 
+---
+
+### ⚡️ Image Generation
+
+| Feature | Description / Support |
+|------------------------------|------------------------------------|
+| Batch Generation | ✅ Supported |
+| Generate from Prompt Dataset | ✅ Supported |
+| Image to Image | ✅ Supported |
+| Inpaint | ✅ Supported |
+| Token Weight | ✅ Supported |
+
 </details>
 
 ---

{hcpdiff-2.1 → hcpdiff-2.2}/hcpdiff.egg-info/SOURCES.txt

@@ -30,6 +30,7 @@ hcpdiff/data/handler/diffusion.py
 hcpdiff/data/handler/text.py
 hcpdiff/data/source/__init__.py
 hcpdiff/data/source/folder_class.py
+hcpdiff/data/source/text.py
 hcpdiff/data/source/text2img.py
 hcpdiff/data/source/text2img_cond.py
 hcpdiff/diffusion/__init__.py

{hcpdiff-2.1 → hcpdiff-2.2}/setup.py

@@ -12,7 +12,7 @@ with open('requirements.txt', encoding='utf8') as f:
 setuptools.setup(
     name="hcpdiff",
     py_modules=["hcpdiff"],
-    version="2.1",
+    version="2.2",
     author="Ziyi Dong",
     author_email="rainbow-neko@outlook.com",
     description="A universal Diffusion toolbox",