hcpdiff 0.9.1__tar.gz → 2.2__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (250)
  1. hcpdiff-2.2/PKG-INFO +299 -0
  2. hcpdiff-2.2/README.md +262 -0
  3. hcpdiff-2.2/hcpdiff/__init__.py +4 -0
  4. hcpdiff-2.2/hcpdiff/ckpt_manager/__init__.py +4 -0
  5. hcpdiff-2.2/hcpdiff/ckpt_manager/ckpt.py +24 -0
  6. hcpdiff-2.2/hcpdiff/ckpt_manager/format/__init__.py +4 -0
  7. hcpdiff-2.2/hcpdiff/ckpt_manager/format/diffusers.py +59 -0
  8. hcpdiff-2.2/hcpdiff/ckpt_manager/format/emb.py +21 -0
  9. hcpdiff-2.2/hcpdiff/ckpt_manager/format/lora_webui.py +252 -0
  10. hcpdiff-2.2/hcpdiff/ckpt_manager/format/sd_single.py +41 -0
  11. hcpdiff-2.2/hcpdiff/ckpt_manager/loader.py +64 -0
  12. hcpdiff-2.2/hcpdiff/data/__init__.py +4 -0
  13. hcpdiff-2.2/hcpdiff/data/cache/__init__.py +1 -0
  14. hcpdiff-2.2/hcpdiff/data/cache/vae.py +102 -0
  15. hcpdiff-2.2/hcpdiff/data/dataset.py +20 -0
  16. hcpdiff-2.2/hcpdiff/data/handler/__init__.py +3 -0
  17. hcpdiff-2.2/hcpdiff/data/handler/controlnet.py +18 -0
  18. hcpdiff-2.2/hcpdiff/data/handler/diffusion.py +90 -0
  19. hcpdiff-2.2/hcpdiff/data/handler/text.py +111 -0
  20. hcpdiff-2.2/hcpdiff/data/source/__init__.py +4 -0
  21. hcpdiff-2.2/hcpdiff/data/source/folder_class.py +23 -0
  22. hcpdiff-2.2/hcpdiff/data/source/text.py +40 -0
  23. hcpdiff-2.2/hcpdiff/data/source/text2img.py +53 -0
  24. hcpdiff-2.2/hcpdiff/data/source/text2img_cond.py +16 -0
  25. hcpdiff-2.2/hcpdiff/diffusion/noise/__init__.py +2 -0
  26. hcpdiff-2.2/hcpdiff/diffusion/noise/pyramid_noise.py +42 -0
  27. hcpdiff-2.2/hcpdiff/diffusion/noise/zero_terminal.py +39 -0
  28. hcpdiff-2.2/hcpdiff/diffusion/sampler/__init__.py +5 -0
  29. hcpdiff-2.2/hcpdiff/diffusion/sampler/base.py +72 -0
  30. hcpdiff-2.2/hcpdiff/diffusion/sampler/ddpm.py +20 -0
  31. hcpdiff-2.2/hcpdiff/diffusion/sampler/diffusers.py +66 -0
  32. hcpdiff-2.2/hcpdiff/diffusion/sampler/edm.py +22 -0
  33. hcpdiff-2.2/hcpdiff/diffusion/sampler/sigma_scheduler/__init__.py +3 -0
  34. hcpdiff-2.2/hcpdiff/diffusion/sampler/sigma_scheduler/base.py +14 -0
  35. hcpdiff-2.2/hcpdiff/diffusion/sampler/sigma_scheduler/ddpm.py +197 -0
  36. hcpdiff-2.2/hcpdiff/diffusion/sampler/sigma_scheduler/edm.py +48 -0
  37. hcpdiff-2.2/hcpdiff/easy/__init__.py +2 -0
  38. hcpdiff-2.2/hcpdiff/easy/cfg/__init__.py +3 -0
  39. hcpdiff-2.2/hcpdiff/easy/cfg/sd15_train.py +207 -0
  40. hcpdiff-2.2/hcpdiff/easy/cfg/sdxl_train.py +147 -0
  41. hcpdiff-2.2/hcpdiff/easy/cfg/t2i.py +228 -0
  42. hcpdiff-2.2/hcpdiff/easy/model/__init__.py +2 -0
  43. hcpdiff-2.2/hcpdiff/easy/model/cnet.py +31 -0
  44. hcpdiff-2.2/hcpdiff/easy/model/loader.py +79 -0
  45. hcpdiff-2.2/hcpdiff/easy/sampler.py +46 -0
  46. hcpdiff-2.2/hcpdiff/evaluate/__init__.py +1 -0
  47. hcpdiff-2.2/hcpdiff/evaluate/previewer.py +60 -0
  48. hcpdiff-2.2/hcpdiff/loss/__init__.py +4 -0
  49. hcpdiff-2.2/hcpdiff/loss/base.py +41 -0
  50. hcpdiff-2.2/hcpdiff/loss/gw.py +35 -0
  51. hcpdiff-2.2/hcpdiff/loss/ssim.py +37 -0
  52. hcpdiff-2.2/hcpdiff/loss/vlb.py +79 -0
  53. hcpdiff-2.2/hcpdiff/loss/weighting.py +66 -0
  54. {hcpdiff-0.9.1 → hcpdiff-2.2}/hcpdiff/models/__init__.py +2 -2
  55. hcpdiff-2.2/hcpdiff/models/cfg_context.py +42 -0
  56. {hcpdiff-0.9.1 → hcpdiff-2.2}/hcpdiff/models/compose/compose_hook.py +44 -23
  57. {hcpdiff-0.9.1 → hcpdiff-2.2}/hcpdiff/models/compose/compose_tokenizer.py +21 -8
  58. {hcpdiff-0.9.1 → hcpdiff-2.2}/hcpdiff/models/compose/sdxl_composer.py +4 -4
  59. {hcpdiff-0.9.1 → hcpdiff-2.2}/hcpdiff/models/controlnet.py +16 -16
  60. {hcpdiff-0.9.1 → hcpdiff-2.2}/hcpdiff/models/lora_base_patch.py +14 -25
  61. {hcpdiff-0.9.1 → hcpdiff-2.2}/hcpdiff/models/lora_layers.py +3 -9
  62. {hcpdiff-0.9.1 → hcpdiff-2.2}/hcpdiff/models/lora_layers_patch.py +14 -24
  63. hcpdiff-2.2/hcpdiff/models/text_emb_ex.py +172 -0
  64. {hcpdiff-0.9.1 → hcpdiff-2.2}/hcpdiff/models/textencoder_ex.py +54 -18
  65. hcpdiff-2.2/hcpdiff/models/wrapper/__init__.py +3 -0
  66. hcpdiff-2.2/hcpdiff/models/wrapper/pixart.py +19 -0
  67. hcpdiff-2.2/hcpdiff/models/wrapper/sd.py +218 -0
  68. hcpdiff-2.2/hcpdiff/models/wrapper/utils.py +20 -0
  69. hcpdiff-2.2/hcpdiff/parser/__init__.py +1 -0
  70. hcpdiff-2.2/hcpdiff/parser/embpt.py +32 -0
  71. hcpdiff-2.2/hcpdiff/tools/__init__.py +0 -0
  72. {hcpdiff-0.9.1 → hcpdiff-2.2}/hcpdiff/tools/convert_caption_txt2json.py +1 -1
  73. hcpdiff-2.2/hcpdiff/tools/dataset_generator.py +94 -0
  74. hcpdiff-2.2/hcpdiff/tools/download_hf_model.py +24 -0
  75. hcpdiff-2.2/hcpdiff/tools/init_proj.py +5 -0
  76. {hcpdiff-0.9.1 → hcpdiff-2.2}/hcpdiff/tools/lora_convert.py +18 -17
  77. hcpdiff-2.2/hcpdiff/tools/save_model.py +12 -0
  78. {hcpdiff-0.9.1 → hcpdiff-2.2}/hcpdiff/tools/sd2diffusers.py +1 -1
  79. {hcpdiff-0.9.1 → hcpdiff-2.2}/hcpdiff/train_colo.py +1 -1
  80. {hcpdiff-0.9.1 → hcpdiff-2.2}/hcpdiff/train_deepspeed.py +1 -1
  81. hcpdiff-2.2/hcpdiff/trainer_ac.py +79 -0
  82. hcpdiff-2.2/hcpdiff/trainer_ac_single.py +31 -0
  83. hcpdiff-2.2/hcpdiff/utils/__init__.py +2 -0
  84. {hcpdiff-0.9.1 → hcpdiff-2.2}/hcpdiff/utils/inpaint_pipe.py +7 -2
  85. {hcpdiff-0.9.1 → hcpdiff-2.2}/hcpdiff/utils/net_utils.py +29 -6
  86. {hcpdiff-0.9.1 → hcpdiff-2.2}/hcpdiff/utils/pipe_hook.py +24 -7
  87. {hcpdiff-0.9.1 → hcpdiff-2.2}/hcpdiff/utils/utils.py +21 -4
  88. hcpdiff-2.2/hcpdiff/workflow/__init__.py +20 -0
  89. hcpdiff-2.2/hcpdiff/workflow/daam/__init__.py +1 -0
  90. hcpdiff-2.2/hcpdiff/workflow/daam/act.py +66 -0
  91. hcpdiff-2.2/hcpdiff/workflow/daam/hook.py +109 -0
  92. hcpdiff-2.2/hcpdiff/workflow/diffusion.py +199 -0
  93. hcpdiff-2.2/hcpdiff/workflow/fast.py +31 -0
  94. hcpdiff-2.2/hcpdiff/workflow/flow.py +67 -0
  95. hcpdiff-2.2/hcpdiff/workflow/io.py +56 -0
  96. hcpdiff-2.2/hcpdiff/workflow/model.py +70 -0
  97. hcpdiff-2.2/hcpdiff/workflow/text.py +93 -0
  98. hcpdiff-2.2/hcpdiff/workflow/utils.py +53 -0
  99. hcpdiff-2.2/hcpdiff/workflow/vae.py +72 -0
  100. hcpdiff-2.2/hcpdiff.egg-info/PKG-INFO +299 -0
  101. hcpdiff-2.2/hcpdiff.egg-info/SOURCES.txt +118 -0
  102. hcpdiff-2.2/hcpdiff.egg-info/entry_points.txt +5 -0
  103. hcpdiff-2.2/hcpdiff.egg-info/requires.txt +7 -0
  104. {hcpdiff-0.9.1 → hcpdiff-2.2}/setup.py +12 -27
  105. hcpdiff-0.9.1/PKG-INFO +0 -199
  106. hcpdiff-0.9.1/README.md +0 -158
  107. hcpdiff-0.9.1/cfgs/infer/anime/text2img_anime.yaml +0 -21
  108. hcpdiff-0.9.1/cfgs/infer/anime/text2img_anime_lora.yaml +0 -58
  109. hcpdiff-0.9.1/cfgs/infer/change_vae.yaml +0 -6
  110. hcpdiff-0.9.1/cfgs/infer/euler_a.yaml +0 -8
  111. hcpdiff-0.9.1/cfgs/infer/img2img.yaml +0 -10
  112. hcpdiff-0.9.1/cfgs/infer/img2img_controlnet.yaml +0 -19
  113. hcpdiff-0.9.1/cfgs/infer/inpaint.yaml +0 -11
  114. hcpdiff-0.9.1/cfgs/infer/load_lora.yaml +0 -26
  115. hcpdiff-0.9.1/cfgs/infer/load_unet_part.yaml +0 -18
  116. hcpdiff-0.9.1/cfgs/infer/offload_2GB.yaml +0 -6
  117. hcpdiff-0.9.1/cfgs/infer/save_model.yaml +0 -44
  118. hcpdiff-0.9.1/cfgs/infer/text2img.yaml +0 -53
  119. hcpdiff-0.9.1/cfgs/infer/text2img_DA++.yaml +0 -34
  120. hcpdiff-0.9.1/cfgs/infer/text2img_sdxl.yaml +0 -9
  121. hcpdiff-0.9.1/cfgs/plugins/plugin_controlnet.yaml +0 -17
  122. hcpdiff-0.9.1/cfgs/te_struct.txt +0 -193
  123. hcpdiff-0.9.1/cfgs/train/dataset/base_dataset.yaml +0 -29
  124. hcpdiff-0.9.1/cfgs/train/dataset/regularization_dataset.yaml +0 -31
  125. hcpdiff-0.9.1/cfgs/train/examples/CustomDiffusion.yaml +0 -74
  126. hcpdiff-0.9.1/cfgs/train/examples/DreamArtist++.yaml +0 -135
  127. hcpdiff-0.9.1/cfgs/train/examples/DreamArtist.yaml +0 -45
  128. hcpdiff-0.9.1/cfgs/train/examples/DreamBooth.yaml +0 -62
  129. hcpdiff-0.9.1/cfgs/train/examples/FT_sdxl.yaml +0 -33
  130. hcpdiff-0.9.1/cfgs/train/examples/Lion_optimizer.yaml +0 -17
  131. hcpdiff-0.9.1/cfgs/train/examples/TextualInversion.yaml +0 -41
  132. hcpdiff-0.9.1/cfgs/train/examples/add_logger_tensorboard_wandb.yaml +0 -15
  133. hcpdiff-0.9.1/cfgs/train/examples/controlnet.yaml +0 -53
  134. hcpdiff-0.9.1/cfgs/train/examples/ema.yaml +0 -10
  135. hcpdiff-0.9.1/cfgs/train/examples/fine-tuning.yaml +0 -53
  136. hcpdiff-0.9.1/cfgs/train/examples/locon.yaml +0 -24
  137. hcpdiff-0.9.1/cfgs/train/examples/lora_anime_character.yaml +0 -77
  138. hcpdiff-0.9.1/cfgs/train/examples/lora_conventional.yaml +0 -56
  139. hcpdiff-0.9.1/cfgs/train/examples/lora_sdxl.yaml +0 -41
  140. hcpdiff-0.9.1/cfgs/train/examples/min_snr.yaml +0 -7
  141. hcpdiff-0.9.1/cfgs/train/examples/preview_in_training.yaml +0 -6
  142. hcpdiff-0.9.1/cfgs/train/examples_noob/DreamBooth.yaml +0 -70
  143. hcpdiff-0.9.1/cfgs/train/examples_noob/TextualInversion.yaml +0 -45
  144. hcpdiff-0.9.1/cfgs/train/examples_noob/fine-tuning.yaml +0 -45
  145. hcpdiff-0.9.1/cfgs/train/examples_noob/lora.yaml +0 -63
  146. hcpdiff-0.9.1/cfgs/train/train_base.yaml +0 -81
  147. hcpdiff-0.9.1/cfgs/train/tuning_base.yaml +0 -42
  148. hcpdiff-0.9.1/cfgs/unet_struct.txt +0 -932
  149. hcpdiff-0.9.1/cfgs/workflow/highres_fix_latent.yaml +0 -86
  150. hcpdiff-0.9.1/cfgs/workflow/highres_fix_pixel.yaml +0 -99
  151. hcpdiff-0.9.1/cfgs/workflow/text2img.yaml +0 -59
  152. hcpdiff-0.9.1/cfgs/workflow/text2img_lora.yaml +0 -70
  153. hcpdiff-0.9.1/cfgs/zero2.json +0 -32
  154. hcpdiff-0.9.1/cfgs/zero3.json +0 -39
  155. hcpdiff-0.9.1/hcpdiff/__init__.py +0 -4
  156. hcpdiff-0.9.1/hcpdiff/ckpt_manager/__init__.py +0 -5
  157. hcpdiff-0.9.1/hcpdiff/ckpt_manager/base.py +0 -16
  158. hcpdiff-0.9.1/hcpdiff/ckpt_manager/ckpt_diffusers.py +0 -45
  159. hcpdiff-0.9.1/hcpdiff/ckpt_manager/ckpt_pkl.py +0 -138
  160. hcpdiff-0.9.1/hcpdiff/ckpt_manager/ckpt_safetensor.py +0 -64
  161. hcpdiff-0.9.1/hcpdiff/ckpt_manager/ckpt_webui.py +0 -54
  162. hcpdiff-0.9.1/hcpdiff/data/__init__.py +0 -28
  163. hcpdiff-0.9.1/hcpdiff/data/bucket.py +0 -358
  164. hcpdiff-0.9.1/hcpdiff/data/caption_loader.py +0 -80
  165. hcpdiff-0.9.1/hcpdiff/data/cond_dataset.py +0 -40
  166. hcpdiff-0.9.1/hcpdiff/data/crop_info_dataset.py +0 -40
  167. hcpdiff-0.9.1/hcpdiff/data/data_processor.py +0 -33
  168. hcpdiff-0.9.1/hcpdiff/data/pair_dataset.py +0 -146
  169. hcpdiff-0.9.1/hcpdiff/data/sampler.py +0 -54
  170. hcpdiff-0.9.1/hcpdiff/data/source/__init__.py +0 -4
  171. hcpdiff-0.9.1/hcpdiff/data/source/base.py +0 -30
  172. hcpdiff-0.9.1/hcpdiff/data/source/folder_class.py +0 -40
  173. hcpdiff-0.9.1/hcpdiff/data/source/text2img.py +0 -91
  174. hcpdiff-0.9.1/hcpdiff/data/source/text2img_cond.py +0 -22
  175. hcpdiff-0.9.1/hcpdiff/data/utils.py +0 -80
  176. hcpdiff-0.9.1/hcpdiff/deprecated/__init__.py +0 -1
  177. hcpdiff-0.9.1/hcpdiff/deprecated/cfg_converter.py +0 -81
  178. hcpdiff-0.9.1/hcpdiff/deprecated/lora_convert.py +0 -31
  179. hcpdiff-0.9.1/hcpdiff/infer_workflow.py +0 -57
  180. hcpdiff-0.9.1/hcpdiff/loggers/__init__.py +0 -13
  181. hcpdiff-0.9.1/hcpdiff/loggers/base_logger.py +0 -76
  182. hcpdiff-0.9.1/hcpdiff/loggers/cli_logger.py +0 -40
  183. hcpdiff-0.9.1/hcpdiff/loggers/preview/__init__.py +0 -1
  184. hcpdiff-0.9.1/hcpdiff/loggers/preview/image_previewer.py +0 -149
  185. hcpdiff-0.9.1/hcpdiff/loggers/tensorboard_logger.py +0 -30
  186. hcpdiff-0.9.1/hcpdiff/loggers/wandb_logger.py +0 -31
  187. hcpdiff-0.9.1/hcpdiff/loggers/webui_logger.py +0 -9
  188. hcpdiff-0.9.1/hcpdiff/loss/__init__.py +0 -1
  189. hcpdiff-0.9.1/hcpdiff/loss/min_snr_loss.py +0 -52
  190. hcpdiff-0.9.1/hcpdiff/models/cfg_context.py +0 -39
  191. hcpdiff-0.9.1/hcpdiff/models/layers.py +0 -81
  192. hcpdiff-0.9.1/hcpdiff/models/plugin.py +0 -348
  193. hcpdiff-0.9.1/hcpdiff/models/text_emb_ex.py +0 -94
  194. hcpdiff-0.9.1/hcpdiff/models/wrapper.py +0 -75
  195. hcpdiff-0.9.1/hcpdiff/noise/__init__.py +0 -3
  196. hcpdiff-0.9.1/hcpdiff/noise/noise_base.py +0 -16
  197. hcpdiff-0.9.1/hcpdiff/noise/pyramid_noise.py +0 -50
  198. hcpdiff-0.9.1/hcpdiff/noise/zero_terminal.py +0 -44
  199. hcpdiff-0.9.1/hcpdiff/tools/init_proj.py +0 -23
  200. hcpdiff-0.9.1/hcpdiff/train_ac.py +0 -566
  201. hcpdiff-0.9.1/hcpdiff/train_ac_single.py +0 -39
  202. hcpdiff-0.9.1/hcpdiff/utils/__init__.py +0 -4
  203. hcpdiff-0.9.1/hcpdiff/utils/caption_tools.py +0 -105
  204. hcpdiff-0.9.1/hcpdiff/utils/cfg_net_tools.py +0 -321
  205. hcpdiff-0.9.1/hcpdiff/utils/cfg_resolvers.py +0 -16
  206. hcpdiff-0.9.1/hcpdiff/utils/ema.py +0 -52
  207. hcpdiff-0.9.1/hcpdiff/utils/img_size_tool.py +0 -248
  208. hcpdiff-0.9.1/hcpdiff/vis/__init__.py +0 -3
  209. hcpdiff-0.9.1/hcpdiff/vis/base_interface.py +0 -12
  210. hcpdiff-0.9.1/hcpdiff/vis/disk_interface.py +0 -48
  211. hcpdiff-0.9.1/hcpdiff/vis/webui_interface.py +0 -17
  212. hcpdiff-0.9.1/hcpdiff/viser_fast.py +0 -138
  213. hcpdiff-0.9.1/hcpdiff/visualizer.py +0 -265
  214. hcpdiff-0.9.1/hcpdiff/visualizer_reloadable.py +0 -237
  215. hcpdiff-0.9.1/hcpdiff/workflow/__init__.py +0 -15
  216. hcpdiff-0.9.1/hcpdiff/workflow/base.py +0 -59
  217. hcpdiff-0.9.1/hcpdiff/workflow/diffusion.py +0 -209
  218. hcpdiff-0.9.1/hcpdiff/workflow/io.py +0 -150
  219. hcpdiff-0.9.1/hcpdiff/workflow/model.py +0 -67
  220. hcpdiff-0.9.1/hcpdiff/workflow/text.py +0 -80
  221. hcpdiff-0.9.1/hcpdiff/workflow/utils.py +0 -33
  222. hcpdiff-0.9.1/hcpdiff/workflow/vae.py +0 -73
  223. hcpdiff-0.9.1/hcpdiff.egg-info/PKG-INFO +0 -199
  224. hcpdiff-0.9.1/hcpdiff.egg-info/SOURCES.txt +0 -163
  225. hcpdiff-0.9.1/hcpdiff.egg-info/entry_points.txt +0 -2
  226. hcpdiff-0.9.1/hcpdiff.egg-info/requires.txt +0 -22
  227. hcpdiff-0.9.1/prompt_tuning_template/caption.txt +0 -1
  228. hcpdiff-0.9.1/prompt_tuning_template/name.txt +0 -1
  229. hcpdiff-0.9.1/prompt_tuning_template/name_2pt_caption.txt +0 -1
  230. hcpdiff-0.9.1/prompt_tuning_template/name_caption.txt +0 -1
  231. hcpdiff-0.9.1/prompt_tuning_template/object.txt +0 -27
  232. hcpdiff-0.9.1/prompt_tuning_template/object_caption.txt +0 -27
  233. hcpdiff-0.9.1/prompt_tuning_template/style.txt +0 -19
  234. hcpdiff-0.9.1/prompt_tuning_template/style_caption.txt +0 -19
  235. {hcpdiff-0.9.1 → hcpdiff-2.2}/LICENSE +0 -0
  236. {hcpdiff-0.9.1/hcpdiff/tools → hcpdiff-2.2/hcpdiff/diffusion}/__init__.py +0 -0
  237. {hcpdiff-0.9.1 → hcpdiff-2.2}/hcpdiff/models/compose/__init__.py +0 -0
  238. {hcpdiff-0.9.1 → hcpdiff-2.2}/hcpdiff/models/compose/compose_textencoder.py +0 -0
  239. {hcpdiff-0.9.1 → hcpdiff-2.2}/hcpdiff/models/container.py +0 -0
  240. {hcpdiff-0.9.1 → hcpdiff-2.2}/hcpdiff/models/lora_base.py +0 -0
  241. {hcpdiff-0.9.1 → hcpdiff-2.2}/hcpdiff/models/tokenizer_ex.py +0 -0
  242. {hcpdiff-0.9.1 → hcpdiff-2.2}/hcpdiff/tools/convert_old_lora.py +0 -0
  243. {hcpdiff-0.9.1 → hcpdiff-2.2}/hcpdiff/tools/create_embedding.py +0 -0
  244. {hcpdiff-0.9.1 → hcpdiff-2.2}/hcpdiff/tools/diffusers2sd.py +0 -0
  245. {hcpdiff-0.9.1 → hcpdiff-2.2}/hcpdiff/tools/embedding_convert.py +0 -0
  246. {hcpdiff-0.9.1 → hcpdiff-2.2}/hcpdiff/tools/gen_from_ptlist.py +0 -0
  247. {hcpdiff-0.9.1 → hcpdiff-2.2}/hcpdiff/utils/colo_utils.py +0 -0
  248. {hcpdiff-0.9.1 → hcpdiff-2.2}/hcpdiff.egg-info/dependency_links.txt +0 -0
  249. {hcpdiff-0.9.1 → hcpdiff-2.2}/hcpdiff.egg-info/top_level.txt +0 -0
  250. {hcpdiff-0.9.1 → hcpdiff-2.2}/setup.cfg +0 -0
hcpdiff-2.2/PKG-INFO ADDED
@@ -0,0 +1,299 @@
+ Metadata-Version: 2.4
+ Name: hcpdiff
+ Version: 2.2
+ Summary: A universal Diffusion toolbox
+ Home-page: https://github.com/IrisRainbowNeko/HCP-Diffusion
+ Author: Ziyi Dong
+ Author-email: rainbow-neko@outlook.com
+ Classifier: License :: OSI Approved :: Apache Software License
+ Classifier: Operating System :: OS Independent
+ Classifier: Programming Language :: Python :: 3
+ Classifier: Programming Language :: Python :: 3.8
+ Classifier: Programming Language :: Python :: 3.9
+ Classifier: Programming Language :: Python :: 3.10
+ Classifier: Programming Language :: Python :: 3.11
+ Classifier: Programming Language :: Python :: 3.12
+ Classifier: Topic :: Scientific/Engineering :: Artificial Intelligence
+ Requires-Python: >=3.8
+ Description-Content-Type: text/markdown
+ License-File: LICENSE
+ Requires-Dist: rainbowneko
+ Requires-Dist: diffusers
+ Requires-Dist: matplotlib
+ Requires-Dist: pyarrow
+ Requires-Dist: transformers>=4.25.1
+ Requires-Dist: pytorch-msssim
+ Requires-Dist: lmdb
+ Dynamic: author
+ Dynamic: author-email
+ Dynamic: classifier
+ Dynamic: description
+ Dynamic: description-content-type
+ Dynamic: home-page
+ Dynamic: license-file
+ Dynamic: requires-dist
+ Dynamic: requires-python
+ Dynamic: summary
+
+ # HCP-Diffusion V2
+
+ [![PyPI](https://img.shields.io/pypi/v/hcpdiff)](https://pypi.org/project/hcpdiff/)
+ [![GitHub stars](https://img.shields.io/github/stars/7eu7d7/HCP-Diffusion)](https://github.com/7eu7d7/HCP-Diffusion/stargazers)
+ [![GitHub license](https://img.shields.io/github/license/7eu7d7/HCP-Diffusion)](https://github.com/7eu7d7/HCP-Diffusion/blob/master/LICENSE)
+ [![codecov](https://codecov.io/gh/7eu7d7/HCP-Diffusion/branch/main/graph/badge.svg)](https://codecov.io/gh/7eu7d7/HCP-Diffusion)
+ [![open issues](https://isitmaintained.com/badge/open/7eu7d7/HCP-Diffusion.svg)](https://github.com/7eu7d7/HCP-Diffusion/issues)
+
+ [📘Chinese README](./README_cn.md)
+
+ [📘English documentation](https://hcpdiff.readthedocs.io/en/latest/)
+ [📘Chinese documentation](https://hcpdiff.readthedocs.io/zh_CN/latest/)
+
+ The old HCP-Diffusion V1 is available on the [main branch](https://github.com/IrisRainbowNeko/HCP-Diffusion/tree/main).
+
+ ## Introduction
+
+ **HCP-Diffusion** is a Diffusion model toolbox built on top of the [🐱 RainbowNeko Engine](https://github.com/IrisRainbowNeko/RainbowNekoEngine).
+ It features a clean code structure and a flexible **Python-based configuration file**, making it easier to conduct and manage complex experiments. It includes a wide variety of training components and, compared to existing frameworks, is more extensible, flexible, and user-friendly.
+
+ HCP-Diffusion lets you use a single `.py` config file to unify training workflows across popular methods and model architectures, including Prompt-tuning (Textual Inversion), DreamArtist, Fine-tuning, DreamBooth, LoRA, and ControlNet.
+ Different techniques can also be freely combined.
+
+ This framework also implements **DreamArtist++**, an upgraded version of DreamArtist based on LoRA. It achieves high generalization and controllability from just a single training image.
+ Compared to the original DreamArtist, it offers better stability, image quality, and controllability, and trains faster.
+
+ ---
+
+ ## Installation
+
+ Install [PyTorch](https://pytorch.org/) first.
+
+ Install via pip:
+
+ ```bash
+ pip install hcpdiff
+ # Initialize configuration
+ hcpinit
+ ```
+
+ Install from source:
+
+ ```bash
+ git clone https://github.com/7eu7d7/HCP-Diffusion.git
+ cd HCP-Diffusion
+ pip install -e .
+ # Initialize configuration
+ hcpinit
+ ```
+
+ Use xFormers to reduce memory usage and accelerate training:
+
+ ```bash
+ # Choose the appropriate xformers version for your PyTorch version
+ pip install xformers==?
+ ```
+
+ ## 🚀 Python Configuration Files
+ RainbowNeko Engine supports configuration files written in a Python-like syntax. This lets users call functions and classes directly within the configuration file, with function parameters inheritable from parent configuration files. The framework automatically handles the formatting of these configuration files.
+
+ For example, consider the following configuration file:
+ ```python
+ dict(
+     layer=Linear(in_features=4, out_features=4)
+ )
+ ```
+ During parsing, this is automatically compiled into:
+ ```python
+ dict(
+     layer=dict(_target_=Linear, in_features=4, out_features=4)
+ )
+ ```
+ After parsing, the framework instantiates the components accordingly. This means users can write configuration files using familiar Python syntax.
+
+ ---
+
+ ## ✨ Features
+
+ <details>
+ <summary>Features</summary>
+
+ ### 📦 Model Support
+
+ | Model Name | Status |
+ |--------------------------|-------------|
+ | Stable Diffusion 1.5 | ✅ Supported |
+ | Stable Diffusion XL (SDXL)| ✅ Supported |
+ | PixArt | ✅ Supported |
+ | FLUX | 🚧 In Development |
+ | Stable Diffusion 3 (SD3) | 🚧 In Development |
+
+ ---
+
+ ### 🧠 Fine-Tuning Capabilities
+
+ | Feature | Description/Support |
+ |----------------------------------|---------------------|
+ | LoRA Layer-wise Configuration | ✅ Supported (including Conv2d) |
+ | Layer-wise Fine-Tuning | ✅ Supported |
+ | Multi-token Prompt-Tuning | ✅ Supported |
+ | Layer-wise Model Merging | ✅ Supported |
+ | Custom Optimizers | ✅ Supported (Lion, DAdaptation, pytorch-optimizer, etc.) |
+ | Custom LR Schedulers | ✅ Supported |
+
+ ---
+
+ ### 🧩 Extension Method Support
+
+ | Method | Status |
+ |--------------------------------|-------------|
+ | ControlNet (including training)| ✅ Supported |
+ | DreamArtist / DreamArtist++ | ✅ Supported |
+ | Token Attention Adjustment | ✅ Supported |
+ | Max Sentence Length Extension | ✅ Supported |
+ | Textual Inversion (Custom Tokens)| ✅ Supported |
+ | CLIP Skip | ✅ Supported |
+
+ ---
+
+ ### 🚀 Training Acceleration
+
+ | Tool/Library | Supported Modules |
+ |---------------------------------------------------|---------------------------|
+ | [🤗 Accelerate](https://github.com/huggingface/accelerate) | ✅ Supported |
+ | [Colossal-AI](https://github.com/hpcaitech/ColossalAI) | ✅ Supported |
+ | [xFormers](https://github.com/facebookresearch/xformers) | ✅ Supported (UNet and text encoder) |
+
+ ---
+
+ ### 🗂 Dataset Support
+
+ | Feature | Description |
+ |----------------------------------|-------------|
+ | Aspect Ratio Bucket (ARB) | ✅ Auto-clustering supported |
+ | Multi-source / Multi-dataset | ✅ Supported |
+ | LMDB | ✅ Supported |
+ | webdataset | 🚧 In Development |
+ | Local Attention Enhancement | ✅ Supported |
+ | Tag Shuffling & Dropout | ✅ Multiple tag editing strategies |
+
+ ---
+
+ ### 📉 Supported Loss Functions
+
+ | Loss Type | Description |
+ |------------|-------------|
+ | Min-SNR | ✅ Supported |
+ | SSIM | ✅ Supported |
+ | GWLoss | ✅ Supported |
+
+ ---
+
+ ### 🌫 Supported Diffusion Strategies
+
+ | Strategy Type | Status |
+ |------------------|--------------|
+ | DDPM | ✅ Supported |
+ | EDM | ✅ Supported |
+ | Flow Matching | ✅ Supported |
+
+ ---
+
+ ### 🧠 Automatic Evaluation (Step Selection Assistant)
+
+ | Feature | Description/Status |
+ |------------------|------------------------------------------|
+ | Image Preview | ✅ Supported (workflow preview) |
+ | FID | 🚧 In Development |
+ | CLIP Score | 🚧 In Development |
+ | CCIP Score | 🚧 In Development |
+ | Corrupt Score | 🚧 In Development |
+
+ ---
+
+ ### ⚡️ Image Generation
+
+ | Feature | Description/Support |
+ |------------------------------|------------------------------------|
+ | Batch Generation | ✅ Supported |
+ | Generate from Prompt Dataset | ✅ Supported |
+ | Image to Image | ✅ Supported |
+ | Inpaint | ✅ Supported |
+ | Token Weight | ✅ Supported |
+
+ </details>
+
+ ---
+
+ ## Getting Started
+
+ ### Training
+
+ HCP-Diffusion provides training scripts based on 🤗 Accelerate.
+
+ ```bash
+ # Multi-GPU training, configure GPUs in cfgs/launcher/multi.yaml
+ hcp_train --cfg cfgs/train/py/your_config.py
+
+ # Single-GPU training, configure GPU in cfgs/launcher/single.yaml
+ hcp_train_1gpu --cfg cfgs/train/py/your_config.py
+ ```
+
+ You can also override config items via the command line:
+
+ ```bash
+ # Override base model path
+ hcp_train --cfg cfgs/train/py/your_config.py model.wrapper.models.ckpt_path=pretrained_model_path
+ ```
+
+ ### Image Generation
+
+ Use the workflow defined in the Python config to generate images:
+
+ ```bash
+ hcp_run --cfg cfgs/workflow/text2img.py
+ ```
+
+ Or override parameters via the command line:
+
+ ```bash
+ hcp_run --cfg cfgs/workflow/text2img_cli.py \
+     pretrained_model=pretrained_model_path \
+     prompt='positive_prompt' \
+     negative_prompt='negative_prompt' \
+     seed=42
+ ```
+
+ ### Tutorials
+
+ 🚧 In Development
+
+ ---
+
+ ## Contributing
+
+ We welcome contributions to support more models and features.
+
+ ---
+
+ ## Team
+
+ Maintained by [HCP-Lab at Sun Yat-sen University](https://www.sysu-hcp.net/).
+
+ ---
+
+ ## Citation
+
+ ```bibtex
+ @article{DBLP:journals/corr/abs-2211-11337,
+   author     = {Ziyi Dong and
+                 Pengxu Wei and
+                 Liang Lin},
+   title      = {DreamArtist: Towards Controllable One-Shot Text-to-Image Generation
+                 via Positive-Negative Prompt-Tuning},
+   journal    = {CoRR},
+   volume     = {abs/2211.11337},
+   year       = {2022},
+   doi        = {10.48550/arXiv.2211.11337},
+   eprinttype = {arXiv},
+   eprint     = {2211.11337},
+ }
+ ```
hcpdiff-2.2/README.md ADDED
@@ -0,0 +1,262 @@
+ # HCP-Diffusion V2
+
+ [![PyPI](https://img.shields.io/pypi/v/hcpdiff)](https://pypi.org/project/hcpdiff/)
+ [![GitHub stars](https://img.shields.io/github/stars/7eu7d7/HCP-Diffusion)](https://github.com/7eu7d7/HCP-Diffusion/stargazers)
+ [![GitHub license](https://img.shields.io/github/license/7eu7d7/HCP-Diffusion)](https://github.com/7eu7d7/HCP-Diffusion/blob/master/LICENSE)
+ [![codecov](https://codecov.io/gh/7eu7d7/HCP-Diffusion/branch/main/graph/badge.svg)](https://codecov.io/gh/7eu7d7/HCP-Diffusion)
+ [![open issues](https://isitmaintained.com/badge/open/7eu7d7/HCP-Diffusion.svg)](https://github.com/7eu7d7/HCP-Diffusion/issues)
+
+ [📘Chinese README](./README_cn.md)
+
+ [📘English documentation](https://hcpdiff.readthedocs.io/en/latest/)
+ [📘Chinese documentation](https://hcpdiff.readthedocs.io/zh_CN/latest/)
+
+ The old HCP-Diffusion V1 is available on the [main branch](https://github.com/IrisRainbowNeko/HCP-Diffusion/tree/main).
+
+ ## Introduction
+
+ **HCP-Diffusion** is a Diffusion model toolbox built on top of the [🐱 RainbowNeko Engine](https://github.com/IrisRainbowNeko/RainbowNekoEngine).
+ It features a clean code structure and a flexible **Python-based configuration file**, making it easier to conduct and manage complex experiments. It includes a wide variety of training components and, compared to existing frameworks, is more extensible, flexible, and user-friendly.
+
+ HCP-Diffusion lets you use a single `.py` config file to unify training workflows across popular methods and model architectures, including Prompt-tuning (Textual Inversion), DreamArtist, Fine-tuning, DreamBooth, LoRA, and ControlNet.
+ Different techniques can also be freely combined.
+
+ This framework also implements **DreamArtist++**, an upgraded version of DreamArtist based on LoRA. It achieves high generalization and controllability from just a single training image.
+ Compared to the original DreamArtist, it offers better stability, image quality, and controllability, and trains faster.
+
+ ---
+
+ ## Installation
+
+ Install [PyTorch](https://pytorch.org/) first.
+
+ Install via pip:
+
+ ```bash
+ pip install hcpdiff
+ # Initialize configuration
+ hcpinit
+ ```
+
+ Install from source:
+
+ ```bash
+ git clone https://github.com/7eu7d7/HCP-Diffusion.git
+ cd HCP-Diffusion
+ pip install -e .
+ # Initialize configuration
+ hcpinit
+ ```
+
+ Use xFormers to reduce memory usage and accelerate training:
+
+ ```bash
+ # Choose the appropriate xformers version for your PyTorch version
+ pip install xformers==?
+ ```
+
+ ## 🚀 Python Configuration Files
+ RainbowNeko Engine supports configuration files written in a Python-like syntax. This lets users call functions and classes directly within the configuration file, with function parameters inheritable from parent configuration files. The framework automatically handles the formatting of these configuration files.
+
+ For example, consider the following configuration file:
+ ```python
+ dict(
+     layer=Linear(in_features=4, out_features=4)
+ )
+ ```
+ During parsing, this is automatically compiled into:
+ ```python
+ dict(
+     layer=dict(_target_=Linear, in_features=4, out_features=4)
+ )
+ ```
+ After parsing, the framework instantiates the components accordingly. This means users can write configuration files using familiar Python syntax.
+
75
+ ---
76
+
77
+ ## ✨ Features
78
+
79
+ <details>
80
+ <summary>Features</summary>
81
+
82
+ ### 📦 Model Support
83
+
84
+ | Model Name | Status |
85
+ |--------------------------|-------------|
86
+ | Stable Diffusion 1.5 | ✅ Supported |
87
+ | Stable Diffusion XL (SDXL)| ✅ Supported |
88
+ | PixArt | ✅ Supported |
89
+ | FLUX | 🚧 In Development |
90
+ | Stable Diffusion 3 (SD3) | 🚧 In Development |
91
+
92
+ ---
93
+
94
+ ### 🧠 Fine-Tuning Capabilities
95
+
96
+ | Feature | Description/Support |
97
+ |----------------------------------|---------------------|
98
+ | LoRA Layer-wise Configuration | ✅ Supported (including Conv2d) |
99
+ | Layer-wise Fine-Tuning | ✅ Supported |
100
+ | Multi-token Prompt-Tuning | ✅ Supported |
101
+ | Layer-wise Model Merging | ✅ Supported |
102
+ | Custom Optimizers | ✅ Supported (Lion, DAdaptation, pytorch-optimizer, etc.) |
103
+ | Custom LR Schedulers | ✅ Supported |
104
+
105
+ ---
106
+
107
+ ### 🧩 Extension Method Support
108
+
109
+ | Method | Status |
110
+ |--------------------------------|-------------|
111
+ | ControlNet (including training)| ✅ Supported |
112
+ | DreamArtist / DreamArtist++ | ✅ Supported |
113
+ | Token Attention Adjustment | ✅ Supported |
114
+ | Max Sentence Length Extension | ✅ Supported |
115
+ | Textual Inversion (Custom Tokens)| ✅ Supported |
116
+ | CLIP Skip | ✅ Supported |
117
+
118
+ ---
119
+
120
+ ### 🚀 Training Acceleration
121
+
122
+ | Tool/Library | Supported Modules |
123
+ |---------------------------------------------------|---------------------------|
124
+ | [🤗 Accelerate](https://github.com/huggingface/accelerate) | ✅ Supported |
125
+ | [Colossal-AI](https://github.com/hpcaitech/ColossalAI) | ✅ Supported |
126
+ | [xFormers](https://github.com/facebookresearch/xformers) | ✅ Supported (UNet and text encoder) |
127
+
128
+ ---
129
+
130
+ ### 🗂 Dataset Support
131
+
132
+ | Feature | Description |
133
+ |----------------------------------|-------------|
134
+ | Aspect Ratio Bucket (ARB) | ✅ Auto-clustering supported |
135
+ | Multi-source / Multi-dataset | ✅ Supported |
136
+ | LMDB | ✅ Supported |
137
+ | webdataset | 🚧 In Development |
138
+ | Local Attention Enhancement | ✅ Supported |
139
+ | Tag Shuffling & Dropout | ✅ Multiple tag editing strategies |
140
+
141
+ ---
142
+
143
+ ### 📉 Supported Loss Functions
144
+
145
+ | Loss Type | Description |
146
+ |------------|-------------|
147
+ | Min-SNR | ✅ Supported |
148
+ | SSIM | ✅ Supported |
149
+ | GWLoss | ✅ Supported |
150
+
151
+ ---
152
+
153
+ ### 🌫 Supported Diffusion Strategies
154
+
155
+ | Strategy Type | Status |
156
+ |------------------|--------------|
157
+ | DDPM | ✅ Supported |
158
+ | EDM | ✅ Supported |
159
+ | Flow Matching | ✅ Supported |
160
+
161
+ ---
162
+
163
+ ### 🧠 Automatic Evaluation (Step Selection Assistant)
164
+
165
+ | Feature | Description/Status |
166
+ |------------------|------------------------------------------|
167
+ | Image Preview | ✅ Supported (workflow preview) |
168
+ | FID | 🚧 In Development |
169
+ | CLIP Score | 🚧 In Development |
170
+ | CCIP Score | 🚧 In Development |
171
+ | Corrupt Score | 🚧 In Development |
172
+
173
+ ---
174
+
175
+ ### ⚡️ Image Generation
176
+
177
+ | 功能 | 描述/支持情况 |
178
+ |------------------------------|------------------------------------|
179
+ | Batch Generation | ✅ Supported |
180
+ | Generate from Prompt Dataset | ✅ Supported |
181
+ | Image to Image | ✅ Supported |
182
+ | Inpaint | ✅ Supported |
183
+ | Token Weight | ✅ Supported |
184
+
185
+ </details>
186
+
187
+ ---
188
+
189
+ ## Getting Started
190
+
191
+ ### Training
192
+
193
+ HCP-Diffusion provides training scripts based on 🤗 Accelerate.
194
+
195
+ ```bash
196
+ # Multi-GPU training, configure GPUs in cfgs/launcher/multi.yaml
197
+ hcp_train --cfg cfgs/train/py/your_config.py
198
+
199
+ # Single-GPU training, configure GPU in cfgs/launcher/single.yaml
200
+ hcp_train_1gpu --cfg cfgs/train/py/your_config.py
201
+ ```
202
+
203
+ You can also override config items via command line:
204
+
205
+ ```bash
206
+ # Override base model path
207
+ hcp_train --cfg cfgs/train/py/your_config.py model.wrapper.models.ckpt_path=pretrained_model_path
208
+ ```
209
+
210
+ ### Image Generation
211
+
212
+ Use the workflow defined in the Python config to generate images:
213
+
214
+ ```bash
215
+ hcp_run --cfg cfgs/workflow/text2img.py
216
+ ```
217
+
218
+ Or override parameters via command line:
219
+
220
+ ```bash
221
+ hcp_run --cfg cfgs/workflow/text2img_cli.py \
222
+ pretrained_model=pretrained_model_path \
223
+ prompt='positive_prompt' \
224
+ negative_prompt='negative_prompt' \
225
+ seed=42
226
+ ```
227
+
228
+ ### Tutorials
229
+
230
+ 🚧 In Development
231
+
232
+ ---
233
+
234
+ ## Contributing
235
+
236
+ We welcome contributions to support more models and features.
237
+
238
+ ---
239
+
240
+ ## Team
241
+
242
+ Maintained by [HCP-Lab at Sun Yat-sen University](https://www.sysu-hcp.net/).
243
+
244
+ ---
245
+
246
+ ## Citation
247
+
248
+ ```bibtex
249
+ @article{DBLP:journals/corr/abs-2211-11337,
250
+ author = {Ziyi Dong and
251
+ Pengxu Wei and
252
+ Liang Lin},
253
+ title = {DreamArtist: Towards Controllable One-Shot Text-to-Image Generation
254
+ via Positive-Negative Prompt-Tuning},
255
+ journal = {CoRR},
256
+ volume = {abs/2211.11337},
257
+ year = {2022},
258
+ doi = {10.48550/arXiv.2211.11337},
259
+ eprinttype = {arXiv},
260
+ eprint = {2211.11337},
261
+ }
262
+ ```
hcpdiff-2.2/hcpdiff/__init__.py ADDED
@@ -0,0 +1,4 @@
+ #from .train_ac_old import Trainer
+ #from .train_ac_single import TrainerSingleCard
+ # from .visualizer import Visualizer
+ # from .visualizer_reloadable import VisualizerReloadable
hcpdiff-2.2/hcpdiff/ckpt_manager/__init__.py ADDED
@@ -0,0 +1,4 @@
+ from .format import EmbFormat, DiffusersSD15Format, DiffusersModelFormat, DiffusersSDXLFormat, DiffusersPixArtFormat, OfficialSDXLFormat, \
+     OfficialSD15Format, LoraWebuiFormat
+ from .ckpt import EmbSaver, easy_emb_saver
+ from .loader import HCPLoraLoader
hcpdiff-2.2/hcpdiff/ckpt_manager/ckpt.py ADDED
@@ -0,0 +1,24 @@
+ from rainbowneko.ckpt_manager import NekoSaver, CkptFormat, LocalCkptSource, PKLFormat
+ from torch import nn
+ from typing import Dict, Any
+
+ class EmbSaver(NekoSaver):
+     def __init__(self, format: CkptFormat, source: LocalCkptSource, target_key='embs', prefix=None):
+         super().__init__(format, source)
+         self.target_key = target_key
+         self.prefix = prefix
+
+     def save_to(self, name, model: nn.Module, plugin_groups: Dict[str, Any], model_ema=None, exclude_key=None,
+                 name_template=None):
+         train_pts = plugin_groups[self.target_key]
+         for pt_name, pt in train_pts.items():
+             self.save(pt_name, (pt_name, pt), prefix=self.prefix)
+             if name_template is not None:
+                 pt_name = name_template.format(pt_name)
+                 self.save(pt_name, (pt_name, pt), prefix=self.prefix)
+
+ def easy_emb_saver():
+     return EmbSaver(
+         format=PKLFormat(),
+         source=LocalCkptSource(),
+     )
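As written, `save_to` stores each embedding under its raw name and, when `name_template` is given, again under the templated name (e.g. a stable "latest" copy plus a step-tagged copy). A usage sketch follows; the embedding names and shapes are illustrative, and whether `model=None` is acceptable depends on rainbowneko's `NekoSaver`, so treat this as an assumption:

```python
import torch

# Hypothetical trained embeddings collected during training: name -> tensor.
train_embs = {'my_style': torch.randn(4, 768)}

saver = easy_emb_saver()  # PKLFormat + LocalCkptSource, target_key='embs'
saver.save_to('embs', model=None, plugin_groups={'embs': train_embs},
              name_template='{}-step1000')  # also writes 'my_style-step1000'
```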
hcpdiff-2.2/hcpdiff/ckpt_manager/format/__init__.py ADDED
@@ -0,0 +1,4 @@
+ from .emb import EmbFormat
+ from .diffusers import DiffusersSD15Format, DiffusersModelFormat, DiffusersSDXLFormat, DiffusersPixArtFormat
+ from .sd_single import OfficialSD15Format, OfficialSDXLFormat
+ from .lora_webui import LoraWebuiFormat
hcpdiff-2.2/hcpdiff/ckpt_manager/format/diffusers.py ADDED
@@ -0,0 +1,59 @@
+ import torch
+ from diffusers import ModelMixin, AutoencoderKL, UNet2DConditionModel, PixArtTransformer2DModel
+ from rainbowneko.ckpt_manager.format import CkptFormat
+ from transformers import CLIPTextModel, AutoTokenizer, T5EncoderModel
+
+ from hcpdiff.diffusion.sampler import DDPMSampler, DDPMDiscreteSigmaScheduler
+ from hcpdiff.models.compose import SDXLTokenizer, SDXLTextEncoder
+
+ class DiffusersModelFormat(CkptFormat):
+     def __init__(self, builder: ModelMixin):
+         self.builder = builder
+
+     def save_ckpt(self, sd_model: ModelMixin, save_f: str, **kwargs):
+         sd_model.save_pretrained(save_f)
+
+     def load_ckpt(self, ckpt_f: str, map_location="cpu", **kwargs):
+         return self.builder.from_pretrained(ckpt_f, **kwargs)
+
+ class DiffusersSD15Format(CkptFormat):
+     def load_ckpt(self, pretrained_model: str, map_location="cpu", denoiser=None, TE=None, vae: AutoencoderKL = None, noise_sampler=None,
+                   tokenizer=None, revision=None, dtype=torch.float32, **kwargs):
+         denoiser = denoiser or UNet2DConditionModel.from_pretrained(
+             pretrained_model, subfolder="unet", revision=revision, torch_dtype=dtype
+         )
+         vae = vae or AutoencoderKL.from_pretrained(pretrained_model, subfolder="vae", revision=revision, torch_dtype=dtype)
+         noise_sampler = noise_sampler or DDPMSampler(DDPMDiscreteSigmaScheduler())
+
+         TE = TE or CLIPTextModel.from_pretrained(pretrained_model, subfolder="text_encoder", revision=revision, torch_dtype=dtype)
+         tokenizer = tokenizer or AutoTokenizer.from_pretrained(pretrained_model, subfolder="tokenizer", revision=revision, use_fast=False)
+
+         return dict(denoiser=denoiser, TE=TE, vae=vae, noise_sampler=noise_sampler, tokenizer=tokenizer)
+
+ class DiffusersSDXLFormat(CkptFormat):
+     def load_ckpt(self, pretrained_model: str, map_location="cpu", denoiser=None, TE=None, vae: AutoencoderKL = None, noise_sampler=None,
+                   tokenizer=None, revision=None, dtype=torch.float32, **kwargs):
+         denoiser = denoiser or UNet2DConditionModel.from_pretrained(
+             pretrained_model, subfolder="unet", revision=revision, torch_dtype=dtype
+         )
+         vae = vae or AutoencoderKL.from_pretrained(pretrained_model, subfolder="vae", revision=revision, torch_dtype=dtype)
+         noise_sampler = noise_sampler or DDPMSampler(DDPMDiscreteSigmaScheduler())
+
+         TE = TE or SDXLTextEncoder.from_pretrained(pretrained_model, subfolder="text_encoder", revision=revision, torch_dtype=dtype)
+         tokenizer = tokenizer or SDXLTokenizer.from_pretrained(pretrained_model, subfolder="tokenizer", revision=revision, use_fast=False)
+
+         return dict(denoiser=denoiser, TE=TE, vae=vae, noise_sampler=noise_sampler, tokenizer=tokenizer)
+
+ class DiffusersPixArtFormat(CkptFormat):
+     def load_ckpt(self, pretrained_model: str, map_location="cpu", denoiser=None, TE=None, vae: AutoencoderKL = None, noise_sampler=None,
+                   tokenizer=None, revision=None, dtype=torch.float32, **kwargs):
+         denoiser = denoiser or PixArtTransformer2DModel.from_pretrained(
+             pretrained_model, subfolder="transformer", revision=revision, torch_dtype=dtype
+         )
+         vae = vae or AutoencoderKL.from_pretrained(pretrained_model, subfolder="vae", revision=revision, torch_dtype=dtype)
+         noise_sampler = noise_sampler or DDPMSampler(DDPMDiscreteSigmaScheduler())
+
+         TE = TE or T5EncoderModel.from_pretrained(pretrained_model, subfolder="text_encoder", revision=revision, torch_dtype=dtype)
+         tokenizer = tokenizer or AutoTokenizer.from_pretrained(pretrained_model, subfolder="tokenizer", revision=revision, use_fast=False)
+
+         return dict(denoiser=denoiser, TE=TE, vae=vae, noise_sampler=noise_sampler, tokenizer=tokenizer)
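Each of the `Diffusers*Format.load_ckpt` methods above returns a dict with the five components a model wrapper needs (`denoiser`, `TE`, `vae`, `noise_sampler`, `tokenizer`), loading from the checkpoint only those not passed in. A brief sketch; the model path below is a placeholder for any Diffusers-layout SD1.5 checkpoint:

```python
import torch

fmt = DiffusersSD15Format()
parts = fmt.load_ckpt('runwayml/stable-diffusion-v1-5', dtype=torch.float16)
denoiser, TE, vae = parts['denoiser'], parts['TE'], parts['vae']

# Passing a component through skips reloading it from disk:
parts_reuse = fmt.load_ckpt('runwayml/stable-diffusion-v1-5', vae=vae, dtype=torch.float16)
```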
hcpdiff-2.2/hcpdiff/ckpt_manager/format/emb.py ADDED
@@ -0,0 +1,21 @@
+ from typing import Tuple
+
+ import torch
+ from rainbowneko.ckpt_manager.format import CkptFormat
+ from torch.serialization import FILE_LIKE
+
+ class EmbFormat(CkptFormat):
+     EXT = 'pt'
+
+     def save_ckpt(self, sd_model: Tuple[str, torch.Tensor], save_f: FILE_LIKE):
+         name, emb = sd_model
+         torch.save({'string_to_param':{'*':emb}, 'name':name}, save_f)
+
+     def load_ckpt(self, ckpt_f: FILE_LIKE, map_location="cpu"):
+         state = torch.load(ckpt_f, map_location=map_location)
+         if 'string_to_param' in state:
+             emb = state['string_to_param']['*']
+         else:
+             emb = state['emb_params']
+         emb.requires_grad_(False)
+         return emb
+ return emb