cache-dit 0.2.3__tar.gz → 0.2.4__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (141)
  1. {cache_dit-0.2.3 → cache_dit-0.2.4}/PKG-INFO +15 -1
  2. {cache_dit-0.2.3 → cache_dit-0.2.4}/README.md +14 -0
  3. cache_dit-0.2.4/examples/README.md +57 -0
  4. cache_dit-0.2.4/examples/run_cogvideox.py +142 -0
  5. cache_dit-0.2.4/examples/run_flux.py +96 -0
  6. cache_dit-0.2.4/examples/run_flux_fill.py +100 -0
  7. cache_dit-0.2.4/examples/run_hunyuan_video.py +145 -0
  8. cache_dit-0.2.4/examples/run_mochi.py +101 -0
  9. cache_dit-0.2.4/examples/run_wan.py +134 -0
  10. {cache_dit-0.2.3 → cache_dit-0.2.4}/src/cache_dit/_version.py +2 -2
  11. {cache_dit-0.2.3 → cache_dit-0.2.4}/src/cache_dit/cache_factory/dual_block_cache/cache_context.py +225 -40
  12. {cache_dit-0.2.3 → cache_dit-0.2.4}/src/cache_dit/cache_factory/dual_block_cache/diffusers_adapters/wan.py +1 -1
  13. {cache_dit-0.2.3 → cache_dit-0.2.4}/src/cache_dit.egg-info/PKG-INFO +15 -1
  14. cache_dit-0.2.3/examples/README.md +0 -45
  15. cache_dit-0.2.3/examples/run_cogvideox.py +0 -72
  16. cache_dit-0.2.3/examples/run_flux.py +0 -27
  17. cache_dit-0.2.3/examples/run_flux_fill.py +0 -32
  18. cache_dit-0.2.3/examples/run_hunyuan_video.py +0 -75
  19. cache_dit-0.2.3/examples/run_mochi.py +0 -32
  20. cache_dit-0.2.3/examples/run_wan.py +0 -63
  21. {cache_dit-0.2.3 → cache_dit-0.2.4}/.github/workflows/issue.yml +0 -0
  22. {cache_dit-0.2.3 → cache_dit-0.2.4}/.gitignore +0 -0
  23. {cache_dit-0.2.3 → cache_dit-0.2.4}/.pre-commit-config.yaml +0 -0
  24. {cache_dit-0.2.3 → cache_dit-0.2.4}/CONTRIBUTE.md +0 -0
  25. {cache_dit-0.2.3 → cache_dit-0.2.4}/LICENSE +0 -0
  26. {cache_dit-0.2.3 → cache_dit-0.2.4}/MANIFEST.in +0 -0
  27. {cache_dit-0.2.3 → cache_dit-0.2.4}/assets/DBCACHE_F12B12S4_R0.2_S16.png +0 -0
  28. {cache_dit-0.2.3 → cache_dit-0.2.4}/assets/DBCACHE_F12B16S4_R0.08_S6.png +0 -0
  29. {cache_dit-0.2.3 → cache_dit-0.2.4}/assets/DBCACHE_F16B16S2_R0.2_S14.png +0 -0
  30. {cache_dit-0.2.3 → cache_dit-0.2.4}/assets/DBCACHE_F16B16S4_R0.2_S13.png +0 -0
  31. {cache_dit-0.2.3 → cache_dit-0.2.4}/assets/DBCACHE_F1B0S1_R0.08_S11.png +0 -0
  32. {cache_dit-0.2.3 → cache_dit-0.2.4}/assets/DBCACHE_F1B0S1_R0.2_S19.png +0 -0
  33. {cache_dit-0.2.3 → cache_dit-0.2.4}/assets/DBCACHE_F8B0S2_R0.12_S12.png +0 -0
  34. {cache_dit-0.2.3 → cache_dit-0.2.4}/assets/DBCACHE_F8B16S1_R0.2_S18.png +0 -0
  35. {cache_dit-0.2.3 → cache_dit-0.2.4}/assets/DBCACHE_F8B8S1_R0.08_S9.png +0 -0
  36. {cache_dit-0.2.3 → cache_dit-0.2.4}/assets/DBCACHE_F8B8S1_R0.12_S12.png +0 -0
  37. {cache_dit-0.2.3 → cache_dit-0.2.4}/assets/DBCACHE_F8B8S1_R0.15_S15.png +0 -0
  38. {cache_dit-0.2.3 → cache_dit-0.2.4}/assets/DBPRUNE_F1B0_R0.03_P24.0_T19.43s.png +0 -0
  39. {cache_dit-0.2.3 → cache_dit-0.2.4}/assets/DBPRUNE_F1B0_R0.04_P34.6_T16.82s.png +0 -0
  40. {cache_dit-0.2.3 → cache_dit-0.2.4}/assets/DBPRUNE_F1B0_R0.05_P38.3_T15.95s.png +0 -0
  41. {cache_dit-0.2.3 → cache_dit-0.2.4}/assets/DBPRUNE_F1B0_R0.06_P45.2_T14.24s.png +0 -0
  42. {cache_dit-0.2.3 → cache_dit-0.2.4}/assets/DBPRUNE_F1B0_R0.07_P52.3_T12.53s.png +0 -0
  43. {cache_dit-0.2.3 → cache_dit-0.2.4}/assets/DBPRUNE_F1B0_R0.08_P52.4_T12.52s.png +0 -0
  44. {cache_dit-0.2.3 → cache_dit-0.2.4}/assets/DBPRUNE_F1B0_R0.09_P59.2_T10.81s.png +0 -0
  45. {cache_dit-0.2.3 → cache_dit-0.2.4}/assets/DBPRUNE_F1B0_R0.12_P59.5_T10.76s.png +0 -0
  46. {cache_dit-0.2.3 → cache_dit-0.2.4}/assets/DBPRUNE_F1B0_R0.12_P63.0_T9.90s.png +0 -0
  47. {cache_dit-0.2.3 → cache_dit-0.2.4}/assets/DBPRUNE_F1B0_R0.1_P62.8_T9.95s.png +0 -0
  48. {cache_dit-0.2.3 → cache_dit-0.2.4}/assets/DBPRUNE_F1B0_R0.2_P59.5_T10.66s.png +0 -0
  49. {cache_dit-0.2.3 → cache_dit-0.2.4}/assets/DBPRUNE_F1B0_R0.3_P63.1_T9.79s.png +0 -0
  50. {cache_dit-0.2.3 → cache_dit-0.2.4}/assets/NONE_R0.08_S0.png +0 -0
  51. {cache_dit-0.2.3 → cache_dit-0.2.4}/assets/TEXTURE_DBCACHE_F1B0_R0.08.png +0 -0
  52. {cache_dit-0.2.3 → cache_dit-0.2.4}/assets/TEXTURE_DBCACHE_F8B12_R0.12.png +0 -0
  53. {cache_dit-0.2.3 → cache_dit-0.2.4}/assets/TEXTURE_DBCACHE_F8B16_R0.2.png +0 -0
  54. {cache_dit-0.2.3 → cache_dit-0.2.4}/assets/TEXTURE_DBCACHE_F8B20_R0.2.png +0 -0
  55. {cache_dit-0.2.3 → cache_dit-0.2.4}/assets/TEXTURE_DBCACHE_F8B8_R0.12.png +0 -0
  56. {cache_dit-0.2.3 → cache_dit-0.2.4}/assets/TEXTURE_NONE_R0.08.png +0 -0
  57. {cache_dit-0.2.3 → cache_dit-0.2.4}/assets/U0_C0_DBCACHE_F1B0S1W0T0ET0_R0.12_S14_T12.85s.png +0 -0
  58. {cache_dit-0.2.3 → cache_dit-0.2.4}/assets/U0_C0_DBCACHE_F1B0S1W0T0ET0_R0.15_S17_T10.27s.png +0 -0
  59. {cache_dit-0.2.3 → cache_dit-0.2.4}/assets/U0_C0_DBCACHE_F1B0S1W0T1ET1_R0.12_S14_T12.86s.png +0 -0
  60. {cache_dit-0.2.3 → cache_dit-0.2.4}/assets/U0_C0_DBCACHE_F1B0S1W0T1ET1_R0.15_S17_T10.28s.png +0 -0
  61. {cache_dit-0.2.3 → cache_dit-0.2.4}/assets/U0_C1_DBCACHE_F1B0S1W0T1ET1_R0.15_S17_T8.48s.png +0 -0
  62. {cache_dit-0.2.3 → cache_dit-0.2.4}/assets/U0_C1_DBPRUNE_F1B0_R0.03_P24.0_T16.25s.png +0 -0
  63. {cache_dit-0.2.3 → cache_dit-0.2.4}/assets/U0_C1_DBPRUNE_F1B0_R0.045_P38.2_T13.41s.png +0 -0
  64. {cache_dit-0.2.3 → cache_dit-0.2.4}/assets/U0_C1_DBPRUNE_F1B0_R0.04_P34.6_T14.12s.png +0 -0
  65. {cache_dit-0.2.3 → cache_dit-0.2.4}/assets/U0_C1_DBPRUNE_F1B0_R0.055_P45.1_T12.00s.png +0 -0
  66. {cache_dit-0.2.3 → cache_dit-0.2.4}/assets/U0_C1_DBPRUNE_F1B0_R0.05_P41.6_T12.70s.png +0 -0
  67. {cache_dit-0.2.3 → cache_dit-0.2.4}/assets/U0_C1_DBPRUNE_F1B0_R0.2_P59.5_T8.86s.png +0 -0
  68. {cache_dit-0.2.3 → cache_dit-0.2.4}/assets/U0_C1_DBPRUNE_F8B8_R0.08_P23.1_T16.14s.png +0 -0
  69. {cache_dit-0.2.3 → cache_dit-0.2.4}/assets/U0_C1_NONE_R0.08_S0_T20.43s.png +0 -0
  70. {cache_dit-0.2.3 → cache_dit-0.2.4}/assets/U4_C1_DBPRUNE_F1B0_R0.03_P27.3_T6.62s.png +0 -0
  71. {cache_dit-0.2.3 → cache_dit-0.2.4}/assets/U4_C1_DBPRUNE_F1B0_R0.03_P27.3_T6.63s.png +0 -0
  72. {cache_dit-0.2.3 → cache_dit-0.2.4}/assets/U4_C1_DBPRUNE_F1B0_R0.045_P38.2_T5.81s.png +0 -0
  73. {cache_dit-0.2.3 → cache_dit-0.2.4}/assets/U4_C1_DBPRUNE_F1B0_R0.045_P38.2_T5.82s.png +0 -0
  74. {cache_dit-0.2.3 → cache_dit-0.2.4}/assets/U4_C1_DBPRUNE_F1B0_R0.04_P34.6_T6.06s.png +0 -0
  75. {cache_dit-0.2.3 → cache_dit-0.2.4}/assets/U4_C1_DBPRUNE_F1B0_R0.04_P34.6_T6.07s.png +0 -0
  76. {cache_dit-0.2.3 → cache_dit-0.2.4}/assets/U4_C1_DBPRUNE_F1B0_R0.04_P34.6_T6.08s.png +0 -0
  77. {cache_dit-0.2.3 → cache_dit-0.2.4}/assets/U4_C1_DBPRUNE_F1B0_R0.055_P45.1_T5.27s.png +0 -0
  78. {cache_dit-0.2.3 → cache_dit-0.2.4}/assets/U4_C1_DBPRUNE_F1B0_R0.055_P45.1_T5.28s.png +0 -0
  79. {cache_dit-0.2.3 → cache_dit-0.2.4}/assets/U4_C1_DBPRUNE_F1B0_R0.2_P59.5_T3.95s.png +0 -0
  80. {cache_dit-0.2.3 → cache_dit-0.2.4}/assets/U4_C1_DBPRUNE_F1B0_R0.2_P59.5_T3.96s.png +0 -0
  81. {cache_dit-0.2.3 → cache_dit-0.2.4}/assets/U4_C1_NONE_R0.08_S0_T7.78s.png +0 -0
  82. {cache_dit-0.2.3 → cache_dit-0.2.4}/assets/U4_C1_NONE_R0.08_S0_T7.79s.png +0 -0
  83. {cache_dit-0.2.3 → cache_dit-0.2.4}/assets/cache-dit-v1.png +0 -0
  84. {cache_dit-0.2.3 → cache_dit-0.2.4}/assets/dbcache-fnbn-v1.png +0 -0
  85. {cache_dit-0.2.3 → cache_dit-0.2.4}/assets/dbcache-v1.png +0 -0
  86. {cache_dit-0.2.3 → cache_dit-0.2.4}/assets/dbprune-v1.png +0 -0
  87. {cache_dit-0.2.3 → cache_dit-0.2.4}/assets/fbcache-v1.png +0 -0
  88. {cache_dit-0.2.3 → cache_dit-0.2.4}/bench/.gitignore +0 -0
  89. {cache_dit-0.2.3 → cache_dit-0.2.4}/bench/bench.py +0 -0
  90. {cache_dit-0.2.3 → cache_dit-0.2.4}/docs/.gitignore +0 -0
  91. {cache_dit-0.2.3 → cache_dit-0.2.4}/examples/.gitignore +0 -0
  92. {cache_dit-0.2.3 → cache_dit-0.2.4}/examples/data/cup.png +0 -0
  93. {cache_dit-0.2.3 → cache_dit-0.2.4}/examples/data/cup_mask.png +0 -0
  94. {cache_dit-0.2.3 → cache_dit-0.2.4}/examples/requirements.txt +0 -0
  95. {cache_dit-0.2.3 → cache_dit-0.2.4}/pyproject.toml +0 -0
  96. {cache_dit-0.2.3 → cache_dit-0.2.4}/pytest.ini +0 -0
  97. {cache_dit-0.2.3 → cache_dit-0.2.4}/requirements.txt +0 -0
  98. {cache_dit-0.2.3 → cache_dit-0.2.4}/setup.cfg +0 -0
  99. {cache_dit-0.2.3 → cache_dit-0.2.4}/setup.py +0 -0
  100. {cache_dit-0.2.3 → cache_dit-0.2.4}/src/cache_dit/__init__.py +0 -0
  101. {cache_dit-0.2.3 → cache_dit-0.2.4}/src/cache_dit/cache_factory/__init__.py +0 -0
  102. {cache_dit-0.2.3 → cache_dit-0.2.4}/src/cache_dit/cache_factory/dual_block_cache/__init__.py +0 -0
  103. {cache_dit-0.2.3 → cache_dit-0.2.4}/src/cache_dit/cache_factory/dual_block_cache/diffusers_adapters/__init__.py +0 -0
  104. {cache_dit-0.2.3 → cache_dit-0.2.4}/src/cache_dit/cache_factory/dual_block_cache/diffusers_adapters/cogvideox.py +0 -0
  105. {cache_dit-0.2.3 → cache_dit-0.2.4}/src/cache_dit/cache_factory/dual_block_cache/diffusers_adapters/flux.py +0 -0
  106. {cache_dit-0.2.3 → cache_dit-0.2.4}/src/cache_dit/cache_factory/dual_block_cache/diffusers_adapters/hunyuan_video.py +0 -0
  107. {cache_dit-0.2.3 → cache_dit-0.2.4}/src/cache_dit/cache_factory/dual_block_cache/diffusers_adapters/mochi.py +0 -0
  108. {cache_dit-0.2.3 → cache_dit-0.2.4}/src/cache_dit/cache_factory/dynamic_block_prune/__init__.py +0 -0
  109. {cache_dit-0.2.3 → cache_dit-0.2.4}/src/cache_dit/cache_factory/dynamic_block_prune/diffusers_adapters/__init__.py +0 -0
  110. {cache_dit-0.2.3 → cache_dit-0.2.4}/src/cache_dit/cache_factory/dynamic_block_prune/diffusers_adapters/cogvideox.py +0 -0
  111. {cache_dit-0.2.3 → cache_dit-0.2.4}/src/cache_dit/cache_factory/dynamic_block_prune/diffusers_adapters/flux.py +0 -0
  112. {cache_dit-0.2.3 → cache_dit-0.2.4}/src/cache_dit/cache_factory/dynamic_block_prune/diffusers_adapters/hunyuan_video.py +0 -0
  113. {cache_dit-0.2.3 → cache_dit-0.2.4}/src/cache_dit/cache_factory/dynamic_block_prune/diffusers_adapters/mochi.py +0 -0
  114. {cache_dit-0.2.3 → cache_dit-0.2.4}/src/cache_dit/cache_factory/dynamic_block_prune/diffusers_adapters/wan.py +0 -0
  115. {cache_dit-0.2.3 → cache_dit-0.2.4}/src/cache_dit/cache_factory/dynamic_block_prune/prune_context.py +0 -0
  116. {cache_dit-0.2.3 → cache_dit-0.2.4}/src/cache_dit/cache_factory/first_block_cache/__init__.py +0 -0
  117. {cache_dit-0.2.3 → cache_dit-0.2.4}/src/cache_dit/cache_factory/first_block_cache/cache_context.py +0 -0
  118. {cache_dit-0.2.3 → cache_dit-0.2.4}/src/cache_dit/cache_factory/first_block_cache/diffusers_adapters/__init__.py +0 -0
  119. {cache_dit-0.2.3 → cache_dit-0.2.4}/src/cache_dit/cache_factory/first_block_cache/diffusers_adapters/cogvideox.py +0 -0
  120. {cache_dit-0.2.3 → cache_dit-0.2.4}/src/cache_dit/cache_factory/first_block_cache/diffusers_adapters/flux.py +0 -0
  121. {cache_dit-0.2.3 → cache_dit-0.2.4}/src/cache_dit/cache_factory/first_block_cache/diffusers_adapters/hunyuan_video.py +0 -0
  122. {cache_dit-0.2.3 → cache_dit-0.2.4}/src/cache_dit/cache_factory/first_block_cache/diffusers_adapters/mochi.py +0 -0
  123. {cache_dit-0.2.3 → cache_dit-0.2.4}/src/cache_dit/cache_factory/first_block_cache/diffusers_adapters/wan.py +0 -0
  124. {cache_dit-0.2.3 → cache_dit-0.2.4}/src/cache_dit/cache_factory/taylorseer.py +0 -0
  125. {cache_dit-0.2.3 → cache_dit-0.2.4}/src/cache_dit/cache_factory/utils.py +0 -0
  126. {cache_dit-0.2.3 → cache_dit-0.2.4}/src/cache_dit/compile/__init__.py +0 -0
  127. {cache_dit-0.2.3 → cache_dit-0.2.4}/src/cache_dit/compile/utils.py +0 -0
  128. {cache_dit-0.2.3 → cache_dit-0.2.4}/src/cache_dit/custom_ops/__init__.py +0 -0
  129. {cache_dit-0.2.3 → cache_dit-0.2.4}/src/cache_dit/custom_ops/triton_taylorseer.py +0 -0
  130. {cache_dit-0.2.3 → cache_dit-0.2.4}/src/cache_dit/logger.py +0 -0
  131. {cache_dit-0.2.3 → cache_dit-0.2.4}/src/cache_dit/primitives.py +0 -0
  132. {cache_dit-0.2.3 → cache_dit-0.2.4}/src/cache_dit.egg-info/SOURCES.txt +0 -0
  133. {cache_dit-0.2.3 → cache_dit-0.2.4}/src/cache_dit.egg-info/dependency_links.txt +0 -0
  134. {cache_dit-0.2.3 → cache_dit-0.2.4}/src/cache_dit.egg-info/requires.txt +0 -0
  135. {cache_dit-0.2.3 → cache_dit-0.2.4}/src/cache_dit.egg-info/top_level.txt +0 -0
  136. {cache_dit-0.2.3 → cache_dit-0.2.4}/tests/.gitignore +0 -0
  137. {cache_dit-0.2.3 → cache_dit-0.2.4}/tests/README.md +0 -0
  138. {cache_dit-0.2.3 → cache_dit-0.2.4}/tests/taylorseer_approximation_order_2.png +0 -0
  139. {cache_dit-0.2.3 → cache_dit-0.2.4}/tests/taylorseer_approximation_order_4.png +0 -0
  140. {cache_dit-0.2.3 → cache_dit-0.2.4}/tests/taylorseer_approximation_test.png +0 -0
  141. {cache_dit-0.2.3 → cache_dit-0.2.4}/tests/test_taylorseer.py +0 -0
@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: cache_dit
- Version: 0.2.3
+ Version: 0.2.4
  Summary: 🤗 CacheDiT: A Training-free and Easy-to-use Cache Acceleration Toolbox for Diffusion Transformers
  Author: DefTruth, vipshop.com, etc.
  Maintainer: DefTruth, vipshop.com, etc
@@ -154,6 +154,7 @@ The **CacheDiT** codebase is adapted from [FBCache](https://github.com/chengzeyi
  - [🔥Supported Models](#supported)
  - [⚡️Dual Block Cache](#dbcache)
  - [🔥Hybrid TaylorSeer](#taylorseer)
+ - [⚡️Hybrid Cache CFG](#cfg)
  - [🎉First Block Cache](#fbcache)
  - [⚡️Dynamic Block Prune](#dbprune)
  - [🎉Context Parallelism](#context-parallelism)
@@ -299,6 +300,19 @@ cache_options = {
  |24.85s|12.85s|12.86s|10.27s|10.28s|8.48s|
  |<img src=https://github.com/vipshop/cache-dit/raw/main/assets/NONE_R0.08_S0.png width=105px>|<img src=https://github.com/vipshop/cache-dit/raw/main/assets/U0_C0_DBCACHE_F1B0S1W0T0ET0_R0.12_S14_T12.85s.png width=105px>|<img src=https://github.com/vipshop/cache-dit/raw/main/assets/U0_C0_DBCACHE_F1B0S1W0T1ET1_R0.12_S14_T12.86s.png width=105px>|<img src=https://github.com/vipshop/cache-dit/raw/main/assets/U0_C0_DBCACHE_F1B0S1W0T0ET0_R0.15_S17_T10.27s.png width=105px>|<img src=https://github.com/vipshop/cache-dit/raw/main/assets/U0_C0_DBCACHE_F1B0S1W0T1ET1_R0.15_S17_T10.28s.png width=105px>|<img src=https://github.com/vipshop/cache-dit/raw/main/assets/U0_C1_DBCACHE_F1B0S1W0T1ET1_R0.15_S17_T8.48s.png width=105px>|

+ ## ⚡️Hybrid Cache CFG
+
+ <div id="cfg"></div>
+
+ CacheDiT supports caching for CFG (classifier-free guidance). For models that fuse CFG and non-CFG into a single forward step, or that do not use CFG at all, set the `do_separate_classifier_free_guidance` option to False; otherwise, set it to True. For example, Wan 2.1 requires True, while FLUX.1, HunyuanVideo, CogVideoX, and Mochi use False.
+
+ ```python
+ cache_options = {
+     "do_separate_classifier_free_guidance": True,  # Wan 2.1
+     "cfg_compute_first": False,
+ }
+ ```
+
  ## 🎉FBCache: First Block Cache

  <div id="fbcache"></div>
@@ -119,6 +119,7 @@ The **CacheDiT** codebase is adapted from [FBCache](https://github.com/chengzeyi
  - [🔥Supported Models](#supported)
  - [⚡️Dual Block Cache](#dbcache)
  - [🔥Hybrid TaylorSeer](#taylorseer)
+ - [⚡️Hybrid Cache CFG](#cfg)
  - [🎉First Block Cache](#fbcache)
  - [⚡️Dynamic Block Prune](#dbprune)
  - [🎉Context Parallelism](#context-parallelism)
@@ -264,6 +265,19 @@ cache_options = {
  |24.85s|12.85s|12.86s|10.27s|10.28s|8.48s|
  |<img src=https://github.com/vipshop/cache-dit/raw/main/assets/NONE_R0.08_S0.png width=105px>|<img src=https://github.com/vipshop/cache-dit/raw/main/assets/U0_C0_DBCACHE_F1B0S1W0T0ET0_R0.12_S14_T12.85s.png width=105px>|<img src=https://github.com/vipshop/cache-dit/raw/main/assets/U0_C0_DBCACHE_F1B0S1W0T1ET1_R0.12_S14_T12.86s.png width=105px>|<img src=https://github.com/vipshop/cache-dit/raw/main/assets/U0_C0_DBCACHE_F1B0S1W0T0ET0_R0.15_S17_T10.27s.png width=105px>|<img src=https://github.com/vipshop/cache-dit/raw/main/assets/U0_C0_DBCACHE_F1B0S1W0T1ET1_R0.15_S17_T10.28s.png width=105px>|<img src=https://github.com/vipshop/cache-dit/raw/main/assets/U0_C1_DBCACHE_F1B0S1W0T1ET1_R0.15_S17_T8.48s.png width=105px>|

+ ## ⚡️Hybrid Cache CFG
+
+ <div id="cfg"></div>
+
+ CacheDiT supports caching for CFG (classifier-free guidance). For models that fuse CFG and non-CFG into a single forward step, or that do not use CFG at all, set the `do_separate_classifier_free_guidance` option to False; otherwise, set it to True. For example, Wan 2.1 requires True, while FLUX.1, HunyuanVideo, CogVideoX, and Mochi use False.
+
+ ```python
+ cache_options = {
+     "do_separate_classifier_free_guidance": True,  # Wan 2.1
+     "cfg_compute_first": False,
+ }
+ ```
+
  ## 🎉FBCache: First Block Cache

  <div id="fbcache"></div>
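
To make the new option concrete: a minimal sketch (not part of the diff) that combines the `cache_options` snippet above with `apply_cache_on_pipe` for Wan 2.1, which computes CFG as a separate forward pass. The model id, block counts, and threshold here are illustrative assumptions; `apply_cache_on_pipe` and `CacheType` come from the example scripts below.

```python
import torch
from diffusers import WanPipeline  # Wan 2.1 text-to-video pipeline
from cache_dit.cache_factory import apply_cache_on_pipe, CacheType

# Assumed model id, for illustration only.
pipe = WanPipeline.from_pretrained(
    "Wan-AI/Wan2.1-T2V-1.3B-Diffusers",
    torch_dtype=torch.bfloat16,
).to("cuda")

apply_cache_on_pipe(
    pipe,
    cache_type=CacheType.DBCache,
    Fn_compute_blocks=8,
    Bn_compute_blocks=8,
    residual_diff_threshold=0.08,
    # Wan 2.1 runs CFG and non-CFG in separate forward steps, so the
    # cache must track the two branches separately.
    do_separate_classifier_free_guidance=True,
    cfg_compute_first=False,
)
```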
@@ -0,0 +1,57 @@
+ # Examples for CacheDiT
+
+ ## Install requirements
+
+ ```bash
+ pip3 install -r requirements.txt
+ ```
+
+ ## Run examples
+
+ - FLUX.1-dev
+
+ ```bash
+ python3 run_flux.py # baseline
+ python3 run_flux.py --cache --Fn 8 --Bn 8
+ python3 run_flux.py --cache --Fn 8 --Bn 0 --taylorseer
+ ```
+
+ - FLUX.1-Fill-dev
+
+ ```bash
+ python3 run_flux_fill.py # baseline
+ python3 run_flux_fill.py --cache --Fn 8 --Bn 8
+ python3 run_flux_fill.py --cache --Fn 8 --Bn 0 --taylorseer
+ ```
+
+ - CogVideoX
+
+ ```bash
+ python3 run_cogvideox.py # baseline
+ python3 run_cogvideox.py --cache --Fn 8 --Bn 8
+ python3 run_cogvideox.py --cache --Fn 8 --Bn 0 --taylorseer
+ ```
+
+ - Wan2.1
+
+ ```bash
+ python3 run_wan.py # baseline
+ python3 run_wan.py --cache --Fn 8 --Bn 8
+ python3 run_wan.py --cache --Fn 8 --Bn 0 --taylorseer
+ ```
+
+ - Mochi
+
+ ```bash
+ python3 run_mochi.py # baseline
+ python3 run_mochi.py --cache --Fn 8 --Bn 8
+ python3 run_mochi.py --cache --Fn 8 --Bn 0 --taylorseer
+ ```
+
+ - HunyuanVideo
+
+ ```bash
+ python3 run_hunyuan_video.py # baseline
+ python3 run_hunyuan_video.py --cache --Fn 8 --Bn 8
+ python3 run_hunyuan_video.py --cache --Fn 8 --Bn 0 --taylorseer
+ ```
@@ -0,0 +1,142 @@
+ import os
+ import time
+ import torch
+ import argparse
+ from diffusers.utils import export_to_video
+ from diffusers import CogVideoXPipeline, AutoencoderKLCogVideoX
+ from cache_dit.cache_factory import apply_cache_on_pipe, CacheType
+
+
+ def get_args() -> argparse.Namespace:
+     parser = argparse.ArgumentParser()
+     # General arguments
+     parser.add_argument("--cache", action="store_true", default=False)
+     parser.add_argument("--taylorseer", action="store_true", default=False)
+     parser.add_argument("--taylorseer-order", "--order", type=int, default=2)
+     parser.add_argument("--Fn-compute-blocks", "--Fn", type=int, default=1)
+     parser.add_argument("--Bn-compute-blocks", "--Bn", type=int, default=0)
+     parser.add_argument("--rdt", type=float, default=0.08)
+     parser.add_argument("--warmup-steps", type=int, default=0)
+     return parser.parse_args()
+
+
+ args = get_args()
+ print(args)
+
+
+ model_id = os.environ.get("COGVIDEOX_DIR", "THUDM/CogVideoX-5b")
+
+
+ def is_cogvideox_1_5():
+     return "CogVideoX1.5" in model_id or "THUDM/CogVideoX1.5" in model_id
+
+
+ def get_gpu_memory_in_gib():
+     if not torch.cuda.is_available():
+         return 0
+
+     try:
+         total_memory_bytes = torch.cuda.get_device_properties(
+             torch.cuda.current_device(),
+         ).total_memory
+         total_memory_gib = total_memory_bytes / (1024**3)
+         return int(total_memory_gib)
+     except Exception:
+         return 0
+
+
+ pipe = CogVideoXPipeline.from_pretrained(
+     model_id,
+     torch_dtype=torch.bfloat16,
+ ).to("cuda")
+
+
+ if args.cache:
+     cache_options = {
+         "cache_type": CacheType.DBCache,
+         "warmup_steps": args.warmup_steps,
+         "max_cached_steps": -1,  # -1 means no limit
+         # Fn=1, Bn=0 means FB Cache; otherwise, Dual Block Cache
+         "Fn_compute_blocks": args.Fn_compute_blocks,  # Fn, F8, etc.
+         "Bn_compute_blocks": args.Bn_compute_blocks,  # Bn, B16, etc.
+         "residual_diff_threshold": args.rdt,
+         # relative token diff threshold, default is 0.0
+         "important_condition_threshold": 0.05,
+         # CFG: classifier-free guidance or not.
+         # CogVideoX fuses CFG and non-CFG into a single forward step,
+         # so we set do_separate_classifier_free_guidance to False.
+         "do_separate_classifier_free_guidance": False,
+         "cfg_compute_first": False,
+         "enable_taylorseer": args.taylorseer,
+         "enable_encoder_taylorseer": args.taylorseer,
+         # TaylorSeer cache type can be hidden_states or residual
+         "taylorseer_cache_type": "residual",
+         "taylorseer_kwargs": {
+             "n_derivatives": args.taylorseer_order,
+         },
+     }
+     cache_type_str = "DBCACHE"
+     cache_type_str = (
+         f"{cache_type_str}_F{args.Fn_compute_blocks}"
+         f"B{args.Bn_compute_blocks}W{args.warmup_steps}"
+         f"T{int(args.taylorseer)}O{args.taylorseer_order}"
+     )
+     print(f"cache options:\n{cache_options}")
+
+     apply_cache_on_pipe(pipe, **cache_options)
+ else:
+     cache_type_str = "NONE"
+
+
+ pipe.enable_model_cpu_offload()
+ assert isinstance(pipe.vae, AutoencoderKLCogVideoX)  # enable type check for IDE
+ pipe.vae.enable_slicing()
+ pipe.vae.enable_tiling()
+
+ start = time.time()
+ prompt = (
+     "A panda, dressed in a small, red jacket and a tiny hat, "
+     "sits on a wooden stool in a serene bamboo forest. The "
+     "panda's fluffy paws strum a miniature acoustic guitar, "
+     "producing soft, melodic tunes. Nearby, a few other pandas "
+     "gather, watching curiously and some clapping in rhythm. "
+     "Sunlight filters through the tall bamboo, casting a gentle "
+     "glow on the scene. The panda's face is expressive, showing "
+     "concentration and joy as it plays. The background includes "
+     "a small, flowing stream and vibrant green foliage, enhancing "
+     "the peaceful and magical atmosphere of this unique musical "
+     "performance."
+ )
+ video = pipe(
+     prompt=prompt,
+     num_videos_per_prompt=1,
+     num_inference_steps=50,
+     num_frames=(
+         # Avoid OOM for the CogVideoX1.5 model on GPUs under 48GB
+         16
+         if (is_cogvideox_1_5() and get_gpu_memory_in_gib() < 48)
+         else 49
+     ),
+     guidance_scale=6,
+     generator=torch.Generator("cpu").manual_seed(0),
+ ).frames[0]
+ end = time.time()
+
+ if hasattr(pipe.transformer, "_cached_steps"):
+     cached_steps = pipe.transformer._cached_steps
+     residual_diffs = pipe.transformer._residual_diffs
+     print(f"Cache Steps: {len(cached_steps)}, {cached_steps}")
+     print(f"Residual Diffs: {len(residual_diffs)}, {residual_diffs}")
+ if hasattr(pipe.transformer, "_cfg_cached_steps"):
+     cfg_cached_steps = pipe.transformer._cfg_cached_steps
+     cfg_residual_diffs = pipe.transformer._cfg_residual_diffs
+     print(f"CFG Cache Steps: {len(cfg_cached_steps)}, {cfg_cached_steps}")
+     print(
+         f"CFG Residual Diffs: {len(cfg_residual_diffs)}, {cfg_residual_diffs}"
+     )
+
+ time_cost = end - start
+ save_path = f"cogvideox.{cache_type_str}.mp4"
+ print(f"Time cost: {time_cost:.2f}s")
+ print(f"Saving video to {save_path}")
+ export_to_video(video, save_path, fps=8)
@@ -0,0 +1,96 @@
+ import os
+ import time
+ import torch
+ import argparse
+ from diffusers import FluxPipeline
+ from cache_dit.cache_factory import apply_cache_on_pipe, CacheType
+
+
+ def get_args() -> argparse.Namespace:
+     parser = argparse.ArgumentParser()
+     # General arguments
+     parser.add_argument("--cache", action="store_true", default=False)
+     parser.add_argument("--taylorseer", action="store_true", default=False)
+     parser.add_argument("--taylorseer-order", "--order", type=int, default=2)
+     parser.add_argument("--Fn-compute-blocks", "--Fn", type=int, default=1)
+     parser.add_argument("--Bn-compute-blocks", "--Bn", type=int, default=0)
+     parser.add_argument("--rdt", type=float, default=0.08)
+     parser.add_argument("--warmup-steps", type=int, default=0)
+     return parser.parse_args()
+
+
+ args = get_args()
+ print(args)
+
+
+ pipe = FluxPipeline.from_pretrained(
+     os.environ.get(
+         "FLUX_DIR",
+         "black-forest-labs/FLUX.1-dev",
+     ),
+     torch_dtype=torch.bfloat16,
+ ).to("cuda")
+
+
+ if args.cache:
+     cache_options = {
+         "cache_type": CacheType.DBCache,
+         "warmup_steps": args.warmup_steps,
+         "max_cached_steps": -1,  # -1 means no limit
+         # Fn=1, Bn=0 means FB Cache; otherwise, Dual Block Cache
+         "Fn_compute_blocks": args.Fn_compute_blocks,  # Fn, F8, etc.
+         "Bn_compute_blocks": args.Bn_compute_blocks,  # Bn, B16, etc.
+         "residual_diff_threshold": args.rdt,
+         # CFG: classifier-free guidance or not.
+         # FLUX.1-dev does not use CFG in its forward step, so we set
+         # do_separate_classifier_free_guidance to False.
+         "do_separate_classifier_free_guidance": False,
+         "cfg_compute_first": False,
+         "enable_taylorseer": args.taylorseer,
+         "enable_encoder_taylorseer": args.taylorseer,
+         # TaylorSeer cache type can be hidden_states or residual
+         "taylorseer_cache_type": "residual",
+         "taylorseer_kwargs": {
+             "n_derivatives": args.taylorseer_order,
+         },
+     }
+     cache_type_str = "DBCACHE"
+     cache_type_str = (
+         f"{cache_type_str}_F{args.Fn_compute_blocks}"
+         f"B{args.Bn_compute_blocks}W{args.warmup_steps}"
+         f"T{int(args.taylorseer)}O{args.taylorseer_order}"
+     )
+     print(f"cache options:\n{cache_options}")
+
+     apply_cache_on_pipe(pipe, **cache_options)
+ else:
+     cache_type_str = "NONE"
+
+
+ start = time.time()
+ image = pipe(
+     "A cat holding a sign that says hello world",
+     num_inference_steps=28,
+     generator=torch.Generator("cpu").manual_seed(0),
+ ).images[0]
+
+ end = time.time()
+
+ if hasattr(pipe.transformer, "_cached_steps"):
+     cached_steps = pipe.transformer._cached_steps
+     residual_diffs = pipe.transformer._residual_diffs
+     print(f"Cache Steps: {len(cached_steps)}, {cached_steps}")
+     print(f"Residual Diffs: {len(residual_diffs)}, {residual_diffs}")
+ if hasattr(pipe.transformer, "_cfg_cached_steps"):
+     cfg_cached_steps = pipe.transformer._cfg_cached_steps
+     cfg_residual_diffs = pipe.transformer._cfg_residual_diffs
+     print(f"CFG Cache Steps: {len(cfg_cached_steps)}, {cfg_cached_steps}")
+     print(
+         f"CFG Residual Diffs: {len(cfg_residual_diffs)}, {cfg_residual_diffs}"
+     )
+
+ time_cost = end - start
+ save_path = f"flux.{cache_type_str}.png"
+ print(f"Time cost: {time_cost:.2f}s")
+ print(f"Saving image to {save_path}")
+ image.save(save_path)
@@ -0,0 +1,100 @@
+ import os
+ import time
+ import torch
+ import argparse
+ from diffusers import FluxFillPipeline
+ from diffusers.utils import load_image
+ from cache_dit.cache_factory import apply_cache_on_pipe, CacheType
+
+
+ def get_args() -> argparse.Namespace:
+     parser = argparse.ArgumentParser()
+     # General arguments
+     parser.add_argument("--cache", action="store_true", default=False)
+     parser.add_argument("--taylorseer", action="store_true", default=False)
+     parser.add_argument("--taylorseer-order", "--order", type=int, default=2)
+     parser.add_argument("--Fn-compute-blocks", "--Fn", type=int, default=1)
+     parser.add_argument("--Bn-compute-blocks", "--Bn", type=int, default=0)
+     parser.add_argument("--rdt", type=float, default=0.08)
+     parser.add_argument("--warmup-steps", type=int, default=0)
+     return parser.parse_args()
+
+
+ args = get_args()
+ print(args)
+
+
+ pipe = FluxFillPipeline.from_pretrained(
+     os.environ.get(
+         "FLUX_FILL_DIR",
+         "black-forest-labs/FLUX.1-Fill-dev",
+     ),
+     torch_dtype=torch.bfloat16,
+ ).to("cuda")
+
+
+ if args.cache:
+     cache_options = {
+         "cache_type": CacheType.DBCache,
+         "warmup_steps": args.warmup_steps,
+         "max_cached_steps": -1,  # -1 means no limit
+         # Fn=1, Bn=0 means FB Cache; otherwise, Dual Block Cache
+         "Fn_compute_blocks": args.Fn_compute_blocks,  # Fn, F8, etc.
+         "Bn_compute_blocks": args.Bn_compute_blocks,  # Bn, B16, etc.
+         "residual_diff_threshold": args.rdt,
+         # CFG: classifier-free guidance or not.
+         # FLUX.1-dev does not use CFG in its forward step, so we set
+         # do_separate_classifier_free_guidance to False.
+         "do_separate_classifier_free_guidance": False,
+         "cfg_compute_first": False,
+         "enable_taylorseer": args.taylorseer,
+         "enable_encoder_taylorseer": args.taylorseer,
+         # TaylorSeer cache type can be hidden_states or residual
+         "taylorseer_cache_type": "residual",
+         "taylorseer_kwargs": {
+             "n_derivatives": args.taylorseer_order,
+         },
+     }
+     cache_type_str = "DBCACHE"
+     cache_type_str = (
+         f"{cache_type_str}_F{args.Fn_compute_blocks}"
+         f"B{args.Bn_compute_blocks}W{args.warmup_steps}"
+         f"T{int(args.taylorseer)}O{args.taylorseer_order}"
+     )
+     print(f"cache options:\n{cache_options}")
+
+     apply_cache_on_pipe(pipe, **cache_options)
+ else:
+     cache_type_str = "NONE"
+
+ start = time.time()
+ image = pipe(
+     prompt="a white paper cup",
+     image=load_image("data/cup.png"),
+     mask_image=load_image("data/cup_mask.png"),
+     guidance_scale=30,
+     num_inference_steps=28,
+     max_sequence_length=512,
+     generator=torch.Generator("cpu").manual_seed(0),
+ ).images[0]
+
+ end = time.time()
+
+ if hasattr(pipe.transformer, "_cached_steps"):
+     cached_steps = pipe.transformer._cached_steps
+     residual_diffs = pipe.transformer._residual_diffs
+     print(f"Cache Steps: {len(cached_steps)}, {cached_steps}")
+     print(f"Residual Diffs: {len(residual_diffs)}, {residual_diffs}")
+ if hasattr(pipe.transformer, "_cfg_cached_steps"):
+     cfg_cached_steps = pipe.transformer._cfg_cached_steps
+     cfg_residual_diffs = pipe.transformer._cfg_residual_diffs
+     print(f"CFG Cache Steps: {len(cfg_cached_steps)}, {cfg_cached_steps}")
+     print(
+         f"CFG Residual Diffs: {len(cfg_residual_diffs)}, {cfg_residual_diffs}"
+     )
+
+ time_cost = end - start
+ save_path = f"flux-fill.{cache_type_str}.png"
+ print(f"Time cost: {time_cost:.2f}s")
+ print(f"Saving image to {save_path}")
+ image.save(save_path)
@@ -0,0 +1,145 @@
+ # Adapted from: https://github.com/chengzeyi/ParaAttention/blob/main/first_block_cache_examples/run_hunyuan_video.py
+ import os
+ import time
+ import torch
+ import argparse
+ from diffusers.utils import export_to_video
+ from diffusers import (
+     HunyuanVideoPipeline,
+     HunyuanVideoTransformer3DModel,
+     AutoencoderKLHunyuanVideo,
+ )
+ from cache_dit.cache_factory import apply_cache_on_pipe, CacheType
+
+
+ def get_args() -> argparse.Namespace:
+     parser = argparse.ArgumentParser()
+     # General arguments
+     parser.add_argument("--cache", action="store_true", default=False)
+     parser.add_argument("--taylorseer", action="store_true", default=False)
+     parser.add_argument("--taylorseer-order", "--order", type=int, default=2)
+     parser.add_argument("--Fn-compute-blocks", "--Fn", type=int, default=1)
+     parser.add_argument("--Bn-compute-blocks", "--Bn", type=int, default=0)
+     parser.add_argument("--rdt", type=float, default=0.08)
+     parser.add_argument("--warmup-steps", type=int, default=0)
+     return parser.parse_args()
+
+
+ args = get_args()
+ print(args)
+
+
+ def get_gpu_memory_in_gib():
+     if not torch.cuda.is_available():
+         return 0
+
+     try:
+         total_memory_bytes = torch.cuda.get_device_properties(
+             torch.cuda.current_device(),
+         ).total_memory
+         total_memory_gib = total_memory_bytes / (1024**3)
+         return int(total_memory_gib)
+     except Exception:
+         return 0
+
+
+ model_id = os.environ.get("HUNYUAN_DIR", "tencent/HunyuanVideo")
+ transformer = HunyuanVideoTransformer3DModel.from_pretrained(
+     model_id,
+     subfolder="transformer",
+     torch_dtype=torch.bfloat16,
+     revision="refs/pr/18",
+ )
+ pipe = HunyuanVideoPipeline.from_pretrained(
+     model_id,
+     transformer=transformer,
+     torch_dtype=torch.float16,
+     revision="refs/pr/18",
+ ).to("cuda")
+
+
+ if args.cache:
+     cache_options = {
+         "cache_type": CacheType.DBCache,
+         "warmup_steps": args.warmup_steps,
+         "max_cached_steps": -1,  # -1 means no limit
+         # Fn=1, Bn=0 means FB Cache; otherwise, Dual Block Cache
+         "Fn_compute_blocks": args.Fn_compute_blocks,  # Fn, F8, etc.
+         "Bn_compute_blocks": args.Bn_compute_blocks,  # Bn, B16, etc.
+         "residual_diff_threshold": args.rdt,
+         # CFG: classifier-free guidance or not.
+         # For models that fuse CFG and non-CFG into a single forward
+         # step, set do_separate_classifier_free_guidance to False.
+         # NOTE: set it to True if true_cfg_scale > 1 and a negative
+         # prompt is given for HunyuanVideoPipeline.
+         "do_separate_classifier_free_guidance": False,
+         "cfg_compute_first": False,
+         "enable_taylorseer": args.taylorseer,
+         "enable_encoder_taylorseer": args.taylorseer,
+         # TaylorSeer cache type can be hidden_states or residual
+         "taylorseer_cache_type": "residual",
+         "taylorseer_kwargs": {
+             "n_derivatives": args.taylorseer_order,
+         },
+     }
+     cache_type_str = "DBCACHE"
+     cache_type_str = (
+         f"{cache_type_str}_F{args.Fn_compute_blocks}"
+         f"B{args.Bn_compute_blocks}W{args.warmup_steps}"
+         f"T{int(args.taylorseer)}O{args.taylorseer_order}"
+     )
+     print(f"cache options:\n{cache_options}")
+
+     apply_cache_on_pipe(pipe, **cache_options)
+ else:
+     cache_type_str = "NONE"
+
+ assert isinstance(
+     pipe.vae, AutoencoderKLHunyuanVideo
+ )  # enable type check for IDE
+
+ # Enable memory savings
+ pipe.enable_model_cpu_offload()
+ if get_gpu_memory_in_gib() <= 48:
+     pipe.vae.enable_tiling(
+         # Make it runnable on GPUs with 48GB memory
+         tile_sample_min_height=128,
+         tile_sample_stride_height=96,
+         tile_sample_min_width=128,
+         tile_sample_stride_width=96,
+         tile_sample_min_num_frames=32,
+         tile_sample_stride_num_frames=24,
+     )
+ else:
+     pipe.vae.enable_tiling()
+
+
+ start = time.time()
+ output = pipe(
+     prompt="A cat walks on the grass, realistic",
+     height=720,
+     width=1280,
+     num_frames=129,
+     num_inference_steps=30,
+     generator=torch.Generator("cpu").manual_seed(0),
+ ).frames[0]
+ end = time.time()
+
+ if hasattr(pipe.transformer, "_cached_steps"):
+     cached_steps = pipe.transformer._cached_steps
+     residual_diffs = pipe.transformer._residual_diffs
+     print(f"Cache Steps: {len(cached_steps)}, {cached_steps}")
+     print(f"Residual Diffs: {len(residual_diffs)}, {residual_diffs}")
+ if hasattr(pipe.transformer, "_cfg_cached_steps"):
+     cfg_cached_steps = pipe.transformer._cfg_cached_steps
+     cfg_residual_diffs = pipe.transformer._cfg_residual_diffs
+     print(f"CFG Cache Steps: {len(cfg_cached_steps)}, {cfg_cached_steps}")
+     print(
+         f"CFG Residual Diffs: {len(cfg_residual_diffs)}, {cfg_residual_diffs}"
+     )
+
+ time_cost = end - start
+ save_path = f"hunyuan_video.{cache_type_str}.mp4"
+ print(f"Time cost: {time_cost:.2f}s")
+ print(f"Saving video to {save_path}")
+ export_to_video(output, save_path, fps=15)
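
As the NOTE in `run_hunyuan_video.py` points out, the CFG setting flips once true CFG is enabled. A hedged sketch (not part of the diff), reusing `pipe` from the script above and assuming the `negative_prompt` and `true_cfg_scale` arguments of `HunyuanVideoPipeline`; all values are illustrative:

```python
# Assumption based on the NOTE above: with true_cfg_scale > 1 and a
# negative prompt, HunyuanVideoPipeline runs CFG as a separate forward
# pass, so do_separate_classifier_free_guidance flips to True.
apply_cache_on_pipe(
    pipe,
    cache_type=CacheType.DBCache,
    Fn_compute_blocks=8,
    Bn_compute_blocks=0,
    residual_diff_threshold=0.08,
    do_separate_classifier_free_guidance=True,
    cfg_compute_first=False,
)

output = pipe(
    prompt="A cat walks on the grass, realistic",
    negative_prompt="blurry, low quality",  # enables the CFG branch
    true_cfg_scale=2.0,
    num_inference_steps=30,
    generator=torch.Generator("cpu").manual_seed(0),
).frames[0]
```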