diffsynth 1.0.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (120)
  1. diffsynth/__init__.py +6 -0
  2. diffsynth/configs/__init__.py +0 -0
  3. diffsynth/configs/model_config.py +243 -0
  4. diffsynth/controlnets/__init__.py +2 -0
  5. diffsynth/controlnets/controlnet_unit.py +53 -0
  6. diffsynth/controlnets/processors.py +51 -0
  7. diffsynth/data/__init__.py +1 -0
  8. diffsynth/data/simple_text_image.py +35 -0
  9. diffsynth/data/video.py +148 -0
  10. diffsynth/extensions/ESRGAN/__init__.py +118 -0
  11. diffsynth/extensions/FastBlend/__init__.py +63 -0
  12. diffsynth/extensions/FastBlend/api.py +397 -0
  13. diffsynth/extensions/FastBlend/cupy_kernels.py +119 -0
  14. diffsynth/extensions/FastBlend/data.py +146 -0
  15. diffsynth/extensions/FastBlend/patch_match.py +298 -0
  16. diffsynth/extensions/FastBlend/runners/__init__.py +4 -0
  17. diffsynth/extensions/FastBlend/runners/accurate.py +35 -0
  18. diffsynth/extensions/FastBlend/runners/balanced.py +46 -0
  19. diffsynth/extensions/FastBlend/runners/fast.py +141 -0
  20. diffsynth/extensions/FastBlend/runners/interpolation.py +121 -0
  21. diffsynth/extensions/RIFE/__init__.py +242 -0
  22. diffsynth/extensions/__init__.py +0 -0
  23. diffsynth/models/__init__.py +1 -0
  24. diffsynth/models/attention.py +89 -0
  25. diffsynth/models/downloader.py +66 -0
  26. diffsynth/models/hunyuan_dit.py +451 -0
  27. diffsynth/models/hunyuan_dit_text_encoder.py +163 -0
  28. diffsynth/models/kolors_text_encoder.py +1363 -0
  29. diffsynth/models/lora.py +195 -0
  30. diffsynth/models/model_manager.py +536 -0
  31. diffsynth/models/sd3_dit.py +798 -0
  32. diffsynth/models/sd3_text_encoder.py +1107 -0
  33. diffsynth/models/sd3_vae_decoder.py +81 -0
  34. diffsynth/models/sd3_vae_encoder.py +95 -0
  35. diffsynth/models/sd_controlnet.py +588 -0
  36. diffsynth/models/sd_ipadapter.py +57 -0
  37. diffsynth/models/sd_motion.py +199 -0
  38. diffsynth/models/sd_text_encoder.py +321 -0
  39. diffsynth/models/sd_unet.py +1108 -0
  40. diffsynth/models/sd_vae_decoder.py +336 -0
  41. diffsynth/models/sd_vae_encoder.py +282 -0
  42. diffsynth/models/sdxl_ipadapter.py +122 -0
  43. diffsynth/models/sdxl_motion.py +104 -0
  44. diffsynth/models/sdxl_text_encoder.py +759 -0
  45. diffsynth/models/sdxl_unet.py +1899 -0
  46. diffsynth/models/sdxl_vae_decoder.py +24 -0
  47. diffsynth/models/sdxl_vae_encoder.py +24 -0
  48. diffsynth/models/svd_image_encoder.py +505 -0
  49. diffsynth/models/svd_unet.py +2004 -0
  50. diffsynth/models/svd_vae_decoder.py +578 -0
  51. diffsynth/models/svd_vae_encoder.py +139 -0
  52. diffsynth/models/tiler.py +106 -0
  53. diffsynth/pipelines/__init__.py +9 -0
  54. diffsynth/pipelines/base.py +34 -0
  55. diffsynth/pipelines/dancer.py +178 -0
  56. diffsynth/pipelines/hunyuan_image.py +274 -0
  57. diffsynth/pipelines/pipeline_runner.py +105 -0
  58. diffsynth/pipelines/sd3_image.py +132 -0
  59. diffsynth/pipelines/sd_image.py +173 -0
  60. diffsynth/pipelines/sd_video.py +266 -0
  61. diffsynth/pipelines/sdxl_image.py +191 -0
  62. diffsynth/pipelines/sdxl_video.py +223 -0
  63. diffsynth/pipelines/svd_video.py +297 -0
  64. diffsynth/processors/FastBlend.py +142 -0
  65. diffsynth/processors/PILEditor.py +28 -0
  66. diffsynth/processors/RIFE.py +77 -0
  67. diffsynth/processors/__init__.py +0 -0
  68. diffsynth/processors/base.py +6 -0
  69. diffsynth/processors/sequencial_processor.py +41 -0
  70. diffsynth/prompters/__init__.py +6 -0
  71. diffsynth/prompters/base_prompter.py +57 -0
  72. diffsynth/prompters/hunyuan_dit_prompter.py +69 -0
  73. diffsynth/prompters/kolors_prompter.py +353 -0
  74. diffsynth/prompters/prompt_refiners.py +77 -0
  75. diffsynth/prompters/sd3_prompter.py +92 -0
  76. diffsynth/prompters/sd_prompter.py +73 -0
  77. diffsynth/prompters/sdxl_prompter.py +61 -0
  78. diffsynth/schedulers/__init__.py +3 -0
  79. diffsynth/schedulers/continuous_ode.py +59 -0
  80. diffsynth/schedulers/ddim.py +79 -0
  81. diffsynth/schedulers/flow_match.py +51 -0
  82. diffsynth/tokenizer_configs/__init__.py +0 -0
  83. diffsynth/tokenizer_configs/hunyuan_dit/tokenizer/special_tokens_map.json +7 -0
  84. diffsynth/tokenizer_configs/hunyuan_dit/tokenizer/tokenizer_config.json +16 -0
  85. diffsynth/tokenizer_configs/hunyuan_dit/tokenizer/vocab.txt +47020 -0
  86. diffsynth/tokenizer_configs/hunyuan_dit/tokenizer/vocab_org.txt +21128 -0
  87. diffsynth/tokenizer_configs/hunyuan_dit/tokenizer_t5/config.json +28 -0
  88. diffsynth/tokenizer_configs/hunyuan_dit/tokenizer_t5/special_tokens_map.json +1 -0
  89. diffsynth/tokenizer_configs/hunyuan_dit/tokenizer_t5/spiece.model +0 -0
  90. diffsynth/tokenizer_configs/hunyuan_dit/tokenizer_t5/tokenizer_config.json +1 -0
  91. diffsynth/tokenizer_configs/kolors/tokenizer/tokenizer.model +0 -0
  92. diffsynth/tokenizer_configs/kolors/tokenizer/tokenizer_config.json +12 -0
  93. diffsynth/tokenizer_configs/kolors/tokenizer/vocab.txt +0 -0
  94. diffsynth/tokenizer_configs/stable_diffusion/tokenizer/merges.txt +48895 -0
  95. diffsynth/tokenizer_configs/stable_diffusion/tokenizer/special_tokens_map.json +24 -0
  96. diffsynth/tokenizer_configs/stable_diffusion/tokenizer/tokenizer_config.json +34 -0
  97. diffsynth/tokenizer_configs/stable_diffusion/tokenizer/vocab.json +49410 -0
  98. diffsynth/tokenizer_configs/stable_diffusion_3/tokenizer_1/merges.txt +48895 -0
  99. diffsynth/tokenizer_configs/stable_diffusion_3/tokenizer_1/special_tokens_map.json +30 -0
  100. diffsynth/tokenizer_configs/stable_diffusion_3/tokenizer_1/tokenizer_config.json +30 -0
  101. diffsynth/tokenizer_configs/stable_diffusion_3/tokenizer_1/vocab.json +49410 -0
  102. diffsynth/tokenizer_configs/stable_diffusion_3/tokenizer_2/merges.txt +48895 -0
  103. diffsynth/tokenizer_configs/stable_diffusion_3/tokenizer_2/special_tokens_map.json +30 -0
  104. diffsynth/tokenizer_configs/stable_diffusion_3/tokenizer_2/tokenizer_config.json +38 -0
  105. diffsynth/tokenizer_configs/stable_diffusion_3/tokenizer_2/vocab.json +49410 -0
  106. diffsynth/tokenizer_configs/stable_diffusion_3/tokenizer_3/special_tokens_map.json +125 -0
  107. diffsynth/tokenizer_configs/stable_diffusion_3/tokenizer_3/spiece.model +0 -0
  108. diffsynth/tokenizer_configs/stable_diffusion_3/tokenizer_3/tokenizer.json +129428 -0
  109. diffsynth/tokenizer_configs/stable_diffusion_3/tokenizer_3/tokenizer_config.json +940 -0
  110. diffsynth/tokenizer_configs/stable_diffusion_xl/tokenizer_2/merges.txt +40213 -0
  111. diffsynth/tokenizer_configs/stable_diffusion_xl/tokenizer_2/special_tokens_map.json +24 -0
  112. diffsynth/tokenizer_configs/stable_diffusion_xl/tokenizer_2/tokenizer_config.json +38 -0
  113. diffsynth/tokenizer_configs/stable_diffusion_xl/tokenizer_2/vocab.json +49411 -0
  114. diffsynth/trainers/__init__.py +0 -0
  115. diffsynth/trainers/text_to_image.py +253 -0
  116. diffsynth-1.0.0.dist-info/LICENSE +201 -0
  117. diffsynth-1.0.0.dist-info/METADATA +23 -0
  118. diffsynth-1.0.0.dist-info/RECORD +120 -0
  119. diffsynth-1.0.0.dist-info/WHEEL +5 -0
  120. diffsynth-1.0.0.dist-info/top_level.txt +1 -0
@@ -0,0 +1,1108 @@
1
+ import torch, math
2
+ from .attention import Attention
3
+ from .tiler import TileWorker
4
+
5
+
6
class Timesteps(torch.nn.Module):
    """Sinusoidal timestep embedding: cosine features followed by sine features."""

    def __init__(self, num_channels):
        super().__init__()
        # Output dimensionality of the embedding (cos half + sin half).
        self.num_channels = num_channels

    def forward(self, timesteps):
        half_dim = self.num_channels // 2
        # Log-spaced frequencies exp(-log(10000) * k / half_dim), k = 0..half_dim-1.
        freq_idx = torch.arange(start=0, end=half_dim, dtype=torch.float32, device=timesteps.device)
        frequencies = torch.exp(-math.log(10000) * freq_idx / half_dim)
        # One angle per (timestep, frequency) pair.
        angles = timesteps.unsqueeze(-1).float() * frequencies
        # Note the cos-first ordering; checkpoints depend on it.
        return torch.cat([angles.cos(), angles.sin()], dim=-1)
18
+
19
+
20
class GEGLU(torch.nn.Module):
    """Gated GELU feed-forward activation: value half times gelu(gate half)."""

    def __init__(self, dim_in, dim_out):
        super().__init__()
        # A single projection produces both the value and the gate halves.
        self.proj = torch.nn.Linear(dim_in, dim_out * 2)

    def forward(self, hidden_states):
        value, gate = torch.chunk(self.proj(hidden_states), 2, dim=-1)
        return value * torch.nn.functional.gelu(gate)
29
+
30
+
31
class BasicTransformerBlock(torch.nn.Module):
    """Standard SD transformer block: self-attention, cross-attention, GEGLU FFN.

    Each stage is pre-norm (LayerNorm before the sublayer) with a residual add.
    """

    def __init__(self, dim, num_attention_heads, attention_head_dim, cross_attention_dim):
        super().__init__()

        # 1. Self-attention over the spatial tokens.
        self.norm1 = torch.nn.LayerNorm(dim, elementwise_affine=True)
        self.attn1 = Attention(q_dim=dim, num_heads=num_attention_heads, head_dim=attention_head_dim, bias_out=True)

        # 2. Cross-attention against the text embedding.
        self.norm2 = torch.nn.LayerNorm(dim, elementwise_affine=True)
        self.attn2 = Attention(q_dim=dim, kv_dim=cross_attention_dim, num_heads=num_attention_heads, head_dim=attention_head_dim, bias_out=True)

        # 3. Feed-forward: GEGLU expands 4x, linear projects back to dim.
        self.norm3 = torch.nn.LayerNorm(dim, elementwise_affine=True)
        self.act_fn = GEGLU(dim, dim * 4)
        self.ff = torch.nn.Linear(dim * 4, dim)

    def forward(self, hidden_states, encoder_hidden_states, ipadapter_kwargs=None):
        # Self-attention + residual.
        hidden_states = hidden_states + self.attn1(self.norm1(hidden_states), encoder_hidden_states=None)
        # Cross-attention + residual (optionally with IP-Adapter kwargs).
        hidden_states = hidden_states + self.attn2(
            self.norm2(hidden_states),
            encoder_hidden_states=encoder_hidden_states,
            ipadapter_kwargs=ipadapter_kwargs,
        )
        # Feed-forward + residual.
        hidden_states = hidden_states + self.ff(self.act_fn(self.norm3(hidden_states)))
        return hidden_states
68
+
69
+
70
class DownSampler(torch.nn.Module):
    """Halves spatial resolution with a stride-2 3x3 convolution."""

    def __init__(self, channels, padding=1, extra_padding=False):
        super().__init__()
        self.conv = torch.nn.Conv2d(channels, channels, 3, stride=2, padding=padding)
        # Some checkpoints pad asymmetrically (right/bottom only) instead of
        # relying on the convolution's symmetric padding.
        self.extra_padding = extra_padding

    def forward(self, hidden_states, time_emb, text_emb, res_stack, **kwargs):
        if self.extra_padding:
            # Zero-pad one pixel on the right and one on the bottom.
            hidden_states = torch.nn.functional.pad(hidden_states, (0, 1, 0, 1), mode="constant", value=0)
        return self.conv(hidden_states), time_emb, text_emb, res_stack
81
+
82
+
83
class UpSampler(torch.nn.Module):
    """Doubles spatial resolution: nearest-neighbor upsample then 3x3 conv."""

    def __init__(self, channels):
        super().__init__()
        self.conv = torch.nn.Conv2d(channels, channels, 3, padding=1)

    def forward(self, hidden_states, time_emb, text_emb, res_stack, **kwargs):
        upsampled = torch.nn.functional.interpolate(hidden_states, scale_factor=2.0, mode="nearest")
        return self.conv(upsampled), time_emb, text_emb, res_stack
92
+
93
+
94
class ResnetBlock(torch.nn.Module):
    """GroupNorm/SiLU/Conv residual block with optional timestep conditioning."""

    def __init__(self, in_channels, out_channels, temb_channels=None, groups=32, eps=1e-5):
        super().__init__()
        self.norm1 = torch.nn.GroupNorm(num_groups=groups, num_channels=in_channels, eps=eps, affine=True)
        self.conv1 = torch.nn.Conv2d(in_channels, out_channels, kernel_size=3, stride=1, padding=1)
        if temb_channels is not None:
            # Projects the timestep embedding into the block's feature space.
            self.time_emb_proj = torch.nn.Linear(temb_channels, out_channels)
        self.norm2 = torch.nn.GroupNorm(num_groups=groups, num_channels=out_channels, eps=eps, affine=True)
        self.conv2 = torch.nn.Conv2d(out_channels, out_channels, kernel_size=3, stride=1, padding=1)
        self.nonlinearity = torch.nn.SiLU()
        # 1x1 projection for the skip path when channel counts differ.
        self.conv_shortcut = None
        if in_channels != out_channels:
            self.conv_shortcut = torch.nn.Conv2d(in_channels, out_channels, kernel_size=1, stride=1, padding=0, bias=True)

    def forward(self, hidden_states, time_emb, text_emb, res_stack, **kwargs):
        residual = hidden_states
        branch = self.conv1(self.nonlinearity(self.norm1(hidden_states)))
        if time_emb is not None:
            # Broadcast the projected embedding over the spatial dimensions.
            branch = branch + self.time_emb_proj(self.nonlinearity(time_emb))[:, :, None, None]
        branch = self.conv2(self.nonlinearity(self.norm2(branch)))
        if self.conv_shortcut is not None:
            residual = self.conv_shortcut(residual)
        return residual + branch, time_emb, text_emb, res_stack
124
+
125
+
126
class AttentionBlock(torch.nn.Module):
    """Spatial transformer over an NCHW feature map.

    Flattens the spatial dimensions into a token sequence, runs `num_layers`
    transformer blocks (self-attn + cross-attn against `text_emb`), then
    optionally projects back and adds a residual connection. Supports tiled
    processing for large resolutions and cross-frame attention for video.
    """

    def __init__(self, num_attention_heads, attention_head_dim, in_channels, num_layers=1, cross_attention_dim=None, norm_num_groups=32, eps=1e-5, need_proj_out=True):
        super().__init__()
        inner_dim = num_attention_heads * attention_head_dim

        self.norm = torch.nn.GroupNorm(num_groups=norm_num_groups, num_channels=in_channels, eps=eps, affine=True)
        self.proj_in = torch.nn.Linear(in_channels, inner_dim)

        self.transformer_blocks = torch.nn.ModuleList([
            BasicTransformerBlock(
                inner_dim,
                num_attention_heads,
                attention_head_dim,
                cross_attention_dim=cross_attention_dim
            )
            for d in range(num_layers)
        ])
        # Some variants skip the output projection (and the residual add).
        self.need_proj_out = need_proj_out
        if need_proj_out:
            self.proj_out = torch.nn.Linear(inner_dim, in_channels)

    def forward(
        self,
        hidden_states, time_emb, text_emb, res_stack,
        cross_frame_attention=False,
        tiled=False, tile_size=64, tile_stride=32,
        ipadapter_kwargs_list={},
        **kwargs
    ):
        # NOTE(review): `ipadapter_kwargs_list={}` is a mutable default; it is
        # only read here (via .get), so it is currently safe — keep it read-only.
        batch, _, height, width = hidden_states.shape
        residual = hidden_states

        hidden_states = self.norm(hidden_states)
        # inner_dim is read from the channel axis BEFORE proj_in; this assumes
        # in_channels == num_attention_heads * attention_head_dim (true for SD).
        inner_dim = hidden_states.shape[1]
        # NCHW -> (batch, height*width, channels): one token per pixel.
        hidden_states = hidden_states.permute(0, 2, 3, 1).reshape(batch, height * width, inner_dim)
        hidden_states = self.proj_in(hidden_states)

        if cross_frame_attention:
            # Merge all frames into one long sequence so attention spans frames;
            # average the per-frame text embeddings into a single context.
            hidden_states = hidden_states.reshape(1, batch * height * width, inner_dim)
            encoder_hidden_states = text_emb.mean(dim=0, keepdim=True)
        else:
            encoder_hidden_states = text_emb
            if encoder_hidden_states.shape[0] != hidden_states.shape[0]:
                # Broadcast a single text embedding across the batch.
                encoder_hidden_states = encoder_hidden_states.repeat(hidden_states.shape[0], 1, 1)

        if tiled:
            # Never let a tile exceed the feature map itself.
            tile_size = min(tile_size, min(height, width))
            # Back to NCHW so TileWorker can slice spatial tiles.
            hidden_states = hidden_states.permute(0, 2, 1).reshape(batch, inner_dim, height, width)
            def block_tile_forward(x):
                # Runs one transformer block on a single NCHW tile. `block` is
                # the loop variable below; the closure is only invoked inside
                # the loop, so late binding resolves to the current block.
                b, c, h, w = x.shape
                x = x.permute(0, 2, 3, 1).reshape(b, h*w, c)
                x = block(x, encoder_hidden_states)
                x = x.reshape(b, h, w, c).permute(0, 3, 1, 2)
                return x
            for block in self.transformer_blocks:
                hidden_states = TileWorker().tiled_forward(
                    block_tile_forward,
                    hidden_states,
                    tile_size,
                    tile_stride,
                    tile_device=hidden_states.device,
                    tile_dtype=hidden_states.dtype
                )
            hidden_states = hidden_states.permute(0, 2, 3, 1).reshape(batch, height * width, inner_dim)
        else:
            for block_id, block in enumerate(self.transformer_blocks):
                hidden_states = block(
                    hidden_states,
                    encoder_hidden_states=encoder_hidden_states,
                    ipadapter_kwargs=ipadapter_kwargs_list.get(block_id, None)
                )
            if cross_frame_attention:
                # Split the merged sequence back into per-frame token batches.
                hidden_states = hidden_states.reshape(batch, height * width, inner_dim)

        if self.need_proj_out:
            hidden_states = self.proj_out(hidden_states)
            # Tokens back to NCHW, then the residual connection.
            hidden_states = hidden_states.reshape(batch, height, width, inner_dim).permute(0, 3, 1, 2).contiguous()
            hidden_states = hidden_states + residual
        else:
            # No projection / no residual: only restore the NCHW layout.
            hidden_states = hidden_states.reshape(batch, height, width, inner_dim).permute(0, 3, 1, 2).contiguous()

        return hidden_states, time_emb, text_emb, res_stack
209
+
210
+
211
class PushBlock(torch.nn.Module):
    """Records the current hidden states on the residual stack (skip connection)."""

    def __init__(self):
        super().__init__()

    def forward(self, hidden_states, time_emb, text_emb, res_stack, **kwargs):
        # Mutates the stack in place; everything else passes through unchanged.
        res_stack.append(hidden_states)
        return hidden_states, time_emb, text_emb, res_stack
218
+
219
+
220
class PopBlock(torch.nn.Module):
    """Pops a skip connection off the stack and concatenates it channel-wise."""

    def __init__(self):
        super().__init__()

    def forward(self, hidden_states, time_emb, text_emb, res_stack, **kwargs):
        skip = res_stack.pop()
        # Channel dimension (dim=1) concatenation, matching UNet skip wiring.
        merged = torch.cat([hidden_states, skip], dim=1)
        return merged, time_emb, text_emb, res_stack
228
+
229
+
230
class SDUNet(torch.nn.Module):
    """Stable Diffusion v1.x UNet, flattened into a single sequential block list.

    Instead of nested down/mid/up modules, the architecture is a flat list of
    Resnet/Attention/Push/Pop/Sampler blocks; Push/Pop blocks implement the
    skip connections via an explicit residual stack. Block order must match
    SDUNetStateDictConverter exactly — do not reorder.
    """
    def __init__(self):
        super().__init__()
        # Sinusoidal timestep features (320) -> 1280-dim embedding MLP.
        self.time_proj = Timesteps(320)
        self.time_embedding = torch.nn.Sequential(
            torch.nn.Linear(320, 1280),
            torch.nn.SiLU(),
            torch.nn.Linear(1280, 1280)
        )
        # 4 latent channels in, 320 feature channels.
        self.conv_in = torch.nn.Conv2d(4, 320, kernel_size=3, padding=1)

        # Comments name the equivalent Diffusers block each group corresponds to.
        self.blocks = torch.nn.ModuleList([
            # CrossAttnDownBlock2D
            ResnetBlock(320, 320, 1280),
            AttentionBlock(8, 40, 320, 1, 768, eps=1e-6),
            PushBlock(),
            ResnetBlock(320, 320, 1280),
            AttentionBlock(8, 40, 320, 1, 768, eps=1e-6),
            PushBlock(),
            DownSampler(320),
            PushBlock(),
            # CrossAttnDownBlock2D
            ResnetBlock(320, 640, 1280),
            AttentionBlock(8, 80, 640, 1, 768, eps=1e-6),
            PushBlock(),
            ResnetBlock(640, 640, 1280),
            AttentionBlock(8, 80, 640, 1, 768, eps=1e-6),
            PushBlock(),
            DownSampler(640),
            PushBlock(),
            # CrossAttnDownBlock2D
            ResnetBlock(640, 1280, 1280),
            AttentionBlock(8, 160, 1280, 1, 768, eps=1e-6),
            PushBlock(),
            ResnetBlock(1280, 1280, 1280),
            AttentionBlock(8, 160, 1280, 1, 768, eps=1e-6),
            PushBlock(),
            DownSampler(1280),
            PushBlock(),
            # DownBlock2D
            ResnetBlock(1280, 1280, 1280),
            PushBlock(),
            ResnetBlock(1280, 1280, 1280),
            PushBlock(),
            # UNetMidBlock2DCrossAttn
            ResnetBlock(1280, 1280, 1280),
            AttentionBlock(8, 160, 1280, 1, 768, eps=1e-6),
            ResnetBlock(1280, 1280, 1280),
            # UpBlock2D
            PopBlock(),
            ResnetBlock(2560, 1280, 1280),
            PopBlock(),
            ResnetBlock(2560, 1280, 1280),
            PopBlock(),
            ResnetBlock(2560, 1280, 1280),
            UpSampler(1280),
            # CrossAttnUpBlock2D
            PopBlock(),
            ResnetBlock(2560, 1280, 1280),
            AttentionBlock(8, 160, 1280, 1, 768, eps=1e-6),
            PopBlock(),
            ResnetBlock(2560, 1280, 1280),
            AttentionBlock(8, 160, 1280, 1, 768, eps=1e-6),
            PopBlock(),
            ResnetBlock(1920, 1280, 1280),
            AttentionBlock(8, 160, 1280, 1, 768, eps=1e-6),
            UpSampler(1280),
            # CrossAttnUpBlock2D
            PopBlock(),
            ResnetBlock(1920, 640, 1280),
            AttentionBlock(8, 80, 640, 1, 768, eps=1e-6),
            PopBlock(),
            ResnetBlock(1280, 640, 1280),
            AttentionBlock(8, 80, 640, 1, 768, eps=1e-6),
            PopBlock(),
            ResnetBlock(960, 640, 1280),
            AttentionBlock(8, 80, 640, 1, 768, eps=1e-6),
            UpSampler(640),
            # CrossAttnUpBlock2D
            PopBlock(),
            ResnetBlock(960, 320, 1280),
            AttentionBlock(8, 40, 320, 1, 768, eps=1e-6),
            PopBlock(),
            ResnetBlock(640, 320, 1280),
            AttentionBlock(8, 40, 320, 1, 768, eps=1e-6),
            PopBlock(),
            ResnetBlock(640, 320, 1280),
            AttentionBlock(8, 40, 320, 1, 768, eps=1e-6),
        ])

        # Final norm/activation/projection back to 4 latent channels.
        self.conv_norm_out = torch.nn.GroupNorm(num_channels=320, num_groups=32, eps=1e-5)
        self.conv_act = torch.nn.SiLU()
        self.conv_out = torch.nn.Conv2d(320, 4, kernel_size=3, padding=1)

    def forward(self, sample, timestep, encoder_hidden_states, **kwargs):
        # 1. Timestep -> sinusoidal features -> MLP embedding.
        time_emb = self.time_proj(timestep).to(sample.dtype)
        time_emb = self.time_embedding(time_emb)

        # 2. Pre-process: lift latents to 320 channels; seed the skip stack.
        hidden_states = self.conv_in(sample)
        text_emb = encoder_hidden_states
        res_stack = [hidden_states]

        # 3. Run the flat block list; each block threads the 4-tuple through.
        for i, block in enumerate(self.blocks):
            hidden_states, time_emb, text_emb, res_stack = block(hidden_states, time_emb, text_emb, res_stack)

        # 4. Project back to 4 latent channels (the predicted noise).
        hidden_states = self.conv_norm_out(hidden_states)
        hidden_states = self.conv_act(hidden_states)
        hidden_states = self.conv_out(hidden_states)

        return hidden_states

    @staticmethod
    def state_dict_converter():
        # Factory for the checkpoint-name converter matching this layout.
        return SDUNetStateDictConverter()
348
+
349
+
350
+ class SDUNetStateDictConverter:
351
    def __init__(self):
        # Stateless converter; nothing to initialize.
        pass
353
+
354
    def from_diffusers(self, state_dict):
        """Rename a Diffusers-format UNet state dict to SDUNet's flat `blocks.{i}` layout.

        Walks parameter names in sorted order, tracking a per-block-type cursor
        into `block_types` (the exact order of SDUNet.blocks) so each Diffusers
        sub-block maps to the right flat index.
        """
        # architecture: must mirror SDUNet.blocks exactly, entry for entry.
        block_types = [
            'ResnetBlock', 'AttentionBlock', 'PushBlock', 'ResnetBlock', 'AttentionBlock', 'PushBlock', 'DownSampler', 'PushBlock',
            'ResnetBlock', 'AttentionBlock', 'PushBlock', 'ResnetBlock', 'AttentionBlock', 'PushBlock', 'DownSampler', 'PushBlock',
            'ResnetBlock', 'AttentionBlock', 'PushBlock', 'ResnetBlock', 'AttentionBlock', 'PushBlock', 'DownSampler', 'PushBlock',
            'ResnetBlock', 'PushBlock', 'ResnetBlock', 'PushBlock',
            'ResnetBlock', 'AttentionBlock', 'ResnetBlock',
            'PopBlock', 'ResnetBlock', 'PopBlock', 'ResnetBlock', 'PopBlock', 'ResnetBlock', 'UpSampler',
            'PopBlock', 'ResnetBlock', 'AttentionBlock', 'PopBlock', 'ResnetBlock', 'AttentionBlock', 'PopBlock', 'ResnetBlock', 'AttentionBlock', 'UpSampler',
            'PopBlock', 'ResnetBlock', 'AttentionBlock', 'PopBlock', 'ResnetBlock', 'AttentionBlock', 'PopBlock', 'ResnetBlock', 'AttentionBlock', 'UpSampler',
            'PopBlock', 'ResnetBlock', 'AttentionBlock', 'PopBlock', 'ResnetBlock', 'AttentionBlock', 'PopBlock', 'ResnetBlock', 'AttentionBlock'
        ]

        # Rename each parameter. Sorted order is required: the cursor logic
        # assumes all params of one sub-block arrive consecutively.
        name_list = sorted([name for name in state_dict])
        rename_dict = {}
        # Per-type cursor into block_types (advanced lazily below).
        block_id = {"ResnetBlock": -1, "AttentionBlock": -1, "DownSampler": -1, "UpSampler": -1}
        # Last seen Diffusers sub-block prefix per type, to detect boundaries.
        last_block_type_with_id = {"ResnetBlock": "", "AttentionBlock": "", "DownSampler": "", "UpSampler": ""}
        for name in name_list:
            names = name.split(".")
            if names[0] in ["conv_in", "conv_norm_out", "conv_out"]:
                # Top-level convs keep their names verbatim.
                pass
            elif names[0] in ["time_embedding", "add_embedding"]:
                # add_embedding only exists in SDXL-style dicts; handled for reuse.
                if names[0] == "add_embedding":
                    names[0] = "add_time_embedding"
                # linear_1/linear_2 -> Sequential indices 0/2 (SiLU sits at 1).
                names[1] = {"linear_1": "0", "linear_2": "2"}[names[1]]
            elif names[0] in ["down_blocks", "mid_block", "up_blocks"]:
                if names[0] == "mid_block":
                    # Give mid_block a fake index so names[:4] slicing is uniform.
                    names.insert(1, "0")
                block_type = {"resnets": "ResnetBlock", "attentions": "AttentionBlock", "downsamplers": "DownSampler", "upsamplers": "UpSampler"}[names[2]]
                block_type_with_id = ".".join(names[:4])
                if block_type_with_id != last_block_type_with_id[block_type]:
                    # New Diffusers sub-block: advance this type's cursor.
                    block_id[block_type] += 1
                    last_block_type_with_id[block_type] = block_type_with_id
                # Skip forward to the next slot of the matching type.
                while block_id[block_type] < len(block_types) and block_types[block_id[block_type]] != block_type:
                    block_id[block_type] += 1
                block_type_with_id = ".".join(names[:4])
                names = ["blocks", str(block_id[block_type])] + names[4:]
                if "ff" in names:
                    # ff.net.0 (GEGLU) -> act_fn; ff.net.2 (Linear) -> ff.
                    ff_index = names.index("ff")
                    component = ".".join(names[ff_index:ff_index+3])
                    component = {"ff.net.0": "act_fn", "ff.net.2": "ff"}[component]
                    names = names[:ff_index] + [component] + names[ff_index+3:]
                if "to_out" in names:
                    # Drop the trailing ".0": to_out.0.weight -> to_out.weight.
                    names.pop(names.index("to_out") + 1)
            else:
                raise ValueError(f"Unknown parameters: {name}")
            rename_dict[name] = ".".join(names)

        # Convert state_dict, dropping the 1x1-conv dims where Diffusers stores
        # proj_in/proj_out as convolutions but SDUNet uses Linear layers.
        state_dict_ = {}
        for name, param in state_dict.items():
            if ".proj_in." in name or ".proj_out." in name:
                param = param.squeeze()
            state_dict_[rename_dict[name]] = param
        return state_dict_
411
+
412
+ def from_civitai(self, state_dict):
413
+ rename_dict = {
414
+ "model.diffusion_model.input_blocks.0.0.bias": "conv_in.bias",
415
+ "model.diffusion_model.input_blocks.0.0.weight": "conv_in.weight",
416
+ "model.diffusion_model.input_blocks.1.0.emb_layers.1.bias": "blocks.0.time_emb_proj.bias",
417
+ "model.diffusion_model.input_blocks.1.0.emb_layers.1.weight": "blocks.0.time_emb_proj.weight",
418
+ "model.diffusion_model.input_blocks.1.0.in_layers.0.bias": "blocks.0.norm1.bias",
419
+ "model.diffusion_model.input_blocks.1.0.in_layers.0.weight": "blocks.0.norm1.weight",
420
+ "model.diffusion_model.input_blocks.1.0.in_layers.2.bias": "blocks.0.conv1.bias",
421
+ "model.diffusion_model.input_blocks.1.0.in_layers.2.weight": "blocks.0.conv1.weight",
422
+ "model.diffusion_model.input_blocks.1.0.out_layers.0.bias": "blocks.0.norm2.bias",
423
+ "model.diffusion_model.input_blocks.1.0.out_layers.0.weight": "blocks.0.norm2.weight",
424
+ "model.diffusion_model.input_blocks.1.0.out_layers.3.bias": "blocks.0.conv2.bias",
425
+ "model.diffusion_model.input_blocks.1.0.out_layers.3.weight": "blocks.0.conv2.weight",
426
+ "model.diffusion_model.input_blocks.1.1.norm.bias": "blocks.1.norm.bias",
427
+ "model.diffusion_model.input_blocks.1.1.norm.weight": "blocks.1.norm.weight",
428
+ "model.diffusion_model.input_blocks.1.1.proj_in.bias": "blocks.1.proj_in.bias",
429
+ "model.diffusion_model.input_blocks.1.1.proj_in.weight": "blocks.1.proj_in.weight",
430
+ "model.diffusion_model.input_blocks.1.1.proj_out.bias": "blocks.1.proj_out.bias",
431
+ "model.diffusion_model.input_blocks.1.1.proj_out.weight": "blocks.1.proj_out.weight",
432
+ "model.diffusion_model.input_blocks.1.1.transformer_blocks.0.attn1.to_k.weight": "blocks.1.transformer_blocks.0.attn1.to_k.weight",
433
+ "model.diffusion_model.input_blocks.1.1.transformer_blocks.0.attn1.to_out.0.bias": "blocks.1.transformer_blocks.0.attn1.to_out.bias",
434
+ "model.diffusion_model.input_blocks.1.1.transformer_blocks.0.attn1.to_out.0.weight": "blocks.1.transformer_blocks.0.attn1.to_out.weight",
435
+ "model.diffusion_model.input_blocks.1.1.transformer_blocks.0.attn1.to_q.weight": "blocks.1.transformer_blocks.0.attn1.to_q.weight",
436
+ "model.diffusion_model.input_blocks.1.1.transformer_blocks.0.attn1.to_v.weight": "blocks.1.transformer_blocks.0.attn1.to_v.weight",
437
+ "model.diffusion_model.input_blocks.1.1.transformer_blocks.0.attn2.to_k.weight": "blocks.1.transformer_blocks.0.attn2.to_k.weight",
438
+ "model.diffusion_model.input_blocks.1.1.transformer_blocks.0.attn2.to_out.0.bias": "blocks.1.transformer_blocks.0.attn2.to_out.bias",
439
+ "model.diffusion_model.input_blocks.1.1.transformer_blocks.0.attn2.to_out.0.weight": "blocks.1.transformer_blocks.0.attn2.to_out.weight",
440
+ "model.diffusion_model.input_blocks.1.1.transformer_blocks.0.attn2.to_q.weight": "blocks.1.transformer_blocks.0.attn2.to_q.weight",
441
+ "model.diffusion_model.input_blocks.1.1.transformer_blocks.0.attn2.to_v.weight": "blocks.1.transformer_blocks.0.attn2.to_v.weight",
442
+ "model.diffusion_model.input_blocks.1.1.transformer_blocks.0.ff.net.0.proj.bias": "blocks.1.transformer_blocks.0.act_fn.proj.bias",
443
+ "model.diffusion_model.input_blocks.1.1.transformer_blocks.0.ff.net.0.proj.weight": "blocks.1.transformer_blocks.0.act_fn.proj.weight",
444
+ "model.diffusion_model.input_blocks.1.1.transformer_blocks.0.ff.net.2.bias": "blocks.1.transformer_blocks.0.ff.bias",
445
+ "model.diffusion_model.input_blocks.1.1.transformer_blocks.0.ff.net.2.weight": "blocks.1.transformer_blocks.0.ff.weight",
446
+ "model.diffusion_model.input_blocks.1.1.transformer_blocks.0.norm1.bias": "blocks.1.transformer_blocks.0.norm1.bias",
447
+ "model.diffusion_model.input_blocks.1.1.transformer_blocks.0.norm1.weight": "blocks.1.transformer_blocks.0.norm1.weight",
448
+ "model.diffusion_model.input_blocks.1.1.transformer_blocks.0.norm2.bias": "blocks.1.transformer_blocks.0.norm2.bias",
449
+ "model.diffusion_model.input_blocks.1.1.transformer_blocks.0.norm2.weight": "blocks.1.transformer_blocks.0.norm2.weight",
450
+ "model.diffusion_model.input_blocks.1.1.transformer_blocks.0.norm3.bias": "blocks.1.transformer_blocks.0.norm3.bias",
451
+ "model.diffusion_model.input_blocks.1.1.transformer_blocks.0.norm3.weight": "blocks.1.transformer_blocks.0.norm3.weight",
452
+ "model.diffusion_model.input_blocks.10.0.emb_layers.1.bias": "blocks.24.time_emb_proj.bias",
453
+ "model.diffusion_model.input_blocks.10.0.emb_layers.1.weight": "blocks.24.time_emb_proj.weight",
454
+ "model.diffusion_model.input_blocks.10.0.in_layers.0.bias": "blocks.24.norm1.bias",
455
+ "model.diffusion_model.input_blocks.10.0.in_layers.0.weight": "blocks.24.norm1.weight",
456
+ "model.diffusion_model.input_blocks.10.0.in_layers.2.bias": "blocks.24.conv1.bias",
457
+ "model.diffusion_model.input_blocks.10.0.in_layers.2.weight": "blocks.24.conv1.weight",
458
+ "model.diffusion_model.input_blocks.10.0.out_layers.0.bias": "blocks.24.norm2.bias",
459
+ "model.diffusion_model.input_blocks.10.0.out_layers.0.weight": "blocks.24.norm2.weight",
460
+ "model.diffusion_model.input_blocks.10.0.out_layers.3.bias": "blocks.24.conv2.bias",
461
+ "model.diffusion_model.input_blocks.10.0.out_layers.3.weight": "blocks.24.conv2.weight",
462
+ "model.diffusion_model.input_blocks.11.0.emb_layers.1.bias": "blocks.26.time_emb_proj.bias",
463
+ "model.diffusion_model.input_blocks.11.0.emb_layers.1.weight": "blocks.26.time_emb_proj.weight",
464
+ "model.diffusion_model.input_blocks.11.0.in_layers.0.bias": "blocks.26.norm1.bias",
465
+ "model.diffusion_model.input_blocks.11.0.in_layers.0.weight": "blocks.26.norm1.weight",
466
+ "model.diffusion_model.input_blocks.11.0.in_layers.2.bias": "blocks.26.conv1.bias",
467
+ "model.diffusion_model.input_blocks.11.0.in_layers.2.weight": "blocks.26.conv1.weight",
468
+ "model.diffusion_model.input_blocks.11.0.out_layers.0.bias": "blocks.26.norm2.bias",
469
+ "model.diffusion_model.input_blocks.11.0.out_layers.0.weight": "blocks.26.norm2.weight",
470
+ "model.diffusion_model.input_blocks.11.0.out_layers.3.bias": "blocks.26.conv2.bias",
471
+ "model.diffusion_model.input_blocks.11.0.out_layers.3.weight": "blocks.26.conv2.weight",
472
+ "model.diffusion_model.input_blocks.2.0.emb_layers.1.bias": "blocks.3.time_emb_proj.bias",
473
+ "model.diffusion_model.input_blocks.2.0.emb_layers.1.weight": "blocks.3.time_emb_proj.weight",
474
+ "model.diffusion_model.input_blocks.2.0.in_layers.0.bias": "blocks.3.norm1.bias",
475
+ "model.diffusion_model.input_blocks.2.0.in_layers.0.weight": "blocks.3.norm1.weight",
476
+ "model.diffusion_model.input_blocks.2.0.in_layers.2.bias": "blocks.3.conv1.bias",
477
+ "model.diffusion_model.input_blocks.2.0.in_layers.2.weight": "blocks.3.conv1.weight",
478
+ "model.diffusion_model.input_blocks.2.0.out_layers.0.bias": "blocks.3.norm2.bias",
479
+ "model.diffusion_model.input_blocks.2.0.out_layers.0.weight": "blocks.3.norm2.weight",
480
+ "model.diffusion_model.input_blocks.2.0.out_layers.3.bias": "blocks.3.conv2.bias",
481
+ "model.diffusion_model.input_blocks.2.0.out_layers.3.weight": "blocks.3.conv2.weight",
482
+ "model.diffusion_model.input_blocks.2.1.norm.bias": "blocks.4.norm.bias",
483
+ "model.diffusion_model.input_blocks.2.1.norm.weight": "blocks.4.norm.weight",
484
+ "model.diffusion_model.input_blocks.2.1.proj_in.bias": "blocks.4.proj_in.bias",
485
+ "model.diffusion_model.input_blocks.2.1.proj_in.weight": "blocks.4.proj_in.weight",
486
+ "model.diffusion_model.input_blocks.2.1.proj_out.bias": "blocks.4.proj_out.bias",
487
+ "model.diffusion_model.input_blocks.2.1.proj_out.weight": "blocks.4.proj_out.weight",
488
+ "model.diffusion_model.input_blocks.2.1.transformer_blocks.0.attn1.to_k.weight": "blocks.4.transformer_blocks.0.attn1.to_k.weight",
489
+ "model.diffusion_model.input_blocks.2.1.transformer_blocks.0.attn1.to_out.0.bias": "blocks.4.transformer_blocks.0.attn1.to_out.bias",
490
+ "model.diffusion_model.input_blocks.2.1.transformer_blocks.0.attn1.to_out.0.weight": "blocks.4.transformer_blocks.0.attn1.to_out.weight",
491
+ "model.diffusion_model.input_blocks.2.1.transformer_blocks.0.attn1.to_q.weight": "blocks.4.transformer_blocks.0.attn1.to_q.weight",
492
+ "model.diffusion_model.input_blocks.2.1.transformer_blocks.0.attn1.to_v.weight": "blocks.4.transformer_blocks.0.attn1.to_v.weight",
493
+ "model.diffusion_model.input_blocks.2.1.transformer_blocks.0.attn2.to_k.weight": "blocks.4.transformer_blocks.0.attn2.to_k.weight",
494
+ "model.diffusion_model.input_blocks.2.1.transformer_blocks.0.attn2.to_out.0.bias": "blocks.4.transformer_blocks.0.attn2.to_out.bias",
495
+ "model.diffusion_model.input_blocks.2.1.transformer_blocks.0.attn2.to_out.0.weight": "blocks.4.transformer_blocks.0.attn2.to_out.weight",
496
+ "model.diffusion_model.input_blocks.2.1.transformer_blocks.0.attn2.to_q.weight": "blocks.4.transformer_blocks.0.attn2.to_q.weight",
497
+ "model.diffusion_model.input_blocks.2.1.transformer_blocks.0.attn2.to_v.weight": "blocks.4.transformer_blocks.0.attn2.to_v.weight",
498
+ "model.diffusion_model.input_blocks.2.1.transformer_blocks.0.ff.net.0.proj.bias": "blocks.4.transformer_blocks.0.act_fn.proj.bias",
499
+ "model.diffusion_model.input_blocks.2.1.transformer_blocks.0.ff.net.0.proj.weight": "blocks.4.transformer_blocks.0.act_fn.proj.weight",
500
+ "model.diffusion_model.input_blocks.2.1.transformer_blocks.0.ff.net.2.bias": "blocks.4.transformer_blocks.0.ff.bias",
501
+ "model.diffusion_model.input_blocks.2.1.transformer_blocks.0.ff.net.2.weight": "blocks.4.transformer_blocks.0.ff.weight",
502
+ "model.diffusion_model.input_blocks.2.1.transformer_blocks.0.norm1.bias": "blocks.4.transformer_blocks.0.norm1.bias",
503
+ "model.diffusion_model.input_blocks.2.1.transformer_blocks.0.norm1.weight": "blocks.4.transformer_blocks.0.norm1.weight",
504
+ "model.diffusion_model.input_blocks.2.1.transformer_blocks.0.norm2.bias": "blocks.4.transformer_blocks.0.norm2.bias",
505
+ "model.diffusion_model.input_blocks.2.1.transformer_blocks.0.norm2.weight": "blocks.4.transformer_blocks.0.norm2.weight",
506
+ "model.diffusion_model.input_blocks.2.1.transformer_blocks.0.norm3.bias": "blocks.4.transformer_blocks.0.norm3.bias",
507
+ "model.diffusion_model.input_blocks.2.1.transformer_blocks.0.norm3.weight": "blocks.4.transformer_blocks.0.norm3.weight",
508
+ "model.diffusion_model.input_blocks.3.0.op.bias": "blocks.6.conv.bias",
509
+ "model.diffusion_model.input_blocks.3.0.op.weight": "blocks.6.conv.weight",
510
+ "model.diffusion_model.input_blocks.4.0.emb_layers.1.bias": "blocks.8.time_emb_proj.bias",
511
+ "model.diffusion_model.input_blocks.4.0.emb_layers.1.weight": "blocks.8.time_emb_proj.weight",
512
+ "model.diffusion_model.input_blocks.4.0.in_layers.0.bias": "blocks.8.norm1.bias",
513
+ "model.diffusion_model.input_blocks.4.0.in_layers.0.weight": "blocks.8.norm1.weight",
514
+ "model.diffusion_model.input_blocks.4.0.in_layers.2.bias": "blocks.8.conv1.bias",
515
+ "model.diffusion_model.input_blocks.4.0.in_layers.2.weight": "blocks.8.conv1.weight",
516
+ "model.diffusion_model.input_blocks.4.0.out_layers.0.bias": "blocks.8.norm2.bias",
517
+ "model.diffusion_model.input_blocks.4.0.out_layers.0.weight": "blocks.8.norm2.weight",
518
+ "model.diffusion_model.input_blocks.4.0.out_layers.3.bias": "blocks.8.conv2.bias",
519
+ "model.diffusion_model.input_blocks.4.0.out_layers.3.weight": "blocks.8.conv2.weight",
520
+ "model.diffusion_model.input_blocks.4.0.skip_connection.bias": "blocks.8.conv_shortcut.bias",
521
+ "model.diffusion_model.input_blocks.4.0.skip_connection.weight": "blocks.8.conv_shortcut.weight",
522
+ "model.diffusion_model.input_blocks.4.1.norm.bias": "blocks.9.norm.bias",
523
+ "model.diffusion_model.input_blocks.4.1.norm.weight": "blocks.9.norm.weight",
524
+ "model.diffusion_model.input_blocks.4.1.proj_in.bias": "blocks.9.proj_in.bias",
525
+ "model.diffusion_model.input_blocks.4.1.proj_in.weight": "blocks.9.proj_in.weight",
526
+ "model.diffusion_model.input_blocks.4.1.proj_out.bias": "blocks.9.proj_out.bias",
527
+ "model.diffusion_model.input_blocks.4.1.proj_out.weight": "blocks.9.proj_out.weight",
528
+ "model.diffusion_model.input_blocks.4.1.transformer_blocks.0.attn1.to_k.weight": "blocks.9.transformer_blocks.0.attn1.to_k.weight",
529
+ "model.diffusion_model.input_blocks.4.1.transformer_blocks.0.attn1.to_out.0.bias": "blocks.9.transformer_blocks.0.attn1.to_out.bias",
530
+ "model.diffusion_model.input_blocks.4.1.transformer_blocks.0.attn1.to_out.0.weight": "blocks.9.transformer_blocks.0.attn1.to_out.weight",
531
+ "model.diffusion_model.input_blocks.4.1.transformer_blocks.0.attn1.to_q.weight": "blocks.9.transformer_blocks.0.attn1.to_q.weight",
532
+ "model.diffusion_model.input_blocks.4.1.transformer_blocks.0.attn1.to_v.weight": "blocks.9.transformer_blocks.0.attn1.to_v.weight",
533
+ "model.diffusion_model.input_blocks.4.1.transformer_blocks.0.attn2.to_k.weight": "blocks.9.transformer_blocks.0.attn2.to_k.weight",
534
+ "model.diffusion_model.input_blocks.4.1.transformer_blocks.0.attn2.to_out.0.bias": "blocks.9.transformer_blocks.0.attn2.to_out.bias",
535
+ "model.diffusion_model.input_blocks.4.1.transformer_blocks.0.attn2.to_out.0.weight": "blocks.9.transformer_blocks.0.attn2.to_out.weight",
536
+ "model.diffusion_model.input_blocks.4.1.transformer_blocks.0.attn2.to_q.weight": "blocks.9.transformer_blocks.0.attn2.to_q.weight",
537
+ "model.diffusion_model.input_blocks.4.1.transformer_blocks.0.attn2.to_v.weight": "blocks.9.transformer_blocks.0.attn2.to_v.weight",
538
+ "model.diffusion_model.input_blocks.4.1.transformer_blocks.0.ff.net.0.proj.bias": "blocks.9.transformer_blocks.0.act_fn.proj.bias",
539
+ "model.diffusion_model.input_blocks.4.1.transformer_blocks.0.ff.net.0.proj.weight": "blocks.9.transformer_blocks.0.act_fn.proj.weight",
540
+ "model.diffusion_model.input_blocks.4.1.transformer_blocks.0.ff.net.2.bias": "blocks.9.transformer_blocks.0.ff.bias",
541
+ "model.diffusion_model.input_blocks.4.1.transformer_blocks.0.ff.net.2.weight": "blocks.9.transformer_blocks.0.ff.weight",
542
+ "model.diffusion_model.input_blocks.4.1.transformer_blocks.0.norm1.bias": "blocks.9.transformer_blocks.0.norm1.bias",
543
+ "model.diffusion_model.input_blocks.4.1.transformer_blocks.0.norm1.weight": "blocks.9.transformer_blocks.0.norm1.weight",
544
+ "model.diffusion_model.input_blocks.4.1.transformer_blocks.0.norm2.bias": "blocks.9.transformer_blocks.0.norm2.bias",
545
+ "model.diffusion_model.input_blocks.4.1.transformer_blocks.0.norm2.weight": "blocks.9.transformer_blocks.0.norm2.weight",
546
+ "model.diffusion_model.input_blocks.4.1.transformer_blocks.0.norm3.bias": "blocks.9.transformer_blocks.0.norm3.bias",
547
+ "model.diffusion_model.input_blocks.4.1.transformer_blocks.0.norm3.weight": "blocks.9.transformer_blocks.0.norm3.weight",
548
+ "model.diffusion_model.input_blocks.5.0.emb_layers.1.bias": "blocks.11.time_emb_proj.bias",
549
+ "model.diffusion_model.input_blocks.5.0.emb_layers.1.weight": "blocks.11.time_emb_proj.weight",
550
+ "model.diffusion_model.input_blocks.5.0.in_layers.0.bias": "blocks.11.norm1.bias",
551
+ "model.diffusion_model.input_blocks.5.0.in_layers.0.weight": "blocks.11.norm1.weight",
552
+ "model.diffusion_model.input_blocks.5.0.in_layers.2.bias": "blocks.11.conv1.bias",
553
+ "model.diffusion_model.input_blocks.5.0.in_layers.2.weight": "blocks.11.conv1.weight",
554
+ "model.diffusion_model.input_blocks.5.0.out_layers.0.bias": "blocks.11.norm2.bias",
555
+ "model.diffusion_model.input_blocks.5.0.out_layers.0.weight": "blocks.11.norm2.weight",
556
+ "model.diffusion_model.input_blocks.5.0.out_layers.3.bias": "blocks.11.conv2.bias",
557
+ "model.diffusion_model.input_blocks.5.0.out_layers.3.weight": "blocks.11.conv2.weight",
558
+ "model.diffusion_model.input_blocks.5.1.norm.bias": "blocks.12.norm.bias",
559
+ "model.diffusion_model.input_blocks.5.1.norm.weight": "blocks.12.norm.weight",
560
+ "model.diffusion_model.input_blocks.5.1.proj_in.bias": "blocks.12.proj_in.bias",
561
+ "model.diffusion_model.input_blocks.5.1.proj_in.weight": "blocks.12.proj_in.weight",
562
+ "model.diffusion_model.input_blocks.5.1.proj_out.bias": "blocks.12.proj_out.bias",
563
+ "model.diffusion_model.input_blocks.5.1.proj_out.weight": "blocks.12.proj_out.weight",
564
+ "model.diffusion_model.input_blocks.5.1.transformer_blocks.0.attn1.to_k.weight": "blocks.12.transformer_blocks.0.attn1.to_k.weight",
565
+ "model.diffusion_model.input_blocks.5.1.transformer_blocks.0.attn1.to_out.0.bias": "blocks.12.transformer_blocks.0.attn1.to_out.bias",
566
+ "model.diffusion_model.input_blocks.5.1.transformer_blocks.0.attn1.to_out.0.weight": "blocks.12.transformer_blocks.0.attn1.to_out.weight",
567
+ "model.diffusion_model.input_blocks.5.1.transformer_blocks.0.attn1.to_q.weight": "blocks.12.transformer_blocks.0.attn1.to_q.weight",
568
+ "model.diffusion_model.input_blocks.5.1.transformer_blocks.0.attn1.to_v.weight": "blocks.12.transformer_blocks.0.attn1.to_v.weight",
569
+ "model.diffusion_model.input_blocks.5.1.transformer_blocks.0.attn2.to_k.weight": "blocks.12.transformer_blocks.0.attn2.to_k.weight",
570
+ "model.diffusion_model.input_blocks.5.1.transformer_blocks.0.attn2.to_out.0.bias": "blocks.12.transformer_blocks.0.attn2.to_out.bias",
571
+ "model.diffusion_model.input_blocks.5.1.transformer_blocks.0.attn2.to_out.0.weight": "blocks.12.transformer_blocks.0.attn2.to_out.weight",
572
+ "model.diffusion_model.input_blocks.5.1.transformer_blocks.0.attn2.to_q.weight": "blocks.12.transformer_blocks.0.attn2.to_q.weight",
573
+ "model.diffusion_model.input_blocks.5.1.transformer_blocks.0.attn2.to_v.weight": "blocks.12.transformer_blocks.0.attn2.to_v.weight",
574
+ "model.diffusion_model.input_blocks.5.1.transformer_blocks.0.ff.net.0.proj.bias": "blocks.12.transformer_blocks.0.act_fn.proj.bias",
575
+ "model.diffusion_model.input_blocks.5.1.transformer_blocks.0.ff.net.0.proj.weight": "blocks.12.transformer_blocks.0.act_fn.proj.weight",
576
+ "model.diffusion_model.input_blocks.5.1.transformer_blocks.0.ff.net.2.bias": "blocks.12.transformer_blocks.0.ff.bias",
577
+ "model.diffusion_model.input_blocks.5.1.transformer_blocks.0.ff.net.2.weight": "blocks.12.transformer_blocks.0.ff.weight",
578
+ "model.diffusion_model.input_blocks.5.1.transformer_blocks.0.norm1.bias": "blocks.12.transformer_blocks.0.norm1.bias",
579
+ "model.diffusion_model.input_blocks.5.1.transformer_blocks.0.norm1.weight": "blocks.12.transformer_blocks.0.norm1.weight",
580
+ "model.diffusion_model.input_blocks.5.1.transformer_blocks.0.norm2.bias": "blocks.12.transformer_blocks.0.norm2.bias",
581
+ "model.diffusion_model.input_blocks.5.1.transformer_blocks.0.norm2.weight": "blocks.12.transformer_blocks.0.norm2.weight",
582
+ "model.diffusion_model.input_blocks.5.1.transformer_blocks.0.norm3.bias": "blocks.12.transformer_blocks.0.norm3.bias",
583
+ "model.diffusion_model.input_blocks.5.1.transformer_blocks.0.norm3.weight": "blocks.12.transformer_blocks.0.norm3.weight",
584
+ "model.diffusion_model.input_blocks.6.0.op.bias": "blocks.14.conv.bias",
585
+ "model.diffusion_model.input_blocks.6.0.op.weight": "blocks.14.conv.weight",
586
+ "model.diffusion_model.input_blocks.7.0.emb_layers.1.bias": "blocks.16.time_emb_proj.bias",
587
+ "model.diffusion_model.input_blocks.7.0.emb_layers.1.weight": "blocks.16.time_emb_proj.weight",
588
+ "model.diffusion_model.input_blocks.7.0.in_layers.0.bias": "blocks.16.norm1.bias",
589
+ "model.diffusion_model.input_blocks.7.0.in_layers.0.weight": "blocks.16.norm1.weight",
590
+ "model.diffusion_model.input_blocks.7.0.in_layers.2.bias": "blocks.16.conv1.bias",
591
+ "model.diffusion_model.input_blocks.7.0.in_layers.2.weight": "blocks.16.conv1.weight",
592
+ "model.diffusion_model.input_blocks.7.0.out_layers.0.bias": "blocks.16.norm2.bias",
593
+ "model.diffusion_model.input_blocks.7.0.out_layers.0.weight": "blocks.16.norm2.weight",
594
+ "model.diffusion_model.input_blocks.7.0.out_layers.3.bias": "blocks.16.conv2.bias",
595
+ "model.diffusion_model.input_blocks.7.0.out_layers.3.weight": "blocks.16.conv2.weight",
596
+ "model.diffusion_model.input_blocks.7.0.skip_connection.bias": "blocks.16.conv_shortcut.bias",
597
+ "model.diffusion_model.input_blocks.7.0.skip_connection.weight": "blocks.16.conv_shortcut.weight",
598
+ "model.diffusion_model.input_blocks.7.1.norm.bias": "blocks.17.norm.bias",
599
+ "model.diffusion_model.input_blocks.7.1.norm.weight": "blocks.17.norm.weight",
600
+ "model.diffusion_model.input_blocks.7.1.proj_in.bias": "blocks.17.proj_in.bias",
601
+ "model.diffusion_model.input_blocks.7.1.proj_in.weight": "blocks.17.proj_in.weight",
602
+ "model.diffusion_model.input_blocks.7.1.proj_out.bias": "blocks.17.proj_out.bias",
603
+ "model.diffusion_model.input_blocks.7.1.proj_out.weight": "blocks.17.proj_out.weight",
604
+ "model.diffusion_model.input_blocks.7.1.transformer_blocks.0.attn1.to_k.weight": "blocks.17.transformer_blocks.0.attn1.to_k.weight",
605
+ "model.diffusion_model.input_blocks.7.1.transformer_blocks.0.attn1.to_out.0.bias": "blocks.17.transformer_blocks.0.attn1.to_out.bias",
606
+ "model.diffusion_model.input_blocks.7.1.transformer_blocks.0.attn1.to_out.0.weight": "blocks.17.transformer_blocks.0.attn1.to_out.weight",
607
+ "model.diffusion_model.input_blocks.7.1.transformer_blocks.0.attn1.to_q.weight": "blocks.17.transformer_blocks.0.attn1.to_q.weight",
608
+ "model.diffusion_model.input_blocks.7.1.transformer_blocks.0.attn1.to_v.weight": "blocks.17.transformer_blocks.0.attn1.to_v.weight",
609
+ "model.diffusion_model.input_blocks.7.1.transformer_blocks.0.attn2.to_k.weight": "blocks.17.transformer_blocks.0.attn2.to_k.weight",
610
+ "model.diffusion_model.input_blocks.7.1.transformer_blocks.0.attn2.to_out.0.bias": "blocks.17.transformer_blocks.0.attn2.to_out.bias",
611
+ "model.diffusion_model.input_blocks.7.1.transformer_blocks.0.attn2.to_out.0.weight": "blocks.17.transformer_blocks.0.attn2.to_out.weight",
612
+ "model.diffusion_model.input_blocks.7.1.transformer_blocks.0.attn2.to_q.weight": "blocks.17.transformer_blocks.0.attn2.to_q.weight",
613
+ "model.diffusion_model.input_blocks.7.1.transformer_blocks.0.attn2.to_v.weight": "blocks.17.transformer_blocks.0.attn2.to_v.weight",
614
+ "model.diffusion_model.input_blocks.7.1.transformer_blocks.0.ff.net.0.proj.bias": "blocks.17.transformer_blocks.0.act_fn.proj.bias",
615
+ "model.diffusion_model.input_blocks.7.1.transformer_blocks.0.ff.net.0.proj.weight": "blocks.17.transformer_blocks.0.act_fn.proj.weight",
616
+ "model.diffusion_model.input_blocks.7.1.transformer_blocks.0.ff.net.2.bias": "blocks.17.transformer_blocks.0.ff.bias",
617
+ "model.diffusion_model.input_blocks.7.1.transformer_blocks.0.ff.net.2.weight": "blocks.17.transformer_blocks.0.ff.weight",
618
+ "model.diffusion_model.input_blocks.7.1.transformer_blocks.0.norm1.bias": "blocks.17.transformer_blocks.0.norm1.bias",
619
+ "model.diffusion_model.input_blocks.7.1.transformer_blocks.0.norm1.weight": "blocks.17.transformer_blocks.0.norm1.weight",
620
+ "model.diffusion_model.input_blocks.7.1.transformer_blocks.0.norm2.bias": "blocks.17.transformer_blocks.0.norm2.bias",
621
+ "model.diffusion_model.input_blocks.7.1.transformer_blocks.0.norm2.weight": "blocks.17.transformer_blocks.0.norm2.weight",
622
+ "model.diffusion_model.input_blocks.7.1.transformer_blocks.0.norm3.bias": "blocks.17.transformer_blocks.0.norm3.bias",
623
+ "model.diffusion_model.input_blocks.7.1.transformer_blocks.0.norm3.weight": "blocks.17.transformer_blocks.0.norm3.weight",
624
+ "model.diffusion_model.input_blocks.8.0.emb_layers.1.bias": "blocks.19.time_emb_proj.bias",
625
+ "model.diffusion_model.input_blocks.8.0.emb_layers.1.weight": "blocks.19.time_emb_proj.weight",
626
+ "model.diffusion_model.input_blocks.8.0.in_layers.0.bias": "blocks.19.norm1.bias",
627
+ "model.diffusion_model.input_blocks.8.0.in_layers.0.weight": "blocks.19.norm1.weight",
628
+ "model.diffusion_model.input_blocks.8.0.in_layers.2.bias": "blocks.19.conv1.bias",
629
+ "model.diffusion_model.input_blocks.8.0.in_layers.2.weight": "blocks.19.conv1.weight",
630
+ "model.diffusion_model.input_blocks.8.0.out_layers.0.bias": "blocks.19.norm2.bias",
631
+ "model.diffusion_model.input_blocks.8.0.out_layers.0.weight": "blocks.19.norm2.weight",
632
+ "model.diffusion_model.input_blocks.8.0.out_layers.3.bias": "blocks.19.conv2.bias",
633
+ "model.diffusion_model.input_blocks.8.0.out_layers.3.weight": "blocks.19.conv2.weight",
634
+ "model.diffusion_model.input_blocks.8.1.norm.bias": "blocks.20.norm.bias",
635
+ "model.diffusion_model.input_blocks.8.1.norm.weight": "blocks.20.norm.weight",
636
+ "model.diffusion_model.input_blocks.8.1.proj_in.bias": "blocks.20.proj_in.bias",
637
+ "model.diffusion_model.input_blocks.8.1.proj_in.weight": "blocks.20.proj_in.weight",
638
+ "model.diffusion_model.input_blocks.8.1.proj_out.bias": "blocks.20.proj_out.bias",
639
+ "model.diffusion_model.input_blocks.8.1.proj_out.weight": "blocks.20.proj_out.weight",
640
+ "model.diffusion_model.input_blocks.8.1.transformer_blocks.0.attn1.to_k.weight": "blocks.20.transformer_blocks.0.attn1.to_k.weight",
641
+ "model.diffusion_model.input_blocks.8.1.transformer_blocks.0.attn1.to_out.0.bias": "blocks.20.transformer_blocks.0.attn1.to_out.bias",
642
+ "model.diffusion_model.input_blocks.8.1.transformer_blocks.0.attn1.to_out.0.weight": "blocks.20.transformer_blocks.0.attn1.to_out.weight",
643
+ "model.diffusion_model.input_blocks.8.1.transformer_blocks.0.attn1.to_q.weight": "blocks.20.transformer_blocks.0.attn1.to_q.weight",
644
+ "model.diffusion_model.input_blocks.8.1.transformer_blocks.0.attn1.to_v.weight": "blocks.20.transformer_blocks.0.attn1.to_v.weight",
645
+ "model.diffusion_model.input_blocks.8.1.transformer_blocks.0.attn2.to_k.weight": "blocks.20.transformer_blocks.0.attn2.to_k.weight",
646
+ "model.diffusion_model.input_blocks.8.1.transformer_blocks.0.attn2.to_out.0.bias": "blocks.20.transformer_blocks.0.attn2.to_out.bias",
647
+ "model.diffusion_model.input_blocks.8.1.transformer_blocks.0.attn2.to_out.0.weight": "blocks.20.transformer_blocks.0.attn2.to_out.weight",
648
+ "model.diffusion_model.input_blocks.8.1.transformer_blocks.0.attn2.to_q.weight": "blocks.20.transformer_blocks.0.attn2.to_q.weight",
649
+ "model.diffusion_model.input_blocks.8.1.transformer_blocks.0.attn2.to_v.weight": "blocks.20.transformer_blocks.0.attn2.to_v.weight",
650
+ "model.diffusion_model.input_blocks.8.1.transformer_blocks.0.ff.net.0.proj.bias": "blocks.20.transformer_blocks.0.act_fn.proj.bias",
651
+ "model.diffusion_model.input_blocks.8.1.transformer_blocks.0.ff.net.0.proj.weight": "blocks.20.transformer_blocks.0.act_fn.proj.weight",
652
+ "model.diffusion_model.input_blocks.8.1.transformer_blocks.0.ff.net.2.bias": "blocks.20.transformer_blocks.0.ff.bias",
653
+ "model.diffusion_model.input_blocks.8.1.transformer_blocks.0.ff.net.2.weight": "blocks.20.transformer_blocks.0.ff.weight",
654
+ "model.diffusion_model.input_blocks.8.1.transformer_blocks.0.norm1.bias": "blocks.20.transformer_blocks.0.norm1.bias",
655
+ "model.diffusion_model.input_blocks.8.1.transformer_blocks.0.norm1.weight": "blocks.20.transformer_blocks.0.norm1.weight",
656
+ "model.diffusion_model.input_blocks.8.1.transformer_blocks.0.norm2.bias": "blocks.20.transformer_blocks.0.norm2.bias",
657
+ "model.diffusion_model.input_blocks.8.1.transformer_blocks.0.norm2.weight": "blocks.20.transformer_blocks.0.norm2.weight",
658
+ "model.diffusion_model.input_blocks.8.1.transformer_blocks.0.norm3.bias": "blocks.20.transformer_blocks.0.norm3.bias",
659
+ "model.diffusion_model.input_blocks.8.1.transformer_blocks.0.norm3.weight": "blocks.20.transformer_blocks.0.norm3.weight",
660
+ "model.diffusion_model.input_blocks.9.0.op.bias": "blocks.22.conv.bias",
661
+ "model.diffusion_model.input_blocks.9.0.op.weight": "blocks.22.conv.weight",
662
+ "model.diffusion_model.middle_block.0.emb_layers.1.bias": "blocks.28.time_emb_proj.bias",
663
+ "model.diffusion_model.middle_block.0.emb_layers.1.weight": "blocks.28.time_emb_proj.weight",
664
+ "model.diffusion_model.middle_block.0.in_layers.0.bias": "blocks.28.norm1.bias",
665
+ "model.diffusion_model.middle_block.0.in_layers.0.weight": "blocks.28.norm1.weight",
666
+ "model.diffusion_model.middle_block.0.in_layers.2.bias": "blocks.28.conv1.bias",
667
+ "model.diffusion_model.middle_block.0.in_layers.2.weight": "blocks.28.conv1.weight",
668
+ "model.diffusion_model.middle_block.0.out_layers.0.bias": "blocks.28.norm2.bias",
669
+ "model.diffusion_model.middle_block.0.out_layers.0.weight": "blocks.28.norm2.weight",
670
+ "model.diffusion_model.middle_block.0.out_layers.3.bias": "blocks.28.conv2.bias",
671
+ "model.diffusion_model.middle_block.0.out_layers.3.weight": "blocks.28.conv2.weight",
672
+ "model.diffusion_model.middle_block.1.norm.bias": "blocks.29.norm.bias",
673
+ "model.diffusion_model.middle_block.1.norm.weight": "blocks.29.norm.weight",
674
+ "model.diffusion_model.middle_block.1.proj_in.bias": "blocks.29.proj_in.bias",
675
+ "model.diffusion_model.middle_block.1.proj_in.weight": "blocks.29.proj_in.weight",
676
+ "model.diffusion_model.middle_block.1.proj_out.bias": "blocks.29.proj_out.bias",
677
+ "model.diffusion_model.middle_block.1.proj_out.weight": "blocks.29.proj_out.weight",
678
+ "model.diffusion_model.middle_block.1.transformer_blocks.0.attn1.to_k.weight": "blocks.29.transformer_blocks.0.attn1.to_k.weight",
679
+ "model.diffusion_model.middle_block.1.transformer_blocks.0.attn1.to_out.0.bias": "blocks.29.transformer_blocks.0.attn1.to_out.bias",
680
+ "model.diffusion_model.middle_block.1.transformer_blocks.0.attn1.to_out.0.weight": "blocks.29.transformer_blocks.0.attn1.to_out.weight",
681
+ "model.diffusion_model.middle_block.1.transformer_blocks.0.attn1.to_q.weight": "blocks.29.transformer_blocks.0.attn1.to_q.weight",
682
+ "model.diffusion_model.middle_block.1.transformer_blocks.0.attn1.to_v.weight": "blocks.29.transformer_blocks.0.attn1.to_v.weight",
683
+ "model.diffusion_model.middle_block.1.transformer_blocks.0.attn2.to_k.weight": "blocks.29.transformer_blocks.0.attn2.to_k.weight",
684
+ "model.diffusion_model.middle_block.1.transformer_blocks.0.attn2.to_out.0.bias": "blocks.29.transformer_blocks.0.attn2.to_out.bias",
685
+ "model.diffusion_model.middle_block.1.transformer_blocks.0.attn2.to_out.0.weight": "blocks.29.transformer_blocks.0.attn2.to_out.weight",
686
+ "model.diffusion_model.middle_block.1.transformer_blocks.0.attn2.to_q.weight": "blocks.29.transformer_blocks.0.attn2.to_q.weight",
687
+ "model.diffusion_model.middle_block.1.transformer_blocks.0.attn2.to_v.weight": "blocks.29.transformer_blocks.0.attn2.to_v.weight",
688
+ "model.diffusion_model.middle_block.1.transformer_blocks.0.ff.net.0.proj.bias": "blocks.29.transformer_blocks.0.act_fn.proj.bias",
689
+ "model.diffusion_model.middle_block.1.transformer_blocks.0.ff.net.0.proj.weight": "blocks.29.transformer_blocks.0.act_fn.proj.weight",
690
+ "model.diffusion_model.middle_block.1.transformer_blocks.0.ff.net.2.bias": "blocks.29.transformer_blocks.0.ff.bias",
691
+ "model.diffusion_model.middle_block.1.transformer_blocks.0.ff.net.2.weight": "blocks.29.transformer_blocks.0.ff.weight",
692
+ "model.diffusion_model.middle_block.1.transformer_blocks.0.norm1.bias": "blocks.29.transformer_blocks.0.norm1.bias",
693
+ "model.diffusion_model.middle_block.1.transformer_blocks.0.norm1.weight": "blocks.29.transformer_blocks.0.norm1.weight",
694
+ "model.diffusion_model.middle_block.1.transformer_blocks.0.norm2.bias": "blocks.29.transformer_blocks.0.norm2.bias",
695
+ "model.diffusion_model.middle_block.1.transformer_blocks.0.norm2.weight": "blocks.29.transformer_blocks.0.norm2.weight",
696
+ "model.diffusion_model.middle_block.1.transformer_blocks.0.norm3.bias": "blocks.29.transformer_blocks.0.norm3.bias",
697
+ "model.diffusion_model.middle_block.1.transformer_blocks.0.norm3.weight": "blocks.29.transformer_blocks.0.norm3.weight",
698
+ "model.diffusion_model.middle_block.2.emb_layers.1.bias": "blocks.30.time_emb_proj.bias",
699
+ "model.diffusion_model.middle_block.2.emb_layers.1.weight": "blocks.30.time_emb_proj.weight",
700
+ "model.diffusion_model.middle_block.2.in_layers.0.bias": "blocks.30.norm1.bias",
701
+ "model.diffusion_model.middle_block.2.in_layers.0.weight": "blocks.30.norm1.weight",
702
+ "model.diffusion_model.middle_block.2.in_layers.2.bias": "blocks.30.conv1.bias",
703
+ "model.diffusion_model.middle_block.2.in_layers.2.weight": "blocks.30.conv1.weight",
704
+ "model.diffusion_model.middle_block.2.out_layers.0.bias": "blocks.30.norm2.bias",
705
+ "model.diffusion_model.middle_block.2.out_layers.0.weight": "blocks.30.norm2.weight",
706
+ "model.diffusion_model.middle_block.2.out_layers.3.bias": "blocks.30.conv2.bias",
707
+ "model.diffusion_model.middle_block.2.out_layers.3.weight": "blocks.30.conv2.weight",
708
+ "model.diffusion_model.out.0.bias": "conv_norm_out.bias",
709
+ "model.diffusion_model.out.0.weight": "conv_norm_out.weight",
710
+ "model.diffusion_model.out.2.bias": "conv_out.bias",
711
+ "model.diffusion_model.out.2.weight": "conv_out.weight",
712
+ "model.diffusion_model.output_blocks.0.0.emb_layers.1.bias": "blocks.32.time_emb_proj.bias",
713
+ "model.diffusion_model.output_blocks.0.0.emb_layers.1.weight": "blocks.32.time_emb_proj.weight",
714
+ "model.diffusion_model.output_blocks.0.0.in_layers.0.bias": "blocks.32.norm1.bias",
715
+ "model.diffusion_model.output_blocks.0.0.in_layers.0.weight": "blocks.32.norm1.weight",
716
+ "model.diffusion_model.output_blocks.0.0.in_layers.2.bias": "blocks.32.conv1.bias",
717
+ "model.diffusion_model.output_blocks.0.0.in_layers.2.weight": "blocks.32.conv1.weight",
718
+ "model.diffusion_model.output_blocks.0.0.out_layers.0.bias": "blocks.32.norm2.bias",
719
+ "model.diffusion_model.output_blocks.0.0.out_layers.0.weight": "blocks.32.norm2.weight",
720
+ "model.diffusion_model.output_blocks.0.0.out_layers.3.bias": "blocks.32.conv2.bias",
721
+ "model.diffusion_model.output_blocks.0.0.out_layers.3.weight": "blocks.32.conv2.weight",
722
+ "model.diffusion_model.output_blocks.0.0.skip_connection.bias": "blocks.32.conv_shortcut.bias",
723
+ "model.diffusion_model.output_blocks.0.0.skip_connection.weight": "blocks.32.conv_shortcut.weight",
724
+ "model.diffusion_model.output_blocks.1.0.emb_layers.1.bias": "blocks.34.time_emb_proj.bias",
725
+ "model.diffusion_model.output_blocks.1.0.emb_layers.1.weight": "blocks.34.time_emb_proj.weight",
726
+ "model.diffusion_model.output_blocks.1.0.in_layers.0.bias": "blocks.34.norm1.bias",
727
+ "model.diffusion_model.output_blocks.1.0.in_layers.0.weight": "blocks.34.norm1.weight",
728
+ "model.diffusion_model.output_blocks.1.0.in_layers.2.bias": "blocks.34.conv1.bias",
729
+ "model.diffusion_model.output_blocks.1.0.in_layers.2.weight": "blocks.34.conv1.weight",
730
+ "model.diffusion_model.output_blocks.1.0.out_layers.0.bias": "blocks.34.norm2.bias",
731
+ "model.diffusion_model.output_blocks.1.0.out_layers.0.weight": "blocks.34.norm2.weight",
732
+ "model.diffusion_model.output_blocks.1.0.out_layers.3.bias": "blocks.34.conv2.bias",
733
+ "model.diffusion_model.output_blocks.1.0.out_layers.3.weight": "blocks.34.conv2.weight",
734
+ "model.diffusion_model.output_blocks.1.0.skip_connection.bias": "blocks.34.conv_shortcut.bias",
735
+ "model.diffusion_model.output_blocks.1.0.skip_connection.weight": "blocks.34.conv_shortcut.weight",
736
+ "model.diffusion_model.output_blocks.10.0.emb_layers.1.bias": "blocks.62.time_emb_proj.bias",
737
+ "model.diffusion_model.output_blocks.10.0.emb_layers.1.weight": "blocks.62.time_emb_proj.weight",
738
+ "model.diffusion_model.output_blocks.10.0.in_layers.0.bias": "blocks.62.norm1.bias",
739
+ "model.diffusion_model.output_blocks.10.0.in_layers.0.weight": "blocks.62.norm1.weight",
740
+ "model.diffusion_model.output_blocks.10.0.in_layers.2.bias": "blocks.62.conv1.bias",
741
+ "model.diffusion_model.output_blocks.10.0.in_layers.2.weight": "blocks.62.conv1.weight",
742
+ "model.diffusion_model.output_blocks.10.0.out_layers.0.bias": "blocks.62.norm2.bias",
743
+ "model.diffusion_model.output_blocks.10.0.out_layers.0.weight": "blocks.62.norm2.weight",
744
+ "model.diffusion_model.output_blocks.10.0.out_layers.3.bias": "blocks.62.conv2.bias",
745
+ "model.diffusion_model.output_blocks.10.0.out_layers.3.weight": "blocks.62.conv2.weight",
746
+ "model.diffusion_model.output_blocks.10.0.skip_connection.bias": "blocks.62.conv_shortcut.bias",
747
+ "model.diffusion_model.output_blocks.10.0.skip_connection.weight": "blocks.62.conv_shortcut.weight",
748
+ "model.diffusion_model.output_blocks.10.1.norm.bias": "blocks.63.norm.bias",
749
+ "model.diffusion_model.output_blocks.10.1.norm.weight": "blocks.63.norm.weight",
750
+ "model.diffusion_model.output_blocks.10.1.proj_in.bias": "blocks.63.proj_in.bias",
751
+ "model.diffusion_model.output_blocks.10.1.proj_in.weight": "blocks.63.proj_in.weight",
752
+ "model.diffusion_model.output_blocks.10.1.proj_out.bias": "blocks.63.proj_out.bias",
753
+ "model.diffusion_model.output_blocks.10.1.proj_out.weight": "blocks.63.proj_out.weight",
754
+ "model.diffusion_model.output_blocks.10.1.transformer_blocks.0.attn1.to_k.weight": "blocks.63.transformer_blocks.0.attn1.to_k.weight",
755
+ "model.diffusion_model.output_blocks.10.1.transformer_blocks.0.attn1.to_out.0.bias": "blocks.63.transformer_blocks.0.attn1.to_out.bias",
756
+ "model.diffusion_model.output_blocks.10.1.transformer_blocks.0.attn1.to_out.0.weight": "blocks.63.transformer_blocks.0.attn1.to_out.weight",
757
+ "model.diffusion_model.output_blocks.10.1.transformer_blocks.0.attn1.to_q.weight": "blocks.63.transformer_blocks.0.attn1.to_q.weight",
758
+ "model.diffusion_model.output_blocks.10.1.transformer_blocks.0.attn1.to_v.weight": "blocks.63.transformer_blocks.0.attn1.to_v.weight",
759
+ "model.diffusion_model.output_blocks.10.1.transformer_blocks.0.attn2.to_k.weight": "blocks.63.transformer_blocks.0.attn2.to_k.weight",
760
+ "model.diffusion_model.output_blocks.10.1.transformer_blocks.0.attn2.to_out.0.bias": "blocks.63.transformer_blocks.0.attn2.to_out.bias",
761
+ "model.diffusion_model.output_blocks.10.1.transformer_blocks.0.attn2.to_out.0.weight": "blocks.63.transformer_blocks.0.attn2.to_out.weight",
762
+ "model.diffusion_model.output_blocks.10.1.transformer_blocks.0.attn2.to_q.weight": "blocks.63.transformer_blocks.0.attn2.to_q.weight",
763
+ "model.diffusion_model.output_blocks.10.1.transformer_blocks.0.attn2.to_v.weight": "blocks.63.transformer_blocks.0.attn2.to_v.weight",
764
+ "model.diffusion_model.output_blocks.10.1.transformer_blocks.0.ff.net.0.proj.bias": "blocks.63.transformer_blocks.0.act_fn.proj.bias",
765
+ "model.diffusion_model.output_blocks.10.1.transformer_blocks.0.ff.net.0.proj.weight": "blocks.63.transformer_blocks.0.act_fn.proj.weight",
766
+ "model.diffusion_model.output_blocks.10.1.transformer_blocks.0.ff.net.2.bias": "blocks.63.transformer_blocks.0.ff.bias",
767
+ "model.diffusion_model.output_blocks.10.1.transformer_blocks.0.ff.net.2.weight": "blocks.63.transformer_blocks.0.ff.weight",
768
+ "model.diffusion_model.output_blocks.10.1.transformer_blocks.0.norm1.bias": "blocks.63.transformer_blocks.0.norm1.bias",
769
+ "model.diffusion_model.output_blocks.10.1.transformer_blocks.0.norm1.weight": "blocks.63.transformer_blocks.0.norm1.weight",
770
+ "model.diffusion_model.output_blocks.10.1.transformer_blocks.0.norm2.bias": "blocks.63.transformer_blocks.0.norm2.bias",
771
+ "model.diffusion_model.output_blocks.10.1.transformer_blocks.0.norm2.weight": "blocks.63.transformer_blocks.0.norm2.weight",
772
+ "model.diffusion_model.output_blocks.10.1.transformer_blocks.0.norm3.bias": "blocks.63.transformer_blocks.0.norm3.bias",
773
+ "model.diffusion_model.output_blocks.10.1.transformer_blocks.0.norm3.weight": "blocks.63.transformer_blocks.0.norm3.weight",
774
+ "model.diffusion_model.output_blocks.11.0.emb_layers.1.bias": "blocks.65.time_emb_proj.bias",
775
+ "model.diffusion_model.output_blocks.11.0.emb_layers.1.weight": "blocks.65.time_emb_proj.weight",
776
+ "model.diffusion_model.output_blocks.11.0.in_layers.0.bias": "blocks.65.norm1.bias",
777
+ "model.diffusion_model.output_blocks.11.0.in_layers.0.weight": "blocks.65.norm1.weight",
778
+ "model.diffusion_model.output_blocks.11.0.in_layers.2.bias": "blocks.65.conv1.bias",
779
+ "model.diffusion_model.output_blocks.11.0.in_layers.2.weight": "blocks.65.conv1.weight",
780
+ "model.diffusion_model.output_blocks.11.0.out_layers.0.bias": "blocks.65.norm2.bias",
781
+ "model.diffusion_model.output_blocks.11.0.out_layers.0.weight": "blocks.65.norm2.weight",
782
+ "model.diffusion_model.output_blocks.11.0.out_layers.3.bias": "blocks.65.conv2.bias",
783
+ "model.diffusion_model.output_blocks.11.0.out_layers.3.weight": "blocks.65.conv2.weight",
784
+ "model.diffusion_model.output_blocks.11.0.skip_connection.bias": "blocks.65.conv_shortcut.bias",
785
+ "model.diffusion_model.output_blocks.11.0.skip_connection.weight": "blocks.65.conv_shortcut.weight",
786
+ "model.diffusion_model.output_blocks.11.1.norm.bias": "blocks.66.norm.bias",
787
+ "model.diffusion_model.output_blocks.11.1.norm.weight": "blocks.66.norm.weight",
788
+ "model.diffusion_model.output_blocks.11.1.proj_in.bias": "blocks.66.proj_in.bias",
789
+ "model.diffusion_model.output_blocks.11.1.proj_in.weight": "blocks.66.proj_in.weight",
790
+ "model.diffusion_model.output_blocks.11.1.proj_out.bias": "blocks.66.proj_out.bias",
791
+ "model.diffusion_model.output_blocks.11.1.proj_out.weight": "blocks.66.proj_out.weight",
792
+ "model.diffusion_model.output_blocks.11.1.transformer_blocks.0.attn1.to_k.weight": "blocks.66.transformer_blocks.0.attn1.to_k.weight",
793
+ "model.diffusion_model.output_blocks.11.1.transformer_blocks.0.attn1.to_out.0.bias": "blocks.66.transformer_blocks.0.attn1.to_out.bias",
794
+ "model.diffusion_model.output_blocks.11.1.transformer_blocks.0.attn1.to_out.0.weight": "blocks.66.transformer_blocks.0.attn1.to_out.weight",
795
+ "model.diffusion_model.output_blocks.11.1.transformer_blocks.0.attn1.to_q.weight": "blocks.66.transformer_blocks.0.attn1.to_q.weight",
796
+ "model.diffusion_model.output_blocks.11.1.transformer_blocks.0.attn1.to_v.weight": "blocks.66.transformer_blocks.0.attn1.to_v.weight",
797
+ "model.diffusion_model.output_blocks.11.1.transformer_blocks.0.attn2.to_k.weight": "blocks.66.transformer_blocks.0.attn2.to_k.weight",
798
+ "model.diffusion_model.output_blocks.11.1.transformer_blocks.0.attn2.to_out.0.bias": "blocks.66.transformer_blocks.0.attn2.to_out.bias",
799
+ "model.diffusion_model.output_blocks.11.1.transformer_blocks.0.attn2.to_out.0.weight": "blocks.66.transformer_blocks.0.attn2.to_out.weight",
800
+ "model.diffusion_model.output_blocks.11.1.transformer_blocks.0.attn2.to_q.weight": "blocks.66.transformer_blocks.0.attn2.to_q.weight",
801
+ "model.diffusion_model.output_blocks.11.1.transformer_blocks.0.attn2.to_v.weight": "blocks.66.transformer_blocks.0.attn2.to_v.weight",
802
+ "model.diffusion_model.output_blocks.11.1.transformer_blocks.0.ff.net.0.proj.bias": "blocks.66.transformer_blocks.0.act_fn.proj.bias",
803
+ "model.diffusion_model.output_blocks.11.1.transformer_blocks.0.ff.net.0.proj.weight": "blocks.66.transformer_blocks.0.act_fn.proj.weight",
804
+ "model.diffusion_model.output_blocks.11.1.transformer_blocks.0.ff.net.2.bias": "blocks.66.transformer_blocks.0.ff.bias",
805
+ "model.diffusion_model.output_blocks.11.1.transformer_blocks.0.ff.net.2.weight": "blocks.66.transformer_blocks.0.ff.weight",
806
+ "model.diffusion_model.output_blocks.11.1.transformer_blocks.0.norm1.bias": "blocks.66.transformer_blocks.0.norm1.bias",
807
+ "model.diffusion_model.output_blocks.11.1.transformer_blocks.0.norm1.weight": "blocks.66.transformer_blocks.0.norm1.weight",
808
+ "model.diffusion_model.output_blocks.11.1.transformer_blocks.0.norm2.bias": "blocks.66.transformer_blocks.0.norm2.bias",
809
+ "model.diffusion_model.output_blocks.11.1.transformer_blocks.0.norm2.weight": "blocks.66.transformer_blocks.0.norm2.weight",
810
+ "model.diffusion_model.output_blocks.11.1.transformer_blocks.0.norm3.bias": "blocks.66.transformer_blocks.0.norm3.bias",
811
+ "model.diffusion_model.output_blocks.11.1.transformer_blocks.0.norm3.weight": "blocks.66.transformer_blocks.0.norm3.weight",
812
+ "model.diffusion_model.output_blocks.2.0.emb_layers.1.bias": "blocks.36.time_emb_proj.bias",
813
+ "model.diffusion_model.output_blocks.2.0.emb_layers.1.weight": "blocks.36.time_emb_proj.weight",
814
+ "model.diffusion_model.output_blocks.2.0.in_layers.0.bias": "blocks.36.norm1.bias",
815
+ "model.diffusion_model.output_blocks.2.0.in_layers.0.weight": "blocks.36.norm1.weight",
816
+ "model.diffusion_model.output_blocks.2.0.in_layers.2.bias": "blocks.36.conv1.bias",
817
+ "model.diffusion_model.output_blocks.2.0.in_layers.2.weight": "blocks.36.conv1.weight",
818
+ "model.diffusion_model.output_blocks.2.0.out_layers.0.bias": "blocks.36.norm2.bias",
819
+ "model.diffusion_model.output_blocks.2.0.out_layers.0.weight": "blocks.36.norm2.weight",
820
+ "model.diffusion_model.output_blocks.2.0.out_layers.3.bias": "blocks.36.conv2.bias",
821
+ "model.diffusion_model.output_blocks.2.0.out_layers.3.weight": "blocks.36.conv2.weight",
822
+ "model.diffusion_model.output_blocks.2.0.skip_connection.bias": "blocks.36.conv_shortcut.bias",
823
+ "model.diffusion_model.output_blocks.2.0.skip_connection.weight": "blocks.36.conv_shortcut.weight",
824
+ "model.diffusion_model.output_blocks.2.1.conv.bias": "blocks.37.conv.bias",
825
+ "model.diffusion_model.output_blocks.2.1.conv.weight": "blocks.37.conv.weight",
826
+ "model.diffusion_model.output_blocks.3.0.emb_layers.1.bias": "blocks.39.time_emb_proj.bias",
827
+ "model.diffusion_model.output_blocks.3.0.emb_layers.1.weight": "blocks.39.time_emb_proj.weight",
828
+ "model.diffusion_model.output_blocks.3.0.in_layers.0.bias": "blocks.39.norm1.bias",
829
+ "model.diffusion_model.output_blocks.3.0.in_layers.0.weight": "blocks.39.norm1.weight",
830
+ "model.diffusion_model.output_blocks.3.0.in_layers.2.bias": "blocks.39.conv1.bias",
831
+ "model.diffusion_model.output_blocks.3.0.in_layers.2.weight": "blocks.39.conv1.weight",
832
+ "model.diffusion_model.output_blocks.3.0.out_layers.0.bias": "blocks.39.norm2.bias",
833
+ "model.diffusion_model.output_blocks.3.0.out_layers.0.weight": "blocks.39.norm2.weight",
834
+ "model.diffusion_model.output_blocks.3.0.out_layers.3.bias": "blocks.39.conv2.bias",
835
+ "model.diffusion_model.output_blocks.3.0.out_layers.3.weight": "blocks.39.conv2.weight",
836
+ "model.diffusion_model.output_blocks.3.0.skip_connection.bias": "blocks.39.conv_shortcut.bias",
837
+ "model.diffusion_model.output_blocks.3.0.skip_connection.weight": "blocks.39.conv_shortcut.weight",
838
+ "model.diffusion_model.output_blocks.3.1.norm.bias": "blocks.40.norm.bias",
839
+ "model.diffusion_model.output_blocks.3.1.norm.weight": "blocks.40.norm.weight",
840
+ "model.diffusion_model.output_blocks.3.1.proj_in.bias": "blocks.40.proj_in.bias",
841
+ "model.diffusion_model.output_blocks.3.1.proj_in.weight": "blocks.40.proj_in.weight",
842
+ "model.diffusion_model.output_blocks.3.1.proj_out.bias": "blocks.40.proj_out.bias",
843
+ "model.diffusion_model.output_blocks.3.1.proj_out.weight": "blocks.40.proj_out.weight",
844
+ "model.diffusion_model.output_blocks.3.1.transformer_blocks.0.attn1.to_k.weight": "blocks.40.transformer_blocks.0.attn1.to_k.weight",
845
+ "model.diffusion_model.output_blocks.3.1.transformer_blocks.0.attn1.to_out.0.bias": "blocks.40.transformer_blocks.0.attn1.to_out.bias",
846
+ "model.diffusion_model.output_blocks.3.1.transformer_blocks.0.attn1.to_out.0.weight": "blocks.40.transformer_blocks.0.attn1.to_out.weight",
847
+ "model.diffusion_model.output_blocks.3.1.transformer_blocks.0.attn1.to_q.weight": "blocks.40.transformer_blocks.0.attn1.to_q.weight",
848
+ "model.diffusion_model.output_blocks.3.1.transformer_blocks.0.attn1.to_v.weight": "blocks.40.transformer_blocks.0.attn1.to_v.weight",
849
+ "model.diffusion_model.output_blocks.3.1.transformer_blocks.0.attn2.to_k.weight": "blocks.40.transformer_blocks.0.attn2.to_k.weight",
850
+ "model.diffusion_model.output_blocks.3.1.transformer_blocks.0.attn2.to_out.0.bias": "blocks.40.transformer_blocks.0.attn2.to_out.bias",
851
+ "model.diffusion_model.output_blocks.3.1.transformer_blocks.0.attn2.to_out.0.weight": "blocks.40.transformer_blocks.0.attn2.to_out.weight",
852
+ "model.diffusion_model.output_blocks.3.1.transformer_blocks.0.attn2.to_q.weight": "blocks.40.transformer_blocks.0.attn2.to_q.weight",
853
+ "model.diffusion_model.output_blocks.3.1.transformer_blocks.0.attn2.to_v.weight": "blocks.40.transformer_blocks.0.attn2.to_v.weight",
854
+ "model.diffusion_model.output_blocks.3.1.transformer_blocks.0.ff.net.0.proj.bias": "blocks.40.transformer_blocks.0.act_fn.proj.bias",
855
+ "model.diffusion_model.output_blocks.3.1.transformer_blocks.0.ff.net.0.proj.weight": "blocks.40.transformer_blocks.0.act_fn.proj.weight",
856
+ "model.diffusion_model.output_blocks.3.1.transformer_blocks.0.ff.net.2.bias": "blocks.40.transformer_blocks.0.ff.bias",
857
+ "model.diffusion_model.output_blocks.3.1.transformer_blocks.0.ff.net.2.weight": "blocks.40.transformer_blocks.0.ff.weight",
858
+ "model.diffusion_model.output_blocks.3.1.transformer_blocks.0.norm1.bias": "blocks.40.transformer_blocks.0.norm1.bias",
859
+ "model.diffusion_model.output_blocks.3.1.transformer_blocks.0.norm1.weight": "blocks.40.transformer_blocks.0.norm1.weight",
860
+ "model.diffusion_model.output_blocks.3.1.transformer_blocks.0.norm2.bias": "blocks.40.transformer_blocks.0.norm2.bias",
861
+ "model.diffusion_model.output_blocks.3.1.transformer_blocks.0.norm2.weight": "blocks.40.transformer_blocks.0.norm2.weight",
862
+ "model.diffusion_model.output_blocks.3.1.transformer_blocks.0.norm3.bias": "blocks.40.transformer_blocks.0.norm3.bias",
863
+ "model.diffusion_model.output_blocks.3.1.transformer_blocks.0.norm3.weight": "blocks.40.transformer_blocks.0.norm3.weight",
864
+ "model.diffusion_model.output_blocks.4.0.emb_layers.1.bias": "blocks.42.time_emb_proj.bias",
865
+ "model.diffusion_model.output_blocks.4.0.emb_layers.1.weight": "blocks.42.time_emb_proj.weight",
866
+ "model.diffusion_model.output_blocks.4.0.in_layers.0.bias": "blocks.42.norm1.bias",
867
+ "model.diffusion_model.output_blocks.4.0.in_layers.0.weight": "blocks.42.norm1.weight",
868
+ "model.diffusion_model.output_blocks.4.0.in_layers.2.bias": "blocks.42.conv1.bias",
869
+ "model.diffusion_model.output_blocks.4.0.in_layers.2.weight": "blocks.42.conv1.weight",
870
+ "model.diffusion_model.output_blocks.4.0.out_layers.0.bias": "blocks.42.norm2.bias",
871
+ "model.diffusion_model.output_blocks.4.0.out_layers.0.weight": "blocks.42.norm2.weight",
872
+ "model.diffusion_model.output_blocks.4.0.out_layers.3.bias": "blocks.42.conv2.bias",
873
+ "model.diffusion_model.output_blocks.4.0.out_layers.3.weight": "blocks.42.conv2.weight",
874
+ "model.diffusion_model.output_blocks.4.0.skip_connection.bias": "blocks.42.conv_shortcut.bias",
875
+ "model.diffusion_model.output_blocks.4.0.skip_connection.weight": "blocks.42.conv_shortcut.weight",
876
+ "model.diffusion_model.output_blocks.4.1.norm.bias": "blocks.43.norm.bias",
877
+ "model.diffusion_model.output_blocks.4.1.norm.weight": "blocks.43.norm.weight",
878
+ "model.diffusion_model.output_blocks.4.1.proj_in.bias": "blocks.43.proj_in.bias",
879
+ "model.diffusion_model.output_blocks.4.1.proj_in.weight": "blocks.43.proj_in.weight",
880
+ "model.diffusion_model.output_blocks.4.1.proj_out.bias": "blocks.43.proj_out.bias",
881
+ "model.diffusion_model.output_blocks.4.1.proj_out.weight": "blocks.43.proj_out.weight",
882
+ "model.diffusion_model.output_blocks.4.1.transformer_blocks.0.attn1.to_k.weight": "blocks.43.transformer_blocks.0.attn1.to_k.weight",
883
+ "model.diffusion_model.output_blocks.4.1.transformer_blocks.0.attn1.to_out.0.bias": "blocks.43.transformer_blocks.0.attn1.to_out.bias",
884
+ "model.diffusion_model.output_blocks.4.1.transformer_blocks.0.attn1.to_out.0.weight": "blocks.43.transformer_blocks.0.attn1.to_out.weight",
885
+ "model.diffusion_model.output_blocks.4.1.transformer_blocks.0.attn1.to_q.weight": "blocks.43.transformer_blocks.0.attn1.to_q.weight",
886
+ "model.diffusion_model.output_blocks.4.1.transformer_blocks.0.attn1.to_v.weight": "blocks.43.transformer_blocks.0.attn1.to_v.weight",
887
+ "model.diffusion_model.output_blocks.4.1.transformer_blocks.0.attn2.to_k.weight": "blocks.43.transformer_blocks.0.attn2.to_k.weight",
888
+ "model.diffusion_model.output_blocks.4.1.transformer_blocks.0.attn2.to_out.0.bias": "blocks.43.transformer_blocks.0.attn2.to_out.bias",
889
+ "model.diffusion_model.output_blocks.4.1.transformer_blocks.0.attn2.to_out.0.weight": "blocks.43.transformer_blocks.0.attn2.to_out.weight",
890
+ "model.diffusion_model.output_blocks.4.1.transformer_blocks.0.attn2.to_q.weight": "blocks.43.transformer_blocks.0.attn2.to_q.weight",
891
+ "model.diffusion_model.output_blocks.4.1.transformer_blocks.0.attn2.to_v.weight": "blocks.43.transformer_blocks.0.attn2.to_v.weight",
892
+ "model.diffusion_model.output_blocks.4.1.transformer_blocks.0.ff.net.0.proj.bias": "blocks.43.transformer_blocks.0.act_fn.proj.bias",
893
+ "model.diffusion_model.output_blocks.4.1.transformer_blocks.0.ff.net.0.proj.weight": "blocks.43.transformer_blocks.0.act_fn.proj.weight",
894
+ "model.diffusion_model.output_blocks.4.1.transformer_blocks.0.ff.net.2.bias": "blocks.43.transformer_blocks.0.ff.bias",
895
+ "model.diffusion_model.output_blocks.4.1.transformer_blocks.0.ff.net.2.weight": "blocks.43.transformer_blocks.0.ff.weight",
896
+ "model.diffusion_model.output_blocks.4.1.transformer_blocks.0.norm1.bias": "blocks.43.transformer_blocks.0.norm1.bias",
897
+ "model.diffusion_model.output_blocks.4.1.transformer_blocks.0.norm1.weight": "blocks.43.transformer_blocks.0.norm1.weight",
898
+ "model.diffusion_model.output_blocks.4.1.transformer_blocks.0.norm2.bias": "blocks.43.transformer_blocks.0.norm2.bias",
899
+ "model.diffusion_model.output_blocks.4.1.transformer_blocks.0.norm2.weight": "blocks.43.transformer_blocks.0.norm2.weight",
900
+ "model.diffusion_model.output_blocks.4.1.transformer_blocks.0.norm3.bias": "blocks.43.transformer_blocks.0.norm3.bias",
901
+ "model.diffusion_model.output_blocks.4.1.transformer_blocks.0.norm3.weight": "blocks.43.transformer_blocks.0.norm3.weight",
902
+ "model.diffusion_model.output_blocks.5.0.emb_layers.1.bias": "blocks.45.time_emb_proj.bias",
903
+ "model.diffusion_model.output_blocks.5.0.emb_layers.1.weight": "blocks.45.time_emb_proj.weight",
904
+ "model.diffusion_model.output_blocks.5.0.in_layers.0.bias": "blocks.45.norm1.bias",
905
+ "model.diffusion_model.output_blocks.5.0.in_layers.0.weight": "blocks.45.norm1.weight",
906
+ "model.diffusion_model.output_blocks.5.0.in_layers.2.bias": "blocks.45.conv1.bias",
907
+ "model.diffusion_model.output_blocks.5.0.in_layers.2.weight": "blocks.45.conv1.weight",
908
+ "model.diffusion_model.output_blocks.5.0.out_layers.0.bias": "blocks.45.norm2.bias",
909
+ "model.diffusion_model.output_blocks.5.0.out_layers.0.weight": "blocks.45.norm2.weight",
910
+ "model.diffusion_model.output_blocks.5.0.out_layers.3.bias": "blocks.45.conv2.bias",
911
+ "model.diffusion_model.output_blocks.5.0.out_layers.3.weight": "blocks.45.conv2.weight",
912
+ "model.diffusion_model.output_blocks.5.0.skip_connection.bias": "blocks.45.conv_shortcut.bias",
913
+ "model.diffusion_model.output_blocks.5.0.skip_connection.weight": "blocks.45.conv_shortcut.weight",
914
+ "model.diffusion_model.output_blocks.5.1.norm.bias": "blocks.46.norm.bias",
915
+ "model.diffusion_model.output_blocks.5.1.norm.weight": "blocks.46.norm.weight",
916
+ "model.diffusion_model.output_blocks.5.1.proj_in.bias": "blocks.46.proj_in.bias",
917
+ "model.diffusion_model.output_blocks.5.1.proj_in.weight": "blocks.46.proj_in.weight",
918
+ "model.diffusion_model.output_blocks.5.1.proj_out.bias": "blocks.46.proj_out.bias",
919
+ "model.diffusion_model.output_blocks.5.1.proj_out.weight": "blocks.46.proj_out.weight",
920
+ "model.diffusion_model.output_blocks.5.1.transformer_blocks.0.attn1.to_k.weight": "blocks.46.transformer_blocks.0.attn1.to_k.weight",
921
+ "model.diffusion_model.output_blocks.5.1.transformer_blocks.0.attn1.to_out.0.bias": "blocks.46.transformer_blocks.0.attn1.to_out.bias",
922
+ "model.diffusion_model.output_blocks.5.1.transformer_blocks.0.attn1.to_out.0.weight": "blocks.46.transformer_blocks.0.attn1.to_out.weight",
923
+ "model.diffusion_model.output_blocks.5.1.transformer_blocks.0.attn1.to_q.weight": "blocks.46.transformer_blocks.0.attn1.to_q.weight",
924
+ "model.diffusion_model.output_blocks.5.1.transformer_blocks.0.attn1.to_v.weight": "blocks.46.transformer_blocks.0.attn1.to_v.weight",
925
+ "model.diffusion_model.output_blocks.5.1.transformer_blocks.0.attn2.to_k.weight": "blocks.46.transformer_blocks.0.attn2.to_k.weight",
926
+ "model.diffusion_model.output_blocks.5.1.transformer_blocks.0.attn2.to_out.0.bias": "blocks.46.transformer_blocks.0.attn2.to_out.bias",
927
+ "model.diffusion_model.output_blocks.5.1.transformer_blocks.0.attn2.to_out.0.weight": "blocks.46.transformer_blocks.0.attn2.to_out.weight",
928
+ "model.diffusion_model.output_blocks.5.1.transformer_blocks.0.attn2.to_q.weight": "blocks.46.transformer_blocks.0.attn2.to_q.weight",
929
+ "model.diffusion_model.output_blocks.5.1.transformer_blocks.0.attn2.to_v.weight": "blocks.46.transformer_blocks.0.attn2.to_v.weight",
930
+ "model.diffusion_model.output_blocks.5.1.transformer_blocks.0.ff.net.0.proj.bias": "blocks.46.transformer_blocks.0.act_fn.proj.bias",
931
+ "model.diffusion_model.output_blocks.5.1.transformer_blocks.0.ff.net.0.proj.weight": "blocks.46.transformer_blocks.0.act_fn.proj.weight",
932
+ "model.diffusion_model.output_blocks.5.1.transformer_blocks.0.ff.net.2.bias": "blocks.46.transformer_blocks.0.ff.bias",
933
+ "model.diffusion_model.output_blocks.5.1.transformer_blocks.0.ff.net.2.weight": "blocks.46.transformer_blocks.0.ff.weight",
934
+ "model.diffusion_model.output_blocks.5.1.transformer_blocks.0.norm1.bias": "blocks.46.transformer_blocks.0.norm1.bias",
935
+ "model.diffusion_model.output_blocks.5.1.transformer_blocks.0.norm1.weight": "blocks.46.transformer_blocks.0.norm1.weight",
936
+ "model.diffusion_model.output_blocks.5.1.transformer_blocks.0.norm2.bias": "blocks.46.transformer_blocks.0.norm2.bias",
937
+ "model.diffusion_model.output_blocks.5.1.transformer_blocks.0.norm2.weight": "blocks.46.transformer_blocks.0.norm2.weight",
938
+ "model.diffusion_model.output_blocks.5.1.transformer_blocks.0.norm3.bias": "blocks.46.transformer_blocks.0.norm3.bias",
939
+ "model.diffusion_model.output_blocks.5.1.transformer_blocks.0.norm3.weight": "blocks.46.transformer_blocks.0.norm3.weight",
940
+ "model.diffusion_model.output_blocks.5.2.conv.bias": "blocks.47.conv.bias",
941
+ "model.diffusion_model.output_blocks.5.2.conv.weight": "blocks.47.conv.weight",
942
+ "model.diffusion_model.output_blocks.6.0.emb_layers.1.bias": "blocks.49.time_emb_proj.bias",
943
+ "model.diffusion_model.output_blocks.6.0.emb_layers.1.weight": "blocks.49.time_emb_proj.weight",
944
+ "model.diffusion_model.output_blocks.6.0.in_layers.0.bias": "blocks.49.norm1.bias",
945
+ "model.diffusion_model.output_blocks.6.0.in_layers.0.weight": "blocks.49.norm1.weight",
946
+ "model.diffusion_model.output_blocks.6.0.in_layers.2.bias": "blocks.49.conv1.bias",
947
+ "model.diffusion_model.output_blocks.6.0.in_layers.2.weight": "blocks.49.conv1.weight",
948
+ "model.diffusion_model.output_blocks.6.0.out_layers.0.bias": "blocks.49.norm2.bias",
949
+ "model.diffusion_model.output_blocks.6.0.out_layers.0.weight": "blocks.49.norm2.weight",
950
+ "model.diffusion_model.output_blocks.6.0.out_layers.3.bias": "blocks.49.conv2.bias",
951
+ "model.diffusion_model.output_blocks.6.0.out_layers.3.weight": "blocks.49.conv2.weight",
952
+ "model.diffusion_model.output_blocks.6.0.skip_connection.bias": "blocks.49.conv_shortcut.bias",
953
+ "model.diffusion_model.output_blocks.6.0.skip_connection.weight": "blocks.49.conv_shortcut.weight",
954
+ "model.diffusion_model.output_blocks.6.1.norm.bias": "blocks.50.norm.bias",
955
+ "model.diffusion_model.output_blocks.6.1.norm.weight": "blocks.50.norm.weight",
956
+ "model.diffusion_model.output_blocks.6.1.proj_in.bias": "blocks.50.proj_in.bias",
957
+ "model.diffusion_model.output_blocks.6.1.proj_in.weight": "blocks.50.proj_in.weight",
958
+ "model.diffusion_model.output_blocks.6.1.proj_out.bias": "blocks.50.proj_out.bias",
959
+ "model.diffusion_model.output_blocks.6.1.proj_out.weight": "blocks.50.proj_out.weight",
960
+ "model.diffusion_model.output_blocks.6.1.transformer_blocks.0.attn1.to_k.weight": "blocks.50.transformer_blocks.0.attn1.to_k.weight",
961
+ "model.diffusion_model.output_blocks.6.1.transformer_blocks.0.attn1.to_out.0.bias": "blocks.50.transformer_blocks.0.attn1.to_out.bias",
962
+ "model.diffusion_model.output_blocks.6.1.transformer_blocks.0.attn1.to_out.0.weight": "blocks.50.transformer_blocks.0.attn1.to_out.weight",
963
+ "model.diffusion_model.output_blocks.6.1.transformer_blocks.0.attn1.to_q.weight": "blocks.50.transformer_blocks.0.attn1.to_q.weight",
964
+ "model.diffusion_model.output_blocks.6.1.transformer_blocks.0.attn1.to_v.weight": "blocks.50.transformer_blocks.0.attn1.to_v.weight",
965
+ "model.diffusion_model.output_blocks.6.1.transformer_blocks.0.attn2.to_k.weight": "blocks.50.transformer_blocks.0.attn2.to_k.weight",
966
+ "model.diffusion_model.output_blocks.6.1.transformer_blocks.0.attn2.to_out.0.bias": "blocks.50.transformer_blocks.0.attn2.to_out.bias",
967
+ "model.diffusion_model.output_blocks.6.1.transformer_blocks.0.attn2.to_out.0.weight": "blocks.50.transformer_blocks.0.attn2.to_out.weight",
968
+ "model.diffusion_model.output_blocks.6.1.transformer_blocks.0.attn2.to_q.weight": "blocks.50.transformer_blocks.0.attn2.to_q.weight",
969
+ "model.diffusion_model.output_blocks.6.1.transformer_blocks.0.attn2.to_v.weight": "blocks.50.transformer_blocks.0.attn2.to_v.weight",
970
+ "model.diffusion_model.output_blocks.6.1.transformer_blocks.0.ff.net.0.proj.bias": "blocks.50.transformer_blocks.0.act_fn.proj.bias",
971
+ "model.diffusion_model.output_blocks.6.1.transformer_blocks.0.ff.net.0.proj.weight": "blocks.50.transformer_blocks.0.act_fn.proj.weight",
972
+ "model.diffusion_model.output_blocks.6.1.transformer_blocks.0.ff.net.2.bias": "blocks.50.transformer_blocks.0.ff.bias",
973
+ "model.diffusion_model.output_blocks.6.1.transformer_blocks.0.ff.net.2.weight": "blocks.50.transformer_blocks.0.ff.weight",
974
+ "model.diffusion_model.output_blocks.6.1.transformer_blocks.0.norm1.bias": "blocks.50.transformer_blocks.0.norm1.bias",
975
+ "model.diffusion_model.output_blocks.6.1.transformer_blocks.0.norm1.weight": "blocks.50.transformer_blocks.0.norm1.weight",
976
+ "model.diffusion_model.output_blocks.6.1.transformer_blocks.0.norm2.bias": "blocks.50.transformer_blocks.0.norm2.bias",
977
+ "model.diffusion_model.output_blocks.6.1.transformer_blocks.0.norm2.weight": "blocks.50.transformer_blocks.0.norm2.weight",
978
+ "model.diffusion_model.output_blocks.6.1.transformer_blocks.0.norm3.bias": "blocks.50.transformer_blocks.0.norm3.bias",
979
+ "model.diffusion_model.output_blocks.6.1.transformer_blocks.0.norm3.weight": "blocks.50.transformer_blocks.0.norm3.weight",
980
+ "model.diffusion_model.output_blocks.7.0.emb_layers.1.bias": "blocks.52.time_emb_proj.bias",
981
+ "model.diffusion_model.output_blocks.7.0.emb_layers.1.weight": "blocks.52.time_emb_proj.weight",
982
+ "model.diffusion_model.output_blocks.7.0.in_layers.0.bias": "blocks.52.norm1.bias",
983
+ "model.diffusion_model.output_blocks.7.0.in_layers.0.weight": "blocks.52.norm1.weight",
984
+ "model.diffusion_model.output_blocks.7.0.in_layers.2.bias": "blocks.52.conv1.bias",
985
+ "model.diffusion_model.output_blocks.7.0.in_layers.2.weight": "blocks.52.conv1.weight",
986
+ "model.diffusion_model.output_blocks.7.0.out_layers.0.bias": "blocks.52.norm2.bias",
987
+ "model.diffusion_model.output_blocks.7.0.out_layers.0.weight": "blocks.52.norm2.weight",
988
+ "model.diffusion_model.output_blocks.7.0.out_layers.3.bias": "blocks.52.conv2.bias",
989
+ "model.diffusion_model.output_blocks.7.0.out_layers.3.weight": "blocks.52.conv2.weight",
990
+ "model.diffusion_model.output_blocks.7.0.skip_connection.bias": "blocks.52.conv_shortcut.bias",
991
+ "model.diffusion_model.output_blocks.7.0.skip_connection.weight": "blocks.52.conv_shortcut.weight",
992
+ "model.diffusion_model.output_blocks.7.1.norm.bias": "blocks.53.norm.bias",
993
+ "model.diffusion_model.output_blocks.7.1.norm.weight": "blocks.53.norm.weight",
994
+ "model.diffusion_model.output_blocks.7.1.proj_in.bias": "blocks.53.proj_in.bias",
995
+ "model.diffusion_model.output_blocks.7.1.proj_in.weight": "blocks.53.proj_in.weight",
996
+ "model.diffusion_model.output_blocks.7.1.proj_out.bias": "blocks.53.proj_out.bias",
997
+ "model.diffusion_model.output_blocks.7.1.proj_out.weight": "blocks.53.proj_out.weight",
998
+ "model.diffusion_model.output_blocks.7.1.transformer_blocks.0.attn1.to_k.weight": "blocks.53.transformer_blocks.0.attn1.to_k.weight",
999
+ "model.diffusion_model.output_blocks.7.1.transformer_blocks.0.attn1.to_out.0.bias": "blocks.53.transformer_blocks.0.attn1.to_out.bias",
1000
+ "model.diffusion_model.output_blocks.7.1.transformer_blocks.0.attn1.to_out.0.weight": "blocks.53.transformer_blocks.0.attn1.to_out.weight",
1001
+ "model.diffusion_model.output_blocks.7.1.transformer_blocks.0.attn1.to_q.weight": "blocks.53.transformer_blocks.0.attn1.to_q.weight",
1002
+ "model.diffusion_model.output_blocks.7.1.transformer_blocks.0.attn1.to_v.weight": "blocks.53.transformer_blocks.0.attn1.to_v.weight",
1003
+ "model.diffusion_model.output_blocks.7.1.transformer_blocks.0.attn2.to_k.weight": "blocks.53.transformer_blocks.0.attn2.to_k.weight",
1004
+ "model.diffusion_model.output_blocks.7.1.transformer_blocks.0.attn2.to_out.0.bias": "blocks.53.transformer_blocks.0.attn2.to_out.bias",
1005
+ "model.diffusion_model.output_blocks.7.1.transformer_blocks.0.attn2.to_out.0.weight": "blocks.53.transformer_blocks.0.attn2.to_out.weight",
1006
+ "model.diffusion_model.output_blocks.7.1.transformer_blocks.0.attn2.to_q.weight": "blocks.53.transformer_blocks.0.attn2.to_q.weight",
1007
+ "model.diffusion_model.output_blocks.7.1.transformer_blocks.0.attn2.to_v.weight": "blocks.53.transformer_blocks.0.attn2.to_v.weight",
1008
+ "model.diffusion_model.output_blocks.7.1.transformer_blocks.0.ff.net.0.proj.bias": "blocks.53.transformer_blocks.0.act_fn.proj.bias",
1009
+ "model.diffusion_model.output_blocks.7.1.transformer_blocks.0.ff.net.0.proj.weight": "blocks.53.transformer_blocks.0.act_fn.proj.weight",
1010
+ "model.diffusion_model.output_blocks.7.1.transformer_blocks.0.ff.net.2.bias": "blocks.53.transformer_blocks.0.ff.bias",
1011
+ "model.diffusion_model.output_blocks.7.1.transformer_blocks.0.ff.net.2.weight": "blocks.53.transformer_blocks.0.ff.weight",
1012
+ "model.diffusion_model.output_blocks.7.1.transformer_blocks.0.norm1.bias": "blocks.53.transformer_blocks.0.norm1.bias",
1013
+ "model.diffusion_model.output_blocks.7.1.transformer_blocks.0.norm1.weight": "blocks.53.transformer_blocks.0.norm1.weight",
1014
+ "model.diffusion_model.output_blocks.7.1.transformer_blocks.0.norm2.bias": "blocks.53.transformer_blocks.0.norm2.bias",
1015
+ "model.diffusion_model.output_blocks.7.1.transformer_blocks.0.norm2.weight": "blocks.53.transformer_blocks.0.norm2.weight",
1016
+ "model.diffusion_model.output_blocks.7.1.transformer_blocks.0.norm3.bias": "blocks.53.transformer_blocks.0.norm3.bias",
1017
+ "model.diffusion_model.output_blocks.7.1.transformer_blocks.0.norm3.weight": "blocks.53.transformer_blocks.0.norm3.weight",
1018
+ "model.diffusion_model.output_blocks.8.0.emb_layers.1.bias": "blocks.55.time_emb_proj.bias",
1019
+ "model.diffusion_model.output_blocks.8.0.emb_layers.1.weight": "blocks.55.time_emb_proj.weight",
1020
+ "model.diffusion_model.output_blocks.8.0.in_layers.0.bias": "blocks.55.norm1.bias",
1021
+ "model.diffusion_model.output_blocks.8.0.in_layers.0.weight": "blocks.55.norm1.weight",
1022
+ "model.diffusion_model.output_blocks.8.0.in_layers.2.bias": "blocks.55.conv1.bias",
1023
+ "model.diffusion_model.output_blocks.8.0.in_layers.2.weight": "blocks.55.conv1.weight",
1024
+ "model.diffusion_model.output_blocks.8.0.out_layers.0.bias": "blocks.55.norm2.bias",
1025
+ "model.diffusion_model.output_blocks.8.0.out_layers.0.weight": "blocks.55.norm2.weight",
1026
+ "model.diffusion_model.output_blocks.8.0.out_layers.3.bias": "blocks.55.conv2.bias",
1027
+ "model.diffusion_model.output_blocks.8.0.out_layers.3.weight": "blocks.55.conv2.weight",
1028
+ "model.diffusion_model.output_blocks.8.0.skip_connection.bias": "blocks.55.conv_shortcut.bias",
1029
+ "model.diffusion_model.output_blocks.8.0.skip_connection.weight": "blocks.55.conv_shortcut.weight",
1030
+ "model.diffusion_model.output_blocks.8.1.norm.bias": "blocks.56.norm.bias",
1031
+ "model.diffusion_model.output_blocks.8.1.norm.weight": "blocks.56.norm.weight",
1032
+ "model.diffusion_model.output_blocks.8.1.proj_in.bias": "blocks.56.proj_in.bias",
1033
+ "model.diffusion_model.output_blocks.8.1.proj_in.weight": "blocks.56.proj_in.weight",
1034
+ "model.diffusion_model.output_blocks.8.1.proj_out.bias": "blocks.56.proj_out.bias",
1035
+ "model.diffusion_model.output_blocks.8.1.proj_out.weight": "blocks.56.proj_out.weight",
1036
+ "model.diffusion_model.output_blocks.8.1.transformer_blocks.0.attn1.to_k.weight": "blocks.56.transformer_blocks.0.attn1.to_k.weight",
1037
+ "model.diffusion_model.output_blocks.8.1.transformer_blocks.0.attn1.to_out.0.bias": "blocks.56.transformer_blocks.0.attn1.to_out.bias",
1038
+ "model.diffusion_model.output_blocks.8.1.transformer_blocks.0.attn1.to_out.0.weight": "blocks.56.transformer_blocks.0.attn1.to_out.weight",
1039
+ "model.diffusion_model.output_blocks.8.1.transformer_blocks.0.attn1.to_q.weight": "blocks.56.transformer_blocks.0.attn1.to_q.weight",
1040
+ "model.diffusion_model.output_blocks.8.1.transformer_blocks.0.attn1.to_v.weight": "blocks.56.transformer_blocks.0.attn1.to_v.weight",
1041
+ "model.diffusion_model.output_blocks.8.1.transformer_blocks.0.attn2.to_k.weight": "blocks.56.transformer_blocks.0.attn2.to_k.weight",
1042
+ "model.diffusion_model.output_blocks.8.1.transformer_blocks.0.attn2.to_out.0.bias": "blocks.56.transformer_blocks.0.attn2.to_out.bias",
1043
+ "model.diffusion_model.output_blocks.8.1.transformer_blocks.0.attn2.to_out.0.weight": "blocks.56.transformer_blocks.0.attn2.to_out.weight",
1044
+ "model.diffusion_model.output_blocks.8.1.transformer_blocks.0.attn2.to_q.weight": "blocks.56.transformer_blocks.0.attn2.to_q.weight",
1045
+ "model.diffusion_model.output_blocks.8.1.transformer_blocks.0.attn2.to_v.weight": "blocks.56.transformer_blocks.0.attn2.to_v.weight",
1046
+ "model.diffusion_model.output_blocks.8.1.transformer_blocks.0.ff.net.0.proj.bias": "blocks.56.transformer_blocks.0.act_fn.proj.bias",
1047
+ "model.diffusion_model.output_blocks.8.1.transformer_blocks.0.ff.net.0.proj.weight": "blocks.56.transformer_blocks.0.act_fn.proj.weight",
1048
+ "model.diffusion_model.output_blocks.8.1.transformer_blocks.0.ff.net.2.bias": "blocks.56.transformer_blocks.0.ff.bias",
1049
+ "model.diffusion_model.output_blocks.8.1.transformer_blocks.0.ff.net.2.weight": "blocks.56.transformer_blocks.0.ff.weight",
1050
+ "model.diffusion_model.output_blocks.8.1.transformer_blocks.0.norm1.bias": "blocks.56.transformer_blocks.0.norm1.bias",
1051
+ "model.diffusion_model.output_blocks.8.1.transformer_blocks.0.norm1.weight": "blocks.56.transformer_blocks.0.norm1.weight",
1052
+ "model.diffusion_model.output_blocks.8.1.transformer_blocks.0.norm2.bias": "blocks.56.transformer_blocks.0.norm2.bias",
1053
+ "model.diffusion_model.output_blocks.8.1.transformer_blocks.0.norm2.weight": "blocks.56.transformer_blocks.0.norm2.weight",
1054
+ "model.diffusion_model.output_blocks.8.1.transformer_blocks.0.norm3.bias": "blocks.56.transformer_blocks.0.norm3.bias",
1055
+ "model.diffusion_model.output_blocks.8.1.transformer_blocks.0.norm3.weight": "blocks.56.transformer_blocks.0.norm3.weight",
1056
+ "model.diffusion_model.output_blocks.8.2.conv.bias": "blocks.57.conv.bias",
1057
+ "model.diffusion_model.output_blocks.8.2.conv.weight": "blocks.57.conv.weight",
1058
+ "model.diffusion_model.output_blocks.9.0.emb_layers.1.bias": "blocks.59.time_emb_proj.bias",
1059
+ "model.diffusion_model.output_blocks.9.0.emb_layers.1.weight": "blocks.59.time_emb_proj.weight",
1060
+ "model.diffusion_model.output_blocks.9.0.in_layers.0.bias": "blocks.59.norm1.bias",
1061
+ "model.diffusion_model.output_blocks.9.0.in_layers.0.weight": "blocks.59.norm1.weight",
1062
+ "model.diffusion_model.output_blocks.9.0.in_layers.2.bias": "blocks.59.conv1.bias",
1063
+ "model.diffusion_model.output_blocks.9.0.in_layers.2.weight": "blocks.59.conv1.weight",
1064
+ "model.diffusion_model.output_blocks.9.0.out_layers.0.bias": "blocks.59.norm2.bias",
1065
+ "model.diffusion_model.output_blocks.9.0.out_layers.0.weight": "blocks.59.norm2.weight",
1066
+ "model.diffusion_model.output_blocks.9.0.out_layers.3.bias": "blocks.59.conv2.bias",
1067
+ "model.diffusion_model.output_blocks.9.0.out_layers.3.weight": "blocks.59.conv2.weight",
1068
+ "model.diffusion_model.output_blocks.9.0.skip_connection.bias": "blocks.59.conv_shortcut.bias",
1069
+ "model.diffusion_model.output_blocks.9.0.skip_connection.weight": "blocks.59.conv_shortcut.weight",
1070
+ "model.diffusion_model.output_blocks.9.1.norm.bias": "blocks.60.norm.bias",
1071
+ "model.diffusion_model.output_blocks.9.1.norm.weight": "blocks.60.norm.weight",
1072
+ "model.diffusion_model.output_blocks.9.1.proj_in.bias": "blocks.60.proj_in.bias",
1073
+ "model.diffusion_model.output_blocks.9.1.proj_in.weight": "blocks.60.proj_in.weight",
1074
+ "model.diffusion_model.output_blocks.9.1.proj_out.bias": "blocks.60.proj_out.bias",
1075
+ "model.diffusion_model.output_blocks.9.1.proj_out.weight": "blocks.60.proj_out.weight",
1076
+ "model.diffusion_model.output_blocks.9.1.transformer_blocks.0.attn1.to_k.weight": "blocks.60.transformer_blocks.0.attn1.to_k.weight",
1077
+ "model.diffusion_model.output_blocks.9.1.transformer_blocks.0.attn1.to_out.0.bias": "blocks.60.transformer_blocks.0.attn1.to_out.bias",
1078
+ "model.diffusion_model.output_blocks.9.1.transformer_blocks.0.attn1.to_out.0.weight": "blocks.60.transformer_blocks.0.attn1.to_out.weight",
1079
+ "model.diffusion_model.output_blocks.9.1.transformer_blocks.0.attn1.to_q.weight": "blocks.60.transformer_blocks.0.attn1.to_q.weight",
1080
+ "model.diffusion_model.output_blocks.9.1.transformer_blocks.0.attn1.to_v.weight": "blocks.60.transformer_blocks.0.attn1.to_v.weight",
1081
+ "model.diffusion_model.output_blocks.9.1.transformer_blocks.0.attn2.to_k.weight": "blocks.60.transformer_blocks.0.attn2.to_k.weight",
1082
+ "model.diffusion_model.output_blocks.9.1.transformer_blocks.0.attn2.to_out.0.bias": "blocks.60.transformer_blocks.0.attn2.to_out.bias",
1083
+ "model.diffusion_model.output_blocks.9.1.transformer_blocks.0.attn2.to_out.0.weight": "blocks.60.transformer_blocks.0.attn2.to_out.weight",
1084
+ "model.diffusion_model.output_blocks.9.1.transformer_blocks.0.attn2.to_q.weight": "blocks.60.transformer_blocks.0.attn2.to_q.weight",
1085
+ "model.diffusion_model.output_blocks.9.1.transformer_blocks.0.attn2.to_v.weight": "blocks.60.transformer_blocks.0.attn2.to_v.weight",
1086
+ "model.diffusion_model.output_blocks.9.1.transformer_blocks.0.ff.net.0.proj.bias": "blocks.60.transformer_blocks.0.act_fn.proj.bias",
1087
+ "model.diffusion_model.output_blocks.9.1.transformer_blocks.0.ff.net.0.proj.weight": "blocks.60.transformer_blocks.0.act_fn.proj.weight",
1088
+ "model.diffusion_model.output_blocks.9.1.transformer_blocks.0.ff.net.2.bias": "blocks.60.transformer_blocks.0.ff.bias",
1089
+ "model.diffusion_model.output_blocks.9.1.transformer_blocks.0.ff.net.2.weight": "blocks.60.transformer_blocks.0.ff.weight",
1090
+ "model.diffusion_model.output_blocks.9.1.transformer_blocks.0.norm1.bias": "blocks.60.transformer_blocks.0.norm1.bias",
1091
+ "model.diffusion_model.output_blocks.9.1.transformer_blocks.0.norm1.weight": "blocks.60.transformer_blocks.0.norm1.weight",
1092
+ "model.diffusion_model.output_blocks.9.1.transformer_blocks.0.norm2.bias": "blocks.60.transformer_blocks.0.norm2.bias",
1093
+ "model.diffusion_model.output_blocks.9.1.transformer_blocks.0.norm2.weight": "blocks.60.transformer_blocks.0.norm2.weight",
1094
+ "model.diffusion_model.output_blocks.9.1.transformer_blocks.0.norm3.bias": "blocks.60.transformer_blocks.0.norm3.bias",
1095
+ "model.diffusion_model.output_blocks.9.1.transformer_blocks.0.norm3.weight": "blocks.60.transformer_blocks.0.norm3.weight",
1096
+ "model.diffusion_model.time_embed.0.bias": "time_embedding.0.bias",
1097
+ "model.diffusion_model.time_embed.0.weight": "time_embedding.0.weight",
1098
+ "model.diffusion_model.time_embed.2.bias": "time_embedding.2.bias",
1099
+ "model.diffusion_model.time_embed.2.weight": "time_embedding.2.weight",
1100
+ }
1101
+ state_dict_ = {}
1102
+ for name in state_dict:
1103
+ if name in rename_dict:
1104
+ param = state_dict[name]
1105
+ if ".proj_in." in name or ".proj_out." in name:
1106
+ param = param.squeeze()
1107
+ state_dict_[rename_dict[name]] = param
1108
+ return state_dict_