InvokeAI 6.11.0rc1-py3-none-any.whl → 6.11.1-py3-none-any.whl

This diff compares two publicly released versions of the package as they appear in their public registry. It is provided for informational purposes only.
Files changed (27)
  1. invokeai/app/invocations/flux2_denoise.py +25 -19
  2. invokeai/app/invocations/flux2_vae_decode.py +0 -14
  3. invokeai/app/invocations/flux_denoise.py +22 -6
  4. invokeai/app/invocations/flux_model_loader.py +2 -5
  5. invokeai/app/util/step_callback.py +52 -38
  6. invokeai/backend/flux/dype/__init__.py +18 -1
  7. invokeai/backend/flux/dype/base.py +40 -6
  8. invokeai/backend/flux/dype/presets.py +97 -35
  9. invokeai/backend/flux2/denoise.py +33 -6
  10. invokeai/backend/flux2/sampling_utils.py +19 -22
  11. invokeai/frontend/web/dist/assets/App-Drro7CYT.js +161 -0
  12. invokeai/frontend/web/dist/assets/{browser-ponyfill-Cw07u5G1.js → browser-ponyfill-B5E9kN5q.js} +1 -1
  13. invokeai/frontend/web/dist/assets/{index-DSKM8iGj.js → index-Bp-c_7R4.js} +64 -64
  14. invokeai/frontend/web/dist/index.html +1 -1
  15. invokeai/frontend/web/dist/locales/en.json +21 -1
  16. invokeai/frontend/web/dist/locales/it.json +135 -16
  17. invokeai/frontend/web/dist/locales/ru.json +42 -42
  18. invokeai/version/invokeai_version.py +1 -1
  19. {invokeai-6.11.0rc1.dist-info → invokeai-6.11.1.dist-info}/METADATA +1 -1
  20. {invokeai-6.11.0rc1.dist-info → invokeai-6.11.1.dist-info}/RECORD +26 -26
  21. invokeai/frontend/web/dist/assets/App-ClpIJstk.js +0 -161
  22. {invokeai-6.11.0rc1.dist-info → invokeai-6.11.1.dist-info}/WHEEL +0 -0
  23. {invokeai-6.11.0rc1.dist-info → invokeai-6.11.1.dist-info}/entry_points.txt +0 -0
  24. {invokeai-6.11.0rc1.dist-info → invokeai-6.11.1.dist-info}/licenses/LICENSE +0 -0
  25. {invokeai-6.11.0rc1.dist-info → invokeai-6.11.1.dist-info}/licenses/LICENSE-SD1+SD2.txt +0 -0
  26. {invokeai-6.11.0rc1.dist-info → invokeai-6.11.1.dist-info}/licenses/LICENSE-SDXL.txt +0 -0
  27. {invokeai-6.11.0rc1.dist-info → invokeai-6.11.1.dist-info}/top_level.txt +0 -0
invokeai/backend/flux2/denoise.py

@@ -4,6 +4,7 @@ This module provides the denoising function for FLUX.2 Klein models,
 which use Qwen3 as the text encoder instead of CLIP+T5.
 """
 
+import inspect
 import math
 from typing import Any, Callable
 
@@ -87,11 +88,18 @@ def denoise(
     # The scheduler will apply dynamic shifting internally using mu (if enabled in scheduler config)
     sigmas = np.array(timesteps[:-1], dtype=np.float32)  # Exclude final 0.0
 
-    # Pass mu if provided - it will only be used if scheduler has use_dynamic_shifting=True
-    if mu is not None:
+    # Check if scheduler supports sigmas parameter using inspect.signature
+    # FlowMatchHeunDiscreteScheduler and FlowMatchLCMScheduler don't support sigmas
+    set_timesteps_sig = inspect.signature(scheduler.set_timesteps)
+    supports_sigmas = "sigmas" in set_timesteps_sig.parameters
+    if supports_sigmas and mu is not None:
+        # Pass mu if provided - it will only be used if scheduler has use_dynamic_shifting=True
         scheduler.set_timesteps(sigmas=sigmas.tolist(), mu=mu, device=img.device)
-    else:
+    elif supports_sigmas:
         scheduler.set_timesteps(sigmas=sigmas.tolist(), device=img.device)
+    else:
+        # Scheduler doesn't support sigmas (e.g., Heun, LCM) - use num_inference_steps
+        scheduler.set_timesteps(num_inference_steps=len(sigmas), device=img.device)
 
     num_scheduler_steps = len(scheduler.timesteps)
     is_heun = hasattr(scheduler, "state_in_first_order")
     user_step = 0
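The branch above keys off runtime introspection of the scheduler API rather than explicit type checks. A minimal standalone sketch of the same capability-detection pattern, assuming a recent diffusers release (the printed strings are illustrative, not from the diff):

import inspect

from diffusers import FlowMatchEulerDiscreteScheduler, FlowMatchHeunDiscreteScheduler

for scheduler in (FlowMatchEulerDiscreteScheduler(), FlowMatchHeunDiscreteScheduler()):
    # Inspect the concrete set_timesteps signature at runtime.
    params = inspect.signature(scheduler.set_timesteps).parameters
    if "sigmas" in params:
        # Euler-style flow-match schedulers accept an explicit sigma schedule.
        print(type(scheduler).__name__, "-> pass sigmas (and mu, if dynamic shifting is enabled)")
    else:
        # Heun/LCM-style schedulers only take a step count.
        print(type(scheduler).__name__, "-> fall back to num_inference_steps")

Duck-typing via the signature keeps the code working for any scheduler the user plugs in, at the cost of relying on parameter names staying stable across diffusers versions.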
@@ -152,7 +160,15 @@ def denoise(
 
         # Apply inpainting merge at each step
        if inpaint_extension is not None:
-            img = inpaint_extension.merge_intermediate_latents_with_init_latents(img, t_prev)
+            # Separate the generated latents from the reference conditioning
+            gen_img = img[:, :original_seq_len, :]
+            ref_img = img[:, original_seq_len:, :]
+
+            # Merge only the generated part
+            gen_img = inpaint_extension.merge_intermediate_latents_with_init_latents(gen_img, t_prev)
+
+            # Concatenate back together
+            img = torch.cat([gen_img, ref_img], dim=1)
 
         # For Heun, only increment user step after second-order step completes
         if is_heun:
@@ -239,8 +255,19 @@ def denoise(
 
         # Apply inpainting merge at each step
         if inpaint_extension is not None:
-            img = inpaint_extension.merge_intermediate_latents_with_init_latents(img, t_prev)
-            preview_img = inpaint_extension.merge_intermediate_latents_with_init_latents(preview_img, 0.0)
+            # Separate the generated latents from the reference conditioning
+            gen_img = img[:, :original_seq_len, :]
+            ref_img = img[:, original_seq_len:, :]
+
+            # Merge only the generated part
+            gen_img = inpaint_extension.merge_intermediate_latents_with_init_latents(gen_img, t_prev)
+
+            # Concatenate back together
+            img = torch.cat([gen_img, ref_img], dim=1)
+
+            # Handling preview images
+            preview_gen = preview_img[:, :original_seq_len, :]
+            preview_gen = inpaint_extension.merge_intermediate_latents_with_init_latents(preview_gen, 0.0)
 
         # Extract only the generated image portion for preview (exclude reference images)
         callback_latents = preview_img[:, :original_seq_len, :] if img_cond_seq is not None else preview_img
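Both merge sites now follow the same split/merge/concat pattern on the packed token sequence: FLUX.2 concatenates reference-image tokens after the generated-image tokens along the sequence dimension, and only the generated portion may be blended with the init latents. A toy sketch of the tensor shapes involved (the mask, init values, and blend below are illustrative stand-ins for the inpaint extension, not its actual implementation):

import torch

original_seq_len = 16                 # generated-image tokens
img = torch.randn(1, 16 + 8, 64)      # plus 8 trailing reference-conditioning tokens

# Split: only the generated tokens get blended with the init latents;
# reference tokens are conditioning and must pass through untouched.
gen_img = img[:, :original_seq_len, :]
ref_img = img[:, original_seq_len:, :]

# Illustrative stand-in for merge_intermediate_latents_with_init_latents:
# keep generated latents where the inpaint mask is 1, init latents elsewhere.
init_latents = torch.zeros_like(gen_img)
mask = (torch.rand(1, original_seq_len, 1) > 0.5).float()
gen_img = mask * gen_img + (1.0 - mask) * init_latents

# Reassemble the packed sequence in its original token order.
img = torch.cat([gen_img, ref_img], dim=1)
assert img.shape == (1, 24, 64)

This explains the rc1 bug being fixed: merging the full sequence applied an inpaint mask sized for the generated image to the reference tokens as well, corrupting the conditioning.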
invokeai/backend/flux2/sampling_utils.py

@@ -108,33 +108,27 @@ def unpack_flux2(x: torch.Tensor, height: int, width: int) -> torch.Tensor:
 
 
 def compute_empirical_mu(image_seq_len: int, num_steps: int) -> float:
-    """Compute empirical mu for FLUX.2 schedule shifting.
+    """Compute mu for FLUX.2 schedule shifting.
 
-    This matches the diffusers Flux2Pipeline implementation.
-    The mu value controls how much the schedule is shifted towards higher timesteps.
+    Uses a fixed mu value of 2.02, matching ComfyUI's proven FLUX.2 configuration.
+
+    The previous implementation (from diffusers' FLUX.1 pipeline) computed mu as a
+    linear function of image_seq_len, which produced excessively high values at
+    high resolutions (e.g., mu=3.23 at 2048x2048). This over-shifted the sigma
+    schedule, compressing almost all values above 0.9 and forcing the model to
+    denoise everything in the final 1-2 steps, causing severe grid/diamond artifacts.
+
+    ComfyUI uses a fixed shift=2.02 for FLUX.2 Klein at all resolutions and produces
+    artifact-free images even at 2048x2048.
 
     Args:
-        image_seq_len: Number of image tokens (packed_h * packed_w).
-        num_steps: Number of denoising steps.
+        image_seq_len: Number of image tokens (packed_h * packed_w). Currently unused.
+        num_steps: Number of denoising steps. Currently unused.
 
     Returns:
-        The empirical mu value.
+        The mu value (fixed at 2.02).
     """
-    a1, b1 = 8.73809524e-05, 1.89833333
-    a2, b2 = 0.00016927, 0.45666666
-
-    if image_seq_len > 4300:
-        mu = a2 * image_seq_len + b2
-        return float(mu)
-
-    m_200 = a2 * image_seq_len + b2
-    m_10 = a1 * image_seq_len + b1
-
-    a = (m_200 - m_10) / 190.0
-    b = m_200 - 200.0 * a
-    mu = a * num_steps + b
-
-    return float(mu)
+    return 2.02
 
 
 def get_schedule_flux2(
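To see why the fixed value matters, recall how mu shifts a flow-matching sigma schedule. The sketch below uses the time-shift formula exp(mu) / (exp(mu) + (1/sigma - 1)), as implemented by diffusers' flow-match schedulers with dynamic shifting; the ten-point sigma grid is illustrative:

import math

def shift_sigma(sigma: float, mu: float) -> float:
    # Larger mu pushes more of the schedule toward high noise levels.
    return math.exp(mu) / (math.exp(mu) + (1.0 / sigma - 1.0))

linear = [i / 10 for i in range(10, 0, -1)]  # 1.0, 0.9, ..., 0.1
for mu in (2.02, 3.23):
    print(f"mu={mu}:", [round(shift_sigma(s, mu), 3) for s in linear])

# mu=2.02: [1.0, 0.985, 0.968, 0.946, 0.919, 0.883, 0.834, 0.764, 0.653, 0.456]
# mu=3.23: [1.0, 0.996, 0.990, 0.983, 0.974, 0.962, 0.944, 0.916, 0.863, 0.737]
# At mu=3.23, eight of the ten shifted sigmas sit above 0.9, so nearly all of
# the denoising is deferred to the last couple of steps - the artifact-producing
# compression the new docstring describes. mu=2.02 spreads the schedule out.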
@@ -169,11 +163,14 @@ def get_schedule_flux2(
 
 
 def generate_img_ids_flux2(h: int, w: int, batch_size: int, device: torch.device) -> torch.Tensor:
-    """Generate tensor of image position ids for FLUX.2.
+    """Generate tensor of image position ids for FLUX.2 with RoPE scaling.
 
     FLUX.2 uses 4D position coordinates (T, H, W, L) for its rotary position embeddings.
     This is different from FLUX.1 which uses 3D coordinates.
 
+    RoPE Scaling: For resolutions >1536x1536, position IDs are scaled down using
+    Position Interpolation to prevent RoPE degradation and diamond/grid artifacts.
+
     IMPORTANT: Position IDs must use int64 (long) dtype like diffusers, not bfloat16.
     Using floating point dtype for position IDs can cause NaN in rotary embeddings.
 
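On the RoPE scaling mentioned in that docstring: Position Interpolation compresses out-of-range positions into the trained range instead of extrapolating past it. A sketch of the idea along one axis; the trained grid size of 96 packed tokens (1536px / 8 for the VAE latent / 2 for patch packing) is an assumption for illustration, and the actual cutoff and scaling live inside generate_img_ids_flux2:

import torch

def interpolated_positions(n_tokens: int, trained_max: int = 96) -> torch.Tensor:
    # Position Interpolation: compress positions into the trained range so
    # RoPE frequencies stay in-distribution at large resolutions.
    pos = torch.arange(n_tokens, dtype=torch.float32)
    if n_tokens > trained_max:
        pos = pos * (trained_max / n_tokens)
    # Position IDs must end up integer (int64), per the docstring above.
    return pos.round().to(torch.int64)

print(interpolated_positions(96)[-1].item())   # 95 - unchanged at 1536x1536
print(interpolated_positions(128)[-1].item())  # 95 - 2048x2048 squeezed into the trained range

The trade-off is that neighboring tokens can share a position ID after rounding, slightly blurring positional resolution, which is generally far less visible than the grid/diamond artifacts from out-of-range RoPE frequencies.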