diffusers 0.30.3__py3-none-any.whl → 0.32.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- diffusers/__init__.py +97 -4
- diffusers/callbacks.py +56 -3
- diffusers/configuration_utils.py +13 -1
- diffusers/image_processor.py +282 -71
- diffusers/loaders/__init__.py +24 -3
- diffusers/loaders/ip_adapter.py +543 -16
- diffusers/loaders/lora_base.py +138 -125
- diffusers/loaders/lora_conversion_utils.py +647 -0
- diffusers/loaders/lora_pipeline.py +2216 -230
- diffusers/loaders/peft.py +380 -0
- diffusers/loaders/single_file_model.py +71 -4
- diffusers/loaders/single_file_utils.py +597 -10
- diffusers/loaders/textual_inversion.py +5 -3
- diffusers/loaders/transformer_flux.py +181 -0
- diffusers/loaders/transformer_sd3.py +89 -0
- diffusers/loaders/unet.py +56 -12
- diffusers/models/__init__.py +49 -12
- diffusers/models/activations.py +22 -9
- diffusers/models/adapter.py +53 -53
- diffusers/models/attention.py +98 -13
- diffusers/models/attention_flax.py +1 -1
- diffusers/models/attention_processor.py +2160 -346
- diffusers/models/autoencoders/__init__.py +5 -0
- diffusers/models/autoencoders/autoencoder_dc.py +620 -0
- diffusers/models/autoencoders/autoencoder_kl.py +73 -12
- diffusers/models/autoencoders/autoencoder_kl_allegro.py +1149 -0
- diffusers/models/autoencoders/autoencoder_kl_cogvideox.py +213 -105
- diffusers/models/autoencoders/autoencoder_kl_hunyuan_video.py +1176 -0
- diffusers/models/autoencoders/autoencoder_kl_ltx.py +1338 -0
- diffusers/models/autoencoders/autoencoder_kl_mochi.py +1166 -0
- diffusers/models/autoencoders/autoencoder_kl_temporal_decoder.py +3 -10
- diffusers/models/autoencoders/autoencoder_tiny.py +4 -2
- diffusers/models/autoencoders/vae.py +18 -5
- diffusers/models/controlnet.py +47 -802
- diffusers/models/controlnet_flux.py +70 -0
- diffusers/models/controlnet_sd3.py +26 -376
- diffusers/models/controlnet_sparsectrl.py +46 -719
- diffusers/models/controlnets/__init__.py +23 -0
- diffusers/models/controlnets/controlnet.py +872 -0
- diffusers/models/{controlnet_flax.py → controlnets/controlnet_flax.py} +5 -5
- diffusers/models/controlnets/controlnet_flux.py +536 -0
- diffusers/models/{controlnet_hunyuan.py → controlnets/controlnet_hunyuan.py} +7 -7
- diffusers/models/controlnets/controlnet_sd3.py +489 -0
- diffusers/models/controlnets/controlnet_sparsectrl.py +788 -0
- diffusers/models/controlnets/controlnet_union.py +832 -0
- diffusers/models/{controlnet_xs.py → controlnets/controlnet_xs.py} +14 -13
- diffusers/models/controlnets/multicontrolnet.py +183 -0
- diffusers/models/embeddings.py +996 -92
- diffusers/models/embeddings_flax.py +23 -9
- diffusers/models/model_loading_utils.py +264 -14
- diffusers/models/modeling_flax_utils.py +1 -1
- diffusers/models/modeling_utils.py +334 -51
- diffusers/models/normalization.py +157 -13
- diffusers/models/transformers/__init__.py +6 -0
- diffusers/models/transformers/auraflow_transformer_2d.py +3 -2
- diffusers/models/transformers/cogvideox_transformer_3d.py +69 -13
- diffusers/models/transformers/dit_transformer_2d.py +1 -1
- diffusers/models/transformers/latte_transformer_3d.py +4 -4
- diffusers/models/transformers/pixart_transformer_2d.py +10 -2
- diffusers/models/transformers/sana_transformer.py +488 -0
- diffusers/models/transformers/stable_audio_transformer.py +1 -1
- diffusers/models/transformers/transformer_2d.py +1 -1
- diffusers/models/transformers/transformer_allegro.py +422 -0
- diffusers/models/transformers/transformer_cogview3plus.py +386 -0
- diffusers/models/transformers/transformer_flux.py +189 -51
- diffusers/models/transformers/transformer_hunyuan_video.py +789 -0
- diffusers/models/transformers/transformer_ltx.py +469 -0
- diffusers/models/transformers/transformer_mochi.py +499 -0
- diffusers/models/transformers/transformer_sd3.py +112 -18
- diffusers/models/transformers/transformer_temporal.py +1 -1
- diffusers/models/unets/unet_1d_blocks.py +1 -1
- diffusers/models/unets/unet_2d.py +8 -1
- diffusers/models/unets/unet_2d_blocks.py +88 -21
- diffusers/models/unets/unet_2d_condition.py +9 -9
- diffusers/models/unets/unet_3d_blocks.py +9 -7
- diffusers/models/unets/unet_motion_model.py +46 -68
- diffusers/models/unets/unet_spatio_temporal_condition.py +23 -0
- diffusers/models/unets/unet_stable_cascade.py +2 -2
- diffusers/models/unets/uvit_2d.py +1 -1
- diffusers/models/upsampling.py +14 -6
- diffusers/pipelines/__init__.py +69 -6
- diffusers/pipelines/allegro/__init__.py +48 -0
- diffusers/pipelines/allegro/pipeline_allegro.py +938 -0
- diffusers/pipelines/allegro/pipeline_output.py +23 -0
- diffusers/pipelines/animatediff/__init__.py +2 -0
- diffusers/pipelines/animatediff/pipeline_animatediff.py +45 -21
- diffusers/pipelines/animatediff/pipeline_animatediff_controlnet.py +52 -22
- diffusers/pipelines/animatediff/pipeline_animatediff_sdxl.py +18 -4
- diffusers/pipelines/animatediff/pipeline_animatediff_sparsectrl.py +3 -1
- diffusers/pipelines/animatediff/pipeline_animatediff_video2video.py +104 -72
- diffusers/pipelines/animatediff/pipeline_animatediff_video2video_controlnet.py +1341 -0
- diffusers/pipelines/audioldm2/modeling_audioldm2.py +3 -3
- diffusers/pipelines/aura_flow/pipeline_aura_flow.py +2 -9
- diffusers/pipelines/auto_pipeline.py +88 -10
- diffusers/pipelines/blip_diffusion/modeling_blip2.py +1 -1
- diffusers/pipelines/cogvideo/__init__.py +2 -0
- diffusers/pipelines/cogvideo/pipeline_cogvideox.py +80 -39
- diffusers/pipelines/cogvideo/pipeline_cogvideox_fun_control.py +825 -0
- diffusers/pipelines/cogvideo/pipeline_cogvideox_image2video.py +108 -50
- diffusers/pipelines/cogvideo/pipeline_cogvideox_video2video.py +89 -50
- diffusers/pipelines/cogview3/__init__.py +47 -0
- diffusers/pipelines/cogview3/pipeline_cogview3plus.py +674 -0
- diffusers/pipelines/cogview3/pipeline_output.py +21 -0
- diffusers/pipelines/controlnet/__init__.py +86 -80
- diffusers/pipelines/controlnet/multicontrolnet.py +7 -178
- diffusers/pipelines/controlnet/pipeline_controlnet.py +20 -3
- diffusers/pipelines/controlnet/pipeline_controlnet_img2img.py +9 -2
- diffusers/pipelines/controlnet/pipeline_controlnet_inpaint.py +9 -2
- diffusers/pipelines/controlnet/pipeline_controlnet_inpaint_sd_xl.py +37 -15
- diffusers/pipelines/controlnet/pipeline_controlnet_sd_xl.py +12 -4
- diffusers/pipelines/controlnet/pipeline_controlnet_sd_xl_img2img.py +9 -4
- diffusers/pipelines/controlnet/pipeline_controlnet_union_inpaint_sd_xl.py +1790 -0
- diffusers/pipelines/controlnet/pipeline_controlnet_union_sd_xl.py +1501 -0
- diffusers/pipelines/controlnet/pipeline_controlnet_union_sd_xl_img2img.py +1627 -0
- diffusers/pipelines/controlnet_hunyuandit/pipeline_hunyuandit_controlnet.py +22 -4
- diffusers/pipelines/controlnet_sd3/__init__.py +4 -0
- diffusers/pipelines/controlnet_sd3/pipeline_stable_diffusion_3_controlnet.py +56 -20
- diffusers/pipelines/controlnet_sd3/pipeline_stable_diffusion_3_controlnet_inpainting.py +1153 -0
- diffusers/pipelines/ddpm/pipeline_ddpm.py +2 -2
- diffusers/pipelines/deepfloyd_if/pipeline_output.py +6 -5
- diffusers/pipelines/deprecated/alt_diffusion/pipeline_alt_diffusion.py +16 -4
- diffusers/pipelines/deprecated/alt_diffusion/pipeline_alt_diffusion_img2img.py +1 -1
- diffusers/pipelines/deprecated/versatile_diffusion/modeling_text_unet.py +32 -9
- diffusers/pipelines/flux/__init__.py +23 -1
- diffusers/pipelines/flux/modeling_flux.py +47 -0
- diffusers/pipelines/flux/pipeline_flux.py +256 -48
- diffusers/pipelines/flux/pipeline_flux_control.py +889 -0
- diffusers/pipelines/flux/pipeline_flux_control_img2img.py +945 -0
- diffusers/pipelines/flux/pipeline_flux_control_inpaint.py +1141 -0
- diffusers/pipelines/flux/pipeline_flux_controlnet.py +1006 -0
- diffusers/pipelines/flux/pipeline_flux_controlnet_image_to_image.py +998 -0
- diffusers/pipelines/flux/pipeline_flux_controlnet_inpainting.py +1204 -0
- diffusers/pipelines/flux/pipeline_flux_fill.py +969 -0
- diffusers/pipelines/flux/pipeline_flux_img2img.py +856 -0
- diffusers/pipelines/flux/pipeline_flux_inpaint.py +1022 -0
- diffusers/pipelines/flux/pipeline_flux_prior_redux.py +492 -0
- diffusers/pipelines/flux/pipeline_output.py +16 -0
- diffusers/pipelines/free_noise_utils.py +365 -5
- diffusers/pipelines/hunyuan_video/__init__.py +48 -0
- diffusers/pipelines/hunyuan_video/pipeline_hunyuan_video.py +687 -0
- diffusers/pipelines/hunyuan_video/pipeline_output.py +20 -0
- diffusers/pipelines/hunyuandit/pipeline_hunyuandit.py +20 -4
- diffusers/pipelines/kandinsky/pipeline_kandinsky_combined.py +9 -9
- diffusers/pipelines/kandinsky2_2/pipeline_kandinsky2_2_combined.py +2 -2
- diffusers/pipelines/kolors/pipeline_kolors.py +1 -1
- diffusers/pipelines/kolors/pipeline_kolors_img2img.py +14 -11
- diffusers/pipelines/kolors/text_encoder.py +2 -2
- diffusers/pipelines/kolors/tokenizer.py +4 -0
- diffusers/pipelines/latent_consistency_models/pipeline_latent_consistency_img2img.py +1 -1
- diffusers/pipelines/latent_consistency_models/pipeline_latent_consistency_text2img.py +1 -1
- diffusers/pipelines/latent_diffusion/pipeline_latent_diffusion.py +1 -1
- diffusers/pipelines/latte/pipeline_latte.py +2 -2
- diffusers/pipelines/ledits_pp/pipeline_leditspp_stable_diffusion.py +15 -3
- diffusers/pipelines/ledits_pp/pipeline_leditspp_stable_diffusion_xl.py +15 -3
- diffusers/pipelines/ltx/__init__.py +50 -0
- diffusers/pipelines/ltx/pipeline_ltx.py +789 -0
- diffusers/pipelines/ltx/pipeline_ltx_image2video.py +885 -0
- diffusers/pipelines/ltx/pipeline_output.py +20 -0
- diffusers/pipelines/lumina/pipeline_lumina.py +3 -10
- diffusers/pipelines/mochi/__init__.py +48 -0
- diffusers/pipelines/mochi/pipeline_mochi.py +748 -0
- diffusers/pipelines/mochi/pipeline_output.py +20 -0
- diffusers/pipelines/pag/__init__.py +13 -0
- diffusers/pipelines/pag/pag_utils.py +8 -2
- diffusers/pipelines/pag/pipeline_pag_controlnet_sd.py +2 -3
- diffusers/pipelines/pag/pipeline_pag_controlnet_sd_inpaint.py +1543 -0
- diffusers/pipelines/pag/pipeline_pag_controlnet_sd_xl.py +3 -5
- diffusers/pipelines/pag/pipeline_pag_controlnet_sd_xl_img2img.py +1683 -0
- diffusers/pipelines/pag/pipeline_pag_hunyuandit.py +22 -6
- diffusers/pipelines/pag/pipeline_pag_kolors.py +1 -1
- diffusers/pipelines/pag/pipeline_pag_pixart_sigma.py +7 -14
- diffusers/pipelines/pag/pipeline_pag_sana.py +886 -0
- diffusers/pipelines/pag/pipeline_pag_sd.py +18 -6
- diffusers/pipelines/pag/pipeline_pag_sd_3.py +18 -9
- diffusers/pipelines/pag/pipeline_pag_sd_3_img2img.py +1058 -0
- diffusers/pipelines/pag/pipeline_pag_sd_animatediff.py +5 -1
- diffusers/pipelines/pag/pipeline_pag_sd_img2img.py +1094 -0
- diffusers/pipelines/pag/pipeline_pag_sd_inpaint.py +1356 -0
- diffusers/pipelines/pag/pipeline_pag_sd_xl.py +18 -6
- diffusers/pipelines/pag/pipeline_pag_sd_xl_img2img.py +31 -16
- diffusers/pipelines/pag/pipeline_pag_sd_xl_inpaint.py +42 -19
- diffusers/pipelines/pia/pipeline_pia.py +2 -0
- diffusers/pipelines/pipeline_flax_utils.py +1 -1
- diffusers/pipelines/pipeline_loading_utils.py +250 -31
- diffusers/pipelines/pipeline_utils.py +158 -186
- diffusers/pipelines/pixart_alpha/pipeline_pixart_alpha.py +7 -14
- diffusers/pipelines/pixart_alpha/pipeline_pixart_sigma.py +7 -14
- diffusers/pipelines/sana/__init__.py +47 -0
- diffusers/pipelines/sana/pipeline_output.py +21 -0
- diffusers/pipelines/sana/pipeline_sana.py +884 -0
- diffusers/pipelines/stable_audio/pipeline_stable_audio.py +12 -1
- diffusers/pipelines/stable_cascade/pipeline_stable_cascade.py +35 -3
- diffusers/pipelines/stable_cascade/pipeline_stable_cascade_prior.py +2 -2
- diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion.py +46 -9
- diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_img2img.py +1 -1
- diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_inpaint.py +1 -1
- diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_latent_upscale.py +241 -81
- diffusers/pipelines/stable_diffusion_3/pipeline_stable_diffusion_3.py +228 -23
- diffusers/pipelines/stable_diffusion_3/pipeline_stable_diffusion_3_img2img.py +82 -13
- diffusers/pipelines/stable_diffusion_3/pipeline_stable_diffusion_3_inpaint.py +60 -11
- diffusers/pipelines/stable_diffusion_gligen/pipeline_stable_diffusion_gligen_text_image.py +11 -1
- diffusers/pipelines/stable_diffusion_k_diffusion/pipeline_stable_diffusion_k_diffusion.py +1 -1
- diffusers/pipelines/stable_diffusion_ldm3d/pipeline_stable_diffusion_ldm3d.py +16 -4
- diffusers/pipelines/stable_diffusion_panorama/pipeline_stable_diffusion_panorama.py +16 -4
- diffusers/pipelines/stable_diffusion_xl/pipeline_stable_diffusion_xl.py +16 -12
- diffusers/pipelines/stable_diffusion_xl/pipeline_stable_diffusion_xl_img2img.py +29 -22
- diffusers/pipelines/stable_diffusion_xl/pipeline_stable_diffusion_xl_inpaint.py +29 -22
- diffusers/pipelines/stable_video_diffusion/pipeline_stable_video_diffusion.py +1 -1
- diffusers/pipelines/t2i_adapter/pipeline_stable_diffusion_adapter.py +1 -1
- diffusers/pipelines/t2i_adapter/pipeline_stable_diffusion_xl_adapter.py +16 -4
- diffusers/pipelines/text_to_video_synthesis/pipeline_text_to_video_zero_sdxl.py +15 -3
- diffusers/pipelines/unidiffuser/modeling_uvit.py +2 -2
- diffusers/pipelines/wuerstchen/modeling_wuerstchen_prior.py +1 -1
- diffusers/quantizers/__init__.py +16 -0
- diffusers/quantizers/auto.py +139 -0
- diffusers/quantizers/base.py +233 -0
- diffusers/quantizers/bitsandbytes/__init__.py +2 -0
- diffusers/quantizers/bitsandbytes/bnb_quantizer.py +561 -0
- diffusers/quantizers/bitsandbytes/utils.py +306 -0
- diffusers/quantizers/gguf/__init__.py +1 -0
- diffusers/quantizers/gguf/gguf_quantizer.py +159 -0
- diffusers/quantizers/gguf/utils.py +456 -0
- diffusers/quantizers/quantization_config.py +669 -0
- diffusers/quantizers/torchao/__init__.py +15 -0
- diffusers/quantizers/torchao/torchao_quantizer.py +285 -0
- diffusers/schedulers/scheduling_ddim.py +4 -1
- diffusers/schedulers/scheduling_ddim_cogvideox.py +4 -1
- diffusers/schedulers/scheduling_ddim_parallel.py +4 -1
- diffusers/schedulers/scheduling_ddpm.py +6 -7
- diffusers/schedulers/scheduling_ddpm_parallel.py +6 -7
- diffusers/schedulers/scheduling_deis_multistep.py +102 -6
- diffusers/schedulers/scheduling_dpmsolver_multistep.py +113 -6
- diffusers/schedulers/scheduling_dpmsolver_multistep_inverse.py +111 -5
- diffusers/schedulers/scheduling_dpmsolver_sde.py +125 -10
- diffusers/schedulers/scheduling_dpmsolver_singlestep.py +126 -7
- diffusers/schedulers/scheduling_edm_euler.py +8 -6
- diffusers/schedulers/scheduling_euler_ancestral_discrete.py +4 -1
- diffusers/schedulers/scheduling_euler_discrete.py +92 -7
- diffusers/schedulers/scheduling_flow_match_euler_discrete.py +153 -6
- diffusers/schedulers/scheduling_flow_match_heun_discrete.py +4 -5
- diffusers/schedulers/scheduling_heun_discrete.py +114 -8
- diffusers/schedulers/scheduling_k_dpm_2_ancestral_discrete.py +116 -11
- diffusers/schedulers/scheduling_k_dpm_2_discrete.py +110 -8
- diffusers/schedulers/scheduling_lcm.py +2 -6
- diffusers/schedulers/scheduling_lms_discrete.py +76 -1
- diffusers/schedulers/scheduling_repaint.py +1 -1
- diffusers/schedulers/scheduling_sasolver.py +102 -6
- diffusers/schedulers/scheduling_tcd.py +2 -6
- diffusers/schedulers/scheduling_unclip.py +4 -1
- diffusers/schedulers/scheduling_unipc_multistep.py +127 -5
- diffusers/training_utils.py +63 -19
- diffusers/utils/__init__.py +7 -1
- diffusers/utils/constants.py +1 -0
- diffusers/utils/dummy_pt_objects.py +240 -0
- diffusers/utils/dummy_torch_and_transformers_objects.py +435 -0
- diffusers/utils/dynamic_modules_utils.py +3 -3
- diffusers/utils/hub_utils.py +44 -40
- diffusers/utils/import_utils.py +98 -8
- diffusers/utils/loading_utils.py +28 -4
- diffusers/utils/peft_utils.py +6 -3
- diffusers/utils/testing_utils.py +115 -1
- diffusers/utils/torch_utils.py +3 -0
- {diffusers-0.30.3.dist-info → diffusers-0.32.0.dist-info}/METADATA +73 -72
- {diffusers-0.30.3.dist-info → diffusers-0.32.0.dist-info}/RECORD +268 -193
- {diffusers-0.30.3.dist-info → diffusers-0.32.0.dist-info}/WHEEL +1 -1
- {diffusers-0.30.3.dist-info → diffusers-0.32.0.dist-info}/LICENSE +0 -0
- {diffusers-0.30.3.dist-info → diffusers-0.32.0.dist-info}/entry_points.txt +0 -0
- {diffusers-0.30.3.dist-info → diffusers-0.32.0.dist-info}/top_level.txt +0 -0
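The most visible addition in this range is the new `diffusers/quantizers` package (bitsandbytes, gguf, and torchao backends) together with the configs in `quantization_config.py`. As a rough orientation only, the sketch below shows how the new bitsandbytes path is typically reached from user code; the model class and checkpoint id are illustrative, and a CUDA GPU with `bitsandbytes>=0.43.3` and `accelerate>=0.26.0` is assumed, matching the checks in the quantizer expanded further down.

# Sketch: 4-bit NF4 quantization of a single model component via the new quantizers package.
# `FluxTransformer2DModel` and the checkpoint id are illustrative; any model class that accepts
# `quantization_config` in `from_pretrained` follows the same pattern.
import torch
from diffusers import BitsAndBytesConfig, FluxTransformer2DModel

quant_config = BitsAndBytesConfig(
    load_in_4bit=True,
    bnb_4bit_quant_type="nf4",
    bnb_4bit_compute_dtype=torch.bfloat16,
)
transformer = FluxTransformer2DModel.from_pretrained(
    "black-forest-labs/FLUX.1-dev",
    subfolder="transformer",
    quantization_config=quant_config,
    torch_dtype=torch.bfloat16,
)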
diffusers/quantizers/bitsandbytes/bnb_quantizer.py
@@ -0,0 +1,561 @@
+# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""
+Adapted from
+https://github.com/huggingface/transformers/blob/c409cd81777fb27aadc043ed3d8339dbc020fb3b/src/transformers/quantizers/quantizer_bnb_4bit.py
+"""
+
+from typing import TYPE_CHECKING, Any, Dict, List, Optional, Union
+
+from ...utils import get_module_from_name
+from ..base import DiffusersQuantizer
+
+
+if TYPE_CHECKING:
+    from ...models.modeling_utils import ModelMixin
+
+from ...utils import (
+    is_accelerate_available,
+    is_accelerate_version,
+    is_bitsandbytes_available,
+    is_bitsandbytes_version,
+    is_torch_available,
+    logging,
+)
+
+
+if is_torch_available():
+    import torch
+
+logger = logging.get_logger(__name__)
+
+
+class BnB4BitDiffusersQuantizer(DiffusersQuantizer):
+    """
+    4-bit quantization from bitsandbytes.py quantization method:
+        before loading: converts transformer layers into Linear4bit during loading: load 16bit weight and pass to the
+        layer object after: quantizes individual weights in Linear4bit into 4bit at the first .cuda() call saving:
+            from state dict, as usual; saves weights and `quant_state` components
+        loading:
+            need to locate `quant_state` components and pass to Param4bit constructor
+    """
+
+    use_keep_in_fp32_modules = True
+    requires_calibration = False
+
+    def __init__(self, quantization_config, **kwargs):
+        super().__init__(quantization_config, **kwargs)
+
+        if self.quantization_config.llm_int8_skip_modules is not None:
+            self.modules_to_not_convert = self.quantization_config.llm_int8_skip_modules
+
+    def validate_environment(self, *args, **kwargs):
+        if not torch.cuda.is_available():
+            raise RuntimeError("No GPU found. A GPU is needed for quantization.")
+        if not is_accelerate_available() or is_accelerate_version("<", "0.26.0"):
+            raise ImportError(
+                "Using `bitsandbytes` 4-bit quantization requires Accelerate: `pip install 'accelerate>=0.26.0'`"
+            )
+        if not is_bitsandbytes_available() or is_bitsandbytes_version("<", "0.43.3"):
+            raise ImportError(
+                "Using `bitsandbytes` 4-bit quantization requires the latest version of bitsandbytes: `pip install -U bitsandbytes`"
+            )
+
+        if kwargs.get("from_flax", False):
+            raise ValueError(
+                "Converting into 4-bit weights from flax weights is currently not supported, please make"
+                " sure the weights are in PyTorch format."
+            )
+
+        device_map = kwargs.get("device_map", None)
+        if (
+            device_map is not None
+            and isinstance(device_map, dict)
+            and not self.quantization_config.llm_int8_enable_fp32_cpu_offload
+        ):
+            device_map_without_no_convert = {
+                key: device_map[key] for key in device_map.keys() if key not in self.modules_to_not_convert
+            }
+            if "cpu" in device_map_without_no_convert.values() or "disk" in device_map_without_no_convert.values():
+                raise ValueError(
+                    "Some modules are dispatched on the CPU or the disk. Make sure you have enough GPU RAM to fit the "
+                    "quantized model. If you want to dispatch the model on the CPU or the disk while keeping these modules "
+                    "in 32-bit, you need to set `load_in_8bit_fp32_cpu_offload=True` and pass a custom `device_map` to "
+                    "`from_pretrained`. Check "
+                    "https://huggingface.co/docs/transformers/main/en/main_classes/quantization#offload-between-cpu-and-gpu "
+                    "for more details. "
+                )
+
+    def adjust_target_dtype(self, target_dtype: "torch.dtype") -> "torch.dtype":
+        if target_dtype != torch.int8:
+            from accelerate.utils import CustomDtype
+
+            logger.info("target_dtype {target_dtype} is replaced by `CustomDtype.INT4` for 4-bit BnB quantization")
+            return CustomDtype.INT4
+        else:
+            raise ValueError(f"Wrong `target_dtype` ({target_dtype}) provided.")
+
+    def check_if_quantized_param(
+        self,
+        model: "ModelMixin",
+        param_value: "torch.Tensor",
+        param_name: str,
+        state_dict: Dict[str, Any],
+        **kwargs,
+    ) -> bool:
+        import bitsandbytes as bnb
+
+        module, tensor_name = get_module_from_name(model, param_name)
+        if isinstance(module._parameters.get(tensor_name, None), bnb.nn.Params4bit):
+            # Add here check for loaded components' dtypes once serialization is implemented
+            return True
+        elif isinstance(module, bnb.nn.Linear4bit) and tensor_name == "bias":
+            # bias could be loaded by regular set_module_tensor_to_device() from accelerate,
+            # but it would wrongly use uninitialized weight there.
+            return True
+        else:
+            return False
+
+    def create_quantized_param(
+        self,
+        model: "ModelMixin",
+        param_value: "torch.Tensor",
+        param_name: str,
+        target_device: "torch.device",
+        state_dict: Dict[str, Any],
+        unexpected_keys: Optional[List[str]] = None,
+    ):
+        import bitsandbytes as bnb
+
+        module, tensor_name = get_module_from_name(model, param_name)
+
+        if tensor_name not in module._parameters:
+            raise ValueError(f"{module} does not have a parameter or a buffer named {tensor_name}.")
+
+        old_value = getattr(module, tensor_name)
+
+        if tensor_name == "bias":
+            if param_value is None:
+                new_value = old_value.to(target_device)
+            else:
+                new_value = param_value.to(target_device)
+
+            new_value = torch.nn.Parameter(new_value, requires_grad=old_value.requires_grad)
+            module._parameters[tensor_name] = new_value
+            return
+
+        if not isinstance(module._parameters[tensor_name], bnb.nn.Params4bit):
+            raise ValueError("this function only loads `Linear4bit components`")
+        if (
+            old_value.device == torch.device("meta")
+            and target_device not in ["meta", torch.device("meta")]
+            and param_value is None
+        ):
+            raise ValueError(f"{tensor_name} is on the meta device, we need a `value` to put in on {target_device}.")
+
+        # construct `new_value` for the module._parameters[tensor_name]:
+        if self.pre_quantized:
+            # 4bit loading. Collecting components for restoring quantized weight
+            # This can be expanded to make a universal call for any quantized weight loading
+
+            if not self.is_serializable:
+                raise ValueError(
+                    "Detected int4 weights but the version of bitsandbytes is not compatible with int4 serialization. "
+                    "Make sure to download the latest `bitsandbytes` version. `pip install --upgrade bitsandbytes`."
+                )
+
+            if (param_name + ".quant_state.bitsandbytes__fp4" not in state_dict) and (
+                param_name + ".quant_state.bitsandbytes__nf4" not in state_dict
+            ):
+                raise ValueError(
+                    f"Supplied state dict for {param_name} does not contain `bitsandbytes__*` and possibly other `quantized_stats` components."
+                )
+
+            quantized_stats = {}
+            for k, v in state_dict.items():
+                # `startswith` to counter for edge cases where `param_name`
+                # substring can be present in multiple places in the `state_dict`
+                if param_name + "." in k and k.startswith(param_name):
+                    quantized_stats[k] = v
+                    if unexpected_keys is not None and k in unexpected_keys:
+                        unexpected_keys.remove(k)
+
+            new_value = bnb.nn.Params4bit.from_prequantized(
+                data=param_value,
+                quantized_stats=quantized_stats,
+                requires_grad=False,
+                device=target_device,
+            )
+        else:
+            new_value = param_value.to("cpu")
+            kwargs = old_value.__dict__
+            new_value = bnb.nn.Params4bit(new_value, requires_grad=False, **kwargs).to(target_device)
+
+        module._parameters[tensor_name] = new_value
+
+    def check_quantized_param_shape(self, param_name, current_param, loaded_param):
+        current_param_shape = current_param.shape
+        loaded_param_shape = loaded_param.shape
+
+        n = current_param_shape.numel()
+        inferred_shape = (n,) if "bias" in param_name else ((n + 1) // 2, 1)
+        if loaded_param_shape != inferred_shape:
+            raise ValueError(
+                f"Expected the flattened shape of the current param ({param_name}) to be {loaded_param_shape} but is {inferred_shape}."
+            )
+        else:
+            return True
+
+    def adjust_max_memory(self, max_memory: Dict[str, Union[int, str]]) -> Dict[str, Union[int, str]]:
+        # need more space for buffers that are created during quantization
+        max_memory = {key: val * 0.90 for key, val in max_memory.items()}
+        return max_memory
+
+    def update_torch_dtype(self, torch_dtype: "torch.dtype") -> "torch.dtype":
+        if torch_dtype is None:
+            # We force the `dtype` to be float16, this is a requirement from `bitsandbytes`
+            logger.info(
+                "Overriding torch_dtype=%s with `torch_dtype=torch.float16` due to "
+                "requirements of `bitsandbytes` to enable model loading in 8-bit or 4-bit. "
+                "Pass your own torch_dtype to specify the dtype of the remaining non-linear layers or pass"
+                " torch_dtype=torch.float16 to remove this warning.",
+                torch_dtype,
+            )
+            torch_dtype = torch.float16
+        return torch_dtype
+
+    # (sayakpaul): I think it could be better to disable custom `device_map`s
+    # for the first phase of the integration in the interest of simplicity.
+    # Commenting this for discussions on the PR.
+    # def update_device_map(self, device_map):
+    #     if device_map is None:
+    #         device_map = {"": torch.cuda.current_device()}
+    #         logger.info(
+    #             "The device_map was not initialized. "
+    #             "Setting device_map to {'':torch.cuda.current_device()}. "
+    #             "If you want to use the model for inference, please set device_map ='auto' "
+    #         )
+    #     return device_map
+
+    def _process_model_before_weight_loading(
+        self,
+        model: "ModelMixin",
+        device_map,
+        keep_in_fp32_modules: List[str] = [],
+        **kwargs,
+    ):
+        from .utils import replace_with_bnb_linear
+
+        load_in_8bit_fp32_cpu_offload = self.quantization_config.llm_int8_enable_fp32_cpu_offload
+
+        # We may keep some modules such as the `proj_out` in their original dtype for numerical stability reasons
+        self.modules_to_not_convert = self.quantization_config.llm_int8_skip_modules
+
+        if not isinstance(self.modules_to_not_convert, list):
+            self.modules_to_not_convert = [self.modules_to_not_convert]
+
+        self.modules_to_not_convert.extend(keep_in_fp32_modules)
+
+        # Extend `self.modules_to_not_convert` to keys that are supposed to be offloaded to `cpu` or `disk`
+        if isinstance(device_map, dict) and len(device_map.keys()) > 1:
+            keys_on_cpu = [key for key, value in device_map.items() if value in ["disk", "cpu"]]
+
+            if len(keys_on_cpu) > 0 and not load_in_8bit_fp32_cpu_offload:
+                raise ValueError(
+                    "If you want to offload some keys to `cpu` or `disk`, you need to set "
+                    "`llm_int8_enable_fp32_cpu_offload=True`. Note that these modules will not be "
+                    " converted to 8-bit but kept in 32-bit."
+                )
+            self.modules_to_not_convert.extend(keys_on_cpu)
+
+        # Purge `None`.
+        # Unlike `transformers`, we don't know if we should always keep certain modules in FP32
+        # in case of diffusion transformer models. For language models and others alike, `lm_head`
+        # and tied modules are usually kept in FP32.
+        self.modules_to_not_convert = [module for module in self.modules_to_not_convert if module is not None]
+
+        model = replace_with_bnb_linear(
+            model, modules_to_not_convert=self.modules_to_not_convert, quantization_config=self.quantization_config
+        )
+        model.config.quantization_config = self.quantization_config
+
+    def _process_model_after_weight_loading(self, model: "ModelMixin", **kwargs):
+        model.is_loaded_in_4bit = True
+        model.is_4bit_serializable = self.is_serializable
+        return model
+
+    @property
+    def is_serializable(self):
+        # Because we're mandating `bitsandbytes` 0.43.3.
+        return True
+
+    @property
+    def is_trainable(self) -> bool:
+        # Because we're mandating `bitsandbytes` 0.43.3.
+        return True
+
+    def _dequantize(self, model):
+        from .utils import dequantize_and_replace
+
+        is_model_on_cpu = model.device.type == "cpu"
+        if is_model_on_cpu:
+            logger.info(
+                "Model was found to be on CPU (could happen as a result of `enable_model_cpu_offload()`). So, moving it to GPU. After dequantization, will move the model back to CPU again to preserve the previous device."
+            )
+            model.to(torch.cuda.current_device())
+
+        model = dequantize_and_replace(
+            model, self.modules_to_not_convert, quantization_config=self.quantization_config
+        )
+        if is_model_on_cpu:
+            model.to("cpu")
+        return model
+
+
+class BnB8BitDiffusersQuantizer(DiffusersQuantizer):
+    """
+    8-bit quantization from bitsandbytes quantization method:
+        before loading: converts transformer layers into Linear8bitLt during loading: load 16bit weight and pass to the
+        layer object after: quantizes individual weights in Linear8bitLt into 8bit at fitst .cuda() call
+    saving:
+        from state dict, as usual; saves weights and 'SCB' component
+    loading:
+        need to locate SCB component and pass to the Linear8bitLt object
+    """
+
+    use_keep_in_fp32_modules = True
+    requires_calibration = False
+
+    def __init__(self, quantization_config, **kwargs):
+        super().__init__(quantization_config, **kwargs)
+
+        if self.quantization_config.llm_int8_skip_modules is not None:
+            self.modules_to_not_convert = self.quantization_config.llm_int8_skip_modules
+
+    def validate_environment(self, *args, **kwargs):
+        if not torch.cuda.is_available():
+            raise RuntimeError("No GPU found. A GPU is needed for quantization.")
+        if not is_accelerate_available() or is_accelerate_version("<", "0.26.0"):
+            raise ImportError(
+                "Using `bitsandbytes` 8-bit quantization requires Accelerate: `pip install 'accelerate>=0.26.0'`"
+            )
+        if not is_bitsandbytes_available() or is_bitsandbytes_version("<", "0.43.3"):
+            raise ImportError(
+                "Using `bitsandbytes` 8-bit quantization requires the latest version of bitsandbytes: `pip install -U bitsandbytes`"
+            )
+
+        if kwargs.get("from_flax", False):
+            raise ValueError(
+                "Converting into 8-bit weights from flax weights is currently not supported, please make"
+                " sure the weights are in PyTorch format."
+            )
+
+        device_map = kwargs.get("device_map", None)
+        if (
+            device_map is not None
+            and isinstance(device_map, dict)
+            and not self.quantization_config.llm_int8_enable_fp32_cpu_offload
+        ):
+            device_map_without_no_convert = {
+                key: device_map[key] for key in device_map.keys() if key not in self.modules_to_not_convert
+            }
+            if "cpu" in device_map_without_no_convert.values() or "disk" in device_map_without_no_convert.values():
+                raise ValueError(
+                    "Some modules are dispatched on the CPU or the disk. Make sure you have enough GPU RAM to fit the "
+                    "quantized model. If you want to dispatch the model on the CPU or the disk while keeping these modules "
+                    "in 32-bit, you need to set `load_in_8bit_fp32_cpu_offload=True` and pass a custom `device_map` to "
+                    "`from_pretrained`. Check "
+                    "https://huggingface.co/docs/transformers/main/en/main_classes/quantization#offload-between-cpu-and-gpu "
+                    "for more details. "
+                )
+
+    # Copied from diffusers.quantizers.bitsandbytes.bnb_quantizer.BnB4BitDiffusersQuantizer.adjust_max_memory
+    def adjust_max_memory(self, max_memory: Dict[str, Union[int, str]]) -> Dict[str, Union[int, str]]:
+        # need more space for buffers that are created during quantization
+        max_memory = {key: val * 0.90 for key, val in max_memory.items()}
+        return max_memory
+
+    # Copied from diffusers.quantizers.bitsandbytes.bnb_quantizer.BnB4BitDiffusersQuantizer.update_torch_dtype
+    def update_torch_dtype(self, torch_dtype: "torch.dtype") -> "torch.dtype":
+        if torch_dtype is None:
+            # We force the `dtype` to be float16, this is a requirement from `bitsandbytes`
+            logger.info(
+                "Overriding torch_dtype=%s with `torch_dtype=torch.float16` due to "
+                "requirements of `bitsandbytes` to enable model loading in 8-bit or 4-bit. "
+                "Pass your own torch_dtype to specify the dtype of the remaining non-linear layers or pass"
+                " torch_dtype=torch.float16 to remove this warning.",
+                torch_dtype,
+            )
+            torch_dtype = torch.float16
+        return torch_dtype
+
+    # # Copied from diffusers.quantizers.bitsandbytes.bnb_quantizer.BnB4BitDiffusersQuantizer.update_device_map
+    # def update_device_map(self, device_map):
+    #     if device_map is None:
+    #         device_map = {"": torch.cuda.current_device()}
+    #         logger.info(
+    #             "The device_map was not initialized. "
+    #             "Setting device_map to {'':torch.cuda.current_device()}. "
+    #             "If you want to use the model for inference, please set device_map ='auto' "
+    #         )
+    #     return device_map
+
+    def adjust_target_dtype(self, target_dtype: "torch.dtype") -> "torch.dtype":
+        if target_dtype != torch.int8:
+            logger.info("target_dtype {target_dtype} is replaced by `torch.int8` for 8-bit BnB quantization")
+        return torch.int8
+
+    def check_if_quantized_param(
+        self,
+        model: "ModelMixin",
+        param_value: "torch.Tensor",
+        param_name: str,
+        state_dict: Dict[str, Any],
+        **kwargs,
+    ):
+        import bitsandbytes as bnb
+
+        module, tensor_name = get_module_from_name(model, param_name)
+        if isinstance(module._parameters.get(tensor_name, None), bnb.nn.Int8Params):
+            if self.pre_quantized:
+                if param_name.replace("weight", "SCB") not in state_dict.keys():
+                    raise ValueError("Missing quantization component `SCB`")
+                if param_value.dtype != torch.int8:
+                    raise ValueError(
+                        f"Incompatible dtype `{param_value.dtype}` when loading 8-bit prequantized weight. Expected `torch.int8`."
+                    )
+            return True
+        return False
+
+    def create_quantized_param(
+        self,
+        model: "ModelMixin",
+        param_value: "torch.Tensor",
+        param_name: str,
+        target_device: "torch.device",
+        state_dict: Dict[str, Any],
+        unexpected_keys: Optional[List[str]] = None,
+    ):
+        import bitsandbytes as bnb
+
+        fp16_statistics_key = param_name.replace("weight", "SCB")
+        fp16_weights_format_key = param_name.replace("weight", "weight_format")
+
+        fp16_statistics = state_dict.get(fp16_statistics_key, None)
+        fp16_weights_format = state_dict.get(fp16_weights_format_key, None)
+
+        module, tensor_name = get_module_from_name(model, param_name)
+        if tensor_name not in module._parameters:
+            raise ValueError(f"{module} does not have a parameter or a buffer named {tensor_name}.")
+
+        old_value = getattr(module, tensor_name)
+
+        if not isinstance(module._parameters[tensor_name], bnb.nn.Int8Params):
+            raise ValueError(f"Parameter `{tensor_name}` should only be a `bnb.nn.Int8Params` instance.")
+        if (
+            old_value.device == torch.device("meta")
+            and target_device not in ["meta", torch.device("meta")]
+            and param_value is None
+        ):
+            raise ValueError(f"{tensor_name} is on the meta device, we need a `value` to put in on {target_device}.")
+
+        new_value = param_value.to("cpu")
+        if self.pre_quantized and not self.is_serializable:
+            raise ValueError(
+                "Detected int8 weights but the version of bitsandbytes is not compatible with int8 serialization. "
+                "Make sure to download the latest `bitsandbytes` version. `pip install --upgrade bitsandbytes`."
+            )
+
+        kwargs = old_value.__dict__
+        new_value = bnb.nn.Int8Params(new_value, requires_grad=False, **kwargs).to(target_device)
+
+        module._parameters[tensor_name] = new_value
+        if fp16_statistics is not None:
+            setattr(module.weight, "SCB", fp16_statistics.to(target_device))
+            if unexpected_keys is not None:
+                unexpected_keys.remove(fp16_statistics_key)
+
+        # We just need to pop the `weight_format` keys from the state dict to remove unneeded
+        # messages. The correct format is correctly retrieved during the first forward pass.
+        if fp16_weights_format is not None and unexpected_keys is not None:
+            unexpected_keys.remove(fp16_weights_format_key)
+
+    # Copied from diffusers.quantizers.bitsandbytes.bnb_quantizer.BnB4BitDiffusersQuantizer._process_model_after_weight_loading with 4bit->8bit
+    def _process_model_after_weight_loading(self, model: "ModelMixin", **kwargs):
+        model.is_loaded_in_8bit = True
+        model.is_8bit_serializable = self.is_serializable
+        return model
+
+    # Copied from diffusers.quantizers.bitsandbytes.bnb_quantizer.BnB4BitDiffusersQuantizer._process_model_before_weight_loading
+    def _process_model_before_weight_loading(
+        self,
+        model: "ModelMixin",
+        device_map,
+        keep_in_fp32_modules: List[str] = [],
+        **kwargs,
+    ):
+        from .utils import replace_with_bnb_linear
+
+        load_in_8bit_fp32_cpu_offload = self.quantization_config.llm_int8_enable_fp32_cpu_offload
+
+        # We may keep some modules such as the `proj_out` in their original dtype for numerical stability reasons
+        self.modules_to_not_convert = self.quantization_config.llm_int8_skip_modules
+
+        if not isinstance(self.modules_to_not_convert, list):
+            self.modules_to_not_convert = [self.modules_to_not_convert]
+
+        self.modules_to_not_convert.extend(keep_in_fp32_modules)
+
+        # Extend `self.modules_to_not_convert` to keys that are supposed to be offloaded to `cpu` or `disk`
+        if isinstance(device_map, dict) and len(device_map.keys()) > 1:
+            keys_on_cpu = [key for key, value in device_map.items() if value in ["disk", "cpu"]]
+
+            if len(keys_on_cpu) > 0 and not load_in_8bit_fp32_cpu_offload:
+                raise ValueError(
+                    "If you want to offload some keys to `cpu` or `disk`, you need to set "
+                    "`llm_int8_enable_fp32_cpu_offload=True`. Note that these modules will not be "
+                    " converted to 8-bit but kept in 32-bit."
+                )
+            self.modules_to_not_convert.extend(keys_on_cpu)
+
+        # Purge `None`.
+        # Unlike `transformers`, we don't know if we should always keep certain modules in FP32
+        # in case of diffusion transformer models. For language models and others alike, `lm_head`
+        # and tied modules are usually kept in FP32.
+        self.modules_to_not_convert = [module for module in self.modules_to_not_convert if module is not None]
+
+        model = replace_with_bnb_linear(
+            model, modules_to_not_convert=self.modules_to_not_convert, quantization_config=self.quantization_config
+        )
+        model.config.quantization_config = self.quantization_config
+
+    @property
+    # Copied from diffusers.quantizers.bitsandbytes.bnb_quantizer.BnB4BitDiffusersQuantizer.is_serializable
+    def is_serializable(self):
+        # Because we're mandating `bitsandbytes` 0.43.3.
+        return True
+
+    @property
+    # Copied from diffusers.quantizers.bitsandbytes.bnb_quantizer.BnB4BitDiffusersQuantizer.is_serializable
+    def is_trainable(self) -> bool:
+        # Because we're mandating `bitsandbytes` 0.43.3.
+        return True
+
+    def _dequantize(self, model):
+        from .utils import dequantize_and_replace
+
+        model = dequantize_and_replace(
+            model, self.modules_to_not_convert, quantization_config=self.quantization_config
+        )
+        return model
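For orientation, the two quantizer classes above are not called directly by users; they implement the `DiffusersQuantizer` hook interface (defined in the new `diffusers/quantizers/base.py`) that the model loader drives while materializing a state dict. The loop below is a simplified, illustrative stand-in for that flow, not the actual loading code in `diffusers/models/modeling_utils.py`; it only shows the order in which the hooks defined in this file are invoked.

# Illustrative sketch of how a DiffusersQuantizer (e.g. BnB4BitDiffusersQuantizer) is driven.
# `model`, `state_dict`, and `quantizer` are assumed to be prepared by the caller.
def load_with_quantizer(model, state_dict, quantizer, target_device="cuda"):
    quantizer.validate_environment(device_map=None)

    # Swap eligible nn.Linear layers for bnb Linear4bit / Linear8bitLt shells before weights are set.
    quantizer._process_model_before_weight_loading(model, device_map=None)

    unexpected_keys = list(state_dict.keys())
    for name, value in state_dict.items():
        if quantizer.check_if_quantized_param(model, value, name, state_dict):
            # Quantized params also pull their companions (quant_state / SCB) out of the state dict.
            quantizer.create_quantized_param(model, value, name, target_device, state_dict, unexpected_keys)
        # Non-quantized params would go through the regular accelerate/torch assignment path here.

    # Sets flags such as model.is_loaded_in_4bit and returns the model.
    return quantizer._process_model_after_weight_loading(model)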