mct-nightly 1.8.0.7032023.post439__py3-none-any.whl → 1.8.0.7042023.post403__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {mct_nightly-1.8.0.7032023.post439.dist-info → mct_nightly-1.8.0.7042023.post403.dist-info}/METADATA +7 -7
- {mct_nightly-1.8.0.7032023.post439.dist-info → mct_nightly-1.8.0.7042023.post403.dist-info}/RECORD +63 -59
- {mct_nightly-1.8.0.7032023.post439.dist-info → mct_nightly-1.8.0.7042023.post403.dist-info}/WHEEL +1 -1
- model_compression_toolkit/__init__.py +9 -15
- model_compression_toolkit/core/common/logger.py +10 -2
- model_compression_toolkit/core/keras/back2framework/model_gradients.py +3 -2
- model_compression_toolkit/core/keras/quantization_facade.py +1 -1
- model_compression_toolkit/core/pytorch/back2framework/model_gradients.py +13 -6
- model_compression_toolkit/core/pytorch/constants.py +4 -0
- model_compression_toolkit/core/pytorch/graph_substitutions/substitutions/reshape_with_static_shapes.py +16 -2
- model_compression_toolkit/exporter/__init__.py +5 -0
- model_compression_toolkit/exporter/model_exporter/__init__.py +0 -3
- model_compression_toolkit/exporter/model_exporter/tflite/fakely_quant_tflite_exporter.py +1 -1
- model_compression_toolkit/exporter/model_wrapper/__init__.py +4 -8
- model_compression_toolkit/exporter/model_wrapper/keras/builder/fully_quantized_model_builder.py +45 -39
- model_compression_toolkit/exporter/model_wrapper/keras/builder/node_to_quantizer.py +39 -24
- model_compression_toolkit/exporter/model_wrapper/keras/validate_layer.py +50 -42
- model_compression_toolkit/exporter/model_wrapper/pytorch/builder/fully_quantized_model_builder.py +43 -36
- model_compression_toolkit/exporter/model_wrapper/pytorch/builder/node_to_quantizer.py +24 -5
- model_compression_toolkit/exporter/model_wrapper/pytorch/validate_layer.py +25 -18
- model_compression_toolkit/gptq/__init__.py +6 -0
- model_compression_toolkit/gptq/common/gptq_config.py +57 -104
- model_compression_toolkit/gptq/common/gptq_constants.py +0 -7
- model_compression_toolkit/gptq/common/gptq_training.py +28 -38
- model_compression_toolkit/gptq/keras/gptq_training.py +10 -28
- model_compression_toolkit/gptq/keras/graph_info.py +8 -33
- model_compression_toolkit/gptq/keras/quantization_facade.py +6 -12
- model_compression_toolkit/gptq/keras/quantizer/base_keras_gptq_quantizer.py +0 -1
- model_compression_toolkit/gptq/keras/quantizer/quantization_builder.py +2 -2
- model_compression_toolkit/gptq/keras/quantizer/regularization_factory.py +45 -0
- model_compression_toolkit/gptq/keras/quantizer/soft_rounding/soft_quantizer_reg.py +112 -0
- model_compression_toolkit/gptq/keras/quantizer/soft_rounding/symmetric_soft_quantizer.py +38 -135
- model_compression_toolkit/gptq/keras/quantizer/ste_rounding/symmetric_ste.py +11 -41
- model_compression_toolkit/gptq/pytorch/gptq_training.py +9 -24
- model_compression_toolkit/gptq/pytorch/graph_info.py +7 -27
- model_compression_toolkit/gptq/pytorch/quantization_facade.py +9 -22
- model_compression_toolkit/gptq/pytorch/quantizer/__init__.py +1 -0
- model_compression_toolkit/gptq/pytorch/quantizer/base_pytorch_gptq_quantizer.py +0 -20
- model_compression_toolkit/gptq/pytorch/quantizer/quant_utils.py +10 -1
- model_compression_toolkit/gptq/pytorch/quantizer/quantization_builder.py +2 -2
- model_compression_toolkit/gptq/pytorch/quantizer/regularization_factory.py +45 -0
- model_compression_toolkit/gptq/pytorch/quantizer/soft_rounding/soft_quantizer_reg.py +115 -0
- model_compression_toolkit/gptq/pytorch/quantizer/soft_rounding/symmetric_soft_quantizer.py +30 -117
- model_compression_toolkit/gptq/pytorch/quantizer/soft_rounding/uniform_soft_quantizer.py +196 -0
- model_compression_toolkit/gptq/pytorch/quantizer/ste_rounding/symmetric_ste.py +9 -31
- model_compression_toolkit/qat/keras/quantizer/ste_rounding/symmetric_ste.py +30 -37
- model_compression_toolkit/qat/keras/quantizer/ste_rounding/uniform_ste.py +27 -36
- model_compression_toolkit/qat/pytorch/quantizer/ste_rounding/symmetric_ste.py +21 -21
- model_compression_toolkit/qat/pytorch/quantizer/ste_rounding/uniform_ste.py +25 -26
- model_compression_toolkit/quantizers_infrastructure/inferable_infrastructure/common/get_all_subclasses.py +1 -2
- model_compression_toolkit/quantizers_infrastructure/inferable_infrastructure/common/get_quantizers.py +1 -1
- model_compression_toolkit/quantizers_infrastructure/inferable_infrastructure/keras/quantizers/__init__.py +4 -0
- model_compression_toolkit/quantizers_infrastructure/inferable_infrastructure/keras/quantizers/constants.py +1 -0
- model_compression_toolkit/quantizers_infrastructure/inferable_infrastructure/pytorch/quantize_wrapper.py +13 -3
- model_compression_toolkit/quantizers_infrastructure/inferable_infrastructure/pytorch/quantizers/__init__.py +6 -0
- model_compression_toolkit/quantizers_infrastructure/inferable_infrastructure/pytorch/quantizers/constants.py +3 -0
- model_compression_toolkit/quantizers_infrastructure/trainable_infrastructure/common/base_trainable_quantizer.py +53 -2
- model_compression_toolkit/quantizers_infrastructure/trainable_infrastructure/common/get_quantizers.py +2 -1
- model_compression_toolkit/quantizers_infrastructure/trainable_infrastructure/keras/base_keras_quantizer.py +22 -4
- model_compression_toolkit/quantizers_infrastructure/trainable_infrastructure/pytorch/base_pytorch_quantizer.py +24 -3
- model_compression_toolkit/gptq/common/gptq_quantizer_config.py +0 -93
- {mct_nightly-1.8.0.7032023.post439.dist-info → mct_nightly-1.8.0.7042023.post403.dist-info}/LICENSE.md +0 -0
- {mct_nightly-1.8.0.7032023.post439.dist-info → mct_nightly-1.8.0.7042023.post403.dist-info}/top_level.txt +0 -0
- /model_compression_toolkit/quantizers_infrastructure/inferable_infrastructure/{common → pytorch/quantizers/activation_inferable_quantizers}/activation_lut_pot_inferable_quantizer.py +0 -0
model_compression_toolkit/gptq/pytorch/graph_info.py

@@ -15,11 +15,11 @@
 import torch
 import torch.nn as nn
 from typing import List
-
 from model_compression_toolkit.core.pytorch.constants import BIAS
 from model_compression_toolkit.core.pytorch.default_framework_info import DEFAULT_PYTORCH_INFO
 from model_compression_toolkit.gptq.common.gptq_graph import get_kernel_attribute_name_for_gptq
 from model_compression_toolkit.quantizers_infrastructure import PytorchQuantizationWrapper
+from model_compression_toolkit.quantizers_infrastructure.trainable_infrastructure.common.base_trainable_quantizer import VariableGroup


 def get_gptq_trainable_parameters(fxp_model: nn.Module,
@@ -39,21 +39,23 @@ def get_gptq_trainable_parameters(fxp_model: nn.Module,
     trainable_aux_weights = nn.ParameterList()
     trainable_threshold = nn.ParameterList()
     trainable_bias = nn.ParameterList()
-    trainable_temperature = nn.ParameterList()

     for layer in fxp_model.modules():
         if isinstance(layer, PytorchQuantizationWrapper):
             kernel_attribute = get_kernel_attribute_name_for_gptq(layer_type=type(layer.layer),
                                                                   fw_info=DEFAULT_PYTORCH_INFO)

-
-
+            # collect trainable weights per quantizer
+            quantizer_trainable_weights = layer.weights_quantizers[kernel_attribute].get_trainable_variables(VariableGroup.WEIGHTS)
+            quantizer_trainable_threshold = layer.weights_quantizers[kernel_attribute].get_trainable_variables(VariableGroup.QPARAMS)
+            trainable_aux_weights.extend(quantizer_trainable_weights)
+            trainable_threshold.extend(quantizer_trainable_threshold)

             if add_bias and hasattr(layer.layer, BIAS):
                 bias = getattr(layer.layer, BIAS)
                 trainable_bias.append(bias)

-    return trainable_aux_weights, trainable_bias, trainable_threshold
+    return trainable_aux_weights, trainable_bias, trainable_threshold


 def get_weights_for_loss(fxp_model: nn.Module) -> [List[nn.Parameter], List[torch.Tensor]]:
@@ -77,25 +79,3 @@ def get_weights_for_loss(fxp_model: nn.Module) -> [List[nn.Parameter], List[torc
             fxp_weights_list.append(quantizer(training=False, inputs=quantizer_vars))

     return flp_weights_list, fxp_weights_list
-
-
-# TODO: this function need to move to location that is relevant only for soft quantizer -
-# once deciding how to handle GPTQ quantizers regularization.
-def get_soft_rounding_reg(fxp_model: nn.Module) -> List[torch.Tensor]:
-    """
-    This function returns the soft quantizer regularization values for SoftRounding.
-
-    Args:
-        fxp_model: A model to be quantized with SoftRounding.
-
-    Returns: A list of tensors.
-    """
-
-    soft_reg_aux: List[torch.Tensor] = []
-    for layer in fxp_model.modules():
-        if isinstance(layer, PytorchQuantizationWrapper):
-            kernel_attribute = get_kernel_attribute_name_for_gptq(layer_type=type(layer.layer),
-                                                                  fw_info=DEFAULT_PYTORCH_INFO)
-
-            soft_reg_aux.append(layer.weights_quantizers[kernel_attribute].get_regularization())
-    return soft_reg_aux

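To make the API change above easier to follow: the trainable-parameter collection now asks each quantizer for its variables by group instead of using dedicated getters. A minimal standalone sketch of that pattern (a toy registry mirroring the MCT calls used in the diff; enum members and values are assumptions, not the MCT classes themselves):

# Toy mirror of the VariableGroup registration/collection pattern introduced above.
from enum import Enum

import torch
import torch.nn as nn


class VariableGroup(Enum):  # assumed stand-in for the MCT enum
    WEIGHTS = 0
    QPARAMS = 1


class ToyTrainableQuantizer:
    def __init__(self):
        self._variables = {}  # name -> (parameter, group)

    def add_quantizer_variable(self, name: str, param: nn.Parameter, group: VariableGroup):
        self._variables[name] = (param, group)

    def get_trainable_variables(self, group: VariableGroup):
        return [p for p, g in self._variables.values() if g == group and p.requires_grad]


q = ToyTrainableQuantizer()
q.add_quantizer_variable('auxvar', nn.Parameter(torch.zeros(8)), VariableGroup.WEIGHTS)
q.add_quantizer_variable('ptq_threshold', nn.Parameter(torch.ones(1)), VariableGroup.QPARAMS)

trainable_aux_weights = nn.ParameterList(q.get_trainable_variables(VariableGroup.WEIGHTS))
trainable_threshold = nn.ParameterList(q.get_trainable_variables(VariableGroup.QPARAMS))
print(len(trainable_aux_weights), len(trainable_threshold))  # 1 1
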
model_compression_toolkit/gptq/pytorch/quantization_facade.py

@@ -17,14 +17,15 @@ from model_compression_toolkit.core import common
 from model_compression_toolkit.core.common.constants import FOUND_TORCH
 from model_compression_toolkit.core.common import Logger
 from model_compression_toolkit.core.common.constants import PYTORCH
-from model_compression_toolkit.gptq.common.gptq_config import GradientPTQConfigV2
+from model_compression_toolkit.gptq.common.gptq_config import GradientPTQConfigV2
 from model_compression_toolkit.core.common.target_platform import TargetPlatformCapabilities
 from model_compression_toolkit.core.common.mixed_precision.kpi_tools.kpi import KPI
 from model_compression_toolkit.core.runner import core_runner, _init_tensorboard_writer
+from model_compression_toolkit.gptq.keras.quantization_facade import GPTQ_MOMENTUM
 from model_compression_toolkit.gptq.runner import gptq_runner
 from model_compression_toolkit.core.exporter import export_model
 from model_compression_toolkit.core.analyzer import analyzer_model_quantization
-from model_compression_toolkit import CoreConfig
+from model_compression_toolkit import CoreConfig
 from model_compression_toolkit.core.common.mixed_precision.mixed_precision_quantization_config import \
     MixedPrecisionQuantizationConfigV2

@@ -71,33 +72,19 @@ if FOUND_TORCH:
        Import MCT and Create a GradientPTQConfigV2 to run for 5 epochs:

        >>> import model_compression_toolkit as mct
-        >>> gptq_conf = mct.get_pytorch_gptq_config(n_epochs=5)
+        >>> gptq_conf = mct.gptq.get_pytorch_gptq_config(n_epochs=5)

        Other PyTorch optimizers can be passed with dummy params:

        >>> import torch
-        >>> gptq_conf = mct.get_pytorch_gptq_config(n_epochs=3, optimizer=torch.optim.Adam([torch.Tensor(1)]))
+        >>> gptq_conf = mct.gptq.get_pytorch_gptq_config(n_epochs=3, optimizer=torch.optim.Adam([torch.Tensor(1)]))

        The configuration can be passed to :func:`~model_compression_toolkit.pytorch_post_training_quantization` in order to quantize a pytorch model using gptq.

        """
-        bias_optimizer =
-
-
-        # - change default quantization_parameters_learning to True.
-        # - remove explicit rounding_type and quantizer_config (and let it use the default GradientPTQConfig).
-        return GradientPTQConfigV2(n_epochs,
-                                   optimizer,
-                                   optimizer_rest=optimizer_rest,
-                                   loss=loss,
-                                   log_function=log_function,
-                                   train_bias=True,
-                                   optimizer_quantization_parameter=optimizer_quantization_parameter,
-                                   optimizer_bias=bias_optimizer,
-                                   rounding_type=RoundingType.STE,
-                                   quantizer_config=GPTQQuantizerConfig(),
-                                   quantization_parameters_learning=False,
-                                   )
+        bias_optimizer = torch.optim.SGD([torch.Tensor([])], lr=LR_BIAS_DEFAULT, momentum=GPTQ_MOMENTUM)
+        return GradientPTQConfigV2(n_epochs, optimizer, optimizer_rest=optimizer_rest, loss=loss,
+                                   log_function=log_function, train_bias=True, optimizer_bias=bias_optimizer)


    def pytorch_gradient_post_training_quantization_experimental(model: Module,
@@ -159,7 +146,7 @@ if FOUND_TORCH:

        Pass the module, the representative dataset generator and the configuration (optional) to get a quantized module

-        >>> quantized_module, quantization_info = mct.pytorch_gradient_post_training_quantization_experimental(module, repr_datagen, core_config=config, gptq_config=gptq_conf)
+        >>> quantized_module, quantization_info = mct.gptq.pytorch_gradient_post_training_quantization_experimental(module, repr_datagen, core_config=config, gptq_config=gptq_conf)

        """

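Putting the relocated entry points together, a hedged end-to-end sketch of the call pattern shown in the updated docstrings (the module and representative-dataset generator here are illustrative placeholders; the exact dataset format expected by this MCT version is only sketched, not confirmed by this diff):

# Sketch of the mct.gptq.* usage documented above.
import torch
import model_compression_toolkit as mct

module = torch.nn.Sequential(torch.nn.Conv2d(3, 8, 3), torch.nn.ReLU())

def repr_datagen():
    # illustrative representative dataset generator (shape/format assumed)
    for _ in range(4):
        yield [torch.randn(1, 3, 32, 32)]

gptq_conf = mct.gptq.get_pytorch_gptq_config(n_epochs=5)
quantized_module, quantization_info = mct.gptq.pytorch_gradient_post_training_quantization_experimental(
    module, repr_datagen, gptq_config=gptq_conf)
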
model_compression_toolkit/gptq/pytorch/quantizer/__init__.py

@@ -15,3 +15,4 @@

 import model_compression_toolkit.gptq.pytorch.quantizer.ste_rounding.symmetric_ste
 import model_compression_toolkit.gptq.pytorch.quantizer.soft_rounding.symmetric_soft_quantizer
+import model_compression_toolkit.gptq.pytorch.quantizer.soft_rounding.uniform_soft_quantizer

model_compression_toolkit/gptq/pytorch/quantizer/base_pytorch_gptq_quantizer.py

@@ -71,26 +71,6 @@ if FOUND_TORCH:

             return weights, quant_config, {}

-        def get_aux_variable(self) -> List[Tensor]:
-            """
-            This function return a list with the quantizer's quantization auxiliary variables.
-
-            Returns: A list with the quantization auxiliary variables.
-
-            """
-
-            return []  # pragma: no cover
-
-        def get_quantization_variable(self) -> List[Tensor]:
-            """
-            This function return a list with the quantizer's quantization parameters variables.
-
-            Returns: A list with the quantization parameters.
-
-            """
-
-            return []  # pragma: no cover
-
         @abstractmethod
         def get_quant_config(self):
             """

model_compression_toolkit/gptq/pytorch/quantizer/quant_utils.py

@@ -30,11 +30,20 @@ def calculate_delta(max_tensor: torch.Tensor,
                     num_bits: int,
                     signed: bool) -> torch.Tensor:
     """
-    Compute the step size for the quantization.
+    Compute the step size for the symmetric quantization.
     """
     return max_tensor / (2 ** (num_bits - int(signed)))


+def calculate_delta_uniform(min_tensor: torch.Tensor,
+                            max_tensor: torch.Tensor,
+                            num_bits: int) -> torch.Tensor:
+    """
+    Compute the step size for the uniform quantization.
+    """
+    return (max_tensor-min_tensor) / (2 ** num_bits - 1)
+
+
 def ste_ceil(x: torch.Tensor) -> torch.Tensor:
     """
     Return the ceil values of a tensor.

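A quick numeric check of the two step-size helpers above; the values follow directly from the formulas in the diff:

# Worked example of the step-size formulas added above.
# Symmetric: delta = max / 2**(num_bits - int(signed))
# Uniform:   delta = (max - min) / (2**num_bits - 1)
import torch

max_t = torch.tensor(1.0)
min_t = torch.tensor(-1.0)

delta_sym = max_t / (2 ** (8 - int(True)))   # 1.0 / 128 = 0.0078125
delta_uni = (max_t - min_t) / (2 ** 8 - 1)   # 2.0 / 255 ~= 0.00784
print(delta_sym.item(), delta_uni.item())
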
model_compression_toolkit/gptq/pytorch/quantizer/quantization_builder.py

@@ -14,7 +14,7 @@
 # ==============================================================================
 from typing import List, Dict, Tuple

-from model_compression_toolkit import GradientPTQConfigV2
+from model_compression_toolkit.gptq import GradientPTQConfigV2
 from model_compression_toolkit.core import common
 from model_compression_toolkit.core.pytorch.constants import KERNEL
 from model_compression_toolkit.exporter.model_wrapper.pytorch.builder.node_to_quantizer import \
@@ -59,7 +59,7 @@ def quantization_builder(n: common.BaseNode,
                                                quant_method=quant_method,
                                                quantizer_base_class=BasePytorchGPTQTrainableQuantizer)
     weights_quantizers.update({KERNEL: quantizer_class(get_trainable_quantizer_weights_config(n),
-                                                       **gptq_config.
+                                                       **gptq_config.gptq_quantizer_params_override)})
     activation_quantizers = []
     if n.is_activation_quantization_enabled():
         quant_method = n.final_activation_quantization_cfg.activation_quantization_method

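The builder now forwards gptq_config.gptq_quantizer_params_override as keyword arguments to the quantizer constructor. A hypothetical sketch of such an override dict (the key name quantization_parameter_learning is taken from the SymmetricSoftRoundingGPTQ constructor further down; how the dict is attached to the config is not shown in this diff and is assumed here):

# Hypothetical illustration of the **gptq_config.gptq_quantizer_params_override expansion above.
# The override keys must match the quantizer constructor's keyword arguments.
params_override = {'quantization_parameter_learning': True}

def make_quantizer(quantizer_class, weights_config, override=None):
    # mirrors quantizer_class(get_trainable_quantizer_weights_config(n), **override) in the diff
    return quantizer_class(weights_config, **(override or {}))
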
model_compression_toolkit/gptq/pytorch/quantizer/regularization_factory.py (new file)

@@ -0,0 +1,45 @@
+# Copyright 2023 Sony Semiconductor Israel, Inc. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ==============================================================================
+from typing import Callable
+
+from model_compression_toolkit.gptq import RoundingType, GradientPTQConfigV2, GradientPTQConfig
+from model_compression_toolkit.gptq.pytorch.quantizer.soft_rounding.soft_quantizer_reg import \
+    SoftQuantizerRegularization
+
+
+def get_regularization(gptq_config: GradientPTQConfig, representative_data_gen: Callable) -> Callable:
+    """
+    Returns a function that computes the regularization term for GPTQ training based on the given
+    rounding type in the GPTQ configuration.
+
+    Args:
+        gptq_config: A GPTQ configuration.
+        representative_data_gen: Dataset used for the GPTQ training.
+
+    Returns: A function for computing the regularization. If there is no regularization function defined for the given
+        rounding type, then it returns a function that just returns 0.
+
+    """
+    if gptq_config.rounding_type == RoundingType.SoftQuantizer:
+        # dry run on the representative dataset to count number of batches
+        num_batches = 0
+        for _ in representative_data_gen():
+            num_batches += 1
+
+        n_epochs = GradientPTQConfigV2.from_v1(n_ptq_iter=num_batches, config_v1=gptq_config).n_epochs if \
+            not type(gptq_config) == GradientPTQConfigV2 else gptq_config.n_epochs
+        return SoftQuantizerRegularization(total_gradient_steps=num_batches * n_epochs)
+    else:
+        return lambda m, e_reg: 0

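A minimal sketch of how the returned callable is meant to be consumed during GPTQ fine-tuning. The (model, entropy_reg) signature comes from SoftQuantizerRegularization below and the lambda fallback above; the way it is combined with the task loss, and the names compute_task_loss and reg_factor, are assumptions for illustration:

# Sketch: plugging the regularization callable into a GPTQ-style training step.
def training_step(fxp_model, batch, compute_task_loss, regularization, reg_factor=0.01):
    task_loss = compute_task_loss(fxp_model, batch)
    # regularization is either SoftQuantizerRegularization(...) or the `lambda m, e_reg: 0` fallback
    reg_term = regularization(fxp_model, reg_factor)
    return task_loss + reg_term
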
model_compression_toolkit/gptq/pytorch/quantizer/soft_rounding/soft_quantizer_reg.py (new file)

@@ -0,0 +1,115 @@
+# Copyright 2023 Sony Semiconductor Israel, Inc. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ==============================================================================
+from typing import List
+
+import torch
+import numpy as np
+from torch import nn
+
+from model_compression_toolkit.core.pytorch.default_framework_info import DEFAULT_PYTORCH_INFO
+from model_compression_toolkit.core.pytorch.utils import to_torch_tensor
+from model_compression_toolkit.gptq.common.gptq_graph import get_kernel_attribute_name_for_gptq
+from model_compression_toolkit.quantizers_infrastructure import PytorchQuantizationWrapper
+
+
+class LinearTempDecay:
+    """
+    Annealing process for the soft quantizer regularization temperature term.
+    """
+
+    def __init__(self, t_max: int, rel_start_decay: float = 0.2, start_b: int = 20, end_b: int = 2):
+        """
+        Initializes a LinearTempDecay object.
+
+        Args:
+            t_max: maximal time step.
+            rel_start_decay: Decay step size at the beginning of the process.
+            start_b: Starting value of the regularization term.
+            end_b: Target value of the regularization term.
+        """
+
+        self.t_max = t_max
+        self.start_decay = rel_start_decay * t_max
+        self.start_b = start_b
+        self.end_b = end_b
+
+    def __call__(self, t: float) -> float:
+        """
+        Cosine annealing scheduler for soft quantizer regularization temperature term.
+
+        Args:
+            t: The current time step.
+
+        Returns: Scheduled temperature.
+        """
+
+        is_before_start_decay = (t < self.start_decay)
+
+        rel_t = (t - self.start_decay) / (self.t_max - self.start_decay)
+
+        return self.start_b * is_before_start_decay + \
+               (1 - is_before_start_decay) * \
+               (self.end_b + (self.start_b - self.end_b) * torch.maximum(to_torch_tensor(np.array([0.0])),
+                                                                         to_torch_tensor(np.array((1 - rel_t)))))
+
+
+class SoftQuantizerRegularization:
+    """
+    A class to handle the computation of soft quantizer regularization for GPTQ training.
+    """
+
+    def __init__(self, total_gradient_steps: int):
+        """
+        Initializes the regularization computation object with a LinearDecay object.
+
+        Args:
+            total_gradient_steps: The number of gradient steps during optimization.
+        """
+
+        # Initializing the temperature decay according to the number of expected gradient steps
+        self.linear_decay = LinearTempDecay(total_gradient_steps)
+
+        self.count_iter = 0
+
+    def __call__(self, model: nn.Module, entropy_reg: float):
+        """
+        Returns the soft quantizer regularization value for SoftRounding.
+
+        Args:
+            model: A model to be quantized with SoftRounding.
+            entropy_reg: Entropy value to scale the quantizer regularization.
+
+        Returns: Regularization value.
+        """
+
+        soft_reg_aux: List[torch.Tensor] = []
+        for layer in model.modules():
+            if isinstance(layer, PytorchQuantizationWrapper):
+                kernel_attribute = get_kernel_attribute_name_for_gptq(layer_type=type(layer.layer),
+                                                                      fw_info=DEFAULT_PYTORCH_INFO)
+
+                st = layer.weights_quantizers[kernel_attribute].get_soft_targets()
+                b = self.linear_decay(self.count_iter)
+
+                soft_reg_aux.append((1 - torch.pow(torch.abs(st - .5) * 2, b)).sum())
+
+        reg = 0
+
+        for sq in soft_reg_aux:
+            reg += sq
+
+        self.count_iter += 1
+
+        return entropy_reg * reg

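To see what the temperature schedule above produces, a short standalone re-computation of the same formula (pure Python, no MCT imports), using the defaults rel_start_decay=0.2, start_b=20, end_b=2:

# Standalone re-computation of the LinearTempDecay schedule defined above.
# Before 20% of the steps the temperature stays at start_b; afterwards it decays
# linearly from start_b toward end_b.
def linear_temp_decay(t, t_max, rel_start_decay=0.2, start_b=20, end_b=2):
    start_decay = rel_start_decay * t_max
    if t < start_decay:
        return float(start_b)
    rel_t = (t - start_decay) / (t_max - start_decay)
    return end_b + (start_b - end_b) * max(0.0, 1 - rel_t)

for t in (0, 10, 20, 60, 100):
    print(t, linear_temp_decay(t, t_max=100))
# 0 -> 20.0, 10 -> 20.0, 20 -> 20.0, 60 -> 11.0, 100 -> 2.0
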
model_compression_toolkit/gptq/pytorch/quantizer/soft_rounding/symmetric_soft_quantizer.py

@@ -14,10 +14,10 @@
 # ==============================================================================
 import torch
 import torch.nn as nn
-from typing import
+from typing import Dict
 import numpy as np

-from model_compression_toolkit.core.common import
+from model_compression_toolkit.core.common import max_power_of_two
 from model_compression_toolkit import quantizers_infrastructure as qi
 from model_compression_toolkit.core.common.target_platform import QuantizationMethod
 from model_compression_toolkit.gptq.common.gptq_config import RoundingType
@@ -25,13 +25,14 @@ from model_compression_toolkit.gptq.pytorch.quantizer.base_pytorch_gptq_quantize
     BasePytorchGPTQTrainableQuantizer
 from model_compression_toolkit.core.pytorch.utils import to_torch_tensor, torch_tensor_to_numpy
 from model_compression_toolkit.gptq.pytorch.quantizer import quant_utils as qutils
-from model_compression_toolkit.gptq.common.gptq_constants import PTQ_THRESHOLD, SCALE_PTQ,
-
+from model_compression_toolkit.gptq.common.gptq_constants import PTQ_THRESHOLD, SCALE_PTQ, \
+    SOFT_ROUNDING_GAMMA, SOFT_ROUNDING_ZETA, AUXVAR
 from model_compression_toolkit.core.common.constants import THRESHOLD, MIN_THRESHOLD
 from model_compression_toolkit.quantizers_infrastructure import TrainableQuantizerWeightsConfig
 from model_compression_toolkit.quantizers_infrastructure.inferable_infrastructure.common.base_inferable_quantizer import mark_quantizer
 from model_compression_toolkit.quantizers_infrastructure.trainable_infrastructure.common.quant_utils import \
     get_threshold_reshape_shape
+from model_compression_toolkit.quantizers_infrastructure.trainable_infrastructure.common.base_trainable_quantizer import VariableGroup


 def soft_rounding_symmetric_quantizer(input_tensor: torch.Tensor,
@@ -67,46 +68,6 @@ def soft_rounding_symmetric_quantizer(input_tensor: torch.Tensor,
                                              max_val=int_threshold - 1)


-class LinearTempDecay:
-    """
-    Annealing process for the soft quantizer regularization temperature term.
-    """
-
-    def __init__(self, t_max: int, rel_start_decay: float = 0.2, start_b: int = 20, end_b: int = 2):
-        """
-        Initializes a LinearTempDecay object.
-
-        Args:
-            t_max: maximal time step.
-            rel_start_decay: Decay step size at the beginning of the process.
-            start_b: Starting value of the regularization term.
-            end_b: Target value of the regularization term.
-        """
-
-        self.t_max = t_max
-        self.start_decay = rel_start_decay * t_max
-        self.start_b = start_b
-        self.end_b = end_b
-
-    def __call__(self, t: nn.Parameter) -> float:
-        """
-        Cosine annealing scheduler for soft quantizer regularization temperature term.
-
-        Args:
-            t: The current time step.
-
-        Returns: Scheduled temperature.
-        """
-
-        is_before_start_decay = (t < self.start_decay).to(torch.float32)
-
-        rel_t = (t - self.start_decay) / (self.t_max - self.start_decay)
-
-        return self.start_b * is_before_start_decay + \
-               (1 - is_before_start_decay) * \
-               (self.end_b + (self.start_b - self.end_b) * torch.maximum(to_torch_tensor(np.array([0.0])), (1 - rel_t)))
-
-
 @mark_quantizer(quantization_target=qi.QuantizationTarget.Weights,
                 quantization_method=[QuantizationMethod.POWER_OF_TWO, QuantizationMethod.SYMMETRIC],
                 quantizer_type=RoundingType.SoftQuantizer)
@@ -117,22 +78,15 @@ class SymmetricSoftRoundingGPTQ(BasePytorchGPTQTrainableQuantizer):

     def __init__(self,
                  quantization_config: TrainableQuantizerWeightsConfig,
-
-                 quantization_parameter_learning: bool = False,
-                 n_epochs: int = N_EPOCHS):
+                 quantization_parameter_learning: bool = False):
         """
         Construct a Pytorch model that utilize a fake weight quantizer of soft-quantizer for symmetric quantizer.

         Args:
             quantization_config: Trainable weights quantizer config.
-            n_batches (int): number of batches in representative dataset
             quantization_parameter_learning (Bool): Whether to learn the threshold or not
-            n_epochs (int): number of epochs the representative dataset is run during fine-tuning
         """

-        if n_batches is None:
-            Logger.error("SymmetricSoftRoundingGPTQ got an uninitialized n_batches argument.")
-
         super().__init__(quantization_config)
         self.num_bits = quantization_config.weights_n_bits
         self.per_channel = quantization_config.weights_per_channel_threshold
@@ -147,35 +101,24 @@ class SymmetricSoftRoundingGPTQ(BasePytorchGPTQTrainableQuantizer):
         self.quantization_parameter_learning = quantization_parameter_learning

         # gamma and zeta are stretch parameters for computing the rectified sigmoind function.
-        # beta is used to set the regularization term.
         # See: https://arxiv.org/pdf/2004.10568.pdf
         self.gamma = SOFT_ROUNDING_GAMMA
         self.zeta = SOFT_ROUNDING_ZETA
-        self.beta = SOFT_ROUNDING_BETA

         self.quantizer_parameters = {}

-        # Initializing the temperature decay according to the number of expected gradient steps
-        num_iterations = MAX_ITERATIONS_DEFAULT if n_batches is None else n_epochs * n_batches
-        self.linear_decay = LinearTempDecay(num_iterations)
-
     def initialize_quantization(self,
                                 tensor_shape: torch.Size,
                                 name: str,
-                                layer: qi.PytorchQuantizationWrapper)
+                                layer: qi.PytorchQuantizationWrapper):
         """
-
+        Add quantizer parameters to the quantizer parameters dictionary

         Args:
             tensor_shape: tensor shape of the quantized tensor.
             name: Tensor name.
             layer: Layer to quantize.
-
-        Returns:
-            Dictionary of parameters names to the variables.
         """
-        layer.register_parameter(f"{name}_{GPTQ_ITER}",
-                                 nn.Parameter(to_torch_tensor(np.array([0])), requires_grad=False))

         if self.per_channel:
             threshold_tensor = to_torch_tensor(self.threshold_values)
@@ -195,31 +138,18 @@ class SymmetricSoftRoundingGPTQ(BasePytorchGPTQTrainableQuantizer):
         layer.register_parameter(f"{name}_{AUXVAR}", nn.Parameter(alpha, requires_grad=True))

         # save the quantizer added parameters for later calculations
-        self.
-
-                                     GPTQ_ITER: layer.get_parameter(f"{name}_{GPTQ_ITER}")}
+        self.add_quantizer_variable(PTQ_THRESHOLD, layer.get_parameter(f"{name}_{PTQ_THRESHOLD}"), VariableGroup.QPARAMS)
+        self.add_quantizer_variable(AUXVAR, layer.get_parameter(f"{name}_{AUXVAR}"), VariableGroup.WEIGHTS)

         if self.quantization_parameter_learning:
-
-
-
-
-
-
-
-
-    def get_regularization(self) -> torch.Tensor:
-        """
-        Computes the regularization term for the soft rounding loss.
-
-        Returns:
-            regularization term.
-        """
-
-        st = self.get_soft_targets()
-        ar_iter = self.quantizer_parameters[GPTQ_ITER]
-        b = self.linear_decay(ar_iter)
-        return (1 - torch.pow(torch.abs(st - .5) * 2, b)).sum()
+            if self.per_channel:
+                layer.register_parameter(f"{name}_{SCALE_PTQ}",
+                                         nn.Parameter(to_torch_tensor(torch.ones_like(torch.Tensor(self.threshold_values))),
+                                                      requires_grad=True))
+            else:
+                layer.register_parameter(f"{name}_{SCALE_PTQ}",
+                                         nn.Parameter(to_torch_tensor((torch.tensor([1.0], requires_grad=True)))))
+            self.add_quantizer_variable(SCALE_PTQ, layer.get_parameter(f"{name}_{SCALE_PTQ}"), VariableGroup.QPARAMS)

     def get_soft_targets(self) -> torch.Tensor:
         """
@@ -229,28 +159,9 @@ class SymmetricSoftRoundingGPTQ(BasePytorchGPTQTrainableQuantizer):
        A tensor with the soft rounding targets values.

        """
-        scaled_sigmoid = torch.sigmoid(self.
+        scaled_sigmoid = torch.sigmoid(self.get_quantizer_variable(AUXVAR)) * (self.zeta - self.gamma) + self.gamma
         return torch.clip(scaled_sigmoid, min=0, max=1)

-    def get_aux_variable(self) -> List[torch.Tensor]:
-        """
-        This function return a list with the quantizer's quantization auxiliary variables.
-
-        Returns: A list with the quantization auxiliary variables.
-        """
-        return [self.quantizer_parameters.get(AUXVAR)]
-
-    def get_quantization_variable(self) -> List[torch.Tensor]:
-        """
-        This function return a list with the quantizer's quantization parameters variables.
-
-        Returns: A list with the quantization parameters.
-        """
-        if self.quantization_parameter_learning and not self.power_of_two:
-            return [self.quantizer_parameters[SCALE_PTQ]]
-        else:
-            return []
-
     def get_quant_config(self) -> Dict[str, np.ndarray]:
         """
         Returns the config used to edit NodeQuantizationConfig after GPTQ retraining
@@ -260,12 +171,13 @@ class SymmetricSoftRoundingGPTQ(BasePytorchGPTQTrainableQuantizer):
        Keys must match NodeQuantizationConfig attributes

        """
-        old_threshold = torch_tensor_to_numpy(self.
+        old_threshold = torch_tensor_to_numpy(self.get_quantizer_variable(PTQ_THRESHOLD))
+        old_threshold = np.resize(old_threshold, self.threshold_shape)
         if self.power_of_two:
             old_threshold = max_power_of_two(old_threshold, MIN_THRESHOLD)
         else:
             if self.quantization_parameter_learning:
-                scale = torch.reshape(self.
+                scale = torch.reshape(self.get_quantizer_variable(SCALE_PTQ), self.threshold_shape)
                 old_threshold = old_threshold * torch_tensor_to_numpy(scale)
             old_threshold = old_threshold.reshape(self.threshold_shape)
         return {THRESHOLD: old_threshold}
@@ -283,17 +195,14 @@ class SymmetricSoftRoundingGPTQ(BasePytorchGPTQTrainableQuantizer):
        Returns:
            quantized tensor
        """
-
-
-        ptq_threshold_tensor = self.quantizer_parameters[PTQ_THRESHOLD]
+        auxvar = self.get_quantizer_variable(AUXVAR)
+        ptq_threshold_tensor = self.get_quantizer_variable(PTQ_THRESHOLD)

         #####################################################
         # Soft Rounding
         #####################################################
         aux_var = self.get_soft_targets()
-        if training:
-            ar_iter.set_(ar_iter + 1)
-        else:
+        if not training:
             aux_var = (aux_var >= 0.5).to(auxvar.dtype)

         if self.per_channel:
@@ -317,7 +226,7 @@ class SymmetricSoftRoundingGPTQ(BasePytorchGPTQTrainableQuantizer):
                                                  power_of_two=self.power_of_two)

             if self.quantization_parameter_learning and not self.power_of_two:
-                scale = torch.reshape(self.
+                scale = torch.reshape(self.get_quantizer_variable(SCALE_PTQ), reshape_shape)
                 q_tensor *= scale

         else:
@@ -328,4 +237,8 @@ class SymmetricSoftRoundingGPTQ(BasePytorchGPTQTrainableQuantizer):
                                                  signed=True,
                                                  power_of_two=self.power_of_two)

+            if self.quantization_parameter_learning and not self.power_of_two:
+                scale = self.get_quantizer_variable(SCALE_PTQ)
+                q_tensor *= scale
+
         return q_tensor

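The get_soft_targets() change above is easier to follow with the rectified sigmoid computed in isolation; a sketch using illustrative gamma/zeta values (the actual SOFT_ROUNDING_GAMMA/ZETA constants come from gptq_constants and are not shown in this diff):

# Sketch of get_soft_targets(): a rectified sigmoid of the auxiliary variable, clipped
# to [0, 1]; at inference (training=False) it is hard-thresholded at 0.5 as in the diff.
import torch

GAMMA, ZETA = -0.1, 1.1  # assumed stretch parameters, per arxiv.org/abs/2004.10568

auxvar = torch.tensor([-3.0, 0.0, 3.0])
soft_targets = torch.clip(torch.sigmoid(auxvar) * (ZETA - GAMMA) + GAMMA, min=0, max=1)
hard_targets = (soft_targets >= 0.5).to(auxvar.dtype)
print(soft_targets, hard_targets)  # soft values in [0, 1], hard values in {0., 1.}
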