compressed-tensors 0.3.2__tar.gz → 0.3.3__tar.gz

This diff shows the changes between two publicly released versions of the package, as published to a supported registry. It is provided for informational purposes only and reflects the package contents as they appear in the public registry.
Files changed (46)
  1. {compressed-tensors-0.3.2/src/compressed_tensors.egg-info → compressed-tensors-0.3.3}/PKG-INFO +2 -3
  2. {compressed-tensors-0.3.2 → compressed-tensors-0.3.3}/README.md +1 -2
  3. {compressed-tensors-0.3.2 → compressed-tensors-0.3.3}/setup.py +1 -1
  4. {compressed-tensors-0.3.2 → compressed-tensors-0.3.3}/src/compressed_tensors/compressors/sparse_bitmask.py +1 -1
  5. {compressed-tensors-0.3.2 → compressed-tensors-0.3.3}/src/compressed_tensors/quantization/lifecycle/apply.py +14 -9
  6. {compressed-tensors-0.3.2 → compressed-tensors-0.3.3}/src/compressed_tensors/quantization/lifecycle/forward.py +92 -13
  7. {compressed-tensors-0.3.2 → compressed-tensors-0.3.3}/src/compressed_tensors/quantization/observers/base.py +64 -3
  8. {compressed-tensors-0.3.2 → compressed-tensors-0.3.3}/src/compressed_tensors/quantization/quant_args.py +31 -2
  9. {compressed-tensors-0.3.2 → compressed-tensors-0.3.3}/src/compressed_tensors/quantization/utils/helpers.py +1 -0
  10. compressed-tensors-0.3.3/src/compressed_tensors/utils/helpers.py +45 -0
  11. {compressed-tensors-0.3.2 → compressed-tensors-0.3.3/src/compressed_tensors.egg-info}/PKG-INFO +2 -3
  12. {compressed-tensors-0.3.2 → compressed-tensors-0.3.3}/src/compressed_tensors.egg-info/SOURCES.txt +1 -0
  13. {compressed-tensors-0.3.2 → compressed-tensors-0.3.3}/LICENSE +0 -0
  14. {compressed-tensors-0.3.2 → compressed-tensors-0.3.3}/pyproject.toml +0 -0
  15. {compressed-tensors-0.3.2 → compressed-tensors-0.3.3}/setup.cfg +0 -0
  16. {compressed-tensors-0.3.2 → compressed-tensors-0.3.3}/src/compressed_tensors/__init__.py +0 -0
  17. {compressed-tensors-0.3.2 → compressed-tensors-0.3.3}/src/compressed_tensors/base.py +0 -0
  18. {compressed-tensors-0.3.2 → compressed-tensors-0.3.3}/src/compressed_tensors/compressors/__init__.py +0 -0
  19. {compressed-tensors-0.3.2 → compressed-tensors-0.3.3}/src/compressed_tensors/compressors/base.py +0 -0
  20. {compressed-tensors-0.3.2 → compressed-tensors-0.3.3}/src/compressed_tensors/compressors/dense.py +0 -0
  21. {compressed-tensors-0.3.2 → compressed-tensors-0.3.3}/src/compressed_tensors/compressors/helpers.py +0 -0
  22. {compressed-tensors-0.3.2 → compressed-tensors-0.3.3}/src/compressed_tensors/config/__init__.py +0 -0
  23. {compressed-tensors-0.3.2 → compressed-tensors-0.3.3}/src/compressed_tensors/config/base.py +0 -0
  24. {compressed-tensors-0.3.2 → compressed-tensors-0.3.3}/src/compressed_tensors/config/dense.py +0 -0
  25. {compressed-tensors-0.3.2 → compressed-tensors-0.3.3}/src/compressed_tensors/config/sparse_bitmask.py +0 -0
  26. {compressed-tensors-0.3.2 → compressed-tensors-0.3.3}/src/compressed_tensors/quantization/__init__.py +0 -0
  27. {compressed-tensors-0.3.2 → compressed-tensors-0.3.3}/src/compressed_tensors/quantization/lifecycle/__init__.py +0 -0
  28. {compressed-tensors-0.3.2 → compressed-tensors-0.3.3}/src/compressed_tensors/quantization/lifecycle/calibration.py +0 -0
  29. {compressed-tensors-0.3.2 → compressed-tensors-0.3.3}/src/compressed_tensors/quantization/lifecycle/frozen.py +0 -0
  30. {compressed-tensors-0.3.2 → compressed-tensors-0.3.3}/src/compressed_tensors/quantization/lifecycle/initialize.py +0 -0
  31. {compressed-tensors-0.3.2 → compressed-tensors-0.3.3}/src/compressed_tensors/quantization/observers/__init__.py +0 -0
  32. {compressed-tensors-0.3.2 → compressed-tensors-0.3.3}/src/compressed_tensors/quantization/observers/helpers.py +0 -0
  33. {compressed-tensors-0.3.2 → compressed-tensors-0.3.3}/src/compressed_tensors/quantization/observers/memoryless.py +0 -0
  34. {compressed-tensors-0.3.2 → compressed-tensors-0.3.3}/src/compressed_tensors/quantization/observers/min_max.py +0 -0
  35. {compressed-tensors-0.3.2 → compressed-tensors-0.3.3}/src/compressed_tensors/quantization/quant_config.py +0 -0
  36. {compressed-tensors-0.3.2 → compressed-tensors-0.3.3}/src/compressed_tensors/quantization/quant_scheme.py +0 -0
  37. {compressed-tensors-0.3.2 → compressed-tensors-0.3.3}/src/compressed_tensors/quantization/utils/__init__.py +0 -0
  38. {compressed-tensors-0.3.2 → compressed-tensors-0.3.3}/src/compressed_tensors/registry/__init__.py +0 -0
  39. {compressed-tensors-0.3.2 → compressed-tensors-0.3.3}/src/compressed_tensors/registry/registry.py +0 -0
  40. {compressed-tensors-0.3.2 → compressed-tensors-0.3.3}/src/compressed_tensors/utils/__init__.py +0 -0
  41. {compressed-tensors-0.3.2 → compressed-tensors-0.3.3}/src/compressed_tensors/utils/safetensors_load.py +0 -0
  42. {compressed-tensors-0.3.2 → compressed-tensors-0.3.3}/src/compressed_tensors.egg-info/dependency_links.txt +0 -0
  43. {compressed-tensors-0.3.2 → compressed-tensors-0.3.3}/src/compressed_tensors.egg-info/requires.txt +0 -0
  44. {compressed-tensors-0.3.2 → compressed-tensors-0.3.3}/src/compressed_tensors.egg-info/top_level.txt +0 -0
  45. {compressed-tensors-0.3.2 → compressed-tensors-0.3.3}/tests/test_bitmask.py +0 -0
  46. {compressed-tensors-0.3.2 → compressed-tensors-0.3.3}/tests/test_registry.py +0 -0
{compressed-tensors-0.3.2/src/compressed_tensors.egg-info → compressed-tensors-0.3.3}/PKG-INFO
@@ -1,6 +1,6 @@
  Metadata-Version: 2.1
  Name: compressed-tensors
- Version: 0.3.2
+ Version: 0.3.3
  Summary: Library for utilization of compressed safetensors of neural network models
  Home-page: https://github.com/neuralmagic/compressed-tensors
  Author: Neuralmagic, Inc.
@@ -11,7 +11,7 @@ Description-Content-Type: text/markdown
  Provides-Extra: dev
  License-File: LICENSE

- # compressed-tensors
+ # compressed_tensors

  This repository extends a [safetensors](https://github.com/huggingface/safetensors) format to efficiently store sparse and/or quantized tensors on disk. `compressed-tensors` format supports multiple compression types to minimize the disk space and facilitate the tensor manipulation.

@@ -97,4 +97,3 @@ state_dict = dict(load_compressed("compressed_model.safetensors", compression_co
  For more in-depth tutorial on bitmask compression, refer to the [notebook](https://github.com/neuralmagic/compressed-tensors/blob/d707c5b84bc3fef164aebdcd97cb6eaa571982f8/examples/bitmask_compression.ipynb).


-
{compressed-tensors-0.3.2 → compressed-tensors-0.3.3}/README.md
@@ -1,4 +1,4 @@
- # compressed-tensors
+ # compressed_tensors

  This repository extends a [safetensors](https://github.com/huggingface/safetensors) format to efficiently store sparse and/or quantized tensors on disk. `compressed-tensors` format supports multiple compression types to minimize the disk space and facilitate the tensor manipulation.

@@ -82,4 +82,3 @@ state_dict = dict(load_compressed("compressed_model.safetensors", compression_co
  ```

  For more in-depth tutorial on bitmask compression, refer to the [notebook](https://github.com/neuralmagic/compressed-tensors/blob/d707c5b84bc3fef164aebdcd97cb6eaa571982f8/examples/bitmask_compression.ipynb).
-
{compressed-tensors-0.3.2 → compressed-tensors-0.3.3}/setup.py
@@ -32,7 +32,7 @@ def _setup_extras() -> Dict:

  setup(
      name="compressed-tensors",
-     version="0.3.2",
+     version="0.3.3",
      author="Neuralmagic, Inc.",
      author_email="support@neuralmagic.com",
      license="Apache 2.0",
{compressed-tensors-0.3.2 → compressed-tensors-0.3.3}/src/compressed_tensors/compressors/sparse_bitmask.py
@@ -67,7 +67,7 @@ class BitmaskCompressor(ModelCompressor):
                      f"found an existing entry for {key}. The existing entry will "
                      "be replaced."
                  )
-             compressed_dict |= bitmask_dict
+             compressed_dict.update(bitmask_dict)

          return compressed_dict
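Swapping the `|=` in-place merge for `dict.update()` keeps the compressor working on Python versions before 3.9, where the dict merge operators don't exist; the compatibility motive is an inference, but the two spellings behave identically here. A minimal sketch with hypothetical keys:

```python
# dict.update() and |= both merge in place, overwriting duplicate keys,
# but |= requires Python 3.9+ while update() works on every supported version
compressed_dict = {"layer.weight.shape": (2, 2)}
bitmask_dict = {"layer.weight.compressed": ..., "layer.weight.bitmask": ...}

compressed_dict.update(bitmask_dict)   # portable spelling
# compressed_dict |= bitmask_dict      # equivalent, Python >= 3.9 only
```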
 
{compressed-tensors-0.3.2 → compressed-tensors-0.3.3}/src/compressed_tensors/quantization/lifecycle/apply.py
@@ -36,6 +36,7 @@ __all__ = [
      "load_pretrained_quantization",
      "apply_quantization_config",
      "apply_quantization_status",
+     "find_first_name_or_class_match",
  ]

  from compressed_tensors.quantization.utils.helpers import is_module_quantized
@@ -99,9 +100,9 @@ def apply_quantization_config(model: Module, config: QuantizationConfig):

      # mark appropriate layers for quantization by setting their quantization schemes
      for name, submodule in iter_named_leaf_modules(model):
-         if _find_first_name_or_class_match(name, submodule, config.ignore):
+         if find_first_name_or_class_match(name, submodule, config.ignore):
              continue  # layer matches ignore list, continue
-         target = _find_first_name_or_class_match(name, submodule, target_to_scheme)
+         target = find_first_name_or_class_match(name, submodule, target_to_scheme)
          if target is not None:
              # target matched - add layer and scheme to target list
              submodule.quantization_scheme = target_to_scheme[target]
@@ -125,27 +126,31 @@ def apply_quantization_status(model: Module, status: QuantizationStatus):
          model.apply(freeze_module_quantization)


- def _find_first_name_or_class_match(
-     name: str,
-     module: Module,
-     targets: Iterable[str],
+ def find_first_name_or_class_match(
+     name: str, module: Module, targets: Iterable[str], check_contains: bool = False
  ) -> Optional[str]:
      # first element of targets that matches the given name
      # if no name matches returns first target that matches the class name
      # returns None otherwise
      return _find_first_match(name, targets) or _find_first_match(
-         module.__class__.__name__, targets
+         module.__class__.__name__, targets, check_contains
      )


- def _find_first_match(value: str, targets: Iterable[str]) -> Optional[str]:
+ def _find_first_match(
+     value: str, targets: Iterable[str], check_contains: bool = False
+ ) -> Optional[str]:
      # returns first element of target that matches value either
-     # exactly or as a regex after 're:'
+     # exactly or as a regex after 're:'. if check_contains is set to True,
+     # additionally checks if the target string is contained with value.
      for target in targets:
          if target.startswith("re:"):
              pattern = target[3:]
              if re.match(pattern, value):
                  return target
+         elif check_contains:
+             if target.lower() in value.lower():
+                 return target
          elif target == value:
              return target
      return None
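The renamed-and-exported matcher now resolves targets in three ways: regex targets via a `re:` prefix, exact string equality, and (new in this release) case-insensitive substring matching when `check_contains=True`, used against module class names. A condensed, self-contained restatement of `_find_first_match` with illustrative target lists:

```python
import re
from typing import Iterable, Optional

def find_first_match(
    value: str, targets: Iterable[str], check_contains: bool = False
) -> Optional[str]:
    # 're:'-prefixed targets are regexes; otherwise exact match, or a
    # case-insensitive substring match when check_contains=True
    for target in targets:
        if target.startswith("re:"):
            if re.match(target[3:], value):
                return target
        elif check_contains:
            if target.lower() in value.lower():
                return target
        elif target == value:
            return target
    return None

assert find_first_match("Linear", ["Linear"]) == "Linear"                  # exact
assert find_first_match("model.layers.0.mlp", ["re:.*mlp"]) == "re:.*mlp"  # regex
assert find_first_match("QuantizedLinear", ["linear"], check_contains=True) == "linear"
```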
{compressed-tensors-0.3.2 → compressed-tensors-0.3.3}/src/compressed_tensors/quantization/lifecycle/forward.py
@@ -13,15 +13,19 @@
  # limitations under the License.

  from functools import wraps
+ from math import ceil

  import torch
- from compressed_tensors.quantization.quant_args import QuantizationArgs
+ from compressed_tensors.quantization.quant_args import (
+     QuantizationArgs,
+     QuantizationStrategy,
+ )
  from compressed_tensors.quantization.quant_config import QuantizationStatus
  from compressed_tensors.quantization.quant_scheme import QuantizationScheme
  from torch.nn import Module


- __all__ = ["wrap_module_forward_quantized"]
+ __all__ = ["wrap_module_forward_quantized", "maybe_calibrate_or_quantize"]


  @torch.no_grad()
@@ -32,10 +36,9 @@ def quantize(
      q_min: torch.Tensor,
      q_max: torch.Tensor,
  ) -> torch.Tensor:
+
      return torch.clamp(
-         torch.round(
-             x / scale + zero_point,
-         ),
+         torch.round(x / scale + zero_point),
          q_min,
          q_max,
      )
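The tidied `quantize` still implements the affine rounding `clamp(round(x / scale + zero_point), q_min, q_max)`, and `dequantize` (unchanged in this diff) inverts it with the standard `(q - zero_point) * scale`. A toy numeric check, with values chosen only for illustration:

```python
import torch

x = torch.tensor([0.05, -0.30, 1.50])
scale, zero_point = torch.tensor(0.01), torch.tensor(0.0)
q_min, q_max = torch.tensor(-128.0), torch.tensor(127.0)  # 8-bit signed grid

q = torch.clamp(torch.round(x / scale + zero_point), q_min, q_max)
dq = (q - zero_point) * scale

print(q)   # tensor([  5., -30., 127.])  -- 1.50 saturates at q_max
print(dq)  # tensor([ 0.0500, -0.3000,  1.2700])
```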
@@ -57,12 +60,88 @@ def fake_quantize(
      zero_point: torch.Tensor,
      args: QuantizationArgs,
  ) -> torch.Tensor:
+     """
+     Fake quantize the input tensor x depending on the group_size.
+     if group_size is greater than 0, then q/dq by groups. The groups
+     must be divisible by the column size
+     if group_size is -1, then channel wise q/dq. THe input scale and
+     zero_points are reshaped to support vectorization (Assumes 1 is
+     the channel dimension)
+
+     :param x: Input tensor
+     :param scale: scale tensor
+     :param zero_point: zero point tensor
+     :param args: quantization args that contain group_size info
+     :return: fake quantized tensor
+
+     """
      bit_range = 2**args.num_bits
      max_q = torch.tensor(bit_range / 2 - 1, device=x.device)
      min_q = torch.tensor(-bit_range / 2, device=x.device)
-     Q = torch.zeros_like(x)
-     Q = quantize(x, scale, zero_point, min_q, max_q)
-     return dequantize(Q, scale, zero_point)
+
+     group_size = args.group_size
+
+     # group
+     if args.strategy == QuantizationStrategy.GROUP:
+
+         DQ = torch.zeros_like(x)
+
+         # TODO: vectorize the for loop
+         # TODO: fix genetric assumption about the tensor size for computing group
+
+         # TODO: make validation step for inputs
+
+         while scale.ndim < 2:
+             # pad scale and zero point dims for slicing
+             scale = scale.unsqueeze(1)
+             zero_point = zero_point.unsqueeze(1)
+
+         columns = x.shape[1]
+         if columns >= group_size:
+             if columns % group_size != 0:
+                 raise ValueError(
+                     "tesnor column shape must be divisble "
+                     f"by the given group_size {group_size}"
+                 )
+         for i in range(ceil(columns / group_size)):
+             # scale.shape should be [nchan, ndim]
+             # sc.shape should be [nchan, 1] after unsqueeze
+
+             sc = scale[:, i].unsqueeze(1)
+             zp = zero_point[:, i].unsqueeze(1)
+
+             idx = i * group_size
+             Q = quantize(x[:, idx : (idx + group_size)], sc, zp, min_q, max_q)
+             DQ[:, idx : (idx + group_size)] = dequantize(Q, sc, zp)
+
+     # channel-wise
+     elif args.strategy == QuantizationStrategy.CHANNEL:  # group_size == -1
+         # before: scale shape = [channel_size]
+         # after: scale shape = [1, channel_size]
+         scale = scale.unsqueeze(0)
+         zero_point = zero_point.unsqueeze(0)
+
+         Q = quantize(x, scale, zero_point, min_q, max_q)
+         DQ = dequantize(Q, scale, zero_point)
+
+     # per-token
+     elif args.strategy == QuantizationStrategy.TOKEN:
+         # before: scale shape = [num_tokens]
+         # after: scale shape = [num_tokens, 1]
+         # x.shape = 1, num_tokens, 1]
+         # scale gets broadcasted as expected withput having [1, num_tokens, 1] shape
+
+         scale = scale.unsqueeze(1)
+         zero_point = zero_point.unsqueeze(1)
+
+         Q = quantize(x, scale, zero_point, min_q, max_q)
+         DQ = dequantize(Q, scale, zero_point)
+
+     else:
+         Q = quantize(x, scale, zero_point, min_q, max_q)
+         DQ = dequantize(Q, scale, zero_point)
+
+     return DQ


  def wrap_module_forward_quantized(module: Module, scheme: QuantizationScheme):
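To make the new group strategy concrete: a `[rows, columns]` weight is cut into `columns / group_size` column slices, and slice `i` is quantized with the per-group scale and zero point stored in column `i` of the parameter tensors, broadcast down the rows. A standalone toy sketch of that loop (shapes and values are illustrative only):

```python
import torch
from math import ceil

x = torch.randn(4, 8)                     # [rows=4, columns=8]
group_size = 4                            # -> 2 groups per row
n_groups = ceil(x.shape[1] / group_size)

scale = torch.rand(4, n_groups) + 0.01    # one scale per (row, group)
zero_point = torch.zeros(4, n_groups)

dq = torch.zeros_like(x)
for i in range(n_groups):
    sc = scale[:, i].unsqueeze(1)         # [4, 1] broadcasts over the slice
    zp = zero_point[:, i].unsqueeze(1)
    idx = i * group_size
    q = torch.clamp(torch.round(x[:, idx : idx + group_size] / sc + zp), -128, 127)
    dq[:, idx : idx + group_size] = (q - zp) * sc
```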
@@ -76,14 +155,14 @@ def wrap_module_forward_quantized(module: Module, scheme: QuantizationScheme):

      if scheme.input_activations is not None:
          # calibrate and (fake) quantize input activations when applicable
-         input_ = _maybe_calibrate_or_quantize(
+         input_ = maybe_calibrate_or_quantize(
              module, input_, "input", scheme.input_activations
          )

      if scheme.weights is not None:
          # calibrate and (fake) quantize weights when applicable
          unquantized_weight = self.weight.data.clone()
-         self.weight.data = _maybe_calibrate_or_quantize(
+         self.weight.data = maybe_calibrate_or_quantize(
              module, self.weight, "weight", scheme.weights
          )

@@ -94,7 +173,7 @@ def wrap_module_forward_quantized(module: Module, scheme: QuantizationScheme):

      if scheme.output_activations is not None:
          # calibrate and (fake) quantize output activations when applicable
-         output = _maybe_calibrate_or_quantize(
+         output = maybe_calibrate_or_quantize(
              module, output, "output", scheme.output_activations
          )

@@ -110,7 +189,7 @@ def wrap_module_forward_quantized(module: Module, scheme: QuantizationScheme):
      setattr(module, "forward", bound_wrapped_forward)


- def _maybe_calibrate_or_quantize(
+ def maybe_calibrate_or_quantize(
      module: Module, value: torch.Tensor, base_name: str, args: "QuantizationArgs"
  ) -> torch.Tensor:
      # only run quantized for the included stages
@@ -132,11 +211,11 @@ def _maybe_calibrate_or_quantize(
      if module.quantization_status == QuantizationStatus.CALIBRATION:
          # calibration mode - get new quant params from observer
          observer = getattr(module, f"{base_name}_observer")
+
          updated_scale, updated_zero_point = observer(value)

          # update scale and zero point
          device = next(module.parameters()).device
          scale.data = updated_scale.to(device)
          zero_point.data = updated_zero_point.to(device)
-
      return fake_quantize(value, scale, zero_point, args)
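The now-exported `maybe_calibrate_or_quantize` is a status gate: while a module is in `CALIBRATION`, its observer recomputes scale and zero point on every forward pass before fake-quantizing; once frozen, the stored parameters are reused. A hedged sketch of a calibration loop built on that behavior (`model` and `calibration_batches` are assumed to exist, and the `FROZEN` status member is inferred from the `freeze_module_quantization` call shown in this diff):

```python
import torch
from compressed_tensors.quantization.lifecycle.apply import apply_quantization_status
from compressed_tensors.quantization.quant_config import QuantizationStatus

# assumes apply_quantization_config(model, config) has already run
apply_quantization_status(model, QuantizationStatus.CALIBRATION)
with torch.no_grad():
    for batch in calibration_batches:
        model(batch)          # each forward pass refreshes scale/zero_point

# stop observing; subsequent forwards reuse the stored qparams
apply_quantization_status(model, QuantizationStatus.FROZEN)
```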
{compressed-tensors-0.3.2 → compressed-tensors-0.3.3}/src/compressed_tensors/quantization/observers/base.py
@@ -14,7 +14,11 @@

  from typing import Optional, Tuple

- from compressed_tensors.quantization.quant_args import QuantizationArgs
+ import torch
+ from compressed_tensors.quantization.quant_args import (
+     QuantizationArgs,
+     QuantizationStrategy,
+ )
  from compressed_tensors.registry.registry import RegistryMixin
  from torch import FloatTensor, IntTensor, Tensor
  from torch.nn import Module
@@ -52,6 +56,12 @@ class Observer(Module, RegistryMixin):
          """
          raise NotImplementedError(f"{self.__class__} must implement calculate_qparams")

+     def post_calculate_qparams(self) -> None:
+         """
+         Run any logic specific to its observers after running calculate_qparams
+         """
+         ...
+
      def get_qparams(
          self, observed: Optional[Tensor] = None
      ) -> Tuple[FloatTensor, IntTensor]:
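`post_calculate_qparams` is a new no-op hook (the `...` body) that concrete observers may override; this diff shows no call site, so the subclass below is purely hypothetical and only illustrates the intended extension point:

```python
import torch
from torch import Tensor
from compressed_tensors.quantization.observers.base import Observer

class MaxAbsObserver(Observer):  # hypothetical subclass, for illustration only
    def calculate_qparams(self, observed: Tensor):
        # toy symmetric qparams from the max magnitude on an int8 grid
        scale = observed.abs().max() / 127.0
        zero_point = torch.zeros_like(scale, dtype=torch.int64)
        self.post_calculate_qparams()
        return scale, zero_point

    def post_calculate_qparams(self) -> None:
        # observer-specific bookkeeping after each qparams computation
        print("qparams updated")
```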
@@ -64,6 +74,57 @@ class Observer(Module, RegistryMixin):
          :return: tuple of scale and zero point based on last observed value
          """
          if observed is not None:
-             # re-calcualte scale and zero point, update the stored value
-             self._scale, self._zero_point = self.calculate_qparams(observed)
+             group_size = self.quantization_args.group_size
+
+             if self.quantization_args.strategy == QuantizationStrategy.TENSOR:
+
+                 # re-calculate scale and zero point, update the stored value
+                 self._scale, self._zero_point = self.calculate_qparams(observed)
+
+             elif self.quantization_args.strategy == QuantizationStrategy.GROUP:
+                 columns = observed.shape[1]
+                 scales, zero_points = [], []
+                 for i in range(0, columns, self.quantization_args.group_size):
+                     scale, zero_point = self.get_qparams_along_dim(
+                         observed[:, i : (i + group_size)],
+                         0,
+                     )
+                     scales.append(scale)
+                     zero_points.append(zero_point)
+
+                 self._scale = torch.stack(scales, dim=1)
+                 self._zero_point = torch.stack(zero_points, dim=1)
+
+             elif self.quantization_args.strategy == QuantizationStrategy.CHANNEL:
+                 # assume observed is transposed, because its the output, hence use dim 0
+                 self._scale, self._zero_point = self.get_qparams_along_dim(observed, 0)
+
+             elif self.quantization_args.strategy == QuantizationStrategy.TOKEN:
+
+                 # use dim 1, assume the obsersed.shape = [batch, token, hidden]
+                 # should be batch, token
+
+                 self._scale, self._zero_point = self.get_qparams_along_dim(
+                     observed, dim=1
+                 )
+
          return self._scale, self._zero_point
+
+     def get_qparams_along_dim(self, observed, dim: int):
+         # TODO: add documentation that specifies the shape must
+         # be padded with 1-dims so the scales are along the right channel
+         # TODO: generalize the logic for reduce_dims
+         scales, zero_points = [], []
+
+         # TODO: make a more generic way to get the channel
+         num_dims = observed.shape[dim]
+
+         for dim_idx in range(num_dims):
+             scale, zero_point = self.calculate_qparams(
+                 observed.select(dim=dim, index=dim_idx)
+             )
+
+             scales.append(scale)
+             zero_points.append(zero_point)
+         # breakpoint()
+         return torch.stack(scales), torch.stack(zero_points)
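`get_qparams_along_dim` computes an independent scale/zero-point pair for every slice along `dim` (via `Tensor.select`) and stacks the results. The same pattern in isolation, with a toy per-slice rule standing in for `calculate_qparams`:

```python
import torch

def qparams_along_dim(observed: torch.Tensor, dim: int):
    scales, zero_points = [], []
    for idx in range(observed.shape[dim]):
        piece = observed.select(dim=dim, index=idx)  # slice with `dim` removed
        scales.append(piece.abs().max() / 127.0)     # toy symmetric int8 scale
        zero_points.append(torch.tensor(0))
    return torch.stack(scales), torch.stack(zero_points)

w = torch.randn(16, 64)
scale, zero_point = qparams_along_dim(w, dim=0)  # one pair per output channel
print(scale.shape, zero_point.shape)             # torch.Size([16]) torch.Size([16])
```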
{compressed-tensors-0.3.2 → compressed-tensors-0.3.3}/src/compressed_tensors/quantization/quant_args.py
@@ -15,7 +15,7 @@
  from enum import Enum
  from typing import Any, Dict, Optional

- from pydantic import BaseModel, Field
+ from pydantic import BaseModel, Field, validator


  __all__ = ["QuantizationType", "QuantizationStrategy", "QuantizationArgs"]
@@ -39,6 +39,7 @@ class QuantizationStrategy(str, Enum):
      CHANNEL = "channel"
      GROUP = "group"
      BLOCK = "block"
+     TOKEN = "token"


  class QuantizationArgs(BaseModel):
@@ -63,8 +64,8 @@ class QuantizationArgs(BaseModel):
      num_bits: int = 8
      type: QuantizationType = QuantizationType.INT
      symmetric: bool = True
-     strategy: QuantizationStrategy = QuantizationStrategy.TENSOR
      group_size: Optional[int] = None
+     strategy: Optional[QuantizationStrategy] = None
      block_structure: Optional[str] = None
      dynamic: bool = False
      observer: str = Field(
@@ -94,3 +95,31 @@ class QuantizationArgs(BaseModel):
              self.observer = "memoryless"

          return Observer.load_from_registry(self.observer, quantization_args=self)
+
+     @validator("strategy", pre=True, always=True)
+     def validate_strategy(cls, value, values):
+         group_size = values.get("group_size")
+
+         # use group_size to determinine strategy if not given explicity
+         if group_size is not None and value is None:
+             if group_size > 0:
+                 return QuantizationStrategy.GROUP
+
+             elif group_size == -1:
+                 return QuantizationStrategy.CHANNEL
+
+             else:
+                 raise ValueError(
+                     f"group_size={group_size} with strategy {value} is invald. "
+                     "group_size > 0 for strategy='group' and "
+                     "group_size = -1 for 'channel'"
+                 )
+
+         if value == QuantizationStrategy.GROUP:
+             if group_size is None:
+                 raise ValueError(f"strategy {value} requires group_size to be set.")
+
+         if value is None:
+             return QuantizationStrategy.TENSOR
+
+         return value
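Note the field reorder: `group_size` now precedes `strategy`, which is what lets the pydantic validator read `values.get("group_size")` when inferring a default strategy. The inference rules, demonstrated:

```python
from compressed_tensors.quantization.quant_args import (
    QuantizationArgs,
    QuantizationStrategy,
)

# strategy is inferred from group_size when not passed explicitly
assert QuantizationArgs(group_size=128).strategy == QuantizationStrategy.GROUP
assert QuantizationArgs(group_size=-1).strategy == QuantizationStrategy.CHANNEL
assert QuantizationArgs().strategy == QuantizationStrategy.TENSOR

# strategy="group" without a group_size is rejected
try:
    QuantizationArgs(strategy="group")
except ValueError as err:  # pydantic wraps the validator's ValueError
    print(err)
```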
{compressed-tensors-0.3.2 → compressed-tensors-0.3.3}/src/compressed_tensors/quantization/utils/helpers.py
@@ -108,6 +108,7 @@ def calculate_compression_ratio(model: Module) -> float:
          compressed_bits = uncompressed_bits
          if is_module_quantized(submodule):
              compressed_bits = submodule.quantization_scheme.weights.num_bits
+
          num_weights = parameter.numel()
          total_compressed += compressed_bits * num_weights
          total_uncompressed += uncompressed_bits * num_weights
compressed-tensors-0.3.3/src/compressed_tensors/utils/helpers.py (new file)
@@ -0,0 +1,45 @@
+ # Copyright (c) 2021 - present / Neuralmagic, Inc. All Rights Reserved.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #    http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing,
+ # software distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+
+
+ from typing import Optional
+
+ from compressed_tensors.base import SPARSITY_CONFIG_NAME
+ from compressed_tensors.compressors import ModelCompressor
+ from compressed_tensors.config import CompressionConfig
+ from transformers import AutoConfig
+
+
+ __all__ = ["infer_compressor_from_model_config"]
+
+
+ def infer_compressor_from_model_config(
+     pretrained_model_name_or_path: str,
+ ) -> Optional[ModelCompressor]:
+     """
+     Given a path to a model config, extract a sparsity config if it exists and return
+     the associated ModelCompressor
+
+     :param pretrained_model_name_or_path: path to model config on disk or HF hub
+     :return: matching compressor if config contains a sparsity config
+     """
+     config = AutoConfig.from_pretrained(pretrained_model_name_or_path)
+     sparsity_config = getattr(config, SPARSITY_CONFIG_NAME, None)
+     if sparsity_config is None:
+         return None
+
+     format = sparsity_config.get("format")
+     sparsity_config = CompressionConfig.load_from_registry(format, **sparsity_config)
+     compressor = ModelCompressor.load_from_registry(format, config=sparsity_config)
+     return compressor
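A usage sketch for the new helper: given a checkpoint whose `config.json` carries a sparsity config, it returns the registered compressor for that format, or `None` for a dense model. The checkpoint path here is hypothetical:

```python
from compressed_tensors.utils.helpers import infer_compressor_from_model_config

# hypothetical checkpoint directory whose config.json contains a
# sparsity config entry written by compressed-tensors
compressor = infer_compressor_from_model_config("./my-sparse-model")

if compressor is None:
    print("no sparsity config found; tensors are stored dense")
else:
    print(type(compressor).__name__)  # e.g. BitmaskCompressor
```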
{compressed-tensors-0.3.2 → compressed-tensors-0.3.3/src/compressed_tensors.egg-info}/PKG-INFO
@@ -1,6 +1,6 @@
  Metadata-Version: 2.1
  Name: compressed-tensors
- Version: 0.3.2
+ Version: 0.3.3
  Summary: Library for utilization of compressed safetensors of neural network models
  Home-page: https://github.com/neuralmagic/compressed-tensors
  Author: Neuralmagic, Inc.
@@ -11,7 +11,7 @@ Description-Content-Type: text/markdown
  Provides-Extra: dev
  License-File: LICENSE

- # compressed-tensors
+ # compressed_tensors

  This repository extends a [safetensors](https://github.com/huggingface/safetensors) format to efficiently store sparse and/or quantized tensors on disk. `compressed-tensors` format supports multiple compression types to minimize the disk space and facilitate the tensor manipulation.

@@ -97,4 +97,3 @@ state_dict = dict(load_compressed("compressed_model.safetensors", compression_co
  For more in-depth tutorial on bitmask compression, refer to the [notebook](https://github.com/neuralmagic/compressed-tensors/blob/d707c5b84bc3fef164aebdcd97cb6eaa571982f8/examples/bitmask_compression.ipynb).


-
{compressed-tensors-0.3.2 → compressed-tensors-0.3.3}/src/compressed_tensors.egg-info/SOURCES.txt
@@ -39,6 +39,7 @@ src/compressed_tensors/quantization/utils/helpers.py
  src/compressed_tensors/registry/__init__.py
  src/compressed_tensors/registry/registry.py
  src/compressed_tensors/utils/__init__.py
+ src/compressed_tensors/utils/helpers.py
  src/compressed_tensors/utils/safetensors_load.py
  tests/test_bitmask.py
  tests/test_registry.py