pquant-ml 0.0.1__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- pquant_ml-0.0.1/.github/workflows/python-publish.yml +33 -0
- pquant_ml-0.0.1/.gitignore +11 -0
- pquant_ml-0.0.1/.pre-commit-config.yaml +64 -0
- pquant_ml-0.0.1/LICENSE +201 -0
- pquant_ml-0.0.1/PKG-INFO +68 -0
- pquant_ml-0.0.1/README.md +42 -0
- pquant_ml-0.0.1/__init__.py +0 -0
- pquant_ml-0.0.1/docs/_static/image.png +0 -0
- pquant_ml-0.0.1/docs/_static/pquant.png +0 -0
- pquant_ml-0.0.1/docs/_static/pquant_transform.png +0 -0
- pquant_ml-0.0.1/docs/pruning_methods.md +111 -0
- pquant_ml-0.0.1/docs/quantization_parameters.md +11 -0
- pquant_ml-0.0.1/examples/example_prune_quantize_resnet.ipynb +494 -0
- pquant_ml-0.0.1/examples/images/config_example.png +0 -0
- pquant_ml-0.0.1/examples/images/pruning_method_stages.png +0 -0
- pquant_ml-0.0.1/examples/images/replace_layer.png +0 -0
- pquant_ml-0.0.1/examples/images/training_loop.png +0 -0
- pquant_ml-0.0.1/pyproject.toml +35 -0
- pquant_ml-0.0.1/setup.cfg +4 -0
- pquant_ml-0.0.1/src/pquant/__init__.py +26 -0
- pquant_ml-0.0.1/src/pquant/_version.py +34 -0
- pquant_ml-0.0.1/src/pquant/configs/__init__.py +0 -0
- pquant_ml-0.0.1/src/pquant/configs/config_ap.yaml +51 -0
- pquant_ml-0.0.1/src/pquant/configs/config_autosparse.yaml +54 -0
- pquant_ml-0.0.1/src/pquant/configs/config_cs.yaml +50 -0
- pquant_ml-0.0.1/src/pquant/configs/config_dst.yaml +53 -0
- pquant_ml-0.0.1/src/pquant/configs/config_mdmm.yaml +64 -0
- pquant_ml-0.0.1/src/pquant/configs/config_pdp.yaml +52 -0
- pquant_ml-0.0.1/src/pquant/configs/config_wanda.yaml +54 -0
- pquant_ml-0.0.1/src/pquant/core/__init__.py +0 -0
- pquant_ml-0.0.1/src/pquant/core/activations_quantizer.py +159 -0
- pquant_ml-0.0.1/src/pquant/core/compressed_layers.py +106 -0
- pquant_ml-0.0.1/src/pquant/core/p_optim.py +69 -0
- pquant_ml-0.0.1/src/pquant/core/tf_impl/compressed_layers_tf.py +1093 -0
- pquant_ml-0.0.1/src/pquant/core/tf_impl/train_tf.py +45 -0
- pquant_ml-0.0.1/src/pquant/core/torch_impl/compressed_layers_torch.py +666 -0
- pquant_ml-0.0.1/src/pquant/core/torch_impl/fit_compress.py +1677 -0
- pquant_ml-0.0.1/src/pquant/core/torch_impl/train_torch.py +51 -0
- pquant_ml-0.0.1/src/pquant/core/train.py +12 -0
- pquant_ml-0.0.1/src/pquant/core/utils.py +159 -0
- pquant_ml-0.0.1/src/pquant/pruning_methods/__init__.py +0 -0
- pquant_ml-0.0.1/src/pquant/pruning_methods/activation_pruning.py +96 -0
- pquant_ml-0.0.1/src/pquant/pruning_methods/autosparse.py +118 -0
- pquant_ml-0.0.1/src/pquant/pruning_methods/cs.py +65 -0
- pquant_ml-0.0.1/src/pquant/pruning_methods/dst.py +91 -0
- pquant_ml-0.0.1/src/pquant/pruning_methods/mdmm.py +340 -0
- pquant_ml-0.0.1/src/pquant/pruning_methods/pdp.py +151 -0
- pquant_ml-0.0.1/src/pquant/pruning_methods/wanda.py +137 -0
- pquant_ml-0.0.1/src/pquant_ml.egg-info/PKG-INFO +68 -0
- pquant_ml-0.0.1/src/pquant_ml.egg-info/SOURCES.txt +60 -0
- pquant_ml-0.0.1/src/pquant_ml.egg-info/dependency_links.txt +1 -0
- pquant_ml-0.0.1/src/pquant_ml.egg-info/requires.txt +4 -0
- pquant_ml-0.0.1/src/pquant_ml.egg-info/top_level.txt +1 -0
- pquant_ml-0.0.1/tests/__init__.py +0 -0
- pquant_ml-0.0.1/tests/conftest.py +53 -0
- pquant_ml-0.0.1/tests/run_tests.sh +11 -0
- pquant_ml-0.0.1/tests/test_ap.py +75 -0
- pquant_ml-0.0.1/tests/test_keras_compression_layers.py +1481 -0
- pquant_ml-0.0.1/tests/test_pdp.py +121 -0
- pquant_ml-0.0.1/tests/test_torch_compression_layers.py +645 -0
- pquant_ml-0.0.1/tests/test_wanda.py +131 -0
- pquant_ml-0.0.1/tox.ini +93 -0

pquant_ml-0.0.1/.github/workflows/python-publish.yml
ADDED

@@ -0,0 +1,33 @@
name: Upload Python Package

on:
  release:
    types: [published]

permissions:
  contents: read

jobs:
  deploy:
    environment:
      name: pypi
      url: https://pypi.org/p/pquant-ml
    runs-on: ubuntu-latest
    permissions:
      id-token: write
    steps:
      - uses: actions/checkout@v4
        with:
          fetch-depth: 0
      - name: Set up Python
        uses: actions/setup-python@v3
        with:
          python-version: "3.10"
      - name: Install dependencies
        run: |
          python -m pip install --upgrade pip
          pip install build
      - name: Build package
        run: python -m build
      - name: Publish package
        uses: pypa/gh-action-pypi-publish@release/v1
pquant_ml-0.0.1/.pre-commit-config.yaml
ADDED

@@ -0,0 +1,64 @@
repos:
  - repo: https://github.com/psf/black
    rev: 25.1.0
    hooks:
      - id: black
        language_version: python3
        args: ['--line-length=125',
               '--skip-string-normalization']

  - repo: https://github.com/tox-dev/pyproject-fmt
    rev: v2.5.1
    hooks:
      - id: pyproject-fmt

  - repo: https://github.com/pre-commit/pre-commit-hooks
    rev: v5.0.0
    hooks:
      - id: check-added-large-files
      - id: check-case-conflict
      - id: check-merge-conflict
      - id: check-symlinks
      - id: check-toml
      - id: check-yaml
      - id: debug-statements
      - id: end-of-file-fixer
      - id: mixed-line-ending
      - id: requirements-txt-fixer
      - id: trailing-whitespace

  - repo: https://github.com/PyCQA/isort
    rev: 6.0.1
    hooks:
      - id: isort
        args: ["--profile=black"] # <-- this one

  - repo: https://github.com/asottile/pyupgrade
    rev: v3.19.1
    hooks:
      - id: pyupgrade
        args: ["--py36-plus"]

  - repo: https://github.com/pycqa/flake8
    rev: 7.1.2
    hooks:
      - id: flake8
        exclude: docs/conf.py
        additional_dependencies: [flake8-bugbear, flake8-print]
        args: ['--max-line-length=125', # github viewer width
               '--extend-ignore=E203'] # E203 is not PEP8 compliant

  - repo: https://github.com/mgedmin/check-manifest
    rev: "0.50"
    hooks:
      - id: check-manifest
        stages: [manual]

ci:
  autofix_commit_msg: '[pre-commit.ci] auto fixes from pre-commit hooks'
  autofix_prs: false # default is true
  autoupdate_branch: 'main'
  autoupdate_commit_msg: '[pre-commit.ci] pre-commit autoupdate'
  autoupdate_schedule: weekly
  skip: []
  submodules: true

pquant_ml-0.0.1/LICENSE
ADDED

@@ -0,0 +1,201 @@
                                 Apache License
                           Version 2.0, January 2004
                        http://www.apache.org/licenses/

   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION

   1. Definitions.

      "License" shall mean the terms and conditions for use, reproduction,
      and distribution as defined by Sections 1 through 9 of this document.

      "Licensor" shall mean the copyright owner or entity authorized by
      the copyright owner that is granting the License.

      "Legal Entity" shall mean the union of the acting entity and all
      other entities that control, are controlled by, or are under common
      control with that entity. For the purposes of this definition,
      "control" means (i) the power, direct or indirect, to cause the
      direction or management of such entity, whether by contract or
      otherwise, or (ii) ownership of fifty percent (50%) or more of the
      outstanding shares, or (iii) beneficial ownership of such entity.

      "You" (or "Your") shall mean an individual or Legal Entity
      exercising permissions granted by this License.

      "Source" form shall mean the preferred form for making modifications,
      including but not limited to software source code, documentation
      source, and configuration files.

      "Object" form shall mean any form resulting from mechanical
      transformation or translation of a Source form, including but
      not limited to compiled object code, generated documentation,
      and conversions to other media types.

      "Work" shall mean the work of authorship, whether in Source or
      Object form, made available under the License, as indicated by a
      copyright notice that is included in or attached to the work
      (an example is provided in the Appendix below).

      "Derivative Works" shall mean any work, whether in Source or Object
      form, that is based on (or derived from) the Work and for which the
      editorial revisions, annotations, elaborations, or other modifications
      represent, as a whole, an original work of authorship. For the purposes
      of this License, Derivative Works shall not include works that remain
      separable from, or merely link (or bind by name) to the interfaces of,
      the Work and Derivative Works thereof.

      "Contribution" shall mean any work of authorship, including
      the original version of the Work and any modifications or additions
      to that Work or Derivative Works thereof, that is intentionally
      submitted to Licensor for inclusion in the Work by the copyright owner
      or by an individual or Legal Entity authorized to submit on behalf of
      the copyright owner. For the purposes of this definition, "submitted"
      means any form of electronic, verbal, or written communication sent
      to the Licensor or its representatives, including but not limited to
      communication on electronic mailing lists, source code control systems,
      and issue tracking systems that are managed by, or on behalf of, the
      Licensor for the purpose of discussing and improving the Work, but
      excluding communication that is conspicuously marked or otherwise
      designated in writing by the copyright owner as "Not a Contribution."

      "Contributor" shall mean Licensor and any individual or Legal Entity
      on behalf of whom a Contribution has been received by Licensor and
      subsequently incorporated within the Work.

   2. Grant of Copyright License. Subject to the terms and conditions of
      this License, each Contributor hereby grants to You a perpetual,
      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
      copyright license to reproduce, prepare Derivative Works of,
      publicly display, publicly perform, sublicense, and distribute the
      Work and such Derivative Works in Source or Object form.

   3. Grant of Patent License. Subject to the terms and conditions of
      this License, each Contributor hereby grants to You a perpetual,
      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
      (except as stated in this section) patent license to make, have made,
      use, offer to sell, sell, import, and otherwise transfer the Work,
      where such license applies only to those patent claims licensable
      by such Contributor that are necessarily infringed by their
      Contribution(s) alone or by combination of their Contribution(s)
      with the Work to which such Contribution(s) was submitted. If You
      institute patent litigation against any entity (including a
      cross-claim or counterclaim in a lawsuit) alleging that the Work
      or a Contribution incorporated within the Work constitutes direct
      or contributory patent infringement, then any patent licenses
      granted to You under this License for that Work shall terminate
      as of the date such litigation is filed.

   4. Redistribution. You may reproduce and distribute copies of the
      Work or Derivative Works thereof in any medium, with or without
      modifications, and in Source or Object form, provided that You
      meet the following conditions:

      (a) You must give any other recipients of the Work or
          Derivative Works a copy of this License; and

      (b) You must cause any modified files to carry prominent notices
          stating that You changed the files; and

      (c) You must retain, in the Source form of any Derivative Works
          that You distribute, all copyright, patent, trademark, and
          attribution notices from the Source form of the Work,
          excluding those notices that do not pertain to any part of
          the Derivative Works; and

      (d) If the Work includes a "NOTICE" text file as part of its
          distribution, then any Derivative Works that You distribute must
          include a readable copy of the attribution notices contained
          within such NOTICE file, excluding those notices that do not
          pertain to any part of the Derivative Works, in at least one
          of the following places: within a NOTICE text file distributed
          as part of the Derivative Works; within the Source form or
          documentation, if provided along with the Derivative Works; or,
          within a display generated by the Derivative Works, if and
          wherever such third-party notices normally appear. The contents
          of the NOTICE file are for informational purposes only and
          do not modify the License. You may add Your own attribution
          notices within Derivative Works that You distribute, alongside
          or as an addendum to the NOTICE text from the Work, provided
          that such additional attribution notices cannot be construed
          as modifying the License.

      You may add Your own copyright statement to Your modifications and
      may provide additional or different license terms and conditions
      for use, reproduction, or distribution of Your modifications, or
      for any such Derivative Works as a whole, provided Your use,
      reproduction, and distribution of the Work otherwise complies with
      the conditions stated in this License.

   5. Submission of Contributions. Unless You explicitly state otherwise,
      any Contribution intentionally submitted for inclusion in the Work
      by You to the Licensor shall be under the terms and conditions of
      this License, without any additional terms or conditions.
      Notwithstanding the above, nothing herein shall supersede or modify
      the terms of any separate license agreement you may have executed
      with Licensor regarding such Contributions.

   6. Trademarks. This License does not grant permission to use the trade
      names, trademarks, service marks, or product names of the Licensor,
      except as required for reasonable and customary use in describing the
      origin of the Work and reproducing the content of the NOTICE file.

   7. Disclaimer of Warranty. Unless required by applicable law or
      agreed to in writing, Licensor provides the Work (and each
      Contributor provides its Contributions) on an "AS IS" BASIS,
      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
      implied, including, without limitation, any warranties or conditions
      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
      PARTICULAR PURPOSE. You are solely responsible for determining the
      appropriateness of using or redistributing the Work and assume any
      risks associated with Your exercise of permissions under this License.

   8. Limitation of Liability. In no event and under no legal theory,
      whether in tort (including negligence), contract, or otherwise,
      unless required by applicable law (such as deliberate and grossly
      negligent acts) or agreed to in writing, shall any Contributor be
      liable to You for damages, including any direct, indirect, special,
      incidental, or consequential damages of any character arising as a
      result of this License or out of the use or inability to use the
      Work (including but not limited to damages for loss of goodwill,
      work stoppage, computer failure or malfunction, or any and all
      other commercial damages or losses), even if such Contributor
      has been advised of the possibility of such damages.

   9. Accepting Warranty or Additional Liability. While redistributing
      the Work or Derivative Works thereof, You may choose to offer,
      and charge a fee for, acceptance of support, warranty, indemnity,
      or other liability obligations and/or rights consistent with this
      License. However, in accepting such obligations, You may act only
      on Your own behalf and on Your sole responsibility, not on behalf
      of any other Contributor, and only if You agree to indemnify,
      defend, and hold each Contributor harmless for any liability
      incurred by, or claims asserted against, such Contributor by reason
      of your accepting any such warranty or additional liability.

   END OF TERMS AND CONDITIONS

   APPENDIX: How to apply the Apache License to your work.

      To apply the Apache License to your work, attach the following
      boilerplate notice, with the fields enclosed by brackets "{}"
      replaced with your own identifying information. (Don't include
      the brackets!) The text should be enclosed in the appropriate
      comment syntax for the file format. We also recommend that a
      file or class name and description of purpose be included on the
      same "printed page" as the copyright notice for easier
      identification within third-party archives.

   Copyright {yyyy} {name of copyright owner}

   Licensed under the Apache License, Version 2.0 (the "License");
   you may not use this file except in compliance with the License.
   You may obtain a copy of the License at

       http://www.apache.org/licenses/LICENSE-2.0

   Unless required by applicable law or agreed to in writing, software
   distributed under the License is distributed on an "AS IS" BASIS,
   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
   See the License for the specific language governing permissions and
   limitations under the License.

pquant_ml-0.0.1/PKG-INFO
ADDED

@@ -0,0 +1,68 @@
Metadata-Version: 2.4
Name: pquant-ml
Version: 0.0.1
Summary: Pruning and Quantization of ML models
Author: NGT 1.3 team
License: Apache-2.0
Project-URL: repository, https://github.com/nroope/PQuant
Classifier: Development Status :: 3 - Alpha
Classifier: Intended Audience :: Developers
Classifier: Intended Audience :: Science/Research
Classifier: License :: OSI Approved :: Apache Software License
Classifier: Programming Language :: Python :: 3 :: Only
Classifier: Programming Language :: Python :: 3.10
Classifier: Programming Language :: Python :: 3.11
Classifier: Programming Language :: Python :: 3.12
Classifier: Programming Language :: Python :: 3.13
Classifier: Topic :: Software Development :: Libraries :: Python Modules
Requires-Python: >=3.10
Description-Content-Type: text/markdown
License-File: LICENSE
Requires-Dist: keras>=3
Requires-Dist: pyyaml>=6.0.1
Requires-Dist: quantizers>=1.1
Requires-Dist: torch>=2.1
Dynamic: license-file

![pquant](docs/_static/pquant.png)

## Prune and Quantize ML models
PQuant is a library for training compressed machine learning models, developed at CERN as part of the [Next Generation Triggers](https://nextgentriggers.web.cern.ch/t13/) project.

PQuant replaces the layers and activations it finds with a Compressed (for layers) or Quantized (for activations) variant. These automatically handle the quantization of the weights, biases, and activations, and the pruning of the weights.
Both PyTorch and TensorFlow models are supported.

Layers that can be compressed: Conv2D and Linear layers, and Tanh and ReLU activations, for both TensorFlow and PyTorch. For PyTorch, also Conv1D.

![pquant_transform](docs/_static/pquant_transform.png)

The various pruning methods have different training steps, such as a pre-training step and a fine-tuning step. PQuant provides a training function: the user provides the functions that train and validate one epoch, and PQuant handles the training while triggering the different training steps.


### Example
An example notebook can be found [here](https://github.com/nroope/PQuant/tree/main/examples). It walks through the
1. Creation of a torch model and data loaders.
2. Creation of the training and validation functions.
3. Loading of a default configuration for a pruning method.
4. Calling of the training function of PQuant, using the configuration, the model, and the training and validation functions, to train and compress the model.
5. Creation of a custom quantization and pruning configuration for a given model (disabling pruning for some layers, using different quantization bitwidths for different layers).

### Pruning methods
A description of the pruning methods and their hyperparameters can be found [here](docs/pruning_methods.md).

### Quantization parameters
A description of the quantization parameters can be found [here](docs/quantization_parameters.md).


### Installation

```pip install .``` for a regular install, ```pip install -e .``` to install as a local editable package.
To run the code, [HGQ2](https://github.com/calad0i/HGQ2) is also needed. For now it is only available as a local install, so clone the repository and install it from there.

### Authors
- Roope Niemi (CERN)
- Anastasiia Petrovych (CERN)
- Chang Sun (Caltech)
- Michael Kagan (SLAC National Accelerator Laboratory)
- Vladimir Loncar (CERN)

pquant_ml-0.0.1/README.md
ADDED

@@ -0,0 +1,42 @@
![pquant](docs/_static/pquant.png)

## Prune and Quantize ML models
PQuant is a library for training compressed machine learning models, developed at CERN as part of the [Next Generation Triggers](https://nextgentriggers.web.cern.ch/t13/) project.

PQuant replaces the layers and activations it finds with a Compressed (for layers) or Quantized (for activations) variant. These automatically handle the quantization of the weights, biases, and activations, and the pruning of the weights.
Both PyTorch and TensorFlow models are supported.

Layers that can be compressed: Conv2D and Linear layers, and Tanh and ReLU activations, for both TensorFlow and PyTorch. For PyTorch, also Conv1D.

![pquant_transform](docs/_static/pquant_transform.png)

The various pruning methods have different training steps, such as a pre-training step and a fine-tuning step. PQuant provides a training function: the user provides the functions that train and validate one epoch, and PQuant handles the training while triggering the different training steps.


### Example
An example notebook can be found [here](https://github.com/nroope/PQuant/tree/main/examples). It walks through the following steps; a minimal sketch of the wiring follows the list.
1. Creation of a torch model and data loaders.
2. Creation of the training and validation functions.
3. Loading of a default configuration for a pruning method.
4. Calling of the training function of PQuant, using the configuration, the model, and the training and validation functions, to train and compress the model.
5. Creation of a custom quantization and pruning configuration for a given model (disabling pruning for some layers, using different quantization bitwidths for different layers).
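
As a rough illustration of steps 1-4, the sketch below shows how the pieces might fit together. The PQuant config-loading and training function names here are placeholders, not the real API (the example notebook uses the real names); only the torch parts are meant literally.

```python
import torch
from torch import nn

# 1. A small torch model; real data loaders would be created the usual way.
model = nn.Sequential(nn.Conv2d(3, 16, 3), nn.ReLU(), nn.Flatten(), nn.LazyLinear(10))

# 2. User-supplied functions that train and validate a single epoch.
def train_epoch(model, loader, optimizer, loss_fn, device):
    model.train()
    for x, y in loader:
        optimizer.zero_grad()
        loss_fn(model(x.to(device)), y.to(device)).backward()
        optimizer.step()

def validate_epoch(model, loader, loss_fn, device):
    model.eval()
    with torch.no_grad():
        return sum(loss_fn(model(x.to(device)), y.to(device)).item() for x, y in loader)

# 3.-4. Hypothetical names standing in for PQuant's config loading and training function:
# config = load_default_config("pdp")                                          # placeholder
# model = train_compressed_model(model, config, train_epoch, validate_epoch)   # placeholder
```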

### Pruning methods
A description of the pruning methods and their hyperparameters can be found [here](docs/pruning_methods.md).

### Quantization parameters
A description of the quantization parameters can be found [here](docs/quantization_parameters.md).


### Installation

```pip install .``` for a regular install, ```pip install -e .``` to install as a local editable package.
To run the code, [HGQ2](https://github.com/calad0i/HGQ2) is also needed. For now it is only available as a local install, so clone the repository and install it from there.

### Authors
- Roope Niemi (CERN)
- Anastasiia Petrovych (CERN)
- Chang Sun (Caltech)
- Michael Kagan (SLAC National Accelerator Laboratory)
- Vladimir Loncar (CERN)

pquant_ml-0.0.1/__init__.py
File without changes

pquant_ml-0.0.1/docs/_static/image.png
Binary file

pquant_ml-0.0.1/docs/_static/pquant.png
Binary file

pquant_ml-0.0.1/docs/_static/pquant_transform.png
Binary file

pquant_ml-0.0.1/docs/pruning_methods.md
ADDED

@@ -0,0 +1,111 @@
# Descriptions of the pruning methods

Our implementations follow the authors' reference implementations whenever we were able to find one. Because of this, some of the functionality of the pruning methods can differ slightly from the equations shown in the papers.

#### [Activation pruning](https://arxiv.org/abs/1903.04476)
Collects layer outputs to calculate the average layer activity (how often a neuron / channel outputs values greater than 0). Neurons and channels whose activity is below a given threshold are pruned.

**Hyperparameters**
- `threshold`: If a neuron or channel is less active than this threshold, prune it.
- `threshold_decay`: Not used.
- `t_delta`: How many batches to collect as calibration data.
- `t_start_collecting_batch`: At which epoch during training the collection begins.
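
A minimal sketch of the idea, assuming 2D convolutional outputs of shape (batch, channels, H, W); the function names and shapes are illustrative, not PQuant's internals.

```python
import torch

def channel_activity(calibration_outputs: list) -> torch.Tensor:
    # Fraction of positions where each channel's output is > 0, averaged
    # over the collected calibration batches (each of shape (batch, C, H, W)).
    per_batch = [(o > 0).float().mean(dim=(0, 2, 3)) for o in calibration_outputs]
    return torch.stack(per_batch).mean(dim=0)

def activity_prune_mask(calibration_outputs: list, threshold: float) -> torch.Tensor:
    # Channels less active than the threshold get mask value 0 (pruned).
    return (channel_activity(calibration_outputs) >= threshold).float()
```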
#### [AutoSparse](https://arxiv.org/abs/2304.06941)
$x = sign(W) \cdot ReLU(|W| - \sigma(T))$.
```math
g = \begin{cases}
1, & \text{if } W > 0 \\
\alpha, & \text{otherwise,}
\end{cases}
```
where $T$ is the threshold, $W$ is the weight matrix, and $g$ is the gradient.
$\alpha$ is decayed after each epoch using cosine sigmoid decay.

**Hyperparameters:**
- `alpha`: Initial value for $\alpha$.
- `backward_sparsity`: If true, sets gradients to 0 for weights in the bottom 50% of weight magnitudes in the layer. False in the default config.
- `threshold_decay`: Threshold decay for the optimizer. 0 in the default config.
- `threshold_init`: Initial value for the threshold. -5 in the default config.
- `threshold_type`: weightwise/channelwise/layerwise. Defines whether each weight has its own threshold, a threshold is shared between the weights in a channel, or the whole layer has one threshold.
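
A sketch of the forward transform above, assuming a layerwise threshold; the gradient rule for $g$ would be realized with a custom autograd function, which is omitted here.

```python
import torch

def autosparse_forward(W: torch.Tensor, T: torch.Tensor) -> torch.Tensor:
    # x = sign(W) * ReLU(|W| - sigmoid(T)): weights whose magnitude falls
    # below sigmoid(T) become exact zeros, i.e. are pruned in this forward pass.
    return torch.sign(W) * torch.relu(W.abs() - torch.sigmoid(T))
```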
#### [Continuous Sparsification](https://arxiv.org/abs/1912.04427)
A multi-round pruning algorithm.
```math
x = W \cdot M
```
where
```math
M = \frac{\sigma(\beta s)}{\sigma(s_{init})}
```
$\beta$ starts from its initial value at the beginning of each round and is increased exponentially until it reaches a final value. $s$ is a learnable matrix with the same shape as the weight matrix. $s_{init}$ is the initial value of $s$.

During each round, as the $s$ matrix is learned and $\beta$ is increased, the values of the mask get pushed more and more towards 0 and 1. After each round, $\beta$ is reset, the positive values of $s$ are set back to $s_{init}$, and the negative values are kept as they are. This means that the weights pruned via $s$ stay pruned after each round, while weights that have not yet been pruned can still be pruned in a later round, since their values in $s$ are reset.

Before fine-tuning, the mask is frozen and converted to a hard mask of 0s and 1s, and all weights are rewound to an earlier state.

**Hyperparameters**
- `final_temp`: Value up to which $\beta$ is increased during each round. 200 in the default config.
- `threshold_decay`: L1 decay for the $s$ matrix. 1.0e-09 in the default config.
- `threshold_init`: Initial value for $s$. 0 in the default config. A lower value means more pruning, a higher value less pruning.
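
A sketch of the soft mask and the end-of-round reset described above, assuming a scalar $s_{init}$; the exponential $\beta$ schedule is omitted.

```python
import torch

def cs_mask(s: torch.Tensor, beta: float, s_init: float) -> torch.Tensor:
    # M = sigmoid(beta * s) / sigmoid(s_init); as beta grows within a round,
    # mask values are pushed towards 0 (where s < 0) or towards 1 (where s > 0).
    return torch.sigmoid(beta * s) / torch.sigmoid(torch.tensor(s_init))

def end_of_round_reset(s: torch.Tensor, s_init: float) -> torch.Tensor:
    # Positive entries are reset to s_init so unpruned weights can still be
    # pruned in later rounds; negative (pruned) entries are kept as they are.
    return torch.where(s > 0, torch.full_like(s, s_init), s)
```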
#### [DST](https://arxiv.org/abs/2005.06870)
$x = ReLU(|W| - T)$.
```math
g = \begin{cases}
2 - 4\cdot|W|, & \text{if } |x| \leq 0.4 \\
0.4, & \text{if } 0.4 < |x| \leq 1 \\
0, & \text{if } |x| > 1.
\end{cases}
```
The threshold $T$ is controlled by an additional loss term, calculated as
```math
\alpha \cdot \sum_{i,j}{e^{-T_{i,j}}}
```

**Hyperparameters**
- `alpha`: Used to control the threshold via the loss. 5.0e-06 in the default config.
- `max_pruning_pct`: The algorithm has a tendency to prune whole layers, so if the pruning fraction rises above this value, the threshold is reset. 0.99 in the default config.
- `threshold_decay`: Threshold decay for the optimizer. 0 in the default config.
- `threshold_init`: Initial value for the threshold. 0 in the default config.
- `threshold_type`: weightwise/channelwise/layerwise. Defines whether each weight has its own threshold, a threshold is shared between the weights in a channel, or the whole layer has one threshold.
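
A sketch of one way to realize the masked forward and the threshold loss; here the mask is written as a hard step on $|W| - T$, and the piecewise gradient estimator $g$ (which would replace the step's zero gradient via a custom autograd function) is omitted.

```python
import torch

def dst_forward(W: torch.Tensor, T: torch.Tensor) -> torch.Tensor:
    # Weights whose magnitude does not exceed the threshold are masked to zero.
    return W * (W.abs() - T > 0).float()

def dst_threshold_loss(T: torch.Tensor, alpha: float) -> torch.Tensor:
    # alpha * sum_ij exp(-T_ij): penalizes small thresholds, pushing T up
    # and therefore pushing the layer towards more pruning.
    return alpha * torch.exp(-T).sum()
```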
#### [PDP](https://arxiv.org/abs/2305.11203)
Captures the weight distribution of each layer and calculates a threshold from it, then takes a softmax between the weights and this threshold, creating a soft mask.


$`W_h = \mathrm{topK}(|W|, (1-r) \cdot n(W))`$\
$`W_i = \mathrm{bottomK}(|W|, r \cdot n(W))`$\
$`t = 0.5 \cdot (\min(W_h) + \max(W_i))`$\
$`z_w, m_w = \mathrm{softmax}\left(\frac{(t^2, w^2)}{\tau}\right)\ \text{for } w \in W`$\
$`w = m_w \cdot w`$,

where $\tau$ is the temperature, $r$ is the target sparsity of the layer for that iteration, and $n(W)$ is the number of weights. The $m_w$ in the above equation holds the softmax value of each weight against the threshold. Because of the small temperature, the softmax result goes towards 1 very quickly for weights above the threshold. $r$ is increased linearly during training. The layerwise sparsity budget is calculated after a pre-training phase, such that the total sparsity of the model matches the target sparsity given in the config.

PDP has unstructured, N:M (not yet implemented here), and channel pruning versions.

**Hyperparameters**
- `epsilon`: How fast to increase the sparsity during training. After each epoch, the sparsity is increased by this amount until it reaches 1 (100% of the target sparsity). 0.015 in the default config, which means the target sparsity is reached after ~70 epochs.
- `sparsity`: Target sparsity for the whole model.
- `temperature`: Temperature of the softmax. 1e-5 in the default config.
- `threshold_decay`: Not used.
- `structured_pruning`: Whether to use the structured pruning variant. Structured pruning uses L2 norms of the channels/neurons instead of the absolute values of the weights when calculating the threshold, and prunes whole channels/neurons using that threshold value.
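
A sketch of the unstructured soft mask for one layer. Note that a two-way softmax over $(t^2, w^2)/\tau$ reduces to a sigmoid of $(w^2 - t^2)/\tau$, which is how it is written here; the linear ramp-up of $r$ is assumed to happen outside this function.

```python
import torch

def pdp_soft_mask(W: torch.Tensor, r: float, tau: float) -> torch.Tensor:
    abs_w, _ = torch.sort(W.abs().flatten())
    n = abs_w.numel()
    k = int(r * n)  # number of weights targeted for pruning at this point of training
    if k <= 0:
        return torch.ones_like(W)
    if k >= n:
        return torch.zeros_like(W)
    # t = 0.5 * (largest of the bottom-k magnitudes + smallest kept magnitude)
    t = 0.5 * (abs_w[k - 1] + abs_w[k])
    # m_w = softmax((t^2, w^2)/tau)[1] = sigmoid((w^2 - t^2)/tau)
    return torch.sigmoid((W**2 - t**2) / tau)

# The forward pass then uses W * pdp_soft_mask(W, r, tau).
```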
#### [Wanda](https://arxiv.org/abs/2306.11695)
One-shot pruning; originally a post-training pruning method without fine-tuning (implementing the post-training version is on the to-do list).

Using a calibration dataset, a metric is calculated from the average input to each layer, and the absolute values of the weights are multiplied by that metric. Weights are then pruned in order of this product (lowest values first) until the target sparsity is reached.

For linear layers, the metric is the L2 norm over the batch dimension. For convolutions, the inputs are first averaged over the batch dimension, and the L2 norm is then calculated over a flattened kernel dimension.

**Hyperparameters**
- `calculate_pruning_budget`: If true, calculates a per-layer pruning budget while keeping the overall target sparsity. If false, prunes every layer at the target sparsity.
- `M`: If doing N:M pruning, N and M should be non-null (N < M).
- `N`: If doing N:M pruning, N and M should be non-null (N < M).
- `threshold_decay`: Not used.
- `sparsity`: Target sparsity. 0.9 in the default config.
- `t_delta`: How many batches to collect as calibration data.
- `t_start_collecting`: Training step at which collection starts.
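A sketch of the metric and mask for a linear layer, assuming the calibration inputs have been collected into one (samples, in_features) tensor; the convolutional variant and the per-layer budget calculation are omitted.

```python
import torch

def wanda_mask(W: torch.Tensor, X: torch.Tensor, sparsity: float) -> torch.Tensor:
    # W: (out_features, in_features); X: (samples, in_features) calibration inputs.
    metric = W.abs() * X.norm(p=2, dim=0)  # per-input-feature L2 norm, broadcast over rows
    k = int(sparsity * metric.numel())
    if k <= 0:
        return torch.ones_like(W)
    # Prune the k weights with the lowest metric values.
    threshold = metric.flatten().kthvalue(k).values
    return (metric > threshold).float()
```
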
pquant_ml-0.0.1/docs/quantization_parameters.md
ADDED

@@ -0,0 +1,11 @@
# Descriptions of the quantization parameters
- `default_integer_bits`: Default number of integer bits used for quantization.
- `default_fractional_bits`: Default number of fractional bits used for quantization. For ReLU, which is unsigned so no bit is spent on the sign, 1 bit is added to the default value when the compression layers are added.
- `enable_quantization`: Enables quantization.
- `hgq_gamma`: Scales the loss of HGQ. If too high, it can prune the whole model.
- `hgq_heterogeneous`: If true, HGQ learns one set of bits for each weight in the model. If false, it learns one set of bits for each layer in the model.
- `layer_specific`: Layers that use non-default quantization bits should be added here. A default config containing all the layers can be created with the function `pquant.add_default_layer_quantization_pruning_to_config`.
- `use_high_granularity_quantization`: If true, uses HGQ instead of fixed quantizers.
- `use_real_tanh`: If true, uses the real tanh function before quantization. If false, uses hard tanh.
- `use_relu_multiplier`: If true, the input of QuantizedReLU is multiplied by a learned multiplier before the QuantizedReLU operation: `inputs_to_relu = inputs_to_relu * 2 ** (round(learned_multiplier))`. The learned multiplier is initialized at -1, so at the beginning of training the inputs are multiplied by 0.5 before QuantizedReLU.
- `use_symmetric_quantization`: If true, `minimum_quantized_value == -maximum_quantized_value`.
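
As an illustration of the `use_relu_multiplier` behaviour, a minimal sketch of the formula above (in training, the rounding would need a straight-through estimator so the multiplier stays learnable):

```python
import torch

learned_multiplier = torch.nn.Parameter(torch.tensor(-1.0))  # initialized at -1

def scale_relu_inputs(inputs: torch.Tensor) -> torch.Tensor:
    # inputs_to_relu = inputs_to_relu * 2 ** round(learned_multiplier);
    # at initialization round(-1) = -1, so the inputs are multiplied by 0.5.
    return inputs * 2.0 ** torch.round(learned_multiplier)
```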