compressed-tensors 0.3.2__py3-none-any.whl → 0.4.0__py3-none-any.whl

This diff shows the changes between publicly released versions of the package as they appear in their respective public registries. It is provided for informational purposes only.
Files changed (40)
  1. compressed_tensors/base.py +2 -1
  2. compressed_tensors/compressors/__init__.py +5 -1
  3. compressed_tensors/compressors/base.py +11 -54
  4. compressed_tensors/compressors/dense.py +4 -4
  5. compressed_tensors/compressors/helpers.py +12 -12
  6. compressed_tensors/compressors/int_quantized.py +126 -0
  7. compressed_tensors/compressors/marlin_24.py +250 -0
  8. compressed_tensors/compressors/model_compressor.py +315 -0
  9. compressed_tensors/compressors/pack_quantized.py +212 -0
  10. compressed_tensors/compressors/sparse_bitmask.py +4 -4
  11. compressed_tensors/compressors/utils/__init__.py +19 -0
  12. compressed_tensors/compressors/utils/helpers.py +43 -0
  13. compressed_tensors/compressors/utils/permutations_24.py +65 -0
  14. compressed_tensors/compressors/utils/semi_structured_conversions.py +341 -0
  15. compressed_tensors/config/base.py +7 -4
  16. compressed_tensors/config/dense.py +4 -4
  17. compressed_tensors/config/sparse_bitmask.py +3 -3
  18. compressed_tensors/quantization/lifecycle/__init__.py +1 -0
  19. compressed_tensors/quantization/lifecycle/apply.py +75 -19
  20. compressed_tensors/quantization/lifecycle/compressed.py +69 -0
  21. compressed_tensors/quantization/lifecycle/forward.py +208 -22
  22. compressed_tensors/quantization/lifecycle/frozen.py +4 -0
  23. compressed_tensors/quantization/lifecycle/initialize.py +33 -5
  24. compressed_tensors/quantization/observers/base.py +70 -5
  25. compressed_tensors/quantization/observers/helpers.py +6 -1
  26. compressed_tensors/quantization/observers/memoryless.py +17 -9
  27. compressed_tensors/quantization/observers/min_max.py +44 -13
  28. compressed_tensors/quantization/quant_args.py +33 -4
  29. compressed_tensors/quantization/quant_config.py +69 -21
  30. compressed_tensors/quantization/quant_scheme.py +81 -1
  31. compressed_tensors/quantization/utils/helpers.py +77 -8
  32. compressed_tensors/utils/helpers.py +26 -122
  33. compressed_tensors/utils/safetensors_load.py +3 -2
  34. compressed_tensors/version.py +53 -0
  35. {compressed_tensors-0.3.2.dist-info → compressed_tensors-0.4.0.dist-info}/METADATA +46 -9
  36. compressed_tensors-0.4.0.dist-info/RECORD +48 -0
  37. compressed_tensors-0.3.2.dist-info/RECORD +0 -38
  38. {compressed_tensors-0.3.2.dist-info → compressed_tensors-0.4.0.dist-info}/LICENSE +0 -0
  39. {compressed_tensors-0.3.2.dist-info → compressed_tensors-0.4.0.dist-info}/WHEEL +0 -0
  40. {compressed_tensors-0.3.2.dist-info → compressed_tensors-0.4.0.dist-info}/top_level.txt +0 -0
compressed_tensors/version.py
@@ -0,0 +1,53 @@
+ # Copyright (c) 2021 - present / Neuralmagic, Inc. All Rights Reserved.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ # http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing,
+ # software distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+
+ """
+ Functionality for storing and setting the version info for SparseML
+ """
+
+
+ version_base = "0.4.0"
+ is_release = True  # change to True to set the generated version as a release version
+
+
+ def _generate_version(
+     is_release: bool,
+     version_base: str,
+ ):
+     from datetime import date
+
+     if is_release:
+         return version_base
+     else:
+         return f"{version_base}.{date.today().strftime('%Y%m%d')}"
+
+
+ __all__ = [
+     "__version__",
+     "version_base",
+     "is_release",
+     "version",
+     "version_major",
+     "version_minor",
+     "version_patch",
+     "version_build",
+     "version_major_minor",
+ ]
+ __version__ = _generate_version(is_release, version_base)
+
+ version = __version__
+ version_major, version_minor, version_patch, version_build = version.split(".") + (
+     [None] if len(version.split(".")) < 4 else []
+ )  # handle conditional for version being 3 parts or 4 (4 containing build date)
+ version_major_minor = f"{version_major}.{version_minor}"
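The new `version.py` derives the package version at import time. As a quick illustration (a standalone sketch, not part of the wheel), the snippet below reproduces the logic above to show what it yields for a release versus a nightly build, and how the 3- or 4-part version string is split:

```python
# Standalone sketch reproducing the version logic from compressed_tensors/version.py above.
from datetime import date

version_base = "0.4.0"

def _generate_version(is_release: bool, version_base: str) -> str:
    # Releases use the bare base version; nightlies append today's date.
    return version_base if is_release else f"{version_base}.{date.today().strftime('%Y%m%d')}"

print(_generate_version(True, version_base))   # "0.4.0" (this wheel has is_release = True)
print(_generate_version(False, version_base))  # e.g. "0.4.0.20240501" for a nightly build

# The split handles both 3-part and 4-part versions by padding with None.
version = _generate_version(True, version_base)
parts = version.split(".")
major, minor, patch, build = parts + ([None] if len(parts) < 4 else [])
print(major, minor, patch, build)  # 0 4 0 None
```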
{compressed_tensors-0.3.2.dist-info → compressed_tensors-0.4.0.dist-info}/METADATA
@@ -1,26 +1,25 @@
  Metadata-Version: 2.1
  Name: compressed-tensors
- Version: 0.3.2
+ Version: 0.4.0
  Summary: Library for utilization of compressed safetensors of neural network models
  Home-page: https://github.com/neuralmagic/compressed-tensors
  Author: Neuralmagic, Inc.
  Author-email: support@neuralmagic.com
  License: Apache 2.0
- Platform: UNKNOWN
  Description-Content-Type: text/markdown
  License-File: LICENSE
- Requires-Dist: pydantic <2.7
  Requires-Dist: torch >=1.7.0
- Requires-Dist: transformers <4.41
+ Requires-Dist: transformers
+ Requires-Dist: pydantic >=2.0
  Provides-Extra: dev
  Requires-Dist: black ==22.12.0 ; extra == 'dev'
- Requires-Dist: flake8 >=3.8.3 ; extra == 'dev'
  Requires-Dist: isort ==5.8.0 ; extra == 'dev'
- Requires-Dist: nbconvert >=7.16.3 ; extra == 'dev'
- Requires-Dist: pytest >=6.0.0 ; extra == 'dev'
  Requires-Dist: wheel >=0.36.2 ; extra == 'dev'
+ Requires-Dist: flake8 >=3.8.3 ; extra == 'dev'
+ Requires-Dist: pytest >=6.0.0 ; extra == 'dev'
+ Requires-Dist: nbconvert >=7.16.3 ; extra == 'dev'

- # compressed-tensors
+ # compressed_tensors

  This repository extends a [safetensors](https://github.com/huggingface/safetensors) format to efficiently store sparse and/or quantized tensors on disk. `compressed-tensors` format supports multiple compression types to minimize the disk space and facilitate the tensor manipulation.

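The dependency changes above are the most user-visible part of this hunk: `pydantic` now requires 2.0 or newer (previously only capped at `<2.7`), `transformers` is no longer pinned below 4.41, and the dev extras are merely reordered. A rough way to check an existing environment against the new runtime constraints, shown as an illustrative sketch that assumes `packaging` is available (it ships with most pip environments):

```python
# Illustrative check of an installed environment against the 0.4.0 requirements shown above.
from importlib.metadata import version
from packaging.version import Version

assert Version(version("torch")) >= Version("1.7.0"), "torch >=1.7.0 required"
assert Version(version("pydantic")) >= Version("2.0"), "pydantic >=2.0 required (0.3.2 allowed <2.7)"
print("transformers:", version("transformers"))  # any version; the <4.41 cap was dropped
```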
@@ -90,7 +89,7 @@ from compressed_tensors import save_compressed_model, load_compressed, BitmaskCo
  from transformers import AutoModelForCausalLM

  model_name = "neuralmagic/llama2.c-stories110M-pruned50"
- model = AutoModelForCausalLM.from_pretrained(model_name)
+ model = AutoModelForCausalLM.from_pretrained(model_name, torch_dtype="auto")

  original_state_dict = model.state_dict()

@@ -106,4 +105,42 @@ state_dict = dict(load_compressed("compressed_model.safetensors", compression_co
  For more in-depth tutorial on bitmask compression, refer to the [notebook](https://github.com/neuralmagic/compressed-tensors/blob/d707c5b84bc3fef164aebdcd97cb6eaa571982f8/examples/bitmask_compression.ipynb).


+ ## Saving a Compressed Model with PTQ
+
+ We can use compressed-tensors to run basic post training quantization (PTQ) and save the quantized model compressed on disk
+
+ ```python
+ model_name = "TinyLlama/TinyLlama-1.1B-intermediate-step-1431k-3T"
+ model = AutoModelForCausalLM.from_pretrained(model_name, device_map="cuda:0", torch_dtype="auto")
+
+ config = QuantizationConfig.parse_file("./examples/bit_packing/int4_config.json")
+ config.quantization_status = QuantizationStatus.CALIBRATION
+ apply_quantization_config(model, config)
+
+ dataset = load_dataset("ptb_text_only")["train"]
+ tokenizer = AutoTokenizer.from_pretrained(model_name)
+
+ def tokenize_function(examples):
+     return tokenizer(examples["sentence"], padding=False, truncation=True, max_length=1024)
+
+ tokenized_dataset = dataset.map(tokenize_function, batched=True)
+ data_loader = DataLoader(tokenized_dataset, batch_size=1, collate_fn=DefaultDataCollator())
+
+ with torch.no_grad():
+     for idx, sample in tqdm(enumerate(data_loader), desc="Running calibration"):
+         sample = {key: value.to(device) for key,value in sample.items()}
+         _ = model(**sample)
+
+         if idx >= 512:
+             break
+
+ model.apply(freeze_module_quantization)
+ model.apply(compress_quantized_weights)
+
+ output_dir = "./ex_llama1.1b_w4a16_packed_quantize"
+ compressor = ModelCompressor(quantization_config=config)
+ compressed_state_dict = compressor.compress(model)
+ model.save_pretrained(output_dir, state_dict=compressed_state_dict)
+ ```

+ For more in-depth tutorial on quantization compression, refer to the [notebook](./examples/quantize_and_pack_int4.ipynb).
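The PTQ snippet added above omits its imports and the `device` variable it moves calibration batches to. Below is a hedged sketch of the preamble it appears to assume; the exact import paths are not confirmed by this diff (the quantization helpers are presumed to be re-exported from `compressed_tensors.quantization` and `ModelCompressor` from `compressed_tensors.compressors`, matching the new modules in the file list):

```python
# Presumed preamble for the PTQ example above (import paths inferred, not verified).
import torch
from datasets import load_dataset
from torch.utils.data import DataLoader
from tqdm import tqdm
from transformers import AutoModelForCausalLM, AutoTokenizer, DefaultDataCollator

from compressed_tensors.compressors import ModelCompressor  # new in 0.4.0
from compressed_tensors.quantization import (
    QuantizationConfig,
    QuantizationStatus,
    apply_quantization_config,
    compress_quantized_weights,
    freeze_module_quantization,
)

device = "cuda:0"  # the snippet references `device` without defining it; the model is loaded on cuda:0
```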
compressed_tensors-0.4.0.dist-info/RECORD
@@ -0,0 +1,48 @@
+ compressed_tensors/__init__.py,sha256=SV1csvHUVCd8kHXz6UDZim1HZ_fAVG3vfk-j_4Bb6hY,789
+ compressed_tensors/base.py,sha256=OA2TOLP1gP3LSH7gp508eqr2ZtDQ-pqRHElCp-aB0vs,755
+ compressed_tensors/version.py,sha256=_nj1yS4msz1OXd0H1v1m-z1JkMOuy19M9lFDTWP5xf0,1585
+ compressed_tensors/compressors/__init__.py,sha256=rhqPp3YXFxCJRLZs1KRNSHTIxK2rNU--sYwDI8MW47w,1061
+ compressed_tensors/compressors/base.py,sha256=LWEgbpgTxzmoqQ7Xhq2OQszUgWoDtFuGCiV1Y8nlBGw,2134
+ compressed_tensors/compressors/dense.py,sha256=G_XHbvuENyupIKlXSITOQgvPkNkcMEOLcLWQr70V9EE,1257
+ compressed_tensors/compressors/helpers.py,sha256=k9avlkmeYj6vkOAvl-MgcixtP7ib24SCfhzZ-RusXfw,5403
+ compressed_tensors/compressors/int_quantized.py,sha256=Ct2vCK0yoPm6vkIFlzDMGQ7m14xT1GyURsSwH9DP770,5242
+ compressed_tensors/compressors/marlin_24.py,sha256=X_BjtFB3Mn0hqiLz56UM3jGX2eNmGLnvEIPfbg7di6U,9444
+ compressed_tensors/compressors/model_compressor.py,sha256=h3ixQtfzt6HxSNtdnB9OVdpCucTmIo4paDoaM7XYZXE,12559
+ compressed_tensors/compressors/pack_quantized.py,sha256=VPiLlgJlDgARrn7YmiQoLqUfxErKBfj54epMYWRsF8k,8451
+ compressed_tensors/compressors/sparse_bitmask.py,sha256=H9oZSTYI1oRCzAMbd4zThUnZd1h2rfs8DmA3tPcvuNE,8637
+ compressed_tensors/compressors/utils/__init__.py,sha256=-mbGDZh1hd9T6u62Ht_iBIK255UmMg0f5bLkSs1f9Cc,731
+ compressed_tensors/compressors/utils/helpers.py,sha256=4fq7KclSIK__jemCG9pwYlgWLrQjsaAMxhIrhjdw0BQ,1506
+ compressed_tensors/compressors/utils/permutations_24.py,sha256=kx6fsfDHebx94zsSzhXGyCyuC9sVyah6BUUir_StT28,2530
+ compressed_tensors/compressors/utils/semi_structured_conversions.py,sha256=g1EZHzdv-ko7ufPX430dp7wE33o6FWJXuSP4zZydCu0,13488
+ compressed_tensors/config/__init__.py,sha256=ZBqWn3r6ku1qfmlHHYp0mQueY0i7Pwhr9rbQk9dDlMc,704
+ compressed_tensors/config/base.py,sha256=ZnpuOevCE0pXdA8OJfIJnxj-ccproH7o1EOwRY8_hUU,1482
+ compressed_tensors/config/dense.py,sha256=NgSxnFCnckU9-iunxEaqiFwqgdO7YYxlWKR74jNbjks,1317
+ compressed_tensors/config/sparse_bitmask.py,sha256=pZUboRNZTu6NajGOQEFExoPknak5ynVAUeiiYpS1Gt8,1308
+ compressed_tensors/quantization/__init__.py,sha256=83J5bPB7PavN2TfCoW7_vEDhfYpm4TDrqYO9vdSQ5bk,760
+ compressed_tensors/quantization/quant_args.py,sha256=Z9Zu20ooAwEWlliAdUw1f1zwSrheuD6vqm3YXgJ1Lws,4388
+ compressed_tensors/quantization/quant_config.py,sha256=hL42sXp1wAZxyrkHarw7tAMRcwSVEr0MT3wmrmL3NhE,8285
+ compressed_tensors/quantization/quant_scheme.py,sha256=aX4h8t8RDqrWeUqoqrYMOxc0xkWcu8Ue_CHLoG-fRjQ,3569
+ compressed_tensors/quantization/lifecycle/__init__.py,sha256=ggRGWRqhCxCaTTDWRcgTVX3axnS2xV6rc5YvdzK7fSg,798
+ compressed_tensors/quantization/lifecycle/apply.py,sha256=aZrglJ5mR3Xaxwj51-1BVVB1JGVkKQEeHxGfBaVmsHI,8881
+ compressed_tensors/quantization/lifecycle/calibration.py,sha256=mLns4jlaWmBwOW8Jtlm5bMX-JET1AiZYUBO7qa-XuxI,1776
+ compressed_tensors/quantization/lifecycle/compressed.py,sha256=VreB10xPwgSLQQlTu20UCrFpRS--cA7-lx5s7nrPPrg,2247
+ compressed_tensors/quantization/lifecycle/forward.py,sha256=0T817yzYqFR1wUjk2XCtOISwr4u7cdkKqAv13jjfu24,11113
+ compressed_tensors/quantization/lifecycle/frozen.py,sha256=h1XYt89MouBTf3jTYLG_6OdFxIu5q2N8tPjsy6J4E6Y,1726
+ compressed_tensors/quantization/lifecycle/initialize.py,sha256=9xgPzHejQUO_AkZcc_SH5kqFeieG-9uo0fMRYV51i7Y,4577
+ compressed_tensors/quantization/observers/__init__.py,sha256=DNH31NQYrIBBcmHsMyFA6whh4pbRsLwuNa6L8AeXaGc,745
+ compressed_tensors/quantization/observers/base.py,sha256=z_JC-CRz-PY7WlpSoyOoSQQWz5ekTEd5LbXt0iHQRes,5239
+ compressed_tensors/quantization/observers/helpers.py,sha256=FUyYUNd-3LbXt0-8Lwr7EPI2m-LXXBTXW1l5iOajNhA,2272
+ compressed_tensors/quantization/observers/memoryless.py,sha256=jH_c6K3gxf4W3VNXQ7tbnP-J_86QTrEfjBn6Kh1C-H8,2165
+ compressed_tensors/quantization/observers/min_max.py,sha256=UK7zCMzxv9GGn6BflBxdajV20RiWaCY2RHcvZodCP1w,3669
+ compressed_tensors/quantization/utils/__init__.py,sha256=VdtEmP0bvuND_IGQnyqUPc5lnFp-1_yD7StKSX4x80w,656
+ compressed_tensors/quantization/utils/helpers.py,sha256=NzAH18Cn_-mTAR87y6IlcQU5gC393XSjgNKC9CRkr78,6017
+ compressed_tensors/registry/__init__.py,sha256=FwLSNYqfIrb5JD_6OK_MT4_svvKTN_nEhpgQlQvGbjI,658
+ compressed_tensors/registry/registry.py,sha256=fxjOjh2wklCvJhQxwofdy-zV8q7MkQ85SLG77nml2iA,11890
+ compressed_tensors/utils/__init__.py,sha256=5DrYjoZbaEvSkJcC-GRSbM_RBHVF4tG9gMd3zsJnjLw,665
+ compressed_tensors/utils/helpers.py,sha256=5ull5yFT31M2zVxKeFvpvvlvX5f1Sk1LGuj_wrfZWCY,2267
+ compressed_tensors/utils/safetensors_load.py,sha256=0MheXwx1jeY12PeISppiSIZHs6rmN2YddwPpFb9V67I,8527
+ compressed_tensors-0.4.0.dist-info/LICENSE,sha256=xx0jnfkXJvxRnG63LTGOxlggYnIysveWIZ6H3PNdCrQ,11357
+ compressed_tensors-0.4.0.dist-info/METADATA,sha256=NtnK_A9ck3KPmh4syGcGtMBGX-_2FyFa7ntCAdf-KGo,5651
+ compressed_tensors-0.4.0.dist-info/WHEEL,sha256=GJ7t_kWBFywbagK5eo9IoUwLW6oyOeTKmQ-9iHFVNxQ,92
+ compressed_tensors-0.4.0.dist-info/top_level.txt,sha256=w2i-GyPs2s1UwVxvutSvN_lM22SXC2hQFBmoMcPnV7Y,19
+ compressed_tensors-0.4.0.dist-info/RECORD,,
@@ -1,38 +0,0 @@
1
- compressed_tensors/__init__.py,sha256=SV1csvHUVCd8kHXz6UDZim1HZ_fAVG3vfk-j_4Bb6hY,789
2
- compressed_tensors/base.py,sha256=8zbgK87LpHkKoSknM55svXCT4E4dLLjPijwF9HfzmsQ,717
3
- compressed_tensors/compressors/__init__.py,sha256=UcHp0CwUBJoS2MBN6mLUT7B3uRf1TEoRGbME7gLPD38,841
4
- compressed_tensors/compressors/base.py,sha256=CqQo00ZIkAWpy0yVux5TXhK7WK_6Ws6qb5mCAvIoxB4,3902
5
- compressed_tensors/compressors/dense.py,sha256=ig9lItmyCX5-VzgMuUqea-s8fHsTjPj5-0VIsPLl0g0,1271
6
- compressed_tensors/compressors/helpers.py,sha256=wstgUEUYUCTMMu6G1YLF9G7vXqIJPj3MsWhqwU4J6Vw,5458
7
- compressed_tensors/compressors/sparse_bitmask.py,sha256=VbCGFC4Q3nfhKWqJdkM4hKQmrZZqLT8wLNxbVt4kLSs,8647
8
- compressed_tensors/config/__init__.py,sha256=ZBqWn3r6ku1qfmlHHYp0mQueY0i7Pwhr9rbQk9dDlMc,704
9
- compressed_tensors/config/base.py,sha256=IP-3Y416w-811WozDzKHycIBXjdlG4Ddy7vpbwhOPD8,1373
10
- compressed_tensors/config/dense.py,sha256=xtkri7DkP7USu44FnSoTgTSqdGegCBtjRf3DfblSEL0,1311
11
- compressed_tensors/config/sparse_bitmask.py,sha256=y8fmQaOoGjIiI4FR6BJjfIqisAcqNQ_zjKyjT75bXwY,1284
12
- compressed_tensors/quantization/__init__.py,sha256=83J5bPB7PavN2TfCoW7_vEDhfYpm4TDrqYO9vdSQ5bk,760
13
- compressed_tensors/quantization/quant_args.py,sha256=cZhe5hRmvU_HnnUDw1kmqzMAGFb0r5t0IL2cobBNw28,3371
14
- compressed_tensors/quantization/quant_config.py,sha256=DWx8ae3gDlw99zAn3MUN9I4qeksbbmITmOXHRynqPB8,6650
15
- compressed_tensors/quantization/quant_scheme.py,sha256=X3oqmZPiIKtX5tEKKUj-0N6hB68NeiU2b1GcQEQPadQ,1480
16
- compressed_tensors/quantization/lifecycle/__init__.py,sha256=fM9XBtPgJX6z54PTm3Sd0SpK5od95ibwaSf2FFR8DqE,772
17
- compressed_tensors/quantization/lifecycle/apply.py,sha256=WXUL3q1g0s244k0wuqGYZPXTXiscdyrp7RScN2j_KGA,6651
18
- compressed_tensors/quantization/lifecycle/calibration.py,sha256=mLns4jlaWmBwOW8Jtlm5bMX-JET1AiZYUBO7qa-XuxI,1776
19
- compressed_tensors/quantization/lifecycle/forward.py,sha256=COTlfH92JkwM9Vd6m07tK_dhSiC77SFS0-MHU_DbQko,4941
20
- compressed_tensors/quantization/lifecycle/frozen.py,sha256=FF7BleuOUX46Egk7F1ZE5r4fjWt9jG5-tO8BjXU1r78,1606
21
- compressed_tensors/quantization/lifecycle/initialize.py,sha256=U6g9qifSF6pagQZQZEwd-rwWC6uQ_dZXn1wg6nr1Abg,3697
22
- compressed_tensors/quantization/observers/__init__.py,sha256=DNH31NQYrIBBcmHsMyFA6whh4pbRsLwuNa6L8AeXaGc,745
23
- compressed_tensors/quantization/observers/base.py,sha256=O76dAxkin7bB602e9kjmxc84p71-PxBtjIq5L69xplI,2786
24
- compressed_tensors/quantization/observers/helpers.py,sha256=SxvOf9zwZ9NDRC3E4Xm7z3RqHcbcPtCABLKX9GnGGHM,2109
25
- compressed_tensors/quantization/observers/memoryless.py,sha256=ZHTPh4aURE8LvHBFaP--HIC2JanMX5-VRdIkE2JHthw,1859
26
- compressed_tensors/quantization/observers/min_max.py,sha256=uAcZd5aY6WKM-KumTb2ybX28s8iKGVy6Nrje5Sddqew,2439
27
- compressed_tensors/quantization/utils/__init__.py,sha256=VdtEmP0bvuND_IGQnyqUPc5lnFp-1_yD7StKSX4x80w,656
28
- compressed_tensors/quantization/utils/helpers.py,sha256=N_wYfrPcFr__Q1mn6mHoNUTclwpTW8P5PDHkR7GvXWo,3694
29
- compressed_tensors/registry/__init__.py,sha256=FwLSNYqfIrb5JD_6OK_MT4_svvKTN_nEhpgQlQvGbjI,658
30
- compressed_tensors/registry/registry.py,sha256=fxjOjh2wklCvJhQxwofdy-zV8q7MkQ85SLG77nml2iA,11890
31
- compressed_tensors/utils/__init__.py,sha256=5DrYjoZbaEvSkJcC-GRSbM_RBHVF4tG9gMd3zsJnjLw,665
32
- compressed_tensors/utils/helpers.py,sha256=b2zQimHNn3emCgUGsVYMpaWQJH_tR9Uso819bU5r78Y,5909
33
- compressed_tensors/utils/safetensors_load.py,sha256=wo9UirGrGlenBqZeqotvpCT7D5MEdjCo2J3HeRaIFoU,8502
34
- compressed_tensors-0.3.2.dist-info/LICENSE,sha256=xx0jnfkXJvxRnG63LTGOxlggYnIysveWIZ6H3PNdCrQ,11357
35
- compressed_tensors-0.3.2.dist-info/METADATA,sha256=658VPFfv3kqbIbEjY0tJNOkRoTdeZApVUbqcC1vtMLM,4060
36
- compressed_tensors-0.3.2.dist-info/WHEEL,sha256=GJ7t_kWBFywbagK5eo9IoUwLW6oyOeTKmQ-9iHFVNxQ,92
37
- compressed_tensors-0.3.2.dist-info/top_level.txt,sha256=w2i-GyPs2s1UwVxvutSvN_lM22SXC2hQFBmoMcPnV7Y,19
38
- compressed_tensors-0.3.2.dist-info/RECORD,,