compressed-tensors 0.10.3a20250701__tar.gz → 0.10.3a20250707__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {compressed_tensors-0.10.3a20250701/src/compressed_tensors.egg-info → compressed_tensors-0.10.3a20250707}/PKG-INFO +2 -1
- {compressed_tensors-0.10.3a20250701 → compressed_tensors-0.10.3a20250707}/setup.py +1 -1
- {compressed_tensors-0.10.3a20250701 → compressed_tensors-0.10.3a20250707}/src/compressed_tensors/transform/factory/hadamard.py +33 -12
- {compressed_tensors-0.10.3a20250701 → compressed_tensors-0.10.3a20250707}/src/compressed_tensors/transform/factory/matrix_multiply.py +1 -0
- {compressed_tensors-0.10.3a20250701 → compressed_tensors-0.10.3a20250707}/src/compressed_tensors/transform/factory/random_hadamard.py +10 -3
- {compressed_tensors-0.10.3a20250701 → compressed_tensors-0.10.3a20250707}/src/compressed_tensors/transform/transform_config.py +2 -2
- {compressed_tensors-0.10.3a20250701 → compressed_tensors-0.10.3a20250707}/src/compressed_tensors/transform/transform_scheme.py +3 -4
- {compressed_tensors-0.10.3a20250701 → compressed_tensors-0.10.3a20250707}/src/compressed_tensors/utils/helpers.py +17 -4
- {compressed_tensors-0.10.3a20250701 → compressed_tensors-0.10.3a20250707}/src/compressed_tensors/version.py +1 -1
- {compressed_tensors-0.10.3a20250701 → compressed_tensors-0.10.3a20250707/src/compressed_tensors.egg-info}/PKG-INFO +2 -1
- {compressed_tensors-0.10.3a20250701 → compressed_tensors-0.10.3a20250707}/src/compressed_tensors.egg-info/SOURCES.txt +1 -0
- {compressed_tensors-0.10.3a20250701 → compressed_tensors-0.10.3a20250707}/src/compressed_tensors.egg-info/requires.txt +1 -0
- compressed_tensors-0.10.3a20250707/tests/test_transform/conftest.py +54 -0
- {compressed_tensors-0.10.3a20250701 → compressed_tensors-0.10.3a20250707}/tests/test_transform/factory/test_correctness.py +30 -49
- {compressed_tensors-0.10.3a20250701 → compressed_tensors-0.10.3a20250707}/tests/test_transform/factory/test_memory.py +30 -43
- {compressed_tensors-0.10.3a20250701 → compressed_tensors-0.10.3a20250707}/tests/test_transform/test_transform_scheme.py +4 -4
- {compressed_tensors-0.10.3a20250701 → compressed_tensors-0.10.3a20250707}/.github/.gitkeep +0 -0
- {compressed_tensors-0.10.3a20250701 → compressed_tensors-0.10.3a20250707}/.github/actions/test/action.yml +0 -0
- {compressed_tensors-0.10.3a20250701 → compressed_tensors-0.10.3a20250707}/.github/scripts/step-status +0 -0
- {compressed_tensors-0.10.3a20250701 → compressed_tensors-0.10.3a20250707}/.github/workflows/build-test.yml +0 -0
- {compressed_tensors-0.10.3a20250701 → compressed_tensors-0.10.3a20250707}/.github/workflows/build.yml +0 -0
- {compressed_tensors-0.10.3a20250701 → compressed_tensors-0.10.3a20250707}/.github/workflows/report.yml +0 -0
- {compressed_tensors-0.10.3a20250701 → compressed_tensors-0.10.3a20250707}/.github/workflows/test-check.yaml +0 -0
- {compressed_tensors-0.10.3a20250701 → compressed_tensors-0.10.3a20250707}/.github/workflows/test.yml +0 -0
- {compressed_tensors-0.10.3a20250701 → compressed_tensors-0.10.3a20250707}/.github/workflows/trigger-all.yml +0 -0
- {compressed_tensors-0.10.3a20250701 → compressed_tensors-0.10.3a20250707}/.github/workflows/upload.yml +0 -0
- {compressed_tensors-0.10.3a20250701 → compressed_tensors-0.10.3a20250707}/.gitignore +0 -0
- {compressed_tensors-0.10.3a20250701 → compressed_tensors-0.10.3a20250707}/LICENSE +0 -0
- {compressed_tensors-0.10.3a20250701 → compressed_tensors-0.10.3a20250707}/Makefile +0 -0
- {compressed_tensors-0.10.3a20250701 → compressed_tensors-0.10.3a20250707}/README.md +0 -0
- {compressed_tensors-0.10.3a20250701 → compressed_tensors-0.10.3a20250707}/examples/bit_packing/ex_quantize_and_pack.py +0 -0
- {compressed_tensors-0.10.3a20250701 → compressed_tensors-0.10.3a20250707}/examples/bit_packing/int4_config.json +0 -0
- {compressed_tensors-0.10.3a20250701 → compressed_tensors-0.10.3a20250707}/examples/bitmask_compression.ipynb +0 -0
- {compressed_tensors-0.10.3a20250701 → compressed_tensors-0.10.3a20250707}/examples/llama_1.1b/ex_config_quantization.py +0 -0
- {compressed_tensors-0.10.3a20250701 → compressed_tensors-0.10.3a20250707}/examples/llama_1.1b/ex_llmcompressor_quantization.py +0 -0
- {compressed_tensors-0.10.3a20250701 → compressed_tensors-0.10.3a20250707}/examples/llama_1.1b/example_quant_config.json +0 -0
- {compressed_tensors-0.10.3a20250701 → compressed_tensors-0.10.3a20250707}/examples/llama_1.1b/example_quant_recipe.yaml +0 -0
- {compressed_tensors-0.10.3a20250701 → compressed_tensors-0.10.3a20250707}/examples/quantize_and_pack_int4.ipynb +0 -0
- {compressed_tensors-0.10.3a20250701 → compressed_tensors-0.10.3a20250707}/pyproject.toml +0 -0
- {compressed_tensors-0.10.3a20250701 → compressed_tensors-0.10.3a20250707}/setup.cfg +0 -0
- {compressed_tensors-0.10.3a20250701 → compressed_tensors-0.10.3a20250707}/src/__init__.py +0 -0
- {compressed_tensors-0.10.3a20250701 → compressed_tensors-0.10.3a20250707}/src/compressed_tensors/README.md +0 -0
- {compressed_tensors-0.10.3a20250701 → compressed_tensors-0.10.3a20250707}/src/compressed_tensors/__init__.py +0 -0
- {compressed_tensors-0.10.3a20250701 → compressed_tensors-0.10.3a20250707}/src/compressed_tensors/base.py +0 -0
- {compressed_tensors-0.10.3a20250701 → compressed_tensors-0.10.3a20250707}/src/compressed_tensors/compressors/__init__.py +0 -0
- {compressed_tensors-0.10.3a20250701 → compressed_tensors-0.10.3a20250707}/src/compressed_tensors/compressors/base.py +0 -0
- {compressed_tensors-0.10.3a20250701 → compressed_tensors-0.10.3a20250707}/src/compressed_tensors/compressors/helpers.py +0 -0
- {compressed_tensors-0.10.3a20250701 → compressed_tensors-0.10.3a20250707}/src/compressed_tensors/compressors/model_compressors/__init__.py +0 -0
- {compressed_tensors-0.10.3a20250701 → compressed_tensors-0.10.3a20250707}/src/compressed_tensors/compressors/model_compressors/model_compressor.py +0 -0
- {compressed_tensors-0.10.3a20250701 → compressed_tensors-0.10.3a20250707}/src/compressed_tensors/compressors/quantized_compressors/__init__.py +0 -0
- {compressed_tensors-0.10.3a20250701 → compressed_tensors-0.10.3a20250707}/src/compressed_tensors/compressors/quantized_compressors/base.py +0 -0
- {compressed_tensors-0.10.3a20250701 → compressed_tensors-0.10.3a20250707}/src/compressed_tensors/compressors/quantized_compressors/naive_quantized.py +0 -0
- {compressed_tensors-0.10.3a20250701 → compressed_tensors-0.10.3a20250707}/src/compressed_tensors/compressors/quantized_compressors/nvfp4_quantized.py +0 -0
- {compressed_tensors-0.10.3a20250701 → compressed_tensors-0.10.3a20250707}/src/compressed_tensors/compressors/quantized_compressors/pack_quantized.py +0 -0
- {compressed_tensors-0.10.3a20250701 → compressed_tensors-0.10.3a20250707}/src/compressed_tensors/compressors/sparse_compressors/__init__.py +0 -0
- {compressed_tensors-0.10.3a20250701 → compressed_tensors-0.10.3a20250707}/src/compressed_tensors/compressors/sparse_compressors/base.py +0 -0
- {compressed_tensors-0.10.3a20250701 → compressed_tensors-0.10.3a20250707}/src/compressed_tensors/compressors/sparse_compressors/dense.py +0 -0
- {compressed_tensors-0.10.3a20250701 → compressed_tensors-0.10.3a20250707}/src/compressed_tensors/compressors/sparse_compressors/sparse_24_bitmask.py +0 -0
- {compressed_tensors-0.10.3a20250701 → compressed_tensors-0.10.3a20250707}/src/compressed_tensors/compressors/sparse_compressors/sparse_bitmask.py +0 -0
- {compressed_tensors-0.10.3a20250701 → compressed_tensors-0.10.3a20250707}/src/compressed_tensors/compressors/sparse_quantized_compressors/__init__.py +0 -0
- {compressed_tensors-0.10.3a20250701 → compressed_tensors-0.10.3a20250707}/src/compressed_tensors/compressors/sparse_quantized_compressors/marlin_24.py +0 -0
- {compressed_tensors-0.10.3a20250701 → compressed_tensors-0.10.3a20250707}/src/compressed_tensors/config/__init__.py +0 -0
- {compressed_tensors-0.10.3a20250701 → compressed_tensors-0.10.3a20250707}/src/compressed_tensors/config/base.py +0 -0
- {compressed_tensors-0.10.3a20250701 → compressed_tensors-0.10.3a20250707}/src/compressed_tensors/config/dense.py +0 -0
- {compressed_tensors-0.10.3a20250701 → compressed_tensors-0.10.3a20250707}/src/compressed_tensors/config/sparse_24_bitmask.py +0 -0
- {compressed_tensors-0.10.3a20250701 → compressed_tensors-0.10.3a20250707}/src/compressed_tensors/config/sparse_bitmask.py +0 -0
- {compressed_tensors-0.10.3a20250701 → compressed_tensors-0.10.3a20250707}/src/compressed_tensors/linear/__init__.py +0 -0
- {compressed_tensors-0.10.3a20250701 → compressed_tensors-0.10.3a20250707}/src/compressed_tensors/linear/compressed_linear.py +0 -0
- {compressed_tensors-0.10.3a20250701 → compressed_tensors-0.10.3a20250707}/src/compressed_tensors/quantization/__init__.py +0 -0
- {compressed_tensors-0.10.3a20250701 → compressed_tensors-0.10.3a20250707}/src/compressed_tensors/quantization/lifecycle/__init__.py +0 -0
- {compressed_tensors-0.10.3a20250701 → compressed_tensors-0.10.3a20250707}/src/compressed_tensors/quantization/lifecycle/apply.py +0 -0
- {compressed_tensors-0.10.3a20250701 → compressed_tensors-0.10.3a20250707}/src/compressed_tensors/quantization/lifecycle/compressed.py +0 -0
- {compressed_tensors-0.10.3a20250701 → compressed_tensors-0.10.3a20250707}/src/compressed_tensors/quantization/lifecycle/forward.py +0 -0
- {compressed_tensors-0.10.3a20250701 → compressed_tensors-0.10.3a20250707}/src/compressed_tensors/quantization/lifecycle/helpers.py +0 -0
- {compressed_tensors-0.10.3a20250701 → compressed_tensors-0.10.3a20250707}/src/compressed_tensors/quantization/lifecycle/initialize.py +0 -0
- {compressed_tensors-0.10.3a20250701 → compressed_tensors-0.10.3a20250707}/src/compressed_tensors/quantization/quant_args.py +0 -0
- {compressed_tensors-0.10.3a20250701 → compressed_tensors-0.10.3a20250707}/src/compressed_tensors/quantization/quant_config.py +0 -0
- {compressed_tensors-0.10.3a20250701 → compressed_tensors-0.10.3a20250707}/src/compressed_tensors/quantization/quant_scheme.py +0 -0
- {compressed_tensors-0.10.3a20250701 → compressed_tensors-0.10.3a20250707}/src/compressed_tensors/quantization/utils/__init__.py +0 -0
- {compressed_tensors-0.10.3a20250701 → compressed_tensors-0.10.3a20250707}/src/compressed_tensors/quantization/utils/helpers.py +0 -0
- {compressed_tensors-0.10.3a20250701 → compressed_tensors-0.10.3a20250707}/src/compressed_tensors/registry/__init__.py +0 -0
- {compressed_tensors-0.10.3a20250701 → compressed_tensors-0.10.3a20250707}/src/compressed_tensors/registry/registry.py +0 -0
- {compressed_tensors-0.10.3a20250701 → compressed_tensors-0.10.3a20250707}/src/compressed_tensors/transform/__init__.py +0 -0
- {compressed_tensors-0.10.3a20250701 → compressed_tensors-0.10.3a20250707}/src/compressed_tensors/transform/factory/__init__.py +0 -0
- {compressed_tensors-0.10.3a20250701 → compressed_tensors-0.10.3a20250707}/src/compressed_tensors/transform/factory/base.py +0 -0
- {compressed_tensors-0.10.3a20250701 → compressed_tensors-0.10.3a20250707}/src/compressed_tensors/transform/transform_args.py +0 -0
- {compressed_tensors-0.10.3a20250701 → compressed_tensors-0.10.3a20250707}/src/compressed_tensors/transform/utils/__init__.py +0 -0
- {compressed_tensors-0.10.3a20250701 → compressed_tensors-0.10.3a20250707}/src/compressed_tensors/transform/utils/hadamard.py +0 -0
- {compressed_tensors-0.10.3a20250701 → compressed_tensors-0.10.3a20250707}/src/compressed_tensors/transform/utils/hadamards.safetensors +0 -0
- {compressed_tensors-0.10.3a20250701 → compressed_tensors-0.10.3a20250707}/src/compressed_tensors/transform/utils/utils.py +0 -0
- {compressed_tensors-0.10.3a20250701 → compressed_tensors-0.10.3a20250707}/src/compressed_tensors/utils/__init__.py +0 -0
- {compressed_tensors-0.10.3a20250701 → compressed_tensors-0.10.3a20250707}/src/compressed_tensors/utils/offload.py +0 -0
- {compressed_tensors-0.10.3a20250701 → compressed_tensors-0.10.3a20250707}/src/compressed_tensors/utils/permutations_24.py +0 -0
- {compressed_tensors-0.10.3a20250701 → compressed_tensors-0.10.3a20250707}/src/compressed_tensors/utils/permute.py +0 -0
- {compressed_tensors-0.10.3a20250701 → compressed_tensors-0.10.3a20250707}/src/compressed_tensors/utils/safetensors_load.py +0 -0
- {compressed_tensors-0.10.3a20250701 → compressed_tensors-0.10.3a20250707}/src/compressed_tensors/utils/semi_structured_conversions.py +0 -0
- {compressed_tensors-0.10.3a20250701 → compressed_tensors-0.10.3a20250707}/src/compressed_tensors.egg-info/dependency_links.txt +0 -0
- {compressed_tensors-0.10.3a20250701 → compressed_tensors-0.10.3a20250707}/src/compressed_tensors.egg-info/top_level.txt +0 -0
- {compressed_tensors-0.10.3a20250701 → compressed_tensors-0.10.3a20250707}/tests/__init__.py +0 -0
- {compressed_tensors-0.10.3a20250701 → compressed_tensors-0.10.3a20250707}/tests/conftest.py +0 -0
- {compressed_tensors-0.10.3a20250701 → compressed_tensors-0.10.3a20250707}/tests/test_compressors/__init__.py +0 -0
- {compressed_tensors-0.10.3a20250701 → compressed_tensors-0.10.3a20250707}/tests/test_compressors/model_compressors/__init__.py +0 -0
- {compressed_tensors-0.10.3a20250701 → compressed_tensors-0.10.3a20250707}/tests/test_compressors/model_compressors/test_model_compressor.py +0 -0
- {compressed_tensors-0.10.3a20250701 → compressed_tensors-0.10.3a20250707}/tests/test_compressors/quantized_compressors/__init__.py +0 -0
- {compressed_tensors-0.10.3a20250701 → compressed_tensors-0.10.3a20250707}/tests/test_compressors/quantized_compressors/test_fp8_quant.py +0 -0
- {compressed_tensors-0.10.3a20250701 → compressed_tensors-0.10.3a20250707}/tests/test_compressors/quantized_compressors/test_int_quant.py +0 -0
- {compressed_tensors-0.10.3a20250701 → compressed_tensors-0.10.3a20250707}/tests/test_compressors/quantized_compressors/test_nvfp4_quant.py +0 -0
- {compressed_tensors-0.10.3a20250701 → compressed_tensors-0.10.3a20250707}/tests/test_compressors/quantized_compressors/test_pack_quant.py +0 -0
- {compressed_tensors-0.10.3a20250701 → compressed_tensors-0.10.3a20250707}/tests/test_compressors/sparse_compressors/__init__.py +0 -0
- {compressed_tensors-0.10.3a20250701 → compressed_tensors-0.10.3a20250707}/tests/test_compressors/sparse_compressors/test_bitmask.py +0 -0
- {compressed_tensors-0.10.3a20250701 → compressed_tensors-0.10.3a20250707}/tests/test_compressors/sparse_compressors/test_sparse_24_bitmask.py +0 -0
- {compressed_tensors-0.10.3a20250701 → compressed_tensors-0.10.3a20250707}/tests/test_compressors/sparse_quantized_compressors/__init__.py +0 -0
- {compressed_tensors-0.10.3a20250701 → compressed_tensors-0.10.3a20250707}/tests/test_compressors/sparse_quantized_compressors/test_marlin_24.py +0 -0
- {compressed_tensors-0.10.3a20250701 → compressed_tensors-0.10.3a20250707}/tests/test_configs/__init__.py +0 -0
- {compressed_tensors-0.10.3a20250701 → compressed_tensors-0.10.3a20250707}/tests/test_configs/test_base.py +0 -0
- {compressed_tensors-0.10.3a20250701 → compressed_tensors-0.10.3a20250707}/tests/test_examples/test_bitmask_compression_ipynb.py +0 -0
- {compressed_tensors-0.10.3a20250701 → compressed_tensors-0.10.3a20250707}/tests/test_linear/__init__.py +0 -0
- {compressed_tensors-0.10.3a20250701 → compressed_tensors-0.10.3a20250707}/tests/test_linear/test_compressed_linear.py +0 -0
- {compressed_tensors-0.10.3a20250701 → compressed_tensors-0.10.3a20250707}/tests/test_quantization/__init__.py +0 -0
- {compressed_tensors-0.10.3a20250701 → compressed_tensors-0.10.3a20250707}/tests/test_quantization/lifecycle/__init__.py +0 -0
- {compressed_tensors-0.10.3a20250701 → compressed_tensors-0.10.3a20250707}/tests/test_quantization/lifecycle/conftest.py +0 -0
- {compressed_tensors-0.10.3a20250701 → compressed_tensors-0.10.3a20250707}/tests/test_quantization/lifecycle/test_apply.py +0 -0
- {compressed_tensors-0.10.3a20250701 → compressed_tensors-0.10.3a20250707}/tests/test_quantization/lifecycle/test_dynamic_lifecycle.py +0 -0
- {compressed_tensors-0.10.3a20250701 → compressed_tensors-0.10.3a20250707}/tests/test_quantization/lifecycle/test_enabled.py +0 -0
- {compressed_tensors-0.10.3a20250701 → compressed_tensors-0.10.3a20250707}/tests/test_quantization/lifecycle/test_forward.py +0 -0
- {compressed_tensors-0.10.3a20250701 → compressed_tensors-0.10.3a20250707}/tests/test_quantization/lifecycle/test_helpers.py +0 -0
- {compressed_tensors-0.10.3a20250701 → compressed_tensors-0.10.3a20250707}/tests/test_quantization/lifecycle/test_initialize.py +0 -0
- {compressed_tensors-0.10.3a20250701 → compressed_tensors-0.10.3a20250707}/tests/test_quantization/lifecycle/test_lifecycle.py +0 -0
- {compressed_tensors-0.10.3a20250701 → compressed_tensors-0.10.3a20250707}/tests/test_quantization/test_configs/__init__.py +0 -0
- {compressed_tensors-0.10.3a20250701 → compressed_tensors-0.10.3a20250707}/tests/test_quantization/test_configs/test_bit_depths.py +0 -0
- {compressed_tensors-0.10.3a20250701 → compressed_tensors-0.10.3a20250707}/tests/test_quantization/test_configs/test_strategies.py +0 -0
- {compressed_tensors-0.10.3a20250701 → compressed_tensors-0.10.3a20250707}/tests/test_quantization/test_quant_args.py +0 -0
- {compressed_tensors-0.10.3a20250701 → compressed_tensors-0.10.3a20250707}/tests/test_quantization/test_quant_config.py +0 -0
- {compressed_tensors-0.10.3a20250701 → compressed_tensors-0.10.3a20250707}/tests/test_quantization/test_quant_scheme.py +0 -0
- {compressed_tensors-0.10.3a20250701 → compressed_tensors-0.10.3a20250707}/tests/test_quantization/test_utils/test_helpers.py +0 -0
- {compressed_tensors-0.10.3a20250701 → compressed_tensors-0.10.3a20250707}/tests/test_registry.py +0 -0
- {compressed_tensors-0.10.3a20250701 → compressed_tensors-0.10.3a20250707}/tests/test_transform/test_transform_args.py +0 -0
- {compressed_tensors-0.10.3a20250701 → compressed_tensors-0.10.3a20250707}/tests/test_transform/test_transform_config.py +0 -0
- {compressed_tensors-0.10.3a20250701 → compressed_tensors-0.10.3a20250707}/tests/test_transform/utils/test_hadamard.py +0 -0
- {compressed_tensors-0.10.3a20250701 → compressed_tensors-0.10.3a20250707}/tests/test_utils/__init__.py +0 -0
- {compressed_tensors-0.10.3a20250701 → compressed_tensors-0.10.3a20250707}/tests/test_utils/test_helpers.py +0 -0
- {compressed_tensors-0.10.3a20250701 → compressed_tensors-0.10.3a20250707}/tests/test_utils/test_offload.py +0 -0
- {compressed_tensors-0.10.3a20250701 → compressed_tensors-0.10.3a20250707}/tests/test_utils/test_safetensors_load.py +0 -0
- {compressed_tensors-0.10.3a20250701 → compressed_tensors-0.10.3a20250707}/tests/testing_utils.py +0 -0
- {compressed_tensors-0.10.3a20250701 → compressed_tensors-0.10.3a20250707}/utils/copyright.py +0 -0
{compressed_tensors-0.10.3a20250701/src/compressed_tensors.egg-info → compressed_tensors-0.10.3a20250707}/PKG-INFO

```diff
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: compressed-tensors
-Version: 0.10.3a20250701
+Version: 0.10.3a20250707
 Summary: Library for utilization of compressed safetensors of neural network models
 Home-page: https://github.com/neuralmagic/compressed-tensors
 Author: Neuralmagic, Inc.
@@ -11,6 +11,7 @@ License-File: LICENSE
 Requires-Dist: torch>=1.7.0
 Requires-Dist: transformers
 Requires-Dist: pydantic>=2.0
+Requires-Dist: frozendict
 Provides-Extra: dev
 Requires-Dist: black==22.12.0; extra == "dev"
 Requires-Dist: isort==5.8.0; extra == "dev"
```
{compressed_tensors-0.10.3a20250701 → compressed_tensors-0.10.3a20250707}/setup.py

```diff
@@ -88,7 +88,7 @@ def _setup_packages() -> List:
     )
 
 def _setup_install_requires() -> List:
-    return ["torch>=1.7.0", "transformers", "pydantic>=2.0"]
+    return ["torch>=1.7.0", "transformers", "pydantic>=2.0", "frozendict"]
 
 def _setup_extras() -> Dict:
     return {
```
{compressed_tensors-0.10.3a20250701 → compressed_tensors-0.10.3a20250707}/src/compressed_tensors/transform/factory/hadamard.py

```diff
@@ -12,7 +12,7 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-from typing import Optional
+from typing import Optional, Union
 
 import torch
 from compressed_tensors.transform import TransformArgs, TransformScheme
@@ -22,7 +22,7 @@ from compressed_tensors.transform.utils.utils import (
     apply_transform_weight,
     get_matrix_size,
 )
-from compressed_tensors.utils import get_offloaded_device
+from compressed_tensors.utils import get_execution_device, get_offloaded_device
 from compressed_tensors.utils.helpers import ParameterizedDefaultDict
 from torch import Tensor, device, dtype
 from torch.nn import Linear, Module, Parameter
@@ -41,6 +41,7 @@ class HadamardFactory(TransformFactory):
     def __init__(self, name: str, scheme: TransformScheme, seed: Optional[int] = None):
         super().__init__(name, scheme, seed)
         self.weights = ParameterizedDefaultDict(self._create_weight)
+        self.perms = ParameterizedDefaultDict(self._create_permutation)
 
     def create_transform(self, module: Module, args: TransformArgs):
         """
@@ -54,26 +55,46 @@ class HadamardFactory(TransformFactory):
         size = get_matrix_size(module, args.location)
         dtype = module.weight.dtype
         device = get_offloaded_device(module)
+        exec_device = get_execution_device(module)
 
-
-
+        factory_kwargs = {"construct_device": exec_device}
+        weight = self.weights.get(size, dtype, device, factory_kwargs=factory_kwargs)
+        perm = self.perms[weight] if self.scheme.randomize else None
+        return HadamardTransform(weight, perm, args)
 
-    def _create_weight(
-
-
+    def _create_weight(
+        self,
+        size: int,
+        dtype: dtype,
+        device: device,
+        construct_device: device,
+    ) -> Parameter:
+        # construct on execution device, cache on offload device
+        data = deterministic_hadamard_matrix(size, dtype, construct_device)
+        data = data.to(device=device)
         return Parameter(data, requires_grad=self.scheme.requires_grad)
 
+    def _create_permutation(self, weight: Parameter) -> Parameter:
+        data = torch.randperm(weight.size(0), generator=self.generator)
+        return Parameter(data, requires_grad=False)
+
 
 class HadamardTransform(TransformBase):
-    def __init__(
+    def __init__(
+        self, weight: Parameter, perm: Union[Parameter, None], args: TransformArgs
+    ):
         super().__init__()
         self.weight = weight
+        self.perm = perm
         self.args = args
 
     def forward(self, value: Tensor) -> Tensor:
-
-
-
-        weight = self.
+        weight = self.weight
+
+        if self.perm is not None:
+            weight = weight[self.perm][:, self.perm]
+
+        if self.args.inverse:
+            weight = weight.T
 
         return apply_transform_weight(weight, value, self.args.location)
```
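The permutation introduced above is applied symmetrically to rows and columns (`weight[self.perm][:, self.perm]`), which keeps the Hadamard matrix orthogonal up to its usual scale, so the inverse transform can still use the transpose. A minimal, self-contained sketch of that property (using a Sylvester construction purely for illustration, not the library's own matrix utilities):

```python
import torch


def sylvester_hadamard(n: int) -> torch.Tensor:
    """Build an n x n Hadamard matrix for n a power of two."""
    h = torch.ones((1, 1))
    while h.shape[0] < n:
        h = torch.cat([torch.cat([h, h], dim=1), torch.cat([h, -h], dim=1)], dim=0)
    return h


h = sylvester_hadamard(8)
perm = torch.randperm(8)
permuted = h[perm][:, perm]  # same indexing pattern as HadamardTransform.forward
# permuted @ permuted.T == n * I, so the transpose is still a (scaled) inverse
assert torch.allclose(permuted @ permuted.T, 8 * torch.eye(8))
```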
{compressed_tensors-0.10.3a20250701 → compressed_tensors-0.10.3a20250707}/src/compressed_tensors/transform/factory/matrix_multiply.py

```diff
@@ -62,6 +62,7 @@ class RandomMatrixFactory(TransformFactory):
         return RandomMatrixTransform(weight, args)
 
     def _create_weight(self, size: int, dtype: dtype, device: device) -> Parameter:
+        # TODO: verify that weight is invertible (has non-zero determinant)
        data = torch.rand(
             (size, size), generator=self.generator, dtype=dtype, device=device
         )
```
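The TODO added above could eventually be resolved with a full-rank check on the random draw; the helper below is a hypothetical sketch of one such check (not part of the package):

```python
import torch


def draw_invertible(size: int, dtype=torch.float32, max_tries: int = 10) -> torch.Tensor:
    """Redraw until the random square matrix is full rank (hence invertible)."""
    for _ in range(max_tries):
        data = torch.rand((size, size), dtype=dtype)
        if torch.linalg.matrix_rank(data) == size:
            return data
    raise RuntimeError("could not draw an invertible matrix")
```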
{compressed_tensors-0.10.3a20250701 → compressed_tensors-0.10.3a20250707}/src/compressed_tensors/transform/factory/random_hadamard.py

```diff
@@ -28,7 +28,14 @@ class RandomHadamardFactory(HadamardFactory):
     :param seed: random seed used to transform weight randomization
     """
 
-    def _create_weight(
-
-
+    def _create_weight(
+        self,
+        size: int,
+        dtype: dtype,
+        device: device,
+        construct_device: device,
+    ) -> Parameter:
+        # construct on execution device, cache on offload device
+        data = random_hadamard_matrix(size, dtype, construct_device, self.generator)
+        data = data.to(device=device)
         return Parameter(data, requires_grad=self.scheme.requires_grad)
```
{compressed_tensors-0.10.3a20250701 → compressed_tensors-0.10.3a20250707}/src/compressed_tensors/transform/transform_config.py

```diff
@@ -49,7 +49,7 @@ QUIP = TransformConfig(
                     inverse=True,
                 ),
             ],
-
+            randomize=True,
         ),
         "u": TransformScheme(
             type="hadamard",
@@ -62,7 +62,7 @@ QUIP = TransformConfig(
                     targets=["Linear"], location="output", inverse=True  # non-mergable
                 ),
             ],
-
+            randomize=True,
         ),
     }
 )
```
{compressed_tensors-0.10.3a20250701 → compressed_tensors-0.10.3a20250707}/src/compressed_tensors/transform/transform_scheme.py

```diff
@@ -31,13 +31,12 @@ class TransformScheme(BaseModel):
         (see `Transforms.registered_names()`)
     :param apply: list of TransformationArgs containing the information about the
         modules that should be targeted by the specified transform
-    :param
-
-        applicable
+    :param randomize: True if uniquely randomized transform weights should be used,
+        otherwise use identical transform weights where applicable
     :param requires_grad: True if weights include gradients for training
     """
 
     type: str
     apply: List[TransformArgs] = Field(default_factory=list)
-
+    randomize: bool = Field(default=False)
     requires_grad: bool = Field(default=False)
```
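For reference, a short sketch of how the new `randomize` field reads on a scheme (field and argument names taken from the diff above; the values are illustrative):

```python
from compressed_tensors.transform import TransformArgs, TransformScheme

scheme = TransformScheme(
    type="hadamard",
    apply=[TransformArgs(targets="Linear", location="input")],
    randomize=True,  # request uniquely randomized transform weights
)
assert scheme.randomize and not scheme.requires_grad
```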
{compressed_tensors-0.10.3a20250701 → compressed_tensors-0.10.3a20250707}/src/compressed_tensors/utils/helpers.py

```diff
@@ -15,10 +15,11 @@
 import contextlib
 import warnings
 from functools import wraps
-from typing import TYPE_CHECKING, Any, Callable, Dict, List, Optional
+from typing import TYPE_CHECKING, Any, Callable, Dict, List, Mapping, Optional
 
 import numpy
 import torch
+from frozendict import frozendict
 from transformers import AutoConfig
 
 
@@ -373,11 +374,23 @@ class ParameterizedDefaultDict(dict):
 
     def __init__(self, default_factory: Callable[[Any], Any]):
         self.default_factory = default_factory
+        self._factory_kwargs = frozendict()
 
-    def __missing__(self, key):
+    def __missing__(self, key: Any) -> Any:
         if isinstance(key, tuple):
-            value = self.default_factory(*key)
+            value = self.default_factory(*key, **self._factory_kwargs)
         else:
-            value = self.default_factory(key)
+            value = self.default_factory(key, **self._factory_kwargs)
         self[key] = value
         return value
+
+    def get(self, *args, factory_kwargs: Mapping = frozendict()) -> Any:
+        """
+        Similar to `__getitem__`, but allows passing kwargs to factory function
+
+        :param \\*args: args whose tuple will value will be treated as key
+        :param factory_kwargs: keyword arguments to pass to `default_factory`
+        :return: dictionary entry for given key
+        """
+        with patch_attr(self, "_factory_kwargs", factory_kwargs):
+            return self[args]
```
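A toy usage sketch of the updated helper (the factory function below is illustrative, not part of the package): positional arguments form the cache key, while `factory_kwargs` are forwarded to the factory without becoming part of the key.

```python
import torch
from compressed_tensors.utils.helpers import ParameterizedDefaultDict


def make_tensor(size, dtype, device, construct_device="cpu"):
    # build on construct_device, then move to the device that is part of the key
    return torch.zeros(size, dtype=dtype, device=construct_device).to(device)


cache = ParameterizedDefaultDict(make_tensor)
a = cache.get(4, torch.float32, "cpu", factory_kwargs={"construct_device": "cpu"})
b = cache[4, torch.float32, "cpu"]  # same key, so the cached entry is returned
assert a is b
```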
{compressed_tensors-0.10.3a20250701 → compressed_tensors-0.10.3a20250707/src/compressed_tensors.egg-info}/PKG-INFO

```diff
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: compressed-tensors
-Version: 0.10.3a20250701
+Version: 0.10.3a20250707
 Summary: Library for utilization of compressed safetensors of neural network models
 Home-page: https://github.com/neuralmagic/compressed-tensors
 Author: Neuralmagic, Inc.
@@ -11,6 +11,7 @@ License-File: LICENSE
 Requires-Dist: torch>=1.7.0
 Requires-Dist: transformers
 Requires-Dist: pydantic>=2.0
+Requires-Dist: frozendict
 Provides-Extra: dev
 Requires-Dist: black==22.12.0; extra == "dev"
 Requires-Dist: isort==5.8.0; extra == "dev"
```
{compressed_tensors-0.10.3a20250701 → compressed_tensors-0.10.3a20250707}/src/compressed_tensors.egg-info/SOURCES.txt

```diff
@@ -130,6 +130,7 @@ tests/test_quantization/test_configs/__init__.py
 tests/test_quantization/test_configs/test_bit_depths.py
 tests/test_quantization/test_configs/test_strategies.py
 tests/test_quantization/test_utils/test_helpers.py
+tests/test_transform/conftest.py
 tests/test_transform/test_transform_args.py
 tests/test_transform/test_transform_config.py
 tests/test_transform/test_transform_scheme.py
```
compressed_tensors-0.10.3a20250707/tests/test_transform/conftest.py (new file)

```diff
@@ -0,0 +1,54 @@
+# Copyright (c) 2021 - present / Neuralmagic, Inc. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import pytest
+import torch
+from compressed_tensors.transform import TransformArgs
+
+
+class TransformableModel(torch.nn.Module):
+    def __init__(self, *sizes):
+        super().__init__()
+        self.fcs = torch.nn.ModuleList(
+            [
+                torch.nn.Linear(sizes[index], sizes[index + 1], bias=False)
+                for index in range(0, len(sizes) - 1)
+            ]
+        )
+
+    def forward(self, x):
+        for layer in self.fcs:
+            x = layer(x)
+        return x
+
+
+@pytest.fixture(scope="function")
+def model_apply():
+    model = TransformableModel(2, 4, 8, 16, 32, 64)
+    apply = [
+        # weight output -> input
+        TransformArgs(targets="fcs.0", location="weight_output"),
+        TransformArgs(targets="fcs.1", location="input", inverse=True),
+        # output -> weight input
+        TransformArgs(targets="fcs.1", location="output"),
+        TransformArgs(targets="fcs.2", location="weight_input", inverse=True),
+        # output -> input
+        TransformArgs(targets="fcs.2", location="output"),
+        TransformArgs(targets="fcs.3", location="input", inverse=True),
+        # weight output -> weight input
+        TransformArgs(targets="fcs.3", location="weight_output"),
+        TransformArgs(targets="fcs.4", location="weight_input", inverse=True),
+    ]
+
+    return model, apply
```
{compressed_tensors-0.10.3a20250701 → compressed_tensors-0.10.3a20250707}/tests/test_transform/factory/test_correctness.py

```diff
@@ -16,6 +16,7 @@ import pytest
 import torch
 from compressed_tensors.transform import (
     TransformArgs,
+    TransformConfig,
     TransformFactory,
     TransformScheme,
 )
@@ -23,27 +24,18 @@ from compressed_tensors.utils import offloaded_dispatch
 from tests.testing_utils import requires_accelerate, requires_gpu
 
 
-
-
-
-
-
-        for index in range(1, len(sizes) - 1):
-            self.fcs.append(torch.nn.Linear(sizes[index], sizes[index + 1], bias=False))
+def scheme_kwargs():
+    all_types = TransformFactory.registered_names()
+    base = [{"type": type} for type in all_types]
+    randomized = [{"type": type, "randomize": True} for type in all_types]
+    return base + randomized
 
-    def forward(self, x):
-        for layer in self.fcs:
-            x = layer(x)
-        return x
 
-
-
-    "scheme",
-    [TransformScheme(type=name) for name in TransformFactory.registered_names()],
-)
-def test_correctness_linear(scheme):
+@pytest.mark.parametrize("scheme_kwargs", scheme_kwargs())
+def test_correctness_linear(scheme_kwargs):
     size = (4, 8)
     module = torch.nn.Linear(*size, bias=True)
+    scheme = TransformScheme(**scheme_kwargs)
     factory = TransformFactory.from_scheme(scheme, name="")
 
     input_tfm = factory.create_transform(
@@ -67,50 +59,39 @@ def test_correctness_linear(scheme):
     assert torch.allclose(true_output, output, atol=1e-5, rtol=0.0)
 
 
-@pytest.mark.parametrize(
-
-    [TransformScheme(type=name) for name in TransformFactory.registered_names()],
-)
-def test_correctness_model(scheme, offload=False):
+@pytest.mark.parametrize("scheme_kwargs", scheme_kwargs())
+def test_correctness_model(scheme_kwargs, model_apply, offload=False):
     # load model
-    model =
+    model = model_apply[0]
     if offload:
         model = offloaded_dispatch(model, torch.device("cuda"))
 
-    #
-    scheme.apply = [
-        # weight output -> input
-        TransformArgs(targets="fcs.0", location="weight_output"),
-        TransformArgs(targets="fcs.1", location="input", inverse=True),
-        # output -> weight input
-        TransformArgs(targets="fcs.1", location="output"),
-        TransformArgs(targets="fcs.2", location="weight_input", inverse=True),
-        # output -> input
-        TransformArgs(targets="fcs.2", location="output"),
-        TransformArgs(targets="fcs.3", location="input", inverse=True),
-        # weight output -> weight input
-        TransformArgs(targets="fcs.3", location="weight_output"),
-        TransformArgs(targets="fcs.4", location="weight_input", inverse=True),
-    ]
-    factory = TransformFactory.from_scheme(scheme, name="")
-
-    # create inputs
+    # get output
     input = torch.rand((17, model.fcs[0].in_features))
     if offload:
         input = input.to(torch.device("cuda"))
+    true_output = model(input)
+
+    # apply transforms
+    config = TransformConfig(
+        config_groups={
+            "": TransformScheme(
+                **scheme_kwargs,
+                apply=model_apply[1],
+            )
+        }
+    )
+    for name, scheme in config.config_groups.items():
+        factory = TransformFactory.from_scheme(scheme, name=name)
+        factory.apply_to_model(model)
 
     # compare outputs
-    true_output = model(input)
-    factory.apply_to_model(model)
     output = model(input)
     assert torch.allclose(true_output, output, atol=1e-5, rtol=0.0)
 
 
 @requires_gpu
 @requires_accelerate()
-@pytest.mark.parametrize(
-
-
-)
-def test_correctness_model_offload(scheme):
-    test_correctness_model(scheme, offload=True)
+@pytest.mark.parametrize("scheme_kwargs", scheme_kwargs())
+def test_correctness_model_offload(scheme_kwargs, model_apply):
+    test_correctness_model(scheme_kwargs, model_apply, offload=True)
```
{compressed_tensors-0.10.3a20250701 → compressed_tensors-0.10.3a20250707}/tests/test_transform/factory/test_memory.py

```diff
@@ -19,49 +19,44 @@ import torch
 from compressed_tensors.transform import (
     TransformArgs,
     TransformBase,
+    TransformConfig,
     TransformFactory,
     TransformScheme,
 )
 from compressed_tensors.utils import align_modules, offloaded_dispatch
+from tests.test_transform.conftest import TransformableModel
 from tests.testing_utils import requires_accelerate, requires_gpu
 
 
-
-
-
-
-
-        for index in range(1, len(sizes) - 1):
-            self.fcs.append(torch.nn.Linear(sizes[index], sizes[index + 1], bias=False))
+def scheme_kwargs():
+    all_types = TransformFactory.registered_names()
+    base = [{"type": type} for type in all_types]
+    randomized = [{"type": type, "randomize": True} for type in all_types]
+    return base + randomized
 
-    def forward(self, x):
-        for layer in self.fcs:
-            x = layer(x)
-        return x
-
-
-@pytest.mark.parametrize(
-    "scheme",
-    [TransformScheme(type=name) for name in TransformFactory.registered_names()],
-)
-def test_memory_sharing(scheme, offload=False):
-    # load scheme and factory
-    scheme = TransformScheme(
-        type="hadamard",
-        apply=[
-            TransformArgs(targets="Linear", location="input"),
-            TransformArgs(targets="Linear", location="output"),
-        ],
-    )
-    factory = TransformFactory.from_scheme(scheme, name="")
 
+@pytest.mark.parametrize("scheme_kwargs", scheme_kwargs())
+def test_memory_sharing(scheme_kwargs, offload=False):
     # load model (maybe with offloading)
     model = TransformableModel(2, 2, 4, 4, 8, 8)
     if offload:
         offloaded_dispatch(model, torch.device("cuda"))
 
     # add transforms to model
-
+    config = TransformConfig(
+        config_groups={
+            "": TransformScheme(
+                **scheme_kwargs,
+                apply=[
+                    TransformArgs(targets="Linear", location="input"),
+                    TransformArgs(targets="Linear", location="output"),
+                ],
+            )
+        }
+    )
+    for name, scheme in config.config_groups.items():
+        factory = TransformFactory.from_scheme(scheme, name=name)
+        factory.apply_to_model(model)
 
     # check that memory is shared when onloaded
     with align_modules(model.modules()):
@@ -93,20 +88,12 @@ def test_memory_sharing(scheme, offload=False):
 
 @requires_gpu
 @requires_accelerate()
-@pytest.mark.parametrize(
-
-
-)
-def test_memory_sharing_offload(scheme):
-    test_memory_sharing(scheme, offload=True)
+@pytest.mark.parametrize("scheme_kwargs", scheme_kwargs())
+def test_memory_sharing_offload(scheme_kwargs):
+    test_memory_sharing(scheme_kwargs, offload=True)
 
 
-@pytest.mark.parametrize(
-
-    [
-
-        for name in TransformFactory.registered_names()
-    ],
-)
-def test_memory_sharing_training(scheme):
-    test_memory_sharing(scheme, offload=False)
+@pytest.mark.parametrize("scheme_kwargs", scheme_kwargs())
+def test_memory_sharing_training(scheme_kwargs):
+    scheme_kwargs["requires_grad"] = True
+    test_memory_sharing(scheme_kwargs, offload=False)
```
{compressed_tensors-0.10.3a20250701 → compressed_tensors-0.10.3a20250707}/tests/test_transform/test_transform_scheme.py

```diff
@@ -24,7 +24,7 @@ def test_basic_scheme():
         type="hadamard",
         apply=[basic_args],
     )
-    assert not scheme.
+    assert not scheme.randomize
     assert scheme.type == "hadamard"
     assert len(scheme.apply) == 1
     assert isinstance(scheme.apply[0], TransformArgs)
@@ -43,10 +43,10 @@ def test_multiple_groups_global():
     scheme = TransformScheme(
         type="hadamard",
         apply=[embedding_args, linear_args],
-
+        randomize=True,
     )
 
-    assert scheme.
+    assert scheme.randomize
     assert scheme.type == "hadamard"
     assert len(scheme.apply) == 2
     assert isinstance(scheme.apply[0], TransformArgs)
@@ -69,6 +69,6 @@ def test_multiple_groups():
         apply=apply,
     )
 
-    assert not scheme.
+    assert not scheme.randomize
     assert scheme.type == "hadamard"
     assert len(scheme.apply) == 20
```