bayesianflow-for-chem 1.2.5.tar.gz → 1.2.6.tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release: this version of bayesianflow-for-chem has been flagged by the registry as possibly problematic.
- {bayesianflow_for_chem-1.2.5 → bayesianflow_for_chem-1.2.6}/PKG-INFO +1 -1
- {bayesianflow_for_chem-1.2.5 → bayesianflow_for_chem-1.2.6}/bayesianflow_for_chem/__init__.py +1 -1
- {bayesianflow_for_chem-1.2.5 → bayesianflow_for_chem-1.2.6}/bayesianflow_for_chem/tool.py +19 -8
- {bayesianflow_for_chem-1.2.5 → bayesianflow_for_chem-1.2.6}/bayesianflow_for_chem.egg-info/PKG-INFO +1 -1
- {bayesianflow_for_chem-1.2.5 → bayesianflow_for_chem-1.2.6}/LICENSE +0 -0
- {bayesianflow_for_chem-1.2.5 → bayesianflow_for_chem-1.2.6}/README.md +0 -0
- {bayesianflow_for_chem-1.2.5 → bayesianflow_for_chem-1.2.6}/bayesianflow_for_chem/data.py +0 -0
- {bayesianflow_for_chem-1.2.5 → bayesianflow_for_chem-1.2.6}/bayesianflow_for_chem/model.py +0 -0
- {bayesianflow_for_chem-1.2.5 → bayesianflow_for_chem-1.2.6}/bayesianflow_for_chem/scorer.py +0 -0
- {bayesianflow_for_chem-1.2.5 → bayesianflow_for_chem-1.2.6}/bayesianflow_for_chem/train.py +0 -0
- {bayesianflow_for_chem-1.2.5 → bayesianflow_for_chem-1.2.6}/bayesianflow_for_chem/vocab.txt +0 -0
- {bayesianflow_for_chem-1.2.5 → bayesianflow_for_chem-1.2.6}/bayesianflow_for_chem.egg-info/SOURCES.txt +0 -0
- {bayesianflow_for_chem-1.2.5 → bayesianflow_for_chem-1.2.6}/bayesianflow_for_chem.egg-info/dependency_links.txt +0 -0
- {bayesianflow_for_chem-1.2.5 → bayesianflow_for_chem-1.2.6}/bayesianflow_for_chem.egg-info/requires.txt +0 -0
- {bayesianflow_for_chem-1.2.5 → bayesianflow_for_chem-1.2.6}/bayesianflow_for_chem.egg-info/top_level.txt +0 -0
- {bayesianflow_for_chem-1.2.5 → bayesianflow_for_chem-1.2.6}/pyproject.toml +0 -0
- {bayesianflow_for_chem-1.2.5 → bayesianflow_for_chem-1.2.6}/setup.cfg +0 -0
- {bayesianflow_for_chem-1.2.5 → bayesianflow_for_chem-1.2.6}/setup.py +0 -0
bayesianflow_for_chem/tool.py

```diff
@@ -492,15 +492,16 @@ def inpaint(
 
 
 def quantise_model(model: ChemBFN) -> nn.Module:
     """
-    Dynamic quantisation of the trained model.
+    Dynamic quantisation of the trained model to `torch.qint8` data type.
 
     :param model: trained ChemBFN model
     :type model: bayesianflow_for_chem.model.ChemBFN
     :return: quantised model
     :rtype: torch.nn.Module
     """
-    from torch.ao.nn.quantized.modules.utils import _quantize_weight
     from torch.ao.nn.quantized import dynamic
+    from torch.ao.nn.quantized.modules.utils import _quantize_weight
+    from torch.ao.quantization.qconfig import default_dynamic_qconfig
 
     class QuantisedLinear(dynamic.Linear):
         # Modified from https://github.com/pytorch/pytorch/blob/main/torch/ao/nn/quantized/dynamic/modules/linear.py
@@ -543,7 +544,22 @@ def quantise_model(model: ChemBFN) -> nn.Module:
             self._packed_params.requires_grad_(False)
 
         def forward(self, x: Tensor) -> Tensor:
-
+            if self._packed_params.dtype == torch.qint8:
+                if self.version is None or self.version < 4:
+                    Y = torch.ops.quantized.linear_dynamic(
+                        x, self._packed_params._packed_params
+                    )
+                else:
+                    Y = torch.ops.quantized.linear_dynamic(
+                        x, self._packed_params._packed_params, reduce_range=True
+                    )
+            elif self._packed_params.dtype == torch.float16:
+                Y = torch.ops.quantized.linear_dynamic_fp16(
+                    x, self._packed_params._packed_params
+                )
+            else:
+                raise RuntimeError("Unsupported dtype on dynamic quantized linear!")
+            result = Y.to(x.dtype)
             if self.lora_enabled and isinstance(self.lora_dropout, float):
                 result += (
                     nn.functional.dropout(x, self.lora_dropout, self.training)
@@ -562,11 +578,6 @@ def quantise_model(model: ChemBFN) -> nn.Module:
             if mod.qconfig is not None and mod.qconfig.weight is not None:
                 weight_observer = mod.qconfig.weight()
             else:
-                # We have the circular import issues if we import the qconfig in the beginning of this file:
-                # https://github.com/pytorch/pytorch/pull/24231. The current workaround is to postpone the
-                # import until we need it.
-                from torch.ao.quantization.qconfig import default_dynamic_qconfig
-
                 weight_observer = default_dynamic_qconfig.weight()
             dtype = weight_observer.dtype
             assert dtype in [torch.qint8, torch.float16], (
```
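For context, the patched `quantise_model` helper performs dynamic (weight-only) quantisation of the model's linear layers to `torch.qint8`: weights are packed as int8 ahead of time, activations are quantised on the fly by `torch.ops.quantized.linear_dynamic`, and the LoRA correction visible in the surrounding context lines is then added to the dequantised output. The sketch below is not from the package; it only illustrates the same underlying mechanism using PyTorch's stock `quantize_dynamic` API on a hypothetical toy module (the layer sizes and input are made up). In this package you would instead call `quantise_model(model)` on an already-trained `ChemBFN` instance so the custom `QuantisedLinear.forward`, including its LoRA branch, is used.

```python
import torch
from torch import nn
from torch.ao.quantization import quantize_dynamic

# Toy float model standing in for a trained network; only its Linear layers
# are affected by dynamic quantisation (sizes here are illustrative).
toy = nn.Sequential(nn.Linear(64, 128), nn.GELU(), nn.Linear(128, 64)).eval()

# Standard dynamic quantisation: Linear weights become qint8, activations are
# quantised per call inside the same torch.ops.quantized.linear_dynamic kernel
# that the patched forward() in the diff above dispatches to.
qtoy = quantize_dynamic(toy, {nn.Linear}, dtype=torch.qint8)

x = torch.randn(2, 64)
print(qtoy(x).shape)  # torch.Size([2, 64]); outputs remain float32
```

This kind of quantisation targets CPU inference: it shrinks the stored weights roughly 4x and speeds up matrix multiplications, at the cost of a small precision loss, which is why the docstring change in 1.2.6 spells out the `torch.qint8` target explicitly.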