bayesianflow-for-chem 1.2.0 (tar.gz)
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release: this version of bayesianflow-for-chem has been marked as potentially problematic by the registry.
- bayesianflow_for_chem-1.2.0/PKG-INFO +162 -0
- bayesianflow_for_chem-1.2.0/README.md +116 -0
- bayesianflow_for_chem-1.2.0/bayesianflow_for_chem/__init__.py +11 -0
- bayesianflow_for_chem-1.2.0/bayesianflow_for_chem/data.py +250 -0
- bayesianflow_for_chem-1.2.0/bayesianflow_for_chem/model.py +927 -0
- bayesianflow_for_chem-1.2.0/bayesianflow_for_chem/scorer.py +134 -0
- bayesianflow_for_chem-1.2.0/bayesianflow_for_chem/tool.py +470 -0
- bayesianflow_for_chem-1.2.0/bayesianflow_for_chem/train.py +243 -0
- bayesianflow_for_chem-1.2.0/bayesianflow_for_chem/vocab.txt +246 -0
- bayesianflow_for_chem-1.2.0/bayesianflow_for_chem.egg-info/PKG-INFO +162 -0
- bayesianflow_for_chem-1.2.0/bayesianflow_for_chem.egg-info/SOURCES.txt +14 -0
- bayesianflow_for_chem-1.2.0/bayesianflow_for_chem.egg-info/dependency_links.txt +1 -0
- bayesianflow_for_chem-1.2.0/bayesianflow_for_chem.egg-info/requires.txt +10 -0
- bayesianflow_for_chem-1.2.0/bayesianflow_for_chem.egg-info/top_level.txt +1 -0
- bayesianflow_for_chem-1.2.0/setup.cfg +4 -0
- bayesianflow_for_chem-1.2.0/setup.py +71 -0

@@ -0,0 +1,162 @@ bayesianflow_for_chem-1.2.0/PKG-INFO
Metadata-Version: 2.2
Name: bayesianflow_for_chem
Version: 1.2.0
Summary: Bayesian flow network framework for Chemistry
Home-page: https://augus1999.github.io/bayesian-flow-network-for-chemistry/
Author: Nianze A. Tao
Author-email: tao-nianze@hiroshima-u.ac.jp
License: AGPL-3.0 licence
Project-URL: Source, https://github.com/Augus1999/bayesian-flow-network-for-chemistry
Keywords: Chemistry,CLM,ChemBFN
Classifier: Development Status :: 5 - Production/Stable
Classifier: Intended Audience :: Science/Research
Classifier: License :: OSI Approved :: GNU Affero General Public License v3
Classifier: Natural Language :: English
Classifier: Programming Language :: Python :: 3
Classifier: Programming Language :: Python :: 3.9
Classifier: Programming Language :: Python :: 3.10
Classifier: Programming Language :: Python :: 3.11
Classifier: Programming Language :: Python :: 3.12
Classifier: Topic :: Scientific/Engineering :: Chemistry
Classifier: Topic :: Scientific/Engineering :: Artificial Intelligence
Requires-Python: >=3.9
Description-Content-Type: text/markdown
Requires-Dist: rdkit>=2023.9.6
Requires-Dist: torch>=2.3.1
Requires-Dist: numpy>=1.26.4
Requires-Dist: loralib>=0.1.2
Requires-Dist: lightning>=2.2.0
Requires-Dist: scikit-learn>=1.5.0
Requires-Dist: typing_extensions>=4.8.0
Provides-Extra: geo2seq
Requires-Dist: pynauty>=2.8.8.1; extra == "geo2seq"
Dynamic: author
Dynamic: author-email
Dynamic: classifier
Dynamic: description
Dynamic: description-content-type
Dynamic: home-page
Dynamic: keywords
Dynamic: license
Dynamic: project-url
Dynamic: provides-extra
Dynamic: requires-dist
Dynamic: requires-python
Dynamic: summary

# ChemBFN: Bayesian Flow Network for Chemistry

[](https://doi.org/10.1021/acs.jcim.4c01792)
[](https://arxiv.org/abs/2412.11439)

This is the repository of the PyTorch implementation of the ChemBFN model.

## Features

ChemBFN provides state-of-the-art functionality for
* SMILES or SELFIES-based *de novo* molecule generation
* Protein sequence *de novo* generation
* Classifier-free guidance conditional generation (single- or multi-objective optimisation)
* Context-guided conditional generation (inpainting)
* Outstanding out-of-distribution chemical space sampling
* Fast sampling via an ODE solver
* Molecular property and activity prediction finetuning
* Reaction yield prediction finetuning

in an all-in-one-model style.

## News

* [21/01/2025] Our first paper has been accepted by [JCIM](https://pubs.acs.org/doi/10.1021/acs.jcim.4c01792).
* [17/12/2024] The second paper, on out-of-distribution generation, is available on [arxiv.org](https://arxiv.org/abs/2412.11439).
* [31/07/2024] The paper is available on [arxiv.org](https://arxiv.org/abs/2407.20294).
* [21/07/2024] The paper was submitted to arXiv.

## Install

```bash
$ pip install -U bayesianflow_for_chem
```

## Usage

You can find example scripts in the [📁example](./example) folder.

## Pre-trained Model

You can find pretrained models on the [release page](https://github.com/Augus1999/bayesian-flow-network-for-chemistry/releases) or on our [🤗Hugging Face model page](https://huggingface.co/suenoomozawa/ChemBFN).

## Dataset Handling

We provide a Python class [`CSVData`](./bayesianflow_for_chem/data.py) to handle data stored in CSV (or a similar format) with headers that identify the entities. The following is a quickstart.

1. Download your dataset file (e.g., ESOL from [MoleculeNet](https://deepchemdata.s3-us-west-1.amazonaws.com/datasets/delaney-processed.csv)) and split the file:
```python
>>> from bayesianflow_for_chem.tool import split_data

>>> split_data("delaney-processed.csv", method="scaffold")
```

2. Load the split data:
```python
>>> from bayesianflow_for_chem.data import smiles2token, collate, CSVData

>>> dataset = CSVData("delaney-processed_train.csv")
>>> dataset[0]
{'Compound ID': ['Thiophene'],
 'ESOL predicted log solubility in mols per litre': ['-2.2319999999999998'],
 'Minimum Degree': ['2'],
 'Molecular Weight': ['84.14299999999999'],
 'Number of H-Bond Donors': ['0'],
 'Number of Rings': ['1'],
 'Number of Rotatable Bonds': ['0'],
 'Polar Surface Area': ['0.0'],
 'measured log solubility in mols per litre': ['-1.33'],
 'smiles': ['c1ccsc1']}
```

3. Create a mapping function to tokenise the dataset and select values:
```python
>>> import torch

>>> def encode(x):
...     smiles = x["smiles"][0]
...     value = [float(i) for i in x["measured log solubility in mols per litre"]]
...     return {"token": smiles2token(smiles), "value": torch.tensor(value)}

>>> dataset.map(encode)
>>> dataset[0]
{'token': tensor([  1, 151,  23, 151, 151, 154, 151,  23,   2]),
 'value': tensor([-1.3300])}
```

4. Wrap the dataset in `torch.utils.data.DataLoader`:
```python
>>> dataloader = torch.utils.data.DataLoader(dataset, 32, collate_fn=collate)
```

## Cite This Work

```bibtex
@misc{2024chembfn,
      title={A Bayesian Flow Network Framework for Chemistry Tasks},
      author={Nianze Tao and Minori Abe},
      year={2024},
      eprint={2407.20294},
      archivePrefix={arXiv},
      primaryClass={cs.LG},
      url={https://arxiv.org/abs/2407.20294},
}
```
Out-of-distribution generation:
```bibtex
@misc{2024chembfn_ood,
      title={Bayesian Flow Is All You Need to Sample Out-of-Distribution Chemical Spaces},
      author={Nianze Tao},
      year={2024},
      eprint={2412.11439},
      archivePrefix={arXiv},
      primaryClass={cs.LG},
      url={https://arxiv.org/abs/2412.11439},
}
```

@@ -0,0 +1,116 @@ bayesianflow_for_chem-1.2.0/README.md
# ChemBFN: Bayesian Flow Network for Chemistry

[](https://doi.org/10.1021/acs.jcim.4c01792)
[](https://arxiv.org/abs/2412.11439)

This is the repository of the PyTorch implementation of the ChemBFN model.

## Features

ChemBFN provides state-of-the-art functionality for
* SMILES or SELFIES-based *de novo* molecule generation
* Protein sequence *de novo* generation
* Classifier-free guidance conditional generation (single- or multi-objective optimisation)
* Context-guided conditional generation (inpainting)
* Outstanding out-of-distribution chemical space sampling
* Fast sampling via an ODE solver
* Molecular property and activity prediction finetuning
* Reaction yield prediction finetuning

in an all-in-one-model style.

## News

* [21/01/2025] Our first paper has been accepted by [JCIM](https://pubs.acs.org/doi/10.1021/acs.jcim.4c01792).
* [17/12/2024] The second paper, on out-of-distribution generation, is available on [arxiv.org](https://arxiv.org/abs/2412.11439).
* [31/07/2024] The paper is available on [arxiv.org](https://arxiv.org/abs/2407.20294).
* [21/07/2024] The paper was submitted to arXiv.

## Install

```bash
$ pip install -U bayesianflow_for_chem
```
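
The PKG-INFO metadata earlier in this diff also declares an optional `geo2seq` extra that pulls in `pynauty` (presumably for Geo2Seq support). Assuming standard pip extras syntax, it can be installed with:

```bash
$ pip install -U "bayesianflow_for_chem[geo2seq]"
```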

## Usage

You can find example scripts in the [📁example](./example) folder.

## Pre-trained Model

You can find pretrained models on the [release page](https://github.com/Augus1999/bayesian-flow-network-for-chemistry/releases) or on our [🤗Hugging Face model page](https://huggingface.co/suenoomozawa/ChemBFN).

## Dataset Handling

We provide a Python class [`CSVData`](./bayesianflow_for_chem/data.py) to handle data stored in CSV (or a similar format) with headers that identify the entities. The following is a quickstart.

1. Download your dataset file (e.g., ESOL from [MoleculeNet](https://deepchemdata.s3-us-west-1.amazonaws.com/datasets/delaney-processed.csv)) and split the file:
```python
>>> from bayesianflow_for_chem.tool import split_data

>>> split_data("delaney-processed.csv", method="scaffold")
```

2. Load the split data:
```python
>>> from bayesianflow_for_chem.data import smiles2token, collate, CSVData

>>> dataset = CSVData("delaney-processed_train.csv")
>>> dataset[0]
{'Compound ID': ['Thiophene'],
 'ESOL predicted log solubility in mols per litre': ['-2.2319999999999998'],
 'Minimum Degree': ['2'],
 'Molecular Weight': ['84.14299999999999'],
 'Number of H-Bond Donors': ['0'],
 'Number of Rings': ['1'],
 'Number of Rotatable Bonds': ['0'],
 'Polar Surface Area': ['0.0'],
 'measured log solubility in mols per litre': ['-1.33'],
 'smiles': ['c1ccsc1']}
```

3. Create a mapping function to tokenise the dataset and select values:
```python
>>> import torch

>>> def encode(x):
...     smiles = x["smiles"][0]
...     value = [float(i) for i in x["measured log solubility in mols per litre"]]
...     return {"token": smiles2token(smiles), "value": torch.tensor(value)}

>>> dataset.map(encode)
>>> dataset[0]
{'token': tensor([  1, 151,  23, 151, 151, 154, 151,  23,   2]),
 'value': tensor([-1.3300])}
```

4. Wrap the dataset in `torch.utils.data.DataLoader`:
```python
>>> dataloader = torch.utils.data.DataLoader(dataset, 32, collate_fn=collate)
```
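
As a quick sanity check (a minimal sketch, not part of the original quickstart), the first batch produced by `collate` can be inspected directly; with the `encode` mapping above it holds a zero-padded `token` matrix and a `value` column:

```python
>>> batch = next(iter(dataloader))
>>> batch["token"].shape  # (32, longest token length in this batch), dtype torch.long
>>> batch["value"].shape  # (32, 1): one measured log-solubility per molecule
```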

## Cite This Work

```bibtex
@misc{2024chembfn,
      title={A Bayesian Flow Network Framework for Chemistry Tasks},
      author={Nianze Tao and Minori Abe},
      year={2024},
      eprint={2407.20294},
      archivePrefix={arXiv},
      primaryClass={cs.LG},
      url={https://arxiv.org/abs/2407.20294},
}
```
Out-of-distribution generation:
```bibtex
@misc{2024chembfn_ood,
      title={Bayesian Flow Is All You Need to Sample Out-of-Distribution Chemical Spaces},
      author={Nianze Tao},
      year={2024},
      eprint={2412.11439},
      archivePrefix={arXiv},
      primaryClass={cs.LG},
      url={https://arxiv.org/abs/2412.11439},
}
```

@@ -0,0 +1,11 @@ bayesianflow_for_chem-1.2.0/bayesianflow_for_chem/__init__.py
# -*- coding: utf-8 -*-
# Author: Nianze A. Tao (Omozawa Sueno)
"""
ChemBFN package.
"""
from . import data, tool, train, scorer
from .model import ChemBFN, MLP

__all__ = ["data", "tool", "train", "scorer", "ChemBFN", "MLP"]
__version__ = "1.2.0"
__author__ = "Nianze A. Tao (Omozawa Sueno)"
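
A quick interpreter check of the package root above (a minimal sketch; the expected values are taken verbatim from `__init__.py`):

```python
>>> import bayesianflow_for_chem as bfc
>>> bfc.__version__
'1.2.0'
>>> sorted(bfc.__all__)
['ChemBFN', 'MLP', 'data', 'scorer', 'tool', 'train']
```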

@@ -0,0 +1,250 @@ bayesianflow_for_chem-1.2.0/bayesianflow_for_chem/data.py
# -*- coding: utf-8 -*-
# Author: Nianze A. TAO (Omozawa SUENO)
"""
Tokenise SMILES/SAFE/SELFIES/GEO2SEQ/protein-sequence strings.
"""
import os
import re
from pathlib import Path
from typing import Any, List, Dict, Union, Callable
import torch
import torch.nn.functional as F
from torch import Tensor
from torch.utils.data import Dataset

__filedir__ = Path(__file__).parent

SMI_REGEX_PATTERN = (
    r"(\[|\]|H[e,f,g,s,o]?|"
    r"L[i,v,a,r,u]|"
    r"B[e,r,a,i,h,k]?|"
    r"C[l,a,r,o,u,d,s,n,e,m,f]?|"
    r"N[e,a,i,b,h,d,o,p]?|"
    r"O[s,g]?|S[i,c,e,r,n,m,b,g]?|"
    r"K[r]?|T[i,c,e,a,l,b,h,m,s]|"
    r"G[a,e,d]|R[b,u,h,e,n,a,f,g]|"
    r"Yb?|Z[n,r]|P[t,o,d,r,a,u,b,m]?|"
    r"F[e,r,l,m]?|M[g,n,o,t,c,d]|"
    r"A[l,r,s,g,u,t,c,m]|I[n,r]?|"
    r"W|X[e]|E[u,r,s]|U|D[b,s,y]|"
    r"b|c|n|o|s|p|"
    r"\(|\)|\.|=|#|-|\+|\\|\/|:|"
    r"~|@|\?|>>?|\*|\$|\%[0-9]{2}|[0-9])"
)
SEL_REGEX_PATTERN = r"(\[[^\]]+]|\.)"
GEO_REGEX_PATTERN = (
    r"(H[e,f,g,s,o]?|"
    r"L[i,v,a,r,u]|"
    r"B[e,r,a,i,h,k]?|"
    r"C[l,a,r,o,u,d,s,n,e,m,f]?|"
    r"N[e,a,i,b,h,d,o,p]?|"
    r"O[s,g]?|S[i,c,e,r,n,m,b,g]?|"
    r"K[r]?|T[i,c,e,a,l,b,h,m,s]|"
    r"G[a,e,d]|R[b,u,h,e,n,a,f,g]|"
    r"Yb?|Z[n,r]|P[t,o,d,r,a,u,b,m]?|"
    r"F[e,r,l,m]?|M[g,n,o,t,c,d]|"
    r"A[l,r,s,g,u,t,c,m]|I[n,r]?|"
    r"W|X[e]|E[u,r,s]|U|D[b,s,y]|"
    r"-|.| |[0-9])"
)
AA_REGEX_PATTERN = r"(A|B|C|D|E|F|G|H|I|K|L|M|N|P|Q|R|S|T|V|W|Y|Z|-|.)"
smi_regex = re.compile(SMI_REGEX_PATTERN)
sel_regex = re.compile(SEL_REGEX_PATTERN)
geo_regex = re.compile(GEO_REGEX_PATTERN)
aa_regex = re.compile(AA_REGEX_PATTERN)


def load_vocab(
    vocab_file: Union[str, Path]
) -> Dict[str, Union[int, List[str], Dict[str, int]]]:
    """
    Load vocabulary from source file.

    :param vocab_file: file that contains vocabulary
    :type vocab_file: str | pathlib.Path
    :return: {"vocab_keys": vocab_keys, "vocab_count": vocab_count, "vocab_dict": vocab_dict}
    :rtype: dict
    """
    with open(vocab_file, "r", encoding="utf-8") as f:
        lines = f.read().strip()
    vocab_keys = lines.split("\n")
    vocab_count = len(vocab_keys)
    vocab_dict = dict(zip(vocab_keys, range(vocab_count)))
    return {
        "vocab_keys": vocab_keys,
        "vocab_count": vocab_count,
        "vocab_dict": vocab_dict,
    }


_DEFUALT_VOCAB = load_vocab(__filedir__ / "vocab.txt")
VOCAB_KEYS: List[str] = _DEFUALT_VOCAB["vocab_keys"]
VOCAB_DICT: Dict[str, int] = _DEFUALT_VOCAB["vocab_dict"]
VOCAB_COUNT: int = _DEFUALT_VOCAB["vocab_count"]
AA_VOCAB_KEYS = (
    VOCAB_KEYS[0:3] + "A B C D E F G H I K L M N P Q R S T V W Y Z - .".split()
)
AA_VOCAB_COUNT = len(AA_VOCAB_KEYS)
AA_VOCAB_DICT = dict(zip(AA_VOCAB_KEYS, range(AA_VOCAB_COUNT)))
GEO_VOCAB_KEYS = VOCAB_KEYS[0:3] + [" "] + VOCAB_KEYS[22:150] + [".", "-"]
GEO_VOCAB_COUNT = len(GEO_VOCAB_KEYS)
GEO_VOCAB_DICT = dict(zip(GEO_VOCAB_KEYS, range(GEO_VOCAB_COUNT)))


def smiles2vec(smiles: str) -> List[int]:
    """
    SMILES tokenisation using a dataset-independent regex pattern.

    :param smiles: SMILES string
    :type smiles: str
    :return: tokens w/o `<start>` and `<end>`
    :rtype: list
    """
    tokens = [token for token in smi_regex.findall(smiles)]
    return [VOCAB_DICT[token] for token in tokens]


def geo2vec(geo2seq: str) -> List[int]:
    """
    Geo2Seq tokenisation using a dataset-independent regex pattern.

    :param geo2seq: Geo2Seq string
    :type geo2seq: str
    :return: tokens w/o `<start>` and `<end>`
    :rtype: list
    """
    tokens = [token for token in geo_regex.findall(geo2seq)]
    return [GEO_VOCAB_DICT[token] for token in tokens]


def aa2vec(aa_seq: str) -> List[int]:
    """
    Protein sequence tokenisation using a dataset-independent regex pattern.

    :param aa_seq: protein (amino acid) sequence
    :type aa_seq: str
    :return: tokens w/o `<start>` and `<end>`
    :rtype: list
    """
    tokens = [token for token in aa_regex.findall(aa_seq)]
    return [AA_VOCAB_DICT[token] for token in tokens]


def split_selfies(selfies: str) -> List[str]:
    """
    SELFIES tokenisation.

    :param selfies: SELFIES string
    :type selfies: str
    :return: SELFIES vocab
    :rtype: list
    """
    return [token for token in sel_regex.findall(selfies)]


def smiles2token(smiles: str) -> Tensor:
    # start token: <start> = 1; end token: <esc> = 2
    return torch.tensor([1] + smiles2vec(smiles) + [2], dtype=torch.long)


def geo2token(geo2seq: str) -> Tensor:
    # start token: <start> = 1; end token: <esc> = 2
    return torch.tensor([1] + geo2vec(geo2seq) + [2], dtype=torch.long)


def aa2token(aa_seq: str) -> Tensor:
    # start token: <start> = 1; end token: <end> = 2
    return torch.tensor([1] + aa2vec(aa_seq) + [2], dtype=torch.long)


def collate(batch: List[Dict[str, Tensor]]) -> Dict[str, Tensor]:
    """
    Padding the data in one batch into the same size.\n
    Should be passed to `~torch.utils.data.DataLoader` as `DataLoader(collate_fn=collate, ...)`.

    :param batch: a list of data (one batch)
    :type batch: list
    :return: batched {"token": token} or {"token": token, "value": value}
    :rtype: dict
    """
    token = [i["token"] for i in batch]
    if "MAX_PADDING_LENGTH" in os.environ:
        lmax = int(os.environ["MAX_PADDING_LENGTH"])
    else:
        lmax = max([len(w) for w in token])
    token = torch.cat(
        [F.pad(i, (0, lmax - len(i)), value=0)[None, :] for i in token], 0
    )
    out_dict = {"token": token}
    if "value" in batch[0]:
        out_dict["value"] = torch.cat([i["value"][None, :] for i in batch], 0)
    if "mask" in batch[0]:
        mask = [i["mask"] for i in batch]
        out_dict["mask"] = torch.cat(
            [F.pad(i, (0, lmax - len(i)), value=0)[None, :] for i in mask], 0
        )
    return out_dict


class CSVData(Dataset):
    def __init__(self, file: Union[str, Path]):
        """
        Define dataset stored in CSV file.

        :param file: dataset file name <file>
        :type file: str | pathlib.Path
        """
        super().__init__()
        with open(file, "r") as db:
            self.data = db.readlines()
        self.header_idx_dict: Dict[str, List[int]] = {}
        for key, i in enumerate(self.data[0].replace("\n", "").split(",")):
            if i in self.header_idx_dict:
                self.header_idx_dict[i].append(key)
            else:
                self.header_idx_dict[i] = [key]
        self.mapping = lambda x: x

    def __len__(self) -> int:
        return len(self.data) - 1

    def __getitem__(self, idx: Union[int, Tensor]) -> Dict[str, Tensor]:
        if torch.is_tensor(idx):
            idx = idx.tolist()
        # valid `idx` should start from 1 instead of 0
        data: List[str] = self.data[idx + 1].replace("\n", "").split(",")
        data_dict: Dict[str, List[str]] = {}
        for key in self.header_idx_dict:
            data_dict[key] = [data[i] for i in self.header_idx_dict[key]]
        return self.mapping(data_dict)

    def map(self, mapping: Callable[[Dict[str, List[str]]], Any]) -> None:
        """
        Pass a customised mapping function to transform the data entities to tensors.

        e.g.
        ```python
        import torch
        from bayesianflow_for_chem.data import smiles2token, CSVData


        def encode(x):
            return {
                "token": smiles2token(".".join(x["smiles"])),
                "value": torch.tensor([float(i) if i != "" else torch.inf for i in x["value"]]),
            }

        dataset = CSVData(...)
        dataset.map(encode)
        ```

        :param mapping: customised mapping function
        :type mapping: callable
        :return:
        :rtype: None
        """
        self.mapping = mapping


if __name__ == "__main__":
    ...