bayesianflow-for-chem 1.2.0__py3-none-any.whl

This diff shows the content of publicly available package versions released to a supported registry. It is provided for informational purposes only and reflects changes between package versions as they appear in the public registry.

Potentially problematic release.

This version of bayesianflow-for-chem might be problematic.

bayesianflow_for_chem/__init__.py

@@ -0,0 +1,11 @@
+ # -*- coding: utf-8 -*-
+ # Author: Nianze A. Tao (Omozawa Sueno)
+ """
+ ChemBFN package.
+ """
+ from . import data, tool, train, scorer
+ from .model import ChemBFN, MLP
+
+ __all__ = ["data", "tool", "train", "scorer", "ChemBFN", "MLP"]
+ __version__ = "1.2.0"
+ __author__ = "Nianze A. Tao (Omozawa Sueno)"
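The top-level module only re-exports the package's entry points. A minimal sanity check after installing the wheel (only the imports and metadata are exercised; the `ChemBFN`/`MLP` constructor signatures are not shown in this diff):

```python
import bayesianflow_for_chem

# The wheel installs the package as `bayesianflow_for_chem`.
print(bayesianflow_for_chem.__version__)  # "1.2.0"

# Re-exported names, per __all__:
from bayesianflow_for_chem import ChemBFN, MLP, data, tool, train, scorer
```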
bayesianflow_for_chem/data.py

@@ -0,0 +1,250 @@
+ # -*- coding: utf-8 -*-
+ # Author: Nianze A. TAO (Omozawa SUENO)
+ """
+ Tokenise SMILES/SAFE/SELFIES/GEO2SEQ/protein-sequence strings.
+ """
+ import os
+ import re
+ from pathlib import Path
+ from typing import Any, List, Dict, Union, Callable
+ import torch
+ import torch.nn.functional as F
+ from torch import Tensor
+ from torch.utils.data import Dataset
+
+ __filedir__ = Path(__file__).parent
+
+ SMI_REGEX_PATTERN = (
+     r"(\[|\]|H[e,f,g,s,o]?|"
+     r"L[i,v,a,r,u]|"
+     r"B[e,r,a,i,h,k]?|"
+     r"C[l,a,r,o,u,d,s,n,e,m,f]?|"
+     r"N[e,a,i,b,h,d,o,p]?|"
+     r"O[s,g]?|S[i,c,e,r,n,m,b,g]?|"
+     r"K[r]?|T[i,c,e,a,l,b,h,m,s]|"
+     r"G[a,e,d]|R[b,u,h,e,n,a,f,g]|"
+     r"Yb?|Z[n,r]|P[t,o,d,r,a,u,b,m]?|"
+     r"F[e,r,l,m]?|M[g,n,o,t,c,d]|"
+     r"A[l,r,s,g,u,t,c,m]|I[n,r]?|"
+     r"W|X[e]|E[u,r,s]|U|D[b,s,y]|"
+     r"b|c|n|o|s|p|"
+     r"\(|\)|\.|=|#|-|\+|\\|\/|:|"
+     r"~|@|\?|>>?|\*|\$|\%[0-9]{2}|[0-9])"
+ )
+ SEL_REGEX_PATTERN = r"(\[[^\]]+]|\.)"
+ GEO_REGEX_PATTERN = (
+     r"(H[e,f,g,s,o]?|"
+     r"L[i,v,a,r,u]|"
+     r"B[e,r,a,i,h,k]?|"
+     r"C[l,a,r,o,u,d,s,n,e,m,f]?|"
+     r"N[e,a,i,b,h,d,o,p]?|"
+     r"O[s,g]?|S[i,c,e,r,n,m,b,g]?|"
+     r"K[r]?|T[i,c,e,a,l,b,h,m,s]|"
+     r"G[a,e,d]|R[b,u,h,e,n,a,f,g]|"
+     r"Yb?|Z[n,r]|P[t,o,d,r,a,u,b,m]?|"
+     r"F[e,r,l,m]?|M[g,n,o,t,c,d]|"
+     r"A[l,r,s,g,u,t,c,m]|I[n,r]?|"
+     r"W|X[e]|E[u,r,s]|U|D[b,s,y]|"
+     r"-|\.| |[0-9])"
+ )
+ AA_REGEX_PATTERN = r"(A|B|C|D|E|F|G|H|I|K|L|M|N|P|Q|R|S|T|V|W|Y|Z|-|\.)"
+ smi_regex = re.compile(SMI_REGEX_PATTERN)
+ sel_regex = re.compile(SEL_REGEX_PATTERN)
+ geo_regex = re.compile(GEO_REGEX_PATTERN)
+ aa_regex = re.compile(AA_REGEX_PATTERN)
+
+
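These regexes do the heavy lifting: a single `findall` pass splits a string into element symbols, ring-closure digits, and bond/branch punctuation. A quick illustration, assuming the wheel is installed so `bayesianflow_for_chem.data` is importable:

```python
from bayesianflow_for_chem.data import smi_regex, sel_regex

# Aspirin: one token per element symbol, digit, or punctuation mark.
print(smi_regex.findall("CC(=O)Oc1ccccc1C(=O)O"))
# ['C', 'C', '(', '=', 'O', ')', 'O', 'c', '1', 'c', 'c', 'c',
#  'c', 'c', '1', 'C', '(', '=', 'O', ')', 'O']

# SELFIES strings split into bracketed tokens.
print(sel_regex.findall("[C][C][Branch1][C][O]"))
# ['[C]', '[C]', '[Branch1]', '[C]', '[O]']
```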
+ def load_vocab(
+     vocab_file: Union[str, Path]
+ ) -> Dict[str, Union[int, List[str], Dict[str, int]]]:
+     """
+     Load vocabulary from source file.
+
+     :param vocab_file: file that contains vocabulary
+     :type vocab_file: str | pathlib.Path
+     :return: {"vocab_keys": vocab_keys, "vocab_count": vocab_count, "vocab_dict": vocab_dict}
+     :rtype: dict
+     """
+     with open(vocab_file, "r", encoding="utf-8") as f:
+         lines = f.read().strip()
+     vocab_keys = lines.split("\n")
+     vocab_count = len(vocab_keys)
+     vocab_dict = dict(zip(vocab_keys, range(vocab_count)))
+     return {
+         "vocab_keys": vocab_keys,
+         "vocab_count": vocab_count,
+         "vocab_dict": vocab_dict,
+     }
+
+
+ _DEFAULT_VOCAB = load_vocab(__filedir__ / "vocab.txt")
+ VOCAB_KEYS: List[str] = _DEFAULT_VOCAB["vocab_keys"]
+ VOCAB_DICT: Dict[str, int] = _DEFAULT_VOCAB["vocab_dict"]
+ VOCAB_COUNT: int = _DEFAULT_VOCAB["vocab_count"]
+ AA_VOCAB_KEYS = (
+     VOCAB_KEYS[0:3] + "A B C D E F G H I K L M N P Q R S T V W Y Z - .".split()
+ )
+ AA_VOCAB_COUNT = len(AA_VOCAB_KEYS)
+ AA_VOCAB_DICT = dict(zip(AA_VOCAB_KEYS, range(AA_VOCAB_COUNT)))
+ GEO_VOCAB_KEYS = VOCAB_KEYS[0:3] + [" "] + VOCAB_KEYS[22:150] + [".", "-"]
+ GEO_VOCAB_COUNT = len(GEO_VOCAB_KEYS)
+ GEO_VOCAB_DICT = dict(zip(GEO_VOCAB_KEYS, range(GEO_VOCAB_COUNT)))
+
+
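`load_vocab` reads one token per line and maps each token to its line index. The bundled `vocab.txt` is not shown in this diff; the sketch below assumes a hypothetical file whose first three lines are the special tokens (`<pad>`, `<start>`, `<end>`), which is consistent with the hard-coded indices 0-2 used elsewhere in the module but is not confirmed by the diff:

```python
from pathlib import Path
from bayesianflow_for_chem.data import load_vocab

# Hypothetical vocabulary file: one token per line, id = line number.
Path("toy_vocab.txt").write_text(
    "<pad>\n<start>\n<end>\nC\nN\nO\n", encoding="utf-8"
)

vocab = load_vocab("toy_vocab.txt")
print(vocab["vocab_count"])      # 6
print(vocab["vocab_dict"]["C"])  # 3
print(vocab["vocab_keys"][0:3])  # ['<pad>', '<start>', '<end>']
```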
+ def smiles2vec(smiles: str) -> List[int]:
+     """
+     SMILES tokenisation using a dataset-independent regex pattern.
+
+     :param smiles: SMILES string
+     :type smiles: str
+     :return: tokens w/o `<start>` and `<end>`
+     :rtype: list
+     """
+     tokens = smi_regex.findall(smiles)
+     return [VOCAB_DICT[token] for token in tokens]
+
+
+ def geo2vec(geo2seq: str) -> List[int]:
+     """
+     Geo2Seq tokenisation using a dataset-independent regex pattern.
+
+     :param geo2seq: Geo2Seq string
+     :type geo2seq: str
+     :return: tokens w/o `<start>` and `<end>`
+     :rtype: list
+     """
+     tokens = geo_regex.findall(geo2seq)
+     return [GEO_VOCAB_DICT[token] for token in tokens]
+
+
+ def aa2vec(aa_seq: str) -> List[int]:
+     """
+     Protein sequence tokenisation using a dataset-independent regex pattern.
+
+     :param aa_seq: protein (amino acid) sequence
+     :type aa_seq: str
+     :return: tokens w/o `<start>` and `<end>`
+     :rtype: list
+     """
+     tokens = aa_regex.findall(aa_seq)
+     return [AA_VOCAB_DICT[token] for token in tokens]
+
+
+ def split_selfies(selfies: str) -> List[str]:
+     """
+     SELFIES tokenisation.
+
+     :param selfies: SELFIES string
+     :type selfies: str
+     :return: SELFIES tokens
+     :rtype: list
+     """
+     return sel_regex.findall(selfies)
+
+
+ def smiles2token(smiles: str) -> Tensor:
+     # start token: <start> = 1; end token: <end> = 2
+     return torch.tensor([1] + smiles2vec(smiles) + [2], dtype=torch.long)
+
+
+ def geo2token(geo2seq: str) -> Tensor:
+     # start token: <start> = 1; end token: <end> = 2
+     return torch.tensor([1] + geo2vec(geo2seq) + [2], dtype=torch.long)
+
+
+ def aa2token(aa_seq: str) -> Tensor:
+     # start token: <start> = 1; end token: <end> = 2
+     return torch.tensor([1] + aa2vec(aa_seq) + [2], dtype=torch.long)
+
+
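The `*2token` wrappers prepend the start id and append the end id, returning a `torch.long` tensor ready for batching. A minimal sketch; the individual token ids depend on the bundled `vocab.txt` (not shown in this diff), so only vocabulary-independent properties are printed:

```python
from bayesianflow_for_chem.data import smiles2token

token = smiles2token("CC(=O)O")  # acetic acid: 7 SMILES tokens
print(token.dtype)                        # torch.int64
print(token[0].item(), token[-1].item())  # 1 2  (<start> and <end>)
print(token.shape)                        # torch.Size([9]): 7 tokens + 2 specials
```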
+ def collate(batch: List[Dict[str, Tensor]]) -> Dict[str, Tensor]:
+     """
+     Pad the data in one batch to the same size.\n
+     Should be passed to `~torch.utils.data.DataLoader` as `DataLoader(collate_fn=collate, ...)`.
+
+     :param batch: a list of data (one batch)
+     :type batch: list
+     :return: batched {"token": token} with optional "value" and "mask" entries
+     :rtype: dict
+     """
+     token = [i["token"] for i in batch]
+     if "MAX_PADDING_LENGTH" in os.environ:
+         lmax = int(os.environ["MAX_PADDING_LENGTH"])
+     else:
+         lmax = max(len(w) for w in token)
+     token = torch.cat(
+         [F.pad(i, (0, lmax - len(i)), value=0)[None, :] for i in token], 0
+     )
+     out_dict = {"token": token}
+     if "value" in batch[0]:
+         out_dict["value"] = torch.cat([i["value"][None, :] for i in batch], 0)
+     if "mask" in batch[0]:
+         mask = [i["mask"] for i in batch]
+         out_dict["mask"] = torch.cat(
+             [F.pad(i, (0, lmax - len(i)), value=0)[None, :] for i in mask], 0
+         )
+     return out_dict
+
+
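`collate` right-pads every sequence in the batch with the pad id 0, either to the longest sequence in the batch or, when the `MAX_PADDING_LENGTH` environment variable is set, to that fixed length. A sketch of both modes with hand-built batches (the token ids here are arbitrary placeholders):

```python
import os
import torch
from bayesianflow_for_chem.data import collate

batch = [
    {"token": torch.tensor([1, 5, 2])},
    {"token": torch.tensor([1, 5, 5, 5, 2])},
]

# Dynamic padding: pads to the longest sequence in the batch.
print(collate(batch)["token"].shape)  # torch.Size([2, 5])

# Fixed-length padding via the environment variable.
os.environ["MAX_PADDING_LENGTH"] = "8"
print(collate(batch)["token"].shape)  # torch.Size([2, 8])
del os.environ["MAX_PADDING_LENGTH"]
```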
+ class CSVData(Dataset):
+     def __init__(self, file: Union[str, Path]):
+         """
+         Define a dataset stored in a CSV file.
+
+         :param file: dataset file name
+         :type file: str | pathlib.Path
+         """
+         super().__init__()
+         with open(file, "r") as db:
+             self.data = db.readlines()
+         self.header_idx_dict: Dict[str, List[int]] = {}
+         for idx, header in enumerate(self.data[0].replace("\n", "").split(",")):
+             if header in self.header_idx_dict:
+                 self.header_idx_dict[header].append(idx)
+             else:
+                 self.header_idx_dict[header] = [idx]
+         self.mapping = lambda x: x
+
+     def __len__(self) -> int:
+         return len(self.data) - 1
+
+     def __getitem__(self, idx: Union[int, Tensor]) -> Dict[str, Tensor]:
+         if torch.is_tensor(idx):
+             idx = idx.tolist()
+         # line 0 holds the header, so data row `idx` lives at line `idx + 1`
+         data: List[str] = self.data[idx + 1].replace("\n", "").split(",")
+         data_dict: Dict[str, List[str]] = {}
+         for key in self.header_idx_dict:
+             data_dict[key] = [data[i] for i in self.header_idx_dict[key]]
+         return self.mapping(data_dict)
+
+     def map(self, mapping: Callable[[Dict[str, List[str]]], Any]) -> None:
+         """
+         Pass a customised mapping function to transform the data entities to tensors.
+
+         e.g.
+         ```python
+         import torch
+         from bayesianflow_for_chem.data import smiles2token, CSVData
+
+
+         def encode(x):
+             return {
+                 "token": smiles2token(".".join(x["smiles"])),
+                 "value": torch.tensor([float(i) if i != "" else torch.inf for i in x["value"]]),
+             }
+
+         dataset = CSVData(...)
+         dataset.map(encode)
+         ```
+
+         :param mapping: customised mapping function
+         :type mapping: callable
+         :return:
+         :rtype: None
+         """
+         self.mapping = mapping
+
+
+ if __name__ == "__main__":
+     ...
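End to end, the intended flow appears to be: wrap a CSV file in `CSVData`, attach an encoding function via `map` that tokenises the relevant columns, and hand `collate` to a `DataLoader`. A minimal runnable sketch under that assumption; the file name and the `smiles`/`value` column names are illustrative, not prescribed by the package:

```python
import torch
from torch.utils.data import DataLoader
from bayesianflow_for_chem.data import CSVData, collate, smiles2token

# Illustrative two-column CSV file; the second row has no label.
with open("toy.csv", "w") as f:
    f.write("smiles,value\nCCO,0.5\nc1ccccc1,\n")

def encode(x):
    return {
        "token": smiles2token(".".join(x["smiles"])),
        # Missing labels become inf, as in the `map` docstring example.
        "value": torch.tensor(
            [float(i) if i != "" else torch.inf for i in x["value"]]
        ),
    }

dataset = CSVData("toy.csv")
dataset.map(encode)
loader = DataLoader(dataset, batch_size=2, collate_fn=collate)

for batch in loader:
    print(batch["token"].shape)  # torch.Size([2, 10]): padded to longest row
    print(batch["value"])        # values 0.5 and inf, shape [2, 1]
```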