rcsb-embedding-model 0.0.5__tar.gz → 0.0.6__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {rcsb_embedding_model-0.0.5 → rcsb_embedding_model-0.0.6}/PKG-INFO +3 -1
- {rcsb_embedding_model-0.0.5 → rcsb_embedding_model-0.0.6}/pyproject.toml +9 -3
- rcsb_embedding_model-0.0.6/src/rcsb_embedding_model/cli/args_utils.py +11 -0
- rcsb_embedding_model-0.0.6/src/rcsb_embedding_model/cli/inference.py +149 -0
- rcsb_embedding_model-0.0.6/src/rcsb_embedding_model/dataset/esm_prot_from_csv.py +91 -0
- rcsb_embedding_model-0.0.6/src/rcsb_embedding_model/dataset/residue_embedding_from_csv.py +32 -0
- rcsb_embedding_model-0.0.6/src/rcsb_embedding_model/inference/chain_inference.py +50 -0
- rcsb_embedding_model-0.0.6/src/rcsb_embedding_model/inference/esm_inference.py +50 -0
- rcsb_embedding_model-0.0.6/src/rcsb_embedding_model/modules/chain_module.py +16 -0
- rcsb_embedding_model-0.0.6/src/rcsb_embedding_model/modules/esm_module.py +24 -0
- rcsb_embedding_model-0.0.6/src/rcsb_embedding_model/rcsb_structure_embedding.py +128 -0
- rcsb_embedding_model-0.0.6/src/rcsb_embedding_model/types/api_types.py +29 -0
- rcsb_embedding_model-0.0.6/src/rcsb_embedding_model/utils/data.py +47 -0
- rcsb_embedding_model-0.0.6/src/rcsb_embedding_model/utils/model.py +29 -0
- rcsb_embedding_model-0.0.6/src/rcsb_embedding_model/utils/structure_parser.py +51 -0
- rcsb_embedding_model-0.0.6/src/rcsb_embedding_model/writer/batch_writer.py +113 -0
- rcsb_embedding_model-0.0.5/tests/test_model.py → rcsb_embedding_model-0.0.6/tests/test_embedding_model.py +3 -2
- rcsb_embedding_model-0.0.5/src/rcsb_embedding_model/rcsb_structure_embedding.py +0 -171
- {rcsb_embedding_model-0.0.5 → rcsb_embedding_model-0.0.6}/.gitignore +0 -0
- {rcsb_embedding_model-0.0.5 → rcsb_embedding_model-0.0.6}/LICENSE.md +0 -0
- {rcsb_embedding_model-0.0.5 → rcsb_embedding_model-0.0.6}/README.md +0 -0
- {rcsb_embedding_model-0.0.5 → rcsb_embedding_model-0.0.6}/assets/embedding-model-architecture.png +0 -0
- {rcsb_embedding_model-0.0.5 → rcsb_embedding_model-0.0.6}/examples/esm_embeddings.py +0 -0
- {rcsb_embedding_model-0.0.5 → rcsb_embedding_model-0.0.6}/src/rcsb_embedding_model/__init__.py +0 -0
- {rcsb_embedding_model-0.0.5 → rcsb_embedding_model-0.0.6}/src/rcsb_embedding_model/model/layers.py +0 -0
- {rcsb_embedding_model-0.0.5 → rcsb_embedding_model-0.0.6}/src/rcsb_embedding_model/model/residue_embedding_aggregator.py +0 -0
- {rcsb_embedding_model-0.0.5 → rcsb_embedding_model-0.0.6}/tests/resources/1acb.cif +0 -0
{rcsb_embedding_model-0.0.5 → rcsb_embedding_model-0.0.6}/PKG-INFO
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: rcsb-embedding-model
-Version: 0.0.5
+Version: 0.0.6
 Summary: Protein Embedding Model for Structure Search
 Project-URL: Homepage, https://github.com/rcsb/rcsb-embedding-model
 Project-URL: Issues, https://github.com/rcsb/rcsb-embedding-model/issues
@@ -11,7 +11,9 @@ Classifier: Operating System :: OS Independent
 Classifier: Programming Language :: Python :: 3
 Requires-Python: >=3.10
 Requires-Dist: esm>=3.2.0
+Requires-Dist: lightning>=2.5.0
 Requires-Dist: torch>=2.2.0
+Requires-Dist: typer>=0.15.0
 Description-Content-Type: text/markdown
 
 # RCSB Embedding Model: A Deep Learning Approach for 3D Structure Embeddings
{rcsb_embedding_model-0.0.5 → rcsb_embedding_model-0.0.6}/pyproject.toml
@@ -1,6 +1,6 @@
 [project]
 name = "rcsb-embedding-model"
-version = "0.0.5"
+version = "0.0.6"
 authors = [
   { name="Joan Segura", email="joan.segura@rcsb.org" },
 ]
@@ -15,8 +15,11 @@ license = "BSD-3-Clause"
 license-files = ["LICEN[CS]E*"]
 dependencies=[
     "esm >= 3.2.0",
-    "torch >= 2.2.0"
+    "torch >= 2.2.0",
+    "lightning >= 2.5.0",
+    "typer >= 0.15.0"
 ]
+
 [project.urls]
 Homepage = "https://github.com/rcsb/rcsb-embedding-model"
 Issues = "https://github.com/rcsb/rcsb-embedding-model/issues"
@@ -26,4 +29,7 @@ requires = [
     "hatchling >= 1.14.1"
 ]
 
-build-backend = "hatchling.build"
+build-backend = "hatchling.build"
+
+[project.scripts]
+inference = "rcsb_embedding_model.cli.inference:app"
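The new [project.scripts] table exposes a console command named inference, backed by the Typer app added below. The following is a minimal sketch of driving that app in-process with Typer's test runner; the manifest chains.csv and the out directory are hypothetical names, not part of the release:

    # Sketch only: invoke the new `inference` Typer app in-process.
    # "chains.csv" and "out" are hypothetical paths, not shipped with the package.
    from typer.testing import CliRunner

    from rcsb_embedding_model.cli.inference import app

    runner = CliRunner()
    result = runner.invoke(app, [
        "chain-embedding",
        "--src-file", "chains.csv",   # two-column manifest: tensor file | output name
        "--output-path", "out",       # directory where per-chain CSVs are written
    ])
    print(result.exit_code, result.output)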
rcsb_embedding_model-0.0.6/src/rcsb_embedding_model/cli/inference.py (new file)
@@ -0,0 +1,149 @@
+from typing import Annotated, List
+
+import typer
+
+from rcsb_embedding_model.cli.args_utils import arg_devices
+from rcsb_embedding_model.types.api_types import SrcFormat, Accelerator, SrcLocation
+
+app = typer.Typer()
+
+
+@app.command(name="residue-embedding")
+def residue_embedding(
+        src_file: Annotated[typer.FileText, typer.Option(
+            exists=True,
+            file_okay=True,
+            dir_okay=False,
+            resolve_path=True,
+            help='CSV file 3 columns: Structure File | Chain Id (asym_i for cif files) | Output file name.'
+        )],
+        src_location: Annotated[SrcLocation, typer.Option(
+            help='Source input location.'
+        )] = SrcLocation.local,
+        src_format: Annotated[SrcFormat, typer.Option(
+            help='Structure file format.'
+        )] = SrcFormat.mmcif,
+        batch_size: Annotated[int, typer.Option(
+            help='Number of samples processed together in one iteration.'
+        )] = 1,
+        num_workers: Annotated[int, typer.Option(
+            help='Number of subprocesses to use for data loading.'
+        )] = 0,
+        num_nodes: Annotated[int, typer.Option(
+            help='Number of nodes to use for inference.'
+        )] = 1,
+        accelerator: Annotated[Accelerator, typer.Option(
+            help='Device used for inference.'
+        )] = Accelerator.auto,
+        devices: Annotated[List[str], typer.Option(
+            help='The devices to use. Can be set to a positive number or "auto". Repeat this argument to indicate multiple indices of devices. "auto" for automatic selection based on the chosen accelerator.'
+        )] = tuple(['auto']),
+        output_path: Annotated[typer.FileText, typer.Option(
+            exists=True,
+            file_okay=False,
+            dir_okay=True,
+            resolve_path=True,
+            help='Output path to store predictions.'
+        )] = None
+):
+    from rcsb_embedding_model.inference.esm_inference import predict
+    predict(
+        csv_file=src_file,
+        src_location=src_location,
+        src_format=src_format,
+        batch_size=batch_size,
+        num_workers=num_workers,
+        num_nodes=num_nodes,
+        accelerator=accelerator,
+        devices=arg_devices(devices),
+        out_path=output_path
+    )
+
+
+@app.command(name="structure-embedding")
+def structure_embedding(
+        src_file: Annotated[typer.FileText, typer.Option(
+            exists=True,
+            file_okay=True,
+            dir_okay=False,
+            resolve_path=True,
+            help='CSV file 3 columns: Structure File | Chain Id (asym_i for cif files) | Output file name.'
+        )],
+        src_location: Annotated[SrcLocation, typer.Option(
+            help='Source input location.'
+        )] = SrcLocation.local,
+        src_format: Annotated[SrcFormat, typer.Option(
+            help='Structure file format.'
+        )] = SrcFormat.mmcif,
+        batch_size: Annotated[int, typer.Option(
+            help='Number of samples processed together in one iteration.'
+        )] = 1,
+        num_workers: Annotated[int, typer.Option(
+            help='Number of subprocesses to use for data loading.'
+        )] = 0,
+        num_nodes: Annotated[int, typer.Option(
+            help='Number of nodes to use for inference.'
+        )] = 1,
+        accelerator: Annotated[Accelerator, typer.Option(
+            help='Device used for inference.'
+        )] = Accelerator.auto,
+        devices: Annotated[List[str], typer.Option(
+            help='The devices to use. Can be set to a positive number or "auto". Repeat this argument to indicate multiple indices of devices. "auto" for automatic selection based on the chosen accelerator.'
+        )] = tuple(['auto']),
+        output_path: Annotated[typer.FileText, typer.Option(
+            exists=True,
+            file_okay=False,
+            dir_okay=True,
+            resolve_path=True,
+            help='Output path to store predictions.'
+        )] = None
+):
+    pass
+
+
+@app.command(name="chain-embedding")
+def chain_embedding(
+        src_file: Annotated[typer.FileText, typer.Option(
+            exists=True,
+            file_okay=True,
+            dir_okay=False,
+            resolve_path=True,
+            help='CSV file 2 columns: Residue Embedding Tensor File | Output file name.'
+        )],
+        batch_size: Annotated[int, typer.Option(
+            help='Number of samples processed together in one iteration.'
+        )] = 1,
+        num_workers: Annotated[int, typer.Option(
+            help='Number of subprocesses to use for data loading.'
+        )] = 0,
+        num_nodes: Annotated[int, typer.Option(
+            help='Number of nodes to use for inference.'
+        )] = 1,
+        accelerator: Annotated[Accelerator, typer.Option(
+            help='Device used for inference.'
+        )] = Accelerator.auto,
+        devices: Annotated[List[str], typer.Option(
+            help='The devices to use. Can be set to a positive number or "auto". Repeat this argument to indicate multiple indices of devices. "auto" for automatic selection based on the chosen accelerator.'
+        )] = tuple(['auto']),
+        output_path: Annotated[typer.FileText, typer.Option(
+            exists=True,
+            file_okay=False,
+            dir_okay=True,
+            resolve_path=True,
+            help='Output path to store predictions.'
+        )] = None
+):
+    from rcsb_embedding_model.inference.chain_inference import predict
+    predict(
+        csv_file=src_file,
+        batch_size=batch_size,
+        num_workers=num_workers,
+        num_nodes=num_nodes,
+        accelerator=accelerator,
+        devices=arg_devices(devices),
+        out_path=output_path
+    )
+
+
+if __name__ == "__main__":
+    app()
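Each command consumes a header-less CSV manifest whose columns match the help text above. A hedged sketch of producing the three-column manifest for residue-embedding with pandas; every path and name here is hypothetical:

    # Sketch only: build a header-less manifest for `inference residue-embedding`.
    # Columns: structure file | chain id (asym_id for CIF) | output name.
    import pandas as pd

    rows = [
        ("structures/1acb.cif", "A", "1acb.A"),
        ("structures/4hhb.cif", "B", "4hhb.B"),
    ]
    pd.DataFrame(rows).to_csv("residue_manifest.csv", header=False, index=False)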
rcsb_embedding_model-0.0.6/src/rcsb_embedding_model/dataset/esm_prot_from_csv.py (new file)
@@ -0,0 +1,91 @@
+import argparse
+import os
+
+import torch
+from biotite.structure import chain_iter
+from esm.models.esm3 import ESM3
+from esm.sdk.api import ESMProtein, SamplingConfig
+from esm.utils.constants.models import ESM3_OPEN_SMALL
+from esm.utils.structure.protein_chain import ProteinChain
+from torch.utils.data import Dataset, DataLoader
+import pandas as pd
+
+from rcsb_embedding_model.types.api_types import SrcFormat, SrcLocation
+from rcsb_embedding_model.utils.data import stringio_from_url
+from rcsb_embedding_model.utils.structure_parser import get_structure_from_src
+
+
+class EsmProtFromCsv(Dataset):
+
+    MIN_RES = 10
+    STREAM_ATTR = 'stream'
+    CH_ATTR = 'chain_id'
+    NAME_ATTR = 'name'
+
+    COLUMNS = [STREAM_ATTR, CH_ATTR, NAME_ATTR]
+
+    def __init__(
+            self,
+            csv_file,
+            src_location=SrcLocation.local,
+            src_format=SrcFormat.mmcif,
+    ):
+        super().__init__()
+        self.src_location = src_location
+        self.src_format = src_format
+        self.data = pd.DataFrame()
+        self.__load_stream(csv_file)
+
+    def __load_stream(self, stream_list):
+        self.data = pd.read_csv(
+            stream_list,
+            header=None,
+            index_col=None,
+            names=EsmProtFromCsv.COLUMNS
+        )
+
+    def __len__(self):
+        return len(self.data)
+
+    def __getitem__(self, idx):
+        structure_src = self.data.loc[idx, EsmProtFromCsv.STREAM_ATTR]
+        chain_id = self.data.loc[idx, EsmProtFromCsv.CH_ATTR]
+        name = self.data.loc[idx, EsmProtFromCsv.NAME_ATTR]
+        structure = get_structure_from_src(
+            structure_src if self.src_location == SrcLocation.local else stringio_from_url(structure_src),
+            src_format=self.src_format,
+            chain_id=chain_id
+        )
+        for atom_ch in chain_iter(structure):
+            protein_chain = ProteinChain.from_atomarray(atom_ch)
+            return ESMProtein.from_protein_chain(protein_chain), name
+
+
+if __name__ == '__main__':
+
+    parser = argparse.ArgumentParser()
+    parser.add_argument('--file_list', type=argparse.FileType('r'), required=True)
+    args = parser.parse_args()
+
+    dataset = EsmProtFromCsv(
+        args.file_list
+    )
+
+    esm3 = ESM3.from_pretrained(
+        ESM3_OPEN_SMALL,
+        torch.device("cpu")
+    )
+
+    dataloader = DataLoader(
+        dataset,
+        batch_size=2,
+        collate_fn=lambda _: _
+    )
+
+    for _batch in dataloader:
+        for esm_prot, name in _batch:
+            protein_tensor = esm3.encode(esm_prot)
+            embeddings = esm3.forward_and_sample(
+                protein_tensor, SamplingConfig(return_per_residue_embeddings=True)
+            ).per_residue_embedding
+            print(name, embeddings.shape)
rcsb_embedding_model-0.0.6/src/rcsb_embedding_model/dataset/residue_embedding_from_csv.py (new file)
@@ -0,0 +1,32 @@
+import pandas as pd
+import torch
+from torch.utils.data import Dataset
+
+
+class ResidueEmbeddingFromCSV(Dataset):
+
+    STREAM_ATTR = 'stream'
+    NAME_ATTR = 'name'
+
+    COLUMNS = [STREAM_ATTR, NAME_ATTR]
+
+    def __init__(self, csv_file):
+        super().__init__()
+        self.data = pd.DataFrame()
+        self.__load_stream(csv_file)
+
+    def __load_stream(self, csv_file):
+        self.data = pd.read_csv(
+            csv_file,
+            header=None,
+            index_col=None,
+            names=ResidueEmbeddingFromCSV.COLUMNS
+        )
+
+    def __len__(self):
+        return len(self.data)
+
+    def __getitem__(self, idx):
+        embedding_src = self.data.loc[idx, ResidueEmbeddingFromCSV.STREAM_ATTR]
+        name = self.data.loc[idx, ResidueEmbeddingFromCSV.NAME_ATTR]
+        return torch.load(embedding_src, map_location=torch.device('cpu')), name
rcsb_embedding_model-0.0.6/src/rcsb_embedding_model/inference/chain_inference.py (new file)
@@ -0,0 +1,50 @@
+from torch.utils.data import DataLoader
+from lightning import Trainer
+from typer import FileText
+
+from rcsb_embedding_model.dataset.residue_embedding_from_csv import ResidueEmbeddingFromCSV
+from rcsb_embedding_model.modules.chain_module import ChainModule
+from rcsb_embedding_model.types.api_types import Accelerator, Devices, OptionalPath
+from rcsb_embedding_model.utils.data import collate_seq_embeddings
+from rcsb_embedding_model.writer.batch_writer import CsvBatchWriter
+
+
+def predict(
+        csv_file: FileText,
+        batch_size: int = 1,
+        num_workers: int = 0,
+        num_nodes: int = 1,
+        accelerator: Accelerator = Accelerator.auto,
+        devices: Devices = 'auto',
+        out_path: OptionalPath = None
+):
+    inference_set = ResidueEmbeddingFromCSV(
+        csv_file=csv_file
+    )
+
+    inference_dataloader = DataLoader(
+        dataset=inference_set,
+        batch_size=batch_size,
+        num_workers=num_workers,
+        collate_fn=lambda emb: (
+            collate_seq_embeddings([x for x, z in emb]),
+            tuple([z for x, z in emb])
+        )
+    )
+
+    module = ChainModule()
+
+    inference_writer = CsvBatchWriter(out_path) if out_path is not None else None
+    trainer = Trainer(
+        callbacks=[inference_writer] if inference_writer is not None else None,
+        num_nodes=num_nodes,
+        accelerator=accelerator,
+        devices=devices
+    )
+
+    prediction = trainer.predict(
+        module,
+        inference_dataloader
+    )
+
+    return prediction
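Besides the CLI, predict is importable as a plain function. A minimal sketch under assumed inputs; chains.csv is a hypothetical two-column manifest (residue-embedding tensor file | output name), the layout ResidueEmbeddingFromCSV expects:

    # Sketch only: aggregate per-residue embeddings into chain-level embeddings.
    from rcsb_embedding_model.inference.chain_inference import predict

    predictions = predict(
        csv_file="chains.csv",   # hypothetical manifest: tensor file | name
        batch_size=4,
        out_path="embeddings"    # CsvBatchWriter writes <name>.csv per chain
    )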
rcsb_embedding_model-0.0.6/src/rcsb_embedding_model/inference/esm_inference.py (new file)
@@ -0,0 +1,50 @@
+from torch.utils.data import DataLoader
+from lightning import Trainer
+from typer import FileText
+
+from rcsb_embedding_model.dataset.esm_prot_from_csv import EsmProtFromCsv
+from rcsb_embedding_model.modules.esm_module import EsmModule
+from rcsb_embedding_model.types.api_types import SrcFormat, Accelerator, Devices, OptionalPath, SrcLocation
+from rcsb_embedding_model.writer.batch_writer import TensorBatchWriter
+
+
+def predict(
+        csv_file: FileText,
+        src_location: SrcLocation = SrcLocation.local,
+        src_format: SrcFormat = SrcFormat.mmcif,
+        batch_size: int = 1,
+        num_workers: int = 0,
+        num_nodes: int = 1,
+        accelerator: Accelerator = Accelerator.auto,
+        devices: Devices = 'auto',
+        out_path: OptionalPath = None
+):
+
+    inference_set = EsmProtFromCsv(
+        csv_file=csv_file,
+        src_location=src_location,
+        src_format=src_format
+    )
+
+    inference_dataloader = DataLoader(
+        dataset=inference_set,
+        batch_size=batch_size,
+        num_workers=num_workers,
+        collate_fn=lambda _: _
+    )
+
+    module = EsmModule()
+    inference_writer = TensorBatchWriter(out_path) if out_path is not None else None
+    trainer = Trainer(
+        callbacks=[inference_writer] if inference_writer is not None else None,
+        num_nodes=num_nodes,
+        accelerator=accelerator,
+        devices=devices
+    )
+
+    prediction = trainer.predict(
+        module,
+        inference_dataloader
+    )
+
+    return prediction
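The structure-to-residue-embedding stage can be driven the same way. A sketch with a hypothetical structures.csv manifest (structure file | chain id | output name); with out_path set, TensorBatchWriter saves one .pt tensor per name:

    # Sketch only: compute ESM3 per-residue embeddings for listed chains.
    from rcsb_embedding_model.inference.esm_inference import predict
    from rcsb_embedding_model.types.api_types import SrcFormat, SrcLocation

    predict(
        csv_file="structures.csv",     # hypothetical three-column manifest
        src_location=SrcLocation.local,
        src_format=SrcFormat.mmcif,
        out_path="residue_embeddings"  # one <name>.pt file per input row
    )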
rcsb_embedding_model-0.0.6/src/rcsb_embedding_model/modules/chain_module.py (new file)
@@ -0,0 +1,16 @@
+from lightning import LightningModule
+
+from rcsb_embedding_model.utils.model import get_aggregator_model
+
+
+class ChainModule(LightningModule):
+
+    def __init__(
+            self
+    ):
+        super().__init__()
+        self.model = get_aggregator_model(device=self.device)
+
+    def predict_step(self, batch, batch_idx):
+        (x, x_mask), dom_id = batch
+        return self.model(x, x_mask), dom_id
rcsb_embedding_model-0.0.6/src/rcsb_embedding_model/modules/esm_module.py (new file)
@@ -0,0 +1,24 @@
+from esm.sdk.api import SamplingConfig
+from lightning import LightningModule
+
+from rcsb_embedding_model.utils.model import get_residue_model
+
+
+class EsmModule(LightningModule):
+
+    def __init__(
+            self
+    ):
+        super().__init__()
+        self.esm3 = get_residue_model(self.device)
+
+    def predict_step(self, prot_batch, batch_idx):
+        prot_embeddings = []
+        prot_names = []
+        for esm_prot, name in prot_batch:
+            embeddings = self.esm3.forward_and_sample(
+                self.esm3.encode(esm_prot), SamplingConfig(return_per_residue_embeddings=True)
+            ).per_residue_embedding
+            prot_embeddings.append(embeddings)
+            prot_names.append(name)
+        return tuple(prot_embeddings), tuple(prot_names)
rcsb_embedding_model-0.0.6/src/rcsb_embedding_model/rcsb_structure_embedding.py (new file)
@@ -0,0 +1,128 @@
+import torch
+from biotite.structure import get_residues, chain_iter, filter_amino_acids
+from esm.sdk.api import ESMProtein, SamplingConfig
+from esm.utils.structure.protein_chain import ProteinChain
+from huggingface_hub import hf_hub_download
+
+from rcsb_embedding_model.types.api_types import StreamSrc, SrcFormat
+from rcsb_embedding_model.utils.model import get_aggregator_model, get_residue_model
+from rcsb_embedding_model.utils.structure_parser import get_structure_from_src
+
+
+class RcsbStructureEmbedding:
+
+    MIN_RES = 10
+
+    def __init__(self):
+        self.__residue_embedding = None
+        self.__aggregator_embedding = None
+
+    def load_models(
+            self,
+            device: torch.device = None
+    ):
+        self.load_residue_embedding(device)
+        self.load_aggregator_embedding(device)
+
+    def load_residue_embedding(
+            self,
+            device: torch.device = None
+    ):
+        if not device:
+            device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
+        self.__residue_embedding = _load_res_model(device)
+
+    def load_aggregator_embedding(
+            self,
+            device: torch.device = None
+    ):
+        if not device:
+            device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
+        self.__aggregator_embedding = _load_model(device)
+
+    def structure_embedding(
+            self,
+            structure_src: StreamSrc,
+            src_format: SrcFormat = SrcFormat.mmcif,
+            chain_id: str = None,
+            assembly_id: str = None
+    ):
+        res_embedding = self.residue_embedding(structure_src, src_format, chain_id, assembly_id)
+        return self.aggregator_embedding(res_embedding)
+
+    def residue_embedding(
+            self,
+            structure_src: StreamSrc,
+            src_format: SrcFormat = SrcFormat.mmcif,
+            chain_id: str = None,
+            assembly_id: str = None
+    ):
+        self.__check_residue_embedding()
+        structure = get_structure_from_src(structure_src, src_format, chain_id, assembly_id)
+        embedding_ch = []
+        for atom_ch in chain_iter(structure):
+            atom_res = atom_ch[filter_amino_acids(atom_ch)]
+            if len(atom_res) == 0 or len(get_residues(atom_res)[0]) < RcsbStructureEmbedding.MIN_RES:
+                continue
+            protein_chain = ProteinChain.from_atomarray(atom_ch)
+            protein = ESMProtein.from_protein_chain(protein_chain)
+            protein_tensor = self.__residue_embedding.encode(protein)
+            embedding_ch.append(self.__residue_embedding.forward_and_sample(
+                protein_tensor, SamplingConfig(return_per_residue_embeddings=True)
+            ).per_residue_embedding)
+        return torch.cat(
+            embedding_ch,
+            dim=0
+        )
+
+    def sequence_embedding(
+            self,
+            sequence: str
+    ):
+        self.__check_residue_embedding()
+
+        if sequence.startswith(">"):
+            sequence = "".join(line.strip() for line in sequence.splitlines() if not line.startswith(">"))
+
+        if len(sequence) < RcsbStructureEmbedding.MIN_RES:
+            raise ValueError(f"Sequence too short for embedding (min {RcsbStructureEmbedding.MIN_RES} residues)")
+
+        protein = ESMProtein(sequence=sequence)
+        protein_tensor = self.__residue_embedding.encode(protein)
+
+        result = self.__residue_embedding.forward_and_sample(
+            protein_tensor,
+            SamplingConfig(return_per_residue_embeddings=True)
+        )
+
+        return result.per_residue_embedding
+
+    def aggregator_embedding(
+            self,
+            residue_embedding: torch.Tensor
+    ):
+        self.__check_aggregator_embedding()
+        return self.__aggregator_embedding(residue_embedding)
+
+    def __check_residue_embedding(self):
+        if self.__residue_embedding is None:
+            self.load_residue_embedding()
+
+    def __check_aggregator_embedding(self):
+        if self.__aggregator_embedding is None:
+            self.load_aggregator_embedding()
+
+
+def _load_model(device=None):
+    if not device:
+        device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
+    aggregator_model = get_aggregator_model(device=device)
+    aggregator_model.to(device)
+    aggregator_model.eval()
+    return aggregator_model
+
+
+def _load_res_model(device=None):
+    if not device:
+        device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
+    return get_residue_model(device)
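The reworked class keeps the two-stage residue-then-aggregator API. A sketch mirroring the updated unit test further down; the 1acb.cif path and the expected embedding shape come from the package tests:

    # Sketch mirroring tests/test_embedding_model.py: embed chain A of 1acb.cif.
    from rcsb_embedding_model import RcsbStructureEmbedding
    from rcsb_embedding_model.types.api_types import SrcFormat

    model = RcsbStructureEmbedding()
    res_embedding = model.residue_embedding(
        "tests/resources/1acb.cif",   # resource shipped with the repository tests
        src_format=SrcFormat.mmcif,
        chain_id="A"
    )
    print(list(res_embedding.shape))  # [243, 1536] per the test assertion
    structure_embedding = model.aggregator_embedding(res_embedding)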
rcsb_embedding_model-0.0.6/src/rcsb_embedding_model/types/api_types.py (new file)
@@ -0,0 +1,29 @@
+from enum import Enum
+from os import PathLike
+from typing import NewType, Union, IO, Tuple, List, Optional
+
+StreamSrc = NewType('StreamSrc', Union[PathLike, IO])
+StreamTuple = NewType('StreamTuple', Tuple[StreamSrc, str, str])
+
+Devices = NewType('Devices', Union[int, List[int], "auto"])
+
+OptionalPath = NewType('OptionalPath', Optional[PathLike])
+
+
+class SrcFormat(str, Enum):
+    pdb = "pdb"
+    mmcif = "mmcif"
+    bciff = "binarycif"
+
+
+class Accelerator(str, Enum):
+    cpu = "cpu"
+    gpu = "gpu"
+    tpu = "tpu"
+    hpu = "hpu"
+    auto = "auto"
+
+
+class SrcLocation(str, Enum):
+    local = "local"
+    remote = "remote"
rcsb_embedding_model-0.0.6/src/rcsb_embedding_model/utils/data.py (new file)
@@ -0,0 +1,47 @@
+from io import StringIO
+
+import requests
+import torch
+
+
+def collate_seq_embeddings(batch_list):
+    """
+    Pads the tensors in a batch to the same size.
+
+    Args:
+        batch_list (list of torch.Tensor): A list of samples, where each sample is a tensor of shape (sequence_length, embedding_dim).
+
+    Returns:
+        tuple: A tuple containing:
+            - padded_batch (torch.Tensor): A tensor of shape (batch_size, max_seq_length, embedding_dim), where each sample is padded to the max sequence length.
+            - mask_batch (torch.Tensor): A tensor of shape (batch_size, max_seq_length) where padded positions are marked as False.
+    """
+    if batch_list[0] is None:
+        return None
+    device = batch_list[0].device  # Get the device of the input tensors
+    max_len = max(sample.size(0) for sample in batch_list)  # Determine the maximum sequence length
+    dim = batch_list[0].size(1)  # Determine the embedding dimension
+    batch_size = len(batch_list)  # Determine the batch size
+
+    # Initialize tensors for the padded batch and masks on the same device as the input tensors
+    padded_batch = torch.zeros((batch_size, max_len, dim), dtype=batch_list[0].dtype, device=device)
+    mask_batch = torch.ones((batch_size, max_len), dtype=torch.bool, device=device)
+
+    for i, sample in enumerate(batch_list):
+        seq_len = sample.size(0)  # Get the length of the current sequence
+        padded_batch[i, :seq_len] = sample  # Pad the sequence with zeros
+        mask_batch[i, :seq_len] = False  # Set mask positions for the actual data to False
+
+    return padded_batch, mask_batch
+
+
+def stringio_from_url(url):
+    try:
+        response = requests.get(url)
+        response.raise_for_status()
+        return StringIO(response.text)
+    except requests.exceptions.RequestException as e:
+        print(f"Error fetching URL: {e}")
+        return None
+
+
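A small worked example of collate_seq_embeddings: note that, as written, the mask starts as all True and only real positions are flipped to False, so True marks padding (the docstring states the opposite convention). Illustrative sketch, not part of the release:

    # Sketch only: pad two sequences of embedding dim 4 to a common length.
    import torch
    from rcsb_embedding_model.utils.data import collate_seq_embeddings

    a = torch.ones(3, 4)  # sequence of length 3
    b = torch.ones(5, 4)  # sequence of length 5
    padded, mask = collate_seq_embeddings([a, b])
    print(padded.shape)   # torch.Size([2, 5, 4])
    print(mask[0])        # tensor([False, False, False,  True,  True]) -> True = padding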
rcsb_embedding_model-0.0.6/src/rcsb_embedding_model/utils/model.py (new file)
@@ -0,0 +1,29 @@
+import torch
+from esm.models.esm3 import ESM3
+from esm.utils.constants.models import ESM3_OPEN_SMALL
+from huggingface_hub import hf_hub_download
+
+from rcsb_embedding_model.model.residue_embedding_aggregator import ResidueEmbeddingAggregator
+
+REPO_ID = "rcsb/rcsb-embedding-model"
+FILE_NAME = "rcsb-embedding-model.pt"
+REVISION = "410606e40b1bb7968ce318c41009355c3ac32503"
+
+
+def get_aggregator_model(device=None):
+    model_path = hf_hub_download(
+        repo_id=REPO_ID,
+        filename=FILE_NAME,
+        revision=REVISION
+    )
+    weights = torch.load(model_path, weights_only=True, map_location=device)
+    aggregator_model = ResidueEmbeddingAggregator()
+    aggregator_model.load_state_dict(weights)
+    return aggregator_model
+
+
+def get_residue_model(device=None):
+    return ESM3.from_pretrained(
+        ESM3_OPEN_SMALL,
+        device
+    )
rcsb_embedding_model-0.0.6/src/rcsb_embedding_model/utils/structure_parser.py (new file)
@@ -0,0 +1,51 @@
+
+from biotite.structure.io.pdb import PDBFile, get_structure as get_pdb_structure, get_assembly as get_pdb_assembly
+from biotite.structure.io.pdbx import CIFFile, get_structure, get_assembly, BinaryCIFFile
+
+
+def get_structure_from_src(
+        structure_src,
+        src_format="mmcif",
+        chain_id=None,
+        assembly_id=None
+):
+    if src_format == "pdb":
+        pdb_file = PDBFile.read(structure_src)
+        structure = __get_pdb_structure(pdb_file, assembly_id)
+    elif src_format == "mmcif":
+        cif_file = CIFFile.read(structure_src)
+        structure = __get_structure(cif_file, assembly_id)
+    elif src_format == "binarycif":
+        cif_file = BinaryCIFFile.read(structure_src)
+        structure = __get_structure(cif_file, assembly_id)
+    else:
+        raise RuntimeError(f"Unknown file format {src_format}")
+
+    if chain_id is not None:
+        structure = structure[structure.chain_id == chain_id]
+
+    return structure
+
+
+def __get_pdb_structure(pdb_file, assembly_id=None):
+    return get_pdb_structure(
+        pdb_file,
+        model=1
+    ) if assembly_id is None else get_pdb_assembly(
+        pdb_file,
+        assembly_id=assembly_id,
+        model=1
+    )
+
+
+def __get_structure(cif_file, assembly_id=None):
+    return get_structure(
+        cif_file,
+        model=1,
+        use_author_fields=False
+    ) if assembly_id is None else get_assembly(
+        cif_file,
+        assembly_id=assembly_id,
+        model=1,
+        use_author_fields=False
+    )
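get_structure_from_src resolves format, model selection, and optional chain/assembly filtering in one call. A sketch using the CIF file shipped with the repository tests; since use_author_fields=False, chain_id is matched against label_asym_id:

    # Sketch only: load chain A of the test structure as a biotite AtomArray.
    from rcsb_embedding_model.utils.structure_parser import get_structure_from_src

    structure = get_structure_from_src(
        "tests/resources/1acb.cif",  # resource from the repository tests
        src_format="mmcif",
        chain_id="A"
    )
    print(structure.array_length())  # number of atoms in the selected chain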
rcsb_embedding_model-0.0.6/src/rcsb_embedding_model/writer/batch_writer.py (new file)
@@ -0,0 +1,113 @@
+
+from abc import abstractmethod
+from collections import deque
+from abc import ABC
+
+import torch
+import pandas as pd
+
+from lightning.pytorch.callbacks import BasePredictionWriter
+
+
+class CoreBatchWriter(BasePredictionWriter, ABC):
+    def __init__(
+            self,
+            output_path,
+            postfix,
+            write_interval="batch"
+    ):
+        super().__init__(write_interval)
+        self.out_path = output_path
+        self.postfix = postfix
+
+    def write_on_batch_end(
+            self,
+            trainer,
+            pl_module,
+            prediction,
+            batch_indices,
+            batch,
+            batch_idx,
+            dataloader_idx
+    ):
+        if prediction is None:
+            return
+        embeddings, dom_ids = prediction
+        deque(map(
+            self._write_embedding,
+            embeddings,
+            dom_ids
+        ))
+
+    def file_name(self, dom_id):
+        return f'{self.out_path}/{dom_id}.{self.postfix}'
+
+    @abstractmethod
+    def _write_embedding(self, embedding, dom_id):
+        pass
+
+
+class CsvBatchWriter(CoreBatchWriter, ABC):
+    def __init__(
+            self,
+            output_path,
+            postfix="csv",
+            write_interval="batch"
+    ):
+        super().__init__(output_path, postfix, write_interval)
+
+    def _write_embedding(self, embedding, dom_id):
+        pd.DataFrame(embedding.to('cpu').numpy()).to_csv(
+            self.file_name(dom_id),
+            index=False,
+            header=False
+        )
+
+
+class TensorBatchWriter(CoreBatchWriter, ABC):
+    def __init__(
+            self,
+            output_path,
+            postfix="pt",
+            write_interval="batch",
+            device="cpu"
+    ):
+        super().__init__(output_path, postfix, write_interval)
+        self.device = device
+
+    def _write_embedding(self, embedding, dom_id):
+        torch.save(
+            embedding.to(self.device),
+            self.file_name(dom_id)
+        )
+
+
+class DataFrameStorage(CoreBatchWriter, ABC):
+    def __init__(
+            self,
+            output_path,
+            df_id,
+            postfix="pkl",
+            write_interval="batch"
+    ):
+        super().__init__(output_path, postfix, write_interval)
+        self.df_id = df_id
+        self.embedding = pd.DataFrame(
+            data={},
+            columns=['id', 'embedding'],
+        )
+
+    def _write_embedding(self, embedding, dom_id):
+        self.embedding = pd.concat([
+            self.embedding,
+            pd.DataFrame(
+                data={'id': dom_id, 'embedding': [embedding.to('cpu').numpy()]},
+                columns=['id', 'embedding'],
+            )
+        ], ignore_index=True)
+
+    def on_predict_end(self, trainer, pl_module):
+        self.embedding.to_pickle(
+            f"{self.out_path}/{self.df_id}.pkl.gz",
+            compression='gzip'
+        )
rcsb_embedding_model-0.0.5/tests/test_model.py → rcsb_embedding_model-0.0.6/tests/test_embedding_model.py
@@ -2,6 +2,7 @@ import os
 import unittest
 
 from rcsb_embedding_model import RcsbStructureEmbedding
+from rcsb_embedding_model.types.api_types import SrcFormat
 
 
 class TestEmbeddingModel(unittest.TestCase):
@@ -13,7 +14,7 @@ class TestEmbeddingModel(unittest.TestCase):
         model = RcsbStructureEmbedding()
         res_embedding = model.residue_embedding(
             f"{self.__test_path}/resources/1acb.cif",
-
+            src_format=SrcFormat.mmcif,
             chain_id='A'
         )
         self.assertEqual(list(res_embedding.shape), [243, 1536])
@@ -33,7 +34,7 @@ class TestEmbeddingModel(unittest.TestCase):
         model = RcsbStructureEmbedding()
         res_embedding = model.residue_embedding(
             f"{self.__test_path}/resources/1acb.cif",
-
+            src_format=SrcFormat.mmcif,
             chain_id='A'
         )
         structure_embedding = model.aggregator_embedding(
rcsb_embedding_model-0.0.5/src/rcsb_embedding_model/rcsb_structure_embedding.py (deleted)
@@ -1,171 +0,0 @@
-import torch
-from biotite.structure import get_residues, chain_iter, filter_amino_acids
-from biotite.structure.io.pdb import PDBFile, get_structure as get_pdb_structure, get_assembly as get_pdb_assembly
-from biotite.structure.io.pdbx import CIFFile, get_structure, get_assembly, BinaryCIFFile
-from esm.models.esm3 import ESM3
-from esm.sdk.api import ESMProtein, SamplingConfig
-from esm.utils.constants.models import ESM3_OPEN_SMALL
-from esm.utils.structure.protein_chain import ProteinChain
-from huggingface_hub import hf_hub_download
-
-from rcsb_embedding_model.model.residue_embedding_aggregator import ResidueEmbeddingAggregator
-
-
-class RcsbStructureEmbedding:
-
-    MIN_RES = 10
-    REPO_ID = "rcsb/rcsb-embedding-model"
-    FILE_NAME = "rcsb-embedding-model.pt"
-    VERSION = "410606e40b1bb7968ce318c41009355c3ac32503"
-
-    def __init__(self):
-        self.__residue_embedding = None
-        self.__aggregator_embedding = None
-
-    def load_models(self, device=None):
-        self.load_residue_embedding(device)
-        self.load_aggregator_embedding(device)
-
-    def load_residue_embedding(self, device=None):
-        if not device:
-            device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
-        self.__residue_embedding = _load_res_model(device)
-
-    def load_aggregator_embedding(self, device=None):
-        if not device:
-            device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
-        self.__aggregator_embedding = _load_model(
-            _download_model(
-                RcsbStructureEmbedding.REPO_ID,
-                RcsbStructureEmbedding.FILE_NAME,
-                RcsbStructureEmbedding.VERSION
-            ),
-            device
-        )
-
-    def structure_embedding(self, structure_src, format="pdb", chain_id=None, assembly_id=None):
-        res_embedding = self.residue_embedding(structure_src, format, chain_id, assembly_id)
-        return self.aggregator_embedding(res_embedding)
-
-    def residue_embedding(self, structure_src, format="pdb", chain_id=None, assembly_id=None):
-        self.__check_residue_embedding()
-        structure = _get_structure_from_src(structure_src, format, chain_id, assembly_id)
-        embedding_ch = []
-        for atom_ch in chain_iter(structure):
-            atom_res = atom_ch[filter_amino_acids(atom_ch)]
-            if len(atom_res) == 0 or len(get_residues(atom_res)[0]) < RcsbStructureEmbedding.MIN_RES:
-                continue
-            protein_chain = ProteinChain.from_atomarray(atom_ch)
-            protein = ESMProtein.from_protein_chain(protein_chain)
-            protein_tensor = self.__residue_embedding.encode(protein)
-            embedding_ch.append(self.__residue_embedding.forward_and_sample(
-                protein_tensor, SamplingConfig(return_per_residue_embeddings=True)
-            ).per_residue_embedding)
-        return torch.cat(
-            embedding_ch,
-            dim=0
-        )
-
-    def sequence_embedding(self, sequence):
-        self.__check_residue_embedding()
-
-        if sequence.startswith(">"):
-            sequence = "".join(line.strip() for line in sequence.splitlines() if not line.startswith(">"))
-
-        if len(sequence) < RcsbStructureEmbedding.MIN_RES:
-            raise ValueError(f"Sequence too short for embedding (min {RcsbStructureEmbedding.MIN_RES} residues)")
-
-        protein = ESMProtein(sequence=sequence)
-        protein_tensor = self.__residue_embedding.encode(protein)
-
-        result = self.__residue_embedding.forward_and_sample(
-            protein_tensor,
-            SamplingConfig(return_per_residue_embeddings=True)
-        )
-
-        return result.per_residue_embedding
-
-    def aggregator_embedding(self, residue_embedding):
-        self.__check_aggregator_embedding()
-        return self.__aggregator_embedding(residue_embedding)
-
-    def __check_residue_embedding(self):
-        if self.__residue_embedding is None:
-            self.load_residue_embedding()
-
-    def __check_aggregator_embedding(self):
-        if self.__aggregator_embedding is None:
-            self.load_aggregator_embedding()
-
-
-def _get_structure_from_src(structure_src, format="pdb", chain_id=None, assembly_id=None):
-    if format == "pdb":
-        pdb_file = PDBFile.read(structure_src)
-        structure = _get_pdb_structure(pdb_file, assembly_id)
-    elif format == "mmcif":
-        cif_file = CIFFile.read(structure_src)
-        structure = _get_structure(cif_file, assembly_id)
-    elif format == "binarycif":
-        cif_file = BinaryCIFFile.read(structure_src)
-        structure = _get_structure(cif_file, assembly_id)
-    else:
-        raise RuntimeError(f"Unknown file format {format}")
-
-    if chain_id is not None:
-        structure = structure[structure.chain_id == chain_id]
-
-    return structure
-
-
-def _get_pdb_structure(pdb_file, assembly_id = None):
-    return get_pdb_structure(
-        pdb_file,
-        model=1
-    ) if assembly_id is None else get_pdb_assembly(
-        pdb_file,
-        assembly_id=assembly_id,
-        model=1
-    )
-
-
-def _get_structure(cif_file, assembly_id = None):
-    return get_structure(
-        cif_file,
-        model=1,
-        use_author_fields=False
-    ) if assembly_id is None else get_assembly(
-        cif_file,
-        assembly_id=assembly_id,
-        model=1,
-        use_author_fields=False
-    )
-
-
-def _download_model(
-        repo_id,
-        filename,
-        revision
-):
-    return hf_hub_download(
-        repo_id=repo_id,
-        filename=filename,
-        revision=revision
-    )
-
-
-def _load_model(model_path, device=None):
-    if not device:
-        device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
-    weights = torch.load(model_path, weights_only=True, map_location=device)
-    aggregator_model = ResidueEmbeddingAggregator()
-    aggregator_model.load_state_dict(weights)
-    aggregator_model.to(device)
-    aggregator_model.eval()
-    return aggregator_model
-
-
-def _load_res_model(device=None):
-    return ESM3.from_pretrained(
-        ESM3_OPEN_SMALL,
-        device
-    )