rcsb-embedding-model 0.0.6__py3-none-any.whl → 0.0.8__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release: this version of rcsb-embedding-model might be problematic.
- rcsb_embedding_model/cli/args_utils.py +0 -2
- rcsb_embedding_model/cli/inference.py +164 -42
- rcsb_embedding_model/dataset/esm_prot_from_chain.py +102 -0
- rcsb_embedding_model/dataset/esm_prot_from_structure.py +63 -0
- rcsb_embedding_model/dataset/resdiue_assembly_embedding_from_structure.py +68 -0
- rcsb_embedding_model/dataset/residue_assembly_embedding_from_tensor_file.py +94 -0
- rcsb_embedding_model/dataset/residue_embedding_from_tensor_file.py +43 -0
- rcsb_embedding_model/inference/assembly_inferece.py +53 -0
- rcsb_embedding_model/inference/chain_inference.py +12 -8
- rcsb_embedding_model/inference/esm_inference.py +18 -8
- rcsb_embedding_model/inference/structure_inference.py +61 -0
- rcsb_embedding_model/modules/structure_module.py +27 -0
- rcsb_embedding_model/rcsb_structure_embedding.py +7 -8
- rcsb_embedding_model/types/api_types.py +27 -5
- rcsb_embedding_model/utils/data.py +30 -0
- rcsb_embedding_model/utils/structure_parser.py +43 -13
- rcsb_embedding_model/utils/structure_provider.py +27 -0
- rcsb_embedding_model-0.0.8.dist-info/METADATA +129 -0
- rcsb_embedding_model-0.0.8.dist-info/RECORD +29 -0
- rcsb_embedding_model/dataset/esm_prot_from_csv.py +0 -91
- rcsb_embedding_model/dataset/residue_embedding_from_csv.py +0 -32
- rcsb_embedding_model-0.0.6.dist-info/METADATA +0 -117
- rcsb_embedding_model-0.0.6.dist-info/RECORD +0 -22
- {rcsb_embedding_model-0.0.6.dist-info → rcsb_embedding_model-0.0.8.dist-info}/WHEEL +0 -0
- {rcsb_embedding_model-0.0.6.dist-info → rcsb_embedding_model-0.0.8.dist-info}/entry_points.txt +0 -0
- {rcsb_embedding_model-0.0.6.dist-info → rcsb_embedding_model-0.0.8.dist-info}/licenses/LICENSE.md +0 -0
rcsb_embedding_model/cli/inference.py
@@ -1,28 +1,48 @@
+import sys
 from typing import Annotated, List

 import typer

 from rcsb_embedding_model.cli.args_utils import arg_devices
-from rcsb_embedding_model.types.api_types import
+from rcsb_embedding_model.types.api_types import StructureFormat, Accelerator, SrcLocation, SrcProteinFrom, \
+    StructureLocation, SrcAssemblyFrom

-app = typer.Typer(
+app = typer.Typer(
+    add_completion=False
+)


-@app.command(
+@app.command(
+    name="residue-embedding",
+    help="Calculate residue level embeddings of protein structures using ESM3. Predictions are stored as torch tensor files."
+)
 def residue_embedding(
         src_file: Annotated[typer.FileText, typer.Option(
             exists=True,
             file_okay=True,
             dir_okay=False,
             resolve_path=True,
-            help='CSV file 3 columns: Structure File | Chain Id (asym_i for cif files) | Output
+            help='CSV file 4 (or 3) columns: Structure Name | Structure File Path | Chain Id (asym_i for cif files. This field is required if src-from=chain) | Output Embedding Name.'
         )],
-
-
-
-
+        output_path: Annotated[typer.FileText, typer.Option(
+            exists=True,
+            file_okay=False,
+            dir_okay=True,
+            resolve_path=True,
+            help='Output path to store predictions. Embeddings are stored as torch tensor files.'
+        )],
+        src_from: Annotated[SrcProteinFrom, typer.Option(
+            help='Use specific chains or all chains in a structure.'
+        )] = SrcProteinFrom.chain,
+        structure_location: Annotated[StructureLocation, typer.Option(
+            help='Structure file location.'
+        )] = StructureLocation.local,
+        structure_format: Annotated[StructureFormat, typer.Option(
             help='Structure file format.'
-        )] =
+        )] = StructureFormat.mmcif,
+        min_res_n: Annotated[int, typer.Option(
+            help='When using all chains in a structure, consider only chains with more than <min_res_n> residues.'
+        )] = 0,
         batch_size: Annotated[int, typer.Option(
             help='Number of samples processed together in one iteration.'
         )] = 1,
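The residue-embedding command now reads a 4-column CSV (Structure Name | Structure File Path | Chain Id | Output Embedding Name). A minimal sketch that writes such a file is shown below; the structure names, file paths and output names are hypothetical.

import csv

# Hypothetical entries: structure name, structure file path, chain id (asym_id), output embedding name.
rows = [
    ("101m", "/data/cif/101m.cif", "A", "101m.A"),
    ("1xyz", "/data/cif/1xyz.cif", "B", "1xyz.B"),
]

with open("residue_embedding_input.csv", "w", newline="") as fh:
    csv.writer(fh).writerows(rows)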
@@ -37,20 +57,16 @@ def residue_embedding(
         )] = Accelerator.auto,
         devices: Annotated[List[str], typer.Option(
             help='The devices to use. Can be set to a positive number or "auto". Repeat this argument to indicate multiple indices of devices. "auto" for automatic selection based on the chosen accelerator.'
-        )] = tuple(['auto'])
-        output_path: Annotated[typer.FileText, typer.Option(
-            exists=True,
-            file_okay=False,
-            dir_okay=True,
-            resolve_path=True,
-            help='Output path to store predictions.'
-        )] = None
+        )] = tuple(['auto'])
 ):
     from rcsb_embedding_model.inference.esm_inference import predict
     predict(
-
-        src_location=
-
+        src_stream=src_file,
+        src_location=SrcLocation.local,
+        src_from=src_from,
+        structure_location=structure_location,
+        structure_format=structure_format,
+        min_res_n=min_res_n,
+        batch_size=batch_size,
         num_workers=num_workers,
         num_nodes=num_nodes,
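With output_path promoted to a required option and the new source options wired through to esm_inference.predict, the command can be smoke-tested in-process with typer's test runner. A minimal sketch, assuming the CSV from the previous example and an existing output directory; both paths are hypothetical, and the --src-file/--output-path option names rely on typer's default parameter-name conversion.

from typer.testing import CliRunner

from rcsb_embedding_model.cli.inference import app

runner = CliRunner()
# Hypothetical paths; the output directory must already exist (exists=True).
result = runner.invoke(app, [
    "residue-embedding",
    "--src-file", "residue_embedding_input.csv",
    "--output-path", "/tmp/residue-embeddings",
])
print(result.exit_code)
print(result.output)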
@@ -60,21 +76,40 @@ def residue_embedding(
     )


-@app.command(
+@app.command(
+    name="structure-embedding",
+    help="Calculate single-chain protein embeddings from structural files. Predictions are stored in a single pandas DataFrame file."
+)
 def structure_embedding(
         src_file: Annotated[typer.FileText, typer.Option(
             exists=True,
             file_okay=True,
             dir_okay=False,
             resolve_path=True,
-            help='CSV file 3 columns: Structure File | Chain Id (asym_i for cif files) | Output
+            help='CSV file 4 (or 3) columns: Structure Name | Structure File Path | Chain Id (asym_i for cif files. This field is required if src-from=chain) | Output Embedding Name.'
+        )],
+        output_path: Annotated[typer.FileText, typer.Option(
+            exists=True,
+            file_okay=False,
+            dir_okay=True,
+            resolve_path=True,
+            help='Output path to store predictions. Embeddings are stored as a single DataFrame file (see out-df-name).'
+        )],
+        out_df_name: Annotated[str, typer.Option(
+            help='File name (without extension) for storing embeddings as a pandas DataFrame pickle (.pkl). The DataFrame contains 2 columns: Id | Embedding'
         )],
-
+        src_from: Annotated[SrcProteinFrom, typer.Option(
+            help='Use specific chains or all chains in a structure.'
+        )] = SrcProteinFrom.chain,
+        structure_location: Annotated[StructureLocation, typer.Option(
             help='Source input location.'
-        )] =
-
+        )] = StructureLocation.local,
+        structure_format: Annotated[StructureFormat, typer.Option(
             help='Structure file format.'
-        )] =
+        )] = StructureFormat.mmcif,
+        min_res_n: Annotated[int, typer.Option(
+            help='When using all chains in a structure, consider only chains with more than <min_res_n> residues.'
+        )] = 0,
         batch_size: Annotated[int, typer.Option(
             help='Number of samples processed together in one iteration.'
         )] = 1,
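structure-embedding now collects all chain embeddings into one pandas DataFrame pickle named by out-df-name, with two columns (Id | Embedding). A minimal sketch for inspecting such an output; the "<output-path>/<out-df-name>.pkl" location is an assumption based on the help text above.

import pandas as pd

# Assumed location "<output-path>/<out-df-name>.pkl"; adjust to the file actually produced.
df = pd.read_pickle("/tmp/structure-embeddings/embeddings.pkl")
print(df.shape)        # one row per protein chain
print(df.iloc[0, 0])   # Id
print(df.iloc[0, 1])   # Embedding vector for that chain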
@@ -89,26 +124,44 @@ def structure_embedding(
         )] = Accelerator.auto,
         devices: Annotated[List[str], typer.Option(
             help='The devices to use. Can be set to a positive number or "auto". Repeat this argument to indicate multiple indices of devices. "auto" for automatic selection based on the chosen accelerator.'
-        )] = tuple(['auto'])
-        output_path: Annotated[typer.FileText, typer.Option(
-            exists=True,
-            file_okay=False,
-            dir_okay=True,
-            resolve_path=True,
-            help='Output path to store predictions.'
-        )] = None
+        )] = tuple(['auto'])
 ):
-
+    from rcsb_embedding_model.inference.structure_inference import predict
+    predict(
+        src_stream=src_file,
+        src_location=SrcLocation.local,
+        src_from=src_from,
+        structure_location=structure_location,
+        structure_format=structure_format,
+        min_res_n=min_res_n,
+        batch_size=batch_size,
+        num_workers=num_workers,
+        num_nodes=num_nodes,
+        accelerator=accelerator,
+        devices=arg_devices(devices),
+        out_path=output_path,
+        out_df_name=out_df_name
+    )


-@app.command(
+@app.command(
+    name="chain-embedding",
+    help="Calculate single-chain protein embeddings from residue level embeddings stored as torch tensor files. Predictions are stored as csv files."
+)
 def chain_embedding(
         src_file: Annotated[typer.FileText, typer.Option(
             exists=True,
             file_okay=True,
             dir_okay=False,
             resolve_path=True,
-            help='CSV file 2 columns: Residue
+            help='CSV file 2 columns: Residue embedding torch tensor file | Output embedding name.'
+        )],
+        output_path: Annotated[typer.FileText, typer.Option(
+            exists=True,
+            file_okay=False,
+            dir_okay=True,
+            resolve_path=True,
+            help='Output path to store predictions. Embeddings are stored as csv files.'
         )],
         batch_size: Annotated[int, typer.Option(
             help='Number of samples processed together in one iteration.'
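chain-embedding consumes the per-residue tensors written by residue-embedding, one CSV row per tensor file. A minimal sketch that writes the 2-column CSV and sanity-checks one tensor; the file names are hypothetical and assume the torch tensor outputs of the residue-embedding step.

import csv

import torch

# Hypothetical tensor files written by residue-embedding, paired with output names.
rows = [
    ("/tmp/residue-embeddings/101m.A.pt", "101m.A"),
    ("/tmp/residue-embeddings/1xyz.B.pt", "1xyz.B"),
]
with open("chain_embedding_input.csv", "w", newline="") as fh:
    csv.writer(fh).writerows(rows)

# Quick sanity check of one residue-level tensor before running chain-embedding.
residue_embedding = torch.load(rows[0][0], map_location="cpu")
print(residue_embedding.shape)  # expected: (n_residues, embedding_dim)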
@@ -124,18 +177,87 @@ def chain_embedding(
         )] = Accelerator.auto,
         devices: Annotated[List[str], typer.Option(
             help='The devices to use. Can be set to a positive number or "auto". Repeat this argument to indicate multiple indices of devices. "auto" for automatic selection based on the chosen accelerator.'
-        )] = tuple(['auto'])
+        )] = tuple(['auto'])
+):
+    from rcsb_embedding_model.inference.chain_inference import predict
+    predict(
+        src_stream=src_file,
+        src_location=SrcLocation.local,
+        batch_size=batch_size,
+        num_workers=num_workers,
+        num_nodes=num_nodes,
+        accelerator=accelerator,
+        devices=arg_devices(devices),
+        out_path=output_path
+    )
+
+@app.command(
+    name="assembly-embedding",
+    help="Calculate assembly embeddings from residue level embeddings stored as torch tensor files. Predictions are stored as csv files."
+)
+def assembly_embedding(
+        src_file: Annotated[typer.FileText, typer.Option(
+            exists=True,
+            file_okay=True,
+            dir_okay=False,
+            resolve_path=True,
+            help='CSV file 4 columns: Structure Name | Structure File Path | Assembly Id | Output embedding name.'
+        )],
+        res_embedding_location: Annotated[typer.FileText, typer.Option(
+            exists=True,
+            file_okay=False,
+            dir_okay=True,
+            resolve_path=True,
+            help='Path where residue level embeddings for single chains are located.'
+        )],
         output_path: Annotated[typer.FileText, typer.Option(
             exists=True,
             file_okay=False,
             dir_okay=True,
             resolve_path=True,
-            help='Output path to store predictions.'
-        )]
+            help='Output path to store predictions. Embeddings are stored as csv files.'
+        )],
+        src_from: Annotated[SrcAssemblyFrom, typer.Option(
+            help='Use specific assembly or all assemblies in a structure.'
+        )] = SrcAssemblyFrom.assembly,
+        structure_location: Annotated[StructureLocation, typer.Option(
+            help='Source input location.'
+        )] = StructureLocation.local,
+        structure_format: Annotated[StructureFormat, typer.Option(
+            help='Structure file format.'
+        )] = StructureFormat.mmcif,
+        min_res_n: Annotated[int, typer.Option(
+            help='Consider only assembly chains with more than <min_res_n> residues.'
+        )] = 0,
+        max_res_n: Annotated[int, typer.Option(
+            help='Stop adding assembly chains when number of residues is greater than <max_res_n> residues.'
+        )] = sys.maxsize,
+        batch_size: Annotated[int, typer.Option(
+            help='Number of samples processed together in one iteration.'
+        )] = 1,
+        num_workers: Annotated[int, typer.Option(
+            help='Number of subprocesses to use for data loading.'
+        )] = 0,
+        num_nodes: Annotated[int, typer.Option(
+            help='Number of nodes to use for inference.'
+        )] = 1,
+        accelerator: Annotated[Accelerator, typer.Option(
+            help='Device used for inference.'
+        )] = Accelerator.auto,
+        devices: Annotated[List[str], typer.Option(
+            help='The devices to use. Can be set to a positive number or "auto". Repeat this argument to indicate multiple indices of devices. "auto" for automatic selection based on the chosen accelerator.'
+        )] = tuple(['auto'])
 ):
-    from rcsb_embedding_model.inference.
+    from rcsb_embedding_model.inference.assembly_inferece import predict
     predict(
-
+        src_stream=src_file,
+        res_embedding_location=res_embedding_location,
+        src_location=SrcLocation.local,
+        src_from=src_from,
+        structure_location=structure_location,
+        structure_format=structure_format,
+        min_res_n=min_res_n,
+        max_res_n=max_res_n,
         batch_size=batch_size,
         num_workers=num_workers,
         num_nodes=num_nodes,
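The new assembly-embedding command needs two inputs: a 4-column CSV (Structure Name | Structure File Path | Assembly Id | Output embedding name) and the directory of per-chain residue tensors, which the new assembly dataset resolves as <res-embedding-location>/<structure name>.<chain>.pt. A minimal preparation sketch with hypothetical entries and paths:

import csv
import os

res_embedding_location = "/tmp/residue-embeddings"   # hypothetical

# Hypothetical assembly rows: structure name, structure file path, assembly id, output embedding name.
rows = [
    ("101m", "/data/cif/101m.cif", "1", "101m.1"),
]
with open("assembly_embedding_input.csv", "w", newline="") as fh:
    csv.writer(fh).writerows(rows)

# The assembly dataset looks for per-chain tensors named "<structure name>.<chain>.pt".
for name, _path, _assembly_id, _out in rows:
    expected = os.path.join(res_embedding_location, f"{name}.A.pt")  # chain A as an example
    print(expected, "exists:", os.path.exists(expected))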
rcsb_embedding_model/dataset/esm_prot_from_chain.py
@@ -0,0 +1,102 @@
+import argparse
+
+import torch
+from biotite.structure import chain_iter
+from esm.models.esm3 import ESM3
+from esm.sdk.api import ESMProtein, SamplingConfig
+from esm.utils.constants.models import ESM3_OPEN_SMALL
+from esm.utils.structure.protein_chain import ProteinChain
+from torch.utils.data import Dataset, DataLoader
+import pandas as pd
+
+from rcsb_embedding_model.types.api_types import StructureFormat, StructureLocation, SrcLocation
+from rcsb_embedding_model.utils.data import stringio_from_url
+from rcsb_embedding_model.utils.structure_parser import rename_atom_ch
+from rcsb_embedding_model.utils.structure_provider import StructureProvider
+
+
+class EsmProtFromChain(Dataset):
+
+    STREAM_NAME_ATTR = 'stream_name'
+    STREAM_ATTR = 'stream'
+    CH_ATTR = 'chain_id'
+    ITEM_NAME_ATTR = 'item_name'
+
+    COLUMNS = [STREAM_NAME_ATTR, STREAM_ATTR, CH_ATTR, ITEM_NAME_ATTR]
+
+    def __init__(
+            self,
+            src_stream,
+            src_location=SrcLocation.local,
+            structure_location=StructureLocation.local,
+            structure_format=StructureFormat.mmcif,
+            structure_provider=StructureProvider()
+    ):
+        super().__init__()
+        self.__structure_provider = structure_provider
+        self.src_location = src_location
+        self.structure_location = structure_location
+        self.structure_format = structure_format
+        self.data = pd.DataFrame()
+        self.__load_stream(src_stream)
+
+    def __load_stream(self, src_stream):
+        self.data = pd.DataFrame(
+            src_stream,
+            dtype=str,
+            columns=EsmProtFromChain.COLUMNS
+        ) if self.src_location == SrcLocation.stream else pd.read_csv(
+            src_stream,
+            header=None,
+            index_col=None,
+            dtype=str,
+            names=EsmProtFromChain.COLUMNS
+        )
+
+    def __len__(self):
+        return len(self.data)
+
+    def __getitem__(self, idx):
+        src_name = self.data.loc[idx, EsmProtFromChain.STREAM_NAME_ATTR]
+        src_structure = self.data.loc[idx, EsmProtFromChain.STREAM_ATTR]
+        chain_id = self.data.loc[idx, EsmProtFromChain.CH_ATTR]
+        item_name = self.data.loc[idx, EsmProtFromChain.ITEM_NAME_ATTR]
+        structure = self.__structure_provider.get_structure(
+            src_name=src_name,
+            src_structure=stringio_from_url(src_structure) if self.structure_location == StructureLocation.remote else src_structure,
+            structure_format=self.structure_format,
+            chain_id=chain_id
+        )
+        for atom_ch in chain_iter(structure):
+            protein_chain = ProteinChain.from_atomarray(rename_atom_ch(atom_ch))
+            return ESMProtein.from_protein_chain(protein_chain), item_name
+
+
+if __name__ == '__main__':
+
+    parser = argparse.ArgumentParser()
+    parser.add_argument('--file_list', type=argparse.FileType('r'), required=True)
+    args = parser.parse_args()
+
+    dataset = EsmProtFromChain(
+        args.file_list
+    )
+
+    esm3 = ESM3.from_pretrained(
+        ESM3_OPEN_SMALL,
+        torch.device("cpu")
+    )
+
+    dataloader = DataLoader(
+        dataset,
+        batch_size=2,
+        collate_fn=lambda _: _
+    )
+
+    for _batch in dataloader:
+        for esm_prot, prot_name in _batch:
+            protein_tensor = esm3.encode(esm_prot)
+            embeddings = esm3.forward_and_sample(
+                protein_tensor, SamplingConfig(return_per_residue_embeddings=True)
+            ).per_residue_embedding
+            print(prot_name, embeddings.shape)
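Besides reading a CSV from disk, EsmProtFromChain also accepts in-memory rows when src_location=SrcLocation.stream, as the __load_stream branch above shows. A minimal sketch with a hypothetical mmCIF path; indexing an item parses the structure and yields an (ESMProtein, item name) pair.

from rcsb_embedding_model.dataset.esm_prot_from_chain import EsmProtFromChain
from rcsb_embedding_model.types.api_types import SrcLocation, StructureFormat

# Hypothetical structure file; rows follow the (stream_name, stream, chain_id, item_name) columns.
dataset = EsmProtFromChain(
    src_stream=[("101m", "/data/cif/101m.cif", "A", "101m.A")],
    src_location=SrcLocation.stream,
    structure_format=StructureFormat.mmcif
)
esm_prot, name = dataset[0]
print(name, esm_prot.sequence)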
rcsb_embedding_model/dataset/esm_prot_from_structure.py
@@ -0,0 +1,63 @@
+
+import pandas as pd
+
+from rcsb_embedding_model.dataset.esm_prot_from_chain import EsmProtFromChain
+from rcsb_embedding_model.types.api_types import StructureLocation, StructureFormat, SrcLocation
+from rcsb_embedding_model.utils.data import stringio_from_url
+from rcsb_embedding_model.utils.structure_parser import get_protein_chains
+from rcsb_embedding_model.utils.structure_provider import StructureProvider
+
+
+class EsmProtFromStructure(EsmProtFromChain):
+
+    STREAM_NAME_ATTR = 'stream_name'
+    STREAM_ATTR = 'stream'
+    ITEM_NAME_ATTR = 'item_name'
+
+    COLUMNS = [STREAM_NAME_ATTR, STREAM_ATTR, ITEM_NAME_ATTR]
+
+    def __init__(
+            self,
+            src_stream,
+            src_location=SrcLocation.local,
+            structure_location=StructureLocation.local,
+            structure_format=StructureFormat.mmcif,
+            min_res_n=0,
+            structure_provider=StructureProvider()
+    ):
+        self.min_res_n = min_res_n
+        self.src_location = src_location
+        self.structure_location = structure_location
+        self.structure_format = structure_format
+        self.__structure_provider = structure_provider
+        super().__init__(
+            src_stream=self.__get_chains(src_stream),
+            src_location=SrcLocation.stream,
+            structure_location=StructureLocation.local,
+            structure_format=structure_format
+        )
+
+    def __get_chains(self, src_stream):
+        chains = []
+        for idx, row in (pd.DataFrame(
+            src_stream,
+            dtype=str,
+            columns=self.COLUMNS
+        ) if self.src_location == SrcLocation.stream else pd.read_csv(
+            src_stream,
+            header=None,
+            index_col=None,
+            dtype=str,
+            names=EsmProtFromStructure.COLUMNS
+        )).iterrows():
+            src_name = row[EsmProtFromStructure.STREAM_NAME_ATTR]
+            src_structure = row[EsmProtFromStructure.STREAM_ATTR]
+            item_name = row[EsmProtFromStructure.ITEM_NAME_ATTR]
+            structure = self.__structure_provider.get_structure(
+                src_name=src_name,
+                src_structure=stringio_from_url(src_structure) if self.structure_location == StructureLocation.remote else src_structure,
+                structure_format=self.structure_format
+            )
+            for ch in get_protein_chains(structure, self.min_res_n):
+                chains.append((src_name, src_structure, ch, f"{item_name}.{ch}"))
+        return tuple(chains)
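EsmProtFromStructure expands each 3-column row (Structure Name | Structure File Path | Output Embedding Name) into one item per protein chain, filtered by min_res_n, and hands the expanded rows to EsmProtFromChain as an in-memory stream. A minimal sketch with a hypothetical CSV path:

from rcsb_embedding_model.dataset.esm_prot_from_structure import EsmProtFromStructure
from rcsb_embedding_model.types.api_types import StructureFormat

# Hypothetical 3-column CSV; chains with 10 or fewer residues are skipped.
dataset = EsmProtFromStructure(
    src_stream="structures.csv",
    structure_format=StructureFormat.mmcif,
    min_res_n=10
)
print(len(dataset), "chains")  # one item per chain, named "<item_name>.<chain>"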
rcsb_embedding_model/dataset/resdiue_assembly_embedding_from_structure.py
@@ -0,0 +1,68 @@
+import sys
+
+import pandas as pd
+
+from rcsb_embedding_model.dataset.residue_assembly_embedding_from_tensor_file import ResidueAssemblyEmbeddingFromTensorFile
+from rcsb_embedding_model.types.api_types import SrcLocation, StructureLocation, StructureFormat
+from rcsb_embedding_model.utils.data import stringio_from_url
+from rcsb_embedding_model.utils.structure_parser import get_assemblies
+from rcsb_embedding_model.utils.structure_provider import StructureProvider
+
+
+class ResidueAssemblyDatasetFromStructure(ResidueAssemblyEmbeddingFromTensorFile):
+
+    STREAM_NAME_ATTR = 'stream_name'
+    STREAM_ATTR = 'stream'
+    ITEM_NAME_ATTR = 'item_name'
+
+    COLUMNS = [STREAM_NAME_ATTR, STREAM_ATTR, ITEM_NAME_ATTR]
+
+    def __init__(
+            self,
+            src_stream,
+            res_embedding_location,
+            src_location=SrcLocation.local,
+            structure_location=StructureLocation.local,
+            structure_format=StructureFormat.mmcif,
+            min_res_n=0,
+            max_res_n=sys.maxsize,
+            structure_provider=StructureProvider()
+    ):
+        self.src_location = src_location
+        self.structure_location = structure_location
+        self.structure_format = structure_format
+        self.min_res_n = min_res_n
+        self.max_res_n = max_res_n
+        self.__structure_provider = structure_provider
+        super().__init__(
+            src_stream=self.__get_assemblies(src_stream),
+            res_embedding_location=res_embedding_location,
+            src_location=src_location,
+            structure_location=structure_location,
+            structure_format=structure_format,
+            min_res_n=min_res_n,
+            max_res_n=max_res_n,
+            structure_provider=structure_provider
+        )
+
+    def __get_assemblies(self, src_stream):
+        assemblies = []
+        for idx, row in (pd.DataFrame(
+            src_stream,
+            dtype=str,
+            columns=self.COLUMNS
+        ) if self.src_location == SrcLocation.stream else pd.read_csv(
+            src_stream,
+            header=None,
+            index_col=None,
+            dtype=str,
+            names=ResidueAssemblyDatasetFromStructure.COLUMNS
+        )).iterrows():
+            src_name = row[ResidueAssemblyDatasetFromStructure.STREAM_NAME_ATTR]
+            src_structure = row[ResidueAssemblyDatasetFromStructure.STREAM_ATTR]
+            src_structure = stringio_from_url(src_structure) if self.structure_location == StructureLocation.remote else src_structure
+            item_name = row[ResidueAssemblyDatasetFromStructure.ITEM_NAME_ATTR]
+            for assembly_id in get_assemblies(src_structure=src_structure, structure_format=self.structure_format):
+                assemblies.append((src_name, src_structure, str(assembly_id), f"{item_name}.{assembly_id}"))
+
+        return tuple(assemblies)
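ResidueAssemblyDatasetFromStructure applies the same expansion at the assembly level: every assembly id reported by get_assemblies becomes one item of the tensor-file dataset it extends. A minimal sketch with hypothetical paths:

from rcsb_embedding_model.dataset.resdiue_assembly_embedding_from_structure import ResidueAssemblyDatasetFromStructure

# Hypothetical paths; per-chain residue tensors are expected under res_embedding_location.
dataset = ResidueAssemblyDatasetFromStructure(
    src_stream="structures.csv",
    res_embedding_location="/tmp/residue-embeddings"
)
print(len(dataset), "assemblies")  # one item per (structure, assembly id) pair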
rcsb_embedding_model/dataset/residue_assembly_embedding_from_tensor_file.py
@@ -0,0 +1,94 @@
+import sys
+
+import pandas as pd
+from torch.utils.data import Dataset, DataLoader
+
+from rcsb_embedding_model.types.api_types import StructureLocation, StructureFormat, SrcLocation
+from rcsb_embedding_model.utils.data import stringio_from_url, concatenate_tensors
+from rcsb_embedding_model.utils.structure_parser import get_protein_chains
+from rcsb_embedding_model.utils.structure_provider import StructureProvider
+
+
+class ResidueAssemblyEmbeddingFromTensorFile(Dataset):
+
+    STREAM_NAME_ATTR = 'stream_name'
+    STREAM_ATTR = 'stream'
+    ASSEMBLY_ATTR = 'assembly_id'
+    ITEM_NAME_ATTR = 'item_name'
+
+    COLUMNS = [STREAM_NAME_ATTR, STREAM_ATTR, ASSEMBLY_ATTR, ITEM_NAME_ATTR]
+
+    def __init__(
+            self,
+            src_stream,
+            res_embedding_location,
+            src_location=SrcLocation.local,
+            structure_location=StructureLocation.local,
+            structure_format=StructureFormat.mmcif,
+            min_res_n=0,
+            max_res_n=sys.maxsize,
+            structure_provider=StructureProvider()
+    ):
+        super().__init__()
+        self.res_embedding_location = res_embedding_location
+        self.src_location = src_location
+        self.structure_location = structure_location
+        self.structure_format = structure_format
+        self.min_res_n = min_res_n
+        self.max_res_n = max_res_n
+        self.data = pd.DataFrame()
+        self.__load_stream(src_stream)
+        self.__structure_provider = structure_provider
+
+    def __load_stream(self, src_stream):
+        self.data = pd.DataFrame(
+            src_stream,
+            dtype=str,
+            columns=ResidueAssemblyEmbeddingFromTensorFile.COLUMNS
+        ) if self.src_location == SrcLocation.stream else pd.read_csv(
+            src_stream,
+            header=None,
+            index_col=None,
+            dtype=str,
+            names=ResidueAssemblyEmbeddingFromTensorFile.COLUMNS
+        )
+
+    def __len__(self):
+        return len(self.data)
+
+    def __getitem__(self, idx):
+        src_name = self.data.loc[idx, ResidueAssemblyEmbeddingFromTensorFile.STREAM_NAME_ATTR]
+        src_structure = self.data.loc[idx, ResidueAssemblyEmbeddingFromTensorFile.STREAM_ATTR]
+        assembly_id = self.data.loc[idx, ResidueAssemblyEmbeddingFromTensorFile.ASSEMBLY_ATTR]
+        item_name = self.data.loc[idx, ResidueAssemblyEmbeddingFromTensorFile.ITEM_NAME_ATTR]
+
+        structure = self.__structure_provider.get_structure(
+            src_name=src_name,
+            src_structure=stringio_from_url(src_structure) if self.structure_location == StructureLocation.remote else src_structure,
+            structure_format=self.structure_format,
+            assembly_id=assembly_id
+        )
+        residue_embedding_files = [
+            f"{self.res_embedding_location}/{src_name}.{ch}.pt" for ch in get_protein_chains(structure, self.min_res_n)
+        ]
+        return concatenate_tensors(residue_embedding_files, self.max_res_n), item_name
+
+
+if __name__ == "__main__":
+
+    dataset = ResidueAssemblyEmbeddingFromTensorFile(
+        src_stream="/Users/joan/tmp/assembly-test.csv",
+        res_embedding_location="/Users/joan/tmp",
+        src_location=SrcLocation.local,
+        structure_location=StructureLocation.local,
+        structure_format=StructureFormat.mmcif
+    )
+
+    dataloader = DataLoader(
+        dataset,
+        batch_size=1,
+        collate_fn=lambda _: _
+    )
+
+    for _batch in dataloader:
+        print(_batch)
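As with the other datasets, the 4-column rows can also be supplied in memory via SrcLocation.stream instead of a CSV; a minimal sketch with hypothetical structure path, assembly id and names:

from rcsb_embedding_model.dataset.residue_assembly_embedding_from_tensor_file import ResidueAssemblyEmbeddingFromTensorFile
from rcsb_embedding_model.types.api_types import SrcLocation

# Hypothetical row: structure name, structure file path, assembly id, output embedding name.
dataset = ResidueAssemblyEmbeddingFromTensorFile(
    src_stream=[("101m", "/data/cif/101m.cif", "1", "101m.1")],
    res_embedding_location="/tmp/residue-embeddings",
    src_location=SrcLocation.stream
)
embedding, name = dataset[0]   # concatenated per-chain residue embeddings and the item name
print(name, embedding.shape)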
rcsb_embedding_model/dataset/residue_embedding_from_tensor_file.py
@@ -0,0 +1,43 @@
+import pandas as pd
+import torch
+from torch.utils.data import Dataset
+
+from rcsb_embedding_model.types.api_types import StructureLocation, SrcLocation
+
+
+class ResidueEmbeddingFromTensorFile(Dataset):
+
+    FILE_ATTR = 'file'
+    ITEM_NAME_ATTR = 'item_name'
+
+    COLUMNS = [FILE_ATTR, ITEM_NAME_ATTR]
+
+    def __init__(
+            self,
+            src_stream,
+            src_location=SrcLocation.local
+    ):
+        super().__init__()
+        self.src_location = src_location
+        self.data = pd.DataFrame()
+        self.__load_stream(src_stream)
+
+    def __load_stream(self, src_stream):
+        self.data = pd.DataFrame(
+            src_stream,
+            dtype=str,
+            columns=self.COLUMNS
+        ) if self.src_location == SrcLocation.stream else pd.read_csv(
+            src_stream,
+            header=None,
+            index_col=None,
+            names=ResidueEmbeddingFromTensorFile.COLUMNS
+        )
+
+    def __len__(self):
+        return len(self.data)
+
+    def __getitem__(self, idx):
+        embedding_src = self.data.loc[idx, ResidueEmbeddingFromTensorFile.FILE_ATTR]
+        item_name = self.data.loc[idx, ResidueEmbeddingFromTensorFile.ITEM_NAME_ATTR]
+        return torch.load(embedding_src, map_location=torch.device('cpu')), item_name