rcsb-embedding-model 0.0.7__py3-none-any.whl → 0.0.9__py3-none-any.whl

Files changed (23)
  1. rcsb_embedding_model/cli/inference.py +125 -29
  2. rcsb_embedding_model/dataset/esm_prot_from_chain.py +102 -0
  3. rcsb_embedding_model/dataset/esm_prot_from_structure.py +64 -0
  4. rcsb_embedding_model/dataset/resdiue_assembly_embedding_from_structure.py +68 -0
  5. rcsb_embedding_model/dataset/residue_assembly_embedding_from_tensor_file.py +94 -0
  6. rcsb_embedding_model/dataset/residue_embedding_from_tensor_file.py +43 -0
  7. rcsb_embedding_model/inference/assembly_inferece.py +53 -0
  8. rcsb_embedding_model/inference/chain_inference.py +12 -8
  9. rcsb_embedding_model/inference/esm_inference.py +18 -8
  10. rcsb_embedding_model/inference/structure_inference.py +22 -12
  11. rcsb_embedding_model/rcsb_structure_embedding.py +5 -5
  12. rcsb_embedding_model/types/api_types.py +27 -5
  13. rcsb_embedding_model/utils/data.py +30 -0
  14. rcsb_embedding_model/utils/structure_parser.py +39 -9
  15. rcsb_embedding_model/utils/structure_provider.py +27 -0
  16. {rcsb_embedding_model-0.0.7.dist-info → rcsb_embedding_model-0.0.9.dist-info}/METADATA +13 -10
  17. rcsb_embedding_model-0.0.9.dist-info/RECORD +29 -0
  18. rcsb_embedding_model/dataset/esm_prot_from_csv.py +0 -90
  19. rcsb_embedding_model/dataset/residue_embedding_from_csv.py +0 -32
  20. rcsb_embedding_model-0.0.7.dist-info/RECORD +0 -24
  21. {rcsb_embedding_model-0.0.7.dist-info → rcsb_embedding_model-0.0.9.dist-info}/WHEEL +0 -0
  22. {rcsb_embedding_model-0.0.7.dist-info → rcsb_embedding_model-0.0.9.dist-info}/entry_points.txt +0 -0
  23. {rcsb_embedding_model-0.0.7.dist-info → rcsb_embedding_model-0.0.9.dist-info}/licenses/LICENSE.md +0 -0
@@ -1,9 +1,11 @@
1
+ import sys
1
2
  from typing import Annotated, List
2
3
 
3
4
  import typer
4
5
 
5
6
  from rcsb_embedding_model.cli.args_utils import arg_devices
6
- from rcsb_embedding_model.types.api_types import SrcFormat, Accelerator, SrcLocation
7
+ from rcsb_embedding_model.types.api_types import StructureFormat, Accelerator, SrcLocation, SrcProteinFrom, \
8
+ StructureLocation, SrcAssemblyFrom
7
9
 
8
10
  app = typer.Typer(
9
11
  add_completion=False
@@ -12,7 +14,7 @@ app = typer.Typer(
12
14
 
13
15
  @app.command(
14
16
  name="residue-embedding",
15
- help="Calculate residue level embeddings of protein structures using ESM3."
17
+ help="Calculate residue level embeddings of protein structures using ESM3. Predictions are stored as torch tensor files."
16
18
  )
17
19
  def residue_embedding(
18
20
  src_file: Annotated[typer.FileText, typer.Option(
@@ -20,21 +22,27 @@ def residue_embedding(
20
22
  file_okay=True,
21
23
  dir_okay=False,
22
24
  resolve_path=True,
23
- help='CSV file 3 columns: Structure File Path | Chain Id (asym_i for cif files) | Output file name.'
25
+ help='CSV file 4 (or 3) columns: Structure Name | Structure File Path | Chain Id (asym_i for cif files. This field is required if src-from=chain) | Output Embedding Name.'
24
26
  )],
25
27
  output_path: Annotated[typer.FileText, typer.Option(
26
28
  exists=True,
27
29
  file_okay=False,
28
30
  dir_okay=True,
29
31
  resolve_path=True,
30
- help='Output path to store predictions.'
32
+ help='Output path to store predictions. Embeddings are stored as torch tensor files.'
31
33
  )],
32
- src_location: Annotated[SrcLocation, typer.Option(
33
- help='Source input location.'
34
- )] = SrcLocation.local,
35
- src_format: Annotated[SrcFormat, typer.Option(
34
+ src_from: Annotated[SrcProteinFrom, typer.Option(
35
+ help='Use specific chains or all chains in a structure.'
36
+ )] = SrcProteinFrom.chain,
37
+ structure_location: Annotated[StructureLocation, typer.Option(
38
+ help='Structure file location.'
39
+ )] = StructureLocation.local,
40
+ structure_format: Annotated[StructureFormat, typer.Option(
36
41
  help='Structure file format.'
37
- )] = SrcFormat.mmcif,
42
+ )] = StructureFormat.mmcif,
43
+ min_res_n: Annotated[int, typer.Option(
44
+ help='When using all chains in a structure, consider only chains with more than <min_res_n> residues.'
45
+ )] = 0,
38
46
  batch_size: Annotated[int, typer.Option(
39
47
  help='Number of samples processed together in one iteration.'
40
48
  )] = 1,
@@ -53,9 +61,12 @@ def residue_embedding(
53
61
  ):
54
62
  from rcsb_embedding_model.inference.esm_inference import predict
55
63
  predict(
56
- csv_file=src_file,
57
- src_location=src_location,
58
- src_format=src_format,
64
+ src_stream=src_file,
65
+ src_location=SrcLocation.local,
66
+ src_from=src_from,
67
+ structure_location=structure_location,
68
+ structure_format=structure_format,
69
+ min_res_n=min_res_n,
59
70
  batch_size=batch_size,
60
71
  num_workers=num_workers,
61
72
  num_nodes=num_nodes,
@@ -67,7 +78,7 @@ def residue_embedding(
67
78
 
68
79
  @app.command(
69
80
  name="structure-embedding",
70
- help="Calculate single-chain protein embeddings from structural files. Predictions are stored in a single pandas data-frame file."
81
+ help="Calculate single-chain protein embeddings from structural files. Predictions are stored in a single pandas DataFrame file."
71
82
  )
72
83
  def structure_embedding(
73
84
  src_file: Annotated[typer.FileText, typer.Option(
@@ -75,24 +86,30 @@ def structure_embedding(
75
86
  file_okay=True,
76
87
  dir_okay=False,
77
88
  resolve_path=True,
78
- help='CSV file 3 columns: Structure File Path | Chain Id (asym_i for cif files) | Output file name.'
89
+ help='CSV file 4 (or 3) columns: Structure Name | Structure File Path | Chain Id (asym_i for cif files. This field is required if src-from=chain) | Output Embedding Name.'
79
90
  )],
80
91
  output_path: Annotated[typer.FileText, typer.Option(
81
92
  exists=True,
82
93
  file_okay=False,
83
94
  dir_okay=True,
84
95
  resolve_path=True,
85
- help='Output path to store predictions.'
96
+ help='Output path to store predictions. Embeddings are stored as a single DataFrame file (see out-df-name).'
86
97
  )],
87
- out_df_id: Annotated[str, typer.Option(
88
- help='File name to store predicted embeddings.'
98
+ out_df_name: Annotated[str, typer.Option(
99
+ help='File name (without extension) for storing embeddings as a pandas DataFrame pickle (.pkl). The DataFrame contains 2 columns: Id | Embedding'
89
100
  )],
90
- src_location: Annotated[SrcLocation, typer.Option(
101
+ src_from: Annotated[SrcProteinFrom, typer.Option(
102
+ help='Use specific chains or all chains in a structure.'
103
+ )] = SrcProteinFrom.chain,
104
+ structure_location: Annotated[StructureLocation, typer.Option(
91
105
  help='Source input location.'
92
- )] = SrcLocation.local,
93
- src_format: Annotated[SrcFormat, typer.Option(
106
+ )] = StructureLocation.local,
107
+ structure_format: Annotated[StructureFormat, typer.Option(
94
108
  help='Structure file format.'
95
- )] = SrcFormat.mmcif,
109
+ )] = StructureFormat.mmcif,
110
+ min_res_n: Annotated[int, typer.Option(
111
+ help='When using all chains in a structure, consider only chains with more than <min_res_n> residues.'
112
+ )] = 0,
96
113
  batch_size: Annotated[int, typer.Option(
97
114
  help='Number of samples processed together in one iteration.'
98
115
  )] = 1,
@@ -111,22 +128,25 @@ def structure_embedding(
111
128
  ):
112
129
  from rcsb_embedding_model.inference.structure_inference import predict
113
130
  predict(
114
- csv_file=src_file,
115
- src_location=src_location,
116
- src_format=src_format,
131
+ src_stream=src_file,
132
+ src_location=SrcLocation.local,
133
+ src_from=src_from,
134
+ structure_location=structure_location,
135
+ structure_format=structure_format,
136
+ min_res_n=min_res_n,
117
137
  batch_size=batch_size,
118
138
  num_workers=num_workers,
119
139
  num_nodes=num_nodes,
120
140
  accelerator=accelerator,
121
141
  devices=arg_devices(devices),
122
142
  out_path=output_path,
123
- out_df_id=out_df_id
143
+ out_df_name=out_df_name
124
144
  )
125
145
 
126
146
 
127
147
  @app.command(
128
148
  name="chain-embedding",
129
- help="Calculate single-chain protein embeddings from residue level embeddings stored as torch tensor files."
149
+ help="Calculate single-chain protein embeddings from residue level embeddings stored as torch tensor files. Predictions are stored as csv files."
130
150
  )
131
151
  def chain_embedding(
132
152
  src_file: Annotated[typer.FileText, typer.Option(
@@ -134,14 +154,14 @@ def chain_embedding(
134
154
  file_okay=True,
135
155
  dir_okay=False,
136
156
  resolve_path=True,
137
- help='CSV file 2 columns: Residue Embedding Tensor File | Output file name.'
157
+ help='CSV file 2 columns: Residue embedding torch tensor file | Output embedding name.'
138
158
  )],
139
159
  output_path: Annotated[typer.FileText, typer.Option(
140
160
  exists=True,
141
161
  file_okay=False,
142
162
  dir_okay=True,
143
163
  resolve_path=True,
144
- help='Output path to store predictions.'
164
+ help='Output path to store predictions. Embeddings are stored as csv files.'
145
165
  )],
146
166
  batch_size: Annotated[int, typer.Option(
147
167
  help='Number of samples processed together in one iteration.'
@@ -161,7 +181,83 @@ def chain_embedding(
161
181
  ):
162
182
  from rcsb_embedding_model.inference.chain_inference import predict
163
183
  predict(
164
- csv_file=src_file,
184
+ src_stream=src_file,
185
+ src_location=SrcLocation.local,
186
+ batch_size=batch_size,
187
+ num_workers=num_workers,
188
+ num_nodes=num_nodes,
189
+ accelerator=accelerator,
190
+ devices=arg_devices(devices),
191
+ out_path=output_path
192
+ )
193
+
194
+ @app.command(
195
+ name="assembly-embedding",
196
+ help="Calculate assembly embeddings from residue level embeddings stored as torch tensor files. Predictions are stored as csv files."
197
+ )
198
+ def assembly_embedding(
199
+ src_file: Annotated[typer.FileText, typer.Option(
200
+ exists=True,
201
+ file_okay=True,
202
+ dir_okay=False,
203
+ resolve_path=True,
204
+ help='CSV file 4 columns: Structure Name | Structure File Path | Assembly Id | Output embedding name.'
205
+ )],
206
+ res_embedding_location: Annotated[typer.FileText, typer.Option(
207
+ exists=True,
208
+ file_okay=False,
209
+ dir_okay=True,
210
+ resolve_path=True,
211
+ help='Path where residue level embeddings for single chains are located.'
212
+ )],
213
+ output_path: Annotated[typer.FileText, typer.Option(
214
+ exists=True,
215
+ file_okay=False,
216
+ dir_okay=True,
217
+ resolve_path=True,
218
+ help='Output path to store predictions. Embeddings are stored as csv files.'
219
+ )],
220
+ src_from: Annotated[SrcAssemblyFrom, typer.Option(
221
+ help='Use specific assembly or all assemblies in a structure.'
222
+ )] = SrcAssemblyFrom.assembly,
223
+ structure_location: Annotated[StructureLocation, typer.Option(
224
+ help='Source input location.'
225
+ )] = StructureLocation.local,
226
+ structure_format: Annotated[StructureFormat, typer.Option(
227
+ help='Structure file format.'
228
+ )] = StructureFormat.mmcif,
229
+ min_res_n: Annotated[int, typer.Option(
230
+ help='Consider only assembly chains with more than <min_res_n> residues.'
231
+ )] = 0,
232
+ max_res_n: Annotated[int, typer.Option(
233
+ help='Stop adding assembly chains when number of residues is greater than <max_res_n> residues.'
234
+ )] = sys.maxsize,
235
+ batch_size: Annotated[int, typer.Option(
236
+ help='Number of samples processed together in one iteration.'
237
+ )] = 1,
238
+ num_workers: Annotated[int, typer.Option(
239
+ help='Number of subprocesses to use for data loading.'
240
+ )] = 0,
241
+ num_nodes: Annotated[int, typer.Option(
242
+ help='Number of nodes to use for inference.'
243
+ )] = 1,
244
+ accelerator: Annotated[Accelerator, typer.Option(
245
+ help='Device used for inference.'
246
+ )] = Accelerator.auto,
247
+ devices: Annotated[List[str], typer.Option(
248
+ help='The devices to use. Can be set to a positive number or "auto". Repeat this argument to indicate multiple indices of devices. "auto" for automatic selection based on the chosen accelerator.'
249
+ )] = tuple(['auto'])
250
+ ):
251
+ from rcsb_embedding_model.inference.assembly_inferece import predict
252
+ predict(
253
+ src_stream=src_file,
254
+ res_embedding_location=res_embedding_location,
255
+ src_location=SrcLocation.local,
256
+ src_from=src_from,
257
+ structure_location=structure_location,
258
+ structure_format=structure_format,
259
+ min_res_n=min_res_n,
260
+ max_res_n=max_res_n,
165
261
  batch_size=batch_size,
166
262
  num_workers=num_workers,
167
263
  num_nodes=num_nodes,
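
Note: a minimal sketch, with hypothetical paths, of the Python call behind the residue-embedding command above; it assumes a local CSV (3 columns when --src-from structure, 4 columns when --src-from chain) and uses only the parameters shown in the new signature.

    from rcsb_embedding_model.inference.esm_inference import predict
    from rcsb_embedding_model.types.api_types import (
        Accelerator, SrcLocation, SrcProteinFrom, StructureFormat, StructureLocation
    )

    predict(
        src_stream="data/structures.csv",        # Name | File Path | Output name (plus Chain Id when src_from=chain)
        src_location=SrcLocation.local,
        src_from=SrcProteinFrom.structure,       # embed every protein chain of each structure
        structure_location=StructureLocation.local,
        structure_format=StructureFormat.mmcif,
        min_res_n=10,                            # skip chains with 10 residues or fewer
        batch_size=8,
        accelerator=Accelerator.auto,
        devices="auto",
        out_path="results/residue_embeddings",   # torch tensor files, per the command help above
    )
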
@@ -0,0 +1,102 @@
1
+ import argparse
2
+
3
+ import torch
4
+ from biotite.structure import chain_iter
5
+ from esm.models.esm3 import ESM3
6
+ from esm.sdk.api import ESMProtein, SamplingConfig
7
+ from esm.utils.constants.models import ESM3_OPEN_SMALL
8
+ from esm.utils.structure.protein_chain import ProteinChain
9
+ from torch.utils.data import Dataset, DataLoader
10
+ import pandas as pd
11
+
12
+ from rcsb_embedding_model.types.api_types import StructureFormat, StructureLocation, SrcLocation
13
+ from rcsb_embedding_model.utils.data import stringio_from_url
14
+ from rcsb_embedding_model.utils.structure_parser import rename_atom_ch
15
+ from rcsb_embedding_model.utils.structure_provider import StructureProvider
16
+
17
+
18
+ class EsmProtFromChain(Dataset):
19
+
20
+ STREAM_NAME_ATTR = 'stream_name'
21
+ STREAM_ATTR = 'stream'
22
+ CH_ATTR = 'chain_id'
23
+ ITEM_NAME_ATTR = 'item_name'
24
+
25
+ COLUMNS = [STREAM_NAME_ATTR, STREAM_ATTR, CH_ATTR, ITEM_NAME_ATTR]
26
+
27
+ def __init__(
28
+ self,
29
+ src_stream,
30
+ src_location=SrcLocation.local,
31
+ structure_location=StructureLocation.local,
32
+ structure_format=StructureFormat.mmcif,
33
+ structure_provider=StructureProvider()
34
+ ):
35
+ super().__init__()
36
+ self.__structure_provider = structure_provider
37
+ self.src_location = src_location
38
+ self.structure_location = structure_location
39
+ self.structure_format = structure_format
40
+ self.data = pd.DataFrame()
41
+ self.__load_stream(src_stream)
42
+
43
+ def __load_stream(self, src_stream):
44
+ self.data = pd.DataFrame(
45
+ src_stream,
46
+ dtype=str,
47
+ columns=EsmProtFromChain.COLUMNS
48
+ ) if self.src_location == SrcLocation.stream else pd.read_csv(
49
+ src_stream,
50
+ header=None,
51
+ index_col=None,
52
+ dtype=str,
53
+ names=EsmProtFromChain.COLUMNS
54
+ )
55
+
56
+ def __len__(self):
57
+ return len(self.data)
58
+
59
+ def __getitem__(self, idx):
60
+ src_name = self.data.loc[idx, EsmProtFromChain.STREAM_NAME_ATTR]
61
+ src_structure = self.data.loc[idx, EsmProtFromChain.STREAM_ATTR]
62
+ chain_id = self.data.loc[idx, EsmProtFromChain.CH_ATTR]
63
+ item_name = self.data.loc[idx, EsmProtFromChain.ITEM_NAME_ATTR]
64
+ structure = self.__structure_provider.get_structure(
65
+ src_name=src_name,
66
+ src_structure=stringio_from_url(src_structure) if self.structure_location == StructureLocation.remote else src_structure,
67
+ structure_format=self.structure_format,
68
+ chain_id=chain_id
69
+ )
70
+ for atom_ch in chain_iter(structure):
71
+ protein_chain = ProteinChain.from_atomarray(rename_atom_ch(atom_ch))
72
+ return ESMProtein.from_protein_chain(protein_chain), item_name
73
+
74
+
75
+ if __name__ == '__main__':
76
+
77
+ parser = argparse.ArgumentParser()
78
+ parser.add_argument('--file_list', type=argparse.FileType('r'), required=True)
79
+ args = parser.parse_args()
80
+
81
+ dataset = EsmProtFromChain(
82
+ args.file_list
83
+ )
84
+
85
+ esm3 = ESM3.from_pretrained(
86
+ ESM3_OPEN_SMALL,
87
+ torch.device("cpu")
88
+ )
89
+
90
+ dataloader = DataLoader(
91
+ dataset,
92
+ batch_size=2,
93
+ collate_fn=lambda _: _
94
+ )
95
+
96
+ for _batch in dataloader:
97
+ for esm_prot, prot_name in _batch:
98
+ protein_tensor = esm3.encode(esm_prot)
99
+ embeddings = esm3.forward_and_sample(
100
+ protein_tensor, SamplingConfig(return_per_residue_embeddings=True)
101
+ ).per_residue_embedding
102
+ print(prot_name, embeddings.shape)
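
Note: besides a CSV path, EsmProtFromChain also accepts pre-built rows when src_location=SrcLocation.stream; a minimal sketch with hypothetical entry names and file paths matching COLUMNS.

    from rcsb_embedding_model.dataset.esm_prot_from_chain import EsmProtFromChain
    from rcsb_embedding_model.types.api_types import SrcLocation

    rows = (
        # stream_name | stream (path or IO) | chain_id | item_name
        ("1acb", "data/1acb.cif", "A", "1acb.A"),
        ("1acb", "data/1acb.cif", "B", "1acb.B"),
    )
    dataset = EsmProtFromChain(src_stream=rows, src_location=SrcLocation.stream)
    esm_prot, item_name = dataset[0]   # ESMProtein built from chain A, "1acb.A"
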
@@ -0,0 +1,64 @@
1
+
2
+ import pandas as pd
3
+
4
+ from rcsb_embedding_model.dataset.esm_prot_from_chain import EsmProtFromChain
5
+ from rcsb_embedding_model.types.api_types import StructureLocation, StructureFormat, SrcLocation
6
+ from rcsb_embedding_model.utils.data import stringio_from_url
7
+ from rcsb_embedding_model.utils.structure_parser import get_protein_chains
8
+ from rcsb_embedding_model.utils.structure_provider import StructureProvider
9
+
10
+
11
+ class EsmProtFromStructure(EsmProtFromChain):
12
+
13
+ STREAM_NAME_ATTR = 'stream_name'
14
+ STREAM_ATTR = 'stream'
15
+ ITEM_NAME_ATTR = 'item_name'
16
+
17
+ COLUMNS = [STREAM_NAME_ATTR, STREAM_ATTR, ITEM_NAME_ATTR]
18
+
19
+ def __init__(
20
+ self,
21
+ src_stream,
22
+ src_location=SrcLocation.local,
23
+ structure_location=StructureLocation.local,
24
+ structure_format=StructureFormat.mmcif,
25
+ min_res_n=0,
26
+ structure_provider=StructureProvider()
27
+ ):
28
+ self.min_res_n = min_res_n
29
+ self.src_location = src_location
30
+ self.structure_location = structure_location
31
+ self.structure_format = structure_format
32
+ self.__structure_provider = structure_provider
33
+ super().__init__(
34
+ src_stream=self.__get_chains(src_stream),
35
+ src_location=SrcLocation.stream,
36
+ structure_location=structure_location,
37
+ structure_format=structure_format,
38
+ structure_provider=structure_provider
39
+ )
40
+
41
+ def __get_chains(self, src_stream):
42
+ chains = []
43
+ for idx, row in (pd.DataFrame(
44
+ src_stream,
45
+ dtype=str,
46
+ columns=self.COLUMNS
47
+ ) if self.src_location == SrcLocation.stream else pd.read_csv(
48
+ src_stream,
49
+ header=None,
50
+ index_col=None,
51
+ dtype=str,
52
+ names=EsmProtFromStructure.COLUMNS
53
+ )).iterrows():
54
+ src_name = row[EsmProtFromStructure.STREAM_NAME_ATTR]
55
+ src_structure = row[EsmProtFromStructure.STREAM_ATTR]
56
+ item_name = row[EsmProtFromStructure.ITEM_NAME_ATTR]
57
+ structure = self.__structure_provider.get_structure(
58
+ src_name=src_name,
59
+ src_structure=stringio_from_url(src_structure) if self.structure_location == StructureLocation.remote else src_structure,
60
+ structure_format=self.structure_format
61
+ )
62
+ for ch in get_protein_chains(structure, self.min_res_n):
63
+ chains.append((src_name, src_structure, ch, f"{item_name}.{ch}"))
64
+ return tuple(chains)
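
Note: a minimal sketch, with a hypothetical CSV, of how this dataset expands each structure into one item per protein chain (the chain id is appended to the item name).

    from rcsb_embedding_model.dataset.esm_prot_from_structure import EsmProtFromStructure

    dataset = EsmProtFromStructure(
        src_stream="data/structures.csv",   # Structure Name | File Path | Output name
        min_res_n=10,                       # ignore chains with 10 residues or fewer
    )
    for idx in range(len(dataset)):
        esm_prot, item_name = dataset[idx]  # item_name looks like "<name>.<chain_id>"
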
@@ -0,0 +1,68 @@
1
+ import sys
2
+
3
+ import pandas as pd
4
+
5
+ from rcsb_embedding_model.dataset.residue_assembly_embedding_from_tensor_file import ResidueAssemblyEmbeddingFromTensorFile
6
+ from rcsb_embedding_model.types.api_types import SrcLocation, StructureLocation, StructureFormat
7
+ from rcsb_embedding_model.utils.data import stringio_from_url
8
+ from rcsb_embedding_model.utils.structure_parser import get_assemblies
9
+ from rcsb_embedding_model.utils.structure_provider import StructureProvider
10
+
11
+
12
+ class ResidueAssemblyDatasetFromStructure(ResidueAssemblyEmbeddingFromTensorFile):
13
+
14
+ STREAM_NAME_ATTR = 'stream_name'
15
+ STREAM_ATTR = 'stream'
16
+ ITEM_NAME_ATTR = 'item_name'
17
+
18
+ COLUMNS = [STREAM_NAME_ATTR, STREAM_ATTR, ITEM_NAME_ATTR]
19
+
20
+ def __init__(
21
+ self,
22
+ src_stream,
23
+ res_embedding_location,
24
+ src_location=SrcLocation.local,
25
+ structure_location=StructureLocation.local,
26
+ structure_format=StructureFormat.mmcif,
27
+ min_res_n=0,
28
+ max_res_n=sys.maxsize,
29
+ structure_provider=StructureProvider()
30
+ ):
31
+ self.src_location = src_location
32
+ self.structure_location = structure_location
33
+ self.structure_format = structure_format
34
+ self.min_res_n = min_res_n
35
+ self.max_res_n = max_res_n
36
+ self.__structure_provider = structure_provider
37
+ super().__init__(
38
+ src_stream=self.__get_assemblies(src_stream),
39
+ res_embedding_location=res_embedding_location,
40
+ src_location=src_location,
41
+ structure_location=structure_location,
42
+ structure_format=structure_format,
43
+ min_res_n=min_res_n,
44
+ max_res_n=max_res_n,
45
+ structure_provider=structure_provider
46
+ )
47
+
48
+ def __get_assemblies(self, src_stream):
49
+ assemblies = []
50
+ for idx, row in (pd.DataFrame(
51
+ src_stream,
52
+ dtype=str,
53
+ columns=self.COLUMNS
54
+ ) if self.src_location == SrcLocation.stream else pd.read_csv(
55
+ src_stream,
56
+ header=None,
57
+ index_col=None,
58
+ dtype=str,
59
+ names=ResidueAssemblyDatasetFromStructure.COLUMNS
60
+ )).iterrows():
61
+ src_name = row[ResidueAssemblyDatasetFromStructure.STREAM_NAME_ATTR]
62
+ src_structure = row[ResidueAssemblyDatasetFromStructure.STREAM_ATTR]
63
+ src_structure = stringio_from_url(src_structure) if self.structure_location == StructureLocation.remote else src_structure
64
+ item_name = row[ResidueAssemblyDatasetFromStructure.ITEM_NAME_ATTR]
65
+ for assembly_id in get_assemblies(src_structure=src_structure, structure_format=self.structure_format):
66
+ assemblies.append((src_name, src_structure, str(assembly_id), f"{item_name}.{assembly_id}"))
67
+
68
+ return tuple(assemblies)
@@ -0,0 +1,94 @@
1
+ import sys
2
+
3
+ import pandas as pd
4
+ from torch.utils.data import Dataset, DataLoader
5
+
6
+ from rcsb_embedding_model.types.api_types import StructureLocation, StructureFormat, SrcLocation
7
+ from rcsb_embedding_model.utils.data import stringio_from_url, concatenate_tensors
8
+ from rcsb_embedding_model.utils.structure_parser import get_protein_chains
9
+ from rcsb_embedding_model.utils.structure_provider import StructureProvider
10
+
11
+
12
+ class ResidueAssemblyEmbeddingFromTensorFile(Dataset):
13
+
14
+ STREAM_NAME_ATTR = 'stream_name'
15
+ STREAM_ATTR = 'stream'
16
+ ASSEMBLY_ATTR = 'assembly_id'
17
+ ITEM_NAME_ATTR = 'item_name'
18
+
19
+ COLUMNS = [STREAM_NAME_ATTR, STREAM_ATTR, ASSEMBLY_ATTR, ITEM_NAME_ATTR]
20
+
21
+ def __init__(
22
+ self,
23
+ src_stream,
24
+ res_embedding_location,
25
+ src_location=SrcLocation.local,
26
+ structure_location=StructureLocation.local,
27
+ structure_format=StructureFormat.mmcif,
28
+ min_res_n=0,
29
+ max_res_n=sys.maxsize,
30
+ structure_provider=StructureProvider()
31
+ ):
32
+ super().__init__()
33
+ self.res_embedding_location = res_embedding_location
34
+ self.src_location = src_location
35
+ self.structure_location = structure_location
36
+ self.structure_format = structure_format
37
+ self.min_res_n = min_res_n
38
+ self.max_res_n = max_res_n
39
+ self.data = pd.DataFrame()
40
+ self.__load_stream(src_stream)
41
+ self.__structure_provider = structure_provider
42
+
43
+ def __load_stream(self, src_stream):
44
+ self.data = pd.DataFrame(
45
+ src_stream,
46
+ dtype=str,
47
+ columns=ResidueAssemblyEmbeddingFromTensorFile.COLUMNS
48
+ ) if self.src_location == SrcLocation.stream else pd.read_csv(
49
+ src_stream,
50
+ header=None,
51
+ index_col=None,
52
+ dtype=str,
53
+ names=ResidueAssemblyEmbeddingFromTensorFile.COLUMNS
54
+ )
55
+
56
+ def __len__(self):
57
+ return len(self.data)
58
+
59
+ def __getitem__(self, idx):
60
+ src_name = self.data.loc[idx, ResidueAssemblyEmbeddingFromTensorFile.STREAM_NAME_ATTR]
61
+ src_structure = self.data.loc[idx, ResidueAssemblyEmbeddingFromTensorFile.STREAM_ATTR]
62
+ assembly_id = self.data.loc[idx, ResidueAssemblyEmbeddingFromTensorFile.ASSEMBLY_ATTR]
63
+ item_name = self.data.loc[idx, ResidueAssemblyEmbeddingFromTensorFile.ITEM_NAME_ATTR]
64
+
65
+ structure = self.__structure_provider.get_structure(
66
+ src_name=src_name,
67
+ src_structure=stringio_from_url(src_structure) if self.structure_location == StructureLocation.remote else src_structure,
68
+ structure_format=self.structure_format,
69
+ assembly_id=assembly_id
70
+ )
71
+ residue_embedding_files = [
72
+ f"{self.res_embedding_location}/{src_name}.{ch}.pt" for ch in get_protein_chains(structure, self.min_res_n)
73
+ ]
74
+ return concatenate_tensors(residue_embedding_files, self.max_res_n), item_name
75
+
76
+
77
+ if __name__ == "__main__":
78
+
79
+ dataset = ResidueAssemblyEmbeddingFromTensorFile(
80
+ src_stream="/Users/joan/tmp/assembly-test.csv",
81
+ res_embedding_location="/Users/joan/tmp",
82
+ src_location=SrcLocation.local,
83
+ structure_location=StructureLocation.local,
84
+ structure_format=StructureFormat.mmcif
85
+ )
86
+
87
+ dataloader = DataLoader(
88
+ dataset,
89
+ batch_size=1,
90
+ collate_fn=lambda _: _
91
+ )
92
+
93
+ for _batch in dataloader:
94
+ print(_batch)
@@ -0,0 +1,43 @@
1
+ import pandas as pd
2
+ import torch
3
+ from torch.utils.data import Dataset
4
+
5
+ from rcsb_embedding_model.types.api_types import StructureLocation, SrcLocation
6
+
7
+
8
+ class ResidueEmbeddingFromTensorFile(Dataset):
9
+
10
+ FILE_ATTR = 'file'
11
+ ITEM_NAME_ATTR = 'item_name'
12
+
13
+ COLUMNS = [FILE_ATTR, ITEM_NAME_ATTR]
14
+
15
+ def __init__(
16
+ self,
17
+ src_stream,
18
+ src_location=SrcLocation.local
19
+ ):
20
+ super().__init__()
21
+ self.src_location = src_location
22
+ self.data = pd.DataFrame()
23
+ self.__load_stream(src_stream)
24
+
25
+ def __load_stream(self, src_stream):
26
+ self.data = pd.DataFrame(
27
+ src_stream,
28
+ dtype=str,
29
+ columns=self.COLUMNS
30
+ ) if self.src_location == SrcLocation.stream else pd.read_csv(
31
+ src_stream,
32
+ header=None,
33
+ index_col=None,
34
+ names=ResidueEmbeddingFromTensorFile.COLUMNS
35
+ )
36
+
37
+ def __len__(self):
38
+ return len(self.data)
39
+
40
+ def __getitem__(self, idx):
41
+ embedding_src = self.data.loc[idx, ResidueEmbeddingFromTensorFile.FILE_ATTR]
42
+ item_name = self.data.loc[idx, ResidueEmbeddingFromTensorFile.ITEM_NAME_ATTR]
43
+ return torch.load(embedding_src, map_location=torch.device('cpu')), item_name
@@ -0,0 +1,53 @@
1
+ import sys
2
+
3
+ from rcsb_embedding_model.dataset.resdiue_assembly_embedding_from_structure import ResidueAssemblyDatasetFromStructure
4
+ from rcsb_embedding_model.dataset.residue_assembly_embedding_from_tensor_file import ResidueAssemblyEmbeddingFromTensorFile
5
+ from rcsb_embedding_model.types.api_types import FileOrStreamTuple, SrcLocation, Accelerator, Devices, OptionalPath, EmbeddingPath, StructureLocation, StructureFormat, SrcAssemblyFrom
6
+ from rcsb_embedding_model.inference.chain_inference import predict as chain_predict
7
+
8
+
9
+ def predict(
10
+ src_stream: FileOrStreamTuple,
11
+ res_embedding_location: EmbeddingPath,
12
+ src_location: SrcLocation = SrcLocation.local,
13
+ src_from: SrcAssemblyFrom = SrcAssemblyFrom.assembly,
14
+ structure_location: StructureLocation = StructureLocation.local,
15
+ structure_format: StructureFormat = StructureFormat.mmcif,
16
+ min_res_n: int = 0,
17
+ max_res_n: int = sys.maxsize,
18
+ batch_size: int = 1,
19
+ num_workers: int = 0,
20
+ num_nodes: int = 1,
21
+ accelerator: Accelerator = Accelerator.auto,
22
+ devices: Devices = 'auto',
23
+ out_path: OptionalPath = None
24
+ ):
25
+ inference_set = ResidueAssemblyEmbeddingFromTensorFile(
26
+ src_stream=src_stream,
27
+ res_embedding_location=res_embedding_location,
28
+ src_location=src_location,
29
+ structure_location=structure_location,
30
+ structure_format=structure_format,
31
+ min_res_n=min_res_n,
32
+ max_res_n=max_res_n
33
+ ) if src_from == SrcAssemblyFrom.assembly else ResidueAssemblyDatasetFromStructure(
34
+ src_stream=src_stream,
35
+ res_embedding_location=res_embedding_location,
36
+ src_location=src_location,
37
+ structure_location=structure_location,
38
+ structure_format=structure_format,
39
+ min_res_n=min_res_n,
40
+ max_res_n=max_res_n
41
+ )
42
+
43
+ return chain_predict(
44
+ src_stream=src_stream,
45
+ src_location=src_location,
46
+ batch_size=batch_size,
47
+ num_workers=num_workers,
48
+ num_nodes=num_nodes,
49
+ accelerator=accelerator,
50
+ devices=devices,
51
+ out_path=out_path,
52
+ inference_set=inference_set
53
+ )
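
Note: a minimal sketch, with hypothetical paths, of calling this entry point directly; it mirrors the new assembly-embedding command and expects the per-chain residue tensors written by residue-embedding under res_embedding_location.

    from rcsb_embedding_model.inference.assembly_inferece import predict
    from rcsb_embedding_model.types.api_types import (
        SrcAssemblyFrom, SrcLocation, StructureFormat, StructureLocation
    )

    predict(
        src_stream="data/assemblies.csv",                      # Name | File Path | Assembly Id | Output name
        res_embedding_location="results/residue_embeddings",
        src_location=SrcLocation.local,
        src_from=SrcAssemblyFrom.assembly,                     # or SrcAssemblyFrom.structure to expand all assemblies
        structure_location=StructureLocation.local,
        structure_format=StructureFormat.mmcif,
        max_res_n=50000,                                       # stop concatenating chains beyond this residue count
        out_path="results/assembly_embeddings",                # csv output, per the command help above
    )
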
@@ -1,26 +1,30 @@
1
1
  from torch.utils.data import DataLoader
2
2
  from lightning import Trainer
3
- from typer import FileText
4
3
 
5
- from rcsb_embedding_model.dataset.residue_embedding_from_csv import ResidueEmbeddingFromCSV
4
+ from rcsb_embedding_model.dataset.residue_embedding_from_tensor_file import ResidueEmbeddingFromTensorFile
6
5
  from rcsb_embedding_model.modules.chain_module import ChainModule
7
- from rcsb_embedding_model.types.api_types import Accelerator, Devices, OptionalPath
6
+ from rcsb_embedding_model.types.api_types import Accelerator, Devices, OptionalPath, FileOrStreamTuple, SrcLocation
8
7
  from rcsb_embedding_model.utils.data import collate_seq_embeddings
9
8
  from rcsb_embedding_model.writer.batch_writer import CsvBatchWriter
10
9
 
11
10
 
12
11
  def predict(
13
- csv_file: FileText,
12
+ src_stream: FileOrStreamTuple,
13
+ src_location: SrcLocation = SrcLocation.local,
14
14
  batch_size: int = 1,
15
15
  num_workers: int = 0,
16
16
  num_nodes: int = 1,
17
17
  accelerator: Accelerator = Accelerator.auto,
18
18
  devices: Devices = 'auto',
19
- out_path: OptionalPath = None
19
+ out_path: OptionalPath = None,
20
+ inference_set=None
20
21
  ):
21
- inference_set = ResidueEmbeddingFromCSV(
22
- csv_file=csv_file
23
- )
22
+
23
+ if inference_set is None:
24
+ inference_set = ResidueEmbeddingFromTensorFile(
25
+ src_stream=src_stream,
26
+ src_location=src_location
27
+ )
24
28
 
25
29
  inference_dataloader = DataLoader(
26
30
  dataset=inference_set,
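
Note: the new optional inference_set argument is how assembly inference reuses this routine; a minimal sketch (hypothetical paths) that swaps in a custom dataset, in which case src_stream is ignored.

    from rcsb_embedding_model.dataset.residue_embedding_from_tensor_file import ResidueEmbeddingFromTensorFile
    from rcsb_embedding_model.inference.chain_inference import predict

    custom_set = ResidueEmbeddingFromTensorFile(src_stream="results/residue_embeddings.csv")
    predict(
        src_stream=None,                      # unused when inference_set is supplied
        inference_set=custom_set,
        batch_size=4,
        out_path="results/chain_embeddings",  # csv output, per the chain-embedding help text
    )
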
@@ -1,17 +1,20 @@
1
1
  from torch.utils.data import DataLoader
2
2
  from lightning import Trainer
3
- from typer import FileText
4
3
 
5
- from rcsb_embedding_model.dataset.esm_prot_from_csv import EsmProtFromCsv
4
+ from rcsb_embedding_model.dataset.esm_prot_from_structure import EsmProtFromStructure
5
+ from rcsb_embedding_model.dataset.esm_prot_from_chain import EsmProtFromChain
6
6
  from rcsb_embedding_model.modules.esm_module import EsmModule
7
- from rcsb_embedding_model.types.api_types import SrcFormat, Accelerator, Devices, OptionalPath, SrcLocation
7
+ from rcsb_embedding_model.types.api_types import StructureFormat, Accelerator, Devices, OptionalPath, StructureLocation, SrcProteinFrom, FileOrStreamTuple, SrcLocation
8
8
  from rcsb_embedding_model.writer.batch_writer import TensorBatchWriter
9
9
 
10
10
 
11
11
  def predict(
12
- csv_file: FileText,
12
+ src_stream: FileOrStreamTuple,
13
13
  src_location: SrcLocation = SrcLocation.local,
14
- src_format: SrcFormat = SrcFormat.mmcif,
14
+ src_from: SrcProteinFrom = SrcProteinFrom.chain,
15
+ structure_location: StructureLocation = StructureLocation.local,
16
+ structure_format: StructureFormat = StructureFormat.mmcif,
17
+ min_res_n: int = 0,
15
18
  batch_size: int = 1,
16
19
  num_workers: int = 0,
17
20
  num_nodes: int = 1,
@@ -20,10 +23,17 @@ def predict(
20
23
  out_path: OptionalPath = None
21
24
  ):
22
25
 
23
- inference_set = EsmProtFromCsv(
24
- csv_file=csv_file,
26
+ inference_set = EsmProtFromChain(
27
+ src_stream=src_stream,
25
28
  src_location=src_location,
26
- src_format=src_format
29
+ structure_location=structure_location,
30
+ structure_format=structure_format
31
+ ) if src_from == SrcProteinFrom.chain else EsmProtFromStructure(
32
+ src_stream=src_stream,
33
+ src_location=src_location,
34
+ structure_location=structure_location,
35
+ structure_format=structure_format,
36
+ min_res_n=min_res_n
27
37
  )
28
38
 
29
39
  inference_dataloader = DataLoader(
@@ -1,30 +1,40 @@
1
1
  from torch.utils.data import DataLoader
2
2
  from lightning import Trainer
3
- from typer import FileText
4
3
 
5
- from rcsb_embedding_model.dataset.esm_prot_from_csv import EsmProtFromCsv
6
- from rcsb_embedding_model.modules.esm_module import EsmModule
7
- from rcsb_embedding_model.types.api_types import SrcFormat, Accelerator, Devices, OptionalPath, SrcLocation
4
+ from rcsb_embedding_model.dataset.esm_prot_from_structure import EsmProtFromStructure
5
+ from rcsb_embedding_model.dataset.esm_prot_from_chain import EsmProtFromChain
6
+ from rcsb_embedding_model.modules.structure_module import StructureModule
7
+ from rcsb_embedding_model.types.api_types import StructureFormat, Accelerator, Devices, OptionalPath, StructureLocation, SrcProteinFrom, FileOrStreamTuple, SrcLocation
8
8
  from rcsb_embedding_model.writer.batch_writer import DataFrameStorage
9
9
 
10
10
 
11
11
  def predict(
12
- csv_file: FileText,
12
+ src_stream: FileOrStreamTuple,
13
13
  src_location: SrcLocation = SrcLocation.local,
14
- src_format: SrcFormat = SrcFormat.mmcif,
14
+ src_from: SrcProteinFrom = SrcProteinFrom.chain,
15
+ structure_location: StructureLocation = StructureLocation.local,
16
+ structure_format: StructureFormat = StructureFormat.mmcif,
17
+ min_res_n: int = 0,
15
18
  batch_size: int = 1,
16
19
  num_workers: int = 0,
17
20
  num_nodes: int = 1,
18
21
  accelerator: Accelerator = Accelerator.auto,
19
22
  devices: Devices = 'auto',
20
23
  out_path: OptionalPath = None,
21
- out_df_id: str = None
24
+ out_df_name: str = None
22
25
  ):
23
26
 
24
- inference_set = EsmProtFromCsv(
25
- csv_file=csv_file,
27
+ inference_set = EsmProtFromChain(
28
+ src_stream=src_stream,
26
29
  src_location=src_location,
27
- src_format=src_format
30
+ structure_location=structure_location,
31
+ structure_format=structure_format
32
+ ) if src_from == SrcProteinFrom.chain else EsmProtFromStructure(
33
+ src_stream=src_stream,
34
+ src_location=src_location,
35
+ structure_location=structure_location,
36
+ structure_format=structure_format,
37
+ min_res_n=min_res_n
28
38
  )
29
39
 
30
40
  inference_dataloader = DataLoader(
@@ -34,8 +44,8 @@ def predict(
34
44
  collate_fn=lambda _: _
35
45
  )
36
46
 
37
- module = EsmModule()
38
- inference_writer = DataFrameStorage(out_path, out_df_id) if out_path is not None and out_df_id is not None else None
47
+ module = StructureModule()
48
+ inference_writer = DataFrameStorage(out_path, out_df_name) if out_path is not None and out_df_name is not None else None
39
49
  trainer = Trainer(
40
50
  callbacks=[inference_writer] if inference_writer is not None else None,
41
51
  num_nodes=num_nodes,
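
Note: structure inference now runs StructureModule and writes through DataFrameStorage(out_path, out_df_name); a minimal sketch of reading the result back, assuming the writer stores a pandas pickle named <out_df_name>.pkl under out_path (hypothetical paths).

    import pandas as pd

    # Two columns per the CLI help: Id | Embedding
    df = pd.read_pickle("results/structure_embeddings/df-res-embeddings.pkl")
    print(df.shape)
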
@@ -3,7 +3,7 @@ from biotite.structure import get_residues, chain_iter, filter_amino_acids
3
3
  from esm.sdk.api import ESMProtein, SamplingConfig
4
4
  from esm.utils.structure.protein_chain import ProteinChain
5
5
 
6
- from rcsb_embedding_model.types.api_types import StreamSrc, SrcFormat
6
+ from rcsb_embedding_model.types.api_types import StreamSrc, StructureFormat
7
7
  from rcsb_embedding_model.utils.model import get_aggregator_model, get_residue_model
8
8
  from rcsb_embedding_model.utils.structure_parser import get_structure_from_src
9
9
 
@@ -42,22 +42,22 @@ class RcsbStructureEmbedding:
42
42
  def structure_embedding(
43
43
  self,
44
44
  src_structure: StreamSrc,
45
- src_format: SrcFormat = SrcFormat.mmcif,
45
+ structure_format: StructureFormat = StructureFormat.mmcif,
46
46
  chain_id: str = None,
47
47
  assembly_id: str = None
48
48
  ):
49
- res_embedding = self.residue_embedding(src_structure, src_format, chain_id, assembly_id)
49
+ res_embedding = self.residue_embedding(src_structure, structure_format, chain_id, assembly_id)
50
50
  return self.aggregator_embedding(res_embedding)
51
51
 
52
52
  def residue_embedding(
53
53
  self,
54
54
  src_structure: StreamSrc,
55
- src_format: SrcFormat = SrcFormat.mmcif,
55
+ structure_format: StructureFormat = StructureFormat.mmcif,
56
56
  chain_id: str = None,
57
57
  assembly_id: str = None
58
58
  ):
59
59
  self.__check_residue_embedding()
60
- structure = get_structure_from_src(src_structure, src_format, chain_id, assembly_id)
60
+ structure = get_structure_from_src(src_structure, structure_format, chain_id, assembly_id)
61
61
  embedding_ch = []
62
62
  for atom_ch in chain_iter(structure):
63
63
  atom_res = atom_ch[filter_amino_acids(atom_ch)]
@@ -1,16 +1,23 @@
1
1
  from enum import Enum
2
- from os import PathLike
3
2
  from typing import NewType, Union, IO, Tuple, List, Optional
4
3
 
5
- StreamSrc = NewType('StreamSrc', Union[PathLike, IO])
6
- StreamTuple = NewType('StreamTuple', Tuple[StreamSrc, str, str])
4
+ from typer import FileText
5
+
6
+ StreamSrc = NewType('StreamSrc', Union[FileText, IO])
7
+ StreamTuple = NewType('StreamTuple', Union[
8
+ Tuple[str, StreamSrc, str, str],
9
+ Tuple[str, StreamSrc, str],
10
+ Tuple[str, str]
11
+ ])
12
+ FileOrStreamTuple = NewType('FileOrStreamTuple', Union[FileText, StreamTuple])
7
13
 
8
14
  Devices = NewType('Devices', Union[int, List[int], "auto"])
9
15
 
10
- OptionalPath = NewType('OptionalPath', Optional[PathLike])
16
+ EmbeddingPath = Union[str, FileText]
17
+ OptionalPath = NewType('OptionalPath', Optional[FileText])
11
18
 
12
19
 
13
- class SrcFormat(str, Enum):
20
+ class StructureFormat(str, Enum):
14
21
  pdb = "pdb"
15
22
  mmcif = "mmcif"
16
23
  bciff = "binarycif"
@@ -25,5 +32,20 @@ class Accelerator(str, Enum):
25
32
 
26
33
 
27
34
  class SrcLocation(str, Enum):
35
+ local = "local"
36
+ stream = "stream"
37
+
38
+
39
+ class StructureLocation(str, Enum):
28
40
  local = "local"
29
41
  remote = "remote"
42
+
43
+
44
+ class SrcProteinFrom(str, Enum):
45
+ chain = "chain"
46
+ structure = "structure"
47
+
48
+
49
+ class SrcAssemblyFrom(str, Enum):
50
+ assembly = "assembly"
51
+ structure = "structure"
@@ -44,4 +44,34 @@ def stringio_from_url(url):
44
44
  print(f"Error fetching URL: {e}")
45
45
  return None
46
46
 
47
+ def concatenate_tensors(file_list, max_residues, dim=0):
48
+ """
49
+ Concatenates a list of tensors stored in individual files along a specified dimension.
50
+
51
+ Args:
52
+ file_list (list of str): List of file paths to tensor files.
53
+ max_residues (int): Maximum number of residues allowed in the assembly
54
+ dim (int): The dimension along which to concatenate the tensors. Default is 0.
47
55
 
56
+ Returns:
57
+ torch.Tensor: The concatenated tensor.
58
+ """
59
+ tensors = []
60
+ total_residues = 0
61
+ for file in file_list:
62
+ try:
63
+ tensor = torch.load(
64
+ file,
65
+ map_location=torch.device('cpu')
66
+ )
67
+ total_residues += tensor.shape[0]
68
+ tensors.append(tensor)
69
+ except Exception as e:
70
+ continue
71
+ if total_residues > max_residues:
72
+ break
73
+ if tensors and len(tensors) > 0:
74
+ tensor_cat = torch.cat(tensors, dim=dim)
75
+ return tensor_cat
76
+ else:
77
+ raise ValueError("No valid tensors were loaded to concatenate.")
@@ -1,32 +1,62 @@
1
-
2
- from biotite.structure.io.pdb import PDBFile, get_structure as get_pdb_structure, get_assembly as get_pdb_assembly
3
- from biotite.structure.io.pdbx import CIFFile, get_structure, get_assembly, BinaryCIFFile
1
+ from biotite.structure import filter_amino_acids, chain_iter, get_chains, get_residues, AtomArray
2
+ from biotite.structure.io.pdb import PDBFile, get_structure as get_pdb_structure, get_assembly as get_pdb_assembly, list_assemblies as list_pdb_assemblies
3
+ from biotite.structure.io.pdbx import CIFFile, get_structure, get_assembly, BinaryCIFFile, list_assemblies
4
4
 
5
5
 
6
6
  def get_structure_from_src(
7
7
  src_structure,
8
- src_format="mmcif",
8
+ structure_format="mmcif",
9
9
  chain_id=None,
10
10
  assembly_id=None
11
11
  ):
12
- if src_format == "pdb":
12
+ if structure_format == "pdb":
13
13
  pdb_file = PDBFile.read(src_structure)
14
14
  structure = __get_pdb_structure(pdb_file, assembly_id)
15
- elif src_format == "mmcif":
15
+ elif structure_format == "mmcif":
16
16
  cif_file = CIFFile.read(src_structure)
17
17
  structure = __get_structure(cif_file, assembly_id)
18
- elif src_format == "binarycif":
18
+ elif structure_format == "binarycif":
19
19
  cif_file = BinaryCIFFile.read(src_structure)
20
20
  structure = __get_structure(cif_file, assembly_id)
21
21
  else:
22
- raise RuntimeError(f"Unknown file format {src_format}")
22
+ raise RuntimeError(f"Unknown file format {structure_format}")
23
23
 
24
24
  if chain_id is not None:
25
- structure = structure[structure.chain_id == chain_id]
25
+ return structure[structure.chain_id == chain_id]
26
26
 
27
27
  return structure
28
28
 
29
29
 
30
+ def get_protein_chains(structure, min_res_n=0):
31
+ chain_ids = []
32
+ for atom_ch in chain_iter(structure):
33
+ atom_res = atom_ch[filter_amino_acids(atom_ch)]
34
+ if len(atom_res) > 0 and len(get_residues(atom_res)) > min_res_n:
35
+ chain_ids.append(str(get_chains(atom_res)[0]))
36
+ return tuple(chain_ids)
37
+
38
+
39
+ def get_assemblies(src_structure, structure_format="mmcif"):
40
+ if structure_format == "pdb":
41
+ return tuple(list_pdb_assemblies(PDBFile.read(src_structure)))
42
+ elif structure_format == "mmcif":
43
+ return tuple(list_assemblies(CIFFile.read(src_structure)).keys())
44
+ elif structure_format == "binarycif":
45
+ return tuple(list_assemblies(BinaryCIFFile.read(src_structure)))
46
+ else:
47
+ raise RuntimeError(f"Unknown file format {structure_format}")
48
+
49
+
50
+ def rename_atom_ch(atom_ch, ch="A"):
51
+ renamed_atom_ch = AtomArray(len(atom_ch))
52
+ n = 0
53
+ for atom in atom_ch:
54
+ atom.chain_id = ch
55
+ renamed_atom_ch[n] = atom
56
+ n += 1
57
+ return renamed_atom_ch
58
+
59
+
30
60
  def __get_pdb_structure(pdb_file, assembly_id=None):
31
61
  return get_pdb_structure(
32
62
  pdb_file,
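
Note: a minimal sketch, with a hypothetical mmCIF file, of the new parser helpers added above.

    from rcsb_embedding_model.utils.structure_parser import (
        get_assemblies, get_protein_chains, get_structure_from_src
    )

    structure = get_structure_from_src("data/1acb.cif", structure_format="mmcif")
    chains = get_protein_chains(structure, min_res_n=10)                    # e.g. ('A', 'B')
    assemblies = get_assemblies("data/1acb.cif", structure_format="mmcif")  # assembly ids declared in the file
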
@@ -0,0 +1,27 @@
1
+ from rcsb_embedding_model.utils.structure_parser import get_structure_from_src
2
+
3
+
4
+ class StructureProvider:
5
+
6
+ def __init__(self):
7
+ self.__src_name = None
8
+ self.__structure = None
9
+
10
+ def get_structure(
11
+ self,
12
+ src_name,
13
+ src_structure,
14
+ structure_format="mmcif",
15
+ chain_id=None,
16
+ assembly_id=None
17
+ ):
18
+ if src_name != self.__src_name:
19
+ self.__src_name = src_name
20
+ self.__structure = get_structure_from_src(
21
+ src_structure=src_structure,
22
+ structure_format=structure_format,
23
+ assembly_id=assembly_id
24
+ )
25
+ if chain_id is not None:
26
+ return self.__structure[self.__structure.chain_id == chain_id]
27
+ return self.__structure
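
Note: StructureProvider caches the most recently parsed structure keyed by src_name, so consecutive per-chain requests against the same file (as issued by the datasets above) parse it only once; a minimal sketch with a hypothetical path.

    from rcsb_embedding_model.utils.structure_provider import StructureProvider

    provider = StructureProvider()
    chain_a = provider.get_structure("1acb", "data/1acb.cif", chain_id="A")  # parses the file
    chain_b = provider.get_structure("1acb", "data/1acb.cif", chain_id="B")  # reuses the cached parse
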
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.4
2
2
  Name: rcsb-embedding-model
3
- Version: 0.0.7
3
+ Version: 0.0.9
4
4
  Summary: Protein Embedding Model for Structure Search
5
5
  Project-URL: Homepage, https://github.com/rcsb/rcsb-embedding-model
6
6
  Project-URL: Issues, https://github.com/rcsb/rcsb-embedding-model/issues
@@ -18,7 +18,7 @@ Description-Content-Type: text/markdown
18
18
 
19
19
  # RCSB Embedding Model
20
20
 
21
- **Version** 0.0.7
21
+ **Version** 0.0.9
22
22
 
23
23
 
24
24
  ## Overview
@@ -60,14 +60,17 @@ If you are interested in training the model with a new dataset, visit the [rcsb-
60
60
 
61
61
  ### CLI
62
62
 
63
- # 1. Compute residue embeddings: Calculate residue level embeddings of protein structures using ESM3.
64
- inference residue-embedding --src-file data/structures.csv --output-path results/residue_embeddings --src-format mmcif --batch-size 8 --devices auto
63
+ # 1. Compute residue embeddings: Calculate residue level embeddings of protein structures using ESM3. Predictions are stored as torch tensor files.
64
+ inference residue-embedding --src-file data/structures.csv --output-path results/residue_embeddings --structure-format mmcif --batch-size 8 --devices auto
65
65
 
66
- # 2. Compute structure embeddings: Calculate single-chain protein embeddings from structural files. Predictions are stored in a single pandas data-frame file.
67
- inference structure-embedding --src-file results/residue_embeddings.csv --output-path results/structure_embeddings --out-df-id embeddings.pkl --batch-size 4 --devices 0 --devives 1
66
+ # 2. Compute structure embeddings: Calculate single-chain protein embeddings from structural files. Predictions are stored in a single pandas DataFrame file.
67
+ inference structure-embedding --src-file data/structures.csv --output-path results/residue_embeddings --out-df-name df-res-embeddings --batch-size 4 --devices 0 --devives 1
68
68
 
69
- # 3. Compute chain embeddings: Calculate single-chain protein embeddings from residue level embeddings stored as torch tensor files.
70
- inference chain-embedding --src-file results/residue_embeddings.csv --output-path results/chain_embeddings --batch-size 4
69
+ # 3. Compute chain embeddings: Calculate single-chain protein embeddings from residue level embeddings stored as torch tensor files. Predictions a re stored as csv files.
70
+ inference chain-embedding --src-file data/structures.csv --output-path results/chain_embeddings --batch-size 4
71
+
72
+ # 4. Compute assembly embeddings: Calculate assembly embeddings from residue level embeddings stored as torch tensor files. Predictions are stored as csv files.
73
+ inference assembly-embedding --src-file data/structures.csv --res-embedding-location results/residue_embeddings --output-path results/assembly_embeddings
71
74
 
72
75
  ### Python API
73
76
 
@@ -85,7 +88,7 @@ If you are interested in training the model with a new dataset, visit the [rcsb-
85
88
  # Aggregate to structure-level embedding
86
89
  struct_emb = model.aggregator_embedding(res_emb)
87
90
 
88
- See the examples directory for complete scripts.
91
+ See the examples and tests directories for more use cases.
89
92
 
90
93
  ---
91
94
 
@@ -117,7 +120,7 @@ The aggregation component consists of six transformer encoder layers, each with
117
120
 
118
121
  ## Citation
119
122
 
120
- Segura, J., Bittrich, S., et al. (2024). *Multi-scale structural similarity embedding search across entire proteomes*. bioRxiv. (Preprint: https://www.biorxiv.org/content/10.1101/2024.03.07.XXXXX)
123
+ Segura, J., Bittrich, S., et al. (2024). *Multi-scale structural similarity embedding search across entire proteomes*. bioRxiv. (Preprint: https://www.biorxiv.org/content/10.1101/2025.02.28.640875v1)
121
124
 
122
125
  ---
123
126
 
@@ -0,0 +1,29 @@
1
+ rcsb_embedding_model/__init__.py,sha256=r3gLdeBIXkQEQA_K6QcRPO-TtYuAQSutk6pXRUE_nas,120
2
+ rcsb_embedding_model/rcsb_structure_embedding.py,sha256=dKp9hXQO0JAnO4SEfjJ_mG_jHu3UxAPguv6jkOjp-BI,4487
3
+ rcsb_embedding_model/cli/args_utils.py,sha256=7nP2q8pL5dWK_U7opxtWmoFcYVwasky6elHk-dASFaI,165
4
+ rcsb_embedding_model/cli/inference.py,sha256=KPZLqznbxZE_CBCGigUGg7yOfGsi8ID4aWMTExniRj4,11464
5
+ rcsb_embedding_model/dataset/esm_prot_from_chain.py,sha256=dBD2N0Y-GoN6p3z2yLnOvv6JGn-skAxwgbOYhXKDngc,3487
6
+ rcsb_embedding_model/dataset/esm_prot_from_structure.py,sha256=URbT_SAp60MOZlOHnI3hdOBghR73TMNBV6jrtj5YUXA,2536
7
+ rcsb_embedding_model/dataset/resdiue_assembly_embedding_from_structure.py,sha256=d8C7HRJBZWuOKhPQpihv1koT4aIvyt5QN2yndC2ABuE,2842
8
+ rcsb_embedding_model/dataset/residue_assembly_embedding_from_tensor_file.py,sha256=KXiohnPjjfZEFbPZQ46HGE8eEYWrVX8bfbTz4zPlo7o,3451
9
+ rcsb_embedding_model/dataset/residue_embedding_from_tensor_file.py,sha256=cOxT--Spkel10JJCeGlqgLXN5vNCZzPdfSxDgUSrdPI,1268
10
+ rcsb_embedding_model/inference/assembly_inferece.py,sha256=MPssN5bsOqOU-LGwa6AKX99cv5LD43Mnbaqhuuww1Tw,2165
11
+ rcsb_embedding_model/inference/chain_inference.py,sha256=R9gi0MZ_HaM3v9c433W_5w4suse4nJmy4SgUTHJVZLg,1713
12
+ rcsb_embedding_model/inference/esm_inference.py,sha256=oVN4r9_6V8TS0pYoNn7GR92Xo0Zn7eBsnt_OfDSaH6g,2126
13
+ rcsb_embedding_model/inference/structure_inference.py,sha256=QIUEo8eEc-kTSYKGdlX2rxT74huw4ZAw6U8Px9kYajE,2216
14
+ rcsb_embedding_model/model/layers.py,sha256=lhKaWC4gTS_T5lHOP0mgnnP8nKTPEOm4MrjhESA4hE8,743
15
+ rcsb_embedding_model/model/residue_embedding_aggregator.py,sha256=k3UW63Ax8DtjCMdD3O5xNxtyAu28l2n3-Ab6nS0atm0,1967
16
+ rcsb_embedding_model/modules/chain_module.py,sha256=sDSPXJmWuU2C3lt1NorlbUVWZvRSLzumPdFQk01h3VI,403
17
+ rcsb_embedding_model/modules/esm_module.py,sha256=CTHGOATXiarqZsBsZ8oxGJBj20A73186Slpr0EzMJsE,770
18
+ rcsb_embedding_model/modules/structure_module.py,sha256=dEtDNdWo1j2sSDa0JiOHQfEfQzIWqSLEKpvOX0GrXZ4,1048
19
+ rcsb_embedding_model/types/api_types.py,sha256=3sPh33yb3Ya9r3O5vuiTfhb1WyFuhQWCQmewSbqEyG0,1076
20
+ rcsb_embedding_model/utils/data.py,sha256=x6ca_bVdBXEAp9ugCi1rVEQ-G5nGTFKpzDKqZKpkFBE,2933
21
+ rcsb_embedding_model/utils/model.py,sha256=rpZa-gfm3cEtbBd7UXMHrZv3x6f0AC8TJT3gtrSxr5I,852
22
+ rcsb_embedding_model/utils/structure_parser.py,sha256=jat4SCtPHYMZ6JJR-T7lPQoMbT_E8CwYSGDNSZjG86U,2697
23
+ rcsb_embedding_model/utils/structure_provider.py,sha256=eWtxjkPpmRfmil_DKR1J6miaXR3lQ28DF5O0qrqSgGA,786
24
+ rcsb_embedding_model/writer/batch_writer.py,sha256=ekgzFZyoKpcnZ3IDP9hfOWBpuHxUQ31P35ViDAi-Edw,2843
25
+ rcsb_embedding_model-0.0.9.dist-info/METADATA,sha256=QfzpBYhNIOBNOjOWkV1p4pDaFFXPmXIn9CThzR23oqo,5366
26
+ rcsb_embedding_model-0.0.9.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
27
+ rcsb_embedding_model-0.0.9.dist-info/entry_points.txt,sha256=MK11jTIEmaV-x4CkPX5IymDaVs7Ky_f2xxU8BJVZ_9Q,69
28
+ rcsb_embedding_model-0.0.9.dist-info/licenses/LICENSE.md,sha256=oUaHiKgfBkChth_Sm67WemEvatO1U0Go8LHjaskXY0w,1522
29
+ rcsb_embedding_model-0.0.9.dist-info/RECORD,,
@@ -1,90 +0,0 @@
1
- import argparse
2
-
3
- import torch
4
- from biotite.structure import chain_iter
5
- from esm.models.esm3 import ESM3
6
- from esm.sdk.api import ESMProtein, SamplingConfig
7
- from esm.utils.constants.models import ESM3_OPEN_SMALL
8
- from esm.utils.structure.protein_chain import ProteinChain
9
- from torch.utils.data import Dataset, DataLoader
10
- import pandas as pd
11
-
12
- from rcsb_embedding_model.types.api_types import SrcFormat, SrcLocation
13
- from rcsb_embedding_model.utils.data import stringio_from_url
14
- from rcsb_embedding_model.utils.structure_parser import get_structure_from_src
15
-
16
-
17
- class EsmProtFromCsv(Dataset):
18
-
19
- MIN_RES = 10
20
- STREAM_ATTR = 'stream'
21
- CH_ATTR = 'chain_id'
22
- NAME_ATTR = 'name'
23
-
24
- COLUMNS = [STREAM_ATTR, CH_ATTR, NAME_ATTR]
25
-
26
- def __init__(
27
- self,
28
- csv_file,
29
- src_location=SrcLocation.local,
30
- src_format=SrcFormat.mmcif,
31
- ):
32
- super().__init__()
33
- self.src_location = src_location
34
- self.src_format = src_format
35
- self.data = pd.DataFrame()
36
- self.__load_stream(csv_file)
37
-
38
- def __load_stream(self, stream_list):
39
- self.data = pd.read_csv(
40
- stream_list,
41
- header=None,
42
- index_col=None,
43
- names=EsmProtFromCsv.COLUMNS
44
- )
45
-
46
- def __len__(self):
47
- return len(self.data)
48
-
49
- def __getitem__(self, idx):
50
- src_structure = self.data.loc[idx, EsmProtFromCsv.STREAM_ATTR]
51
- chain_id = self.data.loc[idx, EsmProtFromCsv.CH_ATTR]
52
- name = self.data.loc[idx, EsmProtFromCsv.NAME_ATTR]
53
- structure = get_structure_from_src(
54
- src_structure=src_structure if self.src_location == SrcLocation.local else stringio_from_url(src_structure),
55
- src_format=self.src_format,
56
- chain_id=chain_id
57
- )
58
- for atom_ch in chain_iter(structure):
59
- protein_chain = ProteinChain.from_atomarray(atom_ch)
60
- return ESMProtein.from_protein_chain(protein_chain), name
61
-
62
-
63
- if __name__ == '__main__':
64
-
65
- parser = argparse.ArgumentParser()
66
- parser.add_argument('--file_list', type=argparse.FileType('r'), required=True)
67
- args = parser.parse_args()
68
-
69
- dataset = EsmProtFromCsv(
70
- args.file_list
71
- )
72
-
73
- esm3 = ESM3.from_pretrained(
74
- ESM3_OPEN_SMALL,
75
- torch.device("cpu")
76
- )
77
-
78
- dataloader = DataLoader(
79
- dataset,
80
- batch_size=2,
81
- collate_fn=lambda _: _
82
- )
83
-
84
- for _batch in dataloader:
85
- for esm_prot, name in _batch:
86
- protein_tensor = esm3.encode(esm_prot)
87
- embeddings = esm3.forward_and_sample(
88
- protein_tensor, SamplingConfig(return_per_residue_embeddings=True)
89
- ).per_residue_embedding
90
- print(name, embeddings.shape)
@@ -1,32 +0,0 @@
1
- import pandas as pd
2
- import torch
3
- from torch.utils.data import Dataset
4
-
5
-
6
- class ResidueEmbeddingFromCSV(Dataset):
7
-
8
- STREAM_ATTR = 'stream'
9
- NAME_ATTR = 'name'
10
-
11
- COLUMNS = [STREAM_ATTR, NAME_ATTR]
12
-
13
- def __init__(self, csv_file):
14
- super().__init__()
15
- self.data = pd.DataFrame()
16
- self.__load_stream(csv_file)
17
-
18
- def __load_stream(self, csv_file):
19
- self.data = pd.read_csv(
20
- csv_file,
21
- header=None,
22
- index_col=None,
23
- names=ResidueEmbeddingFromCSV.COLUMNS
24
- )
25
-
26
- def __len__(self):
27
- return len(self.data)
28
-
29
- def __getitem__(self, idx):
30
- embedding_src = self.data.loc[idx, ResidueEmbeddingFromCSV.STREAM_ATTR]
31
- name = self.data.loc[idx, ResidueEmbeddingFromCSV.NAME_ATTR]
32
- return torch.load(embedding_src, map_location=torch.device('cpu')), name
@@ -1,24 +0,0 @@
1
- rcsb_embedding_model/__init__.py,sha256=r3gLdeBIXkQEQA_K6QcRPO-TtYuAQSutk6pXRUE_nas,120
2
- rcsb_embedding_model/rcsb_structure_embedding.py,sha256=qGUEdRPjYbsFWThsQa_ZVaSJ7nURnfRBLBqJlLbcY0I,4433
3
- rcsb_embedding_model/cli/args_utils.py,sha256=7nP2q8pL5dWK_U7opxtWmoFcYVwasky6elHk-dASFaI,165
4
- rcsb_embedding_model/cli/inference.py,sha256=sx8cGiq_japc0mKFarK1aVkGfK-FhTeZdn_Ng0ijezE,6590
5
- rcsb_embedding_model/dataset/esm_prot_from_csv.py,sha256=1XMiYyJXfodXZGSrU07uyoYbdKR9-KvNfb1xNqab_W8,2722
6
- rcsb_embedding_model/dataset/residue_embedding_from_csv.py,sha256=0-5L64tyER-RpT166pC71qxOpUdVZbcuBQONPcAIuno,862
7
- rcsb_embedding_model/inference/chain_inference.py,sha256=SgXDa-TkDcvlkQxqEwDt81RdE7NmgiaJD8uaROgMbl8,1506
8
- rcsb_embedding_model/inference/esm_inference.py,sha256=pX-_RhzAIvL0Zdg9wjScLBP6Y1sq4RLNio4-vdR5MLU,1498
9
- rcsb_embedding_model/inference/structure_inference.py,sha256=qPzAGWyzFWqeKV9yoPSw4LrEB9XgKTJnRQysSBhfg14,1564
10
- rcsb_embedding_model/model/layers.py,sha256=lhKaWC4gTS_T5lHOP0mgnnP8nKTPEOm4MrjhESA4hE8,743
11
- rcsb_embedding_model/model/residue_embedding_aggregator.py,sha256=k3UW63Ax8DtjCMdD3O5xNxtyAu28l2n3-Ab6nS0atm0,1967
12
- rcsb_embedding_model/modules/chain_module.py,sha256=sDSPXJmWuU2C3lt1NorlbUVWZvRSLzumPdFQk01h3VI,403
13
- rcsb_embedding_model/modules/esm_module.py,sha256=CTHGOATXiarqZsBsZ8oxGJBj20A73186Slpr0EzMJsE,770
14
- rcsb_embedding_model/modules/structure_module.py,sha256=dEtDNdWo1j2sSDa0JiOHQfEfQzIWqSLEKpvOX0GrXZ4,1048
15
- rcsb_embedding_model/types/api_types.py,sha256=x7274MyjkRXn8B-W-PY5PK9g0CP1pT_clZbrAuFuHPA,626
16
- rcsb_embedding_model/utils/data.py,sha256=LGw3wvq_LCcqSovHZacOqxEczn12SZk2i51WK9xkk0k,1877
17
- rcsb_embedding_model/utils/model.py,sha256=rpZa-gfm3cEtbBd7UXMHrZv3x6f0AC8TJT3gtrSxr5I,852
18
- rcsb_embedding_model/utils/structure_parser.py,sha256=0lcjCuQMCh0lb3OMj76rqf7kACzJgOwdk3EZ7-ZOQfI,1492
19
- rcsb_embedding_model/writer/batch_writer.py,sha256=ekgzFZyoKpcnZ3IDP9hfOWBpuHxUQ31P35ViDAi-Edw,2843
20
- rcsb_embedding_model-0.0.7.dist-info/METADATA,sha256=mfl1YYB48Um5FdZZkHOwzzMPRvsw_HlFHeqXsCGWs0Q,4959
21
- rcsb_embedding_model-0.0.7.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
22
- rcsb_embedding_model-0.0.7.dist-info/entry_points.txt,sha256=MK11jTIEmaV-x4CkPX5IymDaVs7Ky_f2xxU8BJVZ_9Q,69
23
- rcsb_embedding_model-0.0.7.dist-info/licenses/LICENSE.md,sha256=oUaHiKgfBkChth_Sm67WemEvatO1U0Go8LHjaskXY0w,1522
24
- rcsb_embedding_model-0.0.7.dist-info/RECORD,,